Diffstat (limited to 'drivers/net/ethernet')
304 files changed, 34417 insertions, 11670 deletions
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index bb032be7fe31..4cd53fc338b5 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -730,12 +730,12 @@ static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); struct device_node *np = priv->device->of_node; - int ret = 0; + int ret; - priv->phy_iface = of_get_phy_mode(np); + ret = of_get_phy_mode(np, &priv->phy_iface); /* Avoid get phy addr and create mdio if no phy is present */ - if (!priv->phy_iface) + if (ret) return 0; /* try to get PHY address from device tree, use PHY autodetection if diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile index 0020726db204..6e0a6e234483 100644 --- a/drivers/net/ethernet/aquantia/atlantic/Makefile +++ b/drivers/net/ethernet/aquantia/atlantic/Makefile @@ -4,15 +4,8 @@ # aQuantia Ethernet Controller AQtion Linux Driver # Copyright(c) 2014-2017 aQuantia Corporation. # -# Contact Information: <rdc-drv@aquantia.com> -# aQuantia Corporation, 105 E. Tasman Dr. San Jose, CA 95134, USA -# ################################################################################ -# -# Makefile for the AQtion(tm) Ethernet driver -# - obj-$(CONFIG_AQTION) += atlantic.o atlantic-objs := aq_main.o \ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 8c633caf79d2..f0c41f7408e5 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -70,14 +70,11 @@ /*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/ -#define AQ_NIC_FC_OFF 0U -#define AQ_NIC_FC_TX 1U -#define AQ_NIC_FC_RX 2U -#define AQ_NIC_FC_FULL 3U -#define AQ_NIC_FC_AUTO 4U - #define AQ_CFG_FC_MODE AQ_NIC_FC_FULL +/* Default WOL modes used on initialization */ +#define AQ_CFG_WOL_MODES WAKE_MAGIC + #define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */ #define AQ_CFG_IS_AUTONEG_DEF 1U diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 1ae8aabcc41a..a1f99bef4a68 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -18,7 +18,9 @@ static void aq_ethtool_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - u32 regs_count = aq_nic_get_regs_count(aq_nic); + u32 regs_count; + + regs_count = aq_nic_get_regs_count(aq_nic); memset(p, 0, regs_count * sizeof(u32)); aq_nic_get_regs(aq_nic, regs, p); @@ -27,7 +29,9 @@ static void aq_ethtool_get_regs(struct net_device *ndev, static int aq_ethtool_get_regs_len(struct net_device *ndev) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - u32 regs_count = aq_nic_get_regs_count(aq_nic); + u32 regs_count; + + regs_count = aq_nic_get_regs_count(aq_nic); return regs_count * sizeof(u32); } @@ -92,11 +96,21 @@ static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = { "Queue[%d] InErrors", }; +static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = { + "DMASystemLoopback", + "PKTSystemLoopback", + "DMANetworkLoopback", + "PHYInternalLoopback", + "PHYExternalLoopback", +}; + static void aq_ethtool_stats(struct net_device *ndev, struct ethtool_stats *stats, u64 *data) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - struct 
aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + struct aq_nic_cfg_s *cfg; + + cfg = aq_nic_get_cfg(aq_nic); memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) + ARRAY_SIZE(aq_ethtool_queue_stat_names) * @@ -107,11 +121,15 @@ static void aq_ethtool_stats(struct net_device *ndev, static void aq_ethtool_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *drvinfo) { - struct aq_nic_s *aq_nic = netdev_priv(ndev); - struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); struct pci_dev *pdev = to_pci_dev(ndev->dev.parent); - u32 firmware_version = aq_nic_get_fw_version(aq_nic); - u32 regs_count = aq_nic_get_regs_count(aq_nic); + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg; + u32 firmware_version; + u32 regs_count; + + cfg = aq_nic_get_cfg(aq_nic); + firmware_version = aq_nic_get_fw_version(aq_nic); + regs_count = aq_nic_get_regs_count(aq_nic); strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver)); strlcat(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version)); @@ -132,12 +150,15 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev, static void aq_ethtool_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { - int i, si; struct aq_nic_s *aq_nic = netdev_priv(ndev); - struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + struct aq_nic_cfg_s *cfg; u8 *p = data; + int i, si; - if (stringset == ETH_SS_STATS) { + cfg = aq_nic_get_cfg(aq_nic); + + switch (stringset) { + case ETH_SS_STATS: memcpy(p, aq_ethtool_stat_names, sizeof(aq_ethtool_stat_names)); p = p + sizeof(aq_ethtool_stat_names); @@ -150,23 +171,63 @@ static void aq_ethtool_get_strings(struct net_device *ndev, p += ETH_GSTRING_LEN; } } + break; + case ETH_SS_PRIV_FLAGS: + memcpy(p, aq_ethtool_priv_flag_names, + sizeof(aq_ethtool_priv_flag_names)); + break; } } -static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) +static int aq_ethtool_set_phys_id(struct net_device *ndev, + enum ethtool_phys_id_state state) { + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_hw_s *hw = aq_nic->aq_hw; int ret = 0; + + if (!aq_nic->aq_fw_ops->led_control) + return -EOPNOTSUPP; + + mutex_lock(&aq_nic->fwreq_mutex); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + ret = aq_nic->aq_fw_ops->led_control(hw, AQ_HW_LED_BLINK | + AQ_HW_LED_BLINK << 2 | AQ_HW_LED_BLINK << 4); + break; + case ETHTOOL_ID_INACTIVE: + ret = aq_nic->aq_fw_ops->led_control(hw, AQ_HW_LED_DEFAULT); + break; + default: + break; + } + + mutex_unlock(&aq_nic->fwreq_mutex); + + return ret; +} + +static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) +{ struct aq_nic_s *aq_nic = netdev_priv(ndev); - struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + struct aq_nic_cfg_s *cfg; + int ret = 0; + + cfg = aq_nic_get_cfg(aq_nic); switch (stringset) { case ETH_SS_STATS: ret = ARRAY_SIZE(aq_ethtool_stat_names) + cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names); break; + case ETH_SS_PRIV_FLAGS: + ret = ARRAY_SIZE(aq_ethtool_priv_flag_names); + break; default: ret = -EOPNOTSUPP; } + return ret; } @@ -178,7 +239,9 @@ static u32 aq_ethtool_get_rss_indir_size(struct net_device *ndev) static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + struct aq_nic_cfg_s *cfg; + + cfg = aq_nic_get_cfg(aq_nic); return sizeof(cfg->aq_rss.hash_secret_key); } @@ -187,9 +250,11 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key, u8 *hfunc) { struct aq_nic_s *aq_nic = 
netdev_priv(ndev); - struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + struct aq_nic_cfg_s *cfg; unsigned int i = 0U; + cfg = aq_nic_get_cfg(aq_nic); + if (hfunc) *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ if (indir) { @@ -199,6 +264,7 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key, if (key) memcpy(key, cfg->aq_rss.hash_secret_key, sizeof(cfg->aq_rss.hash_secret_key)); + return 0; } @@ -242,9 +308,11 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev, u32 *rule_locs) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + struct aq_nic_cfg_s *cfg; int err = 0; + cfg = aq_nic_get_cfg(aq_nic); + switch (cmd->cmd) { case ETHTOOL_GRXRINGS: cmd->data = cfg->vecs; @@ -269,8 +337,8 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev, static int aq_ethtool_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd) { - int err = 0; struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; switch (cmd->cmd) { case ETHTOOL_SRXCLSRLINS: @@ -291,7 +359,9 @@ static int aq_ethtool_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + struct aq_nic_cfg_s *cfg; + + cfg = aq_nic_get_cfg(aq_nic); if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON || cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) { @@ -305,6 +375,7 @@ static int aq_ethtool_get_coalesce(struct net_device *ndev, coal->rx_max_coalesced_frames = 1; coal->tx_max_coalesced_frames = 1; } + return 0; } @@ -312,7 +383,9 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + struct aq_nic_cfg_s *cfg; + + cfg = aq_nic_get_cfg(aq_nic); /* This is not yet supported */ @@ -354,13 +427,12 @@ static void aq_ethtool_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + struct aq_nic_cfg_s *cfg; - wol->supported = WAKE_MAGIC; - wol->wolopts = 0; + cfg = aq_nic_get_cfg(aq_nic); - if (cfg->wol) - wol->wolopts |= WAKE_MAGIC; + wol->supported = AQ_NIC_WOL_MODES; + wol->wolopts = cfg->wol; } static int aq_ethtool_set_wol(struct net_device *ndev, @@ -368,14 +440,17 @@ static int aq_ethtool_set_wol(struct net_device *ndev, { struct pci_dev *pdev = to_pci_dev(ndev->dev.parent); struct aq_nic_s *aq_nic = netdev_priv(ndev); - struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + struct aq_nic_cfg_s *cfg; int err = 0; - if (wol->wolopts & WAKE_MAGIC) - cfg->wol |= AQ_NIC_WOL_ENABLED; - else - cfg->wol &= ~AQ_NIC_WOL_ENABLED; - err = device_set_wakeup_enable(&pdev->dev, wol->wolopts); + cfg = aq_nic_get_cfg(aq_nic); + + if (wol->wolopts & ~AQ_NIC_WOL_MODES) + return -EOPNOTSUPP; + + cfg->wol = wol->wolopts; + + err = device_set_wakeup_enable(&pdev->dev, !!cfg->wol); return err; } @@ -513,7 +588,7 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - u32 fc = aq_nic->aq_nic_cfg.flow_control; + u32 fc = aq_nic->aq_nic_cfg.fc.req; pause->autoneg = 0; @@ -535,14 +610,14 @@ static int aq_ethtool_set_pauseparam(struct net_device *ndev, return -EOPNOTSUPP; if (pause->rx_pause) - aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_RX; + aq_nic->aq_hw->aq_nic_cfg->fc.req |= AQ_NIC_FC_RX; else - aq_nic->aq_hw->aq_nic_cfg->flow_control 
&= ~AQ_NIC_FC_RX; + aq_nic->aq_hw->aq_nic_cfg->fc.req &= ~AQ_NIC_FC_RX; if (pause->tx_pause) - aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_TX; + aq_nic->aq_hw->aq_nic_cfg->fc.req |= AQ_NIC_FC_TX; else - aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX; + aq_nic->aq_hw->aq_nic_cfg->fc.req &= ~AQ_NIC_FC_TX; mutex_lock(&aq_nic->fwreq_mutex); err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw); @@ -555,23 +630,28 @@ static void aq_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *ring) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic); + struct aq_nic_cfg_s *cfg; - ring->rx_pending = aq_nic_cfg->rxds; - ring->tx_pending = aq_nic_cfg->txds; + cfg = aq_nic_get_cfg(aq_nic); - ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max; - ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max; + ring->rx_pending = cfg->rxds; + ring->tx_pending = cfg->txds; + + ring->rx_max_pending = cfg->aq_hw_caps->rxds_max; + ring->tx_max_pending = cfg->aq_hw_caps->txds_max; } static int aq_set_ringparam(struct net_device *ndev, struct ethtool_ringparam *ring) { - int err = 0; - bool ndev_running = false; struct aq_nic_s *aq_nic = netdev_priv(ndev); - struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic); - const struct aq_hw_caps_s *hw_caps = aq_nic_cfg->aq_hw_caps; + const struct aq_hw_caps_s *hw_caps; + bool ndev_running = false; + struct aq_nic_cfg_s *cfg; + int err = 0; + + cfg = aq_nic_get_cfg(aq_nic); + hw_caps = cfg->aq_hw_caps; if (ring->rx_mini_pending || ring->rx_jumbo_pending) { err = -EOPNOTSUPP; @@ -585,18 +665,18 @@ static int aq_set_ringparam(struct net_device *ndev, aq_nic_free_vectors(aq_nic); - aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min); - aq_nic_cfg->rxds = min(aq_nic_cfg->rxds, hw_caps->rxds_max); - aq_nic_cfg->rxds = ALIGN(aq_nic_cfg->rxds, AQ_HW_RXD_MULTIPLE); + cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min); + cfg->rxds = min(cfg->rxds, hw_caps->rxds_max); + cfg->rxds = ALIGN(cfg->rxds, AQ_HW_RXD_MULTIPLE); - aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min); - aq_nic_cfg->txds = min(aq_nic_cfg->txds, hw_caps->txds_max); - aq_nic_cfg->txds = ALIGN(aq_nic_cfg->txds, AQ_HW_TXD_MULTIPLE); + cfg->txds = max(ring->tx_pending, hw_caps->txds_min); + cfg->txds = min(cfg->txds, hw_caps->txds_max); + cfg->txds = ALIGN(cfg->txds, AQ_HW_TXD_MULTIPLE); - for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < aq_nic_cfg->vecs; + for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < cfg->vecs; aq_nic->aq_vecs++) { aq_nic->aq_vec[aq_nic->aq_vecs] = - aq_vec_alloc(aq_nic, aq_nic->aq_vecs, aq_nic_cfg); + aq_vec_alloc(aq_nic, aq_nic->aq_vecs, cfg); if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) { err = -ENOMEM; goto err_exit; @@ -609,12 +689,61 @@ err_exit: return err; } +static u32 aq_get_msg_level(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + return aq_nic->msg_enable; +} + +static void aq_set_msg_level(struct net_device *ndev, u32 data) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + aq_nic->msg_enable = data; +} + +static u32 aq_ethtool_get_priv_flags(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + return aq_nic->aq_nic_cfg.priv_flags; +} + +static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg; + u32 priv_flags; + + cfg = aq_nic_get_cfg(aq_nic); + priv_flags = cfg->priv_flags; + + if (flags & ~AQ_PRIV_FLAGS_MASK) + return -EOPNOTSUPP; 
+ + cfg->priv_flags = flags; + + if ((priv_flags ^ flags) & BIT(AQ_HW_LOOPBACK_DMA_NET)) { + if (netif_running(ndev)) { + dev_close(ndev); + + dev_open(ndev, NULL); + } + } else if ((priv_flags ^ flags) & AQ_HW_LOOPBACK_MASK) { + aq_nic_set_loopback(aq_nic); + } + + return 0; +} + const struct ethtool_ops aq_ethtool_ops = { .get_link = aq_ethtool_get_link, .get_regs_len = aq_ethtool_get_regs_len, .get_regs = aq_ethtool_get_regs, .get_drvinfo = aq_ethtool_get_drvinfo, .get_strings = aq_ethtool_get_strings, + .set_phys_id = aq_ethtool_set_phys_id, .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, .get_wol = aq_ethtool_get_wol, .set_wol = aq_ethtool_set_wol, @@ -630,8 +759,12 @@ const struct ethtool_ops aq_ethtool_ops = { .set_rxfh = aq_ethtool_set_rss, .get_rxnfc = aq_ethtool_get_rxnfc, .set_rxnfc = aq_ethtool_set_rxnfc, + .get_msglevel = aq_get_msg_level, + .set_msglevel = aq_set_msg_level, .get_sset_count = aq_ethtool_get_sset_count, .get_ethtool_stats = aq_ethtool_stats, + .get_priv_flags = aq_ethtool_get_priv_flags, + .set_priv_flags = aq_ethtool_set_priv_flags, .get_link_ksettings = aq_ethtool_get_link_ksettings, .set_link_ksettings = aq_ethtool_set_link_ksettings, .get_coalesce = aq_ethtool_get_coalesce, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h index 632b5531db4a..6d5be5ebeb13 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h @@ -12,5 +12,6 @@ #include "aq_common.h" extern const struct ethtool_ops aq_ethtool_ops; +#define AQ_PRIV_FLAGS_MASK (AQ_HW_LOOPBACK_MASK) #endif /* AQ_ETHTOOL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 5246cf44ce51..cc70c606b6ef 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -119,6 +119,23 @@ struct aq_stats_s { #define AQ_HW_MULTICAST_ADDRESS_MAX 32U +#define AQ_HW_LED_BLINK 0x2U +#define AQ_HW_LED_DEFAULT 0x0U + +enum aq_priv_flags { + AQ_HW_LOOPBACK_DMA_SYS, + AQ_HW_LOOPBACK_PKT_SYS, + AQ_HW_LOOPBACK_DMA_NET, + AQ_HW_LOOPBACK_PHYINT_SYS, + AQ_HW_LOOPBACK_PHYEXT_SYS, +}; + +#define AQ_HW_LOOPBACK_MASK (BIT(AQ_HW_LOOPBACK_DMA_SYS) |\ + BIT(AQ_HW_LOOPBACK_PKT_SYS) |\ + BIT(AQ_HW_LOOPBACK_DMA_NET) |\ + BIT(AQ_HW_LOOPBACK_PHYINT_SYS) |\ + BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)) + struct aq_hw_s { atomic_t flags; u8 rbl_enabled:1; @@ -137,6 +154,7 @@ struct aq_hw_s { atomic_t dpc; u32 mbox_addr; u32 rpc_addr; + u32 settings_addr; u32 rpc_tid; struct hw_atl_utils_fw_rpc rpc; s64 ptp_clk_offset; @@ -276,6 +294,8 @@ struct aq_hw_ops { u64 *timestamp); int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc); + + int (*hw_set_loopback)(struct aq_hw_s *self, u32 mode, bool enable); }; struct aq_fw_ops { @@ -304,6 +324,10 @@ struct aq_fw_ops { int (*set_flow_control)(struct aq_hw_s *self); + int (*led_control)(struct aq_hw_s *self, u32 mode); + + int (*set_phyloopback)(struct aq_hw_s *self, u32 mode, bool enable); + int (*set_power)(struct aq_hw_s *self, unsigned int power_state, u8 *mac); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c index 9c7a226d81b6..7dbf49adcea6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c @@ -59,6 +59,7 @@ u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg) u64 value = aq_hw_read_reg(hw, reg); value |= 
(u64)aq_hw_read_reg(hw, reg + 4) << 32; + return value; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index a26d4a69efad..538f460a3da7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -53,8 +53,8 @@ struct net_device *aq_ndev_alloc(void) static int aq_ndev_open(struct net_device *ndev) { - int err = 0; struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; err = aq_nic_init(aq_nic); if (err < 0) @@ -74,19 +74,20 @@ static int aq_ndev_open(struct net_device *ndev) err_exit: if (err < 0) - aq_nic_deinit(aq_nic); + aq_nic_deinit(aq_nic, true); + return err; } static int aq_ndev_close(struct net_device *ndev) { - int err = 0; struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; err = aq_nic_stop(aq_nic); if (err < 0) goto err_exit; - aq_nic_deinit(aq_nic); + aq_nic_deinit(aq_nic, true); err_exit: return err; @@ -120,7 +121,9 @@ static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev) static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - int err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN); + int err; + + err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN); if (err < 0) goto err_exit; @@ -133,8 +136,8 @@ err_exit: static int aq_ndev_set_features(struct net_device *ndev, netdev_features_t features) { - bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX); bool is_vlan_tx_insert = !!(features & NETIF_F_HW_VLAN_CTAG_TX); + bool is_vlan_rx_strip = !!(features & NETIF_F_HW_VLAN_CTAG_RX); struct aq_nic_s *aq_nic = netdev_priv(ndev); bool need_ndev_restart = false; struct aq_nic_cfg_s *aq_cfg; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 433adc099e44..a17a4da7bc15 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -41,10 +41,6 @@ static void aq_nic_update_ndev_stats(struct aq_nic_s *self); static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) { - struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; - struct aq_rss_parameters *rss_params = &cfg->aq_rss; - int i = 0; - static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = { 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d, 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18, @@ -52,6 +48,11 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70, 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c }; + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + struct aq_rss_parameters *rss_params; + int i = 0; + + rss_params = &cfg->aq_rss; rss_params->hash_secret_key_size = sizeof(rss_key); memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key)); @@ -78,7 +79,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self) cfg->is_rss = AQ_CFG_IS_RSS_DEF; cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF; - cfg->flow_control = AQ_CFG_FC_MODE; + cfg->fc.req = AQ_CFG_FC_MODE; + cfg->wol = AQ_CFG_WOL_MODES; cfg->mtu = AQ_CFG_MTU_DEF; cfg->link_speed_msk = AQ_CFG_SPEED_MSK; @@ -142,10 +144,14 @@ static int aq_nic_update_link_status(struct aq_nic_s *self) if (err) return err; + if (self->aq_fw_ops->get_flow_control) + self->aq_fw_ops->get_flow_control(self->aq_hw, &fc); + self->aq_nic_cfg.fc.cur = fc; + if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) { - 
pr_info("%s: link change old %d new %d\n", - AQ_CFG_DRV_NAME, self->link_status.mbps, - self->aq_hw->aq_link_status.mbps); + netdev_info(self->ndev, "%s: link change old %d new %d\n", + AQ_CFG_DRV_NAME, self->link_status.mbps, + self->aq_hw->aq_link_status.mbps); aq_nic_update_interrupt_moderation_settings(self); if (self->aq_ptp) { @@ -159,8 +165,6 @@ static int aq_nic_update_link_status(struct aq_nic_s *self) * on any link event. * We should query FW whether it negotiated FC. */ - if (self->aq_fw_ops->get_flow_control) - self->aq_fw_ops->get_flow_control(self->aq_hw, &fc); if (self->aq_hw_ops->hw_set_fc) self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0); } @@ -179,6 +183,7 @@ static int aq_nic_update_link_status(struct aq_nic_s *self) netif_tx_disable(self->ndev); aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN); } + return 0; } @@ -193,6 +198,7 @@ static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private) self->aq_hw_ops->hw_irq_enable(self->aq_hw, BIT(self->aq_nic_cfg.link_irq_vec)); + return IRQ_HANDLED; } @@ -223,7 +229,8 @@ static void aq_nic_service_timer_cb(struct timer_list *t) { struct aq_nic_s *self = from_timer(self, t, service_timer); - mod_timer(&self->service_timer, jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL); + mod_timer(&self->service_timer, + jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL); aq_ndev_schedule_work(&self->service_task); } @@ -302,9 +309,11 @@ void aq_nic_ndev_init(struct aq_nic_s *self) self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO | NETIF_F_TSO; + self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4; self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK; self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN; @@ -324,8 +333,8 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self) int aq_nic_init(struct aq_nic_s *self) { struct aq_vec_s *aq_vec = NULL; - int err = 0; unsigned int i = 0U; + int err = 0; self->power_state = AQ_HW_POWER_STATE_D0; mutex_lock(&self->fwreq_mutex); @@ -369,8 +378,8 @@ err_exit: int aq_nic_start(struct aq_nic_s *self) { struct aq_vec_s *aq_vec = NULL; - int err = 0; unsigned int i = 0U; + int err = 0; err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, self->mc_list.ar, @@ -404,6 +413,8 @@ int aq_nic_start(struct aq_nic_s *self) INIT_WORK(&self->service_task, aq_nic_service_task); + aq_nic_set_loopback(self); + timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0); aq_nic_service_timer_cb(&self->service_timer); @@ -460,26 +471,45 @@ err_exit: unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, struct aq_ring_s *ring) { - unsigned int ret = 0U; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; - unsigned int frag_count = 0U; - unsigned int dx = ring->sw_tail; struct aq_ring_buff_s *first = NULL; - struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx]; + u8 ipver = ip_hdr(skb)->version; + struct aq_ring_buff_s *dx_buff; bool need_context_tag = false; + unsigned int frag_count = 0U; + unsigned int ret = 0U; + unsigned int dx; + u8 l4proto = 0; + + if (ipver == 4) + l4proto = ip_hdr(skb)->protocol; + else if (ipver == 6) + l4proto = ipv6_hdr(skb)->nexthdr; + dx = ring->sw_tail; + dx_buff = &ring->buff_ring[dx]; dx_buff->flags = 0U; if (unlikely(skb_is_gso(skb))) { dx_buff->mss = skb_shinfo(skb)->gso_size; - dx_buff->is_gso = 1U; + if (l4proto == IPPROTO_TCP) { + dx_buff->is_gso_tcp = 1U; + 
dx_buff->len_l4 = tcp_hdrlen(skb); + } else if (l4proto == IPPROTO_UDP) { + dx_buff->is_gso_udp = 1U; + dx_buff->len_l4 = sizeof(struct udphdr); + /* UDP GSO Hardware does not replace packet length. */ + udp_hdr(skb)->len = htons(dx_buff->mss + + dx_buff->len_l4); + } else { + WARN_ONCE(true, "Bad GSO mode"); + goto exit; + } dx_buff->len_pkt = skb->len; dx_buff->len_l2 = ETH_HLEN; - dx_buff->len_l3 = ip_hdrlen(skb); - dx_buff->len_l4 = tcp_hdrlen(skb); + dx_buff->len_l3 = skb_network_header_len(skb); dx_buff->eop_index = 0xffffU; - dx_buff->is_ipv6 = - (ip_hdr(skb)->version == 6) ? 1U : 0U; + dx_buff->is_ipv6 = (ipver == 6); need_context_tag = true; } @@ -513,24 +543,9 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, ++ret; if (skb->ip_summed == CHECKSUM_PARTIAL) { - dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? - 1U : 0U; - - if (ip_hdr(skb)->version == 4) { - dx_buff->is_tcp_cso = - (ip_hdr(skb)->protocol == IPPROTO_TCP) ? - 1U : 0U; - dx_buff->is_udp_cso = - (ip_hdr(skb)->protocol == IPPROTO_UDP) ? - 1U : 0U; - } else if (ip_hdr(skb)->version == 6) { - dx_buff->is_tcp_cso = - (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ? - 1U : 0U; - dx_buff->is_udp_cso = - (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ? - 1U : 0U; - } + dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol); + dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP); + dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP); } for (; nr_frags--; ++frag_count) { @@ -585,7 +600,8 @@ mapping_error: --ret, dx = aq_ring_next_dx(ring, dx)) { dx_buff = &ring->buff_ring[dx]; - if (!dx_buff->is_gso && !dx_buff->is_vlan && dx_buff->pa) { + if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) && + !dx_buff->is_vlan && dx_buff->pa) { if (unlikely(dx_buff->is_sop)) { dma_unmap_single(aq_nic_get_dev(self), dx_buff->pa, @@ -606,11 +622,11 @@ exit: int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) { + unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; struct aq_ring_s *ring = NULL; unsigned int frags = 0U; - unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; - unsigned int tc = 0U; int err = NETDEV_TX_OK; + unsigned int tc = 0U; frags = skb_shinfo(skb)->nr_frags + 1; @@ -623,6 +639,11 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) aq_ring_update_queue_state(ring); + if (self->aq_nic_cfg.priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) { + err = NETDEV_TX_BUSY; + goto err_exit; + } + /* Above status update may stop the queue. Check this. 
*/ if (__netif_subqueue_stopped(self->ndev, ring->idx)) { err = NETDEV_TX_BUSY; @@ -703,6 +724,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) if (err < 0) return err; } + return aq_nic_set_packet_filter(self, packet_filter); } @@ -747,10 +769,10 @@ int aq_nic_get_regs_count(struct aq_nic_s *self) void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) { - unsigned int i = 0U; - unsigned int count = 0U; struct aq_vec_s *aq_vec = NULL; struct aq_stats_s *stats; + unsigned int count = 0U; + unsigned int i = 0U; if (self->aq_fw_ops->update_stats) { mutex_lock(&self->fwreq_mutex); @@ -800,8 +822,8 @@ err_exit:; static void aq_nic_update_ndev_stats(struct aq_nic_s *self) { - struct net_device *ndev = self->ndev; struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw); + struct net_device *ndev = self->ndev; ndev->stats.rx_packets = stats->dma_pkt_rc; ndev->stats.rx_bytes = stats->dma_oct_rc; @@ -846,9 +868,12 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self, ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); - if (self->aq_nic_cfg.aq_hw_caps->flow_control) + if (self->aq_nic_cfg.aq_hw_caps->flow_control) { ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(cmd, supported, + Asym_Pause); + } ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); @@ -882,13 +907,13 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self, ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); - if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX) + if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX) ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); /* Asym is when either RX or TX, but not both */ - if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^ - !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)) + if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^ + !!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)) ethtool_link_ksettings_add_link_mode(cmd, advertising, Asym_Pause); @@ -971,6 +996,44 @@ u32 aq_nic_get_fw_version(struct aq_nic_s *self) return fw_version; } +int aq_nic_set_loopback(struct aq_nic_s *self) +{ + struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; + + if (!self->aq_hw_ops->hw_set_loopback || + !self->aq_fw_ops->set_phyloopback) + return -ENOTSUPP; + + mutex_lock(&self->fwreq_mutex); + self->aq_hw_ops->hw_set_loopback(self->aq_hw, + AQ_HW_LOOPBACK_DMA_SYS, + !!(cfg->priv_flags & + BIT(AQ_HW_LOOPBACK_DMA_SYS))); + + self->aq_hw_ops->hw_set_loopback(self->aq_hw, + AQ_HW_LOOPBACK_PKT_SYS, + !!(cfg->priv_flags & + BIT(AQ_HW_LOOPBACK_PKT_SYS))); + + self->aq_hw_ops->hw_set_loopback(self->aq_hw, + AQ_HW_LOOPBACK_DMA_NET, + !!(cfg->priv_flags & + BIT(AQ_HW_LOOPBACK_DMA_NET))); + + self->aq_fw_ops->set_phyloopback(self->aq_hw, + AQ_HW_LOOPBACK_PHYINT_SYS, + !!(cfg->priv_flags & + BIT(AQ_HW_LOOPBACK_PHYINT_SYS))); + + self->aq_fw_ops->set_phyloopback(self->aq_hw, + AQ_HW_LOOPBACK_PHYEXT_SYS, + !!(cfg->priv_flags & + BIT(AQ_HW_LOOPBACK_PHYEXT_SYS))); + mutex_unlock(&self->fwreq_mutex); + + return 0; +} + int aq_nic_stop(struct aq_nic_s *self) { struct aq_vec_s *aq_vec = NULL; @@ -1000,7 +1063,20 @@ int aq_nic_stop(struct aq_nic_s *self) return self->aq_hw_ops->hw_stop(self->aq_hw); } -void aq_nic_deinit(struct aq_nic_s *self) +void aq_nic_set_power(struct aq_nic_s *self) +{ + if (self->power_state != AQ_HW_POWER_STATE_D0 || + self->aq_hw->aq_nic_cfg->wol) + if (likely(self->aq_fw_ops->set_power)) { + mutex_lock(&self->fwreq_mutex); + 
self->aq_fw_ops->set_power(self->aq_hw, + self->power_state, + self->ndev->dev_addr); + mutex_unlock(&self->fwreq_mutex); + } +} + +void aq_nic_deinit(struct aq_nic_s *self, bool link_down) { struct aq_vec_s *aq_vec = NULL; unsigned int i = 0U; @@ -1017,23 +1093,12 @@ void aq_nic_deinit(struct aq_nic_s *self) aq_ptp_ring_free(self); aq_ptp_free(self); - if (likely(self->aq_fw_ops->deinit)) { + if (likely(self->aq_fw_ops->deinit) && link_down) { mutex_lock(&self->fwreq_mutex); self->aq_fw_ops->deinit(self->aq_hw); mutex_unlock(&self->fwreq_mutex); } - if (self->power_state != AQ_HW_POWER_STATE_D0 || - self->aq_hw->aq_nic_cfg->wol) - if (likely(self->aq_fw_ops->set_power)) { - mutex_lock(&self->fwreq_mutex); - self->aq_fw_ops->set_power(self->aq_hw, - self->power_state, - self->ndev->dev_addr); - mutex_unlock(&self->fwreq_mutex); - } - - err_exit:; } @@ -1054,44 +1119,6 @@ void aq_nic_free_vectors(struct aq_nic_s *self) err_exit:; } -int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg) -{ - int err = 0; - - if (!netif_running(self->ndev)) { - err = 0; - goto out; - } - rtnl_lock(); - if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) { - self->power_state = AQ_HW_POWER_STATE_D3; - netif_device_detach(self->ndev); - netif_tx_stop_all_queues(self->ndev); - - err = aq_nic_stop(self); - if (err < 0) - goto err_exit; - - aq_nic_deinit(self); - } else { - err = aq_nic_init(self); - if (err < 0) - goto err_exit; - - err = aq_nic_start(self); - if (err < 0) - goto err_exit; - - netif_device_attach(self->ndev); - netif_tx_start_all_queues(self->ndev); - } - -err_exit: - rtnl_unlock(); -out: - return err; -} - void aq_nic_shutdown(struct aq_nic_s *self) { int err = 0; @@ -1108,7 +1135,8 @@ void aq_nic_shutdown(struct aq_nic_s *self) if (err < 0) goto err_exit; } - aq_nic_deinit(self); + aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol); + aq_nic_set_power(self); err_exit: rtnl_unlock(); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index c2513b79b9e9..a752f8bb4b08 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -20,6 +20,18 @@ struct aq_vec_s; struct aq_ptp_s; enum aq_rx_filter_type; +enum aq_fc_mode { + AQ_NIC_FC_OFF = 0, + AQ_NIC_FC_TX, + AQ_NIC_FC_RX, + AQ_NIC_FC_FULL, +}; + +struct aq_fc_info { + enum aq_fc_mode req; + enum aq_fc_mode cur; +}; + struct aq_nic_cfg_s { const struct aq_hw_caps_s *aq_hw_caps; u64 features; @@ -34,7 +46,7 @@ struct aq_nic_cfg_s { u32 rxpageorder; u32 num_rss_queues; u32 mtu; - u32 flow_control; + struct aq_fc_info fc; u32 link_speed_msk; u32 wol; u8 is_vlan_rx_strip; @@ -46,6 +58,7 @@ struct aq_nic_cfg_s { bool is_polling; bool is_rss; bool is_lro; + u32 priv_flags; u8 tcs; struct aq_rss_parameters aq_rss; u32 eee_speeds; @@ -60,7 +73,8 @@ struct aq_nic_cfg_s { #define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U #define AQ_NIC_FLAG_ERR_HW 0x80000000U -#define AQ_NIC_WOL_ENABLED BIT(0) +#define AQ_NIC_WOL_MODES (WAKE_MAGIC |\ + WAKE_PHY) #define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \ ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_)) @@ -70,8 +84,8 @@ struct aq_hw_rx_fl2 { }; struct aq_hw_rx_fl3l4 { - u8 active_ipv4; - u8 active_ipv6:2; + u8 active_ipv4; + u8 active_ipv6:2; u8 is_ipv6; u8 reserved_count; }; @@ -87,6 +101,7 @@ struct aq_hw_rx_fltrs_s { struct aq_nic_s { atomic_t flags; + u32 msg_enable; struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX]; struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX]; struct aq_hw_s 
*aq_hw; @@ -141,7 +156,8 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p); int aq_nic_get_regs_count(struct aq_nic_s *self); void aq_nic_get_stats(struct aq_nic_s *self, u64 *data); int aq_nic_stop(struct aq_nic_s *self); -void aq_nic_deinit(struct aq_nic_s *self); +void aq_nic_deinit(struct aq_nic_s *self, bool link_down); +void aq_nic_set_power(struct aq_nic_s *self); void aq_nic_free_hot_resources(struct aq_nic_s *self); void aq_nic_free_vectors(struct aq_nic_s *self); int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu); @@ -155,7 +171,7 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self, const struct ethtool_link_ksettings *cmd); struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); u32 aq_nic_get_fw_version(struct aq_nic_s *self); -int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); +int aq_nic_set_loopback(struct aq_nic_s *self); int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self); void aq_nic_shutdown(struct aq_nic_s *self); u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index e82c96b50373..2bb329606794 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -185,6 +185,7 @@ unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self) return AQ_HW_IRQ_MSIX; if (self->pdev->msi_enabled) return AQ_HW_IRQ_MSI; + return AQ_HW_IRQ_LEGACY; } @@ -196,12 +197,12 @@ static void aq_pci_free_irq_vectors(struct aq_nic_s *self) static int aq_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { - struct aq_nic_s *self; - int err; struct net_device *ndev; resource_size_t mmio_pa; - u32 bar; + struct aq_nic_s *self; u32 numvecs; + u32 bar; + int err; err = pci_enable_device(pdev); if (err) @@ -311,6 +312,7 @@ err_ndev: pci_release_regions(pdev); err_pci_func: pci_disable_device(pdev); + return err; } @@ -347,29 +349,98 @@ static void aq_pci_shutdown(struct pci_dev *pdev) } } -static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg) +static int aq_suspend_common(struct device *dev, bool deep) { - struct aq_nic_s *self = pci_get_drvdata(pdev); + struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev)); + + rtnl_lock(); + + nic->power_state = AQ_HW_POWER_STATE_D3; + netif_device_detach(nic->ndev); + netif_tx_stop_all_queues(nic->ndev); - return aq_nic_change_pm_state(self, &pm_msg); + aq_nic_stop(nic); + + if (deep) { + aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); + aq_nic_set_power(nic); + } + + rtnl_unlock(); + + return 0; } -static int aq_pci_resume(struct pci_dev *pdev) +static int atl_resume_common(struct device *dev, bool deep) { - struct aq_nic_s *self = pci_get_drvdata(pdev); - pm_message_t pm_msg = PMSG_RESTORE; + struct pci_dev *pdev = to_pci_dev(dev); + struct aq_nic_s *nic; + int ret; + + nic = pci_get_drvdata(pdev); + + rtnl_lock(); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (deep) { + ret = aq_nic_init(nic); + if (ret) + goto err_exit; + } + + ret = aq_nic_start(nic); + if (ret) + goto err_exit; + + netif_device_attach(nic->ndev); + netif_tx_start_all_queues(nic->ndev); + +err_exit: + rtnl_unlock(); - return aq_nic_change_pm_state(self, &pm_msg); + return ret; } +static int aq_pm_freeze(struct device *dev) +{ + return aq_suspend_common(dev, false); +} + +static int aq_pm_suspend_poweroff(struct device *dev) +{ + return 
aq_suspend_common(dev, true); +} + +static int aq_pm_thaw(struct device *dev) +{ + return atl_resume_common(dev, false); +} + +static int aq_pm_resume_restore(struct device *dev) +{ + return atl_resume_common(dev, true); +} + +static const struct dev_pm_ops aq_pm_ops = { + .suspend = aq_pm_suspend_poweroff, + .poweroff = aq_pm_suspend_poweroff, + .freeze = aq_pm_freeze, + .resume = aq_pm_resume_restore, + .restore = aq_pm_resume_restore, + .thaw = aq_pm_thaw, +}; + static struct pci_driver aq_pci_ops = { .name = AQ_CFG_DRV_NAME, .id_table = aq_pci_tbl, .probe = aq_pci_probe, .remove = aq_pci_remove, - .suspend = aq_pci_suspend, - .resume = aq_pci_resume, .shutdown = aq_pci_shutdown, +#ifdef CONFIG_PM + .driver.pm = &aq_pm_ops, +#endif }; int aq_pci_func_register_driver(void) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index 8175513e48c9..58e8c641e8b3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -1057,7 +1057,7 @@ static struct ptp_clock_info aq_ptp_clock = { ptp_offset[__idx].ingress = (__ingress); } \ while (0) -static void aq_ptp_offset_init_from_fw(const struct hw_aq_ptp_offset *offsets) +static void aq_ptp_offset_init_from_fw(const struct hw_atl_ptp_offset *offsets) { int i; @@ -1098,7 +1098,7 @@ static void aq_ptp_offset_init_from_fw(const struct hw_aq_ptp_offset *offsets) } } -static void aq_ptp_offset_init(const struct hw_aq_ptp_offset *offsets) +static void aq_ptp_offset_init(const struct hw_atl_ptp_offset *offsets) { memset(ptp_offset, 0, sizeof(ptp_offset)); @@ -1106,7 +1106,7 @@ static void aq_ptp_offset_init(const struct hw_aq_ptp_offset *offsets) } static void aq_ptp_gpio_init(struct ptp_clock_info *info, - struct hw_aq_info *hw_info) + struct hw_atl_info *hw_info) { struct ptp_pin_desc pin_desc[MAX_PTP_GPIO_COUNT]; u32 extts_pin_cnt = 0; @@ -1207,7 +1207,7 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec) aq_ptp->ptp_info = aq_ptp_clock; aq_ptp_gpio_init(&aq_ptp->ptp_info, &mbox.info); clock = ptp_clock_register(&aq_ptp->ptp_info, &aq_nic->ndev->dev); - if (!clock || IS_ERR(clock)) { + if (IS_ERR(clock)) { netdev_err(aq_nic->ndev, "ptp_clock_register failed\n"); err = PTR_ERR(clock); goto err_exit; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index f756cc0bbdf0..951d86f8b66e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -30,8 +30,8 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order, struct device *dev) { struct page *page; - dma_addr_t daddr; int ret = -ENOMEM; + dma_addr_t daddr; page = dev_alloc_pages(order); if (unlikely(!page)) @@ -118,6 +118,7 @@ err_exit: aq_ring_free(self); self = NULL; } + return self; } @@ -144,6 +145,7 @@ err_exit: aq_ring_free(self); self = NULL; } + return self; } @@ -175,6 +177,7 @@ err_exit: aq_ring_free(self); self = NULL; } + return self; } @@ -207,6 +210,7 @@ int aq_ring_init(struct aq_ring_s *self) self->hw_head = 0; self->sw_head = 0; self->sw_tail = 0; + return 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index be3702a4dcc9..991e4d31b094 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -65,19 +65,20 @@ struct __packed aq_ring_buff_s { }; union { struct { - u16 len; + u32 len:16; u32 
is_ip_cso:1; u32 is_udp_cso:1; u32 is_tcp_cso:1; u32 is_cso_err:1; u32 is_sop:1; u32 is_eop:1; - u32 is_gso:1; + u32 is_gso_tcp:1; + u32 is_gso_udp:1; u32 is_mapped:1; u32 is_cleaned:1; u32 is_error:1; u32 is_vlan:1; - u32 rsvd3:5; + u32 rsvd3:4; u16 eop_index; u16 rsvd4; }; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index a95c263a45aa..f40a427970dc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -103,8 +103,8 @@ err_exit: struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg) { - struct aq_vec_s *self = NULL; struct aq_ring_s *ring = NULL; + struct aq_vec_s *self = NULL; unsigned int i = 0U; int err = 0; @@ -159,6 +159,7 @@ err_exit: aq_vec_free(self); self = NULL; } + return self; } @@ -263,6 +264,7 @@ void aq_vec_deinit(struct aq_vec_s *self) aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]); } + err_exit:; } @@ -361,9 +363,9 @@ void aq_vec_add_stats(struct aq_vec_s *self, int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count) { - unsigned int count = 0U; struct aq_ring_stats_rx_s stats_rx; struct aq_ring_stats_tx_s stats_tx; + unsigned int count = 0U; memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 359a4d387185..9b1062b8af64 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -119,10 +119,10 @@ err_exit: static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self) { - u32 tc = 0U; - u32 buff_size = 0U; - unsigned int i_priority = 0U; bool is_rx_flow_control = false; + unsigned int i_priority = 0U; + u32 buff_size = 0U; + u32 tc = 0U; /* TPS Descriptor rate init */ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); @@ -155,7 +155,7 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self) /* QoS Rx buf size per TC */ tc = 0; - is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control); + is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->fc.req); buff_size = HW_ATL_A0_RXBUF_MAX; hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc); @@ -180,9 +180,9 @@ static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self, struct aq_rss_parameters *rss_params) { struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; - int err = 0; - unsigned int i = 0U; unsigned int addr = 0U; + unsigned int i = 0U; + int err = 0; u32 val; for (i = 10, addr = 0U; i--; ++addr) { @@ -207,12 +207,12 @@ err_exit: static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self, struct aq_rss_parameters *rss_params) { - u8 *indirection_table = rss_params->indirection_table; - u32 i = 0U; u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues); - int err = 0; + u8 *indirection_table = rss_params->indirection_table; u16 bitary[1 + (HW_ATL_A0_RSS_REDIRECTION_MAX * HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)]; + int err = 0; + u32 i = 0U; u32 val; memset(bitary, 0, sizeof(bitary)); @@ -321,9 +321,9 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self) static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) { - int err = 0; unsigned int h = 0U; unsigned int l = 0U; + int err = 0; if (!mac_addr) { err = -EINVAL; @@ -352,10 +352,9 @@ static int hw_atl_a0_hw_init(struct 
aq_hw_s *self, u8 *mac_addr) [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U }, [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U }, }; - + struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg; int err = 0; - struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg; hw_atl_a0_hw_init_tx_path(self); hw_atl_a0_hw_init_rx_path(self); @@ -404,6 +403,7 @@ static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring) { hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); } @@ -411,6 +411,7 @@ static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring) { hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); } @@ -418,6 +419,7 @@ static int hw_atl_a0_hw_start(struct aq_hw_s *self) { hw_atl_tpb_tx_buff_en_set(self, 1); hw_atl_rpb_rx_buff_en_set(self, 1); + return aq_hw_err_from_flags(self); } @@ -425,6 +427,7 @@ static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self, struct aq_ring_s *ring) { hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); + return 0; } @@ -435,8 +438,8 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_buff_s *buff = NULL; struct hw_atl_txd_s *txd = NULL; unsigned int buff_pa_len = 0U; - unsigned int pkt_len = 0U; unsigned int frag_count = 0U; + unsigned int pkt_len = 0U; bool is_gso = false; buff = &ring->buff_ring[ring->sw_tail]; @@ -451,7 +454,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self, buff = &ring->buff_ring[ring->sw_tail]; - if (buff->is_gso) { + if (buff->is_gso_tcp) { txd->ctl |= (buff->len_l3 << 31) | (buff->len_l2 << 24) | HW_ATL_A0_TXD_CTL_CMD_TCP | @@ -500,6 +503,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self, } hw_atl_a0_hw_tx_ring_tail_update(self, ring); + return aq_hw_err_from_flags(self); } @@ -507,8 +511,8 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring, struct aq_ring_param_s *aq_ring_param) { - u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx); @@ -549,8 +553,8 @@ static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring, struct aq_ring_param_s *aq_ring_param) { - u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, aq_ring->idx); @@ -599,8 +603,8 @@ static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self, static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self, struct aq_ring_s *ring) { - int err = 0; unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx); + int err = 0; if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) { err = -ENXIO; @@ -720,6 +724,7 @@ static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask) { hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) | (1U << HW_ATL_A0_ERR_INT)); + return aq_hw_err_from_flags(self); } @@ -737,6 +742,7 @@ static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask) static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask) { *mask = hw_atl_itr_irq_statuslsw_get(self); + return aq_hw_err_from_flags(self); } @@ -859,6 +865,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self) static int hw_atl_a0_hw_stop(struct aq_hw_s *self) { 
hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK); + return aq_hw_err_from_flags(self); } @@ -866,6 +873,7 @@ static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring) { hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); } @@ -873,6 +881,7 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring) { hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index c7297ca03624..58e891af6e09 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -43,7 +43,9 @@ NETIF_F_NTUPLE | \ NETIF_F_HW_VLAN_CTAG_FILTER | \ NETIF_F_HW_VLAN_CTAG_RX | \ - NETIF_F_HW_VLAN_CTAG_TX, \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_GSO_UDP_L4 | \ + NETIF_F_GSO_PARTIAL, \ .hw_priv_flags = IFF_UNICAST_FLT, \ .flow_control = true, \ .mtu = HW_ATL_B0_MTU_JUMBO, \ @@ -107,14 +109,15 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self) static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc) { hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc); + return 0; } static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) { - u32 tc = 0U; - u32 buff_size = 0U; unsigned int i_priority = 0U; + u32 buff_size = 0U; + u32 tc = 0U; /* TPS Descriptor rate init */ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); @@ -167,7 +170,7 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self) (1024U / 32U) * 50U) / 100U, tc); - hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc); + hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc); /* Init TC2 for PTP_RX */ tc = 2; @@ -188,9 +191,9 @@ static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self, struct aq_rss_parameters *rss_params) { struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; - int err = 0; - unsigned int i = 0U; unsigned int addr = 0U; + unsigned int i = 0U; + int err = 0; u32 val; for (i = 10, addr = 0U; i--; ++addr) { @@ -215,12 +218,12 @@ err_exit: static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self, struct aq_rss_parameters *rss_params) { - u8 *indirection_table = rss_params->indirection_table; - u32 i = 0U; u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues); - int err = 0; + u8 *indirection_table = rss_params->indirection_table; u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX * HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)]; + int err = 0; + u32 i = 0U; u32 val; memset(bitary, 0, sizeof(bitary)); @@ -304,6 +307,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, hw_atl_itr_rsc_delay_set(self, 1U); } + return aq_hw_err_from_flags(self); } @@ -382,9 +386,9 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self) static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) { - int err = 0; unsigned int h = 0U; unsigned int l = 0U; + int err = 0; if (!mac_addr) { err = -EINVAL; @@ -413,11 +417,10 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr) [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U }, [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U }, }; - + struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg; int err = 0; u32 val; - struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg; hw_atl_b0_hw_init_tx_path(self); hw_atl_b0_hw_init_rx_path(self); @@ -460,8 +463,10 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr) /* Interrupts */ hw_atl_reg_gen_irq_map_set(self, - 
((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) | - ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U); + ((HW_ATL_B0_ERR_INT << 0x18) | + (1U << 0x1F)) | + ((HW_ATL_B0_ERR_INT << 0x10) | + (1U << 0x17)), 0U); /* Enable link interrupt */ if (aq_nic_cfg->link_irq_vec) @@ -478,6 +483,7 @@ static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring) { hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); } @@ -485,6 +491,7 @@ static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring) { hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx); + return aq_hw_err_from_flags(self); } @@ -492,6 +499,7 @@ static int hw_atl_b0_hw_start(struct aq_hw_s *self) { hw_atl_tpb_tx_buff_en_set(self, 1); hw_atl_rpb_rx_buff_en_set(self, 1); + return aq_hw_err_from_flags(self); } @@ -499,6 +507,7 @@ static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self, struct aq_ring_s *ring) { hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx); + return 0; } @@ -509,8 +518,8 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_buff_s *buff = NULL; struct hw_atl_txd_s *txd = NULL; unsigned int buff_pa_len = 0U; - unsigned int pkt_len = 0U; unsigned int frag_count = 0U; + unsigned int pkt_len = 0U; bool is_vlan = false; bool is_gso = false; @@ -526,8 +535,9 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, buff = &ring->buff_ring[ring->sw_tail]; - if (buff->is_gso) { - txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP; + if (buff->is_gso_tcp || buff->is_gso_udp) { + if (buff->is_gso_tcp) + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TCP; txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC; txd->ctl |= (buff->len_l3 << 31) | (buff->len_l2 << 24); @@ -547,7 +557,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, txd->ctl |= buff->vlan_tx_tag << 4; is_vlan = true; } - if (!buff->is_gso && !buff->is_vlan) { + if (!buff->is_gso_tcp && !buff->is_gso_udp && !buff->is_vlan) { buff_pa_len = buff->len; txd->buf_addr = buff->pa; @@ -586,6 +596,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, } hw_atl_b0_hw_tx_ring_tail_update(self, ring); + return aq_hw_err_from_flags(self); } @@ -593,9 +604,9 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring, struct aq_ring_param_s *aq_ring_param) { - u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip; + u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa; hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx); @@ -636,8 +647,8 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring, struct aq_ring_param_s *aq_ring_param) { - u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32); + u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa; hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr, aq_ring->idx); @@ -726,8 +737,10 @@ static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self, static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self, struct aq_ring_s *ring) { + unsigned int hw_head_; int err = 0; - unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx); + + hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx); if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) { err = -ENXIO; @@ -843,6 +856,7 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, static int 
hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask) { hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask)); + return aq_hw_err_from_flags(self); } @@ -852,12 +866,14 @@ static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask) hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask)); atomic_inc(&self->dpc); + return aq_hw_err_from_flags(self); } static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask) { *mask = hw_atl_itr_irq_statuslsw_get(self); + return aq_hw_err_from_flags(self); } @@ -866,8 +882,8 @@ static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask) static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, unsigned int packet_filter) { - unsigned int i = 0U; struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + unsigned int i = 0U; hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); @@ -905,29 +921,30 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, u32 count) { int err = 0; + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) { err = -EBADRQC; goto err_exit; } - for (self->aq_nic_cfg->mc_list_count = 0U; - self->aq_nic_cfg->mc_list_count < count; - ++self->aq_nic_cfg->mc_list_count) { - u32 i = self->aq_nic_cfg->mc_list_count; + for (cfg->mc_list_count = 0U; + cfg->mc_list_count < count; + ++cfg->mc_list_count) { + u32 i = cfg->mc_list_count; u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | (ar_mac[i][4] << 8) | ar_mac[i][5]; hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i); - hw_atl_rpfl2unicast_dest_addresslsw_set(self, - l, HW_ATL_B0_MAC_MIN + i); + hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, + HW_ATL_B0_MAC_MIN + i); - hw_atl_rpfl2unicast_dest_addressmsw_set(self, - h, HW_ATL_B0_MAC_MIN + i); + hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, + HW_ATL_B0_MAC_MIN + i); hw_atl_rpfl2_uc_flr_en_set(self, - (self->aq_nic_cfg->is_mc_list_enabled), + (cfg->is_mc_list_enabled), HW_ATL_B0_MAC_MIN + i); } @@ -1054,6 +1071,7 @@ static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring) { hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); } @@ -1061,6 +1079,7 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring) { hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx); + return aq_hw_err_from_flags(self); } @@ -1427,6 +1446,31 @@ static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable) return aq_hw_err_from_flags(self); } +static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable) +{ + switch (mode) { + case AQ_HW_LOOPBACK_DMA_SYS: + hw_atl_tpb_tx_dma_sys_lbk_en_set(self, enable); + hw_atl_rpb_dma_sys_lbk_set(self, enable); + break; + case AQ_HW_LOOPBACK_PKT_SYS: + hw_atl_tpo_tx_pkt_sys_lbk_en_set(self, enable); + hw_atl_rpf_tpo_to_rpf_sys_lbk_set(self, enable); + break; + case AQ_HW_LOOPBACK_DMA_NET: + hw_atl_rpf_vlan_prom_mode_en_set(self, enable); + hw_atl_rpfl2promiscuous_mode_en_set(self, enable); + hw_atl_tpb_tx_tx_clk_gate_en_set(self, !enable); + hw_atl_tpb_tx_dma_net_lbk_en_set(self, enable); + hw_atl_rpb_dma_net_lbk_set(self, enable); + break; + default: + return -EINVAL; + } + + return 0; +} + const struct aq_hw_ops hw_atl_ops_b0 = { .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, .hw_init = hw_atl_b0_hw_init, @@ -1481,5 +1525,9 @@ const struct aq_hw_ops hw_atl_ops_b0 = { .rx_extract_ts = hw_atl_b0_rx_extract_ts, .extract_hwts = hw_atl_b0_extract_hwts, .hw_set_offload = 
hw_atl_b0_hw_offload_set, - .hw_set_fc = hw_atl_b0_set_fc, + .hw_get_hw_stats = hw_atl_utils_get_hw_stats, + .hw_get_fw_version = hw_atl_utils_get_fw_version, + .hw_set_offload = hw_atl_b0_hw_offload_set, + .hw_set_loopback = hw_atl_b0_set_loopback, + .hw_set_fc = hw_atl_b0_set_fc, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 6cadc9054544..d1f68fc16291 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -563,6 +563,13 @@ void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk) HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk); } +void hw_atl_rpb_dma_net_lbk_set(struct aq_hw_s *aq_hw, u32 dma_net_lbk) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_NET_LBK_ADR, + HW_ATL_RPB_DMA_NET_LBK_MSK, + HW_ATL_RPB_DMA_NET_LBK_SHIFT, dma_net_lbk); +} + void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, u32 rx_traf_class_mode) { @@ -1341,7 +1348,26 @@ void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_ tx_dma_sys_lbk_en); } +void hw_atl_tpb_tx_dma_net_lbk_en_set(struct aq_hw_s *aq_hw, + u32 tx_dma_net_lbk_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_NET_LBK_ADR, + HW_ATL_TPB_DMA_NET_LBK_MSK, + HW_ATL_TPB_DMA_NET_LBK_SHIFT, + tx_dma_net_lbk_en); +} + +void hw_atl_tpb_tx_tx_clk_gate_en_set(struct aq_hw_s *aq_hw, + u32 tx_clk_gate_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_CLK_GATE_EN_ADR, + HW_ATL_TPB_TX_CLK_GATE_EN_MSK, + HW_ATL_TPB_TX_CLK_GATE_EN_SHIFT, + tx_clk_gate_en); +} + void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, + u32 tx_pkt_buff_size_per_tc, u32 buffer) { aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer), diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index 5750b0c9cae7..62992b23c0e8 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -288,6 +288,9 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, /* set dma system loopback */ void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk); +/* set dma network loopback */ +void hw_atl_rpb_dma_net_lbk_set(struct aq_hw_s *aq_hw, u32 dma_net_lbk); + /* set rx traffic class mode */ void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw, u32 rx_traf_class_mode); @@ -629,6 +632,14 @@ void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw, /* set tx dma system loopback enable */ void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en); +/* set tx dma network loopback enable */ +void hw_atl_tpb_tx_dma_net_lbk_en_set(struct aq_hw_s *aq_hw, + u32 tx_dma_net_lbk_en); + +/* set tx clock gating enable */ +void hw_atl_tpb_tx_tx_clk_gate_en_set(struct aq_hw_s *aq_hw, + u32 tx_clk_gate_en); + /* set tx packet buffer size (per tc) */ void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw, u32 tx_pkt_buff_size_per_tc, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index ec3bcdcefc4d..18de2f7b8959 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -554,6 +554,24 @@ /* default value of bitfield dma_sys_loopback */ #define 
HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0 +/* rx dma_net_loopback bitfield definitions + * preprocessor definitions for the bitfield "dma_net_loopback". + * port="pif_rpb_dma_net_lbk_i" + */ + +/* register address for bitfield dma_net_loopback */ +#define HW_ATL_RPB_DMA_NET_LBK_ADR 0x00005000 +/* bitmask for bitfield dma_net_loopback */ +#define HW_ATL_RPB_DMA_NET_LBK_MSK 0x00000010 +/* inverted bitmask for bitfield dma_net_loopback */ +#define HW_ATL_RPB_DMA_NET_LBK_MSKN 0xffffffef +/* lower bit position of bitfield dma_net_loopback */ +#define HW_ATL_RPB_DMA_NET_LBK_SHIFT 4 +/* width of bitfield dma_net_loopback */ +#define HW_ATL_RPB_DMA_NET_LBK_WIDTH 1 +/* default value of bitfield dma_net_loopback */ +#define HW_ATL_RPB_DMA_NET_LBK_DEFAULT 0x0 + /* rx rx_tc_mode bitfield definitions * preprocessor definitions for the bitfield "rx_tc_mode". * port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i" @@ -2107,6 +2125,24 @@ /* default value of bitfield dma_sys_loopback */ #define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0 +/* tx dma_net_loopback bitfield definitions + * preprocessor definitions for the bitfield "dma_net_loopback". + * port="pif_tpb_dma_net_lbk_i" + */ + +/* register address for bitfield dma_net_loopback */ +#define HW_ATL_TPB_DMA_NET_LBK_ADR 0x00007000 +/* bitmask for bitfield dma_net_loopback */ +#define HW_ATL_TPB_DMA_NET_LBK_MSK 0x00000010 +/* inverted bitmask for bitfield dma_net_loopback */ +#define HW_ATL_TPB_DMA_NET_LBK_MSKN 0xffffffef +/* lower bit position of bitfield dma_net_loopback */ +#define HW_ATL_TPB_DMA_NET_LBK_SHIFT 4 +/* width of bitfield dma_net_loopback */ +#define HW_ATL_TPB_DMA_NET_LBK_WIDTH 1 +/* default value of bitfield dma_net_loopback */ +#define HW_ATL_TPB_DMA_NET_LBK_DEFAULT 0x0 + /* tx tx{b}_buf_size[7:0] bitfield definitions * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]". * parameter: buffer {b} | stride size 0x10 | range [0, 7] @@ -2144,6 +2180,24 @@ /* default value of bitfield tx_scp_ins_en */ #define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0 +/* tx tx_clk_gate_en bitfield definitions + * preprocessor definitions for the bitfield "tx_clk_gate_en". + * port="pif_tpb_clk_gate_en_i" + */ + +/* register address for bitfield tx_clk_gate_en */ +#define HW_ATL_TPB_TX_CLK_GATE_EN_ADR 0x00007900 +/* bitmask for bitfield tx_clk_gate_en */ +#define HW_ATL_TPB_TX_CLK_GATE_EN_MSK 0x00000010 +/* inverted bitmask for bitfield tx_clk_gate_en */ +#define HW_ATL_TPB_TX_CLK_GATE_EN_MSKN 0xffffffef +/* lower bit position of bitfield tx_clk_gate_en */ +#define HW_ATL_TPB_TX_CLK_GATE_EN_SHIFT 4 +/* width of bitfield tx_clk_gate_en */ +#define HW_ATL_TPB_TX_CLK_GATE_EN_WIDTH 1 +/* default value of bitfield tx_clk_gate_en */ +#define HW_ATL_TPB_TX_CLK_GATE_EN_DEFAULT 0x1 + /* tx ipv4_chk_en bitfield definitions * preprocessor definitions for the bitfield "ipv4_chk_en". 
* port="pif_tpo_ipv4_chk_en_i" diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 6fc5640065bd..8910b62e67ed 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -47,6 +47,11 @@ #define FORCE_FLASHLESS 0 +enum mcp_area { + MCP_AREA_CONFIG = 0x80000000, + MCP_AREA_SETTINGS = 0x20000000, +}; + static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual); static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, @@ -87,6 +92,7 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) } self->aq_fw_ops = *fw_ops; err = self->aq_fw_ops->init(self); + return err; } @@ -237,9 +243,9 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) int hw_atl_utils_soft_reset(struct aq_hw_s *self) { - int k; u32 boot_exit_code = 0; u32 val; + int k; for (k = 0; k < 1000; ++k) { u32 flb_status = aq_hw_read_reg(self, @@ -327,10 +333,75 @@ err_exit: return err; } -int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt) +static int hw_atl_utils_write_b1_mbox(struct aq_hw_s *self, u32 addr, + u32 *p, u32 cnt, enum mcp_area area) { + u32 data_offset = 0; + u32 offset = addr; + int err = 0; u32 val; + + switch (area) { + case MCP_AREA_CONFIG: + offset -= self->rpc_addr; + break; + + case MCP_AREA_SETTINGS: + offset -= self->settings_addr; + break; + } + + offset = offset / sizeof(u32); + + for (; data_offset < cnt; ++data_offset, ++offset) { + aq_hw_write_reg(self, 0x328, p[data_offset]); + aq_hw_write_reg(self, 0x32C, + (area | (0xFFFF & (offset * 4)))); + hw_atl_mcp_up_force_intr_set(self, 1); + /* 1000 times by 10us = 10ms */ + err = readx_poll_timeout_atomic(hw_atl_scrpad12_get, + self, val, + (val & 0xF0000000) != + area, + 10U, 10000U); + + if (err < 0) + break; + } + + return err; +} + +static int hw_atl_utils_write_b0_mbox(struct aq_hw_s *self, u32 addr, + u32 *p, u32 cnt) +{ + u32 offset = 0; int err = 0; + u32 val; + + aq_hw_write_reg(self, 0x208, addr); + + for (; offset < cnt; ++offset) { + aq_hw_write_reg(self, 0x20C, p[offset]); + aq_hw_write_reg(self, 0x200, 0xC000); + + err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get, + self, val, + (val & 0x100) == 0U, + 10U, 10000U); + + if (err < 0) + break; + } + + return err; +} + +static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 addr, u32 *p, + u32 cnt, enum mcp_area area) +{ + int err = 0; + u32 val; err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, self, val, val == 1U, @@ -338,54 +409,47 @@ int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt) if (err < 0) goto err_exit; - if (IS_CHIP_FEATURE(REVISION_B1)) { - u32 offset = 0; - - for (; offset < cnt; ++offset) { - aq_hw_write_reg(self, 0x328, p[offset]); - aq_hw_write_reg(self, 0x32C, - (0x80000000 | (0xFFFF & (offset * 4)))); - hw_atl_mcp_up_force_intr_set(self, 1); - /* 1000 times by 10us = 10ms */ - err = readx_poll_timeout_atomic(hw_atl_scrpad12_get, - self, val, - (val & 0xF0000000) != - 0x80000000, - 10U, 10000U); - } - } else { - u32 offset = 0; - - aq_hw_write_reg(self, 0x208, a); + if (IS_CHIP_FEATURE(REVISION_B1)) + err = hw_atl_utils_write_b1_mbox(self, addr, p, cnt, area); + else + err = hw_atl_utils_write_b0_mbox(self, addr, p, cnt); - for (; offset < cnt; ++offset) { - aq_hw_write_reg(self, 0x20C, p[offset]); - aq_hw_write_reg(self, 0x200, 0xC000); + hw_atl_reg_glb_cpu_sem_set(self, 1U, 
HW_ATL_FW_SM_RAM); - err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get, - self, val, - (val & 0x100) == 0, - 1000U, 10000U); - } - } + if (err < 0) + goto err_exit; - hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); + err = aq_hw_err_from_flags(self); err_exit: return err; } +int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt) +{ + return hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, p, + cnt, MCP_AREA_CONFIG); +} + +int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p, + u32 cnt) +{ + return hw_atl_utils_fw_upload_dwords(self, self->settings_addr + offset, + p, cnt, MCP_AREA_SETTINGS); +} + static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual) { - int err = 0; const u32 dw_major_mask = 0xff000000U; const u32 dw_minor_mask = 0x00ffffffU; + int err = 0; err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0; if (err < 0) goto err_exit; err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ? -EOPNOTSUPP : 0; + err_exit: return err; } @@ -430,17 +494,16 @@ struct aq_hw_atl_utils_fw_rpc_tid_s { int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) { - int err = 0; struct aq_hw_atl_utils_fw_rpc_tid_s sw; + int err = 0; if (!IS_CHIP_FEATURE(MIPS)) { err = -1; goto err_exit; } - err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, - (u32 *)(void *)&self->rpc, - (rpc_size + sizeof(u32) - - sizeof(u8)) / sizeof(u32)); + err = hw_atl_write_fwcfg_dwords(self, (u32 *)(void *)&self->rpc, + (rpc_size + sizeof(u32) - + sizeof(u8)) / sizeof(u32)); if (err < 0) goto err_exit; @@ -455,9 +518,9 @@ err_exit: int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, struct hw_atl_utils_fw_rpc **rpc) { - int err = 0; struct aq_hw_atl_utils_fw_rpc_tid_s sw; struct aq_hw_atl_utils_fw_rpc_tid_s fw; + int err = 0; do { sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR); @@ -561,10 +624,10 @@ static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed) static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state) { - int err = 0; - u32 transaction_id = 0; - struct hw_atl_utils_mbox_header mbox; u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); + struct hw_atl_utils_mbox_header mbox; + u32 transaction_id = 0; + int err = 0; if (state == MPI_RESET) { hw_atl_utils_mpi_read_mbox(self, &mbox); @@ -592,20 +655,26 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, val |= state & HW_ATL_MPI_STATE_MSK; aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val); + err_exit: return err; } int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) { - u32 cp0x036C = hw_atl_utils_mpi_get_state(self); - u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT; struct aq_hw_link_status_s *link_status = &self->aq_link_status; + u32 mpi_state; + u32 speed; - if (!link_speed_mask) { + mpi_state = hw_atl_utils_mpi_get_state(self); + speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G | + FW2X_RATE_2G5 | FW2X_RATE_5G | + FW2X_RATE_10G); + + if (!speed) { link_status->mbps = 0U; } else { - switch (link_speed_mask) { + switch (speed) { case HAL_ATLANTIC_RATE_10G: link_status->mbps = 10000U; break; @@ -638,14 +707,15 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, u8 *mac) { + u32 mac_addr[2]; + u32 efuse_addr; int err = 0; u32 h = 0U; u32 l = 0U; - u32 mac_addr[2]; if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) { - unsigned int rnd = 0; unsigned int ucp_0x370 = 0; + unsigned int rnd = 0; 
get_random_bytes(&rnd, sizeof(unsigned int)); @@ -653,11 +723,10 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370); } - err = hw_atl_utils_fw_downld_dwords(self, - aq_hw_read_reg(self, 0x00000374U) + - (40U * 4U), - mac_addr, - ARRAY_SIZE(mac_addr)); + efuse_addr = aq_hw_read_reg(self, 0x00000374U); + + err = hw_atl_utils_fw_downld_dwords(self, efuse_addr + (40U * 4U), + mac_addr, ARRAY_SIZE(mac_addr)); if (err < 0) { mac_addr[0] = 0U; mac_addr[1] = 0U; @@ -719,14 +788,15 @@ unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps) default: break; } + return ret; } void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p) { - u32 chip_features = 0U; u32 val = hw_atl_reg_glb_mif_id_get(self); u32 mif_rev = val & 0xFFU; + u32 chip_features = 0U; if ((0xFU & mif_rev) == 1U) { chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 | @@ -753,13 +823,14 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self) { hw_atl_utils_mpi_set_speed(self, 0); hw_atl_utils_mpi_set_state(self, MPI_DEINIT); + return 0; } int hw_atl_utils_update_stats(struct aq_hw_s *self) { - struct hw_atl_utils_mbox mbox; struct aq_stats_s *cs = &self->curr_stats; + struct hw_atl_utils_mbox mbox; hw_atl_utils_mpi_read_stats(self, &mbox); @@ -836,16 +907,19 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, for (i = 0; i < aq_hw_caps->mac_regs_count; i++) regs_buff[i] = aq_hw_read_reg(self, hw_atl_utils_hw_mac_regs[i]); + return 0; } int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version) { *fw_version = aq_hw_read_reg(self, 0x18U); + return 0; } -static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac) +static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled, + u8 *mac) { struct hw_atl_utils_fw_rpc *prpc = NULL; unsigned int rpc_size = 0U; @@ -858,22 +932,26 @@ static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac) memset(prpc, 0, sizeof(*prpc)); if (wol_enabled) { - rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_wol); + rpc_size = offsetof(struct hw_atl_utils_fw_rpc, msg_wol_add) + + sizeof(prpc->msg_wol_add); + prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD; - prpc->msg_wol.priority = + prpc->msg_wol_add.priority = HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR; - prpc->msg_wol.pattern_id = + prpc->msg_wol_add.pattern_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN; - prpc->msg_wol.wol_packet_type = + prpc->msg_wol_add.packet_type = HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT; - ether_addr_copy((u8 *)&prpc->msg_wol.wol_pattern, mac); + ether_addr_copy((u8 *)&prpc->msg_wol_add.magic_packet_pattern, + mac); } else { - rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_del_id); + rpc_size = sizeof(prpc->msg_wol_remove) + + offsetof(struct hw_atl_utils_fw_rpc, msg_wol_remove); prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL; - prpc->msg_wol.pattern_id = + prpc->msg_wol_add.pattern_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN; } @@ -890,8 +968,8 @@ static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state, unsigned int rpc_size = 0U; int err = 0; - if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) { - err = aq_fw1x_set_wol(self, 1, mac); + if (self->aq_nic_cfg->wol & WAKE_MAGIC) { + err = aq_fw1x_set_wake_magic(self, 1, mac); if (err < 0) goto err_exit; @@ -965,4 +1043,5 @@ const struct aq_fw_ops aq_fw_1x_ops = { .set_flow_control = NULL, .send_fw_request = NULL, .enable_ptp = NULL, + .led_control = NULL, }; diff --git 
a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index ee11b107f0a5..42f0c5c6ec2d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -70,104 +70,41 @@ struct __packed hw_atl_stats_s { u32 dpc; }; -union __packed ip_addr { - struct { - u8 addr[16]; - } v6; - struct { - u8 padding[12]; - u8 addr[4]; - } v4; -}; - -struct __packed hw_atl_utils_fw_rpc { - u32 msg_id; - +struct __packed drv_msg_enable_wakeup { union { - struct { - u32 pong; - } msg_ping; + u32 pattern_mask; struct { - u8 mac_addr[6]; - u32 ip_addr_cnt; + u32 reason_arp_v4_pkt : 1; + u32 reason_ipv4_ping_pkt : 1; + u32 reason_ipv6_ns_pkt : 1; + u32 reason_ipv6_ping_pkt : 1; + u32 reason_link_up : 1; + u32 reason_link_down : 1; + u32 reason_maximum : 1; + }; + }; - struct { - union ip_addr addr; - union ip_addr mask; - } ip[1]; - } msg_arp; + union { + u32 offload_mask; + }; +}; - struct { - u32 len; - u8 packet[1514U]; - } msg_inject; +struct __packed magic_packet_pattern_s { + u8 mac_addr[ETH_ALEN]; +}; - struct { - u32 priority; - u32 wol_packet_type; - u32 pattern_id; - u32 next_wol_pattern_offset; - - union { - struct { - u32 flags; - u8 ipv4_source_address[4]; - u8 ipv4_dest_address[4]; - u16 tcp_source_port_number; - u16 tcp_dest_port_number; - } ipv4_tcp_syn_parameters; - - struct { - u32 flags; - u8 ipv6_source_address[16]; - u8 ipv6_dest_address[16]; - u16 tcp_source_port_number; - u16 tcp_dest_port_number; - } ipv6_tcp_syn_parameters; - - struct { - u32 flags; - } eapol_request_id_message_parameters; - - struct { - u32 flags; - u32 mask_offset; - u32 mask_size; - u32 pattern_offset; - u32 pattern_size; - } wol_bit_map_pattern; - - struct { - u8 mac_addr[ETH_ALEN]; - } wol_magic_packet_patter; - } wol_pattern; - } msg_wol; +struct __packed drv_msg_wol_add { + u32 priority; + u32 packet_type; + u32 pattern_id; + u32 next_pattern_offset; - struct { - union { - u32 pattern_mask; - - struct { - u32 reason_arp_v4_pkt : 1; - u32 reason_ipv4_ping_pkt : 1; - u32 reason_ipv6_ns_pkt : 1; - u32 reason_ipv6_ping_pkt : 1; - u32 reason_link_up : 1; - u32 reason_link_down : 1; - u32 reason_maximum : 1; - }; - }; - - union { - u32 offload_mask; - }; - } msg_enable_wakeup; + struct magic_packet_pattern_s magic_packet_pattern; +}; - struct { - u32 id; - } msg_del_id; - }; +struct __packed drv_msg_wol_remove { + u32 id; }; struct __packed hw_atl_utils_mbox_header { @@ -176,7 +113,7 @@ struct __packed hw_atl_utils_mbox_header { u32 error; }; -struct __packed hw_aq_ptp_offset { +struct __packed hw_atl_ptp_offset { u16 ingress_100; u16 egress_100; u16 ingress_1000; @@ -189,6 +126,13 @@ struct __packed hw_aq_ptp_offset { u16 egress_10000; }; +struct __packed hw_atl_cable_diag { + u8 fault; + u8 distance; + u8 far_distance; + u8 reserved; +}; + enum gpio_pin_function { GPIO_PIN_FUNCTION_NC, GPIO_PIN_FUNCTION_VAUX_ENABLE, @@ -204,14 +148,14 @@ enum gpio_pin_function { GPIO_PIN_FUNCTION_SIZE }; -struct __packed hw_aq_info { +struct __packed hw_atl_info { u8 reserved[6]; u16 phy_fault_code; u16 phy_temperature; u8 cable_len; u8 reserved1; - u32 cable_diag_data[4]; - struct hw_aq_ptp_offset ptp_offset; + struct hw_atl_cable_diag cable_diag_data[4]; + struct hw_atl_ptp_offset ptp_offset; u8 reserved2[12]; u32 caps_lo; u32 caps_hi; @@ -233,28 +177,25 @@ struct __packed hw_aq_info { struct __packed hw_atl_utils_mbox { struct hw_atl_utils_mbox_header header; struct hw_atl_stats_s 
stats; - struct hw_aq_info info; + struct hw_atl_info info; }; -/* fw2x */ -typedef u32 fw_offset_t; - struct __packed offload_ip_info { u8 v4_local_addr_count; u8 v4_addr_count; u8 v6_local_addr_count; u8 v6_addr_count; - fw_offset_t v4_addr; - fw_offset_t v4_prefix; - fw_offset_t v6_addr; - fw_offset_t v6_prefix; + u32 v4_addr; + u32 v4_prefix; + u32 v6_addr; + u32 v6_prefix; }; struct __packed offload_port_info { u16 udp_port_count; u16 tcp_port_count; - fw_offset_t udp_port; - fw_offset_t tcp_port; + u32 udp_port; + u32 tcp_port; }; struct __packed offload_ka_info { @@ -262,15 +203,15 @@ struct __packed offload_ka_info { u16 v6_ka_count; u32 retry_count; u32 retry_interval; - fw_offset_t v4_ka; - fw_offset_t v6_ka; + u32 v4_ka; + u32 v6_ka; }; struct __packed offload_rr_info { u32 rr_count; u32 rr_buf_len; - fw_offset_t rr_id_x; - fw_offset_t rr_buf; + u32 rr_id_x; + u32 rr_buf; }; struct __packed offload_info { @@ -287,6 +228,19 @@ struct __packed offload_info { u8 buf[0]; }; +struct __packed hw_atl_utils_fw_rpc { + u32 msg_id; + + union { + /* fw1x structures */ + struct drv_msg_wol_add msg_wol_add; + struct drv_msg_wol_remove msg_wol_remove; + struct drv_msg_enable_wakeup msg_enable_wakeup; + /* fw2x structures */ + struct offload_info fw2x_offloads; + }; +}; + /* Mailbox FW Request interface */ struct __packed hw_fw_request_ptp_gpio_ctrl { u32 index; @@ -323,9 +277,54 @@ struct __packed hw_fw_request_iface { }; }; +struct __packed hw_atl_utils_settings { + u32 mtu; + u32 downshift_retry_count; + u32 link_pause_frame_quanta_100m; + u32 link_pause_frame_threshold_100m; + u32 link_pause_frame_quanta_1g; + u32 link_pause_frame_threshold_1g; + u32 link_pause_frame_quanta_2p5g; + u32 link_pause_frame_threshold_2p5g; + u32 link_pause_frame_quanta_5g; + u32 link_pause_frame_threshold_5g; + u32 link_pause_frame_quanta_10g; + u32 link_pause_frame_threshold_10g; + u32 pfc_quanta_class_0; + u32 pfc_threshold_class_0; + u32 pfc_quanta_class_1; + u32 pfc_threshold_class_1; + u32 pfc_quanta_class_2; + u32 pfc_threshold_class_2; + u32 pfc_quanta_class_3; + u32 pfc_threshold_class_3; + u32 pfc_quanta_class_4; + u32 pfc_threshold_class_4; + u32 pfc_quanta_class_5; + u32 pfc_threshold_class_5; + u32 pfc_quanta_class_6; + u32 pfc_threshold_class_6; + u32 pfc_quanta_class_7; + u32 pfc_threshold_class_7; + u32 eee_link_down_timeout; + u32 eee_link_up_timeout; + u32 eee_max_link_drops; + u32 eee_rates_mask; + u32 wake_timer; + u32 thermal_shutdown_off_temp; + u32 thermal_shutdown_warning_temp; + u32 thermal_shutdown_cold_temp; + u32 msm_options; + u32 dac_cable_serdes_modes; + u32 media_detect; +}; + enum hw_atl_rx_action_with_traffic { HW_ATL_RX_DISCARD, HW_ATL_RX_HOST, + HW_ATL_RX_MNGMNT, + HW_ATL_RX_HOST_AND_MNGMNT, + HW_ATL_RX_WOL }; struct aq_rx_filter_vlan { @@ -407,20 +406,12 @@ enum hal_atl_utils_fw_state_e { #define HAL_ATLANTIC_RATE_100M BIT(5) #define HAL_ATLANTIC_RATE_INVALID BIT(6) -#define HAL_ATLANTIC_UTILS_FW_MSG_PING 0x1U -#define HAL_ATLANTIC_UTILS_FW_MSG_ARP 0x2U -#define HAL_ATLANTIC_UTILS_FW_MSG_INJECT 0x3U #define HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD 0x4U #define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR 0x10000000U #define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN 0x1U #define HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT 0x2U #define HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL 0x5U #define HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP 0x6U -#define HAL_ATLANTIC_UTILS_FW_MSG_MSM_PFC 0x7U -#define HAL_ATLANTIC_UTILS_FW_MSG_PROVISIONING 0x8U -#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_ADD 0x9U -#define 
HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_DEL 0xAU -#define HAL_ATLANTIC_UTILS_FW_MSG_CABLE_DIAG 0xDU enum hw_atl_fw2x_rate { FW2X_RATE_100M = 0x20, @@ -605,7 +596,10 @@ struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self); int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt); -int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt); +int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt); + +int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p, + u32 cnt); int hw_atl_utils_fw_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index f649ac949d06..97ebf849695f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -17,6 +17,7 @@ #include "hw_atl_utils.h" #include "hw_atl_llh.h" +#define HW_ATL_FW2X_MPI_LED_ADDR 0x31c #define HW_ATL_FW2X_MPI_RPC_ADDR 0x334 #define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360 @@ -34,12 +35,16 @@ #define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY) #define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL) +#define HW_ATL_FW2X_CTRL_WAKE_ON_LINK BIT(CTRL_WAKE_ON_LINK) #define HW_ATL_FW2X_CTRL_SLEEP_PROXY BIT(CTRL_SLEEP_PROXY) #define HW_ATL_FW2X_CTRL_WOL BIT(CTRL_WOL) #define HW_ATL_FW2X_CTRL_LINK_DROP BIT(CTRL_LINK_DROP) #define HW_ATL_FW2X_CTRL_PAUSE BIT(CTRL_PAUSE) #define HW_ATL_FW2X_CTRL_TEMPERATURE BIT(CTRL_TEMPERATURE) #define HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE BIT(CTRL_ASYMMETRIC_PAUSE) +#define HW_ATL_FW2X_CTRL_INT_LOOPBACK BIT(CTRL_INT_LOOPBACK) +#define HW_ATL_FW2X_CTRL_EXT_LOOPBACK BIT(CTRL_EXT_LOOPBACK) +#define HW_ATL_FW2X_CTRL_DOWNSHIFT BIT(CTRL_DOWNSHIFT) #define HW_ATL_FW2X_CTRL_FORCE_RECONNECT BIT(CTRL_FORCE_RECONNECT) #define HW_ATL_FW2X_CAP_EEE_1G_MASK BIT(CAPS_HI_1000BASET_FD_EEE) @@ -50,6 +55,9 @@ #define HAL_ATLANTIC_WOL_FILTERS_COUNT 8 #define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E +#define HW_ATL_FW_VER_LED 0x03010026U +#define HW_ATL_FW_VER_MEDIA_CONTROL 0x0301005aU + struct __packed fw2x_msg_wol_pattern { u8 mask[16]; u32 crc; @@ -74,6 +82,7 @@ static int aq_fw2x_set_state(struct aq_hw_s *self, static u32 aq_fw2x_mbox_get(struct aq_hw_s *self); static u32 aq_fw2x_rpc_get(struct aq_hw_s *self); +static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr); static u32 aq_fw2x_state2_get(struct aq_hw_s *self); static int aq_fw2x_init(struct aq_hw_s *self) @@ -91,6 +100,8 @@ static int aq_fw2x_init(struct aq_hw_s *self) self->rpc_addr != 0U, 1000U, 100000U); + err = aq_fw2x_settings_get(self, &self->settings_addr); + return err; } @@ -170,17 +181,26 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed) return 0; } -static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state) +static void aq_fw2x_upd_flow_control_bits(struct aq_hw_s *self, + u32 *mpi_state, u32 fc) { - if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX) - *mpi_state |= BIT(CAPS_HI_PAUSE); - else - *mpi_state &= ~BIT(CAPS_HI_PAUSE); + *mpi_state &= ~(HW_ATL_FW2X_CTRL_PAUSE | + HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE); - if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX) - *mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE); - else - *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE); + switch (fc) { + /* There is not explicit mode of RX only pause frames, + * thus, we join this mode with FC full. 
+ * FC full is either Rx, either Tx, or both. + */ + case AQ_NIC_FC_FULL: + case AQ_NIC_FC_RX: + *mpi_state |= HW_ATL_FW2X_CTRL_PAUSE | + HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE; + break; + case AQ_NIC_FC_TX: + *mpi_state |= HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE; + break; + } } static void aq_fw2x_upd_eee_rate_bits(struct aq_hw_s *self, u32 *mpi_opts, @@ -204,7 +224,8 @@ static int aq_fw2x_set_state(struct aq_hw_s *self, case MPI_INIT: mpi_state &= ~BIT(CAPS_HI_LINK_DROP); aq_fw2x_upd_eee_rate_bits(self, &mpi_state, cfg->eee_speeds); - aq_fw2x_set_mpi_flow_control(self, &mpi_state); + aq_fw2x_upd_flow_control_bits(self, &mpi_state, + self->aq_nic_cfg->fc.req); break; case MPI_DEINIT: mpi_state |= BIT(CAPS_HI_LINK_DROP); @@ -215,15 +236,20 @@ static int aq_fw2x_set_state(struct aq_hw_s *self, break; } aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state); + return 0; } static int aq_fw2x_update_link_status(struct aq_hw_s *self) { - u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR); - u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G | - FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G); struct aq_hw_link_status_s *link_status = &self->aq_link_status; + u32 mpi_state; + u32 speed; + + mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR); + speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G | + FW2X_RATE_2G5 | FW2X_RATE_5G | + FW2X_RATE_10G); if (speed) { if (speed & FW2X_RATE_10G) @@ -247,11 +273,11 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self) static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) { + u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR); + u32 mac_addr[2] = { 0 }; int err = 0; u32 h = 0U; u32 l = 0U; - u32 mac_addr[2] = { 0 }; - u32 efuse_addr = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_EFUSE_ADDR); if (efuse_addr != 0) { err = hw_atl_utils_fw_downld_dwords(self, @@ -285,15 +311,16 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) h >>= 8; mac[0] = (u8)(0xFFU & h); } + return err; } static int aq_fw2x_update_stats(struct aq_hw_s *self) { - int err = 0; u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS); u32 stats_val; + int err = 0; /* Toggle statistics bit for FW to update */ mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS); @@ -320,9 +347,9 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp) int err = 0; u32 val; - phy_temp_offset = self->mbox_addr + - offsetof(struct hw_atl_utils_mbox, info) + - offsetof(struct hw_aq_info, phy_temperature); + phy_temp_offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox, + info.phy_temperature); + /* Toggle statistics bit for FW to 0x36C.18 (CTRL_TEMPERATURE) */ mpi_opts = mpi_opts ^ HW_ATL_FW2X_CTRL_TEMPERATURE; aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); @@ -345,87 +372,46 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp) return 0; } -static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac) +static int aq_fw2x_set_wol(struct aq_hw_s *self, u8 *mac) { struct hw_atl_utils_fw_rpc *rpc = NULL; - struct offload_info *cfg = NULL; - unsigned int rpc_size = 0U; - u32 mpi_opts; + struct offload_info *info = NULL; + u32 wol_bits = 0; + u32 rpc_size; int err = 0; u32 val; - rpc_size = sizeof(rpc->msg_id) + sizeof(*cfg); - - err = hw_atl_utils_fw_rpc_wait(self, &rpc); - if (err < 0) - goto err_exit; - - memset(rpc, 0, rpc_size); - cfg = (struct offload_info *)(&rpc->msg_id + 1); - - memcpy(cfg->mac_addr, mac, ETH_ALEN); - 
cfg->len = sizeof(*cfg); - - /* Clear bit 0x36C.23 and 0x36C.22 */ - mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); - mpi_opts &= ~HW_ATL_FW2X_CTRL_SLEEP_PROXY; - mpi_opts &= ~HW_ATL_FW2X_CTRL_LINK_DROP; - - aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); - - err = hw_atl_utils_fw_rpc_call(self, rpc_size); - if (err < 0) - goto err_exit; - - /* Set bit 0x36C.23 */ - mpi_opts |= HW_ATL_FW2X_CTRL_SLEEP_PROXY; - aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); - - err = readx_poll_timeout_atomic(aq_fw2x_state2_get, - self, val, - val & HW_ATL_FW2X_CTRL_SLEEP_PROXY, - 1U, 100000U); - -err_exit: - return err; -} - -static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac) -{ - struct hw_atl_utils_fw_rpc *rpc = NULL; - struct fw2x_msg_wol *msg = NULL; - u32 mpi_opts; - int err = 0; - u32 val; - - err = hw_atl_utils_fw_rpc_wait(self, &rpc); - if (err < 0) - goto err_exit; - - msg = (struct fw2x_msg_wol *)rpc; - - memset(msg, 0, sizeof(*msg)); - - msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL; - msg->magic_packet_enabled = true; - memcpy(msg->hw_addr, mac, ETH_ALEN); + if (self->aq_nic_cfg->wol & WAKE_PHY) { + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, + HW_ATL_FW2X_CTRL_LINK_DROP); + readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val, + (val & + HW_ATL_FW2X_CTRL_LINK_DROP) != 0, + 1000, 100000); + wol_bits |= HW_ATL_FW2X_CTRL_WAKE_ON_LINK; + } - mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); - mpi_opts &= ~(HW_ATL_FW2X_CTRL_SLEEP_PROXY | HW_ATL_FW2X_CTRL_WOL); + if (self->aq_nic_cfg->wol & WAKE_MAGIC) { + wol_bits |= HW_ATL_FW2X_CTRL_SLEEP_PROXY | + HW_ATL_FW2X_CTRL_WOL; - aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + err = hw_atl_utils_fw_rpc_wait(self, &rpc); + if (err < 0) + goto err_exit; - err = hw_atl_utils_fw_rpc_call(self, sizeof(*msg)); - if (err < 0) - goto err_exit; + rpc_size = sizeof(*info) + + offsetof(struct hw_atl_utils_fw_rpc, fw2x_offloads); + memset(rpc, 0, rpc_size); + info = &rpc->fw2x_offloads; + memcpy(info->mac_addr, mac, ETH_ALEN); + info->len = sizeof(*info); - /* Set bit 0x36C.24 */ - mpi_opts |= HW_ATL_FW2X_CTRL_WOL; - aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + err = hw_atl_utils_fw_rpc_call(self, rpc_size); + if (err < 0) + goto err_exit; + } - err = readx_poll_timeout_atomic(aq_fw2x_state2_get, - self, val, val & HW_ATL_FW2X_CTRL_WOL, - 1U, 10000U); + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, wol_bits); err_exit: return err; @@ -436,14 +422,9 @@ static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state, { int err = 0; - if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) { - err = aq_fw2x_set_sleep_proxy(self, mac); - if (err < 0) - goto err_exit; - err = aq_fw2x_set_wol_params(self, mac); - } + if (self->aq_nic_cfg->wol) + err = aq_fw2x_set_wol(self, mac); -err_exit: return err; } @@ -460,8 +441,7 @@ static int aq_fw2x_send_fw_request(struct aq_hw_s *self, dword_cnt = size / sizeof(u32); if (size % sizeof(u32)) dword_cnt++; - err = hw_atl_utils_fw_upload_dwords(self, aq_fw2x_rpc_get(self), - (void *)fw_req, dword_cnt); + err = hw_atl_write_fwcfg_dwords(self, (void *)fw_req, dword_cnt); if (err < 0) goto err_exit; @@ -495,6 +475,16 @@ static void aq_fw3x_enable_ptp(struct aq_hw_s *self, int enable) aq_hw_write_reg(self, HW_ATL_FW3X_EXT_CONTROL_ADDR, ptp_opts); } +static int aq_fw2x_led_control(struct aq_hw_s *self, u32 mode) +{ + if (self->fw_ver_actual < HW_ATL_FW_VER_LED) + return -EOPNOTSUPP; + + 
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_LED_ADDR, mode); + + return 0; +} + static int aq_fw2x_set_eee_rate(struct aq_hw_s *self, u32 speed) { u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); @@ -512,11 +502,12 @@ static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate, u32 mpi_state; u32 caps_hi; int err = 0; - u32 addr = self->mbox_addr + offsetof(struct hw_atl_utils_mbox, info) + - offsetof(struct hw_aq_info, caps_hi); + u32 offset; + + offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox, + info.caps_hi); - err = hw_atl_utils_fw_downld_dwords(self, addr, &caps_hi, - sizeof(caps_hi) / sizeof(u32)); + err = hw_atl_utils_fw_downld_dwords(self, offset, &caps_hi, 1); if (err) return err; @@ -544,7 +535,8 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self) { u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); - aq_fw2x_set_mpi_flow_control(self, &mpi_state); + aq_fw2x_upd_flow_control_bits(self, &mpi_state, + self->aq_nic_cfg->fc.req); aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state); @@ -554,17 +546,41 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self) static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode) { u32 mpi_state = aq_fw2x_state2_get(self); + *fcmode = 0; if (mpi_state & HW_ATL_FW2X_CAP_PAUSE) - if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE) - *fcmode = AQ_NIC_FC_RX; + *fcmode |= AQ_NIC_FC_RX; + + if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE) + *fcmode |= AQ_NIC_FC_TX; + + return 0; +} + +static int aq_fw2x_set_phyloopback(struct aq_hw_s *self, u32 mode, bool enable) +{ + u32 mpi_opts; + + switch (mode) { + case AQ_HW_LOOPBACK_PHYINT_SYS: + mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + if (enable) + mpi_opts |= HW_ATL_FW2X_CTRL_INT_LOOPBACK; else - *fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX; - else - if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE) - *fcmode = AQ_NIC_FC_TX; + mpi_opts &= ~HW_ATL_FW2X_CTRL_INT_LOOPBACK; + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + break; + case AQ_HW_LOOPBACK_PHYEXT_SYS: + mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + if (enable) + mpi_opts |= HW_ATL_FW2X_CTRL_EXT_LOOPBACK; else - *fcmode = 0; + mpi_opts &= ~HW_ATL_FW2X_CTRL_EXT_LOOPBACK; + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + break; + default: + return -EINVAL; + } return 0; } @@ -579,6 +595,19 @@ static u32 aq_fw2x_rpc_get(struct aq_hw_s *self) return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR); } +static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr) +{ + int err = 0; + u32 offset; + + offset = self->mbox_addr + offsetof(struct hw_atl_utils_mbox, + info.setting_address); + + err = hw_atl_utils_fw_downld_dwords(self, offset, addr, 1); + + return err; +} + static u32 aq_fw2x_state2_get(struct aq_hw_s *self) { return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR); @@ -602,4 +631,6 @@ const struct aq_fw_ops aq_fw_2x_ops = { .get_flow_control = aq_fw2x_get_flow_control, .send_fw_request = aq_fw2x_send_fw_request, .enable_ptp = aq_fw3x_enable_ptp, + .led_control = aq_fw2x_led_control, + .set_phyloopback = aq_fw2x_set_phyloopback, }; diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c index 78e52d217e56..539166112993 100644 --- a/drivers/net/ethernet/arc/emac_arc.c +++ b/drivers/net/ethernet/arc/emac_arc.c @@ -20,9 +20,10 @@ static int emac_arc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct net_device *ndev; struct arc_emac_priv *priv; - int 
interface, err; + phy_interface_t interface; + struct net_device *ndev; + int err; if (!dev->of_node) return -ENODEV; @@ -37,9 +38,13 @@ static int emac_arc_probe(struct platform_device *pdev) priv->drv_name = DRV_NAME; priv->drv_version = DRV_VERSION; - interface = of_get_phy_mode(dev->of_node); - if (interface < 0) - interface = PHY_INTERFACE_MODE_MII; + err = of_get_phy_mode(dev->of_node, &interface); + if (err) { + if (err == -ENODEV) + interface = PHY_INTERFACE_MODE_MII; + else + goto out_netdev; + } priv->clk = devm_clk_get(dev, "hclk"); if (IS_ERR(priv->clk)) { diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index 664d664e0925..aae231c5224f 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -97,8 +97,9 @@ static int emac_rockchip_probe(struct platform_device *pdev) struct net_device *ndev; struct rockchip_priv_data *priv; const struct of_device_id *match; + phy_interface_t interface; u32 data; - int err, interface; + int err; if (!pdev->dev.of_node) return -ENODEV; @@ -114,7 +115,9 @@ static int emac_rockchip_probe(struct platform_device *pdev) priv->emac.drv_version = DRV_VERSION; priv->emac.set_mac_speed = emac_rockchip_set_mac_speed; - interface = of_get_phy_mode(dev->of_node); + err = of_get_phy_mode(dev->of_node, &interface); + if (err) + goto out_netdev; /* RK3036/RK3066/RK3188 SoCs only support RMII */ if (interface != PHY_INTERFACE_MODE_RMII) { diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 1b1a09095c0d..8f5021091eee 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1744,10 +1744,9 @@ static int ag71xx_probe(struct platform_device *pdev) eth_random_addr(ndev->dev_addr); } - ag->phy_if_mode = of_get_phy_mode(np); - if (ag->phy_if_mode < 0) { + err = of_get_phy_mode(np, &ag->phy_if_mode); + if (err) { netif_err(ag, probe, ndev, "missing phy-mode property in DT\n"); - err = ag->phy_if_mode; goto err_free; } diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index 37752d9514e7..30b455013bf3 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -1371,8 +1371,8 @@ static int nb8800_probe(struct platform_device *pdev) priv = netdev_priv(dev); priv->base = base; - priv->phy_mode = of_get_phy_mode(pdev->dev.of_node); - if (priv->phy_mode < 0) + ret = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode); + if (ret) priv->phy_mode = PHY_INTERFACE_MODE_RGMII; priv->clk = devm_clk_get(&pdev->dev, NULL); diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h index aacc3cce2cc0..40941fb6065b 100644 --- a/drivers/net/ethernet/aurora/nb8800.h +++ b/drivers/net/ethernet/aurora/nb8800.h @@ -287,7 +287,7 @@ struct nb8800_priv { struct device_node *phy_node; /* PHY connection type from DT */ - int phy_mode; + phy_interface_t phy_mode; /* Current link status */ int speed; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index a977a459bd20..825af709708e 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2479,9 +2479,9 @@ static int bcm_sysport_probe(struct platform_device *pdev) priv->netdev = dev; priv->pdev = pdev; - priv->phy_interface = of_get_phy_mode(dn); + ret = of_get_phy_mode(dn, &priv->phy_interface); /* Default to GMII interface mode */ - if ((int)priv->phy_interface < 0)
+ if (ret) priv->phy_interface = PHY_INTERFACE_MODE_GMII; /* In the case of a fixed PHY, the DT node associated diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index d10b421ed1f1..5e037a305b83 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1934,7 +1934,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, } /* select a non-FCoE queue */ - return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp)); + return netdev_pick_tx(dev, skb, NULL) % + (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); } void bnx2x_set_num_queues(struct bnx2x *bp) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 226ab29f4cb6..3f8435208bf4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -32,31 +32,31 @@ * IRO[142].m2) + ((sbId) * IRO[142].m3)) #define CSTORM_IGU_MODE_OFFSET (IRO[161].base) #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[323].base + ((pfId) * IRO[323].m1)) -#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ (IRO[324].base + ((pfId) * IRO[324].m1)) +#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ + (IRO[325].base + ((pfId) * IRO[325].m1)) #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ - (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2)) + (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2)) #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2)) + (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2)) #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2)) + (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2)) #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ - (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2)) + (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2)) #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ - (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2)) + (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2)) #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ - (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2)) + (IRO[322].base + ((pfId) * IRO[322].m1) + ((iscsiEqId) * IRO[322].m2)) #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ - (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2)) + (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2)) #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[322].base + ((pfId) * IRO[322].m1)) + (IRO[323].base + ((pfId) * IRO[323].m1)) #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[314].base + ((pfId) * IRO[314].m1)) + (IRO[315].base + ((pfId) * IRO[315].m1)) #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[313].base + ((pfId) * IRO[313].m1)) + (IRO[314].base + ((pfId) * IRO[314].m1)) #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[312].base + ((pfId) * IRO[312].m1)) + (IRO[313].base + ((pfId) * IRO[313].m1)) #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ (IRO[155].base + ((funcId) * IRO[155].m1)) #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ @@ -99,81 +99,81 @@ #define TSTORM_FUNC_EN_OFFSET(funcId) \ (IRO[107].base + 
((funcId) * IRO[107].m1)) #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ - (IRO[278].base + ((pfId) * IRO[278].m1)) -#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ (IRO[279].base + ((pfId) * IRO[279].m1)) -#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ +#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ (IRO[280].base + ((pfId) * IRO[280].m1)) -#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ +#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ (IRO[281].base + ((pfId) * IRO[281].m1)) +#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ + (IRO[282].base + ((pfId) * IRO[282].m1)) #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[277].base + ((pfId) * IRO[277].m1)) + (IRO[278].base + ((pfId) * IRO[278].m1)) #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[276].base + ((pfId) * IRO[276].m1)) + (IRO[277].base + ((pfId) * IRO[277].m1)) #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[275].base + ((pfId) * IRO[275].m1)) + (IRO[276].base + ((pfId) * IRO[276].m1)) #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ - (IRO[274].base + ((pfId) * IRO[274].m1)) + (IRO[275].base + ((pfId) * IRO[275].m1)) #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ - (IRO[284].base + ((pfId) * IRO[284].m1)) + (IRO[285].base + ((pfId) * IRO[285].m1)) #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[270].base + ((pfId) * IRO[270].m1)) -#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ (IRO[271].base + ((pfId) * IRO[271].m1)) -#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ +#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ (IRO[272].base + ((pfId) * IRO[272].m1)) -#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ +#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ (IRO[273].base + ((pfId) * IRO[273].m1)) +#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[274].base + ((pfId) * IRO[274].m1)) #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ (IRO[206].base + ((pfId) * IRO[206].m1)) #define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ (IRO[109].base + ((funcId) * IRO[109].m1)) #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ - (IRO[223].base + ((pfId) * IRO[223].m1)) + (IRO[224].base + ((pfId) * IRO[224].m1)) #define TSTORM_VF_TO_PF_OFFSET(funcId) \ (IRO[108].base + ((funcId) * IRO[108].m1)) -#define USTORM_AGG_DATA_OFFSET (IRO[212].base) -#define USTORM_AGG_DATA_SIZE (IRO[212].size) +#define USTORM_AGG_DATA_OFFSET (IRO[213].base) +#define USTORM_AGG_DATA_SIZE (IRO[213].size) #define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base) #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ (IRO[180].base + ((assertListEntry) * IRO[180].m1)) #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ (IRO[187].base + ((portId) * IRO[187].m1)) #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ - (IRO[325].base + ((pfId) * IRO[325].m1)) + (IRO[326].base + ((pfId) * IRO[326].m1)) #define USTORM_FUNC_EN_OFFSET(funcId) \ (IRO[182].base + ((funcId) * IRO[182].m1)) #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[289].base + ((pfId) * IRO[289].m1)) -#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ (IRO[290].base + ((pfId) * IRO[290].m1)) +#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ + (IRO[291].base + ((pfId) * IRO[291].m1)) #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ - (IRO[294].base + ((pfId) * IRO[294].m1)) + (IRO[295].base + ((pfId) * IRO[295].m1)) #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ - (IRO[291].base + ((pfId) * IRO[291].m1)) + (IRO[292].base + ((pfId) * 
IRO[292].m1)) #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[287].base + ((pfId) * IRO[287].m1)) + (IRO[288].base + ((pfId) * IRO[288].m1)) #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[286].base + ((pfId) * IRO[286].m1)) + (IRO[287].base + ((pfId) * IRO[287].m1)) #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[285].base + ((pfId) * IRO[285].m1)) + (IRO[286].base + ((pfId) * IRO[286].m1)) #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[288].base + ((pfId) * IRO[288].m1)) + (IRO[289].base + ((pfId) * IRO[289].m1)) #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ - (IRO[292].base + ((pfId) * IRO[292].m1)) -#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ (IRO[293].base + ((pfId) * IRO[293].m1)) +#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ + (IRO[294].base + ((pfId) * IRO[294].m1)) #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ (IRO[186].base + ((pfId) * IRO[186].m1)) #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ (IRO[184].base + ((funcId) * IRO[184].m1)) #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ - (IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \ - IRO[215].m2)) + (IRO[216].base + ((portId) * IRO[216].m1) + ((clientId) * \ + IRO[216].m2)) #define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ - (IRO[216].base + ((qzoneId) * IRO[216].m1)) -#define USTORM_TPA_BTR_OFFSET (IRO[213].base) -#define USTORM_TPA_BTR_SIZE (IRO[213].size) + (IRO[217].base + ((qzoneId) * IRO[217].m1)) +#define USTORM_TPA_BTR_OFFSET (IRO[214].base) +#define USTORM_TPA_BTR_SIZE (IRO[214].size) #define USTORM_VF_TO_PF_OFFSET(funcId) \ (IRO[183].base + ((funcId) * IRO[183].m1)) #define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base) @@ -188,39 +188,39 @@ #define XSTORM_FUNC_EN_OFFSET(funcId) \ (IRO[47].base + ((funcId) * IRO[47].m1)) #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[302].base + ((pfId) * IRO[302].m1)) + (IRO[303].base + ((pfId) * IRO[303].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ - (IRO[305].base + ((pfId) * IRO[305].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ (IRO[306].base + ((pfId) * IRO[306].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ +#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ (IRO[307].base + ((pfId) * IRO[307].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ +#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ (IRO[308].base + ((pfId) * IRO[308].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ +#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ (IRO[309].base + ((pfId) * IRO[309].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ +#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ (IRO[310].base + ((pfId) * IRO[310].m1)) -#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ +#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ (IRO[311].base + ((pfId) * IRO[311].m1)) +#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ + (IRO[312].base + ((pfId) * IRO[312].m1)) #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[301].base + ((pfId) * IRO[301].m1)) + (IRO[302].base + ((pfId) * IRO[302].m1)) #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[300].base + ((pfId) * IRO[300].m1)) + (IRO[301].base + ((pfId) * IRO[301].m1)) #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[299].base + ((pfId) * IRO[299].m1)) + (IRO[300].base + ((pfId) * IRO[300].m1)) #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[304].base + ((pfId) * IRO[304].m1)) + (IRO[305].base + ((pfId) * IRO[305].m1)) #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ - (IRO[303].base + ((pfId) * IRO[303].m1)) + 
(IRO[304].base + ((pfId) * IRO[304].m1)) #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ - (IRO[298].base + ((pfId) * IRO[298].m1)) + (IRO[299].base + ((pfId) * IRO[299].m1)) #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[297].base + ((pfId) * IRO[297].m1)) + (IRO[298].base + ((pfId) * IRO[298].m1)) #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ - (IRO[296].base + ((pfId) * IRO[296].m1)) + (IRO[297].base + ((pfId) * IRO[297].m1)) #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ - (IRO[295].base + ((pfId) * IRO[295].m1)) + (IRO[296].base + ((pfId) * IRO[296].m1)) #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ (IRO[44].base + ((pfId) * IRO[44].m1)) #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ @@ -233,12 +233,12 @@ #define XSTORM_SPQ_PROD_OFFSET(funcId) \ (IRO[31].base + ((funcId) * IRO[31].m1)) #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ - (IRO[217].base + ((portId) * IRO[217].m1)) -#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ (IRO[218].base + ((portId) * IRO[218].m1)) +#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ + (IRO[219].base + ((portId) * IRO[219].m1)) #define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ - (IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \ - IRO[220].m2)) + (IRO[221].base + (((pfId)>>1) * IRO[221].m1) + (((pfId)&1) * \ + IRO[221].m2)) #define XSTORM_VF_TO_PF_OFFSET(funcId) \ (IRO[48].base + ((funcId) * IRO[48].m1)) #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 78326a6c0aba..622fadc50316 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -3024,7 +3024,7 @@ struct afex_stats { #define BCM_5710_FW_MAJOR_VERSION 7 #define BCM_5710_FW_MINOR_VERSION 13 -#define BCM_5710_FW_REVISION_VERSION 11 +#define BCM_5710_FW_REVISION_VERSION 15 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index d581d0ae6584..9638d65d8261 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -5611,9 +5611,9 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, return 0; } -static int bnx2x_link_settings_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; @@ -5685,7 +5685,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy, return rc; } -static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, +static u8 bnx2x_warpcore_read_status(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -7364,9 +7364,9 @@ static void bnx2x_8073_specific_func(struct bnx2x_phy *phy, } } -static int bnx2x_8073_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static void bnx2x_8073_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u16 val = 0, tmp1; @@ -7427,7 +7427,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy, if (params->loopback_mode == LOOPBACK_EXT) { bnx2x_807x_force_10G(bp, phy); DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n"); - return 0; + return; } else { 
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002); @@ -7509,7 +7509,6 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n", ((val & (1<<5)) > 0), ((val & (1<<7)) > 0)); - return 0; } static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, @@ -7676,9 +7675,9 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy, /******************************************************************/ /* BCM8705 PHY SECTION */ /******************************************************************/ -static int bnx2x_8705_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static void bnx2x_8705_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "init 8705\n"); @@ -7700,7 +7699,6 @@ static int bnx2x_8705_config_init(struct bnx2x_phy *phy, MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1); /* BCM8705 doesn't have microcode, hence the 0 */ bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0); - return 0; } static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy, @@ -8887,9 +8885,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, /******************************************************************/ /* BCM8706 PHY SECTION */ /******************************************************************/ -static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static void bnx2x_8706_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { u32 tx_en_mode; u16 cnt, val, tmp1; @@ -8989,13 +8987,11 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1); } - - return 0; } -static int bnx2x_8706_read_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { return bnx2x_8706_8726_read_status(phy, params, vars); } @@ -9070,9 +9066,9 @@ static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy, } -static int bnx2x_8726_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static void bnx2x_8726_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); @@ -9150,9 +9146,6 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy, MDIO_PMA_REG_8726_TX_CTRL2, phy->tx_preemphasis[1]); } - - return 0; - } static void bnx2x_8726_link_reset(struct bnx2x_phy *phy, @@ -9288,9 +9281,9 @@ static void bnx2x_8727_config_speed(struct bnx2x_phy *phy, } } -static int bnx2x_8727_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static void bnx2x_8727_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { u32 tx_en_mode; u16 tmp1, mod_abs, tmp2; @@ -9370,8 +9363,6 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, (tmp2 & 0x7fff)); } - - return 0; } static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, @@ -9946,9 +9937,9 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, return 0; } -static int bnx2x_8481_config_init(struct bnx2x_phy 
*phy, - struct link_params *params, - struct link_vars *vars) +static void bnx2x_8481_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; /* Restore normal power mode*/ @@ -9960,7 +9951,7 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy, bnx2x_wait_reset_complete(bp, phy, params); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); - return bnx2x_848xx_cmn_config_init(phy, params, vars); + bnx2x_848xx_cmn_config_init(phy, params, vars); } #define PHY848xx_CMDHDLR_WAIT 300 @@ -10210,8 +10201,8 @@ static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp, return reset_gpios; } -static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, - struct link_params *params) +static void bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, + struct link_params *params) { struct bnx2x *bp = params->bp; u8 reset_gpios; @@ -10239,8 +10230,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, udelay(10); DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n", reset_gpios); - - return 0; } static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, @@ -10283,9 +10272,9 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, } #define PHY84833_CONSTANT_LATENCY 1193 -static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static void bnx2x_848x3_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u8 port, initialize = 1; @@ -10430,7 +10419,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, if (rc) { DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); bnx2x_8483x_disable_eee(phy, params, vars); - return rc; + return; } if ((phy->req_duplex == DUPLEX_FULL) && @@ -10442,7 +10431,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, rc = bnx2x_8483x_disable_eee(phy, params, vars); if (rc) { DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n"); - return rc; + return; } } else { vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; @@ -10481,7 +10470,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, MDIO_84833_TOP_CFG_XGPHY_STRAP1, (u16)~MDIO_84833_SUPER_ISOLATE); } - return rc; } static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, @@ -11038,9 +11026,9 @@ static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy, } } -static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static void bnx2x_54618se_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u8 port; @@ -11240,8 +11228,6 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, autoneg_val); - - return 0; } @@ -11465,9 +11451,9 @@ static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy, MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100); } -static int bnx2x_7101_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static void bnx2x_7101_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { u16 fw_ver1, fw_ver2, val; struct bnx2x *bp = params->bp; @@ -11502,7 +11488,6 @@ static int bnx2x_7101_config_init(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2); bnx2x_save_spirom_version(bp, params->port, (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr); - return 0; } static u8 bnx2x_7101_read_status(struct 
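The hunks above all follow one pattern: bnx2x PHY config-init handlers that could only ever return 0 are converted to void, and handlers that used to propagate an error code now log and return early, since no caller consumed the value. A minimal sketch of the pattern, using hypothetical demo_* names rather than the driver's own code:

	struct demo_phy;
	static void demo_write(struct demo_phy *phy, int reg, int val);
	static int demo_setup_eee(struct demo_phy *phy);

	/* before: the int return carried no information */
	static int demo_config_init_old(struct demo_phy *phy)
	{
		demo_write(phy, 0x00, 0x1200);
		return 0;
	}

	/* after: void return; an internal failure just logs and bails out */
	static void demo_config_init(struct demo_phy *phy)
	{
		if (demo_setup_eee(phy)) {
			pr_debug("failed to configure EEE timers\n");
			return;			/* was "return rc;" */
		}
		demo_write(phy, 0x00, 0x1200);
	}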
bnx2x_phy *phy, @@ -11636,14 +11621,14 @@ static const struct bnx2x_phy phy_null = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)NULL, - .read_status = (read_status_t)NULL, - .link_reset = (link_reset_t)NULL, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)NULL, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL + .config_init = NULL, + .read_status = NULL, + .link_reset = NULL, + .config_loopback = NULL, + .format_fw_ver = NULL, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct bnx2x_phy phy_serdes = { @@ -11671,14 +11656,14 @@ static const struct bnx2x_phy phy_serdes = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_xgxs_config_init, - .read_status = (read_status_t)bnx2x_link_settings_status, - .link_reset = (link_reset_t)bnx2x_int_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)NULL, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL + .config_init = bnx2x_xgxs_config_init, + .read_status = bnx2x_link_settings_status, + .link_reset = bnx2x_int_link_reset, + .config_loopback = NULL, + .format_fw_ver = NULL, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct bnx2x_phy phy_xgxs = { @@ -11707,14 +11692,14 @@ static const struct bnx2x_phy phy_xgxs = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_xgxs_config_init, - .read_status = (read_status_t)bnx2x_link_settings_status, - .link_reset = (link_reset_t)bnx2x_int_link_reset, - .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback, - .format_fw_ver = (format_fw_ver_t)NULL, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func + .config_init = bnx2x_xgxs_config_init, + .read_status = bnx2x_link_settings_status, + .link_reset = bnx2x_int_link_reset, + .config_loopback = bnx2x_set_xgxs_loopback, + .format_fw_ver = NULL, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = bnx2x_xgxs_specific_func }; static const struct bnx2x_phy phy_warpcore = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, @@ -11745,14 +11730,14 @@ static const struct bnx2x_phy phy_warpcore = { .speed_cap_mask = 0, /* req_duplex = */0, /* rsrv = */0, - .config_init = (config_init_t)bnx2x_warpcore_config_init, - .read_status = (read_status_t)bnx2x_warpcore_read_status, - .link_reset = (link_reset_t)bnx2x_warpcore_link_reset, - .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback, - .format_fw_ver = (format_fw_ver_t)NULL, - .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL + .config_init = bnx2x_warpcore_config_init, + .read_status = bnx2x_warpcore_read_status, + .link_reset = bnx2x_warpcore_link_reset, + .config_loopback = bnx2x_set_warpcore_loopback, + .format_fw_ver = NULL, + .hw_reset = bnx2x_warpcore_hw_reset, + .set_link_led = NULL, + .phy_specific_func = NULL }; @@ -11776,14 +11761,14 @@ static const struct bnx2x_phy phy_7101 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_7101_config_init, - .read_status = (read_status_t)bnx2x_7101_read_status, - .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, - 
.config_loopback = (config_loopback_t)bnx2x_7101_config_loopback, - .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver, - .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset, - .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL + .config_init = bnx2x_7101_config_init, + .read_status = bnx2x_7101_read_status, + .link_reset = bnx2x_common_ext_link_reset, + .config_loopback = bnx2x_7101_config_loopback, + .format_fw_ver = bnx2x_7101_format_ver, + .hw_reset = bnx2x_7101_hw_reset, + .set_link_led = bnx2x_7101_set_link_led, + .phy_specific_func = NULL }; static const struct bnx2x_phy phy_8073 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, @@ -11807,14 +11792,14 @@ static const struct bnx2x_phy phy_8073 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_8073_config_init, - .read_status = (read_status_t)bnx2x_8073_read_status, - .link_reset = (link_reset_t)bnx2x_8073_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func + .config_init = bnx2x_8073_config_init, + .read_status = bnx2x_8073_read_status, + .link_reset = bnx2x_8073_link_reset, + .config_loopback = NULL, + .format_fw_ver = bnx2x_format_ver, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = bnx2x_8073_specific_func }; static const struct bnx2x_phy phy_8705 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, @@ -11835,14 +11820,14 @@ static const struct bnx2x_phy phy_8705 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_8705_config_init, - .read_status = (read_status_t)bnx2x_8705_read_status, - .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL + .config_init = bnx2x_8705_config_init, + .read_status = bnx2x_8705_read_status, + .link_reset = bnx2x_common_ext_link_reset, + .config_loopback = NULL, + .format_fw_ver = bnx2x_null_format_ver, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct bnx2x_phy phy_8706 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, @@ -11864,14 +11849,14 @@ static const struct bnx2x_phy phy_8706 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_8706_config_init, - .read_status = (read_status_t)bnx2x_8706_read_status, - .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL + .config_init = bnx2x_8706_config_init, + .read_status = bnx2x_8706_read_status, + .link_reset = bnx2x_common_ext_link_reset, + .config_loopback = NULL, + .format_fw_ver = bnx2x_format_ver, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct bnx2x_phy phy_8726 = { @@ -11896,14 +11881,14 @@ static const struct bnx2x_phy phy_8726 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_8726_config_init, - .read_status = (read_status_t)bnx2x_8726_read_status, - .link_reset = 
(link_reset_t)bnx2x_8726_link_reset, - .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback, - .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL + .config_init = bnx2x_8726_config_init, + .read_status = bnx2x_8726_read_status, + .link_reset = bnx2x_8726_link_reset, + .config_loopback = bnx2x_8726_config_loopback, + .format_fw_ver = bnx2x_format_ver, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct bnx2x_phy phy_8727 = { @@ -11927,14 +11912,14 @@ static const struct bnx2x_phy phy_8727 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_8727_config_init, - .read_status = (read_status_t)bnx2x_8727_read_status, - .link_reset = (link_reset_t)bnx2x_8727_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, - .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset, - .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led, - .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func + .config_init = bnx2x_8727_config_init, + .read_status = bnx2x_8727_read_status, + .link_reset = bnx2x_8727_link_reset, + .config_loopback = NULL, + .format_fw_ver = bnx2x_format_ver, + .hw_reset = bnx2x_8727_hw_reset, + .set_link_led = bnx2x_8727_set_link_led, + .phy_specific_func = bnx2x_8727_specific_func }; static const struct bnx2x_phy phy_8481 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, @@ -11962,14 +11947,14 @@ static const struct bnx2x_phy phy_8481 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_8481_config_init, - .read_status = (read_status_t)bnx2x_848xx_read_status, - .link_reset = (link_reset_t)bnx2x_8481_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, - .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset, - .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL + .config_init = bnx2x_8481_config_init, + .read_status = bnx2x_848xx_read_status, + .link_reset = bnx2x_8481_link_reset, + .config_loopback = NULL, + .format_fw_ver = bnx2x_848xx_format_ver, + .hw_reset = bnx2x_8481_hw_reset, + .set_link_led = bnx2x_848xx_set_link_led, + .phy_specific_func = NULL }; static const struct bnx2x_phy phy_84823 = { @@ -11999,14 +11984,14 @@ static const struct bnx2x_phy phy_84823 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_848x3_config_init, - .read_status = (read_status_t)bnx2x_848xx_read_status, - .link_reset = (link_reset_t)bnx2x_848x3_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func + .config_init = bnx2x_848x3_config_init, + .read_status = bnx2x_848xx_read_status, + .link_reset = bnx2x_848x3_link_reset, + .config_loopback = NULL, + .format_fw_ver = bnx2x_848xx_format_ver, + .hw_reset = NULL, + .set_link_led = bnx2x_848xx_set_link_led, + .phy_specific_func = bnx2x_848xx_specific_func }; static const struct bnx2x_phy phy_84833 = { @@ -12034,14 +12019,14 @@ static const struct bnx2x_phy phy_84833 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_848x3_config_init, - 
.read_status = (read_status_t)bnx2x_848xx_read_status, - .link_reset = (link_reset_t)bnx2x_848x3_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, - .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, - .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func + .config_init = bnx2x_848x3_config_init, + .read_status = bnx2x_848xx_read_status, + .link_reset = bnx2x_848x3_link_reset, + .config_loopback = NULL, + .format_fw_ver = bnx2x_848xx_format_ver, + .hw_reset = bnx2x_84833_hw_reset_phy, + .set_link_led = bnx2x_848xx_set_link_led, + .phy_specific_func = bnx2x_848xx_specific_func }; static const struct bnx2x_phy phy_84834 = { @@ -12068,14 +12053,14 @@ static const struct bnx2x_phy phy_84834 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_848x3_config_init, - .read_status = (read_status_t)bnx2x_848xx_read_status, - .link_reset = (link_reset_t)bnx2x_848x3_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, - .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, - .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func + .config_init = bnx2x_848x3_config_init, + .read_status = bnx2x_848xx_read_status, + .link_reset = bnx2x_848x3_link_reset, + .config_loopback = NULL, + .format_fw_ver = bnx2x_848xx_format_ver, + .hw_reset = bnx2x_84833_hw_reset_phy, + .set_link_led = bnx2x_848xx_set_link_led, + .phy_specific_func = bnx2x_848xx_specific_func }; static const struct bnx2x_phy phy_84858 = { @@ -12102,14 +12087,14 @@ static const struct bnx2x_phy phy_84858 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_848x3_config_init, - .read_status = (read_status_t)bnx2x_848xx_read_status, - .link_reset = (link_reset_t)bnx2x_848x3_link_reset, - .config_loopback = (config_loopback_t)NULL, - .format_fw_ver = (format_fw_ver_t)bnx2x_8485x_format_ver, - .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, - .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func + .config_init = bnx2x_848x3_config_init, + .read_status = bnx2x_848xx_read_status, + .link_reset = bnx2x_848x3_link_reset, + .config_loopback = NULL, + .format_fw_ver = bnx2x_8485x_format_ver, + .hw_reset = bnx2x_84833_hw_reset_phy, + .set_link_led = bnx2x_848xx_set_link_led, + .phy_specific_func = bnx2x_848xx_specific_func }; static const struct bnx2x_phy phy_54618se = { @@ -12136,14 +12121,14 @@ static const struct bnx2x_phy phy_54618se = { .speed_cap_mask = 0, /* req_duplex = */0, /* rsrv = */0, - .config_init = (config_init_t)bnx2x_54618se_config_init, - .read_status = (read_status_t)bnx2x_54618se_read_status, - .link_reset = (link_reset_t)bnx2x_54618se_link_reset, - .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback, - .format_fw_ver = (format_fw_ver_t)NULL, - .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led, - .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func + .config_init = bnx2x_54618se_config_init, + .read_status = bnx2x_54618se_read_status, + .link_reset = bnx2x_54618se_link_reset, + .config_loopback = bnx2x_54618se_config_loopback, + .format_fw_ver = NULL, + .hw_reset = NULL, + .set_link_led = bnx2x_5461x_set_link_led, + .phy_specific_func = 
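Dropping the (config_init_t), (read_status_t), etc. casts in these ops tables is only possible once every handler matches its typedef exactly; the payoff is that the compiler (and control-flow-integrity checking) can again verify the prototypes instead of the cast silently hiding a mismatch. A self-contained illustration of the idea, not tied to the driver:

	#include <stdio.h>

	struct phy;

	/* typed callback: the struct field and the handler share one prototype */
	typedef void (*config_init_t)(struct phy *phy);

	struct phy_ops {
		config_init_t config_init;
	};

	static void example_config_init(struct phy *phy)
	{
		(void)phy;
		puts("init");
	}

	/* No cast needed: if the prototype ever drifts, the compiler complains
	 * instead of the mismatch being hidden behind (config_init_t).
	 */
	static const struct phy_ops example_ops = {
		.config_init = example_config_init,
	};

	int main(void)
	{
		example_ops.config_init(NULL);
		return 0;
	}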
bnx2x_54618se_specific_func }; /*****************************************************************/ /* */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index 7115f5025664..cae03c89dc73 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -127,15 +127,15 @@ struct link_vars; struct link_params; struct bnx2x_phy; -typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params, - struct link_vars *vars); +typedef void (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params, + struct link_vars *vars); typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars); typedef void (*link_reset_t)(struct bnx2x_phy *phy, struct link_params *params); typedef void (*config_loopback_t)(struct bnx2x_phy *phy, struct link_params *params); -typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len); +typedef int (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len); typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params); typedef void (*set_link_led_t)(struct bnx2x_phy *phy, struct link_params *params, u8 mode); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 0edbb0a76847..5097a44686b3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -2397,15 +2397,21 @@ static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) /* send the ramrod on all the queues of the PF */ for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + int tx_idx; /* Set the appropriate Queue object */ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; - /* Update the Queue state */ - rc = bnx2x_queue_state_change(bp, &q_params); - if (rc) { - BNX2X_ERR("Failed to configure Tx switching\n"); - return rc; + for (tx_idx = FIRST_TX_COS_INDEX; + tx_idx < fp->max_cos; tx_idx++) { + q_params.params.update.cid_index = tx_idx; + + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to configure Tx switching\n"); + return rc; + } } } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 9da4fbee3cf7..35bc579d594a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1767,8 +1767,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rc = -EIO; if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { - netdev_warn(bp->dev, "RX buffer error %x\n", rx_err); - bnxt_sched_reset(bp, rxr); + bnapi->cp_ring.rx_buf_errors++; + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { + netdev_warn(bp->dev, "RX buffer error %x\n", + rx_err); + bnxt_sched_reset(bp, rxr); + } } goto next_rx_no_len; } @@ -4269,6 +4273,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, /* Wait until hwrm response cmpl interrupt is processed */ while (bp->hwrm_intr_seq_id != (u16)~seq_id && i++ < tmo_count) { + /* Abort the wait for completion if the FW health + * check has failed. 
+ */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + return -EBUSY; /* on first few passes, just barely sleep */ if (i < HWRM_SHORT_TIMEOUT_COUNTER) usleep_range(HWRM_SHORT_MIN_TIMEOUT, @@ -4292,6 +4301,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, /* Check if response len is updated */ for (i = 0; i < tmo_count; i++) { + /* Abort the wait for completion if the FW health + * check has failed. + */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + return -EBUSY; len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> HWRM_RESP_LEN_SFT; if (len) @@ -4432,7 +4446,8 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE | FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) - flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT; + flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | + FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; req.flags = cpu_to_le32(flags); req.ver_maj_8b = DRV_VER_MAJ; req.ver_min_8b = DRV_VER_MIN; @@ -4596,21 +4611,21 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct hwrm_cfa_ntuple_filter_alloc_output *resp; struct flow_keys *keys = &fltr->fkeys; struct bnxt_vnic_info *vnic; - u32 dst_ena = 0; + u32 flags = 0; int rc = 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; - if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) { - dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX; - req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq); - vnic = &bp->vnic_info[0]; + if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { + flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; + req.dst_id = cpu_to_le16(fltr->rxq); } else { vnic = &bp->vnic_info[fltr->rxq + 1]; + req.dst_id = cpu_to_le16(vnic->fw_vnic_id); } - req.dst_id = cpu_to_le16(vnic->fw_vnic_id); - req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena); + req.flags = cpu_to_le32(flags); + req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); req.ethertype = htons(ETH_P_IP); memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); @@ -6938,6 +6953,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->flags |= BNXT_FLAG_ROCEV2_CAP; if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; + if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) + bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) @@ -7037,8 +7054,8 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) flags = le32_to_cpu(resp->flags); if (flags & - CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED) - bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX; + CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; hwrm_cfa_adv_qcaps_exit: mutex_unlock(&bp->hwrm_cmd_lock); @@ -9688,7 +9705,7 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp) static bool bnxt_rfs_supported(struct bnxt *bp) { if (bp->flags & BNXT_FLAG_CHIP_P5) { - if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) + if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) return true; return false; } @@ -10110,6 +10127,7 @@ static void bnxt_force_fw_reset(struct bnxt *bp) void bnxt_fw_exception(struct bnxt *bp) { + netdev_warn(bp->dev, "Detected firmware fatal condition, 
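The two bnxt hunks above add an early exit to the HWRM completion wait: once another path has flagged the firmware as fatally broken, there is no point sleeping through the full command timeout. A sketch of the shape of such a poll loop, with hypothetical names and timing values:

	/* Hypothetical context: a polled wait that bails out early when
	 * another path has already flagged the firmware as dead.
	 */
	struct nic {
		u16		intr_seq_id;
		unsigned long	state;
	};
	#define NIC_STATE_FW_FATAL	0

	static int demo_wait_for_cmpl(struct nic *bp, u16 seq_id, int tmo_count)
	{
		int i = 0;

		while (bp->intr_seq_id != seq_id && i++ < tmo_count) {
			if (test_bit(NIC_STATE_FW_FATAL, &bp->state))
				return -EBUSY;	/* don't burn the whole timeout */
			usleep_range(25, 40);
		}

		return bp->intr_seq_id == seq_id ? 0 : -ETIMEDOUT;
	}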
initiating reset\n"); set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); bnxt_rtnl_lock_sp(bp); bnxt_force_fw_reset(bp); @@ -10738,6 +10756,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); bnxt_ulp_start(bp, rc); + bnxt_dl_health_status_update(bp, true); rtnl_unlock(); break; } @@ -10745,6 +10764,8 @@ static void bnxt_fw_reset_task(struct work_struct *work) fw_reset_abort: clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) + bnxt_dl_health_status_update(bp, false); bp->fw_reset_state = 0; rtnl_lock(); dev_close(bp->dev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index a3545c846bfb..37549cac3de6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -12,11 +12,11 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.10.0" +#define DRV_MODULE_VERSION "1.10.1" #define DRV_VER_MAJ 1 #define DRV_VER_MIN 10 -#define DRV_VER_UPD 0 +#define DRV_VER_UPD 1 #include <linux/interrupt.h> #include <linux/rhashtable.h> @@ -932,6 +932,7 @@ struct bnxt_cp_ring_info { dma_addr_t hw_stats_map; u32 hw_stats_ctx_id; u64 rx_l4_csum_errors; + u64 rx_buf_errors; u64 missed_irqs; struct bnxt_ring_struct cp_ring_struct; @@ -1383,6 +1384,7 @@ struct bnxt_fw_health { u32 last_fw_reset_cnt; u8 enabled:1; u8 master:1; + u8 fatal:1; u8 tmr_multiplier; u8 tmr_counter; u8 fw_reset_seq_cnt; @@ -1666,10 +1668,11 @@ struct bnxt { #define BNXT_FW_CAP_ERROR_RECOVERY 0x00002000 #define BNXT_FW_CAP_PKG_VER 0x00004000 #define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000 - #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000 + #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 0x00010000 #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000 #define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000 #define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000 + #define BNXT_FW_CAP_HOT_RESET 0x00200000 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index ae4ddf33fe5c..707827176231 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -91,6 +91,7 @@ static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter, if (!priv_ctx) return -EOPNOTSUPP; + bp->fw_health->fatal = true; event = fw_reporter_ctx->sp_event; if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT) bnxt_fw_reset(bp); @@ -199,6 +200,26 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event) } } +void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy) +{ + struct bnxt_fw_health *health = bp->fw_health; + u8 state; + + if (healthy) + state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY; + else + state = DEVLINK_HEALTH_REPORTER_STATE_ERROR; + + if (health->fatal) + devlink_health_reporter_state_update(health->fw_fatal_reporter, + state); + else + devlink_health_reporter_state_update(health->fw_reset_reporter, + state); + + health->fatal = false; +} + static const struct devlink_ops bnxt_dl_ops = { #ifdef CONFIG_BNXT_SRIOV .eswitch_mode_set = bnxt_dl_eswitch_mode_set, @@ -314,10 +335,17 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, } else { rc = hwrm_send_message_silent(bp, msg, msg_len, HWRM_CMD_TIMEOUT); - if (!rc) + if (!rc) { bnxt_copy_from_nvm_data(val, data, 
nvm_param.nvm_num_bits, nvm_param.dl_num_bytes); + } else { + struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; + + if (resp->cmd_err == + NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST) + rc = -EOPNOTSUPP; + } } dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr); if (rc == -EACCES) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index 2f4fd0a7d04b..665d4bdcd8c0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -57,6 +57,7 @@ struct bnxt_dl_nvm_param { }; void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event); +void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy); int bnxt_dl_register(struct bnxt *bp); void bnxt_dl_unregister(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index f2220b826d61..0641020b56d5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -173,6 +173,7 @@ static const char * const bnxt_ring_tpa2_stats_str[] = { static const char * const bnxt_ring_sw_stats_str[] = { "rx_l4_csum_errors", + "rx_buf_errors", "missed_irqs", }; @@ -552,6 +553,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, for (k = 0; k < stat_fields; j++, k++) buf[j] = le64_to_cpu(hw_stats[k]); buf[j++] = cpr->rx_l4_csum_errors; + buf[j++] = cpr->rx_buf_errors; buf[j++] = cpr->missed_irqs; bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += @@ -1785,6 +1787,8 @@ static int bnxt_firmware_reset(struct net_device *dev, case BNXT_FW_RESET_CHIP: req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) + req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; break; case BNXT_FW_RESET_AP: req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP; @@ -2981,7 +2985,8 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) return -EOPNOTSUPP; } - if (pci_vfs_assigned(bp->pdev)) { + if (pci_vfs_assigned(bp->pdev) && + !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) { netdev_err(dev, "Reset not allowed when VFs are assigned to VMs\n"); return -EBUSY; @@ -2994,7 +2999,9 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP); if (!rc) { - netdev_info(dev, "Reset request successful. 
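Adding "rx_buf_errors" to bnxt_ring_sw_stats_str only works because the counter is emitted at the same relative position in bnxt_get_ethtool_stats: the string table and the fill loop must stay in lock-step, in order. A generic sketch of that invariant, with illustrative names:

	struct demo_ring_sw_stats {
		u64 rx_l4_csum_errors;
		u64 rx_buf_errors;
		u64 missed_irqs;
	};

	static const char * const demo_ring_sw_stats_str[] = {
		"rx_l4_csum_errors",
		"rx_buf_errors",		/* new string ... */
		"missed_irqs",
	};

	static u64 *demo_fill_ring_sw_stats(const struct demo_ring_sw_stats *s,
					    u64 *buf)
	{
		*buf++ = s->rx_l4_csum_errors;
		*buf++ = s->rx_buf_errors;	/* ... mirrored here, same slot */
		*buf++ = s->missed_irqs;
		return buf;
	}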
Reload driver to complete reset\n"); + netdev_info(dev, "Reset request successful.\n"); + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) + netdev_info(dev, "Reload driver to complete reset\n"); *flags = 0; } } else if (*flags == ETH_RESET_AP) { @@ -3038,7 +3045,8 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, mutex_lock(&bp->hwrm_cmd_lock); while (1) { *seq_ptr = cpu_to_le16(seq); - rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + rc = _hwrm_send_message(bp, msg, msg_len, + HWRM_COREDUMP_TIMEOUT); if (rc) break; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 03b197eb793b..7cf27dffadb5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -176,6 +176,9 @@ struct cmd_nums { #define HWRM_RESERVED6 0x65UL #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL + #define HWRM_QUEUE_MPLS_QCAPS 0x80UL + #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL + #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL #define HWRM_CFA_L2_FILTER_FREE 0x91UL #define HWRM_CFA_L2_FILTER_CFG 0x92UL @@ -208,7 +211,7 @@ struct cmd_nums { #define HWRM_FW_QSTATUS 0xc1UL #define HWRM_FW_HEALTH_CHECK 0xc2UL #define HWRM_FW_SYNC 0xc3UL - #define HWRM_FW_STATE_BUFFER_QCAPS 0xc4UL + #define HWRM_FW_STATE_QCAPS 0xc4UL #define HWRM_FW_STATE_QUIESCE 0xc5UL #define HWRM_FW_STATE_BACKUP 0xc6UL #define HWRM_FW_STATE_RESTORE 0xc7UL @@ -225,8 +228,11 @@ struct cmd_nums { #define HWRM_PORT_PRBS_TEST 0xd5UL #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL + #define HWRM_FW_STATE_UNQUIESCE 0xd8UL + #define HWRM_PORT_DSC_DUMP 0xd9UL #define HWRM_TEMP_MONITOR_QUERY 0xe0UL #define HWRM_REG_POWER_QUERY 0xe1UL + #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL #define HWRM_WOL_FILTER_ALLOC 0xf0UL #define HWRM_WOL_FILTER_FREE 0xf1UL #define HWRM_WOL_FILTER_QCFG 0xf2UL @@ -308,6 +314,7 @@ struct cmd_nums { #define HWRM_ENGINE_STATS_CONFIG 0x155UL #define HWRM_ENGINE_STATS_CLEAR 0x156UL #define HWRM_ENGINE_STATS_QUERY 0x157UL + #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL #define HWRM_ENGINE_RQ_ALLOC 0x15eUL #define HWRM_ENGINE_RQ_FREE 0x15fUL #define HWRM_ENGINE_CQ_ALLOC 0x160UL @@ -390,6 +397,7 @@ struct ret_codes { #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL + #define HWRM_ERR_CODE_BUSY 0x10UL #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL @@ -420,9 +428,9 @@ struct hwrm_err_output { #define HWRM_TARGET_ID_TOOLS 0xFFFD #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 -#define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_RSVD 100 -#define HWRM_VERSION_STR "1.10.0.100" +#define HWRM_VERSION_UPDATE 1 +#define HWRM_VERSION_RSVD 12 +#define HWRM_VERSION_STR "1.10.1.12" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -637,6 +645,8 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL + #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 
0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR @@ -1115,6 +1125,7 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL + #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -1255,7 +1266,8 @@ struct hwrm_func_qcfg_output { u8 unused_1; u8 always_1; __le32 reset_addr_poll; - u8 unused_2[3]; + __le16 legacy_l2_db_size_kb; + u8 unused_2[1]; u8 valid; }; @@ -1500,6 +1512,7 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL + #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL __le32 enables; #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL @@ -1762,7 +1775,7 @@ struct hwrm_func_backing_store_qcaps_input { __le64 resp_addr; }; -/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */ +/* hwrm_func_backing_store_qcaps_output (size:640b/80B) */ struct hwrm_func_backing_store_qcaps_output { __le16 error_code; __le16 req_type; @@ -1792,6 +1805,10 @@ struct hwrm_func_backing_store_qcaps_output { __le32 tim_max_entries; __le16 mrav_num_entries_units; u8 tqm_entries_multiple; + u8 ctx_kind_initializer; + __le32 rsvd; + __le16 rsvd1; + u8 rsvd2; u8 valid; }; @@ -2524,6 +2541,7 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE __le32 preemphasis; @@ -2761,8 +2779,8 @@ struct hwrm_port_mac_ptp_qcfg_output { __le16 resp_len; u8 flags; #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL - #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL u8 unused_0[3]; __le32 rx_ts_reg_off_lower; __le32 rx_ts_reg_off_upper; @@ -3177,10 +3195,12 @@ struct hwrm_port_phy_qcaps_output { __le16 seq_id; __le16 resp_len; u8 flags; - #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL - #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL - #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfcUL - #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 2 + #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL + #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL + #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xf0UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 4 u8 port_cnt; #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL @@ -4980,6 +5000,15 @@ struct hwrm_vnic_rss_cfg_output { u8 valid; }; +/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */ +struct hwrm_vnic_rss_cfg_cmd_err { + u8 code; + #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL + #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST 
VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY + u8 unused_0[7]; +}; + /* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */ struct hwrm_vnic_plcmodes_cfg_input { __le16 req_type; @@ -5807,7 +5836,7 @@ struct hwrm_cfa_encap_record_free_output { u8 valid; }; -/* hwrm_cfa_ntuple_filter_alloc_input (size:1088b/136B) */ +/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */ struct hwrm_cfa_ntuple_filter_alloc_input { __le16 req_type; __le16 cmpl_ring; @@ -5815,10 +5844,12 @@ struct hwrm_cfa_ntuple_filter_alloc_input { __le16 target_id; __le64 resp_addr; __le32 flags; - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL __le32 enables; #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL @@ -5887,8 +5918,6 @@ struct hwrm_cfa_ntuple_filter_alloc_input { __be16 dst_port; __be16 dst_port_mask; __le64 ntuple_filter_id_hint; - __le16 rfs_ring_tbl_idx; - u8 unused_0[6]; }; /* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */ @@ -5954,7 +5983,8 @@ struct hwrm_cfa_ntuple_filter_cfg_input { #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL __le32 flags; - #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL __le64 ntuple_filter_id; __le32 new_dst_id; __le32 new_mirror_vnic_id; @@ -6534,18 +6564,21 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output { __le16 seq_id; __le16 resp_len; __le32 flags; - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL + #define 
CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL u8 unused_0[3]; u8 valid; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 174412a55e53..0cc6ec51f45f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -166,8 +166,8 @@ bnxt_fill_l2_rewrite_fields(struct bnxt_tc_actions *actions, actions->l2_rewrite_dmac[j] = cpu_to_be16(*(p + j)); } - if (!is_wildcard(ð_addr_mask[ETH_ALEN], ETH_ALEN)) { - if (!is_exactmatch(ð_addr_mask[ETH_ALEN], ETH_ALEN)) + if (!is_wildcard(ð_addr_mask[ETH_ALEN / 2], ETH_ALEN)) { + if (!is_exactmatch(ð_addr_mask[ETH_ALEN / 2], ETH_ALEN)) return -EINVAL; /* FW expects smac to be in u16 array format */ p = ð_addr[ETH_ALEN / 2]; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h index 286754903543..10c62b094914 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h @@ -64,9 +64,9 @@ struct bnxt_tc_tunnel_key { #define bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask) \ ((is_wildcard(&(eth_addr)[0], ETH_ALEN) && \ - is_wildcard(&(eth_addr)[ETH_ALEN], ETH_ALEN)) || \ + is_wildcard(&(eth_addr)[ETH_ALEN / 2], ETH_ALEN)) || \ (is_wildcard(&(eth_addr_mask)[0], ETH_ALEN) && \ - is_wildcard(&(eth_addr_mask)[ETH_ALEN], ETH_ALEN))) + is_wildcard(&(eth_addr_mask)[ETH_ALEN / 2], ETH_ALEN))) struct bnxt_tc_actions { u32 flags; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 155599dcee76..61ab7d21f6bd 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -5208,6 +5208,8 @@ static void cnic_init_rings(struct cnic_dev *dev) cnic_init_bnx2x_tx_ring(dev, data); cnic_init_bnx2x_rx_ring(dev, data); + data->general.fp_hsi_ver = ETH_FP_HSI_VERSION; + l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff; l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 4f689fb7a61c..120fa05a39ff 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1996,8 +1996,6 @@ static void reset_umac(struct bcmgenet_priv *priv) /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */ bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD); - udelay(2); - bcmgenet_umac_writel(priv, 0, UMAC_CMD); } static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) @@ -2578,7 +2576,8 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) } /* Init rDma */ - bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); + bcmgenet_rdma_writel(priv, priv->dma_max_burst_length, + DMA_SCB_BURST_SIZE); /* Initialize Rx queues */ ret = 
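The bnxt_tc fix above hinges on the key layout: the L2-rewrite MACs are stored back to back in an array of ETH_ALEN u16 words (12 bytes total), so the source MAC and its mask begin at element ETH_ALEN / 2 = 3; indexing with ETH_ALEN = 6 pointed one full MAC past the end of the array. A self-contained illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define ETH_ALEN 6

	int main(void)
	{
		/* dmac occupies elements 0..2, smac occupies elements 3..5 */
		uint16_t eth_addr[ETH_ALEN] = { 0 };	/* 12 bytes of u16 words */
		uint16_t *smac = &eth_addr[ETH_ALEN / 2];

		(void)smac;
		printf("smac starts at u16 index %d (byte offset %zu)\n",
		       ETH_ALEN / 2, (ETH_ALEN / 2) * sizeof(uint16_t));
		/* &eth_addr[ETH_ALEN] would point past the end of the array */
		return 0;
	}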
bcmgenet_init_rx_queues(priv->dev); @@ -2591,7 +2590,8 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) } /* Init tDma */ - bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); + bcmgenet_tdma_writel(priv, priv->dma_max_burst_length, + DMA_SCB_BURST_SIZE); /* Initialize Tx queues */ bcmgenet_init_tx_queues(priv->dev); @@ -2614,8 +2614,10 @@ static void bcmgenet_irq_task(struct work_struct *work) spin_unlock_irq(&priv->lock); if (status & UMAC_IRQ_PHY_DET_R && - priv->dev->phydev->autoneg != AUTONEG_ENABLE) + priv->dev->phydev->autoneg != AUTONEG_ENABLE) { phy_init_hw(priv->dev->phydev); + genphy_config_aneg(priv->dev->phydev); + } /* Link UP/DOWN event */ if (status & UMAC_IRQ_LINK_EVENT) @@ -2879,12 +2881,6 @@ static int bcmgenet_open(struct net_device *dev) if (priv->internal_phy) bcmgenet_power_up(priv, GENET_POWER_PASSIVE); - ret = bcmgenet_mii_connect(dev); - if (ret) { - netdev_err(dev, "failed to connect to PHY\n"); - goto err_clk_disable; - } - /* take MAC out of reset */ bcmgenet_umac_reset(priv); @@ -2894,12 +2890,6 @@ static int bcmgenet_open(struct net_device *dev) reg = bcmgenet_umac_readl(priv, UMAC_CMD); priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); - ret = bcmgenet_mii_config(dev, true); - if (ret) { - netdev_err(dev, "unsupported PHY\n"); - goto err_disconnect_phy; - } - bcmgenet_set_hw_addr(priv, dev->dev_addr); if (priv->internal_phy) { @@ -2915,7 +2905,7 @@ static int bcmgenet_open(struct net_device *dev) ret = bcmgenet_init_dma(priv); if (ret) { netdev_err(dev, "failed to initialize DMA\n"); - goto err_disconnect_phy; + goto err_clk_disable; } /* Always enable ring 16 - descriptor ring */ @@ -2938,19 +2928,25 @@ static int bcmgenet_open(struct net_device *dev) goto err_irq0; } + ret = bcmgenet_mii_probe(dev); + if (ret) { + netdev_err(dev, "failed to connect to PHY\n"); + goto err_irq1; + } + bcmgenet_netif_start(dev); netif_tx_start_all_queues(dev); return 0; +err_irq1: + free_irq(priv->irq1, priv); err_irq0: free_irq(priv->irq0, priv); err_fini_dma: bcmgenet_dma_teardown(priv); bcmgenet_fini_dma(priv); -err_disconnect_phy: - phy_disconnect(dev->phydev); err_clk_disable: if (priv->internal_phy) bcmgenet_power_down(priv, GENET_POWER_PASSIVE); @@ -3426,12 +3422,48 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) params->words_per_bd); } +struct bcmgenet_plat_data { + enum bcmgenet_version version; + u32 dma_max_burst_length; +}; + +static const struct bcmgenet_plat_data v1_plat_data = { + .version = GENET_V1, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v2_plat_data = { + .version = GENET_V2, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v3_plat_data = { + .version = GENET_V3, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v4_plat_data = { + .version = GENET_V4, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data v5_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = DMA_MAX_BURST_LENGTH, +}; + +static const struct bcmgenet_plat_data bcm2711_plat_data = { + .version = GENET_V5, + .dma_max_burst_length = 0x08, +}; + static const struct of_device_id bcmgenet_match[] = { - { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 }, - { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 }, - { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 }, - { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, - { .compatible = 
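The bcmgenet change above replaces the old trick of casting an enum into the of_device_id .data pointer with per-compatible parameter structs, which is what lets BCM2711 carry its own DMA burst length. A minimal sketch of that pattern under hypothetical names:

	struct demo_plat_data {
		int	version;
		u32	dma_max_burst_length;
	};

	static const struct demo_plat_data demo_v5_data = {
		.version		= 5,
		.dma_max_burst_length	= 0x10,
	};

	static const struct of_device_id demo_of_match[] = {
		{ .compatible = "vendor,demo-v5", .data = &demo_v5_data },
		{ /* sentinel */ }
	};

	static int demo_probe(struct platform_device *pdev)
	{
		const struct demo_plat_data *pdata;

		pdata = of_device_get_match_data(&pdev->dev);
		if (!pdata)
			return -EINVAL;
		/* use pdata->version and pdata->dma_max_burst_length here */
		return 0;
	}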
"brcm,genet-v5", .data = (void *)GENET_V5 }, + { .compatible = "brcm,genet-v1", .data = &v1_plat_data }, + { .compatible = "brcm,genet-v2", .data = &v2_plat_data }, + { .compatible = "brcm,genet-v3", .data = &v3_plat_data }, + { .compatible = "brcm,genet-v4", .data = &v4_plat_data }, + { .compatible = "brcm,genet-v5", .data = &v5_plat_data }, + { .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data }, { }, }; MODULE_DEVICE_TABLE(of, bcmgenet_match); @@ -3441,6 +3473,7 @@ static int bcmgenet_probe(struct platform_device *pdev) struct bcmgenet_platform_data *pd = pdev->dev.platform_data; struct device_node *dn = pdev->dev.of_node; const struct of_device_id *of_id = NULL; + const struct bcmgenet_plat_data *pdata; struct bcmgenet_priv *priv; struct net_device *dev; const void *macaddr; @@ -3464,13 +3497,16 @@ static int bcmgenet_probe(struct platform_device *pdev) priv = netdev_priv(dev); priv->irq0 = platform_get_irq(pdev, 0); + if (priv->irq0 < 0) { + err = priv->irq0; + goto err; + } priv->irq1 = platform_get_irq(pdev, 1); - priv->wol_irq = platform_get_irq(pdev, 2); - if (!priv->irq0 || !priv->irq1) { - dev_err(&pdev->dev, "can't find IRQs\n"); - err = -EINVAL; + if (priv->irq1 < 0) { + err = priv->irq1; goto err; } + priv->wol_irq = platform_get_irq_optional(pdev, 2); if (dn) macaddr = of_get_mac_address(dn); @@ -3519,10 +3555,14 @@ static int bcmgenet_probe(struct platform_device *pdev) priv->dev = dev; priv->pdev = pdev; - if (of_id) - priv->version = (enum bcmgenet_version)of_id->data; - else + if (of_id) { + pdata = of_id->data; + priv->version = pdata->version; + priv->dma_max_burst_length = pdata->dma_max_burst_length; + } else { priv->version = pd->genet_version; + priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH; + } priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); if (IS_ERR(priv->clk)) { @@ -3635,8 +3675,6 @@ static int bcmgenet_resume(struct device *d) if (priv->internal_phy) bcmgenet_power_up(priv, GENET_POWER_PASSIVE); - phy_init_hw(dev->phydev); - bcmgenet_umac_reset(priv); init_umac(priv); @@ -3645,7 +3683,10 @@ static int bcmgenet_resume(struct device *d) if (priv->wolopts) clk_disable_unprepare(priv->clk_wol); + phy_init_hw(dev->phydev); + /* Speed settings must be restored */ + genphy_config_aneg(dev->phydev); bcmgenet_mii_config(priv->dev, false); bcmgenet_set_hw_addr(priv, dev->dev_addr); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 7fbf573d8d52..a5659197598f 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -664,6 +664,7 @@ struct bcmgenet_priv { bool crc_fwd_en; unsigned int dma_rx_chk_bit; + u32 dma_max_burst_length; u32 msg_enable; @@ -720,8 +721,8 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); /* MDIO routines */ int bcmgenet_mii_init(struct net_device *dev); -int bcmgenet_mii_connect(struct net_device *dev); int bcmgenet_mii_config(struct net_device *dev, bool init); +int bcmgenet_mii_probe(struct net_device *dev); void bcmgenet_mii_exit(struct net_device *dev); void bcmgenet_phy_power_set(struct net_device *dev, bool enable); void bcmgenet_mii_setup(struct net_device *dev); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 17bb8d60a157..6392a2530183 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -173,46 +173,6 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) 
bcmgenet_fixed_phy_link_update); } -int bcmgenet_mii_connect(struct net_device *dev) -{ - struct bcmgenet_priv *priv = netdev_priv(dev); - struct device_node *dn = priv->pdev->dev.of_node; - struct phy_device *phydev; - u32 phy_flags = 0; - int ret; - - /* Communicate the integrated PHY revision */ - if (priv->internal_phy) - phy_flags = priv->gphy_rev; - - /* Initialize link state variables that bcmgenet_mii_setup() uses */ - priv->old_link = -1; - priv->old_speed = -1; - priv->old_duplex = -1; - priv->old_pause = -1; - - if (dn) { - phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, - phy_flags, priv->phy_interface); - if (!phydev) { - pr_err("could not attach to PHY\n"); - return -ENODEV; - } - } else { - phydev = dev->phydev; - phydev->dev_flags = phy_flags; - - ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, - priv->phy_interface); - if (ret) { - pr_err("could not attach to PHY\n"); - return -ENODEV; - } - } - - return 0; -} - int bcmgenet_mii_config(struct net_device *dev, bool init) { struct bcmgenet_priv *priv = netdev_priv(dev); @@ -221,13 +181,42 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) const char *phy_name = NULL; u32 id_mode_dis = 0; u32 port_ctrl; + int bmcr = -1; + int ret; u32 reg; - priv->ext_phy = !priv->internal_phy && - (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); + /* MAC clocking workaround during reset of umac state machines */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_SW_RESET) { + /* An MII PHY must be isolated to prevent TXC contention */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { + ret = phy_read(phydev, MII_BMCR); + if (ret >= 0) { + bmcr = ret; + ret = phy_write(phydev, MII_BMCR, + bmcr | BMCR_ISOLATE); + } + if (ret) { + netdev_err(dev, "failed to isolate PHY\n"); + return ret; + } + } + /* Switch MAC clocking to RGMII generated clock */ + bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); + /* Ensure 5 clks with Rx disabled + * followed by 5 clks with Reset asserted + */ + udelay(4); + reg &= ~(CMD_SW_RESET | CMD_LCL_LOOP_EN); + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + /* Ensure 5 more clocks before Rx is enabled */ + udelay(2); + } switch (priv->phy_interface) { case PHY_INTERFACE_MODE_INTERNAL: + phy_name = "internal PHY"; + /* fall through */ case PHY_INTERFACE_MODE_MOCA: /* Irrespective of the actually configured PHY speed (100 or * 1000) GENETv4 only has an internal GPHY so we will just end @@ -239,11 +228,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) else port_ctrl = PORT_MODE_INT_EPHY; - bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); - - if (priv->internal_phy) { - phy_name = "internal PHY"; - } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + if (!phy_name) { phy_name = "MoCA"; bcmgenet_moca_phy_setup(priv); } @@ -252,8 +237,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) case PHY_INTERFACE_MODE_MII: phy_name = "external MII"; phy_set_max_speed(phydev, SPEED_100); - bcmgenet_sys_writel(priv, - PORT_MODE_EXT_EPHY, SYS_PORT_CTRL); + port_ctrl = PORT_MODE_EXT_EPHY; break; case PHY_INTERFACE_MODE_REVMII: @@ -268,31 +252,43 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) port_ctrl = PORT_MODE_EXT_RVMII_50; else port_ctrl = PORT_MODE_EXT_RVMII_25; - bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); break; case PHY_INTERFACE_MODE_RGMII: /* RGMII_NO_ID: TXC transitions at the same time as TXD * (requires PCB or receiver-side delay) - * RGMII: Add 2ns delay on TXC (90 degree shift) * * ID is 
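The bcmgenet_mii_config rework above isolates an MII PHY from the bus while the UMAC state machines are reset, then restores the saved BMCR so the PHY resumes exactly as configured. A hypothetical helper showing only that save/isolate/restore sequence:

	static void demo_reset_mac(void);	/* hypothetical MAC reset */

	static int demo_reset_mac_with_phy_isolated(struct phy_device *phydev)
	{
		int bmcr, ret;

		bmcr = phy_read(phydev, MII_BMCR);
		if (bmcr < 0)
			return bmcr;

		ret = phy_write(phydev, MII_BMCR, bmcr | BMCR_ISOLATE);
		if (ret)
			return ret;

		demo_reset_mac();

		/* drop BMCR_ISOLATE again by restoring the saved value */
		return phy_write(phydev, MII_BMCR, bmcr);
	}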
implicitly disabled for 100Mbps (RG)MII operation. */ + phy_name = "external RGMII (no delay)"; id_mode_dis = BIT(16); - /* fall through */ + port_ctrl = PORT_MODE_EXT_GPHY; + break; + case PHY_INTERFACE_MODE_RGMII_TXID: - if (id_mode_dis) - phy_name = "external RGMII (no delay)"; - else - phy_name = "external RGMII (TX delay)"; - bcmgenet_sys_writel(priv, - PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); + /* RGMII_TXID: Add 2ns delay on TXC (90 degree shift) */ + phy_name = "external RGMII (TX delay)"; + port_ctrl = PORT_MODE_EXT_GPHY; + break; + + case PHY_INTERFACE_MODE_RGMII_RXID: + phy_name = "external RGMII (RX delay)"; + port_ctrl = PORT_MODE_EXT_GPHY; break; default: dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface); return -EINVAL; } + bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); + + /* Restore the MII PHY after isolation */ + if (bmcr >= 0) + phy_write(phydev, MII_BMCR, bmcr); + + priv->ext_phy = !priv->internal_phy && + (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); + /* This is an external PHY (xMII), so we need to enable the RGMII * block for the interface to work */ @@ -306,21 +302,71 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); } - if (init) { - linkmode_copy(phydev->advertising, phydev->supported); + if (init) + dev_info(kdev, "configuring instance for %s\n", phy_name); - /* The internal PHY has its link interrupts routed to the - * Ethernet MAC ISRs. On GENETv5 there is a hardware issue - * that prevents the signaling of link UP interrupts when - * the link operates at 10Mbps, so fallback to polling for - * those versions of GENET. - */ - if (priv->internal_phy && !GENET_IS_V5(priv)) - phydev->irq = PHY_IGNORE_INTERRUPT; + return 0; +} - dev_info(kdev, "configuring instance for %s\n", phy_name); +int bcmgenet_mii_probe(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device_node *dn = priv->pdev->dev.of_node; + struct phy_device *phydev; + u32 phy_flags = 0; + int ret; + + /* Communicate the integrated PHY revision */ + if (priv->internal_phy) + phy_flags = priv->gphy_rev; + + /* Initialize link state variables that bcmgenet_mii_setup() uses */ + priv->old_link = -1; + priv->old_speed = -1; + priv->old_duplex = -1; + priv->old_pause = -1; + + if (dn) { + phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, + phy_flags, priv->phy_interface); + if (!phydev) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } else { + phydev = dev->phydev; + phydev->dev_flags = phy_flags; + + ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, + priv->phy_interface); + if (ret) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } } + /* Configure port multiplexer based on what the probed PHY device since + * reading the 'max-speed' property determines the maximum supported + * PHY speed which is needed for bcmgenet_mii_config() to configure + * things appropriately. + */ + ret = bcmgenet_mii_config(dev, true); + if (ret) { + phy_disconnect(dev->phydev); + return ret; + } + + linkmode_copy(phydev->advertising, phydev->supported); + + /* The internal PHY has its link interrupts routed to the + * Ethernet MAC ISRs. On GENETv5 there is a hardware issue + * that prevents the signaling of link UP interrupts when + * the link operates at 10Mbps, so fallback to polling for + * those versions of GENET. 
+ */ + if (priv->internal_phy && !GENET_IS_V5(priv)) + dev->phydev->irq = PHY_IGNORE_INTERRUPT; + return 0; } @@ -436,7 +482,7 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) struct device_node *dn = priv->pdev->dev.of_node; struct device *kdev = &priv->pdev->dev; struct phy_device *phydev; - int phy_mode; + phy_interface_t phy_mode; int ret; /* Fetch the PHY phandle */ @@ -454,10 +500,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) } /* Get the link mode */ - phy_mode = of_get_phy_mode(dn); - if (phy_mode < 0) { + ret = of_get_phy_mode(dn, &phy_mode); + if (ret) { dev_err(kdev, "invalid PHY mode property\n"); - return phy_mode; + return ret; } priv->phy_interface = phy_mode; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 77f3511b97de..ca3aa1250dd1 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6280,6 +6280,10 @@ static int tg3_ptp_enable(struct ptp_clock_info *ptp, switch (rq->type) { case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + if (rq->perout.index != 0) return -EINVAL; diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index f4b3bd85dfe3..53b50c24d9c9 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -22,7 +22,7 @@ if NET_VENDOR_CADENCE config MACB tristate "Cadence MACB/GEM support" depends on HAS_DMA && COMMON_CLK - select PHYLIB + select PHYLINK ---help--- The Cadence MACB ethernet interface is found on many Atmel AT32 and AT91 parts. This driver also supports the Cadence GEM (Gigabit diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 03983bd46eef..19fe4f4867c7 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -7,7 +7,7 @@ #ifndef _MACB_H #define _MACB_H -#include <linux/phy.h> +#include <linux/phylink.h> #include <linux/ptp_clock_kernel.h> #include <linux/net_tstamp.h> #include <linux/interrupt.h> @@ -1185,15 +1185,14 @@ struct macb { struct macb_or_gem_ops macbgem_ops; struct mii_bus *mii_bus; - struct device_node *phy_node; - int link; - int speed; - int duplex; + struct phylink *phylink; + struct phylink_config phylink_config; u32 caps; unsigned int dma_burst_length; phy_interface_t phy_interface; + int speed; /* AT91RM9200 transmit */ struct sk_buff *skb; /* holds skb until xmit interrupt completes */ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 1e1b774e1953..8fc2e21f0bb1 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -25,7 +25,7 @@ #include <linux/dma-mapping.h> #include <linux/platform_data/macb.h> #include <linux/platform_device.h> -#include <linux/phy.h> +#include <linux/phylink.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_gpio.h> @@ -388,6 +388,27 @@ mdio_pm_exit: return status; } +static void macb_init_buffers(struct macb *bp) +{ + struct macb_queue *queue; + unsigned int q; + + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + if (bp->hw_dma_cap & HW_DMA_CAP_64B) + queue_writel(queue, RBQPH, + upper_32_bits(queue->rx_ring_dma)); +#endif + queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + if 
(bp->hw_dma_cap & HW_DMA_CAP_64B) + queue_writel(queue, TBQPH, + upper_32_bits(queue->tx_ring_dma)); +#endif + } +} + /** * macb_set_tx_clk() - Set a clock to a new frequency * @clk Pointer to the clock to change @@ -432,114 +453,178 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) netdev_err(dev, "adjusting tx_clk failed.\n"); } -static void macb_handle_link_change(struct net_device *dev) +static void macb_validate(struct phylink_config *config, + unsigned long *supported, + struct phylink_link_state *state) { - struct macb *bp = netdev_priv(dev); - struct phy_device *phydev = dev->phydev; + struct net_device *ndev = to_net_dev(config->dev); + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + struct macb *bp = netdev_priv(ndev); + + /* We only support MII, RMII, GMII, RGMII & SGMII. */ + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_RMII && + state->interface != PHY_INTERFACE_MODE_GMII && + state->interface != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_rgmii(state->interface)) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + + if (!macb_is_gem(bp) && + (state->interface == PHY_INTERFACE_MODE_GMII || + phy_interface_mode_is_rgmii(state->interface))) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + + phylink_set_port_modes(mask); + phylink_set(mask, Autoneg); + phylink_set(mask, Asym_Pause); + + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + + if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && + (state->interface == PHY_INTERFACE_MODE_NA || + state->interface == PHY_INTERFACE_MODE_GMII || + state->interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_rgmii(state->interface))) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + + if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) + phylink_set(mask, 1000baseT_Half); + } + + bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static int macb_mac_link_state(struct phylink_config *config, + struct phylink_link_state *state) +{ + return -EOPNOTSUPP; +} + +static void macb_mac_an_restart(struct phylink_config *config) +{ + /* Not supported */ +} + +static void macb_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct macb *bp = netdev_priv(ndev); unsigned long flags; - int status_change = 0; + u32 old_ctrl, ctrl; spin_lock_irqsave(&bp->lock, flags); - if (phydev->link) { - if ((bp->speed != phydev->speed) || - (bp->duplex != phydev->duplex)) { - u32 reg; + old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR); - reg = macb_readl(bp, NCFGR); - reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); - if (macb_is_gem(bp)) - reg &= ~GEM_BIT(GBE); + /* Clear all the bits we might set later */ + ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE) | + GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL)); - if (phydev->duplex) - reg |= MACB_BIT(FD); - if (phydev->speed == SPEED_100) - reg |= MACB_BIT(SPD); - if (phydev->speed == SPEED_1000 && - bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) - reg |= GEM_BIT(GBE); + if (state->speed == SPEED_1000) + ctrl |= GEM_BIT(GBE); + else if (state->speed == SPEED_100) + ctrl |= MACB_BIT(SPD); - 
macb_or_gem_writel(bp, NCFGR, reg); + if (state->duplex) + ctrl |= MACB_BIT(FD); - bp->speed = phydev->speed; - bp->duplex = phydev->duplex; - status_change = 1; - } - } + /* We do not support MLO_PAUSE_RX yet */ + if (state->pause & MLO_PAUSE_TX) + ctrl |= MACB_BIT(PAE); - if (phydev->link != bp->link) { - if (!phydev->link) { - bp->speed = 0; - bp->duplex = -1; - } - bp->link = phydev->link; + if (state->interface == PHY_INTERFACE_MODE_SGMII) + ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); - status_change = 1; - } + /* Apply the new configuration, if any */ + if (old_ctrl ^ ctrl) + macb_or_gem_writel(bp, NCFGR, ctrl); + + bp->speed = state->speed; spin_unlock_irqrestore(&bp->lock, flags); +} - if (status_change) { - if (phydev->link) { - /* Update the TX clock rate if and only if the link is - * up and there has been a link change. - */ - macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); +static void macb_mac_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct macb *bp = netdev_priv(ndev); + struct macb_queue *queue; + unsigned int q; + u32 ctrl; - netif_carrier_on(dev); - netdev_info(dev, "link up (%d/%s)\n", - phydev->speed, - phydev->duplex == DUPLEX_FULL ? - "Full" : "Half"); - } else { - netif_carrier_off(dev); - netdev_info(dev, "link down\n"); - } - } + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) + queue_writel(queue, IDR, + bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); + + /* Disable Rx and Tx */ + ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE)); + macb_writel(bp, NCR, ctrl); + + netif_tx_stop_all_queues(ndev); } -/* based on au1000_eth. c*/ -static int macb_mii_probe(struct net_device *dev) +static void macb_mac_link_up(struct phylink_config *config, unsigned int mode, + phy_interface_t interface, struct phy_device *phy) { - struct macb *bp = netdev_priv(dev); - struct phy_device *phydev; - struct device_node *np; - int ret, i; + struct net_device *ndev = to_net_dev(config->dev); + struct macb *bp = netdev_priv(ndev); + struct macb_queue *queue; + unsigned int q; - np = bp->pdev->dev.of_node; - ret = 0; + macb_set_tx_clk(bp->tx_clk, bp->speed, ndev); - if (np) { - if (of_phy_is_fixed_link(np)) { - bp->phy_node = of_node_get(np); - } else { - bp->phy_node = of_parse_phandle(np, "phy-handle", 0); - /* fallback to standard phy registration if no - * phy-handle was found nor any phy found during - * dt phy registration - */ - if (!bp->phy_node && !phy_find_first(bp->mii_bus)) { - for (i = 0; i < PHY_MAX_ADDR; i++) { - phydev = mdiobus_scan(bp->mii_bus, i); - if (IS_ERR(phydev) && - PTR_ERR(phydev) != -ENODEV) { - ret = PTR_ERR(phydev); - break; - } - } + /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down + * cleared the pipeline and control registers. 
+ */ + bp->macbgem_ops.mog_init_rings(bp); + macb_init_buffers(bp); - if (ret) - return -ENODEV; - } - } - } + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) + queue_writel(queue, IER, + bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); + + /* Enable Rx and Tx */ + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); + + netif_tx_wake_all_queues(ndev); +} + +static const struct phylink_mac_ops macb_phylink_ops = { + .validate = macb_validate, + .mac_link_state = macb_mac_link_state, + .mac_an_restart = macb_mac_an_restart, + .mac_config = macb_mac_config, + .mac_link_down = macb_mac_link_down, + .mac_link_up = macb_mac_link_up, +}; - if (bp->phy_node) { - phydev = of_phy_connect(dev, bp->phy_node, - &macb_handle_link_change, 0, - bp->phy_interface); - if (!phydev) - return -ENODEV; +static int macb_phylink_connect(struct macb *bp) +{ + struct net_device *dev = bp->dev; + struct phy_device *phydev; + int ret; + + if (bp->pdev->dev.of_node && + of_parse_phandle(bp->pdev->dev.of_node, "phy-handle", 0)) { + ret = phylink_of_phy_connect(bp->phylink, bp->pdev->dev.of_node, + 0); + if (ret) { + netdev_err(dev, "Could not attach PHY (%d)\n", ret); + return ret; + } } else { phydev = phy_find_first(bp->mii_bus); if (!phydev) { @@ -548,27 +633,33 @@ static int macb_mii_probe(struct net_device *dev) } /* attach the mac to the phy */ - ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, - bp->phy_interface); + ret = phylink_connect_phy(bp->phylink, phydev); if (ret) { - netdev_err(dev, "Could not attach to PHY\n"); + netdev_err(dev, "Could not attach to PHY (%d)\n", ret); return ret; } } - /* mask with MAC supported features */ - if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) - phy_set_max_speed(phydev, SPEED_1000); - else - phy_set_max_speed(phydev, SPEED_100); + phylink_start(bp->phylink); - if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) - phy_remove_link_mode(phydev, - ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + return 0; +} - bp->link = 0; - bp->speed = 0; - bp->duplex = -1; +/* based on au1000_eth. 
c*/ +static int macb_mii_probe(struct net_device *dev) +{ + struct macb *bp = netdev_priv(dev); + + bp->phylink_config.dev = &dev->dev; + bp->phylink_config.type = PHYLINK_NETDEV; + + bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, + bp->phy_interface, &macb_phylink_ops); + if (IS_ERR(bp->phylink)) { + netdev_err(dev, "Could not create a phylink instance (%ld)\n", + PTR_ERR(bp->phylink)); + return PTR_ERR(bp->phylink); + } return 0; } @@ -598,20 +689,10 @@ static int macb_mii_init(struct macb *bp) dev_set_drvdata(&bp->dev->dev, bp->mii_bus); np = bp->pdev->dev.of_node; - if (np && of_phy_is_fixed_link(np)) { - if (of_phy_register_fixed_link(np) < 0) { - dev_err(&bp->pdev->dev, - "broken fixed-link specification %pOF\n", np); - goto err_out_free_mdiobus; - } - - err = mdiobus_register(bp->mii_bus); - } else { - err = of_mdiobus_register(bp->mii_bus, np); - } + err = of_mdiobus_register(bp->mii_bus, np); if (err) - goto err_out_free_fixed_link; + goto err_out_free_mdiobus; err = macb_mii_probe(bp->dev); if (err) @@ -621,11 +702,7 @@ static int macb_mii_init(struct macb *bp) err_out_unregister_bus: mdiobus_unregister(bp->mii_bus); -err_out_free_fixed_link: - if (np && of_phy_is_fixed_link(np)) - of_phy_deregister_fixed_link(np); err_out_free_mdiobus: - of_node_put(bp->phy_node); mdiobus_free(bp->mii_bus); err_out: return err; @@ -1314,26 +1391,14 @@ static void macb_hresp_error_task(unsigned long data) bp->macbgem_ops.mog_init_rings(bp); /* Initialize TX and RX buffers */ - for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { - queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - if (bp->hw_dma_cap & HW_DMA_CAP_64B) - queue_writel(queue, RBQPH, - upper_32_bits(queue->rx_ring_dma)); -#endif - queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - if (bp->hw_dma_cap & HW_DMA_CAP_64B) - queue_writel(queue, TBQPH, - upper_32_bits(queue->tx_ring_dma)); -#endif + macb_init_buffers(bp); - /* Enable interrupts */ + /* Enable interrupts */ + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) queue_writel(queue, IER, bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); - } ctrl |= MACB_BIT(RE) | MACB_BIT(TE); macb_writel(bp, NCR, ctrl); @@ -2221,19 +2286,13 @@ static void macb_configure_dma(struct macb *bp) static void macb_init_hw(struct macb *bp) { - struct macb_queue *queue; - unsigned int q; - u32 config; macb_reset_hw(bp); macb_set_hwaddr(bp); config = macb_mdc_clk_div(bp); - if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) - config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ - config |= MACB_BIT(PAE); /* PAuse Enable */ config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ if (bp->caps & MACB_CAPS_JUMBO) config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ @@ -2249,36 +2308,11 @@ static void macb_init_hw(struct macb *bp) macb_writel(bp, NCFGR, config); if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) gem_writel(bp, JML, bp->jumbo_max_len); - bp->speed = SPEED_10; - bp->duplex = DUPLEX_HALF; bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; if (bp->caps & MACB_CAPS_JUMBO) bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; macb_configure_dma(bp); - - /* Initialize TX and RX buffers */ - for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { - queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - if (bp->hw_dma_cap & HW_DMA_CAP_64B) - 
queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma)); -#endif - queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - if (bp->hw_dma_cap & HW_DMA_CAP_64B) - queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); -#endif - - /* Enable interrupts */ - queue_writel(queue, IER, - bp->rx_intr_mask | - MACB_TX_INT_FLAGS | - MACB_BIT(HRESP)); - } - - /* Enable TX and RX */ - macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); } /* The hash address register is 64 bits long and takes up two @@ -2402,8 +2436,8 @@ static void macb_set_rx_mode(struct net_device *dev) static int macb_open(struct net_device *dev) { - struct macb *bp = netdev_priv(dev); size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; + struct macb *bp = netdev_priv(dev); struct macb_queue *queue; unsigned int q; int err; @@ -2414,15 +2448,6 @@ static int macb_open(struct net_device *dev) if (err < 0) goto pm_exit; - /* carrier starts down */ - netif_carrier_off(dev); - - /* if the phy is not yet register, retry later*/ - if (!dev->phydev) { - err = -EAGAIN; - goto pm_exit; - } - /* RX buffers initialization */ macb_init_rx_buffer_size(bp, bufsz); @@ -2436,11 +2461,11 @@ static int macb_open(struct net_device *dev) for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) napi_enable(&queue->napi); - bp->macbgem_ops.mog_init_rings(bp); macb_init_hw(bp); - /* schedule a link state check */ - phy_start(dev->phydev); + err = macb_phylink_connect(bp); + if (err) + goto pm_exit; netif_tx_start_all_queues(dev); @@ -2467,8 +2492,8 @@ static int macb_close(struct net_device *dev) for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) napi_disable(&queue->napi); - if (dev->phydev) - phy_stop(dev->phydev); + phylink_stop(bp->phylink); + phylink_disconnect_phy(bp->phylink); spin_lock_irqsave(&bp->lock, flags); macb_reset_hw(bp); @@ -2702,17 +2727,18 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) wol->supported = 0; wol->wolopts = 0; - if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { - wol->supported = WAKE_MAGIC; - - if (bp->wol & MACB_WOL_ENABLED) - wol->wolopts |= WAKE_MAGIC; - } + if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) + phylink_ethtool_get_wol(bp->phylink, wol); } static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct macb *bp = netdev_priv(netdev); + int ret; + + ret = phylink_ethtool_set_wol(bp->phylink, wol); + if (!ret) + return 0; if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || (wol->wolopts & ~WAKE_MAGIC)) @@ -2728,6 +2754,22 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return 0; } +static int macb_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *kset) +{ + struct macb *bp = netdev_priv(netdev); + + return phylink_ethtool_ksettings_get(bp->phylink, kset); +} + +static int macb_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *kset) +{ + struct macb *bp = netdev_priv(netdev); + + return phylink_ethtool_ksettings_set(bp->phylink, kset); +} + static void macb_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { @@ -3164,8 +3206,8 @@ static const struct ethtool_ops macb_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .get_wol = macb_get_wol, .set_wol = macb_set_wol, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_link_ksettings = macb_get_link_ksettings, + 
.set_link_ksettings = macb_set_link_ksettings, .get_ringparam = macb_get_ringparam, .set_ringparam = macb_set_ringparam, }; @@ -3178,8 +3220,8 @@ static const struct ethtool_ops gem_ethtool_ops = { .get_ethtool_stats = gem_get_ethtool_stats, .get_strings = gem_get_ethtool_strings, .get_sset_count = gem_get_sset_count, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_link_ksettings = macb_get_link_ksettings, + .set_link_ksettings = macb_set_link_ksettings, .get_ringparam = macb_get_ringparam, .set_ringparam = macb_set_ringparam, .get_rxnfc = gem_get_rxnfc, @@ -3188,26 +3230,21 @@ static const struct ethtool_ops gem_ethtool_ops = { static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct phy_device *phydev = dev->phydev; struct macb *bp = netdev_priv(dev); if (!netif_running(dev)) return -EINVAL; - if (!phydev) - return -ENODEV; - - if (!bp->ptp_info) - return phy_mii_ioctl(phydev, rq, cmd); - - switch (cmd) { - case SIOCSHWTSTAMP: - return bp->ptp_info->set_hwtst(dev, rq, cmd); - case SIOCGHWTSTAMP: - return bp->ptp_info->get_hwtst(dev, rq); - default: - return phy_mii_ioctl(phydev, rq, cmd); + if (bp->ptp_info) { + switch (cmd) { + case SIOCSHWTSTAMP: + return bp->ptp_info->set_hwtst(dev, rq, cmd); + case SIOCGHWTSTAMP: + return bp->ptp_info->get_hwtst(dev, rq); + } } + + return phylink_mii_ioctl(bp->phylink, rq, cmd); } static inline void macb_set_txcsum_feature(struct macb *bp, @@ -3330,7 +3367,8 @@ static void macb_configure_caps(struct macb *bp, #ifdef CONFIG_MACB_USE_HWSTAMP if (gem_has_ptp(bp)) { if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) - pr_err("GEM doesn't support hardware ptp.\n"); + dev_err(&bp->pdev->dev, + "GEM doesn't support hardware ptp.\n"); else { bp->hw_dma_cap |= HW_DMA_CAP_PTP; bp->ptp_info = &gem_ptp_info; @@ -3707,8 +3745,9 @@ static int at91ether_open(struct net_device *dev) MACB_BIT(ISR_ROVR) | MACB_BIT(HRESP)); - /* schedule a link state check */ - phy_start(dev->phydev); + ret = macb_phylink_connect(lp); + if (ret) + return ret; netif_start_queue(dev); @@ -3737,6 +3776,9 @@ static int at91ether_close(struct net_device *dev) netif_stop_queue(dev); + phylink_stop(lp->phylink); + phylink_disconnect_phy(lp->phylink); + dma_free_coherent(&lp->pdev->dev, AT91ETHER_MAX_RX_DESCR * macb_dma_desc_get_size(lp), @@ -4181,7 +4223,7 @@ static int macb_probe(struct platform_device *pdev) struct clk *tsu_clk = NULL; unsigned int queue_mask, num_queues; bool native_io; - struct phy_device *phydev; + phy_interface_t interface; struct net_device *dev; struct resource *regs; void __iomem *mem; @@ -4308,12 +4350,14 @@ static int macb_probe(struct platform_device *pdev) macb_get_hwaddr(bp); } - err = of_get_phy_mode(np); - if (err < 0) + err = of_get_phy_mode(np, &interface); + if (err) /* not found in DT, MII by default */ bp->phy_interface = PHY_INTERFACE_MODE_MII; else - bp->phy_interface = err; + bp->phy_interface = interface; + + bp->speed = SPEED_UNKNOWN; /* IP specific init */ err = init(pdev); @@ -4324,8 +4368,6 @@ static int macb_probe(struct platform_device *pdev) if (err) goto err_out_free_netdev; - phydev = dev->phydev; - netif_carrier_off(dev); err = register_netdev(dev); @@ -4337,8 +4379,6 @@ static int macb_probe(struct platform_device *pdev) tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task, (unsigned long)bp); - phy_attached_info(phydev); - netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", macb_is_gem(bp) ? 
"GEM" : "MACB", macb_readl(bp, MID), dev->base_addr, dev->irq, dev->dev_addr); @@ -4349,11 +4389,7 @@ static int macb_probe(struct platform_device *pdev) return 0; err_out_unregister_mdio: - phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); - of_node_put(bp->phy_node); - if (np && of_phy_is_fixed_link(np)) - of_phy_deregister_fixed_link(np); mdiobus_free(bp->mii_bus); err_out_free_netdev: @@ -4377,18 +4413,12 @@ static int macb_remove(struct platform_device *pdev) { struct net_device *dev; struct macb *bp; - struct device_node *np = pdev->dev.of_node; dev = platform_get_drvdata(pdev); if (dev) { bp = netdev_priv(dev); - if (dev->phydev) - phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); - if (np && of_phy_is_fixed_link(np)) - of_phy_deregister_fixed_link(np); - dev->phydev = NULL; mdiobus_free(bp->mii_bus); unregister_netdev(dev); @@ -4403,7 +4433,7 @@ static int macb_remove(struct platform_device *pdev) clk_disable_unprepare(bp->tsu_clk); pm_runtime_set_suspended(&pdev->dev); } - of_node_put(bp->phy_node); + phylink_destroy(bp->phylink); free_netdev(dev); } @@ -4421,7 +4451,6 @@ static int __maybe_unused macb_suspend(struct device *dev) if (!netif_running(netdev)) return 0; - if (bp->wol & MACB_WOL_ENABLED) { macb_writel(bp, IER, MACB_BIT(WOL)); macb_writel(bp, WOL, MACB_BIT(MAG)); @@ -4432,8 +4461,9 @@ static int __maybe_unused macb_suspend(struct device *dev) for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) napi_disable(&queue->napi); - phy_stop(netdev->phydev); - phy_suspend(netdev->phydev); + rtnl_lock(); + phylink_stop(bp->phylink); + rtnl_unlock(); spin_lock_irqsave(&bp->lock, flags); macb_reset_hw(bp); spin_unlock_irqrestore(&bp->lock, flags); @@ -4481,12 +4511,11 @@ static int __maybe_unused macb_resume(struct device *dev) for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) napi_enable(&queue->napi); - phy_resume(netdev->phydev); - phy_init_hw(netdev->phydev); - phy_start(netdev->phydev); + rtnl_lock(); + phylink_start(bp->phylink); + rtnl_unlock(); } - bp->macbgem_ops.mog_init_rings(bp); macb_init_hw(bp); macb_set_rx_mode(netdev); macb_restore_features(bp); diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 0e5de88fd6e8..cdd7e5da4a74 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1499,7 +1499,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev) netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM; - netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM; + netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN; mac = of_get_mac_address(pdev->dev.of_node); diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index 20390f6afbb4..a4b4d475abf8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -8,7 +8,8 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \ cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \ cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o cxgb4_mps.o \ - cudbg_common.o cudbg_lib.o cudbg_zlib.o + cudbg_common.o cudbg_lib.o cudbg_zlib.o cxgb4_tc_mqprio.o \ + cxgb4_tc_matchall.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git 
a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 69746696a929..f5be3ee1bdb4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -325,6 +325,9 @@ enum cudbg_qdesc_qtype { CUDBG_QTYPE_CRYPTO_FLQ, CUDBG_QTYPE_TLS_RXQ, CUDBG_QTYPE_TLS_FLQ, + CUDBG_QTYPE_ETHOFLD_TXQ, + CUDBG_QTYPE_ETHOFLD_RXQ, + CUDBG_QTYPE_ETHOFLD_FLQ, CUDBG_QTYPE_MAX, }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index c2e92786608b..19c11568113a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -4,6 +4,7 @@ */ #include <linux/sort.h> +#include <linux/string.h> #include "t4_regs.h" #include "cxgb4.h" @@ -776,24 +777,18 @@ static int cudbg_get_mem_region(struct adapter *padap, struct cudbg_mem_desc *mem_desc) { u8 mc, found = 0; - u32 i, idx = 0; - int rc; + u32 idx = 0; + int rc, i; rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc); if (rc) return rc; - for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) { - if (!strcmp(cudbg_region[i], region_name)) { - found = 1; - idx = i; - break; - } - } - if (!found) + i = match_string(cudbg_region, ARRAY_SIZE(cudbg_region), region_name); + if (i < 0) return -EINVAL; - found = 0; + idx = i; for (i = 0; i < meminfo->mem_c; i++) { if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region)) continue; /* Skip holes */ @@ -2930,6 +2925,10 @@ void cudbg_fill_qdesc_num_and_size(const struct adapter *padap, tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE * MAX_RXQ_DESC_SIZE; + /* ETHOFLD TXQ, RXQ, and FLQ */ + tot_entries += MAX_OFLD_QSETS * 3; + tot_size += MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; + tot_size += sizeof(struct cudbg_ver_hdr) + sizeof(struct cudbg_qdesc_info) + sizeof(struct cudbg_qdesc_entry) * tot_entries; @@ -3087,6 +3086,23 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, } } + /* ETHOFLD TXQ */ + if (s->eohw_txq) + for (i = 0; i < s->eoqsets; i++) + QDESC_GET_TXQ(&s->eohw_txq[i].q, + CUDBG_QTYPE_ETHOFLD_TXQ, out); + + /* ETHOFLD RXQ and FLQ */ + if (s->eohw_rxq) { + for (i = 0; i < s->eoqsets; i++) + QDESC_GET_RXQ(&s->eohw_rxq[i].rspq, + CUDBG_QTYPE_ETHOFLD_RXQ, out); + + for (i = 0; i < s->eoqsets; i++) + QDESC_GET_FLQ(&s->eohw_rxq[i].fl, + CUDBG_QTYPE_ETHOFLD_FLQ, out); + } + out_unlock: mutex_unlock(&uld_mutex); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 1fbb640e896a..3121ed83d8e2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -392,6 +392,7 @@ struct adapter_params { struct arch_specific_params arch; /* chip specific params */ unsigned char offload; unsigned char crypto; /* HW capability for crypto */ + unsigned char ethofld; /* QoS support */ unsigned char bypass; unsigned char hash_filter; @@ -602,6 +603,8 @@ struct port_info { u8 vivld; u8 smt_idx; u8 rx_cchan; + + bool tc_block_shared; }; struct dentry; @@ -711,6 +714,7 @@ struct sge_eth_rxq { /* SW Ethernet Rx queue */ struct sge_rspq rspq; struct sge_fl fl; struct sge_eth_stats stats; + struct msix_info *msix; } ____cacheline_aligned_in_smp; struct sge_ofld_stats { /* offload queue statistics */ @@ -724,6 +728,7 @@ struct sge_ofld_rxq { /* SW offload Rx queue */ struct sge_rspq rspq; struct sge_fl fl; struct sge_ofld_stats stats; + struct msix_info *msix; } ____cacheline_aligned_in_smp; struct tx_desc 
{ @@ -788,7 +793,6 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */ struct sge_uld_rxq_info { char name[IFNAMSIZ]; /* name of ULD driver */ struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */ - u16 *msix_tbl; /* msix_tbl for uld */ u16 *rspq_id; /* response queue id's of rxq */ u16 nrxq; /* # of ingress uld queues */ u16 nciq; /* # of completion queues */ @@ -801,6 +805,55 @@ struct sge_uld_txq_info { u16 ntxq; /* # of egress uld queues */ }; +enum sge_eosw_state { + CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */ + CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */ + CXGB4_EO_STATE_FLOWC_OPEN_REPLY, /* Waiting for FLOWC open reply */ + CXGB4_EO_STATE_ACTIVE, /* Ready to accept traffic */ + CXGB4_EO_STATE_FLOWC_CLOSE_SEND, /* Send FLOWC close request */ + CXGB4_EO_STATE_FLOWC_CLOSE_REPLY, /* Waiting for FLOWC close reply */ +}; + +struct sge_eosw_desc { + struct sk_buff *skb; /* SKB to free after getting completion */ + dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */ +}; + +struct sge_eosw_txq { + spinlock_t lock; /* Per queue lock to synchronize completions */ + enum sge_eosw_state state; /* Current ETHOFLD State */ + struct sge_eosw_desc *desc; /* Descriptor ring to hold packets */ + u32 ndesc; /* Number of descriptors */ + u32 pidx; /* Current Producer Index */ + u32 last_pidx; /* Last successfully transmitted Producer Index */ + u32 cidx; /* Current Consumer Index */ + u32 last_cidx; /* Last successfully reclaimed Consumer Index */ + u32 flowc_idx; /* Descriptor containing a FLOWC request */ + u32 inuse; /* Number of packets held in ring */ + + u32 cred; /* Current available credits */ + u32 ncompl; /* # of completions posted */ + u32 last_compl; /* # of credits consumed since last completion req */ + + u32 eotid; /* Index into EOTID table in software */ + u32 hwtid; /* Hardware EOTID index */ + + u32 hwqid; /* Underlying hardware queue index */ + struct net_device *netdev; /* Pointer to netdevice */ + struct tasklet_struct qresume_tsk; /* Restarts the queue */ + struct completion completion; /* completion for FLOWC rendezvous */ +}; + +struct sge_eohw_txq { + spinlock_t lock; /* Per queue lock */ + struct sge_txq q; /* HW Txq */ + struct adapter *adap; /* Backpointer to adapter */ + unsigned long tso; /* # of TSO requests */ + unsigned long tx_cso; /* # of Tx checksum offloads */ + unsigned long vlan_ins; /* # of Tx VLAN insertions */ + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ +}; + struct sge { struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; struct sge_eth_txq ptptxq; @@ -814,11 +867,16 @@ struct sge { struct sge_rspq intrq ____cacheline_aligned_in_smp; spinlock_t intrq_lock; + struct sge_eohw_txq *eohw_txq; + struct sge_ofld_rxq *eohw_rxq; + u16 max_ethqsets; /* # of available Ethernet queue sets */ u16 ethqsets; /* # of active Ethernet queue sets */ u16 ethtxq_rover; /* Tx queue to clean up next */ u16 ofldqsets; /* # of active ofld queue sets */ u16 nqs_per_uld; /* # of Rx queues per ULD */ + u16 eoqsets; /* # of ETHOFLD queues */ + u16 timer_val[SGE_NTIMERS]; u8 counter_val[SGE_NCOUNTERS]; u16 dbqtimer_tick; @@ -841,6 +899,9 @@ struct sge { unsigned long *blocked_fl; struct timer_list rx_timer; /* refills starving FLs */ struct timer_list tx_timer; /* checks Tx queues */ + + int fwevtq_msix_idx; /* Index to firmware event queue MSI-X info */ + int nd_msix_idx; /* Index to non-data interrupts MSI-X info */ }; #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) @@ -870,13 +931,13 @@ struct 
hash_mac_addr { unsigned int iface_mac; }; -struct uld_msix_bmap { +struct msix_bmap { unsigned long *msix_bmap; unsigned int mapsize; spinlock_t lock; /* lock for acquiring bitmap */ }; -struct uld_msix_info { +struct msix_info { unsigned short vec; char desc[IFNAMSIZ + 10]; unsigned int idx; @@ -945,14 +1006,9 @@ struct adapter { struct cxgb4_virt_res vres; unsigned int swintr; - struct msix_info { - unsigned short vec; - char desc[IFNAMSIZ + 10]; - cpumask_var_t aff_mask; - } msix_info[MAX_INGQ + 1]; - struct uld_msix_info *msix_info_ulds; /* msix info for uld's */ - struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */ - int msi_idx; + /* MSI-X Info for NIC and OFLD queues */ + struct msix_info *msix_info; + struct msix_bmap msix_bmap; struct doorbell_stats db_stats; struct sge sge; @@ -1044,6 +1100,12 @@ struct adapter { #if IS_ENABLED(CONFIG_THERMAL) struct ch_thermal ch_thermal; #endif + + /* TC MQPRIO offload */ + struct cxgb4_tc_mqprio *tc_mqprio; + + /* TC MATCHALL classifier offload */ + struct cxgb4_tc_matchall *tc_matchall; }; /* Support for "sched-class" command to allow a TX Scheduling Class to be @@ -1073,10 +1135,12 @@ enum { enum { SCHED_CLASS_LEVEL_CL_RL = 0, /* class rate limiter */ + SCHED_CLASS_LEVEL_CH_RL = 2, /* channel rate limiter */ }; enum { SCHED_CLASS_MODE_CLASS = 0, /* per-class scheduling */ + SCHED_CLASS_MODE_FLOW, /* per-flow scheduling */ }; enum { @@ -1100,6 +1164,14 @@ struct ch_sched_queue { s8 class; /* class index */ }; +/* Support for "sched_flowc" command to allow one or more FLOWC + * to be bound to a TX Scheduling Class. + */ +struct ch_sched_flowc { + s32 tid; /* TID to bind */ + s8 class; /* class index */ +}; + /* Defined bit width of user definable filter tuples */ #define ETHTYPE_BITWIDTH 16 @@ -1214,8 +1286,11 @@ struct ch_filter_specification { u16 nat_lport; /* local port to use after NAT'ing */ u16 nat_fport; /* foreign port to use after NAT'ing */ + u32 tc_prio; /* TC's filter priority index */ + u64 tc_cookie; /* Unique cookie identifying TC rules */ + /* reservation for future additions */ - u8 rsvd[24]; + u8 rsvd[12]; /* Filter rule value/mask pairs. 
*/ @@ -1293,6 +1368,11 @@ static inline int is_uld(const struct adapter *adap) return (adap->params.offload || adap->params.crypto); } +static inline int is_ethofld(const struct adapter *adap) +{ + return adap->params.ethofld; +} + static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) { return readl(adap->regs + reg_addr); @@ -1426,6 +1506,9 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid, int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, struct net_device *dev, unsigned int iqid, unsigned int uld_type); +int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq, + struct net_device *dev, u32 iqid); +void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq); irqreturn_t t4_sge_intr_msix(int irq, void *cookie); int t4_sge_init(struct adapter *adap); void t4_sge_start(struct adapter *adap); @@ -1890,6 +1973,12 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl); void free_tx_desc(struct adapter *adap, struct sge_txq *q, unsigned int n, bool unmap); +void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *txq, + u32 ndesc); +int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc); +void cxgb4_ethofld_restart(unsigned long data); +int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *si); void free_txq(struct adapter *adap, struct sge_txq *q); void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, bool unmap); @@ -1948,5 +2037,10 @@ int cxgb4_alloc_raw_mac_filt(struct adapter *adap, int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid, int *tcam_idx, const u8 *addr, bool persistent, u8 *smt_idx); - +int cxgb4_get_msix_idx_from_bmap(struct adapter *adap); +void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx); +int cxgb_open(struct net_device *dev); +int cxgb_close(struct net_device *dev); +void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q); +void cxgb4_quiesce_rx(struct sge_rspq *q); #endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index ae6a47dd7dc9..a13b03f771cc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2658,6 +2658,7 @@ static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld) static int sge_qinfo_show(struct seq_file *seq, void *v) { + int eth_entries, ctrl_entries, eo_entries = 0; int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 }; int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 }; int uld_txq_entries[CXGB4_TX_MAX] = { 0 }; @@ -2665,11 +2666,12 @@ static int sge_qinfo_show(struct seq_file *seq, void *v) const struct sge_uld_rxq_info *urxq_info; struct adapter *adap = seq->private; int i, n, r = (uintptr_t)v - 1; - int eth_entries, ctrl_entries; struct sge *s = &adap->sge; eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4); ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4); + if (adap->sge.eohw_txq) + eo_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4); mutex_lock(&uld_mutex); if (s->uld_txq_info) @@ -2761,6 +2763,54 @@ do { \ } r -= eth_entries; + if (r < eo_entries) { + int base_qset = r * 4; + const struct sge_ofld_rxq *rx = &s->eohw_rxq[base_qset]; + const struct sge_eohw_txq *tx = &s->eohw_txq[base_qset]; + + n = min(4, s->eoqsets - 4 * r); + + S("QType:", "ETHOFLD"); + S("Interface:", + rx[i].rspq.netdev ? 
rx[i].rspq.netdev->name : "N/A"); + T("TxQ ID:", q.cntxt_id); + T("TxQ size:", q.size); + T("TxQ inuse:", q.in_use); + T("TxQ CIDX:", q.cidx); + T("TxQ PIDX:", q.pidx); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + S3("u", "FL size:", rx->fl.size ? rx->fl.size - 8 : 0); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + RL("RxPackets:", stats.pkts); + RL("RxImm:", stats.imm); + RL("RxAN", stats.an); + RL("RxNoMem", stats.nomem); + TL("TSO:", tso); + TL("TxCSO:", tx_cso); + TL("VLANins:", vlan_ins); + TL("TxQFull:", q.stops); + TL("TxQRestarts:", q.restarts); + TL("TxMapErr:", mapping_err); + RL("FLAllocErr:", fl.alloc_failed); + RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLMapErr:", fl.mapping_err); + RL("FLLow:", fl.low); + RL("FLStarving:", fl.starving); + + goto unlock; + } + + r -= eo_entries; if (r < uld_txq_entries[CXGB4_TX_OFLD]) { const struct sge_uld_txq *tx; @@ -3007,6 +3057,7 @@ static int sge_queue_entries(const struct adapter *adap) mutex_unlock(&uld_mutex); return DIV_ROUND_UP(adap->sge.ethqsets, 4) + + (adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) + tot_uld_entries + DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 43b0f8c57da7..1d39fca11810 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -440,36 +440,48 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family) { struct adapter *adap = netdev2adap(dev); struct tid_info *t = &adap->tids; + bool found = false; + u8 i, n, cnt; int ftid; - spin_lock_bh(&t->ftid_lock); - if (family == PF_INET) { - ftid = find_first_zero_bit(t->ftid_bmap, t->nftids); - if (ftid >= t->nftids) - ftid = -1; - } else { - if (is_t6(adap->params.chip)) { - ftid = bitmap_find_free_region(t->ftid_bmap, - t->nftids, 1); - if (ftid < 0) - goto out_unlock; - - /* this is only a lookup, keep the found region - * unallocated - */ - bitmap_release_region(t->ftid_bmap, ftid, 1); - } else { - ftid = bitmap_find_free_region(t->ftid_bmap, - t->nftids, 2); - if (ftid < 0) - goto out_unlock; + /* IPv4 occupy 1 slot. IPv6 occupy 2 slots on T6 and 4 slots + * on T5. + */ + n = 1; + if (family == PF_INET6) { + n++; + if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) + n += 2; + } + + if (n > t->nftids) + return -ENOMEM; - bitmap_release_region(t->ftid_bmap, ftid, 2); + /* Find free filter slots from the end of TCAM. Appropriate + * checks must be done by caller later to ensure the prio + * passed by TC doesn't conflict with prio saved by existing + * rules in the TCAM. + */ + spin_lock_bh(&t->ftid_lock); + ftid = t->nftids - 1; + while (ftid >= n - 1) { + cnt = 0; + for (i = 0; i < n; i++) { + if (test_bit(ftid - i, t->ftid_bmap)) + break; + cnt++; } + if (cnt == n) { + ftid &= ~(n - 1); + found = true; + break; + } + + ftid -= n; } -out_unlock: spin_unlock_bh(&t->ftid_lock); - return ftid; + + return found ? 
ftid : -ENOMEM; } static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family, @@ -510,6 +522,60 @@ static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family, spin_unlock_bh(&t->ftid_lock); } +bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio) +{ + struct adapter *adap = netdev2adap(dev); + struct filter_entry *prev_fe, *next_fe; + struct tid_info *t = &adap->tids; + u32 prev_ftid, next_ftid; + bool valid = true; + + /* Only insert the rule if both of the following conditions + * are met: + * 1. The immediate previous rule has priority <= @prio. + * 2. The immediate next rule has priority >= @prio. + */ + spin_lock_bh(&t->ftid_lock); + /* Don't insert if there's a rule already present at @idx. */ + if (test_bit(idx, t->ftid_bmap)) { + valid = false; + goto out_unlock; + } + + next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx); + if (next_ftid >= t->nftids) + next_ftid = idx; + + next_fe = &adap->tids.ftid_tab[next_ftid]; + + prev_ftid = find_last_bit(t->ftid_bmap, idx); + if (prev_ftid >= idx) + prev_ftid = idx; + + /* See if the filter entry belongs to an IPv6 rule, which + * occupy 4 slots on T5 and 2 slots on T6. Adjust the + * reference to the previously inserted filter entry + * accordingly. + */ + if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) { + prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x3]; + if (!prev_fe->fs.type) + prev_fe = &adap->tids.ftid_tab[prev_ftid]; + } else { + prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x1]; + if (!prev_fe->fs.type) + prev_fe = &adap->tids.ftid_tab[prev_ftid]; + } + + if ((prev_fe->valid && prio < prev_fe->fs.tc_prio) || + (next_fe->valid && prio > next_fe->fs.tc_prio)) + valid = false; + +out_unlock: + spin_unlock_bh(&t->ftid_lock); + return valid; +} + /* Delete the filter at a specified index. 
*/ static int del_filter_wr(struct adapter *adapter, int fidx) { @@ -806,6 +872,12 @@ static void fill_default_mask(struct ch_filter_specification *fs) fs->mask.tos |= ~0; if (fs->val.proto && !fs->mask.proto) fs->mask.proto |= ~0; + if (fs->val.pfvf_vld && !fs->mask.pfvf_vld) + fs->mask.pfvf_vld |= ~0; + if (fs->val.pf && !fs->mask.pf) + fs->mask.pf |= ~0; + if (fs->val.vf && !fs->mask.vf) + fs->mask.vf |= ~0; for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) { lip |= fs->val.lip[i]; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h index b0751c0611ec..b3e4a645043d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h @@ -53,4 +53,5 @@ void clear_all_filters(struct adapter *adapter); void init_hash_filter(struct adapter *adap); bool is_filter_exact_match(struct adapter *adap, struct ch_filter_specification *fs); +bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio); #endif /* __CXGB4_FILTER_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 38024877751c..e8a1826a1e90 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -65,6 +65,7 @@ #include <linux/uaccess.h> #include <linux/crash_dump.h> #include <net/udp_tunnel.h> +#include <net/xfrm.h> #include "cxgb4.h" #include "cxgb4_filter.h" @@ -82,6 +83,8 @@ #include "sched.h" #include "cxgb4_tc_u32.h" #include "cxgb4_tc_flower.h" +#include "cxgb4_tc_mqprio.h" +#include "cxgb4_tc_matchall.h" #include "cxgb4_ptp.h" #include "cxgb4_cudbg.h" @@ -184,6 +187,8 @@ static struct dentry *cxgb4_debugfs_root; LIST_HEAD(adapter_list); DEFINE_MUTEX(uld_mutex); +static int cfg_queues(struct adapter *adap); + static void link_report(struct net_device *dev) { if (!netif_carrier_ok(dev)) @@ -683,31 +688,6 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie) return IRQ_HANDLED; } -/* - * Name the MSI-X interrupts. 
- */ -static void name_msix_vecs(struct adapter *adap) -{ - int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc); - - /* non-data interrupts */ - snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name); - - /* FW events */ - snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", - adap->port[0]->name); - - /* Ethernet queues */ - for_each_port(adap, j) { - struct net_device *d = adap->port[j]; - const struct port_info *pi = netdev_priv(d); - - for (i = 0; i < pi->nqsets; i++, msi_idx++) - snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", - d->name, i); - } -} - int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec, cpumask_var_t *aff_mask, int idx) { @@ -741,15 +721,19 @@ static int request_msix_queue_irqs(struct adapter *adap) struct sge *s = &adap->sge; struct msix_info *minfo; int err, ethqidx; - int msi_index = 2; - err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, - adap->msix_info[1].desc, &s->fw_evtq); + if (s->fwevtq_msix_idx < 0) + return -ENOMEM; + + err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec, + t4_sge_intr_msix, 0, + adap->msix_info[s->fwevtq_msix_idx].desc, + &s->fw_evtq); if (err) return err; for_each_ethrxq(s, ethqidx) { - minfo = &adap->msix_info[msi_index]; + minfo = s->ethrxq[ethqidx].msix; err = request_irq(minfo->vec, t4_sge_intr_msix, 0, minfo->desc, @@ -759,18 +743,16 @@ static int request_msix_queue_irqs(struct adapter *adap) cxgb4_set_msix_aff(adap, minfo->vec, &minfo->aff_mask, ethqidx); - msi_index++; } return 0; unwind: while (--ethqidx >= 0) { - msi_index--; - minfo = &adap->msix_info[msi_index]; + minfo = s->ethrxq[ethqidx].msix; cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask); free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq); } - free_irq(adap->msix_info[1].vec, &s->fw_evtq); + free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq); return err; } @@ -778,11 +760,11 @@ static void free_msix_queue_irqs(struct adapter *adap) { struct sge *s = &adap->sge; struct msix_info *minfo; - int i, msi_index = 2; + int i; - free_irq(adap->msix_info[1].vec, &s->fw_evtq); + free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq); for_each_ethrxq(s, i) { - minfo = &adap->msix_info[msi_index++]; + minfo = s->ethrxq[i].msix; cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask); free_irq(minfo->vec, &s->ethrxq[i].rspq); } @@ -899,6 +881,12 @@ static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid) return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan; } +void cxgb4_quiesce_rx(struct sge_rspq *q) +{ + if (q->handler) + napi_disable(&q->napi); +} + /* * Wait until all NAPI handlers are descheduled. 
*/ @@ -909,19 +897,24 @@ static void quiesce_rx(struct adapter *adap) for (i = 0; i < adap->sge.ingr_sz; i++) { struct sge_rspq *q = adap->sge.ingr_map[i]; - if (q && q->handler) - napi_disable(&q->napi); + if (!q) + continue; + + cxgb4_quiesce_rx(q); } } /* Disable interrupt and napi handler */ static void disable_interrupts(struct adapter *adap) { + struct sge *s = &adap->sge; + if (adap->flags & CXGB4_FULL_INIT_DONE) { t4_intr_disable(adap); if (adap->flags & CXGB4_USING_MSIX) { free_msix_queue_irqs(adap); - free_irq(adap->msix_info[0].vec, adap); + free_irq(adap->msix_info[s->nd_msix_idx].vec, + adap); } else { free_irq(adap->pdev->irq, adap); } @@ -929,6 +922,17 @@ static void disable_interrupts(struct adapter *adap) } } +void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q) +{ + if (q->handler) + napi_enable(&q->napi); + + /* 0-increment GTS to start the timer and enable interrupts */ + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), + SEINTARM_V(q->intr_params) | + INGRESSQID_V(q->cntxt_id)); +} + /* * Enable NAPI scheduling and interrupt generation for all Rx queues. */ @@ -941,37 +945,63 @@ static void enable_rx(struct adapter *adap) if (!q) continue; - if (q->handler) - napi_enable(&q->napi); - /* 0-increment GTS to start the timer and enable interrupts */ - t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), - SEINTARM_V(q->intr_params) | - INGRESSQID_V(q->cntxt_id)); + cxgb4_enable_rx(adap, q); } } +static int setup_non_data_intr(struct adapter *adap) +{ + int msix; + + adap->sge.nd_msix_idx = -1; + if (!(adap->flags & CXGB4_USING_MSIX)) + return 0; + + /* Request MSI-X vector for non-data interrupt */ + msix = cxgb4_get_msix_idx_from_bmap(adap); + if (msix < 0) + return -ENOMEM; + + snprintf(adap->msix_info[msix].desc, + sizeof(adap->msix_info[msix].desc), + "%s", adap->port[0]->name); + + adap->sge.nd_msix_idx = msix; + return 0; +} static int setup_fw_sge_queues(struct adapter *adap) { struct sge *s = &adap->sge; - int err = 0; + int msix, err = 0; bitmap_zero(s->starving_fl, s->egr_sz); bitmap_zero(s->txq_maperr, s->egr_sz); - if (adap->flags & CXGB4_USING_MSIX) - adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */ - else { + if (adap->flags & CXGB4_USING_MSIX) { + s->fwevtq_msix_idx = -1; + msix = cxgb4_get_msix_idx_from_bmap(adap); + if (msix < 0) + return -ENOMEM; + + snprintf(adap->msix_info[msix].desc, + sizeof(adap->msix_info[msix].desc), + "%s-FWeventq", adap->port[0]->name); + } else { err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, NULL, NULL, NULL, -1); if (err) return err; - adap->msi_idx = -((int)s->intrq.abs_id + 1); + msix = -((int)s->intrq.abs_id + 1); } err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], - adap->msi_idx, NULL, fwevtq_handler, NULL, -1); + msix, NULL, fwevtq_handler, NULL, -1); + if (err && msix >= 0) + cxgb4_free_msix_idx_in_bmap(adap, msix); + + s->fwevtq_msix_idx = msix; return err; } @@ -985,14 +1015,17 @@ static int setup_fw_sge_queues(struct adapter *adap) */ static int setup_sge_queues(struct adapter *adap) { - int err, i, j; - struct sge *s = &adap->sge; struct sge_uld_rxq_info *rxq_info = NULL; + struct sge *s = &adap->sge; unsigned int cmplqid = 0; + int err, i, j, msix = 0; if (is_uld(adap)) rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA]; + if (!(adap->flags & CXGB4_USING_MSIX)) + msix = -((int)s->intrq.abs_id + 1); + for_each_port(adap, i) { struct net_device *dev = adap->port[i]; struct port_info *pi = netdev_priv(dev); @@ -1000,10 +1033,21 @@ static int setup_sge_queues(struct adapter *adap) struct 
sge_eth_txq *t = &s->ethtxq[pi->first_qset]; for (j = 0; j < pi->nqsets; j++, q++) { - if (adap->msi_idx > 0) - adap->msi_idx++; + if (msix >= 0) { + msix = cxgb4_get_msix_idx_from_bmap(adap); + if (msix < 0) { + err = msix; + goto freeout; + } + + snprintf(adap->msix_info[msix].desc, + sizeof(adap->msix_info[msix].desc), + "%s-Rx%d", dev->name, j); + q->msix = &adap->msix_info[msix]; + } + err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, - adap->msi_idx, &q->fl, + msix, &q->fl, t4_ethrx_handler, NULL, t4_get_tp_ch_map(adap, @@ -1090,6 +1134,18 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, } #endif /* CONFIG_CHELSIO_T4_DCB */ + if (dev->num_tc) { + struct port_info *pi = netdev2pinfo(dev); + + /* Send unsupported traffic pattern to normal NIC queues. */ + txq = netdev_pick_tx(dev, skb, sb_dev); + if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) || + ip_hdr(skb)->protocol != IPPROTO_TCP) + txq = txq % pi->nqsets; + + return txq; + } + if (select_queue) { txq = (skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) @@ -1456,19 +1512,23 @@ static int tid_init(struct tid_info *t) struct adapter *adap = container_of(t, struct adapter, tids); unsigned int max_ftids = t->nftids + t->nsftids; unsigned int natids = t->natids; + unsigned int eotid_bmap_size; unsigned int stid_bmap_size; unsigned int ftid_bmap_size; size_t size; stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); ftid_bmap_size = BITS_TO_LONGS(t->nftids); + eotid_bmap_size = BITS_TO_LONGS(t->neotids); size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) + t->nstids * sizeof(*t->stid_tab) + t->nsftids * sizeof(*t->stid_tab) + stid_bmap_size * sizeof(long) + max_ftids * sizeof(*t->ftid_tab) + - ftid_bmap_size * sizeof(long); + ftid_bmap_size * sizeof(long) + + t->neotids * sizeof(*t->eotid_tab) + + eotid_bmap_size * sizeof(long); t->tid_tab = kvzalloc(size, GFP_KERNEL); if (!t->tid_tab) @@ -1479,6 +1539,8 @@ static int tid_init(struct tid_info *t) t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids]; t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size]; t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids]; + t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size]; + t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids]; spin_lock_init(&t->stid_lock); spin_lock_init(&t->atid_lock); spin_lock_init(&t->ftid_lock); @@ -1505,6 +1567,9 @@ static int tid_init(struct tid_info *t) if (!t->stid_base && CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) __set_bit(0, t->stid_bmap); + + if (t->neotids) + bitmap_zero(t->eotid_bmap, t->neotids); } bitmap_zero(t->ftid_bmap, t->nftids); @@ -2361,6 +2426,7 @@ static void update_clip(const struct adapter *adap) */ static int cxgb_up(struct adapter *adap) { + struct sge *s = &adap->sge; int err; mutex_lock(&uld_mutex); @@ -2372,16 +2438,20 @@ static int cxgb_up(struct adapter *adap) goto freeq; if (adap->flags & CXGB4_USING_MSIX) { - name_msix_vecs(adap); - err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, - adap->msix_info[0].desc, adap); + if (s->nd_msix_idx < 0) { + err = -ENOMEM; + goto irq_err; + } + + err = request_irq(adap->msix_info[s->nd_msix_idx].vec, + t4_nondata_intr, 0, + adap->msix_info[s->nd_msix_idx].desc, adap); if (err) goto irq_err; + err = request_msix_queue_irqs(adap); - if (err) { - free_irq(adap->msix_info[0].vec, adap); - goto irq_err; - } + if (err) + goto irq_err_free_nd_msix; } else { err = request_irq(adap->pdev->irq, t4_intr_handler(adap), (adap->flags & 
CXGB4_USING_MSI) ? 0 @@ -2403,11 +2473,13 @@ static int cxgb_up(struct adapter *adap) #endif return err; - irq_err: +irq_err_free_nd_msix: + free_irq(adap->msix_info[s->nd_msix_idx].vec, adap); +irq_err: dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); - freeq: +freeq: t4_free_sge_resources(adap); - rel_lock: +rel_lock: mutex_unlock(&uld_mutex); return err; } @@ -2429,11 +2501,11 @@ static void cxgb_down(struct adapter *adapter) /* * net_device operations */ -static int cxgb_open(struct net_device *dev) +int cxgb_open(struct net_device *dev) { - int err; struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; + int err; netif_carrier_off(dev); @@ -2456,7 +2528,7 @@ static int cxgb_open(struct net_device *dev) return err; } -static int cxgb_close(struct net_device *dev) +int cxgb_close(struct net_device *dev) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; @@ -3163,8 +3235,33 @@ static int cxgb_setup_tc_cls_u32(struct net_device *dev, } } -static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) +static int cxgb_setup_tc_matchall(struct net_device *dev, + struct tc_cls_matchall_offload *cls_matchall, + bool ingress) +{ + struct adapter *adap = netdev2adap(dev); + + if (!adap->tc_matchall) + return -ENOMEM; + + switch (cls_matchall->command) { + case TC_CLSMATCHALL_REPLACE: + return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress); + case TC_CLSMATCHALL_DESTROY: + return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress); + case TC_CLSMATCHALL_STATS: + if (ingress) + return cxgb4_tc_matchall_stats(dev, cls_matchall); + break; + default: + break; + } + + return -EOPNOTSUPP; +} + +static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) { struct net_device *dev = cb_priv; struct port_info *pi = netdev2pinfo(dev); @@ -3185,24 +3282,81 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, return cxgb_setup_tc_cls_u32(dev, type_data); case TC_SETUP_CLSFLOWER: return cxgb_setup_tc_flower(dev, type_data); + case TC_SETUP_CLSMATCHALL: + return cxgb_setup_tc_matchall(dev, type_data, true); default: return -EOPNOTSUPP; } } +static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct net_device *dev = cb_priv; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + + if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { + dev_err(adap->pdev_dev, + "Failed to setup tc on port %d. 
Link Down?\n", + pi->port_id); + return -EINVAL; + } + + if (!tc_cls_can_offload_and_chain0(dev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSMATCHALL: + return cxgb_setup_tc_matchall(dev, type_data, false); + default: + break; + } + + return -EOPNOTSUPP; +} + +static int cxgb_setup_tc_mqprio(struct net_device *dev, + struct tc_mqprio_qopt_offload *mqprio) +{ + struct adapter *adap = netdev2adap(dev); + + if (!is_ethofld(adap) || !adap->tc_mqprio) + return -ENOMEM; + + return cxgb4_setup_tc_mqprio(dev, mqprio); +} + static LIST_HEAD(cxgb_block_cb_list); +static int cxgb_setup_tc_block(struct net_device *dev, + struct flow_block_offload *f) +{ + struct port_info *pi = netdev_priv(dev); + flow_setup_cb_t *cb; + bool ingress_only; + + pi->tc_block_shared = f->block_shared; + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { + cb = cxgb_setup_tc_block_egress_cb; + ingress_only = false; + } else { + cb = cxgb_setup_tc_block_ingress_cb; + ingress_only = true; + } + + return flow_block_cb_setup_simple(f, &cxgb_block_cb_list, + cb, pi, dev, ingress_only); +} + static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { - struct port_info *pi = netdev2pinfo(dev); - switch (type) { + case TC_SETUP_QDISC_MQPRIO: + return cxgb_setup_tc_mqprio(dev, type_data); case TC_SETUP_BLOCK: - return flow_block_cb_setup_simple(type_data, - &cxgb_block_cb_list, - cxgb_setup_tc_block_cb, - pi, dev, true); + return cxgb_setup_tc_block(dev, type_data); default: return -EOPNOTSUPP; } @@ -4286,14 +4440,14 @@ static struct fw_info *find_fw_info(int chip) /* * Phase 0 of initialization: contact FW, obtain config, perform basic init. */ -static int adap_init0(struct adapter *adap) +static int adap_init0(struct adapter *adap, int vpd_skip) { - int ret; - u32 v, port_vec; - enum dev_state state; - u32 params[7], val[7]; struct fw_caps_config_cmd caps_cmd; + u32 params[7], val[7]; + enum dev_state state; + u32 v, port_vec; int reset = 1; + int ret; /* Grab Firmware Device Log parameters as early as possible so we have * access to it for debugging, etc. @@ -4448,9 +4602,11 @@ static int adap_init0(struct adapter *adap) * could have FLASHed a new VPD which won't be read by the firmware * until we do the RESET ... */ - ret = t4_get_vpd_params(adap, &adap->params.vpd); - if (ret < 0) - goto bye; + if (!vpd_skip) { + ret = t4_get_vpd_params(adap, &adap->params.vpd); + if (ret < 0) + goto bye; + } /* Find out what ports are available to us. Note that we need to do * this before calling adap_init0_no_config() since it needs nports @@ -4600,11 +4756,18 @@ static int adap_init0(struct adapter *adap) adap->clipt_start = val[0]; adap->clipt_end = val[1]; - /* We don't yet have a PARAMs calls to retrieve the number of Traffic - * Classes supported by the hardware/firmware so we hard code it here - * for now. - */ - adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; + /* Get the supported number of traffic classes */ + params[0] = FW_PARAM_DEV(NUM_TM_CLASS); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val); + if (ret < 0) { + /* We couldn't retrieve the number of Traffic Classes + * supported by the hardware/firmware. So we hard + * code it here. + */ + adap->params.nsched_cls = is_t4(adap->params.chip) ? 
15 : 16; + } else { + adap->params.nsched_cls = val[0]; + } /* query params related to active filter region */ params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); @@ -4689,7 +4852,8 @@ static int adap_init0(struct adapter *adap) adap->params.offload = 1; if (caps_cmd.ofldcaps || - (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) { + (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) || + (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) { /* query offload-related parameters */ params[0] = FW_PARAM_DEV(NTID); params[1] = FW_PARAM_PFVF(SERVER_START); @@ -4731,6 +4895,19 @@ static int adap_init0(struct adapter *adap) } else { adap->num_ofld_uld += 1; } + + if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) { + params[0] = FW_PARAM_PFVF(ETHOFLD_START); + params[1] = FW_PARAM_PFVF(ETHOFLD_END); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, + params, val); + if (!ret) { + adap->tids.eotid_base = val[0]; + adap->tids.neotids = min_t(u32, MAX_ATIDS, + val[1] - val[0] + 1); + adap->params.ethofld = 1; + } + } } if (caps_cmd.rdmacaps) { params[0] = FW_PARAM_PFVF(STAG_START); @@ -5050,10 +5227,93 @@ static void eeh_resume(struct pci_dev *pdev) rtnl_unlock(); } +static void eeh_reset_prepare(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + int i; + + if (adapter->pf != 4) + return; + + adapter->flags &= ~CXGB4_FW_OK; + + notify_ulds(adapter, CXGB4_STATE_DOWN); + + for_each_port(adapter, i) + if (adapter->port[i]->reg_state == NETREG_REGISTERED) + cxgb_close(adapter->port[i]); + + disable_interrupts(adapter); + cxgb4_free_mps_ref_entries(adapter); + + adap_free_hma_mem(adapter); + + if (adapter->flags & CXGB4_FULL_INIT_DONE) + cxgb_down(adapter); +} + +static void eeh_reset_done(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + int err, i; + + if (adapter->pf != 4) + return; + + err = t4_wait_dev_ready(adapter->regs); + if (err < 0) { + dev_err(adapter->pdev_dev, + "Device not ready, err %d", err); + return; + } + + setup_memwin(adapter); + + err = adap_init0(adapter, 1); + if (err) { + dev_err(adapter->pdev_dev, + "Adapter init failed, err %d", err); + return; + } + + setup_memwin_rdma(adapter); + + if (adapter->flags & CXGB4_FW_OK) { + err = t4_port_init(adapter, adapter->pf, adapter->pf, 0); + if (err) { + dev_err(adapter->pdev_dev, + "Port init failed, err %d", err); + return; + } + } + + err = cfg_queues(adapter); + if (err) { + dev_err(adapter->pdev_dev, + "Config queues failed, err %d", err); + return; + } + + cxgb4_init_mps_ref_entries(adapter); + + err = setup_fw_sge_queues(adapter); + if (err) { + dev_err(adapter->pdev_dev, + "FW sge queue allocation failed, err %d", err); + return; + } + + for_each_port(adapter, i) + if (adapter->port[i]->reg_state == NETREG_REGISTERED) + cxgb_open(adapter->port[i]); +} + static const struct pci_error_handlers cxgb4_eeh = { .error_detected = eeh_err_detected, .slot_reset = eeh_slot_reset, .resume = eeh_resume, + .reset_prepare = eeh_reset_prepare, + .reset_done = eeh_reset_done, }; /* Return true if the Link Configuration supports "High Speeds" (those greater @@ -5070,26 +5330,25 @@ static inline bool is_x_10g_port(const struct link_config *lc) return high_speeds != 0; } -/* - * Perform default configuration of DMA queues depending on the number and type +/* Perform default configuration of DMA queues depending on the number and type * of ports we found and the number of available CPUs. Most settings can be * modified by the admin prior to actual use. 
*/ static int cfg_queues(struct adapter *adap) { + u32 avail_qsets, avail_eth_qsets, avail_uld_qsets; + u32 niqflint, neq, num_ulds; struct sge *s = &adap->sge; - int i, n10g = 0, qidx = 0; - int niqflint, neq, avail_eth_qsets; - int max_eth_qsets = 32; + u32 i, n10g = 0, qidx = 0; #ifndef CONFIG_CHELSIO_T4_DCB int q10g = 0; #endif - /* Reduce memory usage in kdump environment, disable all offload. - */ + /* Reduce memory usage in kdump environment, disable all offload. */ if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) { adap->params.offload = 0; adap->params.crypto = 0; + adap->params.ethofld = 0; } /* Calculate the number of Ethernet Queue Sets available based on @@ -5108,14 +5367,11 @@ static int cfg_queues(struct adapter *adap) if (!(adap->flags & CXGB4_USING_MSIX)) niqflint--; neq = adap->params.pfres.neq / 2; - avail_eth_qsets = min(niqflint, neq); + avail_qsets = min(niqflint, neq); - if (avail_eth_qsets > max_eth_qsets) - avail_eth_qsets = max_eth_qsets; - - if (avail_eth_qsets < adap->params.nports) { + if (avail_qsets < adap->params.nports) { dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n", - avail_eth_qsets, adap->params.nports); + avail_qsets, adap->params.nports); return -ENOMEM; } @@ -5123,6 +5379,7 @@ static int cfg_queues(struct adapter *adap) for_each_port(adap, i) n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); + avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS); #ifdef CONFIG_CHELSIO_T4_DCB /* For Data Center Bridging support we need to be able to support up * to 8 Traffic Priorities; each of which will be assigned to its @@ -5142,8 +5399,7 @@ static int cfg_queues(struct adapter *adap) qidx += pi->nqsets; } #else /* !CONFIG_CHELSIO_T4_DCB */ - /* - * We default to 1 queue per non-10G port and up to # of cores queues + /* We default to 1 queue per non-10G port and up to # of cores queues * per 10G port. */ if (n10g) @@ -5165,19 +5421,40 @@ static int cfg_queues(struct adapter *adap) s->ethqsets = qidx; s->max_ethqsets = qidx; /* MSI-X may lower it later */ + avail_qsets -= qidx; if (is_uld(adap)) { - /* - * For offload we use 1 queue/channel if all ports are up to 1G, + /* For offload we use 1 queue/channel if all ports are up to 1G, * otherwise we divide all available queues amongst the channels * capped by the number of available cores. */ - if (n10g) { - i = min_t(int, MAX_OFLD_QSETS, num_online_cpus()); - s->ofldqsets = roundup(i, adap->params.nports); - } else { + num_ulds = adap->num_uld + adap->num_ofld_uld; + i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus()); + avail_uld_qsets = roundup(i, adap->params.nports); + if (avail_qsets < num_ulds * adap->params.nports) { + adap->params.offload = 0; + adap->params.crypto = 0; + s->ofldqsets = 0; + } else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) { s->ofldqsets = adap->params.nports; + } else { + s->ofldqsets = avail_uld_qsets; + } + + avail_qsets -= num_ulds * s->ofldqsets; + } + + /* ETHOFLD Queues used for QoS offload should follow same + * allocation scheme as normal Ethernet Queues. 
+ */ + if (is_ethofld(adap)) { + if (avail_qsets < s->max_ethqsets) { + adap->params.ethofld = 0; + s->eoqsets = 0; + } else { + s->eoqsets = s->max_ethqsets; } + avail_qsets -= s->eoqsets; } for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { @@ -5230,42 +5507,62 @@ static void reduce_ethqs(struct adapter *adap, int n) } } -static int get_msix_info(struct adapter *adap) +static int alloc_msix_info(struct adapter *adap, u32 num_vec) { - struct uld_msix_info *msix_info; - unsigned int max_ingq = 0; - - if (is_offload(adap)) - max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld; - if (is_pci_uld(adap)) - max_ingq += MAX_OFLD_QSETS * adap->num_uld; - - if (!max_ingq) - goto out; + struct msix_info *msix_info; - msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL); + msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL); if (!msix_info) return -ENOMEM; - adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq), - sizeof(long), GFP_KERNEL); - if (!adap->msix_bmap_ulds.msix_bmap) { + adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec), + sizeof(long), GFP_KERNEL); + if (!adap->msix_bmap.msix_bmap) { kfree(msix_info); return -ENOMEM; } - spin_lock_init(&adap->msix_bmap_ulds.lock); - adap->msix_info_ulds = msix_info; -out: + + spin_lock_init(&adap->msix_bmap.lock); + adap->msix_bmap.mapsize = num_vec; + + adap->msix_info = msix_info; return 0; } static void free_msix_info(struct adapter *adap) { - if (!(adap->num_uld && adap->num_ofld_uld)) - return; + kfree(adap->msix_bmap.msix_bmap); + kfree(adap->msix_info); +} - kfree(adap->msix_info_ulds); - kfree(adap->msix_bmap_ulds.msix_bmap); +int cxgb4_get_msix_idx_from_bmap(struct adapter *adap) +{ + struct msix_bmap *bmap = &adap->msix_bmap; + unsigned int msix_idx; + unsigned long flags; + + spin_lock_irqsave(&bmap->lock, flags); + msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize); + if (msix_idx < bmap->mapsize) { + __set_bit(msix_idx, bmap->msix_bmap); + } else { + spin_unlock_irqrestore(&bmap->lock, flags); + return -ENOSPC; + } + + spin_unlock_irqrestore(&bmap->lock, flags); + return msix_idx; +} + +void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, + unsigned int msix_idx) +{ + struct msix_bmap *bmap = &adap->msix_bmap; + unsigned long flags; + + spin_lock_irqsave(&bmap->lock, flags); + __clear_bit(msix_idx, bmap->msix_bmap); + spin_unlock_irqrestore(&bmap->lock, flags); } /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ @@ -5273,88 +5570,161 @@ static void free_msix_info(struct adapter *adap) static int enable_msix(struct adapter *adap) { - int ofld_need = 0, uld_need = 0; - int i, j, want, need, allocated; + u32 eth_need, uld_need = 0, ethofld_need = 0; + u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0; + u8 num_uld = 0, nchan = adap->params.nports; + u32 i, want, need, num_vec; struct sge *s = &adap->sge; - unsigned int nchan = adap->params.nports; struct msix_entry *entries; - int max_ingq = MAX_INGQ; - - if (is_pci_uld(adap)) - max_ingq += (MAX_OFLD_QSETS * adap->num_uld); - if (is_offload(adap)) - max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld); - entries = kmalloc_array(max_ingq + 1, sizeof(*entries), - GFP_KERNEL); - if (!entries) - return -ENOMEM; - - /* map for msix */ - if (get_msix_info(adap)) { - adap->params.offload = 0; - adap->params.crypto = 0; - } - - for (i = 0; i < max_ingq + 1; ++i) - entries[i].entry = i; + struct port_info *pi; + int allocated, ret; - want = s->max_ethqsets + EXTRA_VECS; - if (is_offload(adap)) { - want += adap->num_ofld_uld * s->ofldqsets; - 
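/* Editorial sketch (not part of the patch): the MSI-X bookkeeping above
 * replaces the per-ULD msix table with a single adapter-wide bitmap that
 * cxgb4_get_msix_idx_from_bmap() and cxgb4_free_msix_idx_in_bmap() manage
 * under a spinlock.  The userspace analogue below shows the same
 * allocate-first-free-slot / clear-on-free pattern; MAP_SIZE and the
 * function names are illustrative only.
 */
#include <stdio.h>

#define MAP_SIZE 32				/* hypothetical vector count */

static unsigned char idx_bmap[MAP_SIZE];	/* 0 = free, 1 = in use */

static int idx_alloc(void)
{
	int i;

	for (i = 0; i < MAP_SIZE; i++) {	/* find_first_zero_bit() analogue */
		if (!idx_bmap[i]) {
			idx_bmap[i] = 1;	/* __set_bit() analogue */
			return i;
		}
	}
	return -1;				/* driver returns -ENOSPC here */
}

static void idx_free(int idx)
{
	idx_bmap[idx] = 0;			/* __clear_bit() analogue */
}

int main(void)
{
	int a = idx_alloc(), b = idx_alloc();

	idx_free(a);
	printf("first=%d second=%d reused=%d\n", a, b, idx_alloc());
	return 0;
}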
ofld_need = adap->num_ofld_uld * nchan; - } - if (is_pci_uld(adap)) { - want += adap->num_uld * s->ofldqsets; - uld_need = adap->num_uld * nchan; - } + want = s->max_ethqsets; #ifdef CONFIG_CHELSIO_T4_DCB /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for * each port. */ - need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need; + need = 8 * nchan; #else - need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need; + need = nchan; #endif + eth_need = need; + if (is_uld(adap)) { + num_uld = adap->num_ofld_uld + adap->num_uld; + want += num_uld * s->ofldqsets; + uld_need = num_uld * nchan; + need += uld_need; + } + + if (is_ethofld(adap)) { + want += s->eoqsets; + ethofld_need = eth_need; + need += ethofld_need; + } + + want += EXTRA_VECS; + need += EXTRA_VECS; + + entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL); + if (!entries) + return -ENOMEM; + + for (i = 0; i < want; i++) + entries[i].entry = i; + allocated = pci_enable_msix_range(adap->pdev, entries, need, want); if (allocated < 0) { - dev_info(adap->pdev_dev, "not enough MSI-X vectors left," - " not using MSI-X\n"); - kfree(entries); - return allocated; + /* Disable offload and attempt to get vectors for NIC + * only mode. + */ + want = s->max_ethqsets + EXTRA_VECS; + need = eth_need + EXTRA_VECS; + allocated = pci_enable_msix_range(adap->pdev, entries, + need, want); + if (allocated < 0) { + dev_info(adap->pdev_dev, + "Disabling MSI-X due to insufficient MSI-X vectors\n"); + ret = allocated; + goto out_free; + } + + dev_info(adap->pdev_dev, + "Disabling offload due to insufficient MSI-X vectors\n"); + adap->params.offload = 0; + adap->params.crypto = 0; + adap->params.ethofld = 0; + s->ofldqsets = 0; + s->eoqsets = 0; + uld_need = 0; + ethofld_need = 0; + } + + num_vec = allocated; + if (num_vec < want) { + /* Distribute available vectors to the various queue groups. + * Every group gets its minimum requirement and NIC gets top + * priority for leftovers. + */ + ethqsets = eth_need; + if (is_uld(adap)) + ofldqsets = nchan; + if (is_ethofld(adap)) + eoqsets = ethofld_need; + + num_vec -= need; + while (num_vec) { + if (num_vec < eth_need + ethofld_need || + ethqsets > s->max_ethqsets) + break; + + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + if (pi->nqsets < 2) + continue; + + ethqsets++; + num_vec--; + if (ethofld_need) { + eoqsets++; + num_vec--; + } + } + } + + if (is_uld(adap)) { + while (num_vec) { + if (num_vec < uld_need || + ofldqsets > s->ofldqsets) + break; + + ofldqsets++; + num_vec -= uld_need; + } + } + } else { + ethqsets = s->max_ethqsets; + if (is_uld(adap)) + ofldqsets = s->ofldqsets; + if (is_ethofld(adap)) + eoqsets = s->eoqsets; } - /* Distribute available vectors to the various queue groups. - * Every group gets its minimum requirement and NIC gets top - * priority for leftovers. 
- */ - i = allocated - EXTRA_VECS - ofld_need - uld_need; - if (i < s->max_ethqsets) { - s->max_ethqsets = i; - if (i < s->ethqsets) - reduce_ethqs(adap, i); + if (ethqsets < s->max_ethqsets) { + s->max_ethqsets = ethqsets; + reduce_ethqs(adap, ethqsets); } + if (is_uld(adap)) { - if (allocated < want) - s->nqs_per_uld = nchan; - else - s->nqs_per_uld = s->ofldqsets; + s->ofldqsets = ofldqsets; + s->nqs_per_uld = s->ofldqsets; } - for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i) + if (is_ethofld(adap)) + s->eoqsets = eoqsets; + + /* map for msix */ + ret = alloc_msix_info(adap, allocated); + if (ret) + goto out_disable_msix; + + for (i = 0; i < allocated; i++) { adap->msix_info[i].vec = entries[i].vector; - if (is_uld(adap)) { - for (j = 0 ; i < allocated; ++i, j++) { - adap->msix_info_ulds[j].vec = entries[i].vector; - adap->msix_info_ulds[j].idx = i; - } - adap->msix_bmap_ulds.mapsize = j; + adap->msix_info[i].idx = i; } - dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " - "nic %d per uld %d\n", - allocated, s->max_ethqsets, s->nqs_per_uld); + + dev_info(adap->pdev_dev, + "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d\n", + allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld); kfree(entries); return 0; + +out_disable_msix: + pci_disable_msix(adap->pdev); + +out_free: + kfree(entries); + return ret; } #undef EXTRA_VECS @@ -5441,6 +5811,8 @@ static void free_some_resources(struct adapter *adapter) kvfree(adapter->srq); t4_cleanup_sched(adapter); kvfree(adapter->tids.tid_tab); + cxgb4_cleanup_tc_matchall(adapter); + cxgb4_cleanup_tc_mqprio(adapter); cxgb4_cleanup_tc_flower(adapter); cxgb4_cleanup_tc_u32(adapter); kfree(adapter->sge.egr_map); @@ -5837,7 +6209,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } setup_memwin(adapter); - err = adap_init0(adapter); + err = adap_init0(adapter, 0); #ifdef CONFIG_DEBUG_FS bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz); #endif @@ -5855,8 +6227,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_LIST_HEAD(&adapter->mac_hlist); for_each_port(adapter, i) { + /* For supporting MQPRIO Offload, need some extra + * queues for each ETHOFLD TIDs. Keep it equal to + * MAX_ATIDs for now. Once we connect to firmware + * later and query the EOTID params, we'll come to + * know the actual # of EOTIDs supported. 
+ */ netdev = alloc_etherdev_mq(sizeof(struct port_info), - MAX_ETH_QSETS); + MAX_ETH_QSETS + MAX_ATIDS); if (!netdev) { err = -ENOMEM; goto out_free_dev; @@ -6004,6 +6382,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (cxgb4_init_tc_flower(adapter)) dev_warn(&pdev->dev, "could not offload tc flower, continuing\n"); + + if (cxgb4_init_tc_mqprio(adapter)) + dev_warn(&pdev->dev, + "could not offload tc mqprio, continuing\n"); + + if (cxgb4_init_tc_matchall(adapter)) + dev_warn(&pdev->dev, + "could not offload tc matchall, continuing\n"); } if (is_offload(adapter) || is_hashfilter(adapter)) { @@ -6040,6 +6426,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto out_free_dev; + err = setup_non_data_intr(adapter); + if (err) { + dev_err(adapter->pdev_dev, + "Non Data interrupt allocation failed, err: %d\n", err); + goto out_free_dev; + } + err = setup_fw_sge_queues(adapter); if (err) { dev_err(adapter->pdev_dev, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index e447976bdd3e..0fa80bef575d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -378,15 +378,14 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, } } -static void cxgb4_process_flow_actions(struct net_device *in, - struct flow_cls_offload *cls, - struct ch_filter_specification *fs) +void cxgb4_process_flow_actions(struct net_device *in, + struct flow_action *actions, + struct ch_filter_specification *fs) { - struct flow_rule *rule = flow_cls_offload_flow_rule(cls); struct flow_action_entry *act; int i; - flow_action_for_each(i, act, &rule->action) { + flow_action_for_each(i, act, actions) { switch (act->id) { case FLOW_ACTION_ACCEPT: fs->action = FILTER_PASS; @@ -544,17 +543,16 @@ static bool valid_pedit_action(struct net_device *dev, return true; } -static int cxgb4_validate_flow_actions(struct net_device *dev, - struct flow_cls_offload *cls) +int cxgb4_validate_flow_actions(struct net_device *dev, + struct flow_action *actions) { - struct flow_rule *rule = flow_cls_offload_flow_rule(cls); struct flow_action_entry *act; bool act_redir = false; bool act_pedit = false; bool act_vlan = false; int i; - flow_action_for_each(i, act, &rule->action) { + flow_action_for_each(i, act, actions) { switch (act->id) { case FLOW_ACTION_ACCEPT: case FLOW_ACTION_DROP: @@ -636,14 +634,15 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, int cxgb4_tc_flower_replace(struct net_device *dev, struct flow_cls_offload *cls) { + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; struct adapter *adap = netdev2adap(dev); struct ch_tc_flower_entry *ch_flower; struct ch_filter_specification *fs; struct filter_ctx ctx; - int fidx; - int ret; + int fidx, ret; - if (cxgb4_validate_flow_actions(dev, cls)) + if (cxgb4_validate_flow_actions(dev, &rule->action)) return -EOPNOTSUPP; if (cxgb4_validate_flow_match(dev, cls)) @@ -658,20 +657,41 @@ int cxgb4_tc_flower_replace(struct net_device *dev, fs = &ch_flower->fs; fs->hitcnts = 1; cxgb4_process_flow_match(dev, cls, fs); - cxgb4_process_flow_actions(dev, cls, fs); + cxgb4_process_flow_actions(dev, &rule->action, fs); fs->hash = is_filter_exact_match(adap, fs); if (fs->hash) { fidx = 0; } else { - fidx = cxgb4_get_free_ftid(dev, fs->type ? 
PF_INET6 : PF_INET); - if (fidx < 0) { - netdev_err(dev, "%s: No fidx for offload.\n", __func__); + u8 inet_family; + + inet_family = fs->type ? PF_INET6 : PF_INET; + + /* Note that TC uses prio 0 to indicate stack to + * generate automatic prio and hence doesn't pass prio + * 0 to driver. However, the hardware TCAM index + * starts from 0. Hence, the -1 here. + */ + if (cls->common.prio <= adap->tids.nftids) + fidx = cls->common.prio - 1; + else + fidx = cxgb4_get_free_ftid(dev, inet_family); + + /* Only insert FLOWER rule if its priority doesn't + * conflict with existing rules in the LETCAM. + */ + if (fidx < 0 || + !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) { + NL_SET_ERR_MSG_MOD(extack, + "No free LETCAM index available"); ret = -ENOMEM; goto free_entry; } } + fs->tc_prio = cls->common.prio; + fs->tc_cookie = cls->cookie; + init_completion(&ctx.completion); ret = __cxgb4_set_filter(dev, fidx, fs, &ctx); if (ret) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h index eb4c95248baf..e132516e9868 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -108,6 +108,12 @@ struct ch_tc_pedit_fields { #define PEDIT_TCP_SPORT_DPORT 0x0 #define PEDIT_UDP_SPORT_DPORT 0x0 +void cxgb4_process_flow_actions(struct net_device *in, + struct flow_action *actions, + struct ch_filter_specification *fs); +int cxgb4_validate_flow_actions(struct net_device *dev, + struct flow_action *actions); + int cxgb4_tc_flower_replace(struct net_device *dev, struct flow_cls_offload *cls); int cxgb4_tc_flower_destroy(struct net_device *dev, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c new file mode 100644 index 000000000000..102b370fbd3e --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2019 Chelsio Communications. All rights reserved. 
*/ + +#include "cxgb4.h" +#include "cxgb4_tc_matchall.h" +#include "sched.h" +#include "cxgb4_uld.h" +#include "cxgb4_filter.h" +#include "cxgb4_tc_flower.h" + +static int cxgb4_matchall_egress_validate(struct net_device *dev, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct flow_action *actions = &cls->rule->action; + struct port_info *pi = netdev2pinfo(dev); + struct flow_action_entry *entry; + u64 max_link_rate; + u32 i, speed; + int ret; + + if (!flow_action_has_entries(actions)) { + NL_SET_ERR_MSG_MOD(extack, + "Egress MATCHALL offload needs at least 1 policing action"); + return -EINVAL; + } else if (!flow_offload_has_one_action(actions)) { + NL_SET_ERR_MSG_MOD(extack, + "Egress MATCHALL offload only supports 1 policing action"); + return -EINVAL; + } else if (pi->tc_block_shared) { + NL_SET_ERR_MSG_MOD(extack, + "Egress MATCHALL offload not supported with shared blocks"); + return -EINVAL; + } + + ret = t4_get_link_params(pi, NULL, &speed, NULL); + if (ret) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to get max speed supported by the link"); + return -EINVAL; + } + + /* Convert from Mbps to bps */ + max_link_rate = (u64)speed * 1000 * 1000; + + flow_action_for_each(i, entry, actions) { + switch (entry->id) { + case FLOW_ACTION_POLICE: + /* Convert bytes per second to bits per second */ + if (entry->police.rate_bytes_ps * 8 > max_link_rate) { + NL_SET_ERR_MSG_MOD(extack, + "Specified policing max rate is larger than underlying link speed"); + return -ERANGE; + } + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Only policing action supported with Egress MATCHALL offload"); + return -EOPNOTSUPP; + } + } + + return 0; +} + +static int cxgb4_matchall_alloc_tc(struct net_device *dev, + struct tc_cls_matchall_offload *cls) +{ + struct ch_sched_params p = { + .type = SCHED_CLASS_TYPE_PACKET, + .u.params.level = SCHED_CLASS_LEVEL_CH_RL, + .u.params.mode = SCHED_CLASS_MODE_CLASS, + .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS, + .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS, + .u.params.class = SCHED_CLS_NONE, + .u.params.minrate = 0, + .u.params.weight = 0, + .u.params.pktsize = dev->mtu, + }; + struct netlink_ext_ack *extack = cls->common.extack; + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + struct flow_action_entry *entry; + struct sched_class *e; + u32 i; + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + + flow_action_for_each(i, entry, &cls->rule->action) + if (entry->id == FLOW_ACTION_POLICE) + break; + + /* Convert from bytes per second to Kbps */ + p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000); + p.u.params.channel = pi->tx_chan; + e = cxgb4_sched_class_alloc(dev, &p); + if (!e) { + NL_SET_ERR_MSG_MOD(extack, + "No free traffic class available for policing action"); + return -ENOMEM; + } + + tc_port_matchall->egress.hwtc = e->idx; + tc_port_matchall->egress.cookie = cls->cookie; + tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED; + return 0; +} + +static void cxgb4_matchall_free_tc(struct net_device *dev) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc); + + tc_port_matchall->egress.hwtc = SCHED_CLS_NONE; + tc_port_matchall->egress.cookie = 0; + 
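/* Editorial sketch (not part of the patch): the egress matchall policing
 * path above juggles three rate units -- the link speed reported by
 * t4_get_link_params() in Mbps, the TC police rate in bytes per second,
 * and the scheduler class rate in Kbps.  The worked example below uses
 * made-up numbers to show the conversions the driver performs.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t speed_mbps = 10000;		/* e.g. a 10G link */
	uint64_t max_link_rate = (uint64_t)speed_mbps * 1000 * 1000; /* -> bps */
	uint64_t rate_bytes_ps = 500000000ULL;	/* from FLOW_ACTION_POLICE */
	uint64_t rate_bps = rate_bytes_ps * 8;	/* bytes/s -> bits/s */
	uint64_t rate_kbps = rate_bps / 1000;	/* what the scheduler class takes */

	printf("police %llu bps (%llu Kbps), link max %llu bps, %s\n",
	       (unsigned long long)rate_bps,
	       (unsigned long long)rate_kbps,
	       (unsigned long long)max_link_rate,
	       rate_bps > max_link_rate ? "rejected" : "accepted");
	return 0;
}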
tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED; +} + +static int cxgb4_matchall_alloc_filter(struct net_device *dev, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + struct ch_filter_specification *fs; + int ret, fidx; + + /* Note that TC uses prio 0 to indicate stack to generate + * automatic prio and hence doesn't pass prio 0 to driver. + * However, the hardware TCAM index starts from 0. Hence, the + * -1 here. 1 slot is enough to create a wildcard matchall + * VIID rule. + */ + if (cls->common.prio <= adap->tids.nftids) + fidx = cls->common.prio - 1; + else + fidx = cxgb4_get_free_ftid(dev, PF_INET); + + /* Only insert MATCHALL rule if its priority doesn't conflict + * with existing rules in the LETCAM. + */ + if (fidx < 0 || + !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) { + NL_SET_ERR_MSG_MOD(extack, + "No free LETCAM index available"); + return -ENOMEM; + } + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + fs = &tc_port_matchall->ingress.fs; + memset(fs, 0, sizeof(*fs)); + + fs->tc_prio = cls->common.prio; + fs->tc_cookie = cls->cookie; + fs->hitcnts = 1; + + fs->val.pfvf_vld = 1; + fs->val.pf = adap->pf; + fs->val.vf = pi->vin; + + cxgb4_process_flow_actions(dev, &cls->rule->action, fs); + + ret = cxgb4_set_filter(dev, fidx, fs); + if (ret) + return ret; + + tc_port_matchall->ingress.tid = fidx; + tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED; + return 0; +} + +static int cxgb4_matchall_free_filter(struct net_device *dev) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + int ret; + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + + ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid, + &tc_port_matchall->ingress.fs); + if (ret) + return ret; + + tc_port_matchall->ingress.packets = 0; + tc_port_matchall->ingress.bytes = 0; + tc_port_matchall->ingress.last_used = 0; + tc_port_matchall->ingress.tid = 0; + tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED; + return 0; +} + +int cxgb4_tc_matchall_replace(struct net_device *dev, + struct tc_cls_matchall_offload *cls_matchall, + bool ingress) +{ + struct netlink_ext_ack *extack = cls_matchall->common.extack; + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + int ret; + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + if (ingress) { + if (tc_port_matchall->ingress.state == + CXGB4_MATCHALL_STATE_ENABLED) { + NL_SET_ERR_MSG_MOD(extack, + "Only 1 Ingress MATCHALL can be offloaded"); + return -ENOMEM; + } + + ret = cxgb4_validate_flow_actions(dev, + &cls_matchall->rule->action); + if (ret) + return ret; + + return cxgb4_matchall_alloc_filter(dev, cls_matchall); + } + + if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) { + NL_SET_ERR_MSG_MOD(extack, + "Only 1 Egress MATCHALL can be offloaded"); + return -ENOMEM; + } + + ret = cxgb4_matchall_egress_validate(dev, cls_matchall); + if (ret) + return ret; + + return cxgb4_matchall_alloc_tc(dev, cls_matchall); +} + +int cxgb4_tc_matchall_destroy(struct net_device *dev, + struct tc_cls_matchall_offload *cls_matchall, + bool ingress) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + 
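/* Editorial sketch (not part of the patch): matchall, flower and u32 all
 * map a TC priority onto a LETCAM slot the same way -- prio 0 is reserved
 * by TC for "auto", so a user-supplied prio N becomes hardware index N - 1
 * when it falls inside the filter region, and otherwise the driver asks
 * for the first free index.  nftids and the fallback value below are
 * illustrative stand-ins, not driver state.
 */
#include <stdio.h>

static int pick_filter_index(unsigned int prio, unsigned int nftids,
			     int first_free_idx)
{
	if (prio && prio <= nftids)
		return (int)prio - 1;	/* prio 1 -> index 0, and so on */
	return first_free_idx;		/* any free LETCAM slot */
}

int main(void)
{
	printf("prio 1   -> idx %d\n", pick_filter_index(1, 496, 7));
	printf("prio 600 -> idx %d\n", pick_filter_index(600, 496, 7));
	return 0;
}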
struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + if (ingress) { + if (cls_matchall->cookie != + tc_port_matchall->ingress.fs.tc_cookie) + return -ENOENT; + + return cxgb4_matchall_free_filter(dev); + } + + if (cls_matchall->cookie != tc_port_matchall->egress.cookie) + return -ENOENT; + + cxgb4_matchall_free_tc(dev); + return 0; +} + +int cxgb4_tc_matchall_stats(struct net_device *dev, + struct tc_cls_matchall_offload *cls_matchall) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + u64 packets, bytes; + int ret; + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED) + return -ENOENT; + + ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid, + &packets, &bytes, + tc_port_matchall->ingress.fs.hash); + if (ret) + return ret; + + if (tc_port_matchall->ingress.packets != packets) { + flow_stats_update(&cls_matchall->stats, + bytes - tc_port_matchall->ingress.bytes, + packets - tc_port_matchall->ingress.packets, + tc_port_matchall->ingress.last_used); + + tc_port_matchall->ingress.packets = packets; + tc_port_matchall->ingress.bytes = bytes; + tc_port_matchall->ingress.last_used = jiffies; + } + + return 0; +} + +static void cxgb4_matchall_disable_offload(struct net_device *dev) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) + cxgb4_matchall_free_tc(dev); + + if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED) + cxgb4_matchall_free_filter(dev); +} + +int cxgb4_init_tc_matchall(struct adapter *adap) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct cxgb4_tc_matchall *tc_matchall; + int ret; + + tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL); + if (!tc_matchall) + return -ENOMEM; + + tc_port_matchall = kcalloc(adap->params.nports, + sizeof(*tc_port_matchall), + GFP_KERNEL); + if (!tc_port_matchall) { + ret = -ENOMEM; + goto out_free_matchall; + } + + tc_matchall->port_matchall = tc_port_matchall; + adap->tc_matchall = tc_matchall; + return 0; + +out_free_matchall: + kfree(tc_matchall); + return ret; +} + +void cxgb4_cleanup_tc_matchall(struct adapter *adap) +{ + u8 i; + + if (adap->tc_matchall) { + if (adap->tc_matchall->port_matchall) { + for (i = 0; i < adap->params.nports; i++) { + struct net_device *dev = adap->port[i]; + + if (dev) + cxgb4_matchall_disable_offload(dev); + } + kfree(adap->tc_matchall->port_matchall); + } + kfree(adap->tc_matchall); + } +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h new file mode 100644 index 000000000000..ab6b5683dfd3 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2019 Chelsio Communications. All rights reserved. 
*/ + +#ifndef __CXGB4_TC_MATCHALL_H__ +#define __CXGB4_TC_MATCHALL_H__ + +#include <net/pkt_cls.h> + +enum cxgb4_matchall_state { + CXGB4_MATCHALL_STATE_DISABLED = 0, + CXGB4_MATCHALL_STATE_ENABLED, +}; + +struct cxgb4_matchall_egress_entry { + enum cxgb4_matchall_state state; /* Current MATCHALL offload state */ + u8 hwtc; /* Traffic class bound to port */ + u64 cookie; /* Used to identify the MATCHALL rule offloaded */ +}; + +struct cxgb4_matchall_ingress_entry { + enum cxgb4_matchall_state state; /* Current MATCHALL offload state */ + u32 tid; /* Index to hardware filter entry */ + struct ch_filter_specification fs; /* Filter entry */ + u64 bytes; /* # of bytes hitting the filter */ + u64 packets; /* # of packets hitting the filter */ + u64 last_used; /* Last updated jiffies time */ +}; + +struct cxgb4_tc_port_matchall { + struct cxgb4_matchall_egress_entry egress; /* Egress offload info */ + struct cxgb4_matchall_ingress_entry ingress; /* Ingress offload info */ +}; + +struct cxgb4_tc_matchall { + struct cxgb4_tc_port_matchall *port_matchall; /* Per port entry */ +}; + +int cxgb4_tc_matchall_replace(struct net_device *dev, + struct tc_cls_matchall_offload *cls_matchall, + bool ingress); +int cxgb4_tc_matchall_destroy(struct net_device *dev, + struct tc_cls_matchall_offload *cls_matchall, + bool ingress); +int cxgb4_tc_matchall_stats(struct net_device *dev, + struct tc_cls_matchall_offload *cls_matchall); + +int cxgb4_init_tc_matchall(struct adapter *adap); +void cxgb4_cleanup_tc_matchall(struct adapter *adap); +#endif /* __CXGB4_TC_MATCHALL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c new file mode 100644 index 000000000000..db55673b77bd --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c @@ -0,0 +1,650 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2019 Chelsio Communications. All rights reserved. 
*/ + +#include "cxgb4.h" +#include "cxgb4_tc_mqprio.h" +#include "sched.h" + +static int cxgb4_mqprio_validate(struct net_device *dev, + struct tc_mqprio_qopt_offload *mqprio) +{ + u64 min_rate = 0, max_rate = 0, max_link_rate; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + u32 speed, qcount = 0, qoffset = 0; + int ret; + u8 i; + + if (!mqprio->qopt.num_tc) + return 0; + + if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) { + netdev_err(dev, "Only full TC hardware offload is supported\n"); + return -EINVAL; + } else if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL) { + netdev_err(dev, "Only channel mode offload is supported\n"); + return -EINVAL; + } else if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) { + netdev_err(dev, "Only bandwidth rate shaper supported\n"); + return -EINVAL; + } else if (mqprio->qopt.num_tc > adap->params.nsched_cls) { + netdev_err(dev, + "Only %u traffic classes supported by hardware\n", + adap->params.nsched_cls); + return -ERANGE; + } + + ret = t4_get_link_params(pi, NULL, &speed, NULL); + if (ret) { + netdev_err(dev, "Failed to get link speed, ret: %d\n", ret); + return -EINVAL; + } + + /* Convert from Mbps to bps */ + max_link_rate = (u64)speed * 1000 * 1000; + + for (i = 0; i < mqprio->qopt.num_tc; i++) { + qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset); + qcount += mqprio->qopt.count[i]; + + /* Convert byte per second to bits per second */ + min_rate += (mqprio->min_rate[i] * 8); + max_rate += (mqprio->max_rate[i] * 8); + } + + if (qoffset >= adap->tids.neotids || qcount > adap->tids.neotids) + return -ENOMEM; + + if (min_rate > max_link_rate || max_rate > max_link_rate) { + netdev_err(dev, + "Total Min/Max (%llu/%llu) Rate > supported (%llu)\n", + min_rate, max_rate, max_link_rate); + return -EINVAL; + } + + return 0; +} + +static int cxgb4_init_eosw_txq(struct net_device *dev, + struct sge_eosw_txq *eosw_txq, + u32 eotid, u32 hwqid) +{ + struct adapter *adap = netdev2adap(dev); + struct sge_eosw_desc *ring; + + memset(eosw_txq, 0, sizeof(*eosw_txq)); + + ring = kcalloc(CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM, + sizeof(*ring), GFP_KERNEL); + if (!ring) + return -ENOMEM; + + eosw_txq->desc = ring; + eosw_txq->ndesc = CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM; + spin_lock_init(&eosw_txq->lock); + eosw_txq->state = CXGB4_EO_STATE_CLOSED; + eosw_txq->eotid = eotid; + eosw_txq->hwtid = adap->tids.eotid_base + eosw_txq->eotid; + eosw_txq->cred = adap->params.ofldq_wr_cred; + eosw_txq->hwqid = hwqid; + eosw_txq->netdev = dev; + tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart, + (unsigned long)eosw_txq); + return 0; +} + +static void cxgb4_clean_eosw_txq(struct net_device *dev, + struct sge_eosw_txq *eosw_txq) +{ + struct adapter *adap = netdev2adap(dev); + + cxgb4_eosw_txq_free_desc(adap, eosw_txq, eosw_txq->ndesc); + eosw_txq->pidx = 0; + eosw_txq->last_pidx = 0; + eosw_txq->cidx = 0; + eosw_txq->last_cidx = 0; + eosw_txq->flowc_idx = 0; + eosw_txq->inuse = 0; + eosw_txq->cred = adap->params.ofldq_wr_cred; + eosw_txq->ncompl = 0; + eosw_txq->last_compl = 0; + eosw_txq->state = CXGB4_EO_STATE_CLOSED; +} + +static void cxgb4_free_eosw_txq(struct net_device *dev, + struct sge_eosw_txq *eosw_txq) +{ + spin_lock_bh(&eosw_txq->lock); + cxgb4_clean_eosw_txq(dev, eosw_txq); + kfree(eosw_txq->desc); + spin_unlock_bh(&eosw_txq->lock); + tasklet_kill(&eosw_txq->qresume_tsk); +} + +static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + 
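/* Editorial sketch (not part of the patch): cxgb4_mqprio_validate() above
 * walks the tc_mqprio_qopt offset/count arrays to find the highest queue
 * offset and the total queue count before checking them against the
 * available EOTIDs.  The two-class layout below is made up purely to show
 * how those arrays describe per-class queue ranges.
 */
#include <stdio.h>

int main(void)
{
	unsigned int count[2]  = { 4, 4 };	/* queues per traffic class */
	unsigned int offset[2] = { 0, 4 };	/* first queue of each class */
	unsigned int qcount = 0, qoffset = 0;
	int i;

	for (i = 0; i < 2; i++) {
		if (offset[i] > qoffset)	/* max_t() analogue */
			qoffset = offset[i];
		qcount += count[i];
		printf("tc %d: queues %u..%u\n", i, offset[i],
		       offset[i] + count[i] - 1);
	}
	printf("total queues %u, highest offset %u\n", qcount, qoffset);
	return 0;
}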
struct sge_ofld_rxq *eorxq; + struct sge_eohw_txq *eotxq; + int ret, msix = 0; + u32 i; + + /* Allocate ETHOFLD hardware queue structures if not done already */ + if (!refcount_read(&adap->tc_mqprio->refcnt)) { + adap->sge.eohw_rxq = kcalloc(adap->sge.eoqsets, + sizeof(struct sge_ofld_rxq), + GFP_KERNEL); + if (!adap->sge.eohw_rxq) + return -ENOMEM; + + adap->sge.eohw_txq = kcalloc(adap->sge.eoqsets, + sizeof(struct sge_eohw_txq), + GFP_KERNEL); + if (!adap->sge.eohw_txq) { + kfree(adap->sge.eohw_rxq); + return -ENOMEM; + } + } + + if (!(adap->flags & CXGB4_USING_MSIX)) + msix = -((int)adap->sge.intrq.abs_id + 1); + + for (i = 0; i < pi->nqsets; i++) { + eorxq = &adap->sge.eohw_rxq[pi->first_qset + i]; + eotxq = &adap->sge.eohw_txq[pi->first_qset + i]; + + /* Allocate Rxqs for receiving ETHOFLD Tx completions */ + if (msix >= 0) { + msix = cxgb4_get_msix_idx_from_bmap(adap); + if (msix < 0) { + ret = msix; + goto out_free_queues; + } + + eorxq->msix = &adap->msix_info[msix]; + snprintf(eorxq->msix->desc, + sizeof(eorxq->msix->desc), + "%s-eorxq%d", dev->name, i); + } + + init_rspq(adap, &eorxq->rspq, + CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC, + CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT, + CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM, + CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE); + + eorxq->fl.size = CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM; + + ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false, + dev, msix, &eorxq->fl, + cxgb4_ethofld_rx_handler, + NULL, 0); + if (ret) + goto out_free_queues; + + /* Allocate ETHOFLD hardware Txqs */ + eotxq->q.size = CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM; + ret = t4_sge_alloc_ethofld_txq(adap, eotxq, dev, + eorxq->rspq.cntxt_id); + if (ret) + goto out_free_queues; + + /* Allocate IRQs, set IRQ affinity, and start Rx */ + if (adap->flags & CXGB4_USING_MSIX) { + ret = request_irq(eorxq->msix->vec, t4_sge_intr_msix, 0, + eorxq->msix->desc, &eorxq->rspq); + if (ret) + goto out_free_msix; + + cxgb4_set_msix_aff(adap, eorxq->msix->vec, + &eorxq->msix->aff_mask, i); + } + + if (adap->flags & CXGB4_FULL_INIT_DONE) + cxgb4_enable_rx(adap, &eorxq->rspq); + } + + refcount_inc(&adap->tc_mqprio->refcnt); + return 0; + +out_free_msix: + while (i-- > 0) { + eorxq = &adap->sge.eohw_rxq[pi->first_qset + i]; + + if (adap->flags & CXGB4_FULL_INIT_DONE) + cxgb4_quiesce_rx(&eorxq->rspq); + + if (adap->flags & CXGB4_USING_MSIX) { + cxgb4_clear_msix_aff(eorxq->msix->vec, + eorxq->msix->aff_mask); + free_irq(eorxq->msix->vec, &eorxq->rspq); + } + } + +out_free_queues: + for (i = 0; i < pi->nqsets; i++) { + eorxq = &adap->sge.eohw_rxq[pi->first_qset + i]; + eotxq = &adap->sge.eohw_txq[pi->first_qset + i]; + + if (eorxq->rspq.desc) + free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl); + if (eorxq->msix) + cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx); + t4_sge_free_ethofld_txq(adap, eotxq); + } + + kfree(adap->sge.eohw_txq); + kfree(adap->sge.eohw_rxq); + + return ret; +} + +static void cxgb4_mqprio_free_hw_resources(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + struct sge_ofld_rxq *eorxq; + struct sge_eohw_txq *eotxq; + u32 i; + + /* Return if no ETHOFLD structures have been allocated yet */ + if (!refcount_read(&adap->tc_mqprio->refcnt)) + return; + + /* Return if no hardware queues have been allocated */ + if (!adap->sge.eohw_rxq[pi->first_qset].rspq.desc) + return; + + for (i = 0; i < pi->nqsets; i++) { + eorxq = &adap->sge.eohw_rxq[pi->first_qset + i]; + eotxq = &adap->sge.eohw_txq[pi->first_qset + i]; + + /* Device removal path will already disable NAPI + * 
before unregistering netdevice. So, only disable + * NAPI if we're not in device removal path + */ + if (!(adap->flags & CXGB4_SHUTTING_DOWN)) + cxgb4_quiesce_rx(&eorxq->rspq); + + if (adap->flags & CXGB4_USING_MSIX) { + cxgb4_clear_msix_aff(eorxq->msix->vec, + eorxq->msix->aff_mask); + free_irq(eorxq->msix->vec, &eorxq->rspq); + } + + free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl); + t4_sge_free_ethofld_txq(adap, eotxq); + } + + /* Free up ETHOFLD structures if there are no users */ + if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) { + kfree(adap->sge.eohw_txq); + kfree(adap->sge.eohw_rxq); + } +} + +static int cxgb4_mqprio_alloc_tc(struct net_device *dev, + struct tc_mqprio_qopt_offload *mqprio) +{ + struct ch_sched_params p = { + .type = SCHED_CLASS_TYPE_PACKET, + .u.params.level = SCHED_CLASS_LEVEL_CL_RL, + .u.params.mode = SCHED_CLASS_MODE_FLOW, + .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS, + .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS, + .u.params.class = SCHED_CLS_NONE, + .u.params.weight = 0, + .u.params.pktsize = dev->mtu, + }; + struct cxgb4_tc_port_mqprio *tc_port_mqprio; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + struct sched_class *e; + int ret; + u8 i; + + tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id]; + p.u.params.channel = pi->tx_chan; + for (i = 0; i < mqprio->qopt.num_tc; i++) { + /* Convert from bytes per second to Kbps */ + p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000); + p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000); + + e = cxgb4_sched_class_alloc(dev, &p); + if (!e) { + ret = -ENOMEM; + goto out_err; + } + + tc_port_mqprio->tc_hwtc_map[i] = e->idx; + } + + return 0; + +out_err: + while (i--) + cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]); + + return ret; +} + +static void cxgb4_mqprio_free_tc(struct net_device *dev) +{ + struct cxgb4_tc_port_mqprio *tc_port_mqprio; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + u8 i; + + tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id]; + for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++) + cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]); +} + +static int cxgb4_mqprio_class_bind(struct net_device *dev, + struct sge_eosw_txq *eosw_txq, + u8 tc) +{ + struct ch_sched_flowc fe; + int ret; + + init_completion(&eosw_txq->completion); + + fe.tid = eosw_txq->eotid; + fe.class = tc; + + ret = cxgb4_sched_class_bind(dev, &fe, SCHED_FLOWC); + if (ret) + return ret; + + ret = wait_for_completion_timeout(&eosw_txq->completion, + CXGB4_FLOWC_WAIT_TIMEOUT); + if (!ret) + return -ETIMEDOUT; + + return 0; +} + +static void cxgb4_mqprio_class_unbind(struct net_device *dev, + struct sge_eosw_txq *eosw_txq, + u8 tc) +{ + struct adapter *adap = netdev2adap(dev); + struct ch_sched_flowc fe; + + /* If we're shutting down, interrupts are disabled and no completions + * come back. So, skip waiting for completions in this scenario. 
+ */ + if (!(adap->flags & CXGB4_SHUTTING_DOWN)) + init_completion(&eosw_txq->completion); + + fe.tid = eosw_txq->eotid; + fe.class = tc; + cxgb4_sched_class_unbind(dev, &fe, SCHED_FLOWC); + + if (!(adap->flags & CXGB4_SHUTTING_DOWN)) + wait_for_completion_timeout(&eosw_txq->completion, + CXGB4_FLOWC_WAIT_TIMEOUT); +} + +static int cxgb4_mqprio_enable_offload(struct net_device *dev, + struct tc_mqprio_qopt_offload *mqprio) +{ + struct cxgb4_tc_port_mqprio *tc_port_mqprio; + u32 qoffset, qcount, tot_qcount, qid, hwqid; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + struct sge_eosw_txq *eosw_txq; + int eotid, ret; + u16 i, j; + u8 hwtc; + + ret = cxgb4_mqprio_alloc_hw_resources(dev); + if (ret) + return -ENOMEM; + + tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id]; + for (i = 0; i < mqprio->qopt.num_tc; i++) { + qoffset = mqprio->qopt.offset[i]; + qcount = mqprio->qopt.count[i]; + for (j = 0; j < qcount; j++) { + eotid = cxgb4_get_free_eotid(&adap->tids); + if (eotid < 0) { + ret = -ENOMEM; + goto out_free_eotids; + } + + qid = qoffset + j; + hwqid = pi->first_qset + (eotid % pi->nqsets); + eosw_txq = &tc_port_mqprio->eosw_txq[qid]; + ret = cxgb4_init_eosw_txq(dev, eosw_txq, + eotid, hwqid); + if (ret) + goto out_free_eotids; + + cxgb4_alloc_eotid(&adap->tids, eotid, eosw_txq); + + hwtc = tc_port_mqprio->tc_hwtc_map[i]; + ret = cxgb4_mqprio_class_bind(dev, eosw_txq, hwtc); + if (ret) + goto out_free_eotids; + } + } + + memcpy(&tc_port_mqprio->mqprio, mqprio, + sizeof(struct tc_mqprio_qopt_offload)); + + /* Inform the stack about the configured tc params. + * + * Set the correct queue map. If no queue count has been + * specified, then send the traffic through default NIC + * queues; instead of ETHOFLD queues. 
+ */ + ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc); + if (ret) + goto out_free_eotids; + + tot_qcount = pi->nqsets; + for (i = 0; i < mqprio->qopt.num_tc; i++) { + qcount = mqprio->qopt.count[i]; + if (qcount) { + qoffset = mqprio->qopt.offset[i] + pi->nqsets; + } else { + qcount = pi->nqsets; + qoffset = 0; + } + + ret = netdev_set_tc_queue(dev, i, qcount, qoffset); + if (ret) + goto out_reset_tc; + + tot_qcount += mqprio->qopt.count[i]; + } + + ret = netif_set_real_num_tx_queues(dev, tot_qcount); + if (ret) + goto out_reset_tc; + + tc_port_mqprio->state = CXGB4_MQPRIO_STATE_ACTIVE; + return 0; + +out_reset_tc: + netdev_reset_tc(dev); + i = mqprio->qopt.num_tc; + +out_free_eotids: + while (i-- > 0) { + qoffset = mqprio->qopt.offset[i]; + qcount = mqprio->qopt.count[i]; + for (j = 0; j < qcount; j++) { + eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j]; + + hwtc = tc_port_mqprio->tc_hwtc_map[i]; + cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc); + + cxgb4_free_eotid(&adap->tids, eosw_txq->eotid); + cxgb4_free_eosw_txq(dev, eosw_txq); + } + } + + cxgb4_mqprio_free_hw_resources(dev); + return ret; +} + +static void cxgb4_mqprio_disable_offload(struct net_device *dev) +{ + struct cxgb4_tc_port_mqprio *tc_port_mqprio; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + struct sge_eosw_txq *eosw_txq; + u32 qoffset, qcount; + u16 i, j; + u8 hwtc; + + tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id]; + if (tc_port_mqprio->state != CXGB4_MQPRIO_STATE_ACTIVE) + return; + + netdev_reset_tc(dev); + netif_set_real_num_tx_queues(dev, pi->nqsets); + + for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++) { + qoffset = tc_port_mqprio->mqprio.qopt.offset[i]; + qcount = tc_port_mqprio->mqprio.qopt.count[i]; + for (j = 0; j < qcount; j++) { + eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j]; + + hwtc = tc_port_mqprio->tc_hwtc_map[i]; + cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc); + + cxgb4_free_eotid(&adap->tids, eosw_txq->eotid); + cxgb4_free_eosw_txq(dev, eosw_txq); + } + } + + cxgb4_mqprio_free_hw_resources(dev); + + /* Free up the traffic classes */ + cxgb4_mqprio_free_tc(dev); + + memset(&tc_port_mqprio->mqprio, 0, + sizeof(struct tc_mqprio_qopt_offload)); + + tc_port_mqprio->state = CXGB4_MQPRIO_STATE_DISABLED; +} + +int cxgb4_setup_tc_mqprio(struct net_device *dev, + struct tc_mqprio_qopt_offload *mqprio) +{ + bool needs_bring_up = false; + int ret; + + ret = cxgb4_mqprio_validate(dev, mqprio); + if (ret) + return ret; + + /* To configure tc params, the current allocated EOTIDs must + * be freed up. However, they can't be freed up if there's + * traffic running on the interface. So, ensure interface is + * down before configuring tc params. + */ + if (netif_running(dev)) { + cxgb_close(dev); + needs_bring_up = true; + } + + cxgb4_mqprio_disable_offload(dev); + + /* If requested for clear, then just return since resources are + * already freed up by now. + */ + if (!mqprio->qopt.num_tc) + goto out; + + /* Allocate free available traffic classes and configure + * their rate parameters. 
+ */ + ret = cxgb4_mqprio_alloc_tc(dev, mqprio); + if (ret) + goto out; + + ret = cxgb4_mqprio_enable_offload(dev, mqprio); + if (ret) { + cxgb4_mqprio_free_tc(dev); + goto out; + } + +out: + if (needs_bring_up) + cxgb_open(dev); + + return ret; +} + +int cxgb4_init_tc_mqprio(struct adapter *adap) +{ + struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio; + struct cxgb4_tc_mqprio *tc_mqprio; + struct sge_eosw_txq *eosw_txq; + int ret = 0; + u8 i; + + tc_mqprio = kzalloc(sizeof(*tc_mqprio), GFP_KERNEL); + if (!tc_mqprio) + return -ENOMEM; + + tc_port_mqprio = kcalloc(adap->params.nports, sizeof(*tc_port_mqprio), + GFP_KERNEL); + if (!tc_port_mqprio) { + ret = -ENOMEM; + goto out_free_mqprio; + } + + tc_mqprio->port_mqprio = tc_port_mqprio; + for (i = 0; i < adap->params.nports; i++) { + port_mqprio = &tc_mqprio->port_mqprio[i]; + eosw_txq = kcalloc(adap->tids.neotids, sizeof(*eosw_txq), + GFP_KERNEL); + if (!eosw_txq) { + ret = -ENOMEM; + goto out_free_ports; + } + port_mqprio->eosw_txq = eosw_txq; + } + + adap->tc_mqprio = tc_mqprio; + refcount_set(&adap->tc_mqprio->refcnt, 0); + return 0; + +out_free_ports: + for (i = 0; i < adap->params.nports; i++) { + port_mqprio = &tc_mqprio->port_mqprio[i]; + kfree(port_mqprio->eosw_txq); + } + kfree(tc_port_mqprio); + +out_free_mqprio: + kfree(tc_mqprio); + return ret; +} + +void cxgb4_cleanup_tc_mqprio(struct adapter *adap) +{ + struct cxgb4_tc_port_mqprio *port_mqprio; + u8 i; + + if (adap->tc_mqprio) { + if (adap->tc_mqprio->port_mqprio) { + for (i = 0; i < adap->params.nports; i++) { + struct net_device *dev = adap->port[i]; + + if (dev) + cxgb4_mqprio_disable_offload(dev); + port_mqprio = &adap->tc_mqprio->port_mqprio[i]; + kfree(port_mqprio->eosw_txq); + } + kfree(adap->tc_mqprio->port_mqprio); + } + kfree(adap->tc_mqprio); + } +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h new file mode 100644 index 000000000000..c532f1ef8451 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2019 Chelsio Communications. All rights reserved. 
*/ + +#ifndef __CXGB4_TC_MQPRIO_H__ +#define __CXGB4_TC_MQPRIO_H__ + +#include <net/pkt_cls.h> + +#define CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM 128 + +#define CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM 1024 + +#define CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM 1024 +#define CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE 64 +#define CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC 5 +#define CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT 8 + +#define CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM 72 + +#define CXGB4_FLOWC_WAIT_TIMEOUT (5 * HZ) + +enum cxgb4_mqprio_state { + CXGB4_MQPRIO_STATE_DISABLED = 0, + CXGB4_MQPRIO_STATE_ACTIVE, +}; + +struct cxgb4_tc_port_mqprio { + enum cxgb4_mqprio_state state; /* Current MQPRIO offload state */ + struct tc_mqprio_qopt_offload mqprio; /* MQPRIO offload params */ + struct sge_eosw_txq *eosw_txq; /* Netdev SW Tx queue array */ + u8 tc_hwtc_map[TC_QOPT_MAX_QUEUE]; /* MQPRIO tc to hardware tc map */ +}; + +struct cxgb4_tc_mqprio { + refcount_t refcnt; /* Refcount for adapter-wide resources */ + struct cxgb4_tc_port_mqprio *port_mqprio; /* Per port MQPRIO info */ +}; + +int cxgb4_setup_tc_mqprio(struct net_device *dev, + struct tc_mqprio_qopt_offload *mqprio); +int cxgb4_init_tc_mqprio(struct adapter *adap); +void cxgb4_cleanup_tc_mqprio(struct adapter *adap); +#endif /* __CXGB4_TC_MQPRIO_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 02fc63fa7f25..133f8623ba86 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -36,6 +36,7 @@ #include <net/tc_act/tc_mirred.h> #include "cxgb4.h" +#include "cxgb4_filter.h" #include "cxgb4_tc_u32_parse.h" #include "cxgb4_tc_u32.h" @@ -148,6 +149,7 @@ static int fill_action_fields(struct adapter *adap, int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) { const struct cxgb4_match_field *start, *link_start = NULL; + struct netlink_ext_ack *extack = cls->common.extack; struct adapter *adapter = netdev2adap(dev); __be16 protocol = cls->common.protocol; struct ch_filter_specification fs; @@ -164,14 +166,21 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6)) return -EOPNOTSUPP; - /* Fetch the location to insert the filter. */ - filter_id = cls->knode.handle & 0xFFFFF; + /* Note that TC uses prio 0 to indicate stack to generate + * automatic prio and hence doesn't pass prio 0 to driver. + * However, the hardware TCAM index starts from 0. Hence, the + * -1 here. + */ + filter_id = TC_U32_NODE(cls->knode.handle) - 1; - if (filter_id > adapter->tids.nftids) { - dev_err(adapter->pdev_dev, - "Location %d out of range for insertion. Max: %d\n", - filter_id, adapter->tids.nftids); - return -ERANGE; + /* Only insert U32 rule if its priority doesn't conflict with + * existing rules in the LETCAM. + */ + if (filter_id >= adapter->tids.nftids || + !cxgb4_filter_prio_in_range(dev, filter_id, cls->common.prio)) { + NL_SET_ERR_MSG_MOD(extack, + "No free LETCAM index available"); + return -ENOMEM; } t = adapter->tc_u32; @@ -190,6 +199,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) memset(&fs, 0, sizeof(fs)); + fs.tc_prio = cls->common.prio; + fs.tc_cookie = cls->knode.handle; + if (protocol == htons(ETH_P_IPV6)) { start = cxgb4_ipv6_fields; is_ipv6 = true; @@ -350,14 +362,10 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) return -EOPNOTSUPP; /* Fetch the location to delete the filter. 
*/ - filter_id = cls->knode.handle & 0xFFFFF; - - if (filter_id > adapter->tids.nftids) { - dev_err(adapter->pdev_dev, - "Location %d out of range for deletion. Max: %d\n", - filter_id, adapter->tids.nftids); + filter_id = TC_U32_NODE(cls->knode.handle) - 1; + if (filter_id >= adapter->tids.nftids || + cls->knode.handle != adapter->tids.ftid_tab[filter_id].fs.tc_cookie) return -ERANGE; - } t = adapter->tc_u32; handle = cls->knode.handle; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 86b528d8364c..cce33d279094 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -53,35 +53,6 @@ #define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++) -static int get_msix_idx_from_bmap(struct adapter *adap) -{ - struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds; - unsigned long flags; - unsigned int msix_idx; - - spin_lock_irqsave(&bmap->lock, flags); - msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize); - if (msix_idx < bmap->mapsize) { - __set_bit(msix_idx, bmap->msix_bmap); - } else { - spin_unlock_irqrestore(&bmap->lock, flags); - return -ENOSPC; - } - - spin_unlock_irqrestore(&bmap->lock, flags); - return msix_idx; -} - -static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx) -{ - struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds; - unsigned long flags; - - spin_lock_irqsave(&bmap->lock, flags); - __clear_bit(msix_idx, bmap->msix_bmap); - spin_unlock_irqrestore(&bmap->lock, flags); -} - /* Flush the aggregated lro sessions */ static void uldrx_flush_handler(struct sge_rspq *q) { @@ -138,9 +109,9 @@ static int alloc_uld_rxqs(struct adapter *adap, struct sge_uld_rxq_info *rxq_info, bool lro) { unsigned int nq = rxq_info->nrxq + rxq_info->nciq; - int i, err, msi_idx, que_idx = 0, bmap_idx = 0; struct sge_ofld_rxq *q = rxq_info->uldrxq; unsigned short *ids = rxq_info->rspq_id; + int i, err, msi_idx, que_idx = 0; struct sge *s = &adap->sge; unsigned int per_chan; @@ -159,12 +130,18 @@ static int alloc_uld_rxqs(struct adapter *adap, } if (msi_idx >= 0) { - bmap_idx = get_msix_idx_from_bmap(adap); - if (bmap_idx < 0) { + msi_idx = cxgb4_get_msix_idx_from_bmap(adap); + if (msi_idx < 0) { err = -ENOSPC; goto freeout; } - msi_idx = adap->msix_info_ulds[bmap_idx].idx; + + snprintf(adap->msix_info[msi_idx].desc, + sizeof(adap->msix_info[msi_idx].desc), + "%s-%s%d", + adap->port[0]->name, rxq_info->name, i); + + q->msix = &adap->msix_info[msi_idx]; } err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[que_idx++ / per_chan], @@ -175,8 +152,7 @@ static int alloc_uld_rxqs(struct adapter *adap, 0); if (err) goto freeout; - if (msi_idx >= 0) - rxq_info->msix_tbl[i] = bmap_idx; + memset(&q->stats, 0, sizeof(q->stats)); if (ids) ids[i] = q->rspq.abs_id; @@ -188,6 +164,8 @@ freeout: if (q->rspq.desc) free_rspq_fl(adap, &q->rspq, q->fl.size ? 
&q->fl : NULL); + if (q->msix) + cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx); } return err; } @@ -198,14 +176,6 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro) struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; int i, ret = 0; - if (adap->flags & CXGB4_USING_MSIX) { - rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq), - sizeof(unsigned short), - GFP_KERNEL); - if (!rxq_info->msix_tbl) - return -ENOMEM; - } - ret = !(!alloc_uld_rxqs(adap, rxq_info, lro)); /* Tell uP to route control queue completions to rdma rspq */ @@ -261,8 +231,6 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type) t4_free_uld_rxqs(adap, rxq_info->nciq, rxq_info->uldrxq + rxq_info->nrxq); t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq); - if (adap->flags & CXGB4_USING_MSIX) - kfree(rxq_info->msix_tbl); } static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type, @@ -355,13 +323,12 @@ static int request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type) { struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; - struct uld_msix_info *minfo; + struct msix_info *minfo; + unsigned int idx; int err = 0; - unsigned int idx, bmap_idx; for_each_uldrxq(rxq_info, idx) { - bmap_idx = rxq_info->msix_tbl[idx]; - minfo = &adap->msix_info_ulds[bmap_idx]; + minfo = rxq_info->uldrxq[idx].msix; err = request_irq(minfo->vec, t4_sge_intr_msix, 0, minfo->desc, @@ -376,10 +343,9 @@ request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type) unwind: while (idx-- > 0) { - bmap_idx = rxq_info->msix_tbl[idx]; - minfo = &adap->msix_info_ulds[bmap_idx]; + minfo = rxq_info->uldrxq[idx].msix; cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask); - free_msix_idx_in_bmap(adap, bmap_idx); + cxgb4_free_msix_idx_in_bmap(adap, minfo->idx); free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq); } return err; @@ -389,69 +355,45 @@ static void free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type) { struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; - struct uld_msix_info *minfo; - unsigned int idx, bmap_idx; + struct msix_info *minfo; + unsigned int idx; for_each_uldrxq(rxq_info, idx) { - bmap_idx = rxq_info->msix_tbl[idx]; - minfo = &adap->msix_info_ulds[bmap_idx]; - + minfo = rxq_info->uldrxq[idx].msix; cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask); - free_msix_idx_in_bmap(adap, bmap_idx); + cxgb4_free_msix_idx_in_bmap(adap, minfo->idx); free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq); } } -static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type) +static void enable_rx_uld(struct adapter *adap, unsigned int uld_type) { struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; - int n = sizeof(adap->msix_info_ulds[0].desc); - unsigned int idx, bmap_idx; + int idx; for_each_uldrxq(rxq_info, idx) { - bmap_idx = rxq_info->msix_tbl[idx]; - - snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d", - adap->port[0]->name, rxq_info->name, idx); - } -} - -static void enable_rx(struct adapter *adap, struct sge_rspq *q) -{ - if (!q) - return; + struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq; - if (q->handler) - napi_enable(&q->napi); - - /* 0-increment GTS to start the timer and enable interrupts */ - t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), - SEINTARM_V(q->intr_params) | - INGRESSQID_V(q->cntxt_id)); -} + if (!q) + continue; -static void quiesce_rx(struct adapter *adap, struct sge_rspq *q) -{ - if (q && q->handler) - napi_disable(&q->napi); + 
cxgb4_enable_rx(adap, q); + } } -static void enable_rx_uld(struct adapter *adap, unsigned int uld_type) +static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type) { struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; int idx; - for_each_uldrxq(rxq_info, idx) - enable_rx(adap, &rxq_info->uldrxq[idx].rspq); -} + for_each_uldrxq(rxq_info, idx) { + struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq; -static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type) -{ - struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; - int idx; + if (!q) + continue; - for_each_uldrxq(rxq_info, idx) - quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq); + cxgb4_quiesce_rx(q); + } } static void @@ -750,7 +692,6 @@ void cxgb4_register_uld(enum cxgb4_uld type, if (ret) goto free_queues; if (adap->flags & CXGB4_USING_MSIX) { - name_msix_vecs_uld(adap, type); ret = request_msix_queue_irqs_uld(adap, type); if (ret) goto free_rxq; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index cee582e36134..861b25d28ed6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -89,6 +89,10 @@ union aopen_entry { union aopen_entry *next; }; +struct eotid_entry { + void *data; +}; + /* * Holds the size, base address, free list start, etc of the TID, server TID, * and active-open TID tables. The tables themselves are allocated dynamically. @@ -126,6 +130,12 @@ struct tid_info { unsigned int v6_stids_in_use; unsigned int sftids_in_use; + /* ETHOFLD range */ + struct eotid_entry *eotid_tab; + unsigned long *eotid_bmap; + unsigned int eotid_base; + unsigned int neotids; + /* TIDs in the TCAM */ atomic_t tids_in_use; /* TIDs in the HASH */ @@ -176,6 +186,35 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data, atomic_inc(&t->conns_in_use); } +static inline struct eotid_entry *cxgb4_lookup_eotid(struct tid_info *t, + u32 eotid) +{ + return eotid < t->neotids ? 
&t->eotid_tab[eotid] : NULL; +} + +static inline int cxgb4_get_free_eotid(struct tid_info *t) +{ + int eotid; + + eotid = find_first_zero_bit(t->eotid_bmap, t->neotids); + if (eotid >= t->neotids) + eotid = -1; + + return eotid; +} + +static inline void cxgb4_alloc_eotid(struct tid_info *t, u32 eotid, void *data) +{ + set_bit(eotid, t->eotid_bmap); + t->eotid_tab[eotid].data = data; +} + +static inline void cxgb4_free_eotid(struct tid_info *t, u32 eotid) +{ + clear_bit(eotid, t->eotid_bmap); + t->eotid_tab[eotid].data = NULL; +} + int cxgb4_alloc_atid(struct tid_info *t, void *data); int cxgb4_alloc_stid(struct tid_info *t, int family, void *data); int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 60218dc676a8..3e61bd5d0c29 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -50,6 +50,7 @@ static int t4_sched_class_fw_cmd(struct port_info *pi, e = &s->tab[p->u.params.class]; switch (op) { case SCHED_FW_OP_ADD: + case SCHED_FW_OP_DEL: err = t4_sched_params(adap, p->type, p->u.params.level, p->u.params.mode, p->u.params.rateunit, @@ -92,45 +93,69 @@ static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg, pf = adap->pf; vf = 0; + + err = t4_set_params(adap, adap->mbox, pf, vf, 1, + &fw_param, &fw_class); + break; + } + case SCHED_FLOWC: { + struct sched_flowc_entry *fe; + + fe = (struct sched_flowc_entry *)arg; + + fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE; + err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id], + fe->param.tid, fw_class); break; } default: err = -ENOTSUPP; - goto out; + break; } - err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class); - -out: return err; } -static struct sched_class *t4_sched_queue_lookup(struct port_info *pi, - const unsigned int qid, - int *index) +static void *t4_sched_entry_lookup(struct port_info *pi, + enum sched_bind_type type, + const u32 val) { struct sched_table *s = pi->sched_tbl; struct sched_class *e, *end; - struct sched_class *found = NULL; - int i; + void *found = NULL; - /* Look for a class with matching bound queue parameters */ + /* Look for an entry with matching @val */ end = &s->tab[s->sched_size]; for (e = &s->tab[0]; e != end; ++e) { - struct sched_queue_entry *qe; - - i = 0; - if (e->state == SCHED_STATE_UNUSED) + if (e->state == SCHED_STATE_UNUSED || + e->bind_type != type) continue; - list_for_each_entry(qe, &e->queue_list, list) { - if (qe->cntxt_id == qid) { - found = e; - if (index) - *index = i; - break; + switch (type) { + case SCHED_QUEUE: { + struct sched_queue_entry *qe; + + list_for_each_entry(qe, &e->entry_list, list) { + if (qe->cntxt_id == val) { + found = qe; + break; + } + } + break; + } + case SCHED_FLOWC: { + struct sched_flowc_entry *fe; + + list_for_each_entry(fe, &e->entry_list, list) { + if (fe->param.tid == val) { + found = fe; + break; + } } - i++; + break; + } + default: + return NULL; } if (found) @@ -142,52 +167,41 @@ static struct sched_class *t4_sched_queue_lookup(struct port_info *pi, static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) { - struct adapter *adap = pi->adapter; - struct sched_class *e; struct sched_queue_entry *qe = NULL; + struct adapter *adap = pi->adapter; struct sge_eth_txq *txq; - unsigned int qid; - int index = -1; + struct sched_class *e; int err = 0; if (p->queue < 0 || p->queue >= pi->nqsets) return -ERANGE; txq = 
&adap->sge.ethtxq[pi->first_qset + p->queue]; - qid = txq->q.cntxt_id; - /* Find the existing class that the queue is bound to */ - e = t4_sched_queue_lookup(pi, qid, &index); - if (e && index >= 0) { - int i = 0; - - list_for_each_entry(qe, &e->queue_list, list) { - if (i == index) - break; - i++; - } + /* Find the existing entry that the queue is bound to */ + qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id); + if (qe) { err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, false); if (err) return err; + e = &pi->sched_tbl->tab[qe->param.class]; list_del(&qe->list); kvfree(qe); - if (atomic_dec_and_test(&e->refcnt)) { - e->state = SCHED_STATE_UNUSED; - memset(&e->info, 0, sizeof(e->info)); - } + if (atomic_dec_and_test(&e->refcnt)) + cxgb4_sched_class_free(adap->port[pi->port_id], e->idx); } return err; } static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) { - struct adapter *adap = pi->adapter; struct sched_table *s = pi->sched_tbl; - struct sched_class *e; struct sched_queue_entry *qe = NULL; + struct adapter *adap = pi->adapter; struct sge_eth_txq *txq; + struct sched_class *e; unsigned int qid; int err = 0; @@ -215,7 +229,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) if (err) goto out_err; - list_add_tail(&qe->list, &e->queue_list); + list_add_tail(&qe->list, &e->entry_list); + e->bind_type = SCHED_QUEUE; atomic_inc(&e->refcnt); return err; @@ -224,6 +239,71 @@ out_err: return err; } +static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p) +{ + struct sched_flowc_entry *fe = NULL; + struct adapter *adap = pi->adapter; + struct sched_class *e; + int err = 0; + + if (p->tid < 0 || p->tid >= adap->tids.neotids) + return -ERANGE; + + /* Find the existing entry that the flowc is bound to */ + fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid); + if (fe) { + err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, + false); + if (err) + return err; + + e = &pi->sched_tbl->tab[fe->param.class]; + list_del(&fe->list); + kvfree(fe); + if (atomic_dec_and_test(&e->refcnt)) + cxgb4_sched_class_free(adap->port[pi->port_id], e->idx); + } + return err; +} + +static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p) +{ + struct sched_table *s = pi->sched_tbl; + struct sched_flowc_entry *fe = NULL; + struct adapter *adap = pi->adapter; + struct sched_class *e; + int err = 0; + + if (p->tid < 0 || p->tid >= adap->tids.neotids) + return -ERANGE; + + fe = kvzalloc(sizeof(*fe), GFP_KERNEL); + if (!fe) + return -ENOMEM; + + /* Unbind flowc from any existing class */ + err = t4_sched_flowc_unbind(pi, p); + if (err) + goto out_err; + + /* Bind flowc to specified class */ + memcpy(&fe->param, p, sizeof(fe->param)); + + e = &s->tab[fe->param.class]; + err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true); + if (err) + goto out_err; + + list_add_tail(&fe->list, &e->entry_list); + e->bind_type = SCHED_FLOWC; + atomic_inc(&e->refcnt); + return err; + +out_err: + kvfree(fe); + return err; +} + static void t4_sched_class_unbind_all(struct port_info *pi, struct sched_class *e, enum sched_bind_type type) @@ -235,10 +315,17 @@ static void t4_sched_class_unbind_all(struct port_info *pi, case SCHED_QUEUE: { struct sched_queue_entry *qe; - list_for_each_entry(qe, &e->queue_list, list) + list_for_each_entry(qe, &e->entry_list, list) t4_sched_queue_unbind(pi, &qe->param); break; } + case SCHED_FLOWC: { + struct sched_flowc_entry *fe; + + list_for_each_entry(fe, &e->entry_list, list) 
+ t4_sched_flowc_unbind(pi, &fe->param); + break; + } default: break; } @@ -262,6 +349,15 @@ static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg, err = t4_sched_queue_unbind(pi, qe); break; } + case SCHED_FLOWC: { + struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg; + + if (bind) + err = t4_sched_flowc_bind(pi, fe); + else + err = t4_sched_flowc_unbind(pi, fe); + break; + } default: err = -ENOTSUPP; break; @@ -299,6 +395,12 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg, class_id = qe->class; break; } + case SCHED_FLOWC: { + struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg; + + class_id = fe->class; + break; + } default: return -ENOTSUPP; } @@ -340,6 +442,12 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, class_id = qe->class; break; } + case SCHED_FLOWC: { + struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg; + + class_id = fe->class; + break; + } default: return -ENOTSUPP; } @@ -355,8 +463,8 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, const struct ch_sched_params *p) { struct sched_table *s = pi->sched_tbl; - struct sched_class *e, *end; struct sched_class *found = NULL; + struct sched_class *e, *end; if (!p) { /* Get any available unused class */ @@ -400,7 +508,7 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, static struct sched_class *t4_sched_class_alloc(struct port_info *pi, struct ch_sched_params *p) { - struct sched_class *e; + struct sched_class *e = NULL; u8 class_id; int err; @@ -415,10 +523,13 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi, if (class_id != SCHED_CLS_NONE) return NULL; - /* See if there's an exisiting class with same - * requested sched params + /* See if there's an existing class with same requested sched + * params. Classes can only be shared among FLOWC types. For + * other types, always request a new class. */ - e = t4_sched_class_lookup(pi, p); + if (p->u.params.mode == SCHED_CLASS_MODE_FLOW) + e = t4_sched_class_lookup(pi, p); + if (!e) { struct ch_sched_params np; @@ -467,9 +578,57 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev, return t4_sched_class_alloc(pi, p); } -static void t4_sched_class_free(struct port_info *pi, struct sched_class *e) +/** + * cxgb4_sched_class_free - free a scheduling class + * @dev: net_device pointer + * @classid: scheduling class id + * + * Frees a scheduling class if there are no users. + */ +void cxgb4_sched_class_free(struct net_device *dev, u8 classid) { - t4_sched_class_unbind_all(pi, e, SCHED_QUEUE); + struct port_info *pi = netdev2pinfo(dev); + struct sched_table *s = pi->sched_tbl; + struct ch_sched_params p; + struct sched_class *e; + u32 speed; + int ret; + + e = &s->tab[classid]; + if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) { + /* Port based rate limiting needs explicit reset back + * to max rate. But, we'll do explicit reset for all + * types, instead of just port based type, to be on + * the safer side. + */ + memcpy(&p, &e->info, sizeof(p)); + /* Always reset mode to 0. Otherwise, FLOWC mode will + * still be enabled even after resetting the traffic + * class. 
+ */ + p.u.params.mode = 0; + p.u.params.minrate = 0; + p.u.params.pktsize = 0; + + ret = t4_get_link_params(pi, NULL, &speed, NULL); + if (!ret) + p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */ + else + p.u.params.maxrate = SCHED_MAX_RATE_KBPS; + + t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL); + + e->state = SCHED_STATE_UNUSED; + memset(&e->info, 0, sizeof(e->info)); + } +} + +static void t4_sched_class_free(struct net_device *dev, struct sched_class *e) +{ + struct port_info *pi = netdev2pinfo(dev); + + t4_sched_class_unbind_all(pi, e, e->bind_type); + cxgb4_sched_class_free(dev, e->idx); } struct sched_table *t4_init_sched(unsigned int sched_size) @@ -487,7 +646,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size) memset(&s->tab[i], 0, sizeof(struct sched_class)); s->tab[i].idx = i; s->tab[i].state = SCHED_STATE_UNUSED; - INIT_LIST_HEAD(&s->tab[i].queue_list); + INIT_LIST_HEAD(&s->tab[i].entry_list); atomic_set(&s->tab[i].refcnt, 0); } return s; @@ -510,7 +669,7 @@ void t4_cleanup_sched(struct adapter *adap) e = &s->tab[i]; if (e->state == SCHED_STATE_ACTIVE) - t4_sched_class_free(pi, e); + t4_sched_class_free(adap->port[j], e); } kvfree(s); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h index 168fb4ce3759..e92ff68bdd0a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.h +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h @@ -52,10 +52,12 @@ enum { enum sched_fw_ops { SCHED_FW_OP_ADD, + SCHED_FW_OP_DEL, }; enum sched_bind_type { SCHED_QUEUE, + SCHED_FLOWC, }; struct sched_queue_entry { @@ -64,11 +66,17 @@ struct sched_queue_entry { struct ch_sched_queue param; }; +struct sched_flowc_entry { + struct list_head list; + struct ch_sched_flowc param; +}; + struct sched_class { u8 state; u8 idx; struct ch_sched_params info; - struct list_head queue_list; + enum sched_bind_type bind_type; + struct list_head entry_list; atomic_t refcnt; }; @@ -102,6 +110,7 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev, struct ch_sched_params *p); +void cxgb4_sched_class_free(struct net_device *dev, u8 classid); struct sched_table *t4_init_sched(unsigned int size); void t4_cleanup_sched(struct adapter *adap); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 928bfea5457b..a0400b9a11e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -55,6 +55,8 @@ #include "t4fw_api.h" #include "cxgb4_ptp.h" #include "cxgb4_uld.h" +#include "cxgb4_tc_mqprio.h" +#include "sched.h" /* * Rx buffer size. 
We use largish buffers if possible but settle for single @@ -269,7 +271,6 @@ out_err: } EXPORT_SYMBOL(cxgb4_map_skb); -#ifdef CONFIG_NEED_DMA_MAP_STATE static void unmap_skb(struct device *dev, const struct sk_buff *skb, const dma_addr_t *addr) { @@ -284,6 +285,7 @@ static void unmap_skb(struct device *dev, const struct sk_buff *skb, dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE); } +#ifdef CONFIG_NEED_DMA_MAP_STATE /** * deferred_unmap_destructor - unmap a packet when it is freed * @skb: the packet @@ -1309,6 +1311,35 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb, tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len)); } +static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb, + struct cpl_tx_pkt_lso_core *lso) +{ + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + int l3hdr_len = skb_network_header_len(skb); + const struct skb_shared_info *ssi; + bool ipv6 = false; + + ssi = skb_shinfo(skb); + if (ssi->gso_type & SKB_GSO_TCPV6) + ipv6 = true; + + lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | + LSO_IPV6_V(ipv6) | + LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | + LSO_IPHDR_LEN_V(l3hdr_len / 4) | + LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); + lso->ipid_ofst = htons(0); + lso->mss = htons(ssi->gso_size); + lso->seqno_offset = htonl(0); + if (is_t4(adap->params.chip)) + lso->len = htonl(skb->len); + else + lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); + + return (void *)(lso + 1); +} + /** * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update * @adap: the adapter @@ -1347,6 +1378,31 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq, return reclaimed; } +static inline int cxgb4_validate_skb(struct sk_buff *skb, + struct net_device *dev, + u32 min_pkt_len) +{ + u32 max_pkt_len; + + /* The chip min packet length is 10 octets but some firmware + * commands have a minimum packet length requirement. So, play + * safe and reject anything shorter than @min_pkt_len. 
+ */ + if (unlikely(skb->len < min_pkt_len)) + return -EINVAL; + + /* Discard the packet if the length is greater than mtu */ + max_pkt_len = ETH_HLEN + dev->mtu; + + if (skb_vlan_tagged(skb)) + max_pkt_len += VLAN_HLEN; + + if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) + return -EINVAL; + + return 0; +} + /** * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue * @skb: the packet @@ -1356,41 +1412,24 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq, */ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) { - u32 wr_mid, ctrl0, op; - u64 cntrl, *end, *sgl; - int qidx, credits; - unsigned int flits, ndesc; - struct adapter *adap; - struct sge_eth_txq *q; - const struct port_info *pi; + enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; + bool ptp_enabled = is_ptp_enabled(skb, dev); + dma_addr_t addr[MAX_SKB_FRAGS + 1]; + const struct skb_shared_info *ssi; struct fw_eth_tx_pkt_wr *wr; struct cpl_tx_pkt_core *cpl; - const struct skb_shared_info *ssi; - dma_addr_t addr[MAX_SKB_FRAGS + 1]; + int len, qidx, credits, ret; + const struct port_info *pi; + unsigned int flits, ndesc; bool immediate = false; - int len, max_pkt_len; - bool ptp_enabled = is_ptp_enabled(skb, dev); + u32 wr_mid, ctrl0, op; + u64 cntrl, *end, *sgl; + struct sge_eth_txq *q; unsigned int chip_ver; - enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; - -#ifdef CONFIG_CHELSIO_T4_FCOE - int err; -#endif /* CONFIG_CHELSIO_T4_FCOE */ - - /* - * The chip min packet length is 10 octets but play safe and reject - * anything shorter than an Ethernet header. - */ - if (unlikely(skb->len < ETH_HLEN)) { -out_free: dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } + struct adapter *adap; - /* Discard the packet if the length is greater than mtu */ - max_pkt_len = ETH_HLEN + dev->mtu; - if (skb_vlan_tagged(skb)) - max_pkt_len += VLAN_HLEN; - if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) + ret = cxgb4_validate_skb(skb, dev, ETH_HLEN); + if (ret) goto out_free; pi = netdev_priv(dev); @@ -1421,8 +1460,8 @@ out_free: dev_kfree_skb_any(skb); cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; #ifdef CONFIG_CHELSIO_T4_FCOE - err = cxgb_fcoe_offload(skb, adap, pi, &cntrl); - if (unlikely(err == -ENOTSUPP)) { + ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl); + if (unlikely(ret == -ENOTSUPP)) { if (ptp_enabled) spin_unlock(&adap->ptp_lock); goto out_free; @@ -1490,9 +1529,6 @@ out_free: dev_kfree_skb_any(skb); len += sizeof(*cpl); if (ssi->gso_size) { struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); - bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; - int l3hdr_len = skb_network_header_len(skb); - int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1); if (tnl_type) @@ -1519,30 +1555,8 @@ out_free: dev_kfree_skb_any(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) cntrl = hwcsum(adap->params.chip, skb); } else { - lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | - LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | - LSO_IPV6_V(v6) | - LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | - LSO_IPHDR_LEN_V(l3hdr_len / 4) | - LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); - lso->ipid_ofst = htons(0); - lso->mss = htons(ssi->gso_size); - lso->seqno_offset = htonl(0); - if (is_t4(adap->params.chip)) - lso->len = htonl(skb->len); - else - lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); - cpl = (void *)(lso + 1); - - if (CHELSIO_CHIP_VERSION(adap->params.chip) - <= CHELSIO_T5) - cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); - 
else - cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); - - cntrl |= TXPKT_CSUM_TYPE_V(v6 ? - TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | - TXPKT_IPHDR_LEN_V(l3hdr_len); + cpl = write_tso_wr(adap, skb, lso); + cntrl = hwcsum(adap->params.chip, skb); } sgl = (u64 *)(cpl + 1); /* sgl start here */ if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { @@ -1622,6 +1636,10 @@ out_free: dev_kfree_skb_any(skb); if (ptp_enabled) spin_unlock(&adap->ptp_lock); return NETDEV_TX_OK; + +out_free: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; } /* Constants ... */ @@ -1710,32 +1728,25 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, dma_addr_t addr[MAX_SKB_FRAGS + 1]; const struct skb_shared_info *ssi; struct fw_eth_tx_pkt_vm_wr *wr; - int qidx, credits, max_pkt_len; struct cpl_tx_pkt_core *cpl; const struct port_info *pi; unsigned int flits, ndesc; struct sge_eth_txq *txq; struct adapter *adapter; + int qidx, credits, ret; + size_t fw_hdr_copy_len; u64 cntrl, *end; u32 wr_mid; - const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) + - sizeof(wr->ethmacsrc) + - sizeof(wr->ethtype) + - sizeof(wr->vlantci); /* The chip minimum packet length is 10 octets but the firmware * command that we are using requires that we copy the Ethernet header * (including the VLAN tag) into the header so we reject anything * smaller than that ... */ - if (unlikely(skb->len < fw_hdr_copy_len)) - goto out_free; - - /* Discard the packet if the length is greater than mtu */ - max_pkt_len = ETH_HLEN + dev->mtu; - if (skb_vlan_tag_present(skb)) - max_pkt_len += VLAN_HLEN; - if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) + fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) + + sizeof(wr->ethtype) + sizeof(wr->vlantci); + ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len); + if (ret) goto out_free; /* Figure out which TX Queue we're going to use. */ @@ -1991,34 +2002,451 @@ out_free: return NETDEV_TX_OK; } +/** + * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs + * @q: the SGE control Tx queue + * + * This is a variant of cxgb4_reclaim_completed_tx() that is used + * for Tx queues that send only immediate data (presently just + * the control queues) and thus do not have any sk_buffs to release. 
+ */ +static inline void reclaim_completed_tx_imm(struct sge_txq *q) +{ + int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); + int reclaim = hw_cidx - q->cidx; + + if (reclaim < 0) + reclaim += q->size; + + q->in_use -= reclaim; + q->cidx = hw_cidx; +} + +static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max) +{ + u32 val = *idx + n; + + if (val >= max) + val -= max; + + *idx = val; +} + +void cxgb4_eosw_txq_free_desc(struct adapter *adap, + struct sge_eosw_txq *eosw_txq, u32 ndesc) +{ + struct sge_eosw_desc *d; + + d = &eosw_txq->desc[eosw_txq->last_cidx]; + while (ndesc--) { + if (d->skb) { + if (d->addr[0]) { + unmap_skb(adap->pdev_dev, d->skb, d->addr); + memset(d->addr, 0, sizeof(d->addr)); + } + dev_consume_skb_any(d->skb); + d->skb = NULL; + } + eosw_txq_advance_index(&eosw_txq->last_cidx, 1, + eosw_txq->ndesc); + d = &eosw_txq->desc[eosw_txq->last_cidx]; + } +} + +static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n) +{ + eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc); + eosw_txq->inuse += n; +} + +static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq, + struct sk_buff *skb) +{ + if (eosw_txq->inuse == eosw_txq->ndesc) + return -ENOMEM; + + eosw_txq->desc[eosw_txq->pidx].skb = skb; + return 0; +} + +static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq) +{ + return eosw_txq->desc[eosw_txq->last_pidx].skb; +} + +static inline u8 ethofld_calc_tx_flits(struct adapter *adap, + struct sk_buff *skb, u32 hdr_len) +{ + u8 flits, nsgl = 0; + u32 wrlen; + + wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core); + if (skb_shinfo(skb)->gso_size) + wrlen += sizeof(struct cpl_tx_pkt_lso_core); + + wrlen += roundup(hdr_len, 16); + + /* Packet headers + WR + CPLs */ + flits = DIV_ROUND_UP(wrlen, 8); + + if (skb_shinfo(skb)->nr_frags > 0) + nsgl = sgl_len(skb_shinfo(skb)->nr_frags); + else if (skb->len - hdr_len) + nsgl = sgl_len(1); + + return flits + nsgl; +} + +static inline void *write_eo_wr(struct adapter *adap, + struct sge_eosw_txq *eosw_txq, + struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr, + u32 hdr_len, u32 wrlen) +{ + const struct skb_shared_info *ssi = skb_shinfo(skb); + struct cpl_tx_pkt_core *cpl; + u32 immd_len, wrlen16; + bool compl = false; + + wrlen16 = DIV_ROUND_UP(wrlen, 16); + immd_len = sizeof(struct cpl_tx_pkt_core); + if (skb_shinfo(skb)->gso_size) { + if (skb->encapsulation && + CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) + immd_len += sizeof(struct cpl_tx_tnl_lso); + else + immd_len += sizeof(struct cpl_tx_pkt_lso_core); + } + immd_len += hdr_len; + + if (!eosw_txq->ncompl || + eosw_txq->last_compl >= adap->params.ofldq_wr_cred / 2) { + compl = true; + eosw_txq->ncompl++; + eosw_txq->last_compl = 0; + } + + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | + FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) | + FW_WR_COMPL_V(compl)); + wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) | + FW_WR_FLOWID_V(eosw_txq->hwtid)); + wr->r3 = 0; + wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; + wr->u.tcpseg.ethlen = skb_network_offset(skb); + wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); + wr->u.tcpseg.tcplen = tcp_hdrlen(skb); + wr->u.tcpseg.tsclk_tsoff = 0; + wr->u.tcpseg.r4 = 0; + wr->u.tcpseg.r5 = 0; + wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len); + + if (ssi->gso_size) { + struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); + + wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size); + cpl = write_tso_wr(adap, skb, lso); + } else { + wr->u.tcpseg.mss = 
cpu_to_be16(0xffff); + cpl = (void *)(wr + 1); + } + + eosw_txq->cred -= wrlen16; + eosw_txq->last_compl += wrlen16; + return cpl; +} + +static void ethofld_hard_xmit(struct net_device *dev, + struct sge_eosw_txq *eosw_txq) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + u32 wrlen, wrlen16, hdr_len, data_len; + enum sge_eosw_state next_state; + u64 cntrl, *start, *end, *sgl; + struct sge_eohw_txq *eohw_txq; + struct cpl_tx_pkt_core *cpl; + struct fw_eth_tx_eo_wr *wr; + bool skip_eotx_wr = false; + struct sge_eosw_desc *d; + struct sk_buff *skb; + u8 flits, ndesc; + int left; + + eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid]; + spin_lock(&eohw_txq->lock); + reclaim_completed_tx_imm(&eohw_txq->q); + + d = &eosw_txq->desc[eosw_txq->last_pidx]; + skb = d->skb; + skb_tx_timestamp(skb); + + wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx]; + if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE && + eosw_txq->last_pidx == eosw_txq->flowc_idx)) { + hdr_len = skb->len; + data_len = 0; + flits = DIV_ROUND_UP(hdr_len, 8); + if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND) + next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY; + else + next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY; + skip_eotx_wr = true; + } else { + hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb)); + data_len = skb->len - hdr_len; + flits = ethofld_calc_tx_flits(adap, skb, hdr_len); + } + ndesc = flits_to_desc(flits); + wrlen = flits * 8; + wrlen16 = DIV_ROUND_UP(wrlen, 16); + + /* If there are no CPL credits, then wait for credits + * to come back and retry again + */ + if (unlikely(wrlen16 > eosw_txq->cred)) + goto out_unlock; + + if (unlikely(skip_eotx_wr)) { + start = (u64 *)wr; + eosw_txq->state = next_state; + goto write_wr_headers; + } + + cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen); + cntrl = hwcsum(adap->params.chip, skb); + if (skb_vlan_tag_present(skb)) + cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); + + cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | + TXPKT_INTF_V(pi->tx_chan) | + TXPKT_PF_V(adap->pf)); + cpl->pack = 0; + cpl->len = cpu_to_be16(skb->len); + cpl->ctrl1 = cpu_to_be64(cntrl); + + start = (u64 *)(cpl + 1); + +write_wr_headers: + sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start, + hdr_len); + if (data_len) { + if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, d->addr))) { + memset(d->addr, 0, sizeof(d->addr)); + eohw_txq->mapping_err++; + goto out_unlock; + } + + end = (u64 *)wr + flits; + if (unlikely(start > sgl)) { + left = (u8 *)end - (u8 *)eohw_txq->q.stat; + end = (void *)eohw_txq->q.desc + left; + } + + if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) { + /* If current position is already at the end of the + * txq, reset the current to point to start of the queue + * and update the end ptr as well. 
+ */ + left = (u8 *)end - (u8 *)eohw_txq->q.stat; + + end = (void *)eohw_txq->q.desc + left; + sgl = (void *)eohw_txq->q.desc; + } + + cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len, + d->addr); + } + + txq_advance(&eohw_txq->q, ndesc); + cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc); + eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc); + +out_unlock: + spin_unlock(&eohw_txq->lock); +} + +static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq) +{ + struct sk_buff *skb; + int pktcount; + + switch (eosw_txq->state) { + case CXGB4_EO_STATE_ACTIVE: + case CXGB4_EO_STATE_FLOWC_OPEN_SEND: + case CXGB4_EO_STATE_FLOWC_CLOSE_SEND: + pktcount = eosw_txq->pidx - eosw_txq->last_pidx; + if (pktcount < 0) + pktcount += eosw_txq->ndesc; + break; + case CXGB4_EO_STATE_FLOWC_OPEN_REPLY: + case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY: + case CXGB4_EO_STATE_CLOSED: + default: + return; + } + + while (pktcount--) { + skb = eosw_txq_peek(eosw_txq); + if (!skb) { + eosw_txq_advance_index(&eosw_txq->last_pidx, 1, + eosw_txq->ndesc); + continue; + } + + ethofld_hard_xmit(dev, eosw_txq); + } +} + +static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct cxgb4_tc_port_mqprio *tc_port_mqprio; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + struct sge_eosw_txq *eosw_txq; + u32 qid; + int ret; + + ret = cxgb4_validate_skb(skb, dev, ETH_HLEN); + if (ret) + goto out_free; + + tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id]; + qid = skb_get_queue_mapping(skb) - pi->nqsets; + eosw_txq = &tc_port_mqprio->eosw_txq[qid]; + spin_lock_bh(&eosw_txq->lock); + if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) + goto out_unlock; + + ret = eosw_txq_enqueue(eosw_txq, skb); + if (ret) + goto out_unlock; + + /* SKB is queued for processing until credits are available. + * So, call the destructor now and we'll free the skb later + * after it has been successfully transmitted. + */ + skb_orphan(skb); + + eosw_txq_advance(eosw_txq, 1); + ethofld_xmit(dev, eosw_txq); + spin_unlock_bh(&eosw_txq->lock); + return NETDEV_TX_OK; + +out_unlock: + spin_unlock_bh(&eosw_txq->lock); +out_free: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct port_info *pi = netdev_priv(dev); + u16 qid = skb_get_queue_mapping(skb); if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM)) return cxgb4_vf_eth_xmit(skb, dev); + if (unlikely(qid >= pi->nqsets)) + return cxgb4_ethofld_xmit(skb, dev); + return cxgb4_eth_xmit(skb, dev); } /** - * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs - * @q: the SGE control Tx queue + * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc. + * @dev - netdevice + * @eotid - ETHOFLD tid to bind/unbind + * @tc - traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid * - * This is a variant of cxgb4_reclaim_completed_tx() that is used - * for Tx queues that send only immediate data (presently just - * the control queues) and thus do not have any sk_buffs to release. + * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class. + * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from + * a traffic class. 
*/ -static inline void reclaim_completed_tx_imm(struct sge_txq *q) +int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc) { - int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); - int reclaim = hw_cidx - q->cidx; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + enum sge_eosw_state next_state; + struct sge_eosw_txq *eosw_txq; + u32 len, len16, nparams = 6; + struct fw_flowc_wr *flowc; + struct eotid_entry *entry; + struct sge_ofld_rxq *rxq; + struct sk_buff *skb; + int ret = 0; - if (reclaim < 0) - reclaim += q->size; + len = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval) * nparams; + len16 = DIV_ROUND_UP(len, 16); - q->in_use -= reclaim; - q->cidx = hw_cidx; + entry = cxgb4_lookup_eotid(&adap->tids, eotid); + if (!entry) + return -ENOMEM; + + eosw_txq = (struct sge_eosw_txq *)entry->data; + if (!eosw_txq) + return -ENOMEM; + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + spin_lock_bh(&eosw_txq->lock); + if (tc != FW_SCHED_CLS_NONE) { + if (eosw_txq->state != CXGB4_EO_STATE_CLOSED) + goto out_unlock; + + next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND; + } else { + if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) + goto out_unlock; + + next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND; + } + + flowc = __skb_put(skb, len); + memset(flowc, 0, len); + + rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid]; + flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) | + FW_WR_FLOWID_V(eosw_txq->hwtid)); + flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | + FW_FLOWC_WR_NPARAMS_V(nparams) | + FW_WR_COMPL_V(1)); + flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; + flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf)); + flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; + flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan); + flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; + flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan); + flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; + flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id); + flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; + flowc->mnemval[4].val = cpu_to_be32(tc); + flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE; + flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ? + FW_FLOWC_MNEM_EOSTATE_CLOSING : + FW_FLOWC_MNEM_EOSTATE_ESTABLISHED); + + eosw_txq->cred -= len16; + eosw_txq->ncompl++; + eosw_txq->last_compl = 0; + + ret = eosw_txq_enqueue(eosw_txq, skb); + if (ret) { + dev_consume_skb_any(skb); + goto out_unlock; + } + + eosw_txq->state = next_state; + eosw_txq->flowc_idx = eosw_txq->pidx; + eosw_txq_advance(eosw_txq, 1); + ethofld_xmit(dev, eosw_txq); + +out_unlock: + spin_unlock_bh(&eosw_txq->lock); + return ret; } /** @@ -3311,6 +3739,112 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) return work_done; } +void cxgb4_ethofld_restart(unsigned long data) +{ + struct sge_eosw_txq *eosw_txq = (struct sge_eosw_txq *)data; + int pktcount; + + spin_lock(&eosw_txq->lock); + pktcount = eosw_txq->cidx - eosw_txq->last_cidx; + if (pktcount < 0) + pktcount += eosw_txq->ndesc; + + if (pktcount) { + cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev), + eosw_txq, pktcount); + eosw_txq->inuse -= pktcount; + } + + /* There may be some packets waiting for completions. So, + * attempt to send these packets now. 
+ */ + ethofld_xmit(eosw_txq->netdev, eosw_txq); + spin_unlock(&eosw_txq->lock); +} + +/* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions + * @q: the response queue that received the packet + * @rsp: the response queue descriptor holding the CPL message + * @si: the gather list of packet fragments + * + * Process a ETHOFLD Tx completion. Increment the cidx here, but + * free up the descriptors in a tasklet later. + */ +int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *si) +{ + u8 opcode = ((const struct rss_header *)rsp)->opcode; + + /* skip RSS header */ + rsp++; + + if (opcode == CPL_FW4_ACK) { + const struct cpl_fw4_ack *cpl; + struct sge_eosw_txq *eosw_txq; + struct eotid_entry *entry; + struct sk_buff *skb; + u32 hdr_len, eotid; + u8 flits, wrlen16; + int credits; + + cpl = (const struct cpl_fw4_ack *)rsp; + eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) - + q->adap->tids.eotid_base; + entry = cxgb4_lookup_eotid(&q->adap->tids, eotid); + if (!entry) + goto out_done; + + eosw_txq = (struct sge_eosw_txq *)entry->data; + if (!eosw_txq) + goto out_done; + + spin_lock(&eosw_txq->lock); + credits = cpl->credits; + while (credits > 0) { + skb = eosw_txq->desc[eosw_txq->cidx].skb; + if (!skb) + break; + + if (unlikely((eosw_txq->state == + CXGB4_EO_STATE_FLOWC_OPEN_REPLY || + eosw_txq->state == + CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) && + eosw_txq->cidx == eosw_txq->flowc_idx)) { + flits = DIV_ROUND_UP(skb->len, 8); + if (eosw_txq->state == + CXGB4_EO_STATE_FLOWC_OPEN_REPLY) + eosw_txq->state = CXGB4_EO_STATE_ACTIVE; + else + eosw_txq->state = CXGB4_EO_STATE_CLOSED; + complete(&eosw_txq->completion); + } else { + hdr_len = eth_get_headlen(eosw_txq->netdev, + skb->data, + skb_headlen(skb)); + flits = ethofld_calc_tx_flits(q->adap, skb, + hdr_len); + } + eosw_txq_advance_index(&eosw_txq->cidx, 1, + eosw_txq->ndesc); + wrlen16 = DIV_ROUND_UP(flits * 8, 16); + credits -= wrlen16; + } + + eosw_txq->cred += cpl->credits; + eosw_txq->ncompl--; + + spin_unlock(&eosw_txq->lock); + + /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx, + * if there were packets waiting for completion. + */ + tasklet_schedule(&eosw_txq->qresume_tsk); + } + +out_done: + return 0; +} + /* * The MSI-X interrupt handler for an SGE response queue. 
*/ @@ -3912,30 +4446,30 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid, return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); } -int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, - struct net_device *dev, unsigned int iqid, - unsigned int uld_type) +static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q, + struct net_device *dev, u32 cmd, u32 iqid) { unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); - int ret, nentries; - struct fw_eq_ofld_cmd c; - struct sge *s = &adap->sge; struct port_info *pi = netdev_priv(dev); - int cmd = FW_EQ_OFLD_CMD; + struct sge *s = &adap->sge; + struct fw_eq_ofld_cmd c; + u32 fb_min, nentries; + int ret; /* Add status entries */ - nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); - - txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, - sizeof(struct tx_desc), sizeof(struct tx_sw_desc), - &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, - NUMA_NO_NODE); - if (!txq->q.desc) + nentries = q->size + s->stat_len / sizeof(struct tx_desc); + q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc), + sizeof(struct tx_sw_desc), &q->phys_addr, + &q->sdesc, s->stat_len, NUMA_NO_NODE); + if (!q->desc) return -ENOMEM; + if (chip_ver <= CHELSIO_T5) + fb_min = FETCHBURSTMIN_64B_X; + else + fb_min = FETCHBURSTMIN_64B_T6_X; + memset(&c, 0, sizeof(c)); - if (unlikely(uld_type == CXGB4_TX_CRYPTO)) - cmd = FW_EQ_CTRL_CMD; c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F | FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(adap->pf) | @@ -3947,27 +4481,42 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid)); c.dcaen_to_eqsize = - htonl(FW_EQ_OFLD_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 - ? 
FETCHBURSTMIN_64B_X - : FETCHBURSTMIN_64B_T6_X) | + htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) | FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); - c.eqaddr = cpu_to_be64(txq->q.phys_addr); + c.eqaddr = cpu_to_be64(q->phys_addr); ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); if (ret) { - kfree(txq->q.sdesc); - txq->q.sdesc = NULL; + kfree(q->sdesc); + q->sdesc = NULL; dma_free_coherent(adap->pdev_dev, nentries * sizeof(struct tx_desc), - txq->q.desc, txq->q.phys_addr); - txq->q.desc = NULL; + q->desc, q->phys_addr); + q->desc = NULL; return ret; } + init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); + return 0; +} + +int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, + struct net_device *dev, unsigned int iqid, + unsigned int uld_type) +{ + u32 cmd = FW_EQ_OFLD_CMD; + int ret; + + if (unlikely(uld_type == CXGB4_TX_CRYPTO)) + cmd = FW_EQ_CTRL_CMD; + + ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid); + if (ret) + return ret; + txq->q.q_type = CXGB4_TXQ_ULD; - init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); txq->adap = adap; skb_queue_head_init(&txq->sendq); tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); @@ -3976,6 +4525,25 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, return 0; } +int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq, + struct net_device *dev, u32 iqid) +{ + int ret; + + ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid); + if (ret) + return ret; + + txq->q.q_type = CXGB4_TXQ_ULD; + spin_lock_init(&txq->lock); + txq->adap = adap; + txq->tso = 0; + txq->tx_cso = 0; + txq->vlan_ins = 0; + txq->mapping_err = 0; + return 0; +} + void free_txq(struct adapter *adap, struct sge_txq *q) { struct sge *s = &adap->sge; @@ -4031,6 +4599,17 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) q->fl.size ? &q->fl : NULL); } +void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq) +{ + if (txq->q.desc) { + t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, + txq->q.cntxt_id); + free_tx_desc(adap, &txq->q, txq->q.in_use, false); + kfree(txq->q.sdesc); + free_txq(adap, &txq->q); + } +} + /** * t4_free_sge_resources - free SGE resources * @adap: the adapter @@ -4060,6 +4639,10 @@ void t4_free_sge_resources(struct adapter *adap) if (eq->rspq.desc) free_rspq_fl(adap, &eq->rspq, eq->fl.size ? 
&eq->fl : NULL); + if (eq->msix) { + cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx); + eq->msix = NULL; + } etq = &adap->sge.ethtxq[i]; if (etq->q.desc) { @@ -4086,8 +4669,15 @@ void t4_free_sge_resources(struct adapter *adap) } } - if (adap->sge.fw_evtq.desc) + if (adap->sge.fw_evtq.desc) { free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); + if (adap->sge.fwevtq_msix_idx >= 0) + cxgb4_free_msix_idx_in_bmap(adap, + adap->sge.fwevtq_msix_idx); + } + + if (adap->sge.nd_msix_idx >= 0) + cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx); if (adap->sge.intrq.desc) free_rspq_fl(adap, &adap->sge.intrq, NULL); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index f2a7824da42b..19d18acfc9a6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -8777,8 +8777,8 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, unsigned int *speedp, unsigned int *mtup) { unsigned int fw_caps = pi->adapter->params.fw_caps_support; - struct fw_port_cmd port_cmd; unsigned int action, link_ok, mtu; + struct fw_port_cmd port_cmd; fw_port_cap32_t linkattr; int ret; @@ -8813,9 +8813,12 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32)); } - *link_okp = link_ok; - *speedp = fwcap_to_speed(linkattr); - *mtup = mtu; + if (link_okp) + *link_okp = link_ok; + if (speedp) + *speedp = fwcap_to_speed(linkattr); + if (mtup) + *mtup = mtu; return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 38dd41eb959e..575c6abcdae7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -1421,6 +1421,11 @@ enum { CPL_FW4_ACK_FLAGS_FLOWC = 0x4, /* fw_flowc_wr complete */ }; +#define CPL_FW4_ACK_FLOWID_S 0 +#define CPL_FW4_ACK_FLOWID_M 0xffffff +#define CPL_FW4_ACK_FLOWID_G(x) \ + (((x) >> CPL_FW4_ACK_FLOWID_S) & CPL_FW4_ACK_FLOWID_M) + struct cpl_fw6_msg { u8 opcode; u8 type; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 65313f6b5704..414e5cca293e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -87,6 +87,7 @@ enum fw_wr_opcodes { FW_ULPTX_WR = 0x04, FW_TP_WR = 0x05, FW_ETH_TX_PKT_WR = 0x08, + FW_ETH_TX_EO_WR = 0x1c, FW_OFLD_CONNECTION_WR = 0x2f, FW_FLOWC_WR = 0x0a, FW_OFLD_TX_DATA_WR = 0x0b, @@ -534,6 +535,35 @@ struct fw_eth_tx_pkt_wr { __be64 r3; }; +enum fw_eth_tx_eo_type { + FW_ETH_TX_EO_TYPE_TCPSEG = 1, +}; + +struct fw_eth_tx_eo_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be64 r3; + union fw_eth_tx_eo { + struct fw_eth_tx_eo_tcpseg { + __u8 type; + __u8 ethlen; + __be16 iplen; + __u8 tcplen; + __u8 tsclk_tsoff; + __be16 r4; + __be16 mss; + __be16 r5; + __be32 plen; + } tcpseg; + } u; +}; + +#define FW_ETH_TX_EO_WR_IMMDLEN_S 0 +#define FW_ETH_TX_EO_WR_IMMDLEN_M 0x1ff +#define FW_ETH_TX_EO_WR_IMMDLEN_V(x) ((x) << FW_ETH_TX_EO_WR_IMMDLEN_S) +#define FW_ETH_TX_EO_WR_IMMDLEN_G(x) \ + (((x) >> FW_ETH_TX_EO_WR_IMMDLEN_S) & FW_ETH_TX_EO_WR_IMMDLEN_M) + struct fw_ofld_connection_wr { __be32 op_compl; __be32 len16_pkd; @@ -660,6 +690,12 @@ enum fw_flowc_mnem_tcpstate { FW_FLOWC_MNEM_TCPSTATE_TIMEWAIT = 10, /* not expected */ }; +enum fw_flowc_mnem_eostate { + FW_FLOWC_MNEM_EOSTATE_ESTABLISHED = 1, /* default */ + /* graceful close, after sending outstanding payload */ + 
FW_FLOWC_MNEM_EOSTATE_CLOSING = 2, +}; + enum fw_flowc_mnem { FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ FW_FLOWC_MNEM_CH, @@ -1134,6 +1170,7 @@ enum fw_caps_config_nic { FW_CAPS_CONFIG_NIC = 0x00000001, FW_CAPS_CONFIG_NIC_VM = 0x00000002, FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020, + FW_CAPS_CONFIG_NIC_ETHOFLD = 0x00000040, }; enum fw_caps_config_ofld { @@ -1276,6 +1313,7 @@ enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD = 0x28, FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29, FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A, + FW_PARAMS_PARAM_DEV_NUM_TM_CLASS = 0x2B, FW_PARAMS_PARAM_DEV_FILTER = 0x2E, }; diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index f1a0c4dceda0..f37c9a08c4cf 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -763,6 +763,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev) { struct net_device *dev; struct ep93xx_priv *ep; + struct resource *mem; dev = platform_get_drvdata(pdev); if (dev == NULL) @@ -778,8 +779,8 @@ static int ep93xx_eth_remove(struct platform_device *pdev) iounmap(ep->base_addr); if (ep->res != NULL) { - release_resource(ep->res); - kfree(ep->res); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(mem->start, resource_size(mem)); } free_netdev(dev); diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index e736ce2c58ca..a8f4c69252ff 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2524,6 +2524,7 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev) struct gemini_ethernet_port *port = platform_get_drvdata(pdev); gemini_port_remove(port); + free_netdev(port->netdev); return 0; } diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index da0c506349d1..a6f2063f1475 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1612,7 +1612,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); struct platform_device *pdev = to_platform_device(priv->dev); - int phy_intf = PHY_INTERFACE_MODE_RGMII; + phy_interface_t phy_intf = PHY_INTERFACE_MODE_RGMII; struct device_node *np = pdev->dev.of_node; int i, err = 0; u32 reg; @@ -1637,8 +1637,8 @@ static int ftgmac100_setup_mdio(struct net_device *netdev) /* Get PHY mode from device-tree */ if (np) { /* Default to RGMII. It's a gigabit part after all */ - phy_intf = of_get_phy_mode(np); - if (phy_intf < 0) + err = of_get_phy_mode(np, &phy_intf); + if (err) phy_intf = PHY_INTERFACE_MODE_RGMII; /* Aspeed only supports these. I don't know about other IP diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig index fbef2829f3de..c6fb8e4021ac 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Kconfig +++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig @@ -2,6 +2,7 @@ config FSL_DPAA2_ETH tristate "Freescale DPAA2 Ethernet" depends on FSL_MC_BUS && FSL_MC_DPIO + select PHYLINK help This is the DPAA2 Ethernet driver supporting Freescale SoCs with DPAA2 (DataPath Acceleration Architecture v2). 
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index acc56606d3a5..7ff147e89426 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2257,8 +2257,16 @@ err_set_cdan: err_service_reg: free_channel(priv, channel); err_alloc_ch: - if (err == -EPROBE_DEFER) + if (err == -EPROBE_DEFER) { + for (i = 0; i < priv->num_channels; i++) { + channel = priv->channel[i]; + nctx = &channel->nctx; + dpaa2_io_service_deregister(channel->dpio, nctx, dev); + free_channel(priv, channel); + } + priv->num_channels = 0; return err; + } if (cpumask_empty(&priv->dpio_cpumask)) { dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index 0883620631b8..96676abcebd5 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -173,6 +173,7 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev, static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + struct dpaa2_eth_priv *priv = netdev_priv(netdev); u8 *p = data; int i; @@ -186,15 +187,22 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + if (priv->mac) + dpaa2_mac_get_strings(p); break; } } static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) { + int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + switch (sset) { case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */ - return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; + if (priv->mac) + num_ss_stats += dpaa2_mac_get_sset_count(); + return num_ss_stats; default: return -EOPNOTSUPP; } @@ -293,6 +301,9 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, return; } *(data + i++) = buf_cnt; + + if (priv->mac) + dpaa2_mac_get_ethtool_stats(priv->mac, data + i); } static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c index fea388d86f20..84233e467ed1 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c @@ -7,14 +7,19 @@ #define phylink_to_dpaa2_mac(config) \ container_of((config), struct dpaa2_mac, phylink_config) -static phy_interface_t phy_mode(enum dpmac_eth_if eth_if) +static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode) { + *if_mode = PHY_INTERFACE_MODE_NA; + switch (eth_if) { case DPMAC_ETH_IF_RGMII: - return PHY_INTERFACE_MODE_RGMII; + *if_mode = PHY_INTERFACE_MODE_RGMII; + break; default: return -EINVAL; } + + return 0; } /* Caller must call of_node_put on the returned value */ @@ -44,17 +49,18 @@ static struct device_node *dpaa2_mac_get_node(u16 dpmac_id) static int dpaa2_mac_get_if_mode(struct device_node *node, struct dpmac_attr attr) { - int if_mode; + phy_interface_t if_mode; + int err; - if_mode = of_get_phy_mode(node); - if (if_mode >= 0) + err = of_get_phy_mode(node, &if_mode); + if (!err) return if_mode; - if_mode = phy_mode(attr.eth_if); - if (if_mode >= 0) + err = phy_mode(attr.eth_if, &if_mode); + if (!err) return if_mode; - return -ENODEV; + return err; } static bool 
dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac, @@ -299,3 +305,71 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac) phylink_destroy(mac->phylink); dpmac_close(mac->mc_io, 0, mac->mc_dev->mc_handle); } + +static char dpaa2_mac_ethtool_stats[][ETH_GSTRING_LEN] = { + [DPMAC_CNT_ING_ALL_FRAME] = "[mac] rx all frames", + [DPMAC_CNT_ING_GOOD_FRAME] = "[mac] rx frames ok", + [DPMAC_CNT_ING_ERR_FRAME] = "[mac] rx frame errors", + [DPMAC_CNT_ING_FRAME_DISCARD] = "[mac] rx frame discards", + [DPMAC_CNT_ING_UCAST_FRAME] = "[mac] rx u-cast", + [DPMAC_CNT_ING_BCAST_FRAME] = "[mac] rx b-cast", + [DPMAC_CNT_ING_MCAST_FRAME] = "[mac] rx m-cast", + [DPMAC_CNT_ING_FRAME_64] = "[mac] rx 64 bytes", + [DPMAC_CNT_ING_FRAME_127] = "[mac] rx 65-127 bytes", + [DPMAC_CNT_ING_FRAME_255] = "[mac] rx 128-255 bytes", + [DPMAC_CNT_ING_FRAME_511] = "[mac] rx 256-511 bytes", + [DPMAC_CNT_ING_FRAME_1023] = "[mac] rx 512-1023 bytes", + [DPMAC_CNT_ING_FRAME_1518] = "[mac] rx 1024-1518 bytes", + [DPMAC_CNT_ING_FRAME_1519_MAX] = "[mac] rx 1519-max bytes", + [DPMAC_CNT_ING_FRAG] = "[mac] rx frags", + [DPMAC_CNT_ING_JABBER] = "[mac] rx jabber", + [DPMAC_CNT_ING_ALIGN_ERR] = "[mac] rx align errors", + [DPMAC_CNT_ING_OVERSIZED] = "[mac] rx oversized", + [DPMAC_CNT_ING_VALID_PAUSE_FRAME] = "[mac] rx pause", + [DPMAC_CNT_ING_BYTE] = "[mac] rx bytes", + [DPMAC_CNT_EGR_GOOD_FRAME] = "[mac] tx frames ok", + [DPMAC_CNT_EGR_UCAST_FRAME] = "[mac] tx u-cast", + [DPMAC_CNT_EGR_MCAST_FRAME] = "[mac] tx m-cast", + [DPMAC_CNT_EGR_BCAST_FRAME] = "[mac] tx b-cast", + [DPMAC_CNT_EGR_ERR_FRAME] = "[mac] tx frame errors", + [DPMAC_CNT_EGR_UNDERSIZED] = "[mac] tx undersized", + [DPMAC_CNT_EGR_VALID_PAUSE_FRAME] = "[mac] tx b-pause", + [DPMAC_CNT_EGR_BYTE] = "[mac] tx bytes", +}; + +#define DPAA2_MAC_NUM_STATS ARRAY_SIZE(dpaa2_mac_ethtool_stats) + +int dpaa2_mac_get_sset_count(void) +{ + return DPAA2_MAC_NUM_STATS; +} + +void dpaa2_mac_get_strings(u8 *data) +{ + u8 *p = data; + int i; + + for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) { + strlcpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } +} + +void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data) +{ + struct fsl_mc_device *dpmac_dev = mac->mc_dev; + int i, err; + u64 value; + + for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) { + err = dpmac_get_counter(mac->mc_io, 0, dpmac_dev->mc_handle, + i, &value); + if (err) { + netdev_err_once(mac->net_dev, + "dpmac_get_counter error %d\n", err); + *(data + i) = U64_MAX; + continue; + } + *(data + i) = value; + } +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h index 8634d0de7ef3..4da8079b9155 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h @@ -29,4 +29,10 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac); void dpaa2_mac_disconnect(struct dpaa2_mac *mac); +int dpaa2_mac_get_sset_count(void); + +void dpaa2_mac_get_strings(u8 *data); + +void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data); + #endif /* DPAA2_MAC_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h index 96a9b0d0992e..3ea51dd9374b 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h @@ -22,6 +22,8 @@ #define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004) #define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD_V2(0x0c3) +#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4) + /* Macros for accessing command 
fields smaller than 1byte */ #define DPMAC_MASK(field) \ GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \ @@ -59,4 +61,13 @@ struct dpmac_cmd_set_link_state { __le64 advertising; }; +struct dpmac_cmd_get_counter { + u8 id; +}; + +struct dpmac_rsp_get_counter { + u64 pad; + u64 counter; +}; + #endif /* _FSL_DPMAC_CMD_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.c b/drivers/net/ethernet/freescale/dpaa2/dpmac.c index b75189deffb1..d5997b654562 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpmac.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.c @@ -147,3 +147,37 @@ int dpmac_set_link_state(struct fsl_mc_io *mc_io, /* send command to mc*/ return mc_send_command(mc_io, &cmd); } + +/** + * dpmac_get_counter() - Read a specific DPMAC counter + * @mc_io: Pointer to opaque I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPMAC object + * @id: The requested counter ID + * @value: Returned counter value + * + * Return: '0' on success; error code otherwise. + */ +int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + enum dpmac_counter_id id, u64 *value) +{ + struct dpmac_cmd_get_counter *dpmac_cmd; + struct dpmac_rsp_get_counter *dpmac_rsp; + struct fsl_mc_command cmd = { 0 }; + int err = 0; + + cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER, + cmd_flags, + token); + dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params; + dpmac_cmd->id = id; + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params; + *value = le64_to_cpu(dpmac_rsp->counter); + + return 0; +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h index 4efc410a479e..135f143097a5 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpmac.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h @@ -141,4 +141,86 @@ int dpmac_set_link_state(struct fsl_mc_io *mc_io, u16 token, struct dpmac_link_state *link_state); +/** + * enum dpmac_counter_id - DPMAC counter types + * + * @DPMAC_CNT_ING_FRAME_64: counts 64-byte frames, good or bad. + * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-byte frames, good or bad. + * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-byte frames, good or bad. + * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-byte frames, good or bad. + * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-byte frames, good or bad. + * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-byte frames, good or bad. + * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-byte frames and larger + * (up to max frame length specified), + * good or bad. + * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received + * with a wrong CRC. + * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length + * specified, with a bad frame check sequence. + * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors. + * Occurs when a receive FIFO overflows. + * Also includes frames truncated as a result of + * the receive FIFO overflow. + * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error + * (optionally used for wrong SFD). + * @DPMAC_CNT_EGR_UNDERSIZED: counts frames transmitted that were less than 64 + * bytes long with a good CRC. + * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length + * specified, with a good frame check sequence.
+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC). + * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted + * (regular and PFC). + * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid + * frames and valid pause frames. + * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames. + * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames. + * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad. + * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames. + * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error + * (except for undersized/fragment frames). + * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid + * frames and valid pause frames transmitted. + * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames. + * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames. + * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames. + * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error. + * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including + * pause frames. + * @DPMAC_CNT_EGR_GOOD_FRAME: counts frames transmitted without error, including + * pause frames. + */ +enum dpmac_counter_id { + DPMAC_CNT_ING_FRAME_64, + DPMAC_CNT_ING_FRAME_127, + DPMAC_CNT_ING_FRAME_255, + DPMAC_CNT_ING_FRAME_511, + DPMAC_CNT_ING_FRAME_1023, + DPMAC_CNT_ING_FRAME_1518, + DPMAC_CNT_ING_FRAME_1519_MAX, + DPMAC_CNT_ING_FRAG, + DPMAC_CNT_ING_JABBER, + DPMAC_CNT_ING_FRAME_DISCARD, + DPMAC_CNT_ING_ALIGN_ERR, + DPMAC_CNT_EGR_UNDERSIZED, + DPMAC_CNT_ING_OVERSIZED, + DPMAC_CNT_ING_VALID_PAUSE_FRAME, + DPMAC_CNT_EGR_VALID_PAUSE_FRAME, + DPMAC_CNT_ING_BYTE, + DPMAC_CNT_ING_MCAST_FRAME, + DPMAC_CNT_ING_BCAST_FRAME, + DPMAC_CNT_ING_ALL_FRAME, + DPMAC_CNT_ING_UCAST_FRAME, + DPMAC_CNT_ING_ERR_FRAME, + DPMAC_CNT_EGR_BYTE, + DPMAC_CNT_EGR_MCAST_FRAME, + DPMAC_CNT_EGR_BCAST_FRAME, + DPMAC_CNT_EGR_UCAST_FRAME, + DPMAC_CNT_EGR_ERR_FRAME, + DPMAC_CNT_ING_GOOD_FRAME, + DPMAC_CNT_EGR_GOOD_FRAME +}; + +int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + enum dpmac_counter_id id, u64 *value); + #endif /* __FSL_DPMAC_H */ diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig index c219587bd334..491659fe3e35 100644 --- a/drivers/net/ethernet/freescale/enetc/Kconfig +++ b/drivers/net/ethernet/freescale/enetc/Kconfig @@ -50,3 +50,13 @@ config FSL_ENETC_HW_TIMESTAMPING allocation has not been supported and it is too expensive to use extended RX BDs if timestamping is not used, this option enables extended RX BDs in order to support hardware timestamping. + +config FSL_ENETC_QOS + bool "ENETC hardware Time-sensitive Network support" + depends on (FSL_ENETC || FSL_ENETC_VF) && NET_SCH_TAPRIO + help + The ENETC hardware supports Time-Sensitive Networking (TSN) + capabilities such as 802.1Qbv, 802.1Qci and 802.1Qbu. These can be + enabled or disabled from user space via QoS commands (tc). On the + kernel side they are handled by the QoS driver. Currently only + taprio (802.1Qbv) is supported.
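For context on the FSL_ENETC_QOS option above: a taprio schedule installed with tc reaches the driver through the TC_SETUP_QDISC_TAPRIO hook as a struct tc_taprio_qopt_offload (enable flag, base_time, cycle_time, cycle_time_extension and an array of gate-control entries). The sketch below is a hypothetical validation helper, not the enetc handler itself (enetc_setup_tc_taprio, added later in this series); the 32-bit cycle-time limit mirrors the check the enetc code makes, while the helper name, the max_gcl_len parameter and the zero-interval rejection are illustrative assumptions:

#include <linux/kernel.h>
#include <net/pkt_sched.h>

/* Hypothetical sanity check on a taprio offload request before it is
 * programmed into hardware with a bounded gate-control-list length and
 * 32-bit cycle-time registers.
 */
static int example_check_taprio(const struct tc_taprio_qopt_offload *conf,
				u16 max_gcl_len)
{
	size_t i;

	if (!conf->enable)
		return 0;	/* disabling the schedule needs no checks */

	if (!conf->num_entries || conf->num_entries > max_gcl_len)
		return -EINVAL;

	if (conf->cycle_time > U32_MAX ||
	    conf->cycle_time_extension > U32_MAX)
		return -EINVAL;

	for (i = 0; i < conf->num_entries; i++)
		if (!conf->entries[i].interval)
			return -EINVAL;	/* reject zero-length gate intervals */

	return 0;
}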
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile index d200c27c3bf6..d0db33e5b6b7 100644 --- a/drivers/net/ethernet/freescale/enetc/Makefile +++ b/drivers/net/ethernet/freescale/enetc/Makefile @@ -5,9 +5,11 @@ common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs) fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o +fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o fsl-enetc-vf-y := enetc_vf.o $(common-objs) +fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index b6ff89307409..f6b00c68451b 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -742,9 +742,14 @@ void enetc_get_si_caps(struct enetc_si *si) si->num_rss = 0; val = enetc_rd(hw, ENETC_SIPCAPR0); if (val & ENETC_SIPCAPR0_RSS) { - val = enetc_rd(hw, ENETC_SIRSSCAPR); - si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(val); + u32 rss; + + rss = enetc_rd(hw, ENETC_SIRSSCAPR); + si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss); } + + if (val & ENETC_SIPCAPR0_QBV) + si->hw_features |= ENETC_SI_F_QBV; } static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size) @@ -1314,8 +1319,12 @@ static void enetc_disable_interrupts(struct enetc_ndev_priv *priv) static void adjust_link(struct net_device *ndev) { + struct enetc_ndev_priv *priv = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; + if (priv->active_offloads & ENETC_F_QBV) + enetc_sched_speed_set(ndev); + phy_print_status(phydev); } @@ -1427,8 +1436,7 @@ int enetc_close(struct net_device *ndev) return 0; } -int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, - void *type_data) +int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct tc_mqprio_qopt *mqprio = type_data; @@ -1436,9 +1444,6 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, u8 num_tc; int i; - if (type != TC_SETUP_QDISC_MQPRIO) - return -EOPNOTSUPP; - mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; num_tc = mqprio->num_tc; @@ -1483,6 +1488,19 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, return 0; } +int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + return enetc_setup_tc_mqprio(ndev, type_data); + case TC_SETUP_QDISC_TAPRIO: + return enetc_setup_tc_taprio(ndev, type_data); + default: + return -EOPNOTSUPP; + } +} + struct net_device_stats *enetc_get_stats(struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -1599,7 +1617,10 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) if (cmd == SIOCGHWTSTAMP) return enetc_hwtstamp_get(ndev, rq); #endif - return -EINVAL; + + if (!ndev->phydev) + return -EOPNOTSUPP; + return phy_mii_ioctl(ndev->phydev, rq, cmd); } int enetc_alloc_msix(struct enetc_ndev_priv *priv) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 541b4e2073fe..89f23156f330 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -118,6 +118,8 @@ enum enetc_errata { ENETC_ERR_UCMCSWP = BIT(2), }; 
+#define ENETC_SI_F_QBV BIT(0) + /* PCI IEP device data */ struct enetc_si { struct pci_dev *pdev; @@ -133,6 +135,7 @@ struct enetc_si { int num_fs_entries; int num_rss; /* number of RSS buckets */ unsigned short pad; + int hw_features; }; #define ENETC_SI_ALIGN 32 @@ -173,6 +176,7 @@ struct enetc_cls_rule { enum enetc_active_offloads { ENETC_F_RX_TSTAMP = BIT(0), ENETC_F_TX_TSTAMP = BIT(1), + ENETC_F_QBV = BIT(2), }; struct enetc_ndev_priv { @@ -188,6 +192,8 @@ struct enetc_ndev_priv { u16 msg_enable; int active_offloads; + u32 speed; /* store speed for compare update pspeed */ + struct enetc_bdr *tx_ring[16]; struct enetc_bdr *rx_ring[16]; @@ -244,3 +250,12 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes); int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count); int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count); +int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd); + +#ifdef CONFIG_FSL_ENETC_QOS +int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data); +void enetc_sched_speed_set(struct net_device *ndev); +#else +#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP +#define enetc_sched_speed_set(ndev) (void)0 +#endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c index de466b71bf8f..201cbc362e33 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c @@ -32,7 +32,7 @@ static int enetc_cbd_unused(struct enetc_cbdr *r) r->bd_count; } -static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd) +int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd) { struct enetc_cbdr *ring = &si->cbd_ring; int timeout = ENETC_CBDR_TIMEOUT; @@ -66,6 +66,9 @@ static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd) if (!timeout) return -EBUSY; + /* CBD may writeback data, feedback up level */ + *cbd = *dest_cbd; + enetc_clean_cbdr(si); return 0; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index fcb52efec075..880a8ed8bb47 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -584,6 +584,31 @@ static int enetc_get_ts_info(struct net_device *ndev, return 0; } +static void enetc_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + + if (dev->phydev) + phy_ethtool_get_wol(dev->phydev, wol); +} + +static int enetc_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + int ret; + + if (!dev->phydev) + return -EOPNOTSUPP; + + ret = phy_ethtool_set_wol(dev->phydev, wol); + if (!ret) + device_set_wakeup_enable(&dev->dev, wol->wolopts); + + return ret; +} + static const struct ethtool_ops enetc_pf_ethtool_ops = { .get_regs_len = enetc_get_reglen, .get_regs = enetc_get_regs, @@ -601,6 +626,8 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = { .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_link = ethtool_op_get_link, .get_ts_info = enetc_get_ts_info, + .get_wol = enetc_get_wol, + .set_wol = enetc_set_wol, }; static const struct ethtool_ops enetc_vf_ethtool_ops = { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 88276299f447..924ddb6d358a 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ 
b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -18,6 +18,7 @@ #define ENETC_SICTR0 0x18 #define ENETC_SICTR1 0x1c #define ENETC_SIPCAPR0 0x20 +#define ENETC_SIPCAPR0_QBV BIT(4) #define ENETC_SIPCAPR0_RSS BIT(8) #define ENETC_SIPCAPR1 0x24 #define ENETC_SITGTGR 0x30 @@ -148,6 +149,11 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PORT_BASE 0x10000 #define ENETC_PMR 0x0000 #define ENETC_PMR_EN GENMASK(18, 16) +#define ENETC_PMR_PSPEED_MASK GENMASK(11, 8) +#define ENETC_PMR_PSPEED_10M 0 +#define ENETC_PMR_PSPEED_100M BIT(8) +#define ENETC_PMR_PSPEED_1000M BIT(9) +#define ENETC_PMR_PSPEED_2500M BIT(10) #define ENETC_PSR 0x0004 /* RO */ #define ENETC_PSIPMR 0x0018 #define ENETC_PSIPMR_SET_UP(n) BIT(n) /* n = SI index */ @@ -440,22 +446,6 @@ union enetc_rx_bd { #define EMETC_MAC_ADDR_FILT_RES 3 /* # of reserved entries at the beginning */ #define ENETC_MAX_NUM_VFS 2 -struct enetc_cbd { - union { - struct { - __le32 addr[2]; - __le32 opt[4]; - }; - __le32 data[6]; - }; - __le16 index; - __le16 length; - u8 cmd; - u8 cls; - u8 _res; - u8 status_flags; -}; - #define ENETC_CBD_FLAGS_SF BIT(7) /* short format */ #define ENETC_CBD_STATUS_MASK 0xf @@ -554,3 +544,70 @@ static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx, val |= ENETC_TBMR_SET_PRIO(prio); enetc_txbdr_wr(hw, bdr_idx, ENETC_TBMR, val); } + +enum bdcr_cmd_class { + BDCR_CMD_UNSPEC = 0, + BDCR_CMD_MAC_FILTER, + BDCR_CMD_VLAN_FILTER, + BDCR_CMD_RSS, + BDCR_CMD_RFS, + BDCR_CMD_PORT_GCL, + BDCR_CMD_RECV_CLASSIFIER, + __BDCR_CMD_MAX_LEN, + BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1, +}; + +/* class 5, command 0 */ +struct tgs_gcl_conf { + u8 atc; /* init gate value */ + u8 res[7]; + struct { + u8 res1[4]; + __le16 acl_len; + u8 res2[2]; + }; +}; + +/* gate control list entry */ +struct gce { + __le32 period; + u8 gate; + u8 res[3]; +}; + +/* tgs_gcl_conf address point to this data space */ +struct tgs_gcl_data { + __le32 btl; + __le32 bth; + __le32 ct; + __le32 cte; + struct gce entry[0]; +}; + +struct enetc_cbd { + union{ + struct { + __le32 addr[2]; + union { + __le32 opt[4]; + struct tgs_gcl_conf gcl_conf; + }; + }; /* Long format */ + __le32 data[6]; + }; + __le16 index; + __le16 length; + u8 cmd; + u8 cls; + u8 _res; + u8 status_flags; +}; + +/* port time gating control register */ +#define ENETC_QBV_PTGCR_OFFSET 0x11a00 +#define ENETC_QBV_TGE BIT(31) +#define ENETC_QBV_TGPE BIT(30) + +/* Port time gating capability register */ +#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08 +#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index b73421c3e25b..e7482d483b28 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -742,6 +742,9 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->priv_flags |= IFF_UNICAST_FLT; + if (si->hw_features & ENETC_SI_F_QBV) + priv->active_offloads |= ENETC_F_QBV; + /* pick up primary MAC address from SI */ enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr); } @@ -784,8 +787,8 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv) } } - priv->if_mode = of_get_phy_mode(np); - if ((int)priv->if_mode < 0) { + err = of_get_phy_mode(np, &priv->if_mode); + if (err) { dev_err(priv->dev, "missing phy type\n"); of_node_put(priv->phy_node); if (of_phy_is_fixed_link(np)) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c new file mode 
100644 index 000000000000..66a3da61ca16 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include "enetc.h" + +#include <net/pkt_sched.h> + +static u16 enetc_get_max_gcl_len(struct enetc_hw *hw) +{ + return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET) + & ENETC_QBV_MAX_GCL_LEN_MASK; +} + +void enetc_sched_speed_set(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + u32 old_speed = priv->speed; + u32 speed, pspeed; + + if (phydev->speed == old_speed) + return; + + speed = phydev->speed; + switch (speed) { + case SPEED_1000: + pspeed = ENETC_PMR_PSPEED_1000M; + break; + case SPEED_2500: + pspeed = ENETC_PMR_PSPEED_2500M; + break; + case SPEED_100: + pspeed = ENETC_PMR_PSPEED_100M; + break; + case SPEED_10: + default: + pspeed = ENETC_PMR_PSPEED_10M; + netdev_err(ndev, "Qbv PSPEED set speed link down.\n"); + } + + priv->speed = speed; + enetc_port_wr(&priv->si->hw, ENETC_PMR, + (enetc_port_rd(&priv->si->hw, ENETC_PMR) + & (~ENETC_PMR_PSPEED_MASK)) + | pspeed); +} + +static int enetc_setup_taprio(struct net_device *ndev, + struct tc_taprio_qopt_offload *admin_conf) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_cbd cbd = {.cmd = 0}; + struct tgs_gcl_conf *gcl_config; + struct tgs_gcl_data *gcl_data; + struct gce *gce; + dma_addr_t dma; + u16 data_size; + u16 gcl_len; + u32 tge; + int err; + int i; + + if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw)) + return -EINVAL; + gcl_len = admin_conf->num_entries; + + tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET); + if (!admin_conf->enable) { + enetc_wr(&priv->si->hw, + ENETC_QBV_PTGCR_OFFSET, + tge & (~ENETC_QBV_TGE)); + return 0; + } + + if (admin_conf->cycle_time > U32_MAX || + admin_conf->cycle_time_extension > U32_MAX) + return -EINVAL; + + /* Configure the (administrative) gate control list using the + * control BD descriptor. 
+ */ + gcl_config = &cbd.gcl_conf; + + data_size = struct_size(gcl_data, entry, gcl_len); + gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL); + if (!gcl_data) + return -ENOMEM; + + gce = (struct gce *)(gcl_data + 1); + + /* Set all gates open as default */ + gcl_config->atc = 0xff; + gcl_config->acl_len = cpu_to_le16(gcl_len); + + if (!admin_conf->base_time) { + gcl_data->btl = + cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0)); + gcl_data->bth = + cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1)); + } else { + gcl_data->btl = + cpu_to_le32(lower_32_bits(admin_conf->base_time)); + gcl_data->bth = + cpu_to_le32(upper_32_bits(admin_conf->base_time)); + } + + gcl_data->ct = cpu_to_le32(admin_conf->cycle_time); + gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension); + + for (i = 0; i < gcl_len; i++) { + struct tc_taprio_sched_entry *temp_entry; + struct gce *temp_gce = gce + i; + + temp_entry = &admin_conf->entries[i]; + + temp_gce->gate = (u8)temp_entry->gate_mask; + temp_gce->period = cpu_to_le32(temp_entry->interval); + } + + cbd.length = cpu_to_le16(data_size); + cbd.status_flags = 0; + + dma = dma_map_single(&priv->si->pdev->dev, gcl_data, + data_size, DMA_TO_DEVICE); + if (dma_mapping_error(&priv->si->pdev->dev, dma)) { + netdev_err(priv->si->ndev, "DMA mapping failed!\n"); + kfree(gcl_data); + return -ENOMEM; + } + + cbd.addr[0] = lower_32_bits(dma); + cbd.addr[1] = upper_32_bits(dma); + cbd.cls = BDCR_CMD_PORT_GCL; + cbd.status_flags = 0; + + enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET, + tge | ENETC_QBV_TGE); + + err = enetc_send_cmd(priv->si, &cbd); + if (err) + enetc_wr(&priv->si->hw, + ENETC_QBV_PTGCR_OFFSET, + tge & (~ENETC_QBV_TGE)); + + dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE); + kfree(gcl_data); + + return err; +} + +int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) +{ + struct tc_taprio_qopt_offload *taprio = type_data; + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_set_bdr_prio(&priv->si->hw, + priv->tx_ring[i]->index, + taprio->enable ? i : 0); + + err = enetc_setup_taprio(ndev, taprio); + + if (err) + for (i = 0; i < priv->num_tx_rings; i++) + enetc_set_bdr_prio(&priv->si->hw, + priv->tx_ring[i]->index, + taprio->enable ? 
0 : i); + + return err; +} diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 7d37ba9f6819..b886b075650e 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3393,6 +3393,7 @@ fec_probe(struct platform_device *pdev) { struct fec_enet_private *fep; struct fec_platform_data *pdata; + phy_interface_t interface; struct net_device *ndev; int i, irq, ret = 0; const struct of_device_id *of_id; @@ -3465,15 +3466,15 @@ fec_probe(struct platform_device *pdev) } fep->phy_node = phy_node; - ret = of_get_phy_mode(pdev->dev.of_node); - if (ret < 0) { + ret = of_get_phy_mode(pdev->dev.of_node, &interface); + if (ret) { pdata = dev_get_platdata(&pdev->dev); if (pdata) fep->phy_interface = pdata->phy; else fep->phy_interface = PHY_INTERFACE_MODE_MII; } else { - fep->phy_interface = ret; + fep->phy_interface = interface; } fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); @@ -3644,6 +3645,8 @@ fec_drv_remove(struct platform_device *pdev) regulator_disable(fep->reg_phy); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); + clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_ipg); if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(fep->phy_node); diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 7ab8095db192..f0806ace1ae2 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -608,7 +608,7 @@ static int mac_probe(struct platform_device *_of_dev) const u8 *mac_addr; u32 val; u8 fman_id; - int phy_if; + phy_interface_t phy_if; dev = &_of_dev->dev; mac_node = dev->of_node; @@ -776,8 +776,8 @@ static int mac_probe(struct platform_device *_of_dev) } /* Get the PHY connection type */ - phy_if = of_get_phy_mode(mac_node); - if (phy_if < 0) { + err = of_get_phy_mode(mac_node, &phy_if); + if (err) { dev_warn(dev, "of_get_phy_mode() for %pOF failed. Defaulting to SGMII\n", mac_node); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 51ad86417cb1..72868a28b621 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -641,6 +641,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) const char *model; const void *mac_addr; int err = 0, i; + phy_interface_t interface; struct net_device *dev = NULL; struct gfar_private *priv = NULL; struct device_node *np = ofdev->dev.of_node; @@ -805,9 +806,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) * rgmii-id really needs to be specified. 
Other types can be * detected by hardware */ - err = of_get_phy_mode(np); - if (err >= 0) - priv->interface = err; + err = of_get_phy_mode(np, &interface); + if (!err) + priv->interface = interface; else priv->interface = gfar_get_interface(dev); diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index f472a6dbbe6f..432c6a818ae5 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -90,11 +90,11 @@ extern const char gfar_driver_version[]; #define DEFAULT_RX_LFC_THR 16 #define DEFAULT_LFC_PTVVAL 4 -/* prevent fragmenation by HW in DSA environments */ -#define GFAR_RXB_SIZE roundup(1536 + 8, 64) -#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \ - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define GFAR_RXB_TRUESIZE 2048 +#define GFAR_SKBFRAG_OVR (RXBUF_ALIGNMENT \ + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define GFAR_RXB_SIZE rounddown(GFAR_RXB_TRUESIZE - GFAR_SKBFRAG_OVR, 64) +#define GFAR_SKBFRAG_SIZE (GFAR_RXB_SIZE + GFAR_SKBFRAG_OVR) #define TX_RING_MOD_MASK(size) (size-1) #define RX_RING_MOD_MASK(size) (size-1) diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 4606a7e4a6d1..3e9b6d543c77 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -211,7 +211,7 @@ struct hip04_priv { #if defined(CONFIG_HI13X1_GMAC) void __iomem *sysctrl_base; #endif - int phy_mode; + phy_interface_t phy_mode; int chan; unsigned int port; unsigned int group; @@ -961,10 +961,9 @@ static int hip04_mac_probe(struct platform_device *pdev) goto init_fail; } - priv->phy_mode = of_get_phy_mode(node); - if (priv->phy_mode < 0) { + ret = of_get_phy_mode(node, &priv->phy_mode); + if (ret) { dev_warn(d, "not find phy-mode\n"); - ret = -EINVAL; goto init_fail; } diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index c41b19c760f8..247de9105d10 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -1193,10 +1193,9 @@ static int hix5hd2_dev_probe(struct platform_device *pdev) if (ret) goto err_free_mdio; - priv->phy_mode = of_get_phy_mode(node); - if ((int)priv->phy_mode < 0) { + ret = of_get_phy_mode(node, &priv->phy_mode); + if (ret) { netdev_err(ndev, "not find phy-mode\n"); - ret = -EINVAL; goto err_mdiobus; } diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 6d0457eb4faa..08339278c722 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -199,7 +199,6 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags) ring->q = q; ring->flags = flags; - spin_lock_init(&ring->lock); ring->coal_param = q->handle->coal_param; assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr); diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index e9c67c06bfd2..6ab9458302e1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -274,9 +274,6 @@ struct hnae_ring { /* statistic */ struct ring_stats stats; - /* ring lock for poll one */ - spinlock_t lock; - dma_addr_t desc_dma_addr; u32 buf_size; /* size for hnae_desc->addr, preset by AE */ u16 desc_num; /* total number of desc */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c 
b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index a48396dd4ebb..14ab20491fd0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -943,15 +943,6 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h) return u > c ? (h > c && h <= u) : (h > c || h <= u); } -/* netif_tx_lock will turn down the performance, set only when necessary */ -#ifdef CONFIG_NET_POLL_CONTROLLER -#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock) -#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock) -#else -#define NETIF_TX_LOCK(ring) -#define NETIF_TX_UNLOCK(ring) -#endif - /* reclaim all desc in one budget * return error or number of desc left */ @@ -965,21 +956,16 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, int head; int bytes, pkts; - NETIF_TX_LOCK(ring); - head = readl_relaxed(ring->io_base + RCB_REG_HEAD); rmb(); /* make sure head is ready before touch any data */ - if (is_ring_empty(ring) || head == ring->next_to_clean) { - NETIF_TX_UNLOCK(ring); + if (is_ring_empty(ring) || head == ring->next_to_clean) return 0; /* no data to poll */ - } if (!is_valid_clean_head(ring, head)) { netdev_err(ndev, "wrong head (%d, %d-%d)\n", head, ring->next_to_use, ring->next_to_clean); ring->stats.io_err_cnt++; - NETIF_TX_UNLOCK(ring); return -EIO; } @@ -994,8 +980,6 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, ring->stats.tx_pkts += pkts; ring->stats.tx_bytes += bytes; - NETIF_TX_UNLOCK(ring); - dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); netdev_tx_completed_queue(dev_queue, pkts, bytes); @@ -1055,16 +1039,12 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data) int head; int bytes, pkts; - NETIF_TX_LOCK(ring); - head = ring->next_to_use; /* ntu :soft setted ring position*/ bytes = 0; pkts = 0; while (head != ring->next_to_clean) hns_nic_reclaim_one_desc(ring, &bytes, &pkts); - NETIF_TX_UNLOCK(ring); - dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); netdev_tx_reset_queue(dev_queue); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 45f59165bcd2..3b5e2d7251e7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ // Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HNAE3_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 345633f9dbf6..9d47abd5c37c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ // Copyright (c) 2016-2017 Hisilicon Limited. 
#ifndef __HNS3_ENET_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index b104d3c3b757..6e0212b79438 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -70,11 +70,6 @@ static const struct hns3_stats hns3_rxq_stats[] = { #define HNS3_NIC_LB_TEST_TX_CNT_ERR 2 #define HNS3_NIC_LB_TEST_RX_CNT_ERR 3 -struct hns3_link_mode_mapping { - u32 hns3_link_mode; - u32 ethtool_link_mode; -}; - static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) { struct hnae3_handle *h = hns3_get_handle(ndev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index af96e7904925..d97da67f07a1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ // Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HCLGE_CMD_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 49ad8483723d..d6c3952aba04 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -124,7 +124,7 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, if (ret) return ret; - for (i = 0; i < HNAE3_MAX_TC; i++) { + for (i = 0; i < hdev->tc_max; i++) { switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: if (hdev->tm_info.tc_info[i].tc_sch_mode != @@ -318,6 +318,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) struct net_device *netdev = h->kinfo.netdev; struct hclge_dev *hdev = vport->back; u8 i, j, pfc_map, *prio_tc; + int ret; if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE) @@ -347,7 +348,21 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) hclge_tm_pfc_info_update(hdev); - return hclge_pause_setup_hw(hdev, false); + ret = hclge_pause_setup_hw(hdev, false); + if (ret) + return ret; + + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + ret = hclge_buffer_alloc(hdev); + if (ret) { + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + return ret; + } + + return hclge_notify_client(hdev, HNAE3_UP_CLIENT); } /* DCBX configuration */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h index 278f21e02736..b04702e65689 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ // Copyright (c) 2016-2017 Hisilicon Limited. 
#ifndef __HCLGE_DCB_H__ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 4f8f0684beb2..7c7038676d6d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3679,12 +3679,28 @@ static int hclge_set_rst_done(struct hclge_dev *hdev) { struct hclge_pf_rst_done_cmd *req; struct hclge_desc desc; + int ret; req = (struct hclge_pf_rst_done_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; - return hclge_cmd_send(&hdev->hw, &desc, 1); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* To be compatible with the old firmware, which does not support + * command HCLGE_OPC_PF_RST_DONE, just print a warning and + * return success + */ + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "current firmware does not support command(0x%x)!\n", + HCLGE_OPC_PF_RST_DONE); + return 0; + } else if (ret) { + dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", + ret); + } + + return ret; } static int hclge_reset_prepare_up(struct hclge_dev *hdev) @@ -6350,11 +6366,23 @@ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0); req = (struct hclge_mac_vlan_switch_cmd *)desc.data; + + /* read current config parameter */ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM, - false); + true); req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL; req->func_id = cpu_to_le32(func_id); - req->switch_param = switch_param; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "read mac vlan switch parameter fail, ret = %d\n", ret); + return ret; + } + + /* modify and write new config parameter */ + hclge_cmd_reuse_desc(&desc, false); + req->switch_param = (req->switch_param & param_mask) | switch_param; req->param_mask = param_mask; ret = hclge_cmd_send(&hdev->hw, &desc, 1); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 599f76a05f41..88f6c4cbac7c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ // Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HCLGE_MAIN_H @@ -225,8 +225,6 @@ enum hclge_evt_cause { HCLGE_VECTOR0_EVENT_OTHER, }; -#define HCLGE_MPF_ENBALE 1 - enum HCLGE_MAC_SPEED { HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */ HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index ef095d9c566f..dd9a1218a7b0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ // Copyright (c) 2016-2017 Hisilicon Limited. 
#ifndef __HCLGE_MDIO_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 95ef6e1204cf..45bcb67f90fd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +/* SPDX-License-Identifier: GPL-2.0+ */ // Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HCLGE_TM_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index ef86155de9e0..2f4c81bf4169 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -150,8 +150,6 @@ enum hclgevf_states { HCLGEVF_STATE_CMD_DISABLE, }; -#define HCLGEVF_MPF_ENBALE 1 - struct hclgevf_mac { u8 media_type; u8 module_type; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c index 6e70658d50c4..db45373ea31c 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c @@ -670,13 +670,10 @@ int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages) static int ehea_is_hugepage(unsigned long pfn) { - int page_order; - if (pfn & EHEA_HUGEPAGE_PFN_MASK) return 0; - page_order = compound_order(pfn_to_page(pfn)); - if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT) + if (page_shift(pfn_to_page(pfn)) != EHEA_HUGEPAGESHIFT) return 0; return 1; diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 9e43c9ace9c2..2e40425d8a34 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2849,6 +2849,7 @@ static int emac_init_config(struct emac_instance *dev) { struct device_node *np = dev->ofdev->dev.of_node; const void *p; + int err; /* Read config from device-tree */ if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1)) @@ -2897,8 +2898,8 @@ static int emac_init_config(struct emac_instance *dev) dev->mal_burst_size = 256; /* PHY mode needs some decoding */ - dev->phy_mode = of_get_phy_mode(np); - if (dev->phy_mode < 0) + err = of_get_phy_mode(np, &dev->phy_mode); + if (err) dev->phy_mode = PHY_INTERFACE_MODE_NA; /* Check EMAC version */ diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index e9cda024cbf6..89a1b0fea158 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -171,7 +171,7 @@ struct emac_instance { struct mal_commac commac; /* PHY infos */ - int phy_mode; + phy_interface_t phy_mode; u32 phy_map; u32 phy_address; u32 phy_feat_exc; diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c index b9e821de2ac6..57a25c7a9e70 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.c +++ b/drivers/net/ethernet/ibm/emac/zmii.c @@ -78,7 +78,8 @@ static inline u32 zmii_mode_mask(int mode, int input) } } -int zmii_attach(struct platform_device *ofdev, int input, int *mode) +int zmii_attach(struct platform_device *ofdev, int input, + phy_interface_t *mode) { struct zmii_instance *dev = platform_get_drvdata(ofdev); struct zmii_regs __iomem *p = dev->base; diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h index 41d46e9b87ba..65daedc78594 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.h +++ b/drivers/net/ethernet/ibm/emac/zmii.h @@ -50,7 +50,8 @@ struct zmii_instance { int zmii_init(void); void 
zmii_exit(void); -int zmii_attach(struct platform_device *ofdev, int input, int *mode); +int zmii_attach(struct platform_device *ofdev, int input, + phy_interface_t *mode); void zmii_detach(struct platform_device *ofdev, int input); void zmii_get_mdio(struct platform_device *ofdev, int input); void zmii_put_mdio(struct platform_device *ofdev, int input); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index c5be4ebd8437..84121aab7ff1 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1011,6 +1011,29 @@ static int ibmveth_send(struct ibmveth_adapter *adapter, return 0; } +static int ibmveth_is_packet_unsupported(struct sk_buff *skb, + struct net_device *netdev) +{ + struct ethhdr *ether_header; + int ret = 0; + + ether_header = eth_hdr(skb); + + if (ether_addr_equal(ether_header->h_dest, netdev->dev_addr)) { + netdev_dbg(netdev, "veth doesn't support loopback packets, dropping packet.\n"); + netdev->stats.tx_dropped++; + ret = -EOPNOTSUPP; + } + + if (!ether_addr_equal(ether_header->h_source, netdev->dev_addr)) { + netdev_dbg(netdev, "source packet MAC address does not match veth device's, dropping packet.\n"); + netdev->stats.tx_dropped++; + ret = -EOPNOTSUPP; + } + + return ret; +} + static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) { @@ -1022,6 +1045,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, dma_addr_t dma_addr; unsigned long mss = 0; + if (ibmveth_is_packet_unsupported(skb, netdev)) + goto out; + /* veth doesn't handle frag_list, so linearize the skb. * When GRO is enabled SKB's can have frag_list. */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 86493fea56e4..416da9619928 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3565,8 +3565,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; - pr_info("%s changing MTU from %d to %d\n", - netdev->name, netdev->mtu, new_mtu); + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 032b88619054..fe7997c18a10 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6031,7 +6031,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) usleep_range(1000, 1100); /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ adapter->max_frame_size = max_frame; - e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); netdev->mtu = new_mtu; pm_runtime_get_sync(netdev->dev.parent); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index b14441944b4b..f306084ca12c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -534,6 +534,7 @@ void fm10k_iov_suspend(struct pci_dev *pdev); int fm10k_iov_resume(struct pci_dev *pdev); void fm10k_iov_disable(struct pci_dev *pdev); int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs); +void fm10k_iov_update_stats(struct fm10k_intfc *interface); s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 
glort, u16 pvid); int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac); int fm10k_ndo_set_vf_vlan(struct net_device *netdev, @@ -542,6 +543,8 @@ int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int __always_unused min_rate, int max_rate); int fm10k_ndo_get_vf_config(struct net_device *netdev, int vf_idx, struct ifla_vf_info *ivi); +int fm10k_ndo_get_vf_stats(struct net_device *netdev, + int vf_idx, struct ifla_vf_stats *stats); /* DebugFS */ #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index afe1fafd2447..8c50a128df29 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -520,6 +520,27 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs) return num_vfs; } +/** + * fm10k_iov_update_stats - Update stats for all VFs + * @interface: device private structure + * + * Updates the VF statistics for all enabled VFs. Expects to be called by + * fm10k_update_stats and assumes that locking via the __FM10K_UPDATING_STATS + * bit is already handled. + */ +void fm10k_iov_update_stats(struct fm10k_intfc *interface) +{ + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_hw *hw = &interface->hw; + int i; + + if (!iov_data) + return; + + for (i = 0; i < iov_data->num_vfs; i++) + hw->iov.ops.update_stats(hw, iov_data->vf_info[i].stats, i); +} + static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface, struct fm10k_vf_info *vf_info) { @@ -650,3 +671,30 @@ int fm10k_ndo_get_vf_config(struct net_device *netdev, return 0; } + +int fm10k_ndo_get_vf_stats(struct net_device *netdev, + int vf_idx, struct ifla_vf_stats *stats) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_hw *hw = &interface->hw; + struct fm10k_hw_stats_q *hw_stats; + u32 idx, qpp; + + /* verify SR-IOV is active and that vf idx is valid */ + if (!iov_data || vf_idx >= iov_data->num_vfs) + return -EINVAL; + + qpp = fm10k_queues_per_pool(hw); + hw_stats = iov_data->vf_info[vf_idx].stats; + + for (idx = 0; idx < qpp; idx++) { + stats->rx_packets += hw_stats[idx].rx_packets.count; + stats->tx_packets += hw_stats[idx].tx_packets.count; + stats->rx_bytes += hw_stats[idx].rx_bytes.count; + stats->tx_bytes += hw_stats[idx].tx_bytes.count; + stats->rx_dropped += hw_stats[idx].rx_drops.count; + } + + return 0; +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 2be9222510e7..17738b0a9873 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -11,7 +11,7 @@ #include "fm10k.h" -#define DRV_VERSION "0.26.1-k" +#define DRV_VERSION "0.27.1-k" #define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 09f7a246e134..68baee04dc58 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1643,6 +1643,7 @@ static const struct net_device_ops fm10k_netdev_ops = { .ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan, .ndo_set_vf_rate = fm10k_ndo_set_vf_bw, .ndo_get_vf_config = fm10k_ndo_get_vf_config, + .ndo_get_vf_stats = fm10k_ndo_get_vf_stats, .ndo_udp_tunnel_add = fm10k_udp_tunnel_add, 
.ndo_udp_tunnel_del = fm10k_udp_tunnel_del, .ndo_dfwd_add_station = fm10k_dfwd_add_station, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index bb236fa44048..d122d0087191 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -630,6 +630,9 @@ void fm10k_update_stats(struct fm10k_intfc *interface) net_stats->rx_errors = rx_errors; net_stats->rx_dropped = interface->stats.nodesc_drop.count; + /* Update VF statistics */ + fm10k_iov_update_stats(interface); + clear_bit(__FM10K_UPDATING_STATS, interface->state); } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h index 160bc5b78f99..ceb9b791f799 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ +/* Copyright(c) 2013 - 2019 Intel Corporation. */ #ifndef _FM10K_TLV_H_ #define _FM10K_TLV_H_ @@ -76,8 +76,8 @@ struct fm10k_tlv_attr { #define FM10K_TLV_ATTR_S32(id) { id, FM10K_TLV_SIGNED, 4 } #define FM10K_TLV_ATTR_S64(id) { id, FM10K_TLV_SIGNED, 8 } #define FM10K_TLV_ATTR_LE_STRUCT(id, len) { id, FM10K_TLV_LE_STRUCT, len } -#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED } -#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR } +#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED, 0 } +#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR, 0, 0 } struct fm10k_msg_data { unsigned int id; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 15ac1c7885bc..63968c5d7c5d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -581,6 +581,7 @@ struct fm10k_vf_info { * at the same offset as the mailbox */ struct fm10k_mbx_info mbx; /* PF side of VF mailbox */ + struct fm10k_hw_stats_q stats[FM10K_MAX_QUEUES_POOL]; int rate; /* Tx BW cap as defined by OS */ u16 glort; /* resource tag for this VF */ u16 sw_vid; /* Switch API assigned VLAN */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index a23f89fb33ee..aa5f1c0aa721 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -20,6 +20,8 @@ /* API version 1.7 implements additional link and PHY-specific APIs */ #define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 +/* API version 1.9 for X722 implements additional link and PHY-specific APIs */ +#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009 /* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */ #define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 8b25a6d9c81d..d4055037af89 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -29,6 +29,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_10G_BASE_T_BC: case I40E_DEV_ID_10G_B: case I40E_DEV_ID_10G_SFP: case I40E_DEV_ID_20G_KR2: @@ -1895,7 +1896,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; - if (hw->flags & 
I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { + if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE && + hw->mac.type != I40E_MAC_X722) { __le32 tmp; memcpy(&tmp, resp->link_type, sizeof(tmp)); @@ -4910,6 +4912,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, break; case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: + case I40E_DEV_ID_10G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b3d7edbb1389..1ccabeafa44c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2664,8 +2664,8 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) return -EINVAL; } - netdev_info(netdev, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) i40e_vsi_reinit_locked(vsi); @@ -12870,6 +12870,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_set_features = i40e_set_features, .ndo_set_vf_mac = i40e_ndo_set_vf_mac, .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, + .ndo_get_vf_stats = i40e_get_vf_stats, .ndo_set_vf_rate = i40e_ndo_set_vf_bw, .ndo_get_vf_config = i40e_ndo_get_vf_config, .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index a2710664d653..6a3f0fc56c3b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -4524,3 +4524,51 @@ out: clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } + +/** + * i40e_get_vf_stats - populate some stats for the VF + * @netdev: the netdev of the PF + * @vf_id: the host OS identifier (0-127) + * @vf_stats: pointer to the OS memory to be initialized + */ +int i40e_get_vf_stats(struct net_device *netdev, int vf_id, + struct ifla_vf_stats *vf_stats) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_eth_stats *stats; + struct i40e_vsi *vsi; + struct i40e_vf *vf; + + /* validate the request */ + if (i40e_validate_vf(pf, vf_id)) + return -EINVAL; + + vf = &pf->vf[vf_id]; + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d in reset. 
Try again.\n", vf_id); + return -EBUSY; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) + return -EINVAL; + + i40e_update_eth_stats(vsi); + stats = &vsi->eth_stats; + + memset(vf_stats, 0, sizeof(*vf_stats)); + + vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + + stats->rx_multicast; + vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + + stats->tx_multicast; + vf_stats->rx_bytes = stats->rx_bytes; + vf_stats->tx_bytes = stats->tx_bytes; + vf_stats->broadcast = stats->rx_broadcast; + vf_stats->multicast = stats->rx_multicast; + vf_stats->rx_dropped = stats->rx_discards; + vf_stats->tx_dropped = stats->tx_discards; + + return 0; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 1ce06240a702..631248c0981a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -138,5 +138,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); +int i40e_get_vf_stats(struct net_device *netdev, int vf_id, + struct ifla_vf_stats *vf_stats); #endif /* _I40E_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index a05dfecdd9b4..d07e1a890428 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -689,8 +689,6 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) i40e_xdp_ring_update_tail(xdp_ring); xsk_umem_consume_tx_done(xdp_ring->xsk_umem); - if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) - xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); } return !!budget && work_done; @@ -769,12 +767,8 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, i40e_update_tx_stats(tx_ring, completed_frames, total_bytes); out_xmit: - if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { - if (tx_ring->next_to_clean == tx_ring->next_to_use) - xsk_set_tx_need_wakeup(tx_ring->xsk_umem); - else - xsk_clear_tx_need_wakeup(tx_ring->xsk_umem); - } + if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) + xsk_set_tx_need_wakeup(tx_ring->xsk_umem); xmit_done = i40e_xmit_zc(tx_ring, budget); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 8f310e520b06..821987da5698 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -314,7 +314,7 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx) q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); q_vector->ring_mask |= BIT(r_idx); wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), - q_vector->rx.current_itr); + q_vector->rx.current_itr >> 1); q_vector->rx.current_itr = q_vector->rx.target_itr; } @@ -340,7 +340,7 @@ iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx) q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); q_vector->num_ringpairs++; wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), - q_vector->tx.target_itr); + q_vector->tx.target_itr >> 1); q_vector->tx.current_itr = q_vector->tx.target_itr; } diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 9edde960b4f2..7cb829132d28 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -13,9 +13,12 @@ ice-y := ice_main.o \ 
ice_nvm.o \ ice_switch.o \ ice_sched.o \ + ice_base.o \ ice_lib.o \ + ice_txrx_lib.o \ ice_txrx.o \ ice_flex_pipe.o \ ice_ethtool.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o -ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o +ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o +ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 45e100666049..8d7e8fc55585 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -29,10 +29,13 @@ #include <linux/ip.h> #include <linux/sctp.h> #include <linux/ipv6.h> +#include <linux/pkt_sched.h> #include <linux/if_bridge.h> #include <linux/ctype.h> +#include <linux/bpf.h> #include <linux/avf/virtchnl.h> #include <net/ipv6.h> +#include <net/xdp_sock.h> #include "ice_devids.h" #include "ice_type.h" #include "ice_txrx.h" @@ -42,6 +45,7 @@ #include "ice_sched.h" #include "ice_virtchnl_pf.h" #include "ice_sriov.h" +#include "ice_xsk.h" extern const char ice_drv_ver[]; #define ICE_BAR0 0 @@ -78,8 +82,7 @@ extern const char ice_drv_ver[]; #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) -#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \ - (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))) +#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD) #define ICE_UP_TABLE_TRANSLATE(val, i) \ (((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \ @@ -127,6 +130,14 @@ extern const char ice_drv_ver[]; ICE_PROMISC_VLAN_TX | \ ICE_PROMISC_VLAN_RX) +struct ice_txq_meta { + u32 q_teid; /* Tx-scheduler element identifier */ + u16 q_id; /* Entry in VSI's txq_map bitmap */ + u16 q_handle; /* Relative index of Tx queue within TC */ + u16 vsi_idx; /* VSI index that Tx queue belongs to */ + u8 tc; /* TC number that Tx queue belongs to */ +}; + struct ice_tc_info { u16 qoffset; u16 qcount_tx; @@ -169,6 +180,7 @@ enum ice_state { __ICE_NEEDS_RESTART, __ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ __ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */ + __ICE_DCBNL_DEVRESET, /* set by dcbnl devreset */ __ICE_PFR_REQ, /* set by driver and peers */ __ICE_CORER_REQ, /* set by driver and peers */ __ICE_GLOBR_REQ, /* set by driver and peers */ @@ -274,6 +286,13 @@ struct ice_vsi { u16 num_rx_desc; u16 num_tx_desc; struct ice_tc_cfg tc_cfg; + struct bpf_prog *xdp_prog; + struct ice_ring **xdp_rings; /* XDP ring array */ + u16 num_xdp_txq; /* Used XDP queues */ + u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ + struct xdp_umem **xsk_umems; + u16 num_xsk_umems_used; + u16 num_xsk_umems; } ____cacheline_internodealigned_in_smp; /* struct that defines an interrupt vector */ @@ -313,6 +332,7 @@ enum ice_pf_flags { ICE_FLAG_NO_MEDIA, ICE_FLAG_FW_LLDP_AGENT, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ + ICE_FLAG_LEGACY_RX, ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -346,6 +366,7 @@ struct ice_pf { struct work_struct serv_task; struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */ struct mutex sw_mutex; /* lock for protecting VSI alloc flow */ + struct mutex tc_mutex; /* lock to protect TC changes */ u32 msg_enable; u32 hw_csum_rx_error; u32 oicr_idx; /* Other interrupt cause MSIX vector index */ @@ -417,6 +438,37 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev) return np->vsi->back; } +static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi) +{ + return !!vsi->xdp_prog; +} + +static inline void ice_set_ring_xdp(struct ice_ring *ring) +{ + 
ring->flags |= ICE_TX_FLAGS_RING_XDP; +} + +/** + * ice_xsk_umem - get XDP UMEM bound to a ring + * @ring - ring to use + * + * Returns a pointer to xdp_umem structure if there is an UMEM present, + * NULL otherwise. + */ +static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring) +{ + struct xdp_umem **umems = ring->vsi->xsk_umems; + int qid = ring->q_index; + + if (ice_ring_is_xdp(ring)) + qid -= ring->vsi->num_xdp_txq; + + if (!umems || !umems[qid] || !ice_is_xdp_ena_vsi(ring->vsi)) + return NULL; + + return umems[qid]; +} + /** * ice_get_main_vsi - Get the PF VSI * @pf: PF instance @@ -443,14 +495,15 @@ int ice_up(struct ice_vsi *vsi); int ice_down(struct ice_vsi *vsi); int ice_vsi_cfg(struct ice_vsi *vsi); struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi); +int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog); +int ice_destroy_xdp_rings(struct ice_vsi *vsi); +int +ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags); int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); -#ifdef CONFIG_DCB -int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked); -void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked); -#endif /* CONFIG_DCB */ int ice_open(struct net_device *netdev); int ice_stop(struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 023e3d2fee5f..5421fc413f94 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -742,6 +742,10 @@ struct ice_aqc_add_elem { struct ice_aqc_txsched_elem_data generic[1]; }; +struct ice_aqc_conf_elem { + struct ice_aqc_txsched_elem_data generic[1]; +}; + struct ice_aqc_get_elem { struct ice_aqc_txsched_elem_data generic[1]; }; @@ -783,6 +787,44 @@ struct ice_aqc_port_ets_elem { __le32 tc_node_teid[8]; /* Used for response, reserved in command */ }; +/* Rate limiting profile for + * Add RL profile (indirect 0x0410) + * Query RL profile (indirect 0x0411) + * Remove RL profile (indirect 0x0415) + * These indirect commands acts on single or multiple + * RL profiles with specified data. + */ +struct ice_aqc_rl_profile { + __le16 num_profiles; + __le16 num_processed; /* Only for response. Reserved in Command. */ + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_rl_profile_elem { + u8 level; + u8 flags; +#define ICE_AQC_RL_PROFILE_TYPE_S 0x0 +#define ICE_AQC_RL_PROFILE_TYPE_M (0x3 << ICE_AQC_RL_PROFILE_TYPE_S) +#define ICE_AQC_RL_PROFILE_TYPE_CIR 0 +#define ICE_AQC_RL_PROFILE_TYPE_EIR 1 +#define ICE_AQC_RL_PROFILE_TYPE_SRL 2 +/* The following flag is used for Query RL Profile Data */ +#define ICE_AQC_RL_PROFILE_INVAL_S 0x7 +#define ICE_AQC_RL_PROFILE_INVAL_M (0x1 << ICE_AQC_RL_PROFILE_INVAL_S) + + __le16 profile_id; + __le16 max_burst_size; + __le16 rl_multiply; + __le16 wake_up_calc; + __le16 rl_encode; +}; + +struct ice_aqc_rl_profile_generic_elem { + struct ice_aqc_rl_profile_elem generic[1]; +}; + /* Query Scheduler Resource Allocation (indirect 0x0412) * This indirect command retrieves the scheduler resources allocated by * EMP Firmware to the given PF. 
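The rate-limiting profile element added above carries the profile type in the low two bits of its flags byte, selected with the ICE_AQC_RL_PROFILE_TYPE_* masks. As a rough illustration only (the helper name and argument values are hypothetical and not part of this patch; it assumes ice_adminq_cmd.h is included), a caller could encode a committed-rate (CIR) profile element like so:

static void ice_fill_cir_profile_example(struct ice_aqc_rl_profile_elem *elem,
					 u8 layer, u16 profile_id, u16 burst)
{
	/* scheduler layer this profile is meant for */
	elem->level = layer;
	/* select the committed-rate (CIR) type in the flags field */
	elem->flags = (ICE_AQC_RL_PROFILE_TYPE_CIR << ICE_AQC_RL_PROFILE_TYPE_S) &
		      ICE_AQC_RL_PROFILE_TYPE_M;
	/* remaining members are little-endian on the wire */
	elem->profile_id = cpu_to_le16(profile_id);
	elem->max_burst_size = cpu_to_le16(burst);
}

EIR and SRL profiles would be packed the same way with their respective ICE_AQC_RL_PROFILE_TYPE_* values.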
@@ -1044,6 +1086,10 @@ struct ice_aqc_get_link_status_data { #define ICE_AQ_LINK_TOPO_CONFLICT BIT(0) #define ICE_AQ_LINK_MEDIA_CONFLICT BIT(1) #define ICE_AQ_LINK_TOPO_CORRUPT BIT(2) +#define ICE_AQ_LINK_TOPO_UNREACH_PRT BIT(4) +#define ICE_AQ_LINK_TOPO_UNDRUTIL_PRT BIT(5) +#define ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA BIT(6) +#define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7) u8 reserved1; u8 link_info; #define ICE_AQ_LINK_UP BIT(0) /* Link Status */ @@ -1147,6 +1193,33 @@ struct ice_aqc_set_port_id_led { u8 rsvd[13]; }; +/* Read/Write SFF EEPROM command (indirect 0x06EE) */ +struct ice_aqc_sff_eeprom { + u8 lport_num; + u8 lport_num_valid; +#define ICE_AQC_SFF_PORT_NUM_VALID BIT(0) + __le16 i2c_bus_addr; +#define ICE_AQC_SFF_I2CBUS_7BIT_M 0x7F +#define ICE_AQC_SFF_I2CBUS_10BIT_M 0x3FF +#define ICE_AQC_SFF_I2CBUS_TYPE_M BIT(10) +#define ICE_AQC_SFF_I2CBUS_TYPE_7BIT 0 +#define ICE_AQC_SFF_I2CBUS_TYPE_10BIT ICE_AQC_SFF_I2CBUS_TYPE_M +#define ICE_AQC_SFF_SET_EEPROM_PAGE_S 11 +#define ICE_AQC_SFF_SET_EEPROM_PAGE_M (0x3 << ICE_AQC_SFF_SET_EEPROM_PAGE_S) +#define ICE_AQC_SFF_NO_PAGE_CHANGE 0 +#define ICE_AQC_SFF_SET_23_ON_MISMATCH 1 +#define ICE_AQC_SFF_SET_22_ON_MISMATCH 2 +#define ICE_AQC_SFF_IS_WRITE BIT(15) + __le16 i2c_mem_addr; + __le16 eeprom_page; +#define ICE_AQC_SFF_EEPROM_BANK_S 0 +#define ICE_AQC_SFF_EEPROM_BANK_M (0xFF << ICE_AQC_SFF_EEPROM_BANK_S) +#define ICE_AQC_SFF_EEPROM_PAGE_S 8 +#define ICE_AQC_SFF_EEPROM_PAGE_M (0xFF << ICE_AQC_SFF_EEPROM_PAGE_S) + __le32 addr_high; + __le32 addr_low; +}; + /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Update commands (indirect 0x0703) @@ -1618,6 +1691,7 @@ struct ice_aq_desc { struct ice_aqc_get_phy_caps get_phy; struct ice_aqc_set_phy_cfg set_phy; struct ice_aqc_restart_an restart_an; + struct ice_aqc_sff_eeprom read_write_sff_param; struct ice_aqc_set_port_id_led set_port_id_led; struct ice_aqc_get_sw_cfg get_sw_conf; struct ice_aqc_sw_rules sw_rules; @@ -1625,6 +1699,7 @@ struct ice_aq_desc { struct ice_aqc_sched_elem_cmd sched_elem_cmd; struct ice_aqc_query_txsched_res query_sched_res; struct ice_aqc_query_port_ets port_ets; + struct ice_aqc_rl_profile rl_profile; struct ice_aqc_nvm nvm; struct ice_aqc_nvm_checksum nvm_checksum; struct ice_aqc_pf_vf_msg virt; @@ -1726,12 +1801,15 @@ enum ice_adminq_opc { /* transmit scheduler commands */ ice_aqc_opc_get_dflt_topo = 0x0400, ice_aqc_opc_add_sched_elems = 0x0401, + ice_aqc_opc_cfg_sched_elems = 0x0403, ice_aqc_opc_get_sched_elems = 0x0404, ice_aqc_opc_suspend_sched_elems = 0x0409, ice_aqc_opc_resume_sched_elems = 0x040A, ice_aqc_opc_query_port_ets = 0x040E, ice_aqc_opc_delete_sched_elems = 0x040F, + ice_aqc_opc_add_rl_profiles = 0x0410, ice_aqc_opc_query_sched_res = 0x0412, + ice_aqc_opc_remove_rl_profiles = 0x0415, /* PHY commands */ ice_aqc_opc_get_phy_caps = 0x0600, @@ -1741,6 +1819,7 @@ enum ice_adminq_opc { ice_aqc_opc_set_event_mask = 0x0613, ice_aqc_opc_set_mac_lb = 0x0620, ice_aqc_opc_set_port_id_led = 0x06E9, + ice_aqc_opc_sff_eeprom = 0x06EE, /* NVM commands */ ice_aqc_opc_nvm_read = 0x0701, diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c new file mode 100644 index 000000000000..69d2da14fe5c --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -0,0 +1,857 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019, Intel Corporation. 
*/ + +#include "ice_base.h" +#include "ice_dcb_lib.h" + +/** + * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI + * @qs_cfg: gathered variables needed for PF->VSI queues assignment + * + * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap + */ +static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg) +{ + int offset, i; + + mutex_lock(qs_cfg->qs_mutex); + offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size, + 0, qs_cfg->q_count, 0); + if (offset >= qs_cfg->pf_map_size) { + mutex_unlock(qs_cfg->qs_mutex); + return -ENOMEM; + } + + bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count); + for (i = 0; i < qs_cfg->q_count; i++) + qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset; + mutex_unlock(qs_cfg->qs_mutex); + + return 0; +} + +/** + * __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI + * @qs_cfg: gathered variables needed for pf->vsi queues assignment + * + * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap + */ +static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg) +{ + int i, index = 0; + + mutex_lock(qs_cfg->qs_mutex); + for (i = 0; i < qs_cfg->q_count; i++) { + index = find_next_zero_bit(qs_cfg->pf_map, + qs_cfg->pf_map_size, index); + if (index >= qs_cfg->pf_map_size) + goto err_scatter; + set_bit(index, qs_cfg->pf_map); + qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index; + } + mutex_unlock(qs_cfg->qs_mutex); + + return 0; +err_scatter: + for (index = 0; index < i; index++) { + clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map); + qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0; + } + mutex_unlock(qs_cfg->qs_mutex); + + return -ENOMEM; +} + +/** + * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled + * @pf: the PF being configured + * @pf_q: the PF queue + * @ena: enable or disable state of the queue + * + * This routine will wait for the given Rx queue of the PF to reach the + * enabled or disabled state. + * Returns -ETIMEDOUT in case of failing to reach the requested state after + * multiple retries; else will return 0 in case of success. + */ +static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) +{ + int i; + + for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) { + if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) & + QRX_CTRL_QENA_STAT_M)) + return 0; + + usleep_range(20, 40); + } + + return -ETIMEDOUT; +} + +/** + * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector + * @vsi: the VSI being configured + * @v_idx: index of the vector in the VSI struct + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + */ +static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) +{ + struct ice_pf *pf = vsi->back; + struct ice_q_vector *q_vector; + + /* allocate q_vector */ + q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + q_vector->vsi = vsi; + q_vector->v_idx = v_idx; + if (vsi->type == ICE_VSI_VF) + goto out; + /* only set affinity_mask if the CPU is online */ + if (cpu_online(v_idx)) + cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + + /* This will not be called in the driver load path because the netdev + * will not be created yet. All other cases with register the NAPI + * handler here (i.e. resume, reset/rebuild, etc.) 
+ */ + if (vsi->netdev) + netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, + NAPI_POLL_WEIGHT); + +out: + /* tie q_vector and VSI together */ + vsi->q_vectors[v_idx] = q_vector; + + return 0; +} + +/** + * ice_free_q_vector - Free memory allocated for a specific interrupt vector + * @vsi: VSI having the memory freed + * @v_idx: index of the vector to be freed + */ +static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) +{ + struct ice_q_vector *q_vector; + struct ice_pf *pf = vsi->back; + struct ice_ring *ring; + + if (!vsi->q_vectors[v_idx]) { + dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n", + v_idx); + return; + } + q_vector = vsi->q_vectors[v_idx]; + + ice_for_each_ring(ring, q_vector->tx) + ring->q_vector = NULL; + ice_for_each_ring(ring, q_vector->rx) + ring->q_vector = NULL; + + /* only VSI with an associated netdev is set up with NAPI */ + if (vsi->netdev) + netif_napi_del(&q_vector->napi); + + devm_kfree(&pf->pdev->dev, q_vector); + vsi->q_vectors[v_idx] = NULL; +} + +/** + * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set + * @hw: board specific structure + */ +static void ice_cfg_itr_gran(struct ice_hw *hw) +{ + u32 regval = rd32(hw, GLINT_CTL); + + /* no need to update global register if ITR gran is already set */ + if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) && + (((regval & GLINT_CTL_ITR_GRAN_200_M) >> + GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) && + (((regval & GLINT_CTL_ITR_GRAN_100_M) >> + GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) && + (((regval & GLINT_CTL_ITR_GRAN_50_M) >> + GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) && + (((regval & GLINT_CTL_ITR_GRAN_25_M) >> + GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US)) + return; + + regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) & + GLINT_CTL_ITR_GRAN_200_M) | + ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) & + GLINT_CTL_ITR_GRAN_100_M) | + ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) & + GLINT_CTL_ITR_GRAN_50_M) | + ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) & + GLINT_CTL_ITR_GRAN_25_M); + wr32(hw, GLINT_CTL, regval); +} + +/** + * ice_calc_q_handle - calculate the queue handle + * @vsi: VSI that ring belongs to + * @ring: ring to get the absolute queue index + * @tc: traffic class number + */ +static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc) +{ + WARN_ONCE(ice_ring_is_xdp(ring) && tc, + "XDP ring can't belong to TC other than 0"); + + /* Idea here for calculation is that we subtract the number of queue + * count from TC that ring belongs to from it's absolute queue index + * and as a result we get the queue's index within TC. + */ + return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset; +} + +/** + * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance + * @ring: The Tx ring to configure + * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized + * @pf_q: queue index in the PF space + * + * Configure the Tx descriptor ring in TLAN context. 
+ */ +static void +ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) +{ + struct ice_vsi *vsi = ring->vsi; + struct ice_hw *hw = &vsi->back->hw; + + tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; + + tlan_ctx->port_num = vsi->port_info->lport; + + /* Transmit Queue Length */ + tlan_ctx->qlen = ring->count; + + ice_set_cgd_num(tlan_ctx, ring); + + /* PF number */ + tlan_ctx->pf_num = hw->pf_id; + + /* queue belongs to a specific VSI type + * VF / VM index should be programmed per vmvf_type setting: + * for vmvf_type = VF, it is VF number between 0-256 + * for vmvf_type = VM, it is VM number between 0-767 + * for PF or EMP this field should be set to zero + */ + switch (vsi->type) { + case ICE_VSI_LB: + /* fall through */ + case ICE_VSI_PF: + tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; + break; + case ICE_VSI_VF: + /* Firmware expects vmvf_num to be absolute VF ID */ + tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; + tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; + break; + default: + return; + } + + /* make sure the context is associated with the right VSI */ + tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); + + tlan_ctx->tso_ena = ICE_TX_LEGACY; + tlan_ctx->tso_qnum = pf_q; + + /* Legacy or Advanced Host Interface: + * 0: Advanced Host Interface + * 1: Legacy Host Interface + */ + tlan_ctx->legacy_int = ICE_TX_LEGACY; +} + +/** + * ice_setup_rx_ctx - Configure a receive ring context + * @ring: The Rx ring to configure + * + * Configure the Rx descriptor ring in RLAN context. + */ +int ice_setup_rx_ctx(struct ice_ring *ring) +{ + int chain_len = ICE_MAX_CHAINED_RX_BUFS; + struct ice_vsi *vsi = ring->vsi; + u32 rxdid = ICE_RXDID_FLEX_NIC; + struct ice_rlan_ctx rlan_ctx; + struct ice_hw *hw; + u32 regval; + u16 pf_q; + int err; + + hw = &vsi->back->hw; + + /* what is Rx queue number in global space of 2K Rx queues */ + pf_q = vsi->rxq_map[ring->q_index]; + + /* clear the context structure first */ + memset(&rlan_ctx, 0, sizeof(rlan_ctx)); + + ring->rx_buf_len = vsi->rx_buf_len; + + if (ring->vsi->type == ICE_VSI_PF) { + if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) + xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, + ring->q_index); + + ring->xsk_umem = ice_xsk_umem(ring); + if (ring->xsk_umem) { + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + + ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr - + XDP_PACKET_HEADROOM; + /* For AF_XDP ZC, we disallow packets to span on + * multiple buffers, thus letting us skip that + * handling in the fast-path. + */ + chain_len = 1; + ring->zca.free = ice_zca_free; + err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_ZERO_COPY, + &ring->zca); + if (err) + return err; + + dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n", + ring->q_index); + } else { + if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) + xdp_rxq_info_reg(&ring->xdp_rxq, + ring->netdev, + ring->q_index); + + err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL); + if (err) + return err; + } + } + /* Receive Queue Base Address. + * Indicates the starting address of the descriptor queue defined in + * 128 Byte units. + */ + rlan_ctx.base = ring->dma >> 7; + + rlan_ctx.qlen = ring->count; + + /* Receive Packet Data Buffer Size. + * The Packet Data Buffer Size is defined in 128 byte units. 
+ */ + rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; + + /* use 32 byte descriptors */ + rlan_ctx.dsize = 1; + + /* Strip the Ethernet CRC bytes before the packet is posted to host + * memory. + */ + rlan_ctx.crcstrip = 1; + + /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */ + rlan_ctx.l2tsel = 1; + + rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; + rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; + rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; + + /* This controls whether VLAN is stripped from inner headers + * The VLAN in the inner L2 header is stripped to the receive + * descriptor if enabled by this flag. + */ + rlan_ctx.showiv = 0; + + /* Max packet size for this queue - must not be set to a larger value + * than 5 x DBUF + */ + rlan_ctx.rxmax = min_t(u16, vsi->max_frame, + chain_len * ring->rx_buf_len); + + /* Rx queue threshold in units of 64 */ + rlan_ctx.lrxqthresh = 1; + + /* Enable Flexible Descriptors in the queue context which + * allows this driver to select a specific receive descriptor format + */ + if (vsi->type != ICE_VSI_VF) { + regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); + regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & + QRXFLXP_CNTXT_RXDID_IDX_M; + + /* increasing context priority to pick up profile ID; + * default is 0x01; setting to 0x03 to ensure profile + * is programming if prev context is of same priority + */ + regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & + QRXFLXP_CNTXT_RXDID_PRIO_M; + + wr32(hw, QRXFLXP_CNTXT(pf_q), regval); + } + + /* Absolute queue number out of 2K needs to be passed */ + err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); + if (err) { + dev_err(&vsi->back->pdev->dev, + "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", + pf_q, err); + return -EIO; + } + + if (vsi->type == ICE_VSI_VF) + return 0; + + /* configure Rx buffer alignment */ + if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) + ice_clear_ring_build_skb_ena(ring); + else + ice_set_ring_build_skb_ena(ring); + + /* init queue specific tail register */ + ring->tail = hw->hw_addr + QRX_TAIL(pf_q); + writel(0, ring->tail); + + err = ring->xsk_umem ? + ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) : + ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); + if (err) + dev_info(&vsi->back->pdev->dev, + "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n", + ring->xsk_umem ? "UMEM enabled " : "", + ring->q_index, pf_q); + + return 0; +} + +/** + * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI + * @qs_cfg: gathered variables needed for pf->vsi queues assignment + * + * This function first tries to find contiguous space. If it is not successful, + * it tries with the scatter approach. 
+ * + * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap + */ +int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg) +{ + int ret = 0; + + ret = __ice_vsi_get_qs_contig(qs_cfg); + if (ret) { + /* contig failed, so try with scatter approach */ + qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER; + qs_cfg->q_count = min_t(u16, qs_cfg->q_count, + qs_cfg->scatter_count); + ret = __ice_vsi_get_qs_sc(qs_cfg); + } + return ret; +} + +/** + * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring + * @vsi: the VSI being configured + * @ena: start or stop the Rx rings + * @rxq_idx: Rx queue index + */ +int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx) +{ + int pf_q = vsi->rxq_map[rxq_idx]; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + int ret = 0; + u32 rx_reg; + + rx_reg = rd32(hw, QRX_CTRL(pf_q)); + + /* Skip if the queue is already in the requested state */ + if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) + return 0; + + /* turn on/off the queue */ + if (ena) + rx_reg |= QRX_CTRL_QENA_REQ_M; + else + rx_reg &= ~QRX_CTRL_QENA_REQ_M; + wr32(hw, QRX_CTRL(pf_q), rx_reg); + + /* wait for the change to finish */ + ret = ice_pf_rxq_wait(pf, pf_q, ena); + if (ret) + dev_err(&pf->pdev->dev, + "VSI idx %d Rx ring %d %sable timeout\n", + vsi->idx, pf_q, (ena ? "en" : "dis")); + + return ret; +} + +/** + * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors + * @vsi: the VSI being configured + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + */ +int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + int v_idx = 0, num_q_vectors; + int err; + + if (vsi->q_vectors[0]) { + dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n", + vsi->vsi_num); + return -EEXIST; + } + + num_q_vectors = vsi->num_q_vectors; + + for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { + err = ice_vsi_alloc_q_vector(vsi, v_idx); + if (err) + goto err_out; + } + + return 0; + +err_out: + while (v_idx--) + ice_free_q_vector(vsi, v_idx); + + dev_err(&pf->pdev->dev, + "Failed to allocate %d q_vector for VSI %d, ret=%d\n", + vsi->num_q_vectors, vsi->vsi_num, err); + vsi->num_q_vectors = 0; + return err; +} + +/** + * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors + * @vsi: the VSI being configured + * + * This function maps descriptor rings to the queue-specific vectors allotted + * through the MSI-X enabling code. On a constrained vector budget, we map Tx + * and Rx rings to the vector as "efficiently" as possible. 
+ */ +void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) +{ + int q_vectors = vsi->num_q_vectors; + int tx_rings_rem, rx_rings_rem; + int v_id; + + /* initially assigning remaining rings count to VSIs num queue value */ + tx_rings_rem = vsi->num_txq; + rx_rings_rem = vsi->num_rxq; + + for (v_id = 0; v_id < q_vectors; v_id++) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_id]; + int tx_rings_per_v, rx_rings_per_v, q_id, q_base; + + /* Tx rings mapping to vector */ + tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); + q_vector->num_ring_tx = tx_rings_per_v; + q_vector->tx.ring = NULL; + q_vector->tx.itr_idx = ICE_TX_ITR; + q_base = vsi->num_txq - tx_rings_rem; + + for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { + struct ice_ring *tx_ring = vsi->tx_rings[q_id]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + } + tx_rings_rem -= tx_rings_per_v; + + /* Rx rings mapping to vector */ + rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); + q_vector->num_ring_rx = rx_rings_per_v; + q_vector->rx.ring = NULL; + q_vector->rx.itr_idx = ICE_RX_ITR; + q_base = vsi->num_rxq - rx_rings_rem; + + for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { + struct ice_ring *rx_ring = vsi->rx_rings[q_id]; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + } + rx_rings_rem -= rx_rings_per_v; + } +} + +/** + * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors + * @vsi: the VSI having memory freed + */ +void ice_vsi_free_q_vectors(struct ice_vsi *vsi) +{ + int v_idx; + + ice_for_each_q_vector(vsi, v_idx) + ice_free_q_vector(vsi, v_idx); +} + +/** + * ice_vsi_cfg_txq - Configure single Tx queue + * @vsi: the VSI that queue belongs to + * @ring: Tx ring to be configured + * @qg_buf: queue group buffer + */ +int +ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, + struct ice_aqc_add_tx_qgrp *qg_buf) +{ + struct ice_tlan_ctx tlan_ctx = { 0 }; + struct ice_aqc_add_txqs_perq *txq; + struct ice_pf *pf = vsi->back; + u8 buf_len = sizeof(*qg_buf); + enum ice_status status; + u16 pf_q; + u8 tc; + + pf_q = ring->reg_idx; + ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); + /* copy context contents into the qg_buf */ + qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); + ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, + ice_tlan_ctx_info); + + /* init queue specific tail reg. It is referred as + * transmit comm scheduler queue doorbell. + */ + ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); + + if (IS_ENABLED(CONFIG_DCB)) + tc = ring->dcb_tc; + else + tc = 0; + + /* Add unique software queue handle of the Tx queue per + * TC into the VSI Tx ring + */ + ring->q_handle = ice_calc_q_handle(vsi, ring, tc); + + status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle, + 1, qg_buf, buf_len, NULL); + if (status) { + dev_err(&pf->pdev->dev, + "Failed to set LAN Tx queue context, error: %d\n", + status); + return -ENODEV; + } + + /* Add Tx Queue TEID into the VSI Tx ring from the + * response. This will complete configuring and + * enabling the queue. 
+ */ + txq = &qg_buf->txqs[0]; + if (pf_q == le16_to_cpu(txq->txq_id)) + ring->txq_teid = le32_to_cpu(txq->q_teid); + + return 0; +} + +/** + * ice_cfg_itr - configure the initial interrupt throttle values + * @hw: pointer to the HW structure + * @q_vector: interrupt vector that's being configured + * + * Configure interrupt throttling values for the ring containers that are + * associated with the interrupt vector passed in. + */ +void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector) +{ + ice_cfg_itr_gran(hw); + + if (q_vector->num_ring_rx) { + struct ice_ring_container *rc = &q_vector->rx; + + /* if this value is set then don't overwrite with default */ + if (!rc->itr_setting) + rc->itr_setting = ICE_DFLT_RX_ITR; + + rc->target_itr = ITR_TO_REG(rc->itr_setting); + rc->next_update = jiffies + 1; + rc->current_itr = rc->target_itr; + wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), + ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); + } + + if (q_vector->num_ring_tx) { + struct ice_ring_container *rc = &q_vector->tx; + + /* if this value is set then don't overwrite with default */ + if (!rc->itr_setting) + rc->itr_setting = ICE_DFLT_TX_ITR; + + rc->target_itr = ITR_TO_REG(rc->itr_setting); + rc->next_update = jiffies + 1; + rc->current_itr = rc->target_itr; + wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), + ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); + } +} + +/** + * ice_cfg_txq_interrupt - configure interrupt on Tx queue + * @vsi: the VSI being configured + * @txq: Tx queue being mapped to MSI-X vector + * @msix_idx: MSI-X vector index within the function + * @itr_idx: ITR index of the interrupt cause + * + * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector + * within the function space. + */ +void +ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx) +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + u32 val; + + itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M; + + val = QINT_TQCTL_CAUSE_ENA_M | itr_idx | + ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M); + + wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); + if (ice_is_xdp_ena_vsi(vsi)) { + u32 xdp_txq = txq + vsi->num_xdp_txq; + + wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), + val); + } + ice_flush(hw); +} + +/** + * ice_cfg_rxq_interrupt - configure interrupt on Rx queue + * @vsi: the VSI being configured + * @rxq: Rx queue being mapped to MSI-X vector + * @msix_idx: MSI-X vector index within the function + * @itr_idx: ITR index of the interrupt cause + * + * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector + * within the function space. 
+ */ +void +ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx) +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + u32 val; + + itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M; + + val = QINT_RQCTL_CAUSE_ENA_M | itr_idx | + ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M); + + wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); + + ice_flush(hw); +} + +/** + * ice_trigger_sw_intr - trigger a software interrupt + * @hw: pointer to the HW structure + * @q_vector: interrupt vector to trigger the software interrupt for + */ +void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector) +{ + wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), + (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) | + GLINT_DYN_CTL_SWINT_TRIG_M | + GLINT_DYN_CTL_INTENA_M); +} + +/** + * ice_vsi_stop_tx_ring - Disable single Tx ring + * @vsi: the VSI being configured + * @rst_src: reset source + * @rel_vmvf_num: Relative ID of VF/VM + * @ring: Tx ring to be stopped + * @txq_meta: Meta data of Tx ring to be stopped + */ +int +ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num, struct ice_ring *ring, + struct ice_txq_meta *txq_meta) +{ + struct ice_pf *pf = vsi->back; + struct ice_q_vector *q_vector; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u32 val; + + /* clear cause_ena bit for disabled queues */ + val = rd32(hw, QINT_TQCTL(ring->reg_idx)); + val &= ~QINT_TQCTL_CAUSE_ENA_M; + wr32(hw, QINT_TQCTL(ring->reg_idx), val); + + /* software is expected to wait for 100 ns */ + ndelay(100); + + /* trigger a software interrupt for the vector + * associated to the queue to schedule NAPI handler + */ + q_vector = ring->q_vector; + if (q_vector) + ice_trigger_sw_intr(hw, q_vector); + + status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx, + txq_meta->tc, 1, &txq_meta->q_handle, + &txq_meta->q_id, &txq_meta->q_teid, rst_src, + rel_vmvf_num, NULL); + + /* if the disable queue command was exercised during an + * active reset flow, ICE_ERR_RESET_ONGOING is returned. + * This is not an error as the reset operation disables + * queues at the hardware level anyway. + */ + if (status == ICE_ERR_RESET_ONGOING) { + dev_dbg(&vsi->back->pdev->dev, + "Reset in progress. 
LAN Tx queues already disabled\n"); + } else if (status == ICE_ERR_DOES_NOT_EXIST) { + dev_dbg(&vsi->back->pdev->dev, + "LAN Tx queues do not exist, nothing to disable\n"); + } else if (status) { + dev_err(&vsi->back->pdev->dev, + "Failed to disable LAN Tx queues, error: %d\n", status); + return -ENODEV; + } + + return 0; +} + +/** + * ice_fill_txq_meta - Prepare the Tx queue's meta data + * @vsi: VSI that ring belongs to + * @ring: ring that txq_meta will be based on + * @txq_meta: a helper struct that wraps Tx queue's information + * + * Set up a helper struct that will contain all the necessary fields that + * are needed for stopping Tx queue + */ +void +ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, + struct ice_txq_meta *txq_meta) +{ + u8 tc; + + if (IS_ENABLED(CONFIG_DCB)) + tc = ring->dcb_tc; + else + tc = 0; + + txq_meta->q_id = ring->reg_idx; + txq_meta->q_teid = ring->txq_teid; + txq_meta->q_handle = ring->q_handle; + txq_meta->vsi_idx = vsi->idx; + txq_meta->tc = tc; +} diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h new file mode 100644 index 000000000000..407995e8e944 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_base.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Intel Corporation. */ + +#ifndef _ICE_BASE_H_ +#define _ICE_BASE_H_ + +#include "ice.h" + +int ice_setup_rx_ctx(struct ice_ring *ring); +int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg); +int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx); +int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi); +void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi); +void ice_vsi_free_q_vectors(struct ice_vsi *vsi); +int +ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, + struct ice_aqc_add_tx_qgrp *qg_buf); +void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector); +void +ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx); +void +ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx); +void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector); +int +ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num, struct ice_ring *ring, + struct ice_txq_meta *txq_meta); +void +ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, + struct ice_txq_meta *txq_meta); +#endif /* _ICE_BASE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 3a6b3950eb0e..36be501ae623 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -855,6 +855,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw) goto err_unroll_sched; } INIT_LIST_HEAD(&hw->agg_list); + /* Initialize max burst size */ + if (!hw->max_burst_size) + ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE); status = ice_init_fltr_mgmt_struct(hw); if (status) @@ -1067,6 +1070,72 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) } /** + * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA + * @hw: pointer to hardware structure + * @module_tlv: pointer to module TLV to return + * @module_tlv_len: pointer to module TLV length to return + * @module_type: module type requested + * + * Finds the requested sub module TLV type from the Preserved Field + * Area (PFA) and returns the TLV pointer and length. The caller can + * use these to read the variable length TLV value. 
+ */ +enum ice_status +ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + u16 module_type) +{ + enum ice_status status; + u16 pfa_len, pfa_ptr; + u16 next_tlv; + + status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n"); + return status; + } + status = ice_read_sr_word(hw, pfa_ptr, &pfa_len); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); + return status; + } + /* Starting with first TLV after PFA length, iterate through the list + * of TLVs to find the requested one. + */ + next_tlv = pfa_ptr + 1; + while (next_tlv < pfa_ptr + pfa_len) { + u16 tlv_sub_module_type; + u16 tlv_len; + + /* Read TLV type */ + status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n"); + break; + } + /* Read TLV length */ + status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); + break; + } + if (tlv_sub_module_type == module_type) { + if (tlv_len) { + *module_tlv = next_tlv; + *module_tlv_len = tlv_len; + return 0; + } + return ICE_ERR_INVAL_SIZE; + } + /* Check next TLV, i.e. current TLV pointer + length + 2 words + * (for current TLV's type and length) + */ + next_tlv = next_tlv + tlv_len + 2; + } + /* Module does not exist */ + return ICE_ERR_DOES_NOT_EXIST; +} + +/** * ice_copy_rxq_ctx_to_hw * @hw: pointer to the hardware structure * @ice_rxq_ctx: pointer to the rxq context @@ -1182,56 +1251,6 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { { 0 } }; -/** - * ice_debug_cq - * @hw: pointer to the hardware structure - * @mask: debug mask - * @desc: pointer to control queue descriptor - * @buf: pointer to command buffer - * @buf_len: max length of buf - * - * Dumps debug log about control command with descriptor contents. - */ -void -ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf, - u16 buf_len) -{ - struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc; - u16 len; - -#ifndef CONFIG_DYNAMIC_DEBUG - if (!(mask & hw->debug_mask)) - return; -#endif - - if (!desc) - return; - - len = le16_to_cpu(cq_desc->datalen); - - ice_debug(hw, mask, - "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - le16_to_cpu(cq_desc->opcode), - le16_to_cpu(cq_desc->flags), - le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval)); - ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", - le32_to_cpu(cq_desc->cookie_high), - le32_to_cpu(cq_desc->cookie_low)); - ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", - le32_to_cpu(cq_desc->params.generic.param0), - le32_to_cpu(cq_desc->params.generic.param1)); - ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", - le32_to_cpu(cq_desc->params.generic.addr_high), - le32_to_cpu(cq_desc->params.generic.addr_low)); - if (buf && cq_desc->datalen != 0) { - ice_debug(hw, mask, "Buffer:\n"); - if (buf_len < len) - len = buf_len; - - ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len); - } -} - /* FW Admin Queue command wrappers */ /* Software lock/mutex that is meant to be held while the Global Config Lock @@ -2556,6 +2575,52 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, } /** + * ice_aq_sff_eeprom + * @hw: pointer to the HW struct + * @lport: bits [7:0] = logical port, bit [8] = logical port valid + * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) + * @mem_addr: I2C offset. 
lower 8 bits for address, 8 upper bits zero padding. + * @page: QSFP page + * @set_page: set or ignore the page + * @data: pointer to data buffer to be read/written to the I2C device. + * @length: 1-16 for read, 1 for write. + * @write: 0 read, 1 for write. + * @cd: pointer to command details structure or NULL + * + * Read/Write SFF EEPROM (0x06EE) + */ +enum ice_status +ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, + u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, + bool write, struct ice_sq_cd *cd) +{ + struct ice_aqc_sff_eeprom *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (!data || (mem_addr & 0xff00)) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); + cmd = &desc.params.read_write_sff_param; + desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF); + cmd->lport_num = (u8)(lport & 0xff); + cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); + cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) & + ICE_AQC_SFF_I2CBUS_7BIT_M) | + ((set_page << + ICE_AQC_SFF_SET_EEPROM_PAGE_S) & + ICE_AQC_SFF_SET_EEPROM_PAGE_M)); + cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); + cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); + if (write) + cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE); + + status = ice_aq_send_cmd(hw, &desc, data, length, cd); + return status; +} + +/** * __ice_aq_get_set_rss_lut * @hw: pointer to the hardware structure * @vsi_id: VSI FW index @@ -3148,7 +3213,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) * @tc: TC number * @q_handle: software queue handle */ -static struct ice_q_ctx * +struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) { struct ice_vsi_ctx *vsi; @@ -3245,9 +3310,12 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, node.node_teid = buf->txqs[0].q_teid; node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; q_ctx->q_handle = q_handle; + q_ctx->q_teid = le32_to_cpu(node.node_teid); - /* add a leaf node into schduler tree queue layer */ + /* add a leaf node into scheduler tree queue layer */ status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); + if (!status) + status = ice_sched_replay_q_bw(pi, q_ctx); ena_txq_exit: mutex_unlock(&pi->sched_lock); diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index c3df92f57777..b22aa561e253 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -6,16 +6,18 @@ #include "ice.h" #include "ice_type.h" +#include "ice_nvm.h" #include "ice_flex_pipe.h" #include "ice_switch.h" #include <linux/avf/virtchnl.h> enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw); -void -ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len); enum ice_status ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); +enum ice_status +ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + u16 module_type); enum ice_status ice_check_reset(struct ice_hw *hw); enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); enum ice_status ice_create_all_ctrlq(struct ice_hw *hw); @@ -117,6 +119,10 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd); enum ice_status ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd); +enum ice_status +ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 
bus_addr, + u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, + bool write, struct ice_sq_cd *cd); enum ice_status ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, @@ -133,6 +139,8 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); +struct ice_q_ctx * +ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); void ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 2353166c654e..dd946866d7b8 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -810,6 +810,52 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) } /** + * ice_debug_cq + * @hw: pointer to the hardware structure + * @desc: pointer to control queue descriptor + * @buf: pointer to command buffer + * @buf_len: max length of buf + * + * Dumps debug log about control command with descriptor contents. + */ +static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len) +{ + struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc; + u16 len; + + if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) && + !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask)) + return; + + if (!desc) + return; + + len = le16_to_cpu(cq_desc->datalen); + + ice_debug(hw, ICE_DBG_AQ_DESC, + "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", + le16_to_cpu(cq_desc->opcode), + le16_to_cpu(cq_desc->flags), + le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval)); + ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(cq_desc->cookie_high), + le32_to_cpu(cq_desc->cookie_low)); + ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n", + le32_to_cpu(cq_desc->params.generic.param0), + le32_to_cpu(cq_desc->params.generic.param1)); + ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(cq_desc->params.generic.addr_high), + le32_to_cpu(cq_desc->params.generic.addr_low)); + if (buf && cq_desc->datalen != 0) { + ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n"); + if (buf_len < len) + len = buf_len; + + ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, len); + } +} + +/** * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ) * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue @@ -934,10 +980,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, } /* Debug desc and buffer */ - ice_debug(hw, ICE_DBG_AQ_MSG, + ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n"); - ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size); + ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size); (cq->sq.next_to_use)++; if (cq->sq.next_to_use == cq->sq.count) @@ -948,7 +994,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (ice_sq_done(hw, cq)) break; - mdelay(1); + udelay(ICE_CTL_Q_SQ_CMD_USEC); total_delay++; } while (total_delay < cq->sq_cmd_timeout); @@ -971,7 +1017,8 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, retval = le16_to_cpu(desc->retval); if (retval) { ice_debug(hw, ICE_DBG_AQ_MSG, - "Control Send Queue command completed with error 0x%x\n", + 
"Control Send Queue command 0x%04X completed with error 0x%X\n", + le16_to_cpu(desc->opcode), retval); /* strip off FW internal code */ @@ -986,7 +1033,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n"); - ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size); + ice_debug_cq(hw, (void *)desc, buf, buf_size); /* save writeback AQ if requested */ if (details->wb_desc) @@ -1075,7 +1122,8 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (flags & ICE_AQ_FLAG_ERR) { ret_code = ICE_ERR_AQ_ERROR; ice_debug(hw, ICE_DBG_AQ_MSG, - "Control Receive Queue Event received with error 0x%x\n", + "Control Receive Queue Event 0x%04X received with error 0x%X\n", + le16_to_cpu(desc->opcode), cq->rq_last_status); } memcpy(&e->desc, desc, sizeof(e->desc)); @@ -1084,10 +1132,9 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (e->msg_buf && e->msg_len) memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len); - ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n"); + ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n"); - ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf, - cq->rq_buf_size); + ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size); /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message size diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index 44945c2165d8..4df9da359135 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -31,8 +31,9 @@ enum ice_ctl_q { ICE_CTL_Q_MAILBOX, }; -/* Control Queue default settings */ -#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */ +/* Control Queue timeout settings - max delay 250ms */ +#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */ +#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */ struct ice_ctl_q_ring { void *dma_head; /* Virtual address to DMA head */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index dd7efff121bd..713e8a892e14 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -965,9 +965,9 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) { /* Get current DCBX configuration */ ret = ice_get_dcb_cfg(pi); - pi->is_sw_lldp = (hw->adminq.sq_last_status == ICE_AQ_RC_EPERM); if (ret) return ret; + pi->is_sw_lldp = false; } else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) { return ICE_ERR_NOT_READY; } @@ -975,8 +975,8 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) /* Configure the LLDP MIB change event */ if (enable_mib_change) { ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL); - if (!ret) - pi->is_sw_lldp = false; + if (ret) + pi->is_sw_lldp = true; } return ret; diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index dd47869c4ad4..1150dbd98d0b 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -2,6 +2,9 @@ /* Copyright (c) 2019, Intel Corporation. 
*/ #include "ice_dcb_lib.h" +#include "ice_dcb_nl.h" + +static void ice_pf_dcb_recfg(struct ice_pf *pf); /** * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration @@ -100,6 +103,16 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg) } /** + * ice_dcb_get_tc - Get the TC associated with the queue + * @vsi: ptr to the VSI + * @queue_index: queue number associated with VSI + */ +u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index) +{ + return vsi->tx_rings[queue_index]->dcb_tc; +} + +/** * ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC * @vsi: VSI owner of rings being updated */ @@ -138,56 +151,24 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) } /** - * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs - * @pf: pointer to the PF struct - * - * Assumed caller has already disabled all VSIs before - * calling this function. Reconfiguring DCB based on - * local_dcbx_cfg. - */ -static void ice_pf_dcb_recfg(struct ice_pf *pf) -{ - struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg; - u8 tc_map = 0; - int v, ret; - - /* Update each VSI */ - ice_for_each_vsi(pf, v) { - if (!pf->vsi[v]) - continue; - - if (pf->vsi[v]->type == ICE_VSI_PF) - tc_map = ice_dcb_get_ena_tc(dcbcfg); - else - tc_map = ICE_DFLT_TRAFFIC_CLASS; - - ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map); - if (ret) { - dev_err(&pf->pdev->dev, - "Failed to config TC for VSI index: %d\n", - pf->vsi[v]->idx); - continue; - } - - ice_vsi_map_rings_to_vectors(pf->vsi[v]); - } -} - -/** * ice_pf_dcb_cfg - Apply new DCB configuration * @pf: pointer to the PF struct * @new_cfg: DCBX config to apply * @locked: is the RTNL held */ -static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) { - struct ice_dcbx_cfg *old_cfg, *curr_cfg; struct ice_aqc_port_ets_elem buf = { 0 }; - int ret = 0; + struct ice_dcbx_cfg *old_cfg, *curr_cfg; + int ret = ICE_DCB_NO_HW_CHG; + struct ice_vsi *pf_vsi; curr_cfg = &pf->hw.port_info->local_dcbx_cfg; + /* FW does not care if change happened */ + if (!pf->hw.port_info->is_sw_lldp) + ret = ICE_DCB_HW_CHG_RST; + /* Enable DCB tagging only when more than one TC */ if (ice_dcb_get_num_tc(new_cfg) > 1) { dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n"); @@ -203,18 +184,28 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) } /* Store old config in case FW config fails */ - old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL); - memcpy(old_cfg, curr_cfg, sizeof(*old_cfg)); + old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL); + if (!old_cfg) + return -ENOMEM; + + dev_info(&pf->pdev->dev, "Commit DCB Configuration to the hardware\n"); + pf_vsi = ice_get_main_vsi(pf); + if (!pf_vsi) { + dev_dbg(&pf->pdev->dev, "PF VSI doesn't exist\n"); + ret = -EINVAL; + goto free_cfg; + } /* avoid race conditions by holding the lock while disabling and * re-enabling the VSI */ if (!locked) rtnl_lock(); - ice_pf_dis_all_vsi(pf, true); + ice_dis_vsi(pf_vsi, true); memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg)); memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec)); + memcpy(&new_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec)); /* Only send new config to HW if we are in SW LLDP mode. Otherwise, * the new config came from the HW in the first place. 
@@ -238,10 +229,11 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) ice_pf_dcb_recfg(pf); out: - ice_pf_ena_all_vsi(pf, true); + ice_ena_vsi(pf_vsi, true); if (!locked) rtnl_unlock(); - devm_kfree(&pf->pdev->dev, old_cfg); +free_cfg: + kfree(old_cfg); return ret; } @@ -437,9 +429,10 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked) /** * ice_dcb_sw_default_config - Apply a default DCB config * @pf: PF to apply config to + * @ets_willing: configure ets willing * @locked: was this function called with RTNL held */ -static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked) +static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked) { struct ice_aqc_port_ets_elem buf = { 0 }; struct ice_dcbx_cfg *dcbcfg; @@ -454,7 +447,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked) memset(dcbcfg, 0, sizeof(*dcbcfg)); memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg)); - dcbcfg->etscfg.willing = 1; + dcbcfg->etscfg.willing = ets_willing ? 1 : 0; dcbcfg->etscfg.maxtcs = hw->func_caps.common_cap.maxtc; dcbcfg->etscfg.tcbwtable[0] = 100; dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS; @@ -480,6 +473,104 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked) } /** + * ice_dcb_tc_contig - Check that TCs are contiguous + * @prio_table: pointer to priority table + * + * Check if TCs begin with TC0 and are contiguous + */ +static bool ice_dcb_tc_contig(u8 *prio_table) +{ + u8 max_tc = 0; + int i; + + for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) { + u8 cur_tc = prio_table[i]; + + if (cur_tc > max_tc) + return false; + else if (cur_tc == max_tc) + max_tc++; + } + + return true; +} + +/** + * ice_dcb_noncontig_cfg - Configure DCB for non-contiguous TCs + * @pf: pointer to the PF struct + * + * If non-contiguous TCs, then configure SW DCB with TC0 and ETS non-willing + */ +static int ice_dcb_noncontig_cfg(struct ice_pf *pf) +{ + struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg; + int ret; + + /* Configure SW DCB default with ETS non-willing */ + ret = ice_dcb_sw_dflt_cfg(pf, false, true); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to set local DCB config %d\n", ret); + return ret; + } + + /* Reconfigure with ETS willing so that FW will send LLDP MIB event */ + dcbcfg->etscfg.willing = 1; + ret = ice_set_dcb_cfg(pf->hw.port_info); + if (ret) + dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n"); + + return ret; +} + +/** + * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs + * @pf: pointer to the PF struct + * + * Assumed caller has already disabled all VSIs before + * calling this function. Reconfiguring DCB based on + * local_dcbx_cfg. 
+ */ +static void ice_pf_dcb_recfg(struct ice_pf *pf) +{ + struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg; + u8 tc_map = 0; + int v, ret; + + /* Update each VSI */ + ice_for_each_vsi(pf, v) { + if (!pf->vsi[v]) + continue; + + if (pf->vsi[v]->type == ICE_VSI_PF) { + tc_map = ice_dcb_get_ena_tc(dcbcfg); + + /* If DCBX request non-contiguous TC, then configure + * default TC + */ + if (!ice_dcb_tc_contig(dcbcfg->etscfg.prio_table)) { + tc_map = ICE_DFLT_TRAFFIC_CLASS; + ice_dcb_noncontig_cfg(pf); + } + } else { + tc_map = ICE_DFLT_TRAFFIC_CLASS; + } + + ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to config TC for VSI index: %d\n", + pf->vsi[v]->idx); + continue; + } + + ice_vsi_map_rings_to_vectors(pf->vsi[v]); + if (pf->vsi[v]->type == ICE_VSI_PF) + ice_dcbnl_set_all(pf->vsi[v]); + } +} + +/** * ice_init_pf_dcb - initialize DCB for a PF * @pf: PF to initialize DCB for * @locked: Was function called with RTNL held @@ -503,11 +594,13 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n", pf->hw.func_caps.common_cap.maxtc); if (err) { + struct ice_vsi *pf_vsi; + /* FW LLDP is disabled, activate SW DCBX/LLDP mode */ dev_info(&pf->pdev->dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n"); clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); - err = ice_dcb_sw_dflt_cfg(pf, locked); + err = ice_dcb_sw_dflt_cfg(pf, true, locked); if (err) { dev_err(&pf->pdev->dev, "Failed to set local DCB config %d\n", err); @@ -515,6 +608,19 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) goto dcb_init_err; } + /* If the FW DCBX engine is not running then Rx LLDP packets + * need to be redirected up the stack. + */ + pf_vsi = ice_get_main_vsi(pf); + if (!pf_vsi) { + dev_err(&pf->pdev->dev, + "Failed to set local DCB config\n"); + err = -EIO; + goto dcb_init_err; + } + + ice_cfg_sw_lldp(pf_vsi, false, true); + pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; return 0; } @@ -627,6 +733,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_dcbx_cfg tmp_dcbx_cfg; bool need_reconfig = false; struct ice_port_info *pi; + struct ice_vsi *pf_vsi; u8 type; int ret; @@ -686,6 +793,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg); + ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg); if (!need_reconfig) return; @@ -698,8 +806,14 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, clear_bit(ICE_FLAG_DCB_ENA, pf->flags); } + pf_vsi = ice_get_main_vsi(pf); + if (!pf_vsi) { + dev_dbg(&pf->pdev->dev, "PF VSI doesn't exist\n"); + return; + } + rtnl_lock(); - ice_pf_dis_all_vsi(pf, true); + ice_dis_vsi(pf_vsi, true); ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) { @@ -711,6 +825,6 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, /* changes in configuration update VSI */ ice_pf_dcb_recfg(pf); - ice_pf_ena_all_vsi(pf, true); + ice_ena_vsi(pf_vsi, true); rtnl_unlock(); } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index 661a6f7bca64..e90e25b7da77 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -5,14 +5,21 @@ #define _ICE_DCB_LIB_H_ #include "ice.h" +#include "ice_base.h" #include "ice_lib.h" #ifdef CONFIG_DCB -#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */ +#define ICE_TC_MAX_BW 100 /* 
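[The new ice_dcb_tc_contig() helper above accepts a priority-to-TC table only when the referenced TCs start at TC0 and are introduced in order without gaps; a non-contiguous table makes ice_pf_dcb_recfg() fall back to the default TC and an ETS-unwilling config. The self-contained userspace rendering below reproduces the same walk; CEE_DCBX_MAX_PRIO is assumed to be 8 here, and the sample tables are illustrative only.]

#include <stdbool.h>
#include <stdio.h>

#define CEE_DCBX_MAX_PRIO 8     /* assumed value of the driver constant */

/* Same walk as ice_dcb_tc_contig(): an entry equal to the running maximum
 * opens the next TC, an entry below it reuses an already-seen TC, and an
 * entry above it means a TC was skipped, i.e. the table is non-contiguous.
 */
static bool tc_contig(const unsigned char *prio_table)
{
        unsigned char max_tc = 0;
        int i;

        for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
                unsigned char cur_tc = prio_table[i];

                if (cur_tc > max_tc)
                        return false;
                else if (cur_tc == max_tc)
                        max_tc++;
        }
        return true;
}

int main(void)
{
        unsigned char ok[CEE_DCBX_MAX_PRIO]  = { 0, 0, 1, 1, 2, 2, 2, 2 };
        unsigned char bad[CEE_DCBX_MAX_PRIO] = { 0, 2, 2, 3, 3, 3, 3, 3 };

        printf("{0,0,1,1,2,2,2,2} contiguous: %d\n", tc_contig(ok));   /* 1 */
        printf("{0,2,2,3,3,3,3,3} contiguous: %d\n", tc_contig(bad));  /* 0 */
        return 0;
}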
Default Max BW percentage */ +#define ICE_DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ +#define ICE_DCB_NO_HW_CHG 1 /* DCB configuration did not change */ +#define ICE_DCB_HW_CHG 2 /* DCB configuration changed, no reset */ void ice_dcb_rebuild(struct ice_pf *pf); u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg); u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg); +u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index); +int +ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked); void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi); int ice_init_pf_dcb(struct ice_pf *pf, bool locked); void ice_update_dcb_stats(struct ice_pf *pf); @@ -41,6 +48,13 @@ static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg) return 1; } +static inline u8 +ice_dcb_get_tc(struct ice_vsi __always_unused *vsi, + int __always_unused queue_index) +{ + return 0; +} + static inline int ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked) { @@ -49,6 +63,14 @@ ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked) } static inline int +ice_pf_dcb_cfg(struct ice_pf __always_unused *pf, + struct ice_dcbx_cfg __always_unused *new_cfg, + bool __always_unused locked) +{ + return -EOPNOTSUPP; +} + +static inline int ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring, struct ice_tx_buf __always_unused *first) { diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c new file mode 100644 index 000000000000..3c90fc0a3feb --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -0,0 +1,933 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019, Intel Corporation. */ + +#include "ice.h" +#include "ice_dcb.h" +#include "ice_dcb_lib.h" +#include "ice_dcb_nl.h" +#include <net/dcbnl.h> + +#define ICE_APP_PROT_ID_ROCE 0x8915 + +/** + * ice_dcbnl_devreset - perform enough of a ifdown/ifup to sync DCBNL info + * @netdev: device associated with interface that needs reset + */ +static void ice_dcbnl_devreset(struct net_device *netdev) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + + while (ice_is_reset_in_progress(pf->state)) + usleep_range(1000, 2000); + + set_bit(__ICE_DCBNL_DEVRESET, pf->state); + dev_close(netdev); + netdev_state_change(netdev); + dev_open(netdev, NULL); + netdev_state_change(netdev); + clear_bit(__ICE_DCBNL_DEVRESET, pf->state); +} + +/** + * ice_dcbnl_getets - retrieve local ETS configuration + * @netdev: the relevant netdev + * @ets: struct to hold ETS configuration + */ +static int ice_dcbnl_getets(struct net_device *netdev, struct ieee_ets *ets) +{ + struct ice_dcbx_cfg *dcbxcfg; + struct ice_port_info *pi; + struct ice_pf *pf; + + pf = ice_netdev_to_pf(netdev); + pi = pf->hw.port_info; + dcbxcfg = &pi->local_dcbx_cfg; + + ets->willing = dcbxcfg->etscfg.willing; + ets->ets_cap = dcbxcfg->etscfg.maxtcs; + ets->cbs = dcbxcfg->etscfg.cbs; + memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_rx_bw)); + memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, dcbxcfg->etscfg.prio_table, sizeof(ets->prio_tc)); + memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable, + sizeof(ets->tc_reco_bw)); + memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable, + sizeof(ets->tc_reco_tsa)); + memcpy(ets->reco_prio_tc, dcbxcfg->etscfg.prio_table, + sizeof(ets->reco_prio_tc)); + + return 0; +} + +/** + * ice_dcbnl_setets - set IEEE ETS configuration + * 
@netdev: pointer to relevant netdev + * @ets: struct to hold ETS configuration + */ +static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_dcbx_cfg *new_cfg; + int bwcfg = 0, bwrec = 0; + int err, i, max_tc = 0; + + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + + mutex_lock(&pf->tc_mutex); + + new_cfg->etscfg.willing = ets->willing; + new_cfg->etscfg.cbs = ets->cbs; + ice_for_each_traffic_class(i) { + new_cfg->etscfg.tcbwtable[i] = ets->tc_tx_bw[i]; + bwcfg += ets->tc_tx_bw[i]; + new_cfg->etscfg.tsatable[i] = ets->tc_tsa[i]; + new_cfg->etscfg.prio_table[i] = ets->prio_tc[i]; + if (ets->prio_tc[i] > max_tc) + max_tc = ets->prio_tc[i]; + new_cfg->etsrec.tcbwtable[i] = ets->tc_reco_bw[i]; + bwrec += ets->tc_reco_bw[i]; + new_cfg->etsrec.tsatable[i] = ets->tc_reco_tsa[i]; + new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i]; + } + + /* max_tc is a 1-8 value count of number of TC's, not a 0-7 value + * for the TC's index number. Add one to value if not zero, and + * for zero set it to the FW's default value + */ + if (max_tc) + max_tc++; + else + max_tc = IEEE_8021QAZ_MAX_TCS; + + new_cfg->etscfg.maxtcs = max_tc; + + if (!bwcfg) + new_cfg->etscfg.tcbwtable[0] = 100; + + if (!bwrec) + new_cfg->etsrec.tcbwtable[0] = 100; + + err = ice_pf_dcb_cfg(pf, new_cfg, true); + /* return of zero indicates new cfg applied */ + if (err == ICE_DCB_HW_CHG_RST) + ice_dcbnl_devreset(netdev); + if (err == ICE_DCB_NO_HW_CHG) + err = ICE_DCB_HW_CHG_RST; + + mutex_unlock(&pf->tc_mutex); + return err; +} + +/** + * ice_dcbnl_getnumtcs - Get max number of traffic classes supported + * @dev: pointer to netdev struct + * @tcid: TC ID + * @num: total number of TCs supported by the adapter + * + * Return the total number of TCs supported + */ +static int +ice_dcbnl_getnumtcs(struct net_device *dev, int __always_unused tcid, u8 *num) +{ + struct ice_pf *pf = ice_netdev_to_pf(dev); + + if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) + return -EINVAL; + + *num = IEEE_8021QAZ_MAX_TCS; + return 0; +} + +/** + * ice_dcbnl_getdcbx - retrieve current DCBX capability + * @netdev: pointer to the netdev struct + */ +static u8 ice_dcbnl_getdcbx(struct net_device *netdev) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + + return pf->dcbx_cap; +} + +/** + * ice_dcbnl_setdcbx - set required DCBX capability + * @netdev: the corresponding netdev + * @mode: required mode + */ +static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + + /* No support for LLD_MANAGED modes or CEE+IEEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + !(mode & DCB_CAP_DCBX_HOST)) + return ICE_DCB_NO_HW_CHG; + + /* Already set to the given mode no change */ + if (mode == pf->dcbx_cap) + return ICE_DCB_NO_HW_CHG; + + pf->dcbx_cap = mode; + if (mode & DCB_CAP_DCBX_VER_CEE) + pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE; + else + pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE; + + dev_info(&pf->pdev->dev, "DCBx mode = 0x%x\n", mode); + return ICE_DCB_HW_CHG_RST; +} + +/** + * ice_dcbnl_get_perm_hw_addr - MAC address used by DCBX + * @netdev: pointer to netdev struct + * @perm_addr: buffer to return permanent MAC address + */ +static void ice_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 
*perm_addr) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_port_info *pi = pf->hw.port_info; + int i, j; + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + + for (i = 0; i < netdev->addr_len; i++) + perm_addr[i] = pi->mac.perm_addr[i]; + + for (j = 0; j < netdev->addr_len; j++, i++) + perm_addr[i] = pi->mac.perm_addr[j]; +} + +/** + * ice_get_pfc_delay - Retrieve PFC Link Delay + * @hw: pointer to HW struct + * @delay: holds the PFC Link Delay value + */ +static void ice_get_pfc_delay(struct ice_hw *hw, u16 *delay) +{ + u32 val; + + val = rd32(hw, PRTDCB_GENC); + *delay = (u16)((val & PRTDCB_GENC_PFCLDA_M) >> PRTDCB_GENC_PFCLDA_S); +} + +/** + * ice_dcbnl_getpfc - retrieve local IEEE PFC config + * @netdev: pointer to netdev struct + * @pfc: struct to hold PFC info + */ +static int ice_dcbnl_getpfc(struct net_device *netdev, struct ieee_pfc *pfc) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_port_info *pi = pf->hw.port_info; + struct ice_dcbx_cfg *dcbxcfg; + int i; + + dcbxcfg = &pi->local_dcbx_cfg; + pfc->pfc_cap = dcbxcfg->pfc.pfccap; + pfc->pfc_en = dcbxcfg->pfc.pfcena; + pfc->mbc = dcbxcfg->pfc.mbc; + ice_get_pfc_delay(&pf->hw, &pfc->delay); + + ice_for_each_traffic_class(i) { + pfc->requests[i] = pf->stats.priority_xoff_tx[i]; + pfc->indications[i] = pf->stats.priority_xoff_rx[i]; + } + + return 0; +} + +/** + * ice_dcbnl_setpfc - set local IEEE PFC config + * @netdev: pointer to relevant netdev + * @pfc: pointer to struct holding PFC config + */ +static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_dcbx_cfg *new_cfg; + int err; + + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + mutex_lock(&pf->tc_mutex); + + new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + + if (pfc->pfc_cap) + new_cfg->pfc.pfccap = pfc->pfc_cap; + else + new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc; + + new_cfg->pfc.pfcena = pfc->pfc_en; + + err = ice_pf_dcb_cfg(pf, new_cfg, true); + if (err == ICE_DCB_HW_CHG_RST) + ice_dcbnl_devreset(netdev); + if (err == ICE_DCB_NO_HW_CHG) + err = ICE_DCB_HW_CHG_RST; + mutex_unlock(&pf->tc_mutex); + return err; +} + +/** + * ice_dcbnl_get_pfc_cfg - Get CEE PFC config + * @netdev: pointer to netdev struct + * @prio: corresponding user priority + * @setting: the PFC setting for given priority + */ +static void +ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_port_info *pi = pf->hw.port_info; + + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + if (prio >= ICE_MAX_USER_PRIORITY) + return; + + *setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1; + dev_dbg(&pf->pdev->dev, + "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n", + prio, *setting, pi->local_dcbx_cfg.pfc.pfcena); +} + +/** + * ice_dcbnl_set_pfc_cfg - Set CEE PFC config + * @netdev: the corresponding netdev + * @prio: User Priority + * @set: PFC setting to apply + */ +static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_dcbx_cfg *new_cfg; + + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + if (prio >= ICE_MAX_USER_PRIORITY) + return; + + new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + + new_cfg->pfc.pfccap = 
pf->hw.func_caps.common_cap.maxtc; + if (set) + new_cfg->pfc.pfcena |= BIT(prio); + else + new_cfg->pfc.pfcena &= ~BIT(prio); + + dev_dbg(&pf->pdev->dev, "Set PFC config UP:%d set:%d pfcena:0x%x\n", + prio, set, new_cfg->pfc.pfcena); +} + +/** + * ice_dcbnl_getpfcstate - get CEE PFC mode + * @netdev: pointer to netdev struct + */ +static u8 ice_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_port_info *pi = pf->hw.port_info; + + /* Return enabled if any UP enabled for PFC */ + if (pi->local_dcbx_cfg.pfc.pfcena) + return 1; + + return 0; +} + +/** + * ice_dcbnl_getstate - get DCB enabled state + * @netdev: pointer to netdev struct + */ +static u8 ice_dcbnl_getstate(struct net_device *netdev) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + u8 state = 0; + + state = test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); + + dev_dbg(&pf->pdev->dev, "DCB enabled state = %d\n", state); + return state; +} + +/** + * ice_dcbnl_setstate - Set CEE DCB state + * @netdev: pointer to relevant netdev + * @state: state value to set + */ +static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return ICE_DCB_NO_HW_CHG; + + /* Nothing to do */ + if (!!state == test_bit(ICE_FLAG_DCB_ENA, pf->flags)) + return ICE_DCB_NO_HW_CHG; + + if (state) { + set_bit(ICE_FLAG_DCB_ENA, pf->flags); + memcpy(&pf->hw.port_info->desired_dcbx_cfg, + &pf->hw.port_info->local_dcbx_cfg, + sizeof(struct ice_dcbx_cfg)); + } else { + clear_bit(ICE_FLAG_DCB_ENA, pf->flags); + } + + return ICE_DCB_HW_CHG; +} + +/** + * ice_dcbnl_get_pg_tc_cfg_tx - get CEE PG Tx config + * @netdev: pointer to netdev struct + * @prio: the corresponding user priority + * @prio_type: traffic priority type + * @pgid: the BW group ID the traffic class belongs to + * @bw_pct: BW percentage for the corresponding BWG + * @up_map: prio mapped to corresponding TC + */ +static void +ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio, + u8 __always_unused *prio_type, u8 *pgid, + u8 __always_unused *bw_pct, + u8 __always_unused *up_map) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_port_info *pi = pf->hw.port_info; + + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + if (prio >= ICE_MAX_USER_PRIORITY) + return; + + *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio]; + dev_dbg(&pf->pdev->dev, + "Get PG config prio=%d tc=%d\n", prio, *pgid); +} + +/** + * ice_dcbnl_set_pg_tc_cfg_tx - set CEE PG Tx config + * @netdev: pointer to relevant netdev + * @tc: the corresponding traffic class + * @prio_type: the traffic priority type + * @bwg_id: the BW group ID the TC belongs to + * @bw_pct: the BW perventage for the BWG + * @up_map: prio mapped to corresponding TC + */ +static void +ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 __always_unused prio_type, + u8 __always_unused bwg_id, + u8 __always_unused bw_pct, u8 up_map) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_dcbx_cfg *new_cfg; + int i; + + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + if (tc >= ICE_MAX_TRAFFIC_CLASS) + return; + + new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + + /* prio_type, bwg_id and bw_pct per UP are not supported */ + + ice_for_each_traffic_class(i) { + if (up_map & BIT(i)) + 
new_cfg->etscfg.prio_table[i] = tc; + } + new_cfg->etscfg.tsatable[tc] = ICE_IEEE_TSA_ETS; +} + +/** + * ice_dcbnl_get_pg_bwg_cfg_tx - Get CEE PGBW config + * @netdev: pointer to the netdev struct + * @pgid: corresponding traffic class + * @bw_pct: the BW percentage for the corresponding TC + */ +static void +ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_port_info *pi = pf->hw.port_info; + + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + if (pgid >= ICE_MAX_TRAFFIC_CLASS) + return; + + *bw_pct = pi->local_dcbx_cfg.etscfg.tcbwtable[pgid]; + dev_dbg(&pf->pdev->dev, "Get PG BW config tc=%d bw_pct=%d\n", + pgid, *bw_pct); +} + +/** + * ice_dcbnl_set_pg_bwg_cfg_tx - set CEE PG Tx BW config + * @netdev: the corresponding netdev + * @pgid: Correspongind traffic class + * @bw_pct: the BW percentage for the specified TC + */ +static void +ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_dcbx_cfg *new_cfg; + + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + if (pgid >= ICE_MAX_TRAFFIC_CLASS) + return; + + new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + + new_cfg->etscfg.tcbwtable[pgid] = bw_pct; +} + +/** + * ice_dcbnl_get_pg_tc_cfg_rx - Get CEE PG Rx config + * @netdev: pointer to netdev struct + * @prio: the corresponding user priority + * @prio_type: the traffic priority type + * @pgid: the PG ID + * @bw_pct: the BW percentage for the corresponding BWG + * @up_map: prio mapped to corresponding TC + */ +static void +ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, + u8 __always_unused *prio_type, u8 *pgid, + u8 __always_unused *bw_pct, + u8 __always_unused *up_map) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_port_info *pi = pf->hw.port_info; + + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + if (prio >= ICE_MAX_USER_PRIORITY) + return; + + *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio]; +} + +/** + * ice_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config + * @netdev: pointer to netdev struct + * @pgid: the corresponding traffic class + * @bw_pct: the BW percentage for the corresponding TC + */ +static void +ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid, + u8 *bw_pct) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return; + + *bw_pct = 0; +} + +/** + * ice_dcbnl_get_cap - Get DCBX capabilities of adapter + * @netdev: pointer to netdev struct + * @capid: the capability type + * @cap: the capability value + */ +static u8 ice_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + + if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))) + return ICE_DCB_NO_HW_CHG; + + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_GSP: + *cap = false; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = pf->dcbx_cap; + break; + default: + *cap = false; + break; + 
} + + dev_dbg(&pf->pdev->dev, "DCBX Get Capability cap=%d capval=0x%x\n", + capid, *cap); + return 0; +} + +/** + * ice_dcbnl_getapp - get CEE APP + * @netdev: pointer to netdev struct + * @idtype: the App selector + * @id: the App ethtype or port number + */ +static int ice_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct dcb_app app = { + .selector = idtype, + .protocol = id, + }; + + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return -EINVAL; + + return dcb_getapp(netdev, &app); +} + +/** + * ice_dcbnl_find_app - Search for APP in given DCB config + * @cfg: struct to hold DCBX config + * @app: struct to hold app data to look for + */ +static bool +ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg, + struct ice_dcb_app_priority_table *app) +{ + int i; + + for (i = 0; i < cfg->numapps; i++) { + if (app->selector == cfg->app[i].selector && + app->prot_id == cfg->app[i].prot_id && + app->priority == cfg->app[i].priority) + return true; + } + + return false; +} + +/** + * ice_dcbnl_setapp - set local IEEE App config + * @netdev: relevant netdev struct + * @app: struct to hold app config info + */ +static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_dcb_app_priority_table new_app; + struct ice_dcbx_cfg *old_cfg, *new_cfg; + int ret; + + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + mutex_lock(&pf->tc_mutex); + + new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + + old_cfg = &pf->hw.port_info->local_dcbx_cfg; + + if (old_cfg->numapps == ICE_DCBX_MAX_APPS) { + ret = -EINVAL; + goto setapp_out; + } + + ret = dcb_ieee_setapp(netdev, app); + if (ret) + goto setapp_out; + + new_app.selector = app->selector; + new_app.prot_id = app->protocol; + new_app.priority = app->priority; + if (ice_dcbnl_find_app(old_cfg, &new_app)) { + ret = 0; + goto setapp_out; + } + + new_cfg->app[new_cfg->numapps++] = new_app; + ret = ice_pf_dcb_cfg(pf, new_cfg, true); + /* return of zero indicates new cfg applied */ + if (ret == ICE_DCB_HW_CHG_RST) + ice_dcbnl_devreset(netdev); + if (ret == ICE_DCB_NO_HW_CHG) + ret = ICE_DCB_HW_CHG_RST; + +setapp_out: + mutex_unlock(&pf->tc_mutex); + return ret; +} + +/** + * ice_dcbnl_delapp - Delete local IEEE App config + * @netdev: relevant netdev + * @app: struct to hold app too delete + * + * Will not delete first application required by the FW + */ +static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_dcbx_cfg *old_cfg, *new_cfg; + int i, j, ret = 0; + + if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) + return -EINVAL; + + mutex_lock(&pf->tc_mutex); + ret = dcb_ieee_delapp(netdev, app); + if (ret) + goto delapp_out; + + old_cfg = &pf->hw.port_info->local_dcbx_cfg; + + if (old_cfg->numapps == 1) + goto delapp_out; + + new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + + for (i = 1; i < new_cfg->numapps; i++) { + if (app->selector == new_cfg->app[i].selector && + app->protocol == new_cfg->app[i].prot_id && + app->priority == new_cfg->app[i].priority) { + new_cfg->app[i].selector = 0; + new_cfg->app[i].prot_id = 0; + new_cfg->app[i].priority = 0; + break; + } + } + + /* Did not find DCB App */ + if (i == new_cfg->numapps) { + ret = -EINVAL; + goto delapp_out; + } + + new_cfg->numapps--; + + for (j = i; j < new_cfg->numapps; j++) { + 
new_cfg->app[i].selector = old_cfg->app[j + 1].selector; + new_cfg->app[i].prot_id = old_cfg->app[j + 1].prot_id; + new_cfg->app[i].priority = old_cfg->app[j + 1].priority; + } + + ret = ice_pf_dcb_cfg(pf, new_cfg, true); + /* return of zero indicates new cfg applied */ + if (ret == ICE_DCB_HW_CHG_RST) + ice_dcbnl_devreset(netdev); + if (ret == ICE_DCB_NO_HW_CHG) + ret = ICE_DCB_HW_CHG_RST; + +delapp_out: + mutex_unlock(&pf->tc_mutex); + return ret; +} + +/** + * ice_dcbnl_cee_set_all - Commit CEE DCB settings to HW + * @netdev: the corresponding netdev + */ +static u8 ice_dcbnl_cee_set_all(struct net_device *netdev) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_dcbx_cfg *new_cfg; + int err; + + if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || + !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return ICE_DCB_NO_HW_CHG; + + new_cfg = &pf->hw.port_info->desired_dcbx_cfg; + + mutex_lock(&pf->tc_mutex); + + err = ice_pf_dcb_cfg(pf, new_cfg, true); + + mutex_unlock(&pf->tc_mutex); + return (err != ICE_DCB_HW_CHG_RST) ? ICE_DCB_NO_HW_CHG : err; +} + +static const struct dcbnl_rtnl_ops dcbnl_ops = { + /* IEEE 802.1Qaz std */ + .ieee_getets = ice_dcbnl_getets, + .ieee_setets = ice_dcbnl_setets, + .ieee_getpfc = ice_dcbnl_getpfc, + .ieee_setpfc = ice_dcbnl_setpfc, + .ieee_setapp = ice_dcbnl_setapp, + .ieee_delapp = ice_dcbnl_delapp, + + /* CEE std */ + .getstate = ice_dcbnl_getstate, + .setstate = ice_dcbnl_setstate, + .getpermhwaddr = ice_dcbnl_get_perm_hw_addr, + .setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx, + .setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx, + .getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx, + .getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx, + .getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx, + .getpgbwgcfgrx = ice_dcbnl_get_pg_bwg_cfg_rx, + .setpfccfg = ice_dcbnl_set_pfc_cfg, + .getpfccfg = ice_dcbnl_get_pfc_cfg, + .setall = ice_dcbnl_cee_set_all, + .getcap = ice_dcbnl_get_cap, + .getnumtcs = ice_dcbnl_getnumtcs, + .getpfcstate = ice_dcbnl_getpfcstate, + .getapp = ice_dcbnl_getapp, + + /* DCBX configuration */ + .getdcbx = ice_dcbnl_getdcbx, + .setdcbx = ice_dcbnl_setdcbx, +}; + +/** + * ice_dcbnl_set_all - set all the apps and ieee data from DCBX config + * @vsi: pointer to VSI struct + */ +void ice_dcbnl_set_all(struct ice_vsi *vsi) +{ + struct net_device *netdev = vsi->netdev; + struct ice_dcbx_cfg *dcbxcfg; + struct ice_port_info *pi; + struct dcb_app sapp; + struct ice_pf *pf; + int i; + + if (!netdev) + return; + + pf = ice_netdev_to_pf(netdev); + pi = pf->hw.port_info; + + /* SW DCB taken care of by SW Default Config */ + if (pf->dcbx_cap & DCB_CAP_DCBX_HOST) + return; + + /* DCB not enabled */ + if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags)) + return; + + dcbxcfg = &pi->local_dcbx_cfg; + + for (i = 0; i < dcbxcfg->numapps; i++) { + u8 prio, tc_map; + + prio = dcbxcfg->app[i].priority; + tc_map = BIT(dcbxcfg->etscfg.prio_table[prio]); + + /* Add APP only if the TC is enabled for this VSI */ + if (tc_map & vsi->tc_cfg.ena_tc) { + sapp.selector = dcbxcfg->app[i].selector; + sapp.protocol = dcbxcfg->app[i].prot_id; + sapp.priority = prio; + dcb_ieee_setapp(netdev, &sapp); + } + } + /* Notify user-space of the changes */ + dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0); +} + +/** + * ice_dcbnl_vsi_del_app - Delete APP on all VSIs + * @vsi: pointer to the main VSI + * @app: APP to delete + * + * Delete given APP from all the VSIs for given PF + */ +static void +ice_dcbnl_vsi_del_app(struct ice_vsi *vsi, + struct ice_dcb_app_priority_table *app) +{ + struct dcb_app sapp; + 
int err; + + sapp.selector = app->selector; + sapp.protocol = app->prot_id; + sapp.priority = app->priority; + err = ice_dcbnl_delapp(vsi->netdev, &sapp); + dev_dbg(&vsi->back->pdev->dev, + "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n", + vsi->idx, err, app->selector, app->prot_id, app->priority); +} + +/** + * ice_dcbnl_flush_apps - Delete all removed APPs + * @pf: the corresponding PF + * @old_cfg: old DCBX configuration data + * @new_cfg: new DCBX configuration data + * + * Find and delete all APPS that are not present in the passed + * DCB configuration + */ +void +ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, + struct ice_dcbx_cfg *new_cfg) +{ + struct ice_vsi *main_vsi = ice_get_main_vsi(pf); + int i; + + if (!main_vsi) + return; + + for (i = 0; i < old_cfg->numapps; i++) { + struct ice_dcb_app_priority_table app = old_cfg->app[i]; + + /* The APP is not available anymore delete it */ + if (!ice_dcbnl_find_app(new_cfg, &app)) + ice_dcbnl_vsi_del_app(main_vsi, &app); + } +} + +/** + * ice_dcbnl_setup - setup DCBNL + * @vsi: VSI to get associated netdev from + */ +void ice_dcbnl_setup(struct ice_vsi *vsi) +{ + struct net_device *netdev = vsi->netdev; + struct ice_pf *pf; + + pf = ice_netdev_to_pf(netdev); + if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) + return; + + netdev->dcbnl_ops = &dcbnl_ops; + ice_dcbnl_set_all(vsi); +} diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.h b/drivers/net/ethernet/intel/ice/ice_dcb_nl.h new file mode 100644 index 000000000000..6c630a362293 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Intel Corporation. */ + +#ifndef _ICE_DCB_NL_H_ +#define _ICE_DCB_NL_H_ + +#ifdef CONFIG_DCB +void ice_dcbnl_setup(struct ice_vsi *vsi); +void ice_dcbnl_set_all(struct ice_vsi *vsi); +void +ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, + struct ice_dcbx_cfg *new_cfg); +#else +#define ice_dcbnl_setup(vsi) do {} while (0) +#define ice_dcbnl_set_all(vsi) do {} while (0) +#define ice_dcbnl_flush_apps(pf, old_cfg, new_cfg) do {} while (0) +#endif /* CONFIG_DCB */ + +#endif /* _ICE_DCB_NL_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 7e23034df955..1f00091f7906 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -156,6 +156,7 @@ struct ice_priv_flag { static const struct ice_priv_flag ice_gstrings_priv_flags[] = { ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA), ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT), + ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX), }; #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) @@ -623,7 +624,7 @@ static int ice_lbtest_receive_frames(struct ice_ring *rx_ring) continue; rx_buf = &rx_ring->rx_buf[i]; - received_buf = page_address(rx_buf->page); + received_buf = page_address(rx_buf->page) + rx_buf->page_offset; if (ice_lbtest_check_frame(received_buf)) valid_frames++; @@ -1205,11 +1206,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) status = ice_init_pf_dcb(pf, true); if (status) dev_warn(&pf->pdev->dev, "Fail to init DCB\n"); - - /* Forward LLDP packets to default VSI so that they - * are passed up the stack - */ - ice_cfg_sw_lldp(vsi, false, true); } else { enum ice_status status; bool dcbx_agent_status; @@ -1256,6 +1252,11 @@ static int ice_set_priv_flags(struct net_device 
*netdev, u32 flags) "Fail to enable MIB change events\n"); } } + if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) { + /* down and up VSI so that changes of Rx cfg are reflected. */ + ice_down(vsi); + ice_up(vsi); + } clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); return ret; } @@ -2577,6 +2578,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ice_ring *tx_rings = NULL, *rx_rings = NULL; struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_ring *xdp_rings = NULL; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; int i, timeout = 50, err = 0; @@ -2611,6 +2613,13 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) return 0; } + /* If there is a AF_XDP UMEM attached to any of Rx rings, + * disallow changing the number of descriptors -- regardless + * if the netdev is running or not. + */ + if (ice_xsk_any_rx_ring_ena(vsi)) + return -EBUSY; + while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { timeout--; if (!timeout) @@ -2624,6 +2633,11 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) vsi->tx_rings[i]->count = new_tx_cnt; for (i = 0; i < vsi->alloc_rxq; i++) vsi->rx_rings[i]->count = new_rx_cnt; + if (ice_is_xdp_ena_vsi(vsi)) + for (i = 0; i < vsi->num_xdp_txq; i++) + vsi->xdp_rings[i]->count = new_tx_cnt; + vsi->num_tx_desc = new_tx_cnt; + vsi->num_rx_desc = new_rx_cnt; netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n"); goto done; } @@ -2635,14 +2649,14 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n", vsi->tx_rings[0]->count, new_tx_cnt); - tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, + tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*tx_rings), GFP_KERNEL); if (!tx_rings) { err = -ENOMEM; goto done; } - for (i = 0; i < vsi->alloc_txq; i++) { + ice_for_each_txq(vsi, i) { /* clone ring and setup updated count */ tx_rings[i] = *vsi->tx_rings[i]; tx_rings[i].count = new_tx_cnt; @@ -2650,15 +2664,43 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) tx_rings[i].tx_buf = NULL; err = ice_setup_tx_ring(&tx_rings[i]); if (err) { - while (i) { - i--; + while (i--) ice_clean_tx_ring(&tx_rings[i]); - } devm_kfree(&pf->pdev->dev, tx_rings); goto done; } } + if (!ice_is_xdp_ena_vsi(vsi)) + goto process_rx; + + /* alloc updated XDP resources */ + netdev_info(netdev, "Changing XDP descriptor count from %d to %d\n", + vsi->xdp_rings[0]->count, new_tx_cnt); + + xdp_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_xdp_txq, + sizeof(*xdp_rings), GFP_KERNEL); + if (!xdp_rings) { + err = -ENOMEM; + goto free_tx; + } + + for (i = 0; i < vsi->num_xdp_txq; i++) { + /* clone ring and setup updated count */ + xdp_rings[i] = *vsi->xdp_rings[i]; + xdp_rings[i].count = new_tx_cnt; + xdp_rings[i].desc = NULL; + xdp_rings[i].tx_buf = NULL; + err = ice_setup_tx_ring(&xdp_rings[i]); + if (err) { + while (i--) + ice_clean_tx_ring(&xdp_rings[i]); + devm_kfree(&pf->pdev->dev, xdp_rings); + goto free_tx; + } + ice_set_ring_xdp(&xdp_rings[i]); + } + process_rx: if (new_rx_cnt == vsi->rx_rings[0]->count) goto process_link; @@ -2667,14 +2709,14 @@ process_rx: netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n", vsi->rx_rings[0]->count, new_rx_cnt); - rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, + rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_rxq, sizeof(*rx_rings), GFP_KERNEL); if (!rx_rings) { 
err = -ENOMEM; goto done; } - for (i = 0; i < vsi->alloc_rxq; i++) { + ice_for_each_rxq(vsi, i) { /* clone ring and setup updated count */ rx_rings[i] = *vsi->rx_rings[i]; rx_rings[i].count = new_rx_cnt; @@ -2712,7 +2754,7 @@ process_link: ice_down(vsi); if (tx_rings) { - for (i = 0; i < vsi->alloc_txq; i++) { + ice_for_each_txq(vsi, i) { ice_free_tx_ring(vsi->tx_rings[i]); *vsi->tx_rings[i] = tx_rings[i]; } @@ -2720,7 +2762,7 @@ process_link: } if (rx_rings) { - for (i = 0; i < vsi->alloc_rxq; i++) { + ice_for_each_rxq(vsi, i) { ice_free_rx_ring(vsi->rx_rings[i]); /* copy the real tail offset */ rx_rings[i].tail = vsi->rx_rings[i]->tail; @@ -2737,6 +2779,16 @@ process_link: devm_kfree(&pf->pdev->dev, rx_rings); } + if (xdp_rings) { + for (i = 0; i < vsi->num_xdp_txq; i++) { + ice_free_tx_ring(vsi->xdp_rings[i]); + *vsi->xdp_rings[i] = xdp_rings[i]; + } + devm_kfree(&pf->pdev->dev, xdp_rings); + } + + vsi->num_tx_desc = new_tx_cnt; + vsi->num_rx_desc = new_rx_cnt; ice_up(vsi); } goto done; @@ -2744,7 +2796,7 @@ process_link: free_tx: /* error cleanup if the Rx allocations failed after getting Tx */ if (tx_rings) { - for (i = 0; i < vsi->alloc_txq; i++) + ice_for_each_txq(vsi, i) ice_free_tx_ring(&tx_rings[i]); devm_kfree(&pf->pdev->dev, tx_rings); } @@ -3398,6 +3450,151 @@ ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num, return __ice_set_coalesce(netdev, ec, q_num); } +#define ICE_I2C_EEPROM_DEV_ADDR 0xA0 +#define ICE_I2C_EEPROM_DEV_ADDR2 0xA2 +#define ICE_MODULE_TYPE_SFP 0x03 +#define ICE_MODULE_TYPE_QSFP_PLUS 0x0D +#define ICE_MODULE_TYPE_QSFP28 0x11 +#define ICE_MODULE_SFF_ADDR_MODE 0x04 +#define ICE_MODULE_SFF_DIAG_CAPAB 0x40 +#define ICE_MODULE_REVISION_ADDR 0x01 +#define ICE_MODULE_SFF_8472_COMP 0x5E +#define ICE_MODULE_SFF_8472_SWAP 0x5C +#define ICE_MODULE_QSFP_MAX_LEN 640 + +/** + * ice_get_module_info - get SFF module type and revision information + * @netdev: network interface device structure + * @modinfo: module EEPROM size and layout information structure + */ +static int +ice_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u8 sff8472_comp = 0; + u8 sff8472_swap = 0; + u8 sff8636_rev = 0; + u8 value = 0; + + status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00, + 0, &value, 1, 0, NULL); + if (status) + return -EIO; + + switch (value) { + case ICE_MODULE_TYPE_SFP: + status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, + ICE_MODULE_SFF_8472_COMP, 0x00, 0, + &sff8472_comp, 1, 0, NULL); + if (status) + return -EIO; + status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, + ICE_MODULE_SFF_8472_SWAP, 0x00, 0, + &sff8472_swap, 1, 0, NULL); + if (status) + return -EIO; + + if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else if (sff8472_comp && + (sff8472_swap & ICE_MODULE_SFF_DIAG_CAPAB)) { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } + break; + case ICE_MODULE_TYPE_QSFP_PLUS: + case ICE_MODULE_TYPE_QSFP28: + status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, + ICE_MODULE_REVISION_ADDR, 0x00, 0, + &sff8636_rev, 1, 0, NULL); + if (status) + return -EIO; + /* Check revision compliance */ + if 
(sff8636_rev > 0x02) { + /* Module is SFF-8636 compliant */ + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN; + } + break; + default: + netdev_warn(netdev, + "SFF Module Type not recognized.\n"); + return -EINVAL; + } + return 0; +} + +/** + * ice_get_module_eeprom - fill buffer with SFF EEPROM contents + * @netdev: network interface device structure + * @ee: EEPROM dump request structure + * @data: buffer to be filled with EEPROM contents + */ +static int +ice_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + u8 addr = ICE_I2C_EEPROM_DEV_ADDR; + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + bool is_sfp = false; + u16 offset = 0; + u8 value = 0; + u8 page = 0; + int i; + + status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, + &value, 1, 0, NULL); + if (status) + return -EIO; + + if (!ee || !ee->len || !data) + return -EINVAL; + + if (value == ICE_MODULE_TYPE_SFP) + is_sfp = true; + + for (i = 0; i < ee->len; i++) { + offset = i + ee->offset; + + /* Check if we need to access the other memory page */ + if (is_sfp) { + if (offset >= ETH_MODULE_SFF_8079_LEN) { + offset -= ETH_MODULE_SFF_8079_LEN; + addr = ICE_I2C_EEPROM_DEV_ADDR2; + } + } else { + while (offset >= ETH_MODULE_SFF_8436_LEN) { + /* Compute memory page number and offset. */ + offset -= ETH_MODULE_SFF_8436_LEN / 2; + page++; + } + } + + status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, !is_sfp, + &value, 1, 0, NULL); + if (status) + value = 0; + data[i] = value; + } + return 0; +} + static const struct ethtool_ops ice_ethtool_ops = { .get_link_ksettings = ice_get_link_ksettings, .set_link_ksettings = ice_set_link_ksettings, @@ -3433,6 +3630,8 @@ static const struct ethtool_ops ice_ethtool_ops = { .set_per_queue_coalesce = ice_set_per_q_coalesce, .get_fecparam = ice_get_fecparam, .set_fecparam = ice_set_fecparam, + .get_module_info = ice_get_module_info, + .get_module_eeprom = ice_get_module_eeprom, }; static const struct ethtool_ops ice_ethtool_safe_mode_ops = { diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 152fbd556e9b..e8f32350fed2 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -52,6 +52,9 @@ #define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0) #define PF_MBX_ATQLEN_ATQENABLE_M BIT(31) #define PF_MBX_ATQT 0x0022E300 +#define PRTDCB_GENC 0x00083000 +#define PRTDCB_GENC_PFCLDA_S 16 +#define PRTDCB_GENC_PFCLDA_M ICE_M(0xFFFF, 16) #define PRTDCB_GENS 0x00083020 #define PRTDCB_GENS_DCBX_STATUS_S 0 #define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0) diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 2aac8f13daeb..ad34f22d44ef 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -211,7 +211,7 @@ enum ice_flex_rx_mdid { /* Rx/Tx Flag64 packet flag bits */ enum ice_flg64_bits { ICE_FLG_PKT_DSI = 0, - ICE_FLG_EVLAN_x8100 = 15, + ICE_FLG_EVLAN_x8100 = 14, ICE_FLG_EVLAN_x9100, ICE_FLG_VLAN_x8100, ICE_FLG_TNL_MAC = 22, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index cc755382df25..d71f7ce0a265 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2,232 +2,26 @@ /* Copyright (c) 2018, Intel Corporation. */ #include "ice.h" +#include "ice_base.h" #include "ice_lib.h" #include "ice_dcb_lib.h" /** - * ice_setup_rx_ctx - Configure a receive ring context - * @ring: The Rx ring to configure - * - * Configure the Rx descriptor ring in RLAN context. - */ -static int ice_setup_rx_ctx(struct ice_ring *ring) -{ - struct ice_vsi *vsi = ring->vsi; - struct ice_hw *hw = &vsi->back->hw; - u32 rxdid = ICE_RXDID_FLEX_NIC; - struct ice_rlan_ctx rlan_ctx; - u32 regval; - u16 pf_q; - int err; - - /* what is Rx queue number in global space of 2K Rx queues */ - pf_q = vsi->rxq_map[ring->q_index]; - - /* clear the context structure first */ - memset(&rlan_ctx, 0, sizeof(rlan_ctx)); - - rlan_ctx.base = ring->dma >> 7; - - rlan_ctx.qlen = ring->count; - - /* Receive Packet Data Buffer Size. - * The Packet Data Buffer Size is defined in 128 byte units. - */ - rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; - - /* use 32 byte descriptors */ - rlan_ctx.dsize = 1; - - /* Strip the Ethernet CRC bytes before the packet is posted to host - * memory. - */ - rlan_ctx.crcstrip = 1; - - /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */ - rlan_ctx.l2tsel = 1; - - rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; - rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; - rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; - - /* This controls whether VLAN is stripped from inner headers - * The VLAN in the inner L2 header is stripped to the receive - * descriptor if enabled by this flag. - */ - rlan_ctx.showiv = 0; - - /* Max packet size for this queue - must not be set to a larger value - * than 5 x DBUF - */ - rlan_ctx.rxmax = min_t(u16, vsi->max_frame, - ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len); - - /* Rx queue threshold in units of 64 */ - rlan_ctx.lrxqthresh = 1; - - /* Enable Flexible Descriptors in the queue context which - * allows this driver to select a specific receive descriptor format - */ - if (vsi->type != ICE_VSI_VF) { - regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); - regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & - QRXFLXP_CNTXT_RXDID_IDX_M; - - /* increasing context priority to pick up profile ID; - * default is 0x01; setting to 0x03 to ensure profile - * is programming if prev context is of same priority - */ - regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & - QRXFLXP_CNTXT_RXDID_PRIO_M; - - wr32(hw, QRXFLXP_CNTXT(pf_q), regval); - } - - /* Absolute queue number out of 2K needs to be passed */ - err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); - if (err) { - dev_err(&vsi->back->pdev->dev, - "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", - pf_q, err); - return -EIO; - } - - if (vsi->type == ICE_VSI_VF) - return 0; - - /* init queue specific tail register */ - ring->tail = hw->hw_addr + QRX_TAIL(pf_q); - writel(0, ring->tail); - ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); - - return 0; -} - -/** - * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance - * @ring: The Tx ring to configure - * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized - * @pf_q: queue index in the PF space - * - * Configure the Tx descriptor ring in TLAN context. 
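[The ice_setup_rx_ctx() body removed above (it moves into ice_base.c elsewhere in this series) packs several fields in hardware units: the ring base address in 128-byte units (dma >> 7), the data buffer size in 128-byte units via ICE_RLAN_CTX_DBUF_S, and rxmax capped at ICE_MAX_CHAINED_RX_BUFS times the buffer length. A minimal sketch of just those conversions; the shift value of 7 and the sample DMA address, buffer length, and max frame below are assumptions for illustration, matching the 128-byte-unit comments in the removed code.]

#include <stdio.h>

#define ICE_RLAN_CTX_DBUF_S      7      /* assumed: dbuf is in 128-byte units */
#define ICE_MAX_CHAINED_RX_BUFS  5      /* per the removed rxmax comment */

int main(void)
{
        unsigned long long ring_dma = 0x12345680ULL;  /* example 128-byte aligned DMA address */
        unsigned int rx_buf_len     = 2048;           /* ICE_RXBUF_2048 */
        unsigned int max_frame      = 9728;           /* example jumbo max frame */

        unsigned long long base = ring_dma >> 7;                     /* 128-byte units */
        unsigned int dbuf       = rx_buf_len >> ICE_RLAN_CTX_DBUF_S; /* 2048 -> 16 */
        unsigned int cap        = ICE_MAX_CHAINED_RX_BUFS * rx_buf_len;
        unsigned int rxmax      = max_frame > cap ? cap : max_frame; /* <= 5 x DBUF */

        printf("base=%llu dbuf=%u rxmax=%u\n", base, dbuf, rxmax);
        return 0;
}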
+ * ice_vsi_type_str - maps VSI type enum to string equivalents + * @type: VSI type enum */ -static void -ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) +const char *ice_vsi_type_str(enum ice_vsi_type type) { - struct ice_vsi *vsi = ring->vsi; - struct ice_hw *hw = &vsi->back->hw; - - tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; - - tlan_ctx->port_num = vsi->port_info->lport; - - /* Transmit Queue Length */ - tlan_ctx->qlen = ring->count; - - ice_set_cgd_num(tlan_ctx, ring); - - /* PF number */ - tlan_ctx->pf_num = hw->pf_id; - - /* queue belongs to a specific VSI type - * VF / VM index should be programmed per vmvf_type setting: - * for vmvf_type = VF, it is VF number between 0-256 - * for vmvf_type = VM, it is VM number between 0-767 - * for PF or EMP this field should be set to zero - */ - switch (vsi->type) { - case ICE_VSI_LB: - /* fall through */ + switch (type) { case ICE_VSI_PF: - tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; - break; + return "ICE_VSI_PF"; case ICE_VSI_VF: - /* Firmware expects vmvf_num to be absolute VF ID */ - tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; - tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; - break; + return "ICE_VSI_VF"; + case ICE_VSI_LB: + return "ICE_VSI_LB"; default: - return; + return "unknown"; } - - /* make sure the context is associated with the right VSI */ - tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); - - tlan_ctx->tso_ena = ICE_TX_LEGACY; - tlan_ctx->tso_qnum = pf_q; - - /* Legacy or Advanced Host Interface: - * 0: Advanced Host Interface - * 1: Legacy Host Interface - */ - tlan_ctx->legacy_int = ICE_TX_LEGACY; -} - -/** - * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled - * @pf: the PF being configured - * @pf_q: the PF queue - * @ena: enable or disable state of the queue - * - * This routine will wait for the given Rx queue of the PF to reach the - * enabled or disabled state. - * Returns -ETIMEDOUT in case of failing to reach the requested state after - * multiple retries; else will return 0 in case of success. - */ -static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) -{ - int i; - - for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) { - if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) & - QRX_CTRL_QENA_STAT_M)) - return 0; - - usleep_range(20, 40); - } - - return -ETIMEDOUT; -} - -/** - * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring - * @vsi: the VSI being configured - * @ena: start or stop the Rx rings - * @rxq_idx: Rx queue index - */ -#ifndef CONFIG_PCI_IOV -static -#endif /* !CONFIG_PCI_IOV */ -int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx) -{ - int pf_q = vsi->rxq_map[rxq_idx]; - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - int ret = 0; - u32 rx_reg; - - rx_reg = rd32(hw, QRX_CTRL(pf_q)); - - /* Skip if the queue is already in the requested state */ - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) - return 0; - - /* turn on/off the queue */ - if (ena) - rx_reg |= QRX_CTRL_QENA_REQ_M; - else - rx_reg &= ~QRX_CTRL_QENA_REQ_M; - wr32(hw, QRX_CTRL(pf_q), rx_reg); - - /* wait for the change to finish */ - ret = ice_pf_rxq_wait(pf, pf_q, ena); - if (ret) - dev_err(&pf->pdev->dev, - "VSI idx %d Rx ring %d %sable timeout\n", - vsi->idx, pf_q, (ena ? 
"en" : "dis")); - - return ret; } /** @@ -270,7 +64,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->rx_rings) goto err_rings; - vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, + /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */ + vsi->txq_map = devm_kcalloc(&pf->pdev->dev, (2 * vsi->alloc_txq), sizeof(*vsi->txq_map), GFP_KERNEL); if (!vsi->txq_map) @@ -281,7 +76,6 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->rxq_map) goto err_rxq_map; - /* There is no need to allocate q_vectors for a loopback VSI. */ if (vsi->type == ICE_VSI_LB) return 0; @@ -606,88 +400,6 @@ unlock_pf: } /** - * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI - * @qs_cfg: gathered variables needed for PF->VSI queues assignment - * - * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap - */ -static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg) -{ - int offset, i; - - mutex_lock(qs_cfg->qs_mutex); - offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size, - 0, qs_cfg->q_count, 0); - if (offset >= qs_cfg->pf_map_size) { - mutex_unlock(qs_cfg->qs_mutex); - return -ENOMEM; - } - - bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count); - for (i = 0; i < qs_cfg->q_count; i++) - qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset; - mutex_unlock(qs_cfg->qs_mutex); - - return 0; -} - -/** - * __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI - * @qs_cfg: gathered variables needed for pf->vsi queues assignment - * - * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap - */ -static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg) -{ - int i, index = 0; - - mutex_lock(qs_cfg->qs_mutex); - for (i = 0; i < qs_cfg->q_count; i++) { - index = find_next_zero_bit(qs_cfg->pf_map, - qs_cfg->pf_map_size, index); - if (index >= qs_cfg->pf_map_size) - goto err_scatter; - set_bit(index, qs_cfg->pf_map); - qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index; - } - mutex_unlock(qs_cfg->qs_mutex); - - return 0; -err_scatter: - for (index = 0; index < i; index++) { - clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map); - qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0; - } - mutex_unlock(qs_cfg->qs_mutex); - - return -ENOMEM; -} - -/** - * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI - * @qs_cfg: gathered variables needed for pf->vsi queues assignment - * - * This function first tries to find contiguous space. If it is not successful, - * it tries with the scatter approach. 
- * - * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap - */ -static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg) -{ - int ret = 0; - - ret = __ice_vsi_get_qs_contig(qs_cfg); - if (ret) { - /* contig failed, so try with scatter approach */ - qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER; - qs_cfg->q_count = min_t(u16, qs_cfg->q_count, - qs_cfg->scatter_count); - ret = __ice_vsi_get_qs_sc(qs_cfg); - } - return ret; -} - -/** * ice_vsi_get_qs - Assign queues from PF to VSI * @vsi: the VSI to assign queues to * @@ -1006,7 +718,8 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; break; case ICE_VSI_LB: - dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type); + dev_dbg(&pf->pdev->dev, "Unsupported VSI type %s\n", + ice_vsi_type_str(vsi->type)); return; default: dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); @@ -1098,129 +811,6 @@ static int ice_vsi_init(struct ice_vsi *vsi) } /** - * ice_free_q_vector - Free memory allocated for a specific interrupt vector - * @vsi: VSI having the memory freed - * @v_idx: index of the vector to be freed - */ -static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) -{ - struct ice_q_vector *q_vector; - struct ice_pf *pf = vsi->back; - struct ice_ring *ring; - - if (!vsi->q_vectors[v_idx]) { - dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n", - v_idx); - return; - } - q_vector = vsi->q_vectors[v_idx]; - - ice_for_each_ring(ring, q_vector->tx) - ring->q_vector = NULL; - ice_for_each_ring(ring, q_vector->rx) - ring->q_vector = NULL; - - /* only VSI with an associated netdev is set up with NAPI */ - if (vsi->netdev) - netif_napi_del(&q_vector->napi); - - devm_kfree(&pf->pdev->dev, q_vector); - vsi->q_vectors[v_idx] = NULL; -} - -/** - * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors - * @vsi: the VSI having memory freed - */ -void ice_vsi_free_q_vectors(struct ice_vsi *vsi) -{ - int v_idx; - - ice_for_each_q_vector(vsi, v_idx) - ice_free_q_vector(vsi, v_idx); -} - -/** - * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector - * @vsi: the VSI being configured - * @v_idx: index of the vector in the VSI struct - * - * We allocate one q_vector. If allocation fails we return -ENOMEM. - */ -static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) -{ - struct ice_pf *pf = vsi->back; - struct ice_q_vector *q_vector; - - /* allocate q_vector */ - q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL); - if (!q_vector) - return -ENOMEM; - - q_vector->vsi = vsi; - q_vector->v_idx = v_idx; - if (vsi->type == ICE_VSI_VF) - goto out; - /* only set affinity_mask if the CPU is online */ - if (cpu_online(v_idx)) - cpumask_set_cpu(v_idx, &q_vector->affinity_mask); - - /* This will not be called in the driver load path because the netdev - * will not be created yet. All other cases with register the NAPI - * handler here (i.e. resume, reset/rebuild, etc.) - */ - if (vsi->netdev) - netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, - NAPI_POLL_WEIGHT); - -out: - /* tie q_vector and VSI together */ - vsi->q_vectors[v_idx] = q_vector; - - return 0; -} - -/** - * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors - * @vsi: the VSI being configured - * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. 
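[The per-vector allocators being removed here, like the ring reallocation in ice_set_ringparam earlier in this patch, rely on the same unwind idiom on partial failure: the loop index at the moment of the error says exactly how many elements were set up, and a while (idx--) walk releases them in reverse order, as the allocation loop that follows shows. A minimal standalone sketch of that idiom; the alloc/free pair below is illustrative, not a driver API.]

#include <stdio.h>
#include <stdlib.h>

#define NUM_VECTORS 8

static void *alloc_vector(int idx)
{
        /* Simulate a failure part-way through to exercise the unwind path. */
        if (idx == 5)
                return NULL;
        return malloc(64);
}

int main(void)
{
        void *vec[NUM_VECTORS] = { 0 };
        int v_idx;

        for (v_idx = 0; v_idx < NUM_VECTORS; v_idx++) {
                vec[v_idx] = alloc_vector(v_idx);
                if (!vec[v_idx])
                        goto err_out;
        }
        printf("all %d vectors allocated\n", NUM_VECTORS);
        return 0;

err_out:
        /* v_idx names the element that failed; free everything before it. */
        while (v_idx--) {
                free(vec[v_idx]);
                vec[v_idx] = NULL;
        }
        printf("allocation failed, unwound cleanly\n");
        return 1;
}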
- */ -static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - int v_idx = 0, num_q_vectors; - int err; - - if (vsi->q_vectors[0]) { - dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n", - vsi->vsi_num); - return -EEXIST; - } - - num_q_vectors = vsi->num_q_vectors; - - for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { - err = ice_vsi_alloc_q_vector(vsi, v_idx); - if (err) - goto err_out; - } - - return 0; - -err_out: - while (v_idx--) - ice_free_q_vector(vsi, v_idx); - - dev_err(&pf->pdev->dev, - "Failed to allocate %d q_vector for VSI %d, ret=%d\n", - vsi->num_q_vectors, vsi->vsi_num, err); - vsi->num_q_vectors = 0; - return err; -} - -/** * ice_vsi_setup_vector_base - Set up the base vector for the given VSI * @vsi: ptr to the VSI * @@ -1341,66 +931,6 @@ err_out: } /** - * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors - * @vsi: the VSI being configured - * - * This function maps descriptor rings to the queue-specific vectors allotted - * through the MSI-X enabling code. On a constrained vector budget, we map Tx - * and Rx rings to the vector as "efficiently" as possible. - */ -#ifdef CONFIG_DCB -void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) -#else -static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) -#endif /* CONFIG_DCB */ -{ - int q_vectors = vsi->num_q_vectors; - int tx_rings_rem, rx_rings_rem; - int v_id; - - /* initially assigning remaining rings count to VSIs num queue value */ - tx_rings_rem = vsi->num_txq; - rx_rings_rem = vsi->num_rxq; - - for (v_id = 0; v_id < q_vectors; v_id++) { - struct ice_q_vector *q_vector = vsi->q_vectors[v_id]; - int tx_rings_per_v, rx_rings_per_v, q_id, q_base; - - /* Tx rings mapping to vector */ - tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); - q_vector->num_ring_tx = tx_rings_per_v; - q_vector->tx.ring = NULL; - q_vector->tx.itr_idx = ICE_TX_ITR; - q_base = vsi->num_txq - tx_rings_rem; - - for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { - struct ice_ring *tx_ring = vsi->tx_rings[q_id]; - - tx_ring->q_vector = q_vector; - tx_ring->next = q_vector->tx.ring; - q_vector->tx.ring = tx_ring; - } - tx_rings_rem -= tx_rings_per_v; - - /* Rx rings mapping to vector */ - rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); - q_vector->num_ring_rx = rx_rings_per_v; - q_vector->rx.ring = NULL; - q_vector->rx.itr_idx = ICE_RX_ITR; - q_base = vsi->num_rxq - rx_rings_rem; - - for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { - struct ice_ring *rx_ring = vsi->rx_rings[q_id]; - - rx_ring->q_vector = q_vector; - rx_ring->next = q_vector->rx.ring; - q_vector->rx.ring = rx_ring; - } - rx_rings_rem -= rx_rings_per_v; - } -} - -/** * ice_vsi_manage_rss_lut - disable/enable RSS * @vsi: the VSI being changed * @ena: boolean value indicating if this is an enable or disable request @@ -1674,6 +1204,31 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) } /** + * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length + * @vsi: VSI + */ +void ice_vsi_cfg_frame_size(struct ice_vsi *vsi) +{ + if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { + vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; + vsi->rx_buf_len = ICE_RXBUF_2048; +#if (PAGE_SIZE < 8192) + } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && + (vsi->netdev->mtu <= ETH_DATA_LEN)) { + vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; + vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; +#endif + } else { + vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; +#if 
(PAGE_SIZE < 8192) + vsi->rx_buf_len = ICE_RXBUF_3072; +#else + vsi->rx_buf_len = ICE_RXBUF_2048; +#endif + } +} + +/** * ice_vsi_cfg_rxqs - Configure the VSI for Rx * @vsi: the VSI being configured * @@ -1687,13 +1242,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) if (vsi->type == ICE_VSI_VF) goto setup_rings; - if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN) - vsi->max_frame = vsi->netdev->mtu + - ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - else - vsi->max_frame = ICE_RXBUF_2048; - - vsi->rx_buf_len = ICE_RXBUF_2048; + ice_vsi_cfg_frame_size(vsi); setup_rings: /* set up individual rings */ for (i = 0; i < vsi->num_rxq; i++) { @@ -1712,101 +1261,34 @@ setup_rings: } /** - * ice_vsi_cfg_txq - Configure single Tx queue - * @vsi: the VSI that queue belongs to - * @ring: Tx ring to be configured - * @tc_q_idx: queue index within given TC - * @qg_buf: queue group buffer - * @tc: TC that Tx ring belongs to - */ -static int -ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx, - struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc) -{ - struct ice_tlan_ctx tlan_ctx = { 0 }; - struct ice_aqc_add_txqs_perq *txq; - struct ice_pf *pf = vsi->back; - u8 buf_len = sizeof(*qg_buf); - enum ice_status status; - u16 pf_q; - - pf_q = ring->reg_idx; - ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); - /* copy context contents into the qg_buf */ - qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); - ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, - ice_tlan_ctx_info); - - /* init queue specific tail reg. It is referred as - * transmit comm scheduler queue doorbell. - */ - ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); - - /* Add unique software queue handle of the Tx queue per - * TC into the VSI Tx ring - */ - ring->q_handle = tc_q_idx; - - status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle, - 1, qg_buf, buf_len, NULL); - if (status) { - dev_err(&pf->pdev->dev, - "Failed to set LAN Tx queue context, error: %d\n", - status); - return -ENODEV; - } - - /* Add Tx Queue TEID into the VSI Tx ring from the - * response. This will complete configuring and - * enabling the queue. - */ - txq = &qg_buf->txqs[0]; - if (pf_q == le16_to_cpu(txq->txq_id)) - ring->txq_teid = le32_to_cpu(txq->q_teid); - - return 0; -} - -/** * ice_vsi_cfg_txqs - Configure the VSI for Tx * @vsi: the VSI being configured * @rings: Tx ring array to be configured - * @offset: offset within vsi->txq_map * * Return 0 on success and a negative value on error * Configure the Tx VSI for operation. 
*/ static int -ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset) +ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings) { struct ice_aqc_add_tx_qgrp *qg_buf; - struct ice_pf *pf = vsi->back; - u16 q_idx = 0, i; + u16 q_idx = 0; int err = 0; - u8 tc; - qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL); + qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL); if (!qg_buf) return -ENOMEM; qg_buf->num_txqs = 1; - /* set up and configure the Tx queues for each enabled TC */ - ice_for_each_traffic_class(tc) { - if (!(vsi->tc_cfg.ena_tc & BIT(tc))) - break; - - for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { - err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset, - qg_buf, tc); - if (err) - goto err_cfg_txqs; - - q_idx++; - } + for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) { + err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf); + if (err) + goto err_cfg_txqs; } + err_cfg_txqs: - devm_kfree(&pf->pdev->dev, qg_buf); + kfree(qg_buf); return err; } @@ -1819,159 +1301,46 @@ err_cfg_txqs: */ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi) { - return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0); -} - -/** - * ice_intrl_usec_to_reg - convert interrupt rate limit to register value - * @intrl: interrupt rate limit in usecs - * @gran: interrupt rate limit granularity in usecs - * - * This function converts a decimal interrupt rate limit in usecs to the format - * expected by firmware. - */ -u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) -{ - u32 val = intrl / gran; - - if (val) - return val | GLINT_RATE_INTRL_ENA_M; - return 0; -} - -/** - * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set - * @hw: board specific structure - */ -static void ice_cfg_itr_gran(struct ice_hw *hw) -{ - u32 regval = rd32(hw, GLINT_CTL); - - /* no need to update global register if ITR gran is already set */ - if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) && - (((regval & GLINT_CTL_ITR_GRAN_200_M) >> - GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) && - (((regval & GLINT_CTL_ITR_GRAN_100_M) >> - GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) && - (((regval & GLINT_CTL_ITR_GRAN_50_M) >> - GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) && - (((regval & GLINT_CTL_ITR_GRAN_25_M) >> - GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US)) - return; - - regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) & - GLINT_CTL_ITR_GRAN_200_M) | - ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) & - GLINT_CTL_ITR_GRAN_100_M) | - ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) & - GLINT_CTL_ITR_GRAN_50_M) | - ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) & - GLINT_CTL_ITR_GRAN_25_M); - wr32(hw, GLINT_CTL, regval); + return ice_vsi_cfg_txqs(vsi, vsi->tx_rings); } /** - * ice_cfg_itr - configure the initial interrupt throttle values - * @hw: pointer to the HW structure - * @q_vector: interrupt vector that's being configured - * - * Configure interrupt throttling values for the ring containers that are - * associated with the interrupt vector passed in. 
- */ -static void -ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector) -{ - ice_cfg_itr_gran(hw); - - if (q_vector->num_ring_rx) { - struct ice_ring_container *rc = &q_vector->rx; - - /* if this value is set then don't overwrite with default */ - if (!rc->itr_setting) - rc->itr_setting = ICE_DFLT_RX_ITR; - - rc->target_itr = ITR_TO_REG(rc->itr_setting); - rc->next_update = jiffies + 1; - rc->current_itr = rc->target_itr; - wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), - ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); - } - - if (q_vector->num_ring_tx) { - struct ice_ring_container *rc = &q_vector->tx; - - /* if this value is set then don't overwrite with default */ - if (!rc->itr_setting) - rc->itr_setting = ICE_DFLT_TX_ITR; - - rc->target_itr = ITR_TO_REG(rc->itr_setting); - rc->next_update = jiffies + 1; - rc->current_itr = rc->target_itr; - wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), - ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); - } -} - -/** - * ice_cfg_txq_interrupt - configure interrupt on Tx queue + * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI * @vsi: the VSI being configured - * @txq: Tx queue being mapped to MSI-X vector - * @msix_idx: MSI-X vector index within the function - * @itr_idx: ITR index of the interrupt cause * - * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector - * within the function space. + * Return 0 on success and a negative value on error + * Configure the Tx queues dedicated for XDP in given VSI for operation. */ -#ifdef CONFIG_PCI_IOV -void -ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx) -#else -static void -ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx) -#endif /* CONFIG_PCI_IOV */ +int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi) { - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - u32 val; + int ret; + int i; - itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M; + ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings); + if (ret) + return ret; - val = QINT_TQCTL_CAUSE_ENA_M | itr_idx | - ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M); + for (i = 0; i < vsi->num_xdp_txq; i++) + vsi->xdp_rings[i]->xsk_umem = ice_xsk_umem(vsi->xdp_rings[i]); - wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); + return ret; } /** - * ice_cfg_rxq_interrupt - configure interrupt on Rx queue - * @vsi: the VSI being configured - * @rxq: Rx queue being mapped to MSI-X vector - * @msix_idx: MSI-X vector index within the function - * @itr_idx: ITR index of the interrupt cause + * ice_intrl_usec_to_reg - convert interrupt rate limit to register value + * @intrl: interrupt rate limit in usecs + * @gran: interrupt rate limit granularity in usecs * - * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector - * within the function space. + * This function converts a decimal interrupt rate limit in usecs to the format + * expected by firmware. 
*/ -#ifdef CONFIG_PCI_IOV -void -ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx) -#else -static void -ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx) -#endif /* CONFIG_PCI_IOV */ +u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) { - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - u32 val; - - itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M; - - val = QINT_RQCTL_CAUSE_ENA_M | itr_idx | - ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M); - - wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); + u32 val = intrl / gran; - ice_flush(hw); + if (val) + return val | GLINT_RATE_INTRL_ENA_M; + return 0; } /** @@ -2134,109 +1503,6 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) } /** - * ice_trigger_sw_intr - trigger a software interrupt - * @hw: pointer to the HW structure - * @q_vector: interrupt vector to trigger the software interrupt for - */ -void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector) -{ - wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), - (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) | - GLINT_DYN_CTL_SWINT_TRIG_M | - GLINT_DYN_CTL_INTENA_M); -} - -/** - * ice_vsi_stop_tx_ring - Disable single Tx ring - * @vsi: the VSI being configured - * @rst_src: reset source - * @rel_vmvf_num: Relative ID of VF/VM - * @ring: Tx ring to be stopped - * @txq_meta: Meta data of Tx ring to be stopped - */ -#ifndef CONFIG_PCI_IOV -static -#endif /* !CONFIG_PCI_IOV */ -int -ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, - u16 rel_vmvf_num, struct ice_ring *ring, - struct ice_txq_meta *txq_meta) -{ - struct ice_pf *pf = vsi->back; - struct ice_q_vector *q_vector; - struct ice_hw *hw = &pf->hw; - enum ice_status status; - u32 val; - - /* clear cause_ena bit for disabled queues */ - val = rd32(hw, QINT_TQCTL(ring->reg_idx)); - val &= ~QINT_TQCTL_CAUSE_ENA_M; - wr32(hw, QINT_TQCTL(ring->reg_idx), val); - - /* software is expected to wait for 100 ns */ - ndelay(100); - - /* trigger a software interrupt for the vector - * associated to the queue to schedule NAPI handler - */ - q_vector = ring->q_vector; - if (q_vector) - ice_trigger_sw_intr(hw, q_vector); - - status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx, - txq_meta->tc, 1, &txq_meta->q_handle, - &txq_meta->q_id, &txq_meta->q_teid, rst_src, - rel_vmvf_num, NULL); - - /* if the disable queue command was exercised during an - * active reset flow, ICE_ERR_RESET_ONGOING is returned. - * This is not an error as the reset operation disables - * queues at the hardware level anyway. - */ - if (status == ICE_ERR_RESET_ONGOING) { - dev_dbg(&vsi->back->pdev->dev, - "Reset in progress. 
LAN Tx queues already disabled\n"); - } else if (status == ICE_ERR_DOES_NOT_EXIST) { - dev_dbg(&vsi->back->pdev->dev, - "LAN Tx queues do not exist, nothing to disable\n"); - } else if (status) { - dev_err(&vsi->back->pdev->dev, - "Failed to disable LAN Tx queues, error: %d\n", status); - return -ENODEV; - } - - return 0; -} - -/** - * ice_fill_txq_meta - Prepare the Tx queue's meta data - * @vsi: VSI that ring belongs to - * @ring: ring that txq_meta will be based on - * @txq_meta: a helper struct that wraps Tx queue's information - * - * Set up a helper struct that will contain all the necessary fields that - * are needed for stopping Tx queue - */ -#ifndef CONFIG_PCI_IOV -static -#endif /* !CONFIG_PCI_IOV */ -void -ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, - struct ice_txq_meta *txq_meta) -{ - u8 tc = 0; - -#ifdef CONFIG_DCB - tc = ring->dcb_tc; -#endif /* CONFIG_DCB */ - txq_meta->q_id = ring->reg_idx; - txq_meta->q_teid = ring->txq_teid; - txq_meta->q_handle = ring->q_handle; - txq_meta->vsi_idx = vsi->idx; - txq_meta->tc = tc; -} - -/** * ice_vsi_stop_tx_rings - Disable Tx rings * @vsi: the VSI being configured * @rst_src: reset source @@ -2247,34 +1513,24 @@ static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, u16 rel_vmvf_num, struct ice_ring **rings) { - u16 i, q_idx = 0; - int status; - u8 tc; + u16 q_idx; if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) return -EINVAL; - /* set up the Tx queue list to be disabled for each enabled TC */ - ice_for_each_traffic_class(tc) { - if (!(vsi->tc_cfg.ena_tc & BIT(tc))) - break; - - for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { - struct ice_txq_meta txq_meta = { }; + for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) { + struct ice_txq_meta txq_meta = { }; + int status; - if (!rings || !rings[q_idx]) - return -EINVAL; + if (!rings || !rings[q_idx]) + return -EINVAL; - ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); - status = ice_vsi_stop_tx_ring(vsi, rst_src, - rel_vmvf_num, - rings[q_idx], &txq_meta); + ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta); + status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num, + rings[q_idx], &txq_meta); - if (status) - return status; - - q_idx++; - } + if (status) + return status; } return 0; @@ -2294,6 +1550,15 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, } /** + * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings + * @vsi: the VSI being configured + */ +int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi) +{ + return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings); +} + +/** * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI * @vsi: VSI to enable or disable VLAN pruning on * @ena: set to true to enable VLAN pruning and false to disable it @@ -2635,23 +1900,17 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, * out PAUSE or PFC frames. If enabled, FW can still send FC frames. * The rule is added once for PF VSI in order to create appropriate * recipe, since VSI/VSI list is ignored with drop action... - * Also add rules to handle LLDP Tx and Rx packets. Tx LLDP packets - * need to be dropped so that VFs cannot send LLDP packets to reconfig - * DCB settings in the HW. Also, if the FW DCBX engine is not running - * then Rx LLDP packets need to be redirected up the stack. + * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to + * be dropped so that VFs cannot send LLDP packets to reconfig DCB + * settings in the HW. 
*/ - if (!ice_is_safe_mode(pf)) { + if (!ice_is_safe_mode(pf)) if (vsi->type == ICE_VSI_PF) { ice_vsi_add_rem_eth_mac(vsi, true); /* Tx LLDP packets */ ice_cfg_sw_lldp(vsi, true, true); - - /* Rx LLDP packets */ - if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) - ice_cfg_sw_lldp(vsi, false, true); } - } return vsi; @@ -2690,6 +1949,11 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0); for (q = 0; q < q_vector->num_ring_tx; q++) { wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); + if (ice_is_xdp_ena_vsi(vsi)) { + u32 xdp_txq = txq + vsi->num_xdp_txq; + + wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); + } txq++; } @@ -2790,6 +2054,62 @@ void ice_vsi_close(struct ice_vsi *vsi) } /** + * ice_ena_vsi - resume a VSI + * @vsi: the VSI being resume + * @locked: is the rtnl_lock already held + */ +int ice_ena_vsi(struct ice_vsi *vsi, bool locked) +{ + int err = 0; + + if (!test_bit(__ICE_NEEDS_RESTART, vsi->state)) + return 0; + + clear_bit(__ICE_NEEDS_RESTART, vsi->state); + + if (vsi->netdev && vsi->type == ICE_VSI_PF) { + if (netif_running(vsi->netdev)) { + if (!locked) + rtnl_lock(); + + err = ice_open(vsi->netdev); + + if (!locked) + rtnl_unlock(); + } + } + + return err; +} + +/** + * ice_dis_vsi - pause a VSI + * @vsi: the VSI being paused + * @locked: is the rtnl_lock already held + */ +void ice_dis_vsi(struct ice_vsi *vsi, bool locked) +{ + if (test_bit(__ICE_DOWN, vsi->state)) + return; + + set_bit(__ICE_NEEDS_RESTART, vsi->state); + + if (vsi->type == ICE_VSI_PF && vsi->netdev) { + if (netif_running(vsi->netdev)) { + if (!locked) + rtnl_lock(); + + ice_stop(vsi->netdev); + + if (!locked) + rtnl_unlock(); + } else { + ice_vsi_close(vsi); + } + } +} + +/** * ice_free_res - free a block of resources * @res: pointer to the resource * @index: starting index previously returned by ice_get_res @@ -3064,6 +2384,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) vsi->base_vector = 0; } + if (ice_is_xdp_ena_vsi(vsi)) + /* return value check can be skipped here, it always returns + * 0 if reset is in progress + */ + ice_destroy_xdp_rings(vsi); ice_vsi_put_qs(vsi); ice_vsi_clear_rings(vsi); ice_vsi_free_arrays(vsi); @@ -3085,7 +2410,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) if (ret < 0) goto err_vsi; - switch (vsi->type) { case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); @@ -3105,6 +2429,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) goto err_vectors; ice_vsi_map_rings_to_vectors(vsi); + if (ice_is_xdp_ena_vsi(vsi)) { + vsi->num_xdp_txq = vsi->alloc_txq; + ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); + if (ret) + goto err_vectors; + } /* Do not exit if configuring RSS had an issue, at least * receive traffic on first queue. 
Hence no need to capture * return value @@ -3131,9 +2461,13 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) } /* configure VSI nodes based on number of queues and TC's */ - for (i = 0; i < vsi->tc_cfg.numtc; i++) + for (i = 0; i < vsi->tc_cfg.numtc; i++) { max_txqs[i] = vsi->alloc_txq; + if (ice_is_xdp_ena_vsi(vsi)) + max_txqs[i] += vsi->num_xdp_txq; + } + status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); if (status) { @@ -3166,6 +2500,7 @@ err_vsi: bool ice_is_reset_in_progress(unsigned long *state) { return test_bit(__ICE_RESET_OICR_RECV, state) || + test_bit(__ICE_DCBNL_DEVRESET, state) || test_bit(__ICE_PFR_REQ, state) || test_bit(__ICE_CORER_REQ, state) || test_bit(__ICE_GLOBR_REQ, state); @@ -3271,6 +2606,51 @@ char *ice_nvm_version_str(struct ice_hw *hw) } /** + * ice_update_ring_stats - Update ring statistics + * @ring: ring to update + * @cont: used to increment per-vector counters + * @pkts: number of processed packets + * @bytes: number of processed bytes + * + * This function assumes that caller has acquired a u64_stats_sync lock. + */ +static void +ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont, + u64 pkts, u64 bytes) +{ + ring->stats.bytes += bytes; + ring->stats.pkts += pkts; + cont->total_bytes += bytes; + cont->total_pkts += pkts; +} + +/** + * ice_update_tx_ring_stats - Update Tx ring specific counters + * @tx_ring: ring to update + * @pkts: number of processed packets + * @bytes: number of processed bytes + */ +void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes) +{ + u64_stats_update_begin(&tx_ring->syncp); + ice_update_ring_stats(tx_ring, &tx_ring->q_vector->tx, pkts, bytes); + u64_stats_update_end(&tx_ring->syncp); +} + +/** + * ice_update_rx_ring_stats - Update Rx ring specific counters + * @rx_ring: ring to update + * @pkts: number of processed packets + * @bytes: number of processed bytes + */ +void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes) +{ + u64_stats_update_begin(&rx_ring->syncp); + ice_update_ring_stats(rx_ring, &rx_ring->q_vector->rx, pkts, bytes); + u64_stats_update_end(&rx_ring->syncp); +} + +/** * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI * @vsi: the VSI being configured MAC filter * @macaddr: the MAC address to be added. 
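The ice_lib.c hunk above finishes by adding ice_update_ring_stats() and its Tx/Rx wrappers, which take the ring's u64_stats sync lock and bump both the ring counters and the per-vector ice_ring_container totals. A minimal usage sketch in C follows, assuming the declarations the patch adds to ice_lib.h; example_poll_done() and its pkts/bytes parameters are illustrative placeholders, not the driver's real cleanup routines:

#include "ice.h"
#include "ice_lib.h"	/* declares ice_update_{tx,rx}_ring_stats() */

/* Hypothetical end-of-poll hook: publish what one NAPI cycle reclaimed.
 * Each helper takes the ring's syncp seqcount internally and also
 * updates the owning q_vector's tx/rx container totals.
 */
static void example_poll_done(struct ice_ring *tx_ring, struct ice_ring *rx_ring,
			      u64 tx_pkts, u64 tx_bytes,
			      u64 rx_pkts, u64 rx_bytes)
{
	ice_update_tx_ring_stats(tx_ring, tx_pkts, tx_bytes);
	ice_update_rx_ring_stats(rx_ring, rx_pkts, rx_bytes);
}

Since each wrapper already handles the locking, a caller needs only one call per direction per poll cycle.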
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 47bc033fff20..e86aa60c0254 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -6,18 +6,7 @@ #include "ice.h" -struct ice_txq_meta { - /* Tx-scheduler element identifier */ - u32 q_teid; - /* Entry in VSI's txq_map bitmap */ - u16 q_id; - /* Relative index of Tx queue within TC */ - u16 q_handle; - /* VSI index that Tx queue belongs to */ - u16 vsi_idx; - /* TC number that Tx queue belongs to */ - u8 tc; -}; +const char *ice_vsi_type_str(enum ice_vsi_type type); int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, @@ -33,24 +22,6 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi); void ice_vsi_cfg_msix(struct ice_vsi *vsi); -#ifdef CONFIG_PCI_IOV -void -ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx); - -void -ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx); - -int -ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, - u16 rel_vmvf_num, struct ice_ring *ring, - struct ice_txq_meta *txq_meta); - -void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, - struct ice_txq_meta *txq_meta); - -int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx); -#endif /* CONFIG_PCI_IOV */ - int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid); int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid); @@ -67,6 +38,10 @@ int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, u16 rel_vmvf_num); +int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi); + +int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi); + int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc); void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); @@ -89,6 +64,10 @@ int ice_vsi_release(struct ice_vsi *vsi); void ice_vsi_close(struct ice_vsi *vsi); +int ice_ena_vsi(struct ice_vsi *vsi, bool locked); + +void ice_dis_vsi(struct ice_vsi *vsi, bool locked); + int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id); int @@ -98,16 +77,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi); bool ice_is_reset_in_progress(unsigned long *state); -void ice_vsi_free_q_vectors(struct ice_vsi *vsi); - -void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector); - void ice_vsi_put_qs(struct ice_vsi *vsi); -#ifdef CONFIG_DCB -void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi); -#endif /* CONFIG_DCB */ - void ice_vsi_dis_irq(struct ice_vsi *vsi); void ice_vsi_free_irq(struct ice_vsi *vsi); @@ -118,6 +89,12 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi); int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena); +void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes); + +void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes); + +void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); + u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran); char *ice_nvm_version_str(struct ice_hw *hw); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 214cd6eca405..5681e3be81f2 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -6,8 +6,10 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "ice.h" +#include "ice_base.h" #include "ice_lib.h" #include "ice_dcb_lib.h" +#include "ice_dcb_nl.h" #define DRV_VERSION_MAJOR 0 #define DRV_VERSION_MINOR 8 @@ -435,42 +437,11 @@ static void ice_sync_fltr_subtask(struct 
ice_pf *pf) } /** - * ice_dis_vsi - pause a VSI - * @vsi: the VSI being paused - * @locked: is the rtnl_lock already held - */ -static void ice_dis_vsi(struct ice_vsi *vsi, bool locked) -{ - if (test_bit(__ICE_DOWN, vsi->state)) - return; - - set_bit(__ICE_NEEDS_RESTART, vsi->state); - - if (vsi->type == ICE_VSI_PF && vsi->netdev) { - if (netif_running(vsi->netdev)) { - if (!locked) - rtnl_lock(); - - ice_stop(vsi->netdev); - - if (!locked) - rtnl_unlock(); - } else { - ice_vsi_close(vsi); - } - } -} - -/** * ice_pf_dis_all_vsi - Pause all VSIs on a PF * @pf: the PF * @locked: is the rtnl_lock already held */ -#ifdef CONFIG_DCB -void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) -#else static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) -#endif /* CONFIG_DCB */ { int v; @@ -636,8 +607,14 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi) switch (vsi->port_info->phy.link_info.topo_media_conflict) { case ICE_AQ_LINK_TOPO_CONFLICT: case ICE_AQ_LINK_MEDIA_CONFLICT: + case ICE_AQ_LINK_TOPO_UNREACH_PRT: + case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT: + case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA: netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n"); break; + case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA: + netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); + break; default: break; } @@ -1661,6 +1638,324 @@ free_q_irqs: } /** + * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP + * @vsi: VSI to setup Tx rings used by XDP + * + * Return 0 on success and negative value on error + */ +static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) +{ + struct device *dev = &vsi->back->pdev->dev; + int i; + + for (i = 0; i < vsi->num_xdp_txq; i++) { + u16 xdp_q_idx = vsi->alloc_txq + i; + struct ice_ring *xdp_ring; + + xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); + + if (!xdp_ring) + goto free_xdp_rings; + + xdp_ring->q_index = xdp_q_idx; + xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; + xdp_ring->ring_active = false; + xdp_ring->vsi = vsi; + xdp_ring->netdev = NULL; + xdp_ring->dev = dev; + xdp_ring->count = vsi->num_tx_desc; + vsi->xdp_rings[i] = xdp_ring; + if (ice_setup_tx_ring(xdp_ring)) + goto free_xdp_rings; + ice_set_ring_xdp(xdp_ring); + xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring); + } + + return 0; + +free_xdp_rings: + for (; i >= 0; i--) + if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) + ice_free_tx_ring(vsi->xdp_rings[i]); + return -ENOMEM; +} + +/** + * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI + * @vsi: VSI to set the bpf prog on + * @prog: the bpf prog pointer + */ +static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) +{ + struct bpf_prog *old_prog; + int i; + + old_prog = xchg(&vsi->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + ice_for_each_rxq(vsi, i) + WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); +} + +/** + * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP + * @vsi: VSI to bring up Tx rings used by XDP + * @prog: bpf program that will be assigned to VSI + * + * Return 0 on success and negative value on error + */ +int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) +{ + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + int xdp_rings_rem = vsi->num_xdp_txq; + struct ice_pf *pf = 
vsi->back; + struct ice_qs_cfg xdp_qs_cfg = { + .qs_mutex = &pf->avail_q_mutex, + .pf_map = pf->avail_txqs, + .pf_map_size = pf->max_pf_txqs, + .q_count = vsi->num_xdp_txq, + .scatter_count = ICE_MAX_SCATTER_TXQS, + .vsi_map = vsi->txq_map, + .vsi_map_offset = vsi->alloc_txq, + .mapping_mode = ICE_VSI_MAP_CONTIG + }; + enum ice_status status; + int i, v_idx; + + vsi->xdp_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_xdp_txq, + sizeof(*vsi->xdp_rings), GFP_KERNEL); + if (!vsi->xdp_rings) + return -ENOMEM; + + vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; + if (__ice_vsi_get_qs(&xdp_qs_cfg)) + goto err_map_xdp; + + if (ice_xdp_alloc_setup_rings(vsi)) + goto clear_xdp_rings; + + /* follow the logic from ice_vsi_map_rings_to_vectors */ + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; + int xdp_rings_per_v, q_id, q_base; + + xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, + vsi->num_q_vectors - v_idx); + q_base = vsi->num_xdp_txq - xdp_rings_rem; + + for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { + struct ice_ring *xdp_ring = vsi->xdp_rings[q_id]; + + xdp_ring->q_vector = q_vector; + xdp_ring->next = q_vector->tx.ring; + q_vector->tx.ring = xdp_ring; + } + xdp_rings_rem -= xdp_rings_per_v; + } + + /* omit the scheduler update if in reset path; XDP queues will be + * taken into account at the end of ice_vsi_rebuild, where + * ice_cfg_vsi_lan is being called + */ + if (ice_is_reset_in_progress(pf->state)) + return 0; + + /* tell the Tx scheduler that right now we have + * additional queues + */ + for (i = 0; i < vsi->tc_cfg.numtc; i++) + max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; + + status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); + if (status) { + dev_err(&pf->pdev->dev, + "Failed VSI LAN queue config for XDP, error:%d\n", + status); + goto clear_xdp_rings; + } + ice_vsi_assign_bpf_prog(vsi, prog); + + return 0; +clear_xdp_rings: + for (i = 0; i < vsi->num_xdp_txq; i++) + if (vsi->xdp_rings[i]) { + kfree_rcu(vsi->xdp_rings[i], rcu); + vsi->xdp_rings[i] = NULL; + } + +err_map_xdp: + mutex_lock(&pf->avail_q_mutex); + for (i = 0; i < vsi->num_xdp_txq; i++) { + clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); + vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; + } + mutex_unlock(&pf->avail_q_mutex); + + devm_kfree(&pf->pdev->dev, vsi->xdp_rings); + return -ENOMEM; +} + +/** + * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings + * @vsi: VSI to remove XDP rings + * + * Detach XDP rings from irq vectors, clean up the PF bitmap and free + * resources + */ +int ice_destroy_xdp_rings(struct ice_vsi *vsi) +{ + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + struct ice_pf *pf = vsi->back; + int i, v_idx; + + /* q_vectors are freed in reset path so there's no point in detaching + * rings; in case of rebuild being triggered not from reset reset bits + * in pf->state won't be set, so additionally check first q_vector + * against NULL + */ + if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) + goto free_qmap; + + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; + struct ice_ring *ring; + + ice_for_each_ring(ring, q_vector->tx) + if (!ring->tx_buf || !ice_ring_is_xdp(ring)) + break; + + /* restore the value of last node prior to XDP setup */ + q_vector->tx.ring = ring; + } + +free_qmap: + mutex_lock(&pf->avail_q_mutex); + for (i = 0; i < vsi->num_xdp_txq; i++) { + clear_bit(vsi->txq_map[i + vsi->alloc_txq], 
pf->avail_txqs); + vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; + } + mutex_unlock(&pf->avail_q_mutex); + + for (i = 0; i < vsi->num_xdp_txq; i++) + if (vsi->xdp_rings[i]) { + if (vsi->xdp_rings[i]->desc) + ice_free_tx_ring(vsi->xdp_rings[i]); + kfree_rcu(vsi->xdp_rings[i], rcu); + vsi->xdp_rings[i] = NULL; + } + + devm_kfree(&pf->pdev->dev, vsi->xdp_rings); + vsi->xdp_rings = NULL; + + if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) + return 0; + + ice_vsi_assign_bpf_prog(vsi, NULL); + + /* notify Tx scheduler that we destroyed XDP queues and bring + * back the old number of child nodes + */ + for (i = 0; i < vsi->tc_cfg.numtc; i++) + max_txqs[i] = vsi->num_txq; + + return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); +} + +/** + * ice_xdp_setup_prog - Add or remove XDP eBPF program + * @vsi: VSI to setup XDP for + * @prog: XDP program + * @extack: netlink extended ack + */ +static int +ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; + bool if_running = netif_running(vsi->netdev); + int ret = 0, xdp_ring_err = 0; + + if (frame_size > vsi->rx_buf_len) { + NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); + return -EOPNOTSUPP; + } + + /* need to stop netdev while setting up the program for Rx rings */ + if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) { + ret = ice_down(vsi); + if (ret) { + NL_SET_ERR_MSG_MOD(extack, + "Preparing device for XDP attach failed"); + return ret; + } + } + + if (!ice_is_xdp_ena_vsi(vsi) && prog) { + vsi->num_xdp_txq = vsi->alloc_txq; + xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); + if (xdp_ring_err) + NL_SET_ERR_MSG_MOD(extack, + "Setting up XDP Tx resources failed"); + } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { + xdp_ring_err = ice_destroy_xdp_rings(vsi); + if (xdp_ring_err) + NL_SET_ERR_MSG_MOD(extack, + "Freeing XDP Tx resources failed"); + } else { + ice_vsi_assign_bpf_prog(vsi, prog); + } + + if (if_running) + ret = ice_up(vsi); + + if (!ret && prog && vsi->xsk_umems) { + int i; + + ice_for_each_rxq(vsi, i) { + struct ice_ring *rx_ring = vsi->rx_rings[i]; + + if (rx_ring->xsk_umem) + napi_schedule(&rx_ring->q_vector->napi); + } + } + + return (ret || xdp_ring_err) ? -ENOMEM : 0; +} + +/** + * ice_xdp - implements XDP handler + * @dev: netdevice + * @xdp: XDP command + */ +static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_vsi *vsi = np->vsi; + + if (vsi->type != ICE_VSI_PF) { + NL_SET_ERR_MSG_MOD(xdp->extack, + "XDP can be loaded only on PF VSI"); + return -EINVAL; + } + + switch (xdp->command) { + case XDP_SETUP_PROG: + return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); + case XDP_QUERY_PROG: + xdp->prog_id = vsi->xdp_prog ? 
vsi->xdp_prog->aux->id : 0; + return 0; + case XDP_SETUP_XSK_UMEM: + return ice_xsk_umem_setup(vsi, xdp->xsk.umem, + xdp->xsk.queue_id); + default: + return -EINVAL; + } +} + +/** * ice_ena_misc_vector - enable the non-queue interrupts * @pf: board private structure */ @@ -2219,6 +2514,11 @@ static int ice_setup_pf_sw(struct ice_pf *pf) status = -ENODEV; goto unroll_vsi_setup; } + /* netdev has to be configured before setting frame size */ + ice_vsi_cfg_frame_size(vsi); + + /* Setup DCB netlink interface */ + ice_dcbnl_setup(vsi); /* registering the NAPI handler requires both the queues and * netdev to be created, which are done in ice_pf_vsi_setup() @@ -2300,6 +2600,7 @@ static void ice_deinit_pf(struct ice_pf *pf) { ice_service_task_stop(pf); mutex_destroy(&pf->sw_mutex); + mutex_destroy(&pf->tc_mutex); mutex_destroy(&pf->avail_q_mutex); if (pf->avail_txqs) { @@ -2349,6 +2650,7 @@ static int ice_init_pf(struct ice_pf *pf) ice_set_pf_caps(pf); mutex_init(&pf->sw_mutex); + mutex_init(&pf->tc_mutex); /* setup service timer and periodic service task */ timer_setup(&pf->serv_tmr, ice_service_timer, 0); @@ -2598,7 +2900,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); break; case ICE_ERR_AQ_ERROR: - switch (hw->adminq.sq_last_status) { + switch (hw->pkg_dwnld_status) { case ICE_AQ_RC_ENOSEC: case ICE_AQ_RC_EBADSIG: dev_err(dev, @@ -2831,6 +3133,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) hw = &pf->hw; hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; + pci_save_state(pdev); + hw->back = pf; hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; @@ -2976,6 +3280,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ice_cfg_lldp_mib_change(&pf->hw, true); } + /* print PCI link speed and width */ + pcie_print_link_status(pf->pdev); + return 0; err_alloc_sw_unroll: @@ -3027,12 +3334,13 @@ static void ice_remove(struct pci_dev *pdev) } ice_deinit_pf(pf); ice_deinit_hw(&pf->hw); - ice_clear_interrupt_scheme(pf); /* Issue a PFR as part of the prescribed driver unload flow. Do not * do it via ice_schedule_reset() since there is no need to rebuild * and the service task is already stopped. 
*/ ice_reset(&pf->hw, ICE_RESET_PFR); + pci_wait_for_pending_transaction(pdev); + ice_clear_interrupt_scheme(pf); pci_disable_pcie_error_reporting(pdev); } @@ -3347,6 +3655,48 @@ static void ice_set_rx_mode(struct net_device *netdev) } /** + * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate + * @netdev: network interface device structure + * @queue_index: Queue ID + * @maxrate: maximum bandwidth in Mbps + */ +static int +ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + enum ice_status status; + u16 q_handle; + u8 tc; + + /* Validate maxrate requested is within permitted range */ + if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { + netdev_err(netdev, + "Invalid max rate %d specified for the queue %d\n", + maxrate, queue_index); + return -EINVAL; + } + + q_handle = vsi->tx_rings[queue_index]->q_handle; + tc = ice_dcb_get_tc(vsi, queue_index); + + /* Set BW back to default, when user set maxrate to 0 */ + if (!maxrate) + status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, + q_handle, ICE_MAX_BW); + else + status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, + q_handle, ICE_MAX_BW, maxrate * 1000); + if (status) { + netdev_err(netdev, + "Unable to set Tx max rate, error %d\n", status); + return -EIO; + } + + return 0; +} + +/** * ice_fdb_add - add an entry to the hardware database * @ndm: the input from the stack * @tb: pointer to array of nladdr (unused) @@ -3426,6 +3776,7 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; int ret = 0; /* Don't set any netdev advanced features with device in Safe Mode */ @@ -3435,6 +3786,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) return ret; } + /* Do not change setting during reset */ + if (ice_is_reset_in_progress(pf->state)) { + dev_err(&vsi->back->pdev->dev, + "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); + return -EBUSY; + } + /* Multiple features can be changed in one call so keep features in * separate if/else statements to guarantee each feature is checked */ @@ -3505,6 +3863,8 @@ int ice_vsi_cfg(struct ice_vsi *vsi) ice_vsi_cfg_dcb_rings(vsi); err = ice_vsi_cfg_lan_txqs(vsi); + if (!err && ice_is_xdp_ena_vsi(vsi)) + err = ice_vsi_cfg_xdp_txqs(vsi); if (!err) err = ice_vsi_cfg_rxqs(vsi); @@ -3920,6 +4280,13 @@ int ice_down(struct ice_vsi *vsi) netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", vsi->vsi_num, tx_err); + if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { + tx_err = ice_vsi_stop_xdp_tx_rings(vsi); + if (tx_err) + netdev_err(vsi->netdev, + "Failed stop XDP rings, VSI %d error %d\n", + vsi->vsi_num, tx_err); + } rx_err = ice_vsi_stop_rx_rings(vsi); if (rx_err) @@ -3970,8 +4337,13 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) } ice_for_each_txq(vsi, i) { - vsi->tx_rings[i]->netdev = vsi->netdev; - err = ice_setup_tx_ring(vsi->tx_rings[i]); + struct ice_ring *ring = vsi->tx_rings[i]; + + if (!ring) + return -EINVAL; + + ring->netdev = vsi->netdev; + err = ice_setup_tx_ring(ring); if (err) break; } @@ -3996,8 +4368,13 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) } ice_for_each_rxq(vsi, i) { - vsi->rx_rings[i]->netdev = vsi->netdev; - err = ice_setup_rx_ring(vsi->rx_rings[i]); + struct ice_ring *ring = vsi->rx_rings[i]; + + if (!ring) + return -EINVAL; + + 
ring->netdev = vsi->netdev; + err = ice_setup_rx_ring(ring); if (err) break; } @@ -4089,54 +4466,6 @@ static void ice_vsi_release_all(struct ice_pf *pf) } /** - * ice_ena_vsi - resume a VSI - * @vsi: the VSI being resume - * @locked: is the rtnl_lock already held - */ -static int ice_ena_vsi(struct ice_vsi *vsi, bool locked) -{ - int err = 0; - - if (!test_bit(__ICE_NEEDS_RESTART, vsi->state)) - return 0; - - clear_bit(__ICE_NEEDS_RESTART, vsi->state); - - if (vsi->netdev && vsi->type == ICE_VSI_PF) { - if (netif_running(vsi->netdev)) { - if (!locked) - rtnl_lock(); - - err = ice_open(vsi->netdev); - - if (!locked) - rtnl_unlock(); - } - } - - return err; -} - -/** - * ice_pf_ena_all_vsi - Resume all VSIs on a PF - * @pf: the PF - * @locked: is the rtnl_lock already held - */ -#ifdef CONFIG_DCB -int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) -{ - int v; - - ice_for_each_vsi(pf, v) - if (pf->vsi[v]) - if (ice_ena_vsi(pf->vsi[v], locked)) - return -EIO; - - return 0; -} -#endif /* CONFIG_DCB */ - -/** * ice_vsi_rebuild_by_type - Rebuild VSI of a given type * @pf: pointer to the PF instance * @type: VSI type to rebuild @@ -4158,8 +4487,8 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) err = ice_vsi_rebuild(vsi); if (err) { dev_err(&pf->pdev->dev, - "rebuild VSI failed, err %d, VSI index %d, type %d\n", - err, vsi->idx, type); + "rebuild VSI failed, err %d, VSI index %d, type %s\n", + err, vsi->idx, ice_vsi_type_str(type)); return err; } @@ -4167,8 +4496,8 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) status = ice_replay_vsi(&pf->hw, vsi->idx); if (status) { dev_err(&pf->pdev->dev, - "replay VSI failed, status %d, VSI index %d, type %d\n", - status, vsi->idx, type); + "replay VSI failed, status %d, VSI index %d, type %s\n", + status, vsi->idx, ice_vsi_type_str(type)); return -EIO; } @@ -4181,13 +4510,13 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) err = ice_ena_vsi(vsi, false); if (err) { dev_err(&pf->pdev->dev, - "enable VSI failed, err %d, VSI index %d, type %d\n", - err, vsi->idx, type); + "enable VSI failed, err %d, VSI index %d, type %s\n", + err, vsi->idx, ice_vsi_type_str(type)); return err; } - dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %d\n", - vsi->idx, type); + dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %s\n", + vsi->idx, ice_vsi_type_str(type)); } return 0; @@ -4329,6 +4658,18 @@ clear_recovery: } /** + * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP + * @vsi: Pointer to VSI structure + */ +static int ice_max_xdp_frame_size(struct ice_vsi *vsi) +{ + if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) + return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; + else + return ICE_RXBUF_3072; +} + +/** * ice_change_mtu - NDO callback to change the MTU * @netdev: network interface device structure * @new_mtu: new value for maximum frame size @@ -4347,6 +4688,16 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) return 0; } + if (ice_is_xdp_ena_vsi(vsi)) { + int frame_size = ice_max_xdp_frame_size(vsi); + + if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { + netdev_err(netdev, "max MTU for XDP usage is %d\n", + frame_size - ICE_ETH_PKT_HDR_PAD); + return -EINVAL; + } + } + if (new_mtu < netdev->min_mtu) { netdev_err(netdev, "new MTU invalid. 
min_mtu is %d\n", netdev->min_mtu); @@ -4864,6 +5215,7 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = ice_change_mtu, .ndo_get_stats64 = ice_get_stats64, + .ndo_set_tx_maxrate = ice_set_tx_maxrate, .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, .ndo_set_vf_mac = ice_set_vf_mac, .ndo_get_vf_config = ice_get_vf_cfg, @@ -4878,4 +5230,7 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_fdb_add = ice_fdb_add, .ndo_fdb_del = ice_fdb_del, .ndo_tx_timeout = ice_tx_timeout, + .ndo_bpf = ice_xdp, + .ndo_xdp_xmit = ice_xdp_xmit, + .ndo_xsk_wakeup = ice_xsk_wakeup, }; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index bcb431f1bd92..57c73f613f32 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -219,8 +219,7 @@ static void ice_release_nvm(struct ice_hw *hw) * * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. */ -static enum ice_status -ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) +enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) { enum ice_status status; @@ -242,9 +241,10 @@ ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) */ enum ice_status ice_init_nvm(struct ice_hw *hw) { + u16 oem_hi, oem_lo, boot_cfg_tlv, boot_cfg_tlv_len; struct ice_nvm_info *nvm = &hw->nvm; u16 eetrack_lo, eetrack_hi; - enum ice_status status = 0; + enum ice_status status; u32 fla, gens_stat; u8 sr_size; @@ -261,15 +261,15 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) fla = rd32(hw, GLNVM_FLA); if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */ nvm->blank_nvm_mode = false; - } else { /* Blank programming mode */ + } else { + /* Blank programming mode */ nvm->blank_nvm_mode = true; - status = ICE_ERR_NVM_BLANK_MODE; ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n"); - return status; + return ICE_ERR_NVM_BLANK_MODE; } - status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &hw->nvm.ver); + status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &nvm->ver); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read DEV starter version.\n"); @@ -287,9 +287,42 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) return status; } - hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; + nvm->eetrack = (eetrack_hi << 16) | eetrack_lo; - return status; + status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len, + ICE_SR_BOOT_CFG_PTR); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read Boot Configuration Block TLV.\n"); + return status; + } + + /* Boot Configuration Block must have length at least 2 words + * (Combo Image Version High and Combo Image Version Low) + */ + if (boot_cfg_tlv_len < 2) { + ice_debug(hw, ICE_DBG_INIT, + "Invalid Boot Configuration Block TLV size.\n"); + return ICE_ERR_INVAL_SIZE; + } + + status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF), + &oem_hi); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER hi.\n"); + return status; + } + + status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF + 1), + &oem_lo); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER lo.\n"); + return status; + } + + nvm->oem_ver = ((u32)oem_hi << 16) | oem_lo; + + return 0; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h new file mode 100644 index 000000000000..a9fa011c22c6 --- /dev/null +++ 
b/drivers/net/ethernet/intel/ice/ice_nvm.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Intel Corporation. */ + +#ifndef _ICE_NVM_H_ +#define _ICE_NVM_H_ + +enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); +#endif /* _ICE_NVM_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index fc624b73d05d..84f609996ed5 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -411,6 +411,27 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, } /** + * ice_aq_cfg_sched_elems - configures scheduler elements + * @hw: pointer to the HW struct + * @elems_req: number of elements to configure + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @elems_cfgd: returns total number of elements configured + * @cd: pointer to command details structure or NULL + * + * Configure scheduling elements (0x0403) + */ +static enum ice_status +ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, + struct ice_aqc_conf_elem *buf, u16 buf_size, + u16 *elems_cfgd, struct ice_sq_cd *cd) +{ + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems, + elems_req, (void *)buf, buf_size, + elems_cfgd, cd); +} + +/** * ice_aq_suspend_sched_elems - suspend scheduler elements * @hw: pointer to the HW struct * @elems_req: number of elements to suspend @@ -557,6 +578,149 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) } /** + * ice_aq_rl_profile - performs a rate limiting task + * @hw: pointer to the HW struct + * @opcode:opcode for add, query, or remove profile(s) + * @num_profiles: the number of profiles + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @num_processed: number of processed add or remove profile(s) to return + * @cd: pointer to command details structure + * + * RL profile function to add, query, or remove profile(s) + */ +static enum ice_status +ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, + u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf, + u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd) +{ + struct ice_aqc_rl_profile *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.rl_profile; + + ice_fill_dflt_direct_cmd_desc(&desc, opcode); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + cmd->num_profiles = cpu_to_le16(num_profiles); + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status && num_processed) + *num_processed = le16_to_cpu(cmd->num_processed); + return status; +} + +/** + * ice_aq_add_rl_profile - adds rate limiting profile(s) + * @hw: pointer to the HW struct + * @num_profiles: the number of profile(s) to be add + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @num_profiles_added: total number of profiles added to return + * @cd: pointer to command details structure + * + * Add RL profile (0x0410) + */ +static enum ice_status +ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, + struct ice_aqc_rl_profile_generic_elem *buf, + u16 buf_size, u16 *num_profiles_added, + struct ice_sq_cd *cd) +{ + return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, + num_profiles, buf, + buf_size, num_profiles_added, cd); +} + +/** + * ice_aq_remove_rl_profile - removes RL profile(s) + * @hw: pointer to the HW struct + * @num_profiles: the number of profile(s) to remove + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @num_profiles_removed: total 
number of profiles removed to return + * @cd: pointer to command details structure or NULL + * + * Remove RL profile (0x0415) + */ +static enum ice_status +ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, + struct ice_aqc_rl_profile_generic_elem *buf, + u16 buf_size, u16 *num_profiles_removed, + struct ice_sq_cd *cd) +{ + return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles, + num_profiles, buf, + buf_size, num_profiles_removed, cd); +} + +/** + * ice_sched_del_rl_profile - remove RL profile + * @hw: pointer to the HW struct + * @rl_info: rate limit profile information + * + * If the profile ID is not referenced anymore, it removes profile ID with + * its associated parameters from HW DB,and locally. The caller needs to + * hold scheduler lock. + */ +static enum ice_status +ice_sched_del_rl_profile(struct ice_hw *hw, + struct ice_aqc_rl_profile_info *rl_info) +{ + struct ice_aqc_rl_profile_generic_elem *buf; + u16 num_profiles_removed; + enum ice_status status; + u16 num_profiles = 1; + + if (rl_info->prof_id_ref != 0) + return ICE_ERR_IN_USE; + + /* Safe to remove profile ID */ + buf = (struct ice_aqc_rl_profile_generic_elem *) + &rl_info->profile; + status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf), + &num_profiles_removed, NULL); + if (status || num_profiles_removed != num_profiles) + return ICE_ERR_CFG; + + /* Delete stale entry now */ + list_del(&rl_info->list_entry); + devm_kfree(ice_hw_to_dev(hw), rl_info); + return status; +} + +/** + * ice_sched_clear_rl_prof - clears RL prof entries + * @pi: port information structure + * + * This function removes all RL profile from HW as well as from SW DB. + */ +static void ice_sched_clear_rl_prof(struct ice_port_info *pi) +{ + u16 ln; + + for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) { + struct ice_aqc_rl_profile_info *rl_prof_elem; + struct ice_aqc_rl_profile_info *rl_prof_tmp; + + list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, + &pi->rl_prof_list[ln], list_entry) { + struct ice_hw *hw = pi->hw; + enum ice_status status; + + rl_prof_elem->prof_id_ref = 0; + status = ice_sched_del_rl_profile(hw, rl_prof_elem); + if (status) { + ice_debug(hw, ICE_DBG_SCHED, + "Remove rl profile failed\n"); + /* On error, free mem required */ + list_del(&rl_prof_elem->list_entry); + devm_kfree(ice_hw_to_dev(hw), rl_prof_elem); + } + } + } +} + +/** * ice_sched_clear_agg - clears the aggregator related information * @hw: pointer to the hardware structure * @@ -592,6 +756,8 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi) { if (!pi) return; + /* remove RL profiles related lists */ + ice_sched_clear_rl_prof(pi); if (pi->root) { ice_free_sched_node(pi, pi->root); pi->root = NULL; @@ -1014,6 +1180,8 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) /* initialize the port for handling the scheduler tree */ pi->port_state = ICE_SCHED_PORT_STATE_READY; mutex_init(&pi->sched_lock); + for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) + INIT_LIST_HEAD(&pi->rl_prof_list[i]); err_init_port: if (status && pi->root) { @@ -1036,7 +1204,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) struct ice_aqc_query_txsched_res_resp *buf; enum ice_status status = 0; __le16 max_sibl; - u8 i; + u16 i; if (hw->layer_info) return status; @@ -1062,8 +1230,8 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) * and so on. This array will be populated from root (index 0) to * qgroup layer 7. Leaf node has no children. 
*/ - for (i = 0; i < hw->num_tx_sched_layers; i++) { - max_sibl = buf->layer_props[i].max_sibl_grp_sz; + for (i = 0; i < hw->num_tx_sched_layers - 1; i++) { + max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz; hw->max_children[i] = le16_to_cpu(max_sibl); } @@ -1670,3 +1838,1095 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN); } + +/** + * ice_sched_rm_unused_rl_prof - remove unused RL profile + * @pi: port information structure + * + * This function removes unused rate limit profiles from the HW and + * SW DB. The caller needs to hold scheduler lock. + */ +static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi) +{ + u16 ln; + + for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) { + struct ice_aqc_rl_profile_info *rl_prof_elem; + struct ice_aqc_rl_profile_info *rl_prof_tmp; + + list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, + &pi->rl_prof_list[ln], list_entry) { + if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem)) + ice_debug(pi->hw, ICE_DBG_SCHED, + "Removed rl profile\n"); + } + } +} + +/** + * ice_sched_update_elem - update element + * @hw: pointer to the HW struct + * @node: pointer to node + * @info: node info to update + * + * It updates the HW DB, and local SW DB of node. It updates the scheduling + * parameters of node from argument info data buffer (Info->data buf) and + * returns success or error on config sched element failure. The caller + * needs to hold scheduler lock. + */ +static enum ice_status +ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, + struct ice_aqc_txsched_elem_data *info) +{ + struct ice_aqc_conf_elem buf; + enum ice_status status; + u16 elem_cfgd = 0; + u16 num_elems = 1; + + buf.generic[0] = *info; + /* Parent TEID is reserved field in this aq call */ + buf.generic[0].parent_teid = 0; + /* Element type is reserved field in this aq call */ + buf.generic[0].data.elem_type = 0; + /* Flags is reserved field in this aq call */ + buf.generic[0].data.flags = 0; + + /* Update HW DB */ + /* Configure element node */ + status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf), + &elem_cfgd, NULL); + if (status || elem_cfgd != num_elems) { + ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n"); + return ICE_ERR_CFG; + } + + /* Config success case */ + /* Now update local SW DB */ + /* Only copy the data portion of info buffer */ + node->info.data = info->data; + return status; +} + +/** + * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params + * @hw: pointer to the HW struct + * @node: sched node to configure + * @rl_type: rate limit type CIR, EIR, or shared + * @bw_alloc: BW weight/allocation + * + * This function configures node element's BW allocation. 
+ */ +static enum ice_status +ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, + enum ice_rl_type rl_type, u8 bw_alloc) +{ + struct ice_aqc_txsched_elem_data buf; + struct ice_aqc_txsched_elem *data; + enum ice_status status; + + buf = node->info; + data = &buf.data; + if (rl_type == ICE_MIN_BW) { + data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; + data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc); + } else if (rl_type == ICE_MAX_BW) { + data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; + data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc); + } else { + return ICE_ERR_PARAM; + } + + /* Configure element */ + status = ice_sched_update_elem(hw, node, &buf); + return status; +} + +/** + * ice_set_clear_cir_bw - set or clear CIR BW + * @bw_t_info: bandwidth type information structure + * @bw: bandwidth in Kbps - Kilo bits per sec + * + * Save or clear CIR bandwidth (BW) in the passed param bw_t_info. + */ +static void +ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) +{ + if (bw == ICE_SCHED_DFLT_BW) { + clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); + bw_t_info->cir_bw.bw = 0; + } else { + /* Save type of BW information */ + set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); + bw_t_info->cir_bw.bw = bw; + } +} + +/** + * ice_set_clear_eir_bw - set or clear EIR BW + * @bw_t_info: bandwidth type information structure + * @bw: bandwidth in Kbps - Kilo bits per sec + * + * Save or clear EIR bandwidth (BW) in the passed param bw_t_info. + */ +static void +ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) +{ + if (bw == ICE_SCHED_DFLT_BW) { + clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); + bw_t_info->eir_bw.bw = 0; + } else { + /* EIR BW and Shared BW profiles are mutually exclusive and + * hence only one of them may be set for any given element. + * First clear earlier saved shared BW information. + */ + clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); + bw_t_info->shared_bw = 0; + /* save EIR BW information */ + set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); + bw_t_info->eir_bw.bw = bw; + } +} + +/** + * ice_set_clear_shared_bw - set or clear shared BW + * @bw_t_info: bandwidth type information structure + * @bw: bandwidth in Kbps - Kilo bits per sec + * + * Save or clear shared bandwidth (BW) in the passed param bw_t_info. + */ +static void +ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw) +{ + if (bw == ICE_SCHED_DFLT_BW) { + clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); + bw_t_info->shared_bw = 0; + } else { + /* EIR BW and Shared BW profiles are mutually exclusive and + * hence only one of them may be set for any given element. + * First clear earlier saved EIR BW information. + */ + clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); + bw_t_info->eir_bw.bw = 0; + /* save shared BW information */ + set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); + bw_t_info->shared_bw = bw; + } +} + +/** + * ice_sched_calc_wakeup - calculate RL profile wakeup parameter + * @bw: bandwidth in Kbps + * + * This function calculates the wakeup parameter of RL profile. 
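+ * The 16-bit result encodes ICE_RL_PROF_FREQUENCY / bytes-per-second:
+ * if the integer part exceeds 63, bit 15 is set and the integer value is
+ * stored directly in the lower bits; otherwise bits 14:9 hold the integer
+ * part and bits 8:0 hold the fraction in 1/512 (ICE_RL_PROF_FRACTION) units.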
+ */ +static u16 ice_sched_calc_wakeup(s32 bw) +{ + s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f; + s32 wakeup_f_int; + u16 wakeup = 0; + + /* Get the wakeup integer value */ + bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE); + wakeup_int = div64_long(ICE_RL_PROF_FREQUENCY, bytes_per_sec); + if (wakeup_int > 63) { + wakeup = (u16)((1 << 15) | wakeup_int); + } else { + /* Calculate fraction value up to 4 decimals + * Convert Integer value to a constant multiplier + */ + wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int; + wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER * + ICE_RL_PROF_FREQUENCY, + bytes_per_sec); + + /* Get Fraction value */ + wakeup_f = wakeup_a - wakeup_b; + + /* Round up the Fractional value via Ceil(Fractional value) */ + if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2)) + wakeup_f += 1; + + wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION, + ICE_RL_PROF_MULTIPLIER); + wakeup |= (u16)(wakeup_int << 9); + wakeup |= (u16)(0x1ff & wakeup_f_int); + } + + return wakeup; +} + +/** + * ice_sched_bw_to_rl_profile - convert BW to profile parameters + * @bw: bandwidth in Kbps + * @profile: profile parameters to return + * + * This function converts the BW to profile structure format. + */ +static enum ice_status +ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile) +{ + enum ice_status status = ICE_ERR_PARAM; + s64 bytes_per_sec, ts_rate, mv_tmp; + bool found = false; + s32 encode = 0; + s64 mv = 0; + s32 i; + + /* Bw settings range is from 0.5Mb/sec to 100Gb/sec */ + if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW) + return status; + + /* Bytes per second from Kbps */ + bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE); + + /* encode is 6 bits but really useful are 5 bits */ + for (i = 0; i < 64; i++) { + u64 pow_result = BIT_ULL(i); + + ts_rate = div64_long((s64)ICE_RL_PROF_FREQUENCY, + pow_result * ICE_RL_PROF_TS_MULTIPLIER); + if (ts_rate <= 0) + continue; + + /* Multiplier value */ + mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER, + ts_rate); + + /* Round to the nearest ICE_RL_PROF_MULTIPLIER */ + mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER); + + /* First multiplier value greater than the given + * accuracy bytes + */ + if (mv > ICE_RL_PROF_ACCURACY_BYTES) { + encode = i; + found = true; + break; + } + } + if (found) { + u16 wm; + + wm = ice_sched_calc_wakeup(bw); + profile->rl_multiply = cpu_to_le16(mv); + profile->wake_up_calc = cpu_to_le16(wm); + profile->rl_encode = cpu_to_le16(encode); + status = 0; + } else { + status = ICE_ERR_DOES_NOT_EXIST; + } + + return status; +} + +/** + * ice_sched_add_rl_profile - add RL profile + * @pi: port information structure + * @rl_type: type of rate limit BW - min, max, or shared + * @bw: bandwidth in Kbps - Kilo bits per sec + * @layer_num: specifies in which layer to create profile + * + * This function first checks the existing list for corresponding BW + * parameter. If it exists, it returns the associated profile otherwise + * it creates a new rate limit profile for requested BW, and adds it to + * the HW DB and local list. It returns the new profile or null on error. + * The caller needs to hold the scheduler lock. 
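+ * Profiles are matched within the layer on profile type and BW only, so
+ * nodes that share a BW value also share a single HW profile; the entry's
+ * prof_id_ref counter tracks how many nodes currently use it.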
+ */ +static struct ice_aqc_rl_profile_info * +ice_sched_add_rl_profile(struct ice_port_info *pi, + enum ice_rl_type rl_type, u32 bw, u8 layer_num) +{ + struct ice_aqc_rl_profile_generic_elem *buf; + struct ice_aqc_rl_profile_info *rl_prof_elem; + u16 profiles_added = 0, num_profiles = 1; + enum ice_status status; + struct ice_hw *hw; + u8 profile_type; + + if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) + return NULL; + switch (rl_type) { + case ICE_MIN_BW: + profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; + break; + case ICE_MAX_BW: + profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; + break; + case ICE_SHARED_BW: + profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; + break; + default: + return NULL; + } + + if (!pi) + return NULL; + hw = pi->hw; + list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], + list_entry) + if (rl_prof_elem->profile.flags == profile_type && + rl_prof_elem->bw == bw) + /* Return existing profile ID info */ + return rl_prof_elem; + + /* Create new profile ID */ + rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem), + GFP_KERNEL); + + if (!rl_prof_elem) + return NULL; + + status = ice_sched_bw_to_rl_profile(bw, &rl_prof_elem->profile); + if (status) + goto exit_add_rl_prof; + + rl_prof_elem->bw = bw; + /* layer_num is zero relative, and fw expects level from 1 to 9 */ + rl_prof_elem->profile.level = layer_num + 1; + rl_prof_elem->profile.flags = profile_type; + rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size); + + /* Create new entry in HW DB */ + buf = (struct ice_aqc_rl_profile_generic_elem *) + &rl_prof_elem->profile; + status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf), + &profiles_added, NULL); + if (status || profiles_added != num_profiles) + goto exit_add_rl_prof; + + /* Good entry - add in the list */ + rl_prof_elem->prof_id_ref = 0; + list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]); + return rl_prof_elem; + +exit_add_rl_prof: + devm_kfree(ice_hw_to_dev(hw), rl_prof_elem); + return NULL; +} + +/** + * ice_sched_cfg_node_bw_lmt - configure node sched params + * @hw: pointer to the HW struct + * @node: sched node to configure + * @rl_type: rate limit type CIR, EIR, or shared + * @rl_prof_id: rate limit profile ID + * + * This function configures node element's BW limit. 
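+ * For the shared type, passing ICE_SCHED_NO_SHARED_RL_PROF_ID removes the
+ * SRL profile and re-enables the default EIR profile; otherwise EIR and
+ * SRL remain mutually exclusive on the element.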
+ */ +static enum ice_status +ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, + enum ice_rl_type rl_type, u16 rl_prof_id) +{ + struct ice_aqc_txsched_elem_data buf; + struct ice_aqc_txsched_elem *data; + + buf = node->info; + data = &buf.data; + switch (rl_type) { + case ICE_MIN_BW: + data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; + data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); + break; + case ICE_MAX_BW: + /* EIR BW and Shared BW profiles are mutually exclusive and + * hence only one of them may be set for any given element + */ + if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) + return ICE_ERR_CFG; + data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; + data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); + break; + case ICE_SHARED_BW: + /* Check for removing shared BW */ + if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) { + /* remove shared profile */ + data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED; + data->srl_id = 0; /* clear SRL field */ + + /* enable back EIR to default profile */ + data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; + data->eir_bw.bw_profile_idx = + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + break; + } + /* EIR BW and Shared BW profiles are mutually exclusive and + * hence only one of them may be set for any given element + */ + if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) && + (le16_to_cpu(data->eir_bw.bw_profile_idx) != + ICE_SCHED_DFLT_RL_PROF_ID)) + return ICE_ERR_CFG; + /* EIR BW is set to default, disable it */ + data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR; + /* Okay to enable shared BW now */ + data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED; + data->srl_id = cpu_to_le16(rl_prof_id); + break; + default: + /* Unknown rate limit type */ + return ICE_ERR_PARAM; + } + + /* Configure element */ + return ice_sched_update_elem(hw, node, &buf); +} + +/** + * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID + * @node: sched node + * @rl_type: rate limit type + * + * If existing profile matches, it returns the corresponding rate + * limit profile ID, otherwise it returns an invalid ID as error. + */ +static u16 +ice_sched_get_node_rl_prof_id(struct ice_sched_node *node, + enum ice_rl_type rl_type) +{ + u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID; + struct ice_aqc_txsched_elem *data; + + data = &node->info.data; + switch (rl_type) { + case ICE_MIN_BW: + if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR) + rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx); + break; + case ICE_MAX_BW: + if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR) + rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx); + break; + case ICE_SHARED_BW: + if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) + rl_prof_id = le16_to_cpu(data->srl_id); + break; + default: + break; + } + + return rl_prof_id; +} + +/** + * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer + * @pi: port information structure + * @rl_type: type of rate limit BW - min, max, or shared + * @layer_index: layer index + * + * This function returns requested profile creation layer. 
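+ * CIR and EIR profiles can only be created on the requested layer; for
+ * SRL, the layer above or below is tried when the requested layer does
+ * not support SRL profiles.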
+ */ +static u8 +ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type, + u8 layer_index) +{ + struct ice_hw *hw = pi->hw; + + if (layer_index >= hw->num_tx_sched_layers) + return ICE_SCHED_INVAL_LAYER_NUM; + switch (rl_type) { + case ICE_MIN_BW: + if (hw->layer_info[layer_index].max_cir_rl_profiles) + return layer_index; + break; + case ICE_MAX_BW: + if (hw->layer_info[layer_index].max_eir_rl_profiles) + return layer_index; + break; + case ICE_SHARED_BW: + /* if current layer doesn't support SRL profile creation + * then try a layer up or down. + */ + if (hw->layer_info[layer_index].max_srl_profiles) + return layer_index; + else if (layer_index < hw->num_tx_sched_layers - 1 && + hw->layer_info[layer_index + 1].max_srl_profiles) + return layer_index + 1; + else if (layer_index > 0 && + hw->layer_info[layer_index - 1].max_srl_profiles) + return layer_index - 1; + break; + default: + break; + } + return ICE_SCHED_INVAL_LAYER_NUM; +} + +/** + * ice_sched_get_srl_node - get shared rate limit node + * @node: tree node + * @srl_layer: shared rate limit layer + * + * This function returns SRL node to be used for shared rate limit purpose. + * The caller needs to hold scheduler lock. + */ +static struct ice_sched_node * +ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer) +{ + if (srl_layer > node->tx_sched_layer) + return node->children[0]; + else if (srl_layer < node->tx_sched_layer) + /* Node can't be created without a parent. It will always + * have a valid parent except root node. + */ + return node->parent; + else + return node; +} + +/** + * ice_sched_rm_rl_profile - remove RL profile ID + * @pi: port information structure + * @layer_num: layer number where profiles are saved + * @profile_type: profile type like EIR, CIR, or SRL + * @profile_id: profile ID to remove + * + * This function removes rate limit profile from layer 'layer_num' of type + * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold + * scheduler lock. + */ +static enum ice_status +ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, + u16 profile_id) +{ + struct ice_aqc_rl_profile_info *rl_prof_elem; + enum ice_status status = 0; + + if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) + return ICE_ERR_PARAM; + /* Check the existing list for RL profile */ + list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], + list_entry) + if (rl_prof_elem->profile.flags == profile_type && + le16_to_cpu(rl_prof_elem->profile.profile_id) == + profile_id) { + if (rl_prof_elem->prof_id_ref) + rl_prof_elem->prof_id_ref--; + + /* Remove old profile ID from database */ + status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem); + if (status && status != ICE_ERR_IN_USE) + ice_debug(pi->hw, ICE_DBG_SCHED, + "Remove rl profile failed\n"); + break; + } + if (status == ICE_ERR_IN_USE) + status = 0; + return status; +} + +/** + * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default + * @pi: port information structure + * @node: pointer to node structure + * @rl_type: rate limit type min, max, or shared + * @layer_num: layer number where RL profiles are saved + * + * This function configures node element's BW rate limit profile ID of + * type CIR, EIR, or SRL to default. This function needs to be called + * with the scheduler lock held. 
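+ * Any non-default profile ID previously attached to the node is released
+ * from the layer's profile list after the element has been updated.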
+ */ +static enum ice_status +ice_sched_set_node_bw_dflt(struct ice_port_info *pi, + struct ice_sched_node *node, + enum ice_rl_type rl_type, u8 layer_num) +{ + enum ice_status status; + struct ice_hw *hw; + u8 profile_type; + u16 rl_prof_id; + u16 old_id; + + hw = pi->hw; + switch (rl_type) { + case ICE_MIN_BW: + profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; + rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; + break; + case ICE_MAX_BW: + profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; + rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; + break; + case ICE_SHARED_BW: + profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; + /* No SRL is configured for default case */ + rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; + break; + default: + return ICE_ERR_PARAM; + } + /* Save existing RL prof ID for later clean up */ + old_id = ice_sched_get_node_rl_prof_id(node, rl_type); + /* Configure BW scheduling parameters */ + status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); + if (status) + return status; + + /* Remove stale RL profile ID */ + if (old_id == ICE_SCHED_DFLT_RL_PROF_ID || + old_id == ICE_SCHED_INVAL_PROF_ID) + return 0; + + return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id); +} + +/** + * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness + * @pi: port information structure + * @node: pointer to node structure + * @layer_num: layer number where rate limit profiles are saved + * @rl_type: rate limit type min, max, or shared + * @bw: bandwidth value + * + * This function prepares node element's bandwidth to SRL or EIR exclusively. + * EIR BW and Shared BW profiles are mutually exclusive and hence only one of + * them may be set for any given element. This function needs to be called + * with the scheduler lock held. + */ +static enum ice_status +ice_sched_set_eir_srl_excl(struct ice_port_info *pi, + struct ice_sched_node *node, + u8 layer_num, enum ice_rl_type rl_type, u32 bw) +{ + if (rl_type == ICE_SHARED_BW) { + /* SRL node passed in this case, it may be different node */ + if (bw == ICE_SCHED_DFLT_BW) + /* SRL being removed, ice_sched_cfg_node_bw_lmt() + * enables EIR to default. EIR is not set in this + * case, so no additional action is required. + */ + return 0; + + /* SRL being configured, set EIR to default here. + * ice_sched_cfg_node_bw_lmt() disables EIR when it + * configures SRL + */ + return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW, + layer_num); + } else if (rl_type == ICE_MAX_BW && + node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) { + /* Remove Shared profile. Set default shared BW call + * removes shared profile for a node. + */ + return ice_sched_set_node_bw_dflt(pi, node, + ICE_SHARED_BW, + layer_num); + } + return 0; +} + +/** + * ice_sched_set_node_bw - set node's bandwidth + * @pi: port information structure + * @node: tree node + * @rl_type: rate limit type min, max, or shared + * @bw: bandwidth in Kbps - Kilo bits per sec + * @layer_num: layer number + * + * This function adds new profile corresponding to requested BW, configures + * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile + * ID from local database. The caller needs to hold scheduler lock. 
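+ * The reference count of the old profile ID (if different from the new
+ * one) is dropped, and the profile itself is removed from HW once no node
+ * references it any more.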
+ */ +static enum ice_status +ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, + enum ice_rl_type rl_type, u32 bw, u8 layer_num) +{ + struct ice_aqc_rl_profile_info *rl_prof_info; + enum ice_status status = ICE_ERR_PARAM; + struct ice_hw *hw = pi->hw; + u16 old_id, rl_prof_id; + + rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num); + if (!rl_prof_info) + return status; + + rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id); + + /* Save existing RL prof ID for later clean up */ + old_id = ice_sched_get_node_rl_prof_id(node, rl_type); + /* Configure BW scheduling parameters */ + status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); + if (status) + return status; + + /* New changes has been applied */ + /* Increment the profile ID reference count */ + rl_prof_info->prof_id_ref++; + + /* Check for old ID removal */ + if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) || + old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id) + return 0; + + return ice_sched_rm_rl_profile(pi, layer_num, + rl_prof_info->profile.flags, + old_id); +} + +/** + * ice_sched_set_node_bw_lmt - set node's BW limit + * @pi: port information structure + * @node: tree node + * @rl_type: rate limit type min, max, or shared + * @bw: bandwidth in Kbps - Kilo bits per sec + * + * It updates node's BW limit parameters like BW RL profile ID of type CIR, + * EIR, or SRL. The caller needs to hold scheduler lock. + */ +static enum ice_status +ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, + enum ice_rl_type rl_type, u32 bw) +{ + struct ice_sched_node *cfg_node = node; + enum ice_status status; + + struct ice_hw *hw; + u8 layer_num; + + if (!pi) + return ICE_ERR_PARAM; + hw = pi->hw; + /* Remove unused RL profile IDs from HW and SW DB */ + ice_sched_rm_unused_rl_prof(pi); + layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, + node->tx_sched_layer); + if (layer_num >= hw->num_tx_sched_layers) + return ICE_ERR_PARAM; + + if (rl_type == ICE_SHARED_BW) { + /* SRL node may be different */ + cfg_node = ice_sched_get_srl_node(node, layer_num); + if (!cfg_node) + return ICE_ERR_CFG; + } + /* EIR BW and Shared BW profiles are mutually exclusive and + * hence only one of them may be set for any given element + */ + status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type, + bw); + if (status) + return status; + if (bw == ICE_SCHED_DFLT_BW) + return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type, + layer_num); + return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num); +} + +/** + * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default + * @pi: port information structure + * @node: pointer to node structure + * @rl_type: rate limit type min, max, or shared + * + * This function configures node element's BW rate limit profile ID of + * type CIR, EIR, or SRL to default. This function needs to be called + * with the scheduler lock held. + */ +static enum ice_status +ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, + struct ice_sched_node *node, + enum ice_rl_type rl_type) +{ + return ice_sched_set_node_bw_lmt(pi, node, rl_type, + ICE_SCHED_DFLT_BW); +} + +/** + * ice_sched_validate_srl_node - Check node for SRL applicability + * @node: sched node to configure + * @sel_layer: selected SRL layer + * + * This function checks if the SRL can be applied to a selected layer node on + * behalf of the requested node (first argument). 
This function needs to be + * called with scheduler lock held. + */ +static enum ice_status +ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) +{ + /* SRL profiles are not available on all layers. Check if the + * SRL profile can be applied to a node above or below the + * requested node. SRL configuration is possible only if the + * selected layer's node has single child. + */ + if (sel_layer == node->tx_sched_layer || + ((sel_layer == node->tx_sched_layer + 1) && + node->num_children == 1) || + ((sel_layer == node->tx_sched_layer - 1) && + (node->parent && node->parent->num_children == 1))) + return 0; + + return ICE_ERR_CFG; +} + +/** + * ice_sched_save_q_bw - save queue node's BW information + * @q_ctx: queue context structure + * @rl_type: rate limit type min, max, or shared + * @bw: bandwidth in Kbps - Kilo bits per sec + * + * Save BW information of queue type node for post replay use. + */ +static enum ice_status +ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) +{ + switch (rl_type) { + case ICE_MIN_BW: + ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw); + break; + case ICE_MAX_BW: + ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw); + break; + case ICE_SHARED_BW: + ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw); + break; + default: + return ICE_ERR_PARAM; + } + return 0; +} + +/** + * ice_sched_set_q_bw_lmt - sets queue BW limit + * @pi: port information structure + * @vsi_handle: sw VSI handle + * @tc: traffic class + * @q_handle: software queue handle + * @rl_type: min, max, or shared + * @bw: bandwidth in Kbps + * + * This function sets BW limit of queue scheduling node. + */ +static enum ice_status +ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u16 q_handle, enum ice_rl_type rl_type, u32 bw) +{ + enum ice_status status = ICE_ERR_PARAM; + struct ice_sched_node *node; + struct ice_q_ctx *q_ctx; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return ICE_ERR_PARAM; + mutex_lock(&pi->sched_lock); + q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle); + if (!q_ctx) + goto exit_q_bw_lmt; + node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); + if (!node) { + ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n"); + goto exit_q_bw_lmt; + } + + /* Return error if it is not a leaf node */ + if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) + goto exit_q_bw_lmt; + + /* SRL bandwidth layer selection */ + if (rl_type == ICE_SHARED_BW) { + u8 sel_layer; /* selected layer */ + + sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type, + node->tx_sched_layer); + if (sel_layer >= pi->hw->num_tx_sched_layers) { + status = ICE_ERR_PARAM; + goto exit_q_bw_lmt; + } + status = ice_sched_validate_srl_node(node, sel_layer); + if (status) + goto exit_q_bw_lmt; + } + + if (bw == ICE_SCHED_DFLT_BW) + status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); + else + status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); + + if (!status) + status = ice_sched_save_q_bw(q_ctx, rl_type, bw); + +exit_q_bw_lmt: + mutex_unlock(&pi->sched_lock); + return status; +} + +/** + * ice_cfg_q_bw_lmt - configure queue BW limit + * @pi: port information structure + * @vsi_handle: sw VSI handle + * @tc: traffic class + * @q_handle: software queue handle + * @rl_type: min, max, or shared + * @bw: bandwidth in Kbps + * + * This function configures BW limit of queue scheduling node. 
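+ * For example, capping a Tx queue at 100 Mbps (bw is in Kbps):
+ *   ice_cfg_q_bw_lmt(pi, vsi_handle, tc, q_handle, ICE_MAX_BW, 100000);
+ * ice_cfg_q_bw_dflt_lmt() with the same handles restores the default
+ * setting for that rl_type.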
+ */ +enum ice_status +ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u16 q_handle, enum ice_rl_type rl_type, u32 bw) +{ + return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, + bw); +} + +/** + * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit + * @pi: port information structure + * @vsi_handle: sw VSI handle + * @tc: traffic class + * @q_handle: software queue handle + * @rl_type: min, max, or shared + * + * This function configures BW default limit of queue scheduling node. + */ +enum ice_status +ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u16 q_handle, enum ice_rl_type rl_type) +{ + return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, + ICE_SCHED_DFLT_BW); +} + +/** + * ice_cfg_rl_burst_size - Set burst size value + * @hw: pointer to the HW struct + * @bytes: burst size in bytes + * + * This function configures/set the burst size to requested new value. The new + * burst size value is used for future rate limit calls. It doesn't change the + * existing or previously created RL profiles. + */ +enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) +{ + u16 burst_size_to_prog; + + if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || + bytes > ICE_MAX_BURST_SIZE_ALLOWED) + return ICE_ERR_PARAM; + if (ice_round_to_num(bytes, 64) <= + ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { + /* 64 byte granularity case */ + /* Disable MSB granularity bit */ + burst_size_to_prog = ICE_64_BYTE_GRANULARITY; + /* round number to nearest 64 byte granularity */ + bytes = ice_round_to_num(bytes, 64); + /* The value is in 64 byte chunks */ + burst_size_to_prog |= (u16)(bytes / 64); + } else { + /* k bytes granularity case */ + /* Enable MSB granularity bit */ + burst_size_to_prog = ICE_KBYTE_GRANULARITY; + /* round number to nearest 1024 granularity */ + bytes = ice_round_to_num(bytes, 1024); + /* check rounding doesn't go beyond allowed */ + if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY) + bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY; + /* The value is in k bytes */ + burst_size_to_prog |= (u16)(bytes / 1024); + } + hw->max_burst_size = burst_size_to_prog; + return 0; +} + +/** + * ice_sched_replay_node_prio - re-configure node priority + * @hw: pointer to the HW struct + * @node: sched node to configure + * @priority: priority value + * + * This function configures node element's priority value. It + * needs to be called with scheduler lock held. + */ +static enum ice_status +ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, + u8 priority) +{ + struct ice_aqc_txsched_elem_data buf; + struct ice_aqc_txsched_elem *data; + enum ice_status status; + + buf = node->info; + data = &buf.data; + data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; + data->generic = priority; + + /* Configure element */ + status = ice_sched_update_elem(hw, node, &buf); + return status; +} + +/** + * ice_sched_replay_node_bw - replay node(s) BW + * @hw: pointer to the HW struct + * @node: sched node to configure + * @bw_t_info: BW type information + * + * This function restores node's BW from bw_t_info. The caller needs + * to hold the scheduler lock. 
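+ * Priority is restored first, followed by CIR BW and weight, then EIR BW
+ * and weight, and finally the shared (SRL) BW, in the order the bits are
+ * tested in bw_t_bitmap.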
+ */ +static enum ice_status +ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, + struct ice_bw_type_info *bw_t_info) +{ + struct ice_port_info *pi = hw->port_info; + enum ice_status status = ICE_ERR_PARAM; + u16 bw_alloc; + + if (!node) + return status; + if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT)) + return 0; + if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) { + status = ice_sched_replay_node_prio(hw, node, + bw_t_info->generic); + if (status) + return status; + } + if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) { + status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, + bw_t_info->cir_bw.bw); + if (status) + return status; + } + if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) { + bw_alloc = bw_t_info->cir_bw.bw_alloc; + status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW, + bw_alloc); + if (status) + return status; + } + if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) { + status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, + bw_t_info->eir_bw.bw); + if (status) + return status; + } + if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) { + bw_alloc = bw_t_info->eir_bw.bw_alloc; + status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW, + bw_alloc); + if (status) + return status; + } + if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap)) + status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW, + bw_t_info->shared_bw); + return status; +} + +/** + * ice_sched_replay_q_bw - replay queue type node BW + * @pi: port information structure + * @q_ctx: queue context structure + * + * This function replays queue type node bandwidth. This function needs to be + * called with scheduler lock held. + */ +enum ice_status +ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) +{ + struct ice_sched_node *q_node; + + /* Following also checks the presence of node in tree */ + q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); + if (!q_node) + return ICE_ERR_PARAM; + return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); +} diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index 3902a8ad3025..f0593cfb6521 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -8,6 +8,36 @@ #define ICE_QGRP_LAYER_OFFSET 2 #define ICE_VSI_LAYER_OFFSET 4 +#define ICE_SCHED_INVAL_LAYER_NUM 0xFF +/* Burst size is a 12 bits register that is configured while creating the RL + * profile(s). 
MSB is a granularity bit and tells the granularity type + * 0 - LSB bits are in 64 bytes granularity + * 1 - LSB bits are in 1K bytes granularity + */ +#define ICE_64_BYTE_GRANULARITY 0 +#define ICE_KBYTE_GRANULARITY BIT(11) +#define ICE_MIN_BURST_SIZE_ALLOWED 64 /* In Bytes */ +#define ICE_MAX_BURST_SIZE_ALLOWED \ + ((BIT(11) - 1) * 1024) /* In Bytes */ +#define ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY \ + ((BIT(11) - 1) * 64) /* In Bytes */ +#define ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY ICE_MAX_BURST_SIZE_ALLOWED + +#define ICE_RL_PROF_FREQUENCY 446000000 +#define ICE_RL_PROF_ACCURACY_BYTES 128 +#define ICE_RL_PROF_MULTIPLIER 10000 +#define ICE_RL_PROF_TS_MULTIPLIER 32 +#define ICE_RL_PROF_FRACTION 512 + +/* BW rate limit profile parameters list entry along + * with bandwidth maintained per layer in port info + */ +struct ice_aqc_rl_profile_info { + struct ice_aqc_rl_profile_elem profile; + struct list_head list_entry; + u32 bw; /* requested */ + u16 prof_id_ref; /* profile ID to node association ref count */ +}; struct ice_sched_agg_vsi_info { struct list_head list_entry; @@ -48,4 +78,13 @@ enum ice_status ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable); enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle); +enum ice_status +ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u16 q_handle, enum ice_rl_type rl_type, u32 bw); +enum ice_status +ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, + u16 q_handle, enum ice_rl_type rl_type); +enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes); +enum ice_status +ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx); #endif /* _ICE_SCHED_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 1acdd43a2edd..77d211ea3aae 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -416,8 +416,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx); } else { /* update with new HW VSI num */ - if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num) - tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num; + tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num; } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index cb123fbe30be..fa14b9545dab 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -14,11 +14,6 @@ #define ICE_VSI_INVAL_ID 0xffff #define ICE_INVAL_Q_HANDLE 0xFFFF -/* VSI queue context structure */ -struct ice_q_ctx { - u16 q_handle; -}; - /* VSI context structure for add/get/update/free operations */ struct ice_vsi_ctx { u16 vsi_num; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 33dd103035dc..2c212f64d99f 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -5,8 +5,13 @@ #include <linux/prefetch.h> #include <linux/mm.h> +#include <linux/bpf_trace.h> +#include <net/xdp.h> +#include "ice_txrx_lib.h" +#include "ice_lib.h" #include "ice.h" #include "ice_dcb_lib.h" +#include "ice_xsk.h" #define ICE_RX_HDR_SIZE 256 @@ -19,7 +24,10 @@ static void ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf) { if (tx_buf->skb) { - dev_kfree_skb_any(tx_buf->skb); + if (ice_ring_is_xdp(ring)) + 
page_frag_free(tx_buf->raw_buf); + else + dev_kfree_skb_any(tx_buf->skb); if (dma_unmap_len(tx_buf, len)) dma_unmap_single(ring->dev, dma_unmap_addr(tx_buf, dma), @@ -51,6 +59,11 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring) { u16 i; + if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) { + ice_xsk_clean_xdp_ring(tx_ring); + goto tx_skip_free; + } + /* ring already cleared, nothing to do */ if (!tx_ring->tx_buf) return; @@ -59,6 +72,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring) for (i = 0; i < tx_ring->count; i++) ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); +tx_skip_free: memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); /* Zero out the descriptor ring */ @@ -136,8 +150,11 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) total_bytes += tx_buf->bytecount; total_pkts += tx_buf->gso_segs; - /* free the skb */ - napi_consume_skb(tx_buf->skb, napi_budget); + if (ice_ring_is_xdp(tx_ring)) + page_frag_free(tx_buf->raw_buf); + else + /* free the skb */ + napi_consume_skb(tx_buf->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -188,12 +205,11 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget) i += tx_ring->count; tx_ring->next_to_clean = i; - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->stats.bytes += total_bytes; - tx_ring->stats.pkts += total_pkts; - u64_stats_update_end(&tx_ring->syncp); - tx_ring->q_vector->tx.total_bytes += total_bytes; - tx_ring->q_vector->tx.total_pkts += total_pkts; + + ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes); + + if (ice_ring_is_xdp(tx_ring)) + return !!budget; netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes); @@ -273,6 +289,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring) if (!rx_ring->rx_buf) return; + if (rx_ring->xsk_umem) { + ice_xsk_clean_rx_ring(rx_ring); + goto rx_skip_free; + } + /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; @@ -289,10 +310,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring) */ dma_sync_single_range_for_cpu(dev, rx_buf->dma, rx_buf->page_offset, - ICE_RXBUF_2048, DMA_FROM_DEVICE); + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); /* free resources associated with mapping */ - dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE, + dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); @@ -300,6 +322,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring) rx_buf->page_offset = 0; } +rx_skip_free: memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count); /* Zero out the descriptor ring */ @@ -319,6 +342,10 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring) void ice_free_rx_ring(struct ice_ring *rx_ring) { ice_clean_rx_ring(rx_ring); + if (rx_ring->vsi->type == ICE_VSI_PF) + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + rx_ring->xdp_prog = NULL; devm_kfree(rx_ring->dev, rx_ring->rx_buf); rx_ring->rx_buf = NULL; @@ -363,6 +390,15 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring) rx_ring->next_to_use = 0; rx_ring->next_to_clean = 0; + + if (ice_is_xdp_ena_vsi(rx_ring->vsi)) + WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); + + if (rx_ring->vsi->type == ICE_VSI_PF && + !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, + rx_ring->q_index)) + goto err; return 0; err: @@ -372,34 +408,110 @@ 
err: } /** - * ice_release_rx_desc - Store the new tail and head values - * @rx_ring: ring to bump - * @val: new head index + * ice_rx_offset - Return expected offset into page to access data + * @rx_ring: Ring we are requesting offset of + * + * Returns the offset value for ring into the data buffer. */ -static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) +static unsigned int ice_rx_offset(struct ice_ring *rx_ring) { - u16 prev_ntu = rx_ring->next_to_use; + if (ice_ring_uses_build_skb(rx_ring)) + return ICE_SKB_PAD; + else if (ice_is_xdp_ena_vsi(rx_ring->vsi)) + return XDP_PACKET_HEADROOM; - rx_ring->next_to_use = val; + return 0; +} - /* update next to alloc since we have filled the ring */ - rx_ring->next_to_alloc = val; +/** + * ice_run_xdp - Executes an XDP program on initialized xdp_buff + * @rx_ring: Rx ring + * @xdp: xdp_buff used as input to the XDP program + * @xdp_prog: XDP program to run + * + * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} + */ +static int +ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp, + struct bpf_prog *xdp_prog) +{ + int err, result = ICE_XDP_PASS; + struct ice_ring *xdp_ring; + u32 act; - /* QRX_TAIL will be updated with any tail value, but hardware ignores - * the lower 3 bits. This makes it so we only bump tail on meaningful - * boundaries. Also, this allows us to bump tail on intervals of 8 up to - * the budget depending on the current traffic load. - */ - val &= ~0x7; - if (prev_ntu != val) { - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - writel(val, rx_ring->tail); + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()]; + result = ice_xmit_xdp_buff(xdp, xdp_ring); + break; + case XDP_REDIRECT: + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED; + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fallthrough -- not supported action */ + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + /* fallthrough -- handle aborts by dropping frame */ + case XDP_DROP: + result = ICE_XDP_CONSUMED; + break; } + + return result; +} + +/** + * ice_xdp_xmit - submit packets to XDP ring for transmission + * @dev: netdev + * @n: number of XDP frames to be transmitted + * @frames: XDP frames to be transmitted + * @flags: transmit flags + * + * Returns number of frames successfully sent. Frames that fail are + * free'ed via XDP return API. + * For error cases, a negative errno code is returned and no-frames + * are transmitted (caller must handle freeing frames). 
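+ * This is intended to back the netdev's .ndo_xdp_xmit hook; the XDP Tx
+ * ring is selected per CPU via smp_processor_id(), so it is expected to
+ * run from the NAPI/softirq context used by the XDP redirect path.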
+ */ +int +ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + unsigned int queue_index = smp_processor_id(); + struct ice_vsi *vsi = np->vsi; + struct ice_ring *xdp_ring; + int drops = 0, i; + + if (test_bit(__ICE_DOWN, vsi->state)) + return -ENETDOWN; + + if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq) + return -ENXIO; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + xdp_ring = vsi->xdp_rings[queue_index]; + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring); + if (err != ICE_XDP_TX) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (unlikely(flags & XDP_XMIT_FLUSH)) + ice_xdp_ring_update_tail(xdp_ring); + + return n - drops; } /** @@ -423,28 +535,28 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) } /* alloc new page for storage */ - page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); + page = dev_alloc_pages(ice_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_page_failed++; return false; } /* map page for use */ - dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE, + dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use */ if (dma_mapping_error(rx_ring->dev, dma)) { - __free_pages(page, 0); + __free_pages(page, ice_rx_pg_order(rx_ring)); rx_ring->rx_stats.alloc_page_failed++; return false; } bi->dma = dma; bi->page = page; - bi->page_offset = 0; + bi->page_offset = ice_rx_offset(rx_ring); page_ref_add(page, USHRT_MAX - 1); bi->pagecnt_bias = USHRT_MAX; @@ -486,7 +598,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, bi->page_offset, - ICE_RXBUF_2048, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); /* Refresh the desc even if buffer_addrs didn't change @@ -557,9 +669,6 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) */ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) { -#if (PAGE_SIZE >= 8192) - unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048; -#endif unsigned int pagecnt_bias = rx_buf->pagecnt_bias; struct page *page = rx_buf->page; @@ -572,7 +681,9 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) if (unlikely((page_count(page) - pagecnt_bias) > 1)) return false; #else - if (rx_buf->page_offset > last_offset) +#define ICE_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048) + if (rx_buf->page_offset > ICE_LAST_OFFSET) return false; #endif /* PAGE_SIZE < 8192) */ @@ -590,6 +701,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) /** * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag + * @rx_ring: Rx descriptor ring to transact packets on * @rx_buf: buffer containing page to add * @skb: sk_buff to place the data into * @size: packet length from rx_desc @@ -599,13 +711,13 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) * The function will then update the page offset. 
*/ static void -ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb, - unsigned int size) +ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, + struct sk_buff *skb, unsigned int size) { #if (PAGE_SIZE >= 8192) - unsigned int truesize = SKB_DATA_ALIGN(size); + unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring)); #else - unsigned int truesize = ICE_RXBUF_2048; + unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; #endif if (!size) @@ -679,10 +791,64 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, } /** + * ice_build_skb - Build skb around an existing buffer + * @rx_ring: Rx descriptor ring to transact packets on + * @rx_buf: Rx buffer to pull data from + * @xdp: xdp_buff pointing to the data + * + * This function builds an skb around an existing Rx buffer, taking care + * to set up the skb correctly and avoid any memcpy overhead. + */ +static struct sk_buff * +ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + struct sk_buff *skb; + + /* Prefetch first cache line of first page. If xdp->data_meta + * is unused, this points exactly as xdp->data, otherwise we + * likely have a consumer accessing first few bytes of meta + * data, and then actual data. + */ + prefetch(xdp->data_meta); +#if L1_CACHE_BYTES < 128 + prefetch((void *)(xdp->data + L1_CACHE_BYTES)); +#endif + /* build an skb around the page buffer */ + skb = build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* must to record Rx queue, otherwise OS features such as + * symmetric queue won't work + */ + skb_record_rx_queue(skb, rx_ring->q_index); + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); + if (metasize) + skb_metadata_set(skb, metasize); + + /* buffer is used by skb, update page_offset */ + ice_rx_buf_adjust_pg_offset(rx_buf, truesize); + + return skb; +} + +/** * ice_construct_skb - Allocate skb and populate it * @rx_ring: Rx descriptor ring to transact packets on * @rx_buf: Rx buffer to pull data from - * @size: the length of the packet + * @xdp: xdp_buff pointing to the data * * This function allocates an skb. 
It then populates it with the page * data from the current receive descriptor, taking care to set up the @@ -690,16 +856,16 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, */ static struct sk_buff * ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, - unsigned int size) + struct xdp_buff *xdp) { - void *va = page_address(rx_buf->page) + rx_buf->page_offset; + unsigned int size = xdp->data_end - xdp->data; unsigned int headlen; struct sk_buff *skb; /* prefetch first cache line of first page */ - prefetch(va); + prefetch(xdp->data); #if L1_CACHE_BYTES < 128 - prefetch((u8 *)va + L1_CACHE_BYTES); + prefetch((void *)(xdp->data + L1_CACHE_BYTES)); #endif /* L1_CACHE_BYTES */ /* allocate a skb to store the frags */ @@ -712,10 +878,11 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, /* Determine available headroom for copy */ headlen = size; if (headlen > ICE_RX_HDR_SIZE) - headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE); + headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, + sizeof(long))); /* if we exhaust the linear part then add what is left as a frag */ size -= headlen; @@ -723,7 +890,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, #if (PAGE_SIZE >= 8192) unsigned int truesize = SKB_DATA_ALIGN(size); #else - unsigned int truesize = ICE_RXBUF_2048; + unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; #endif skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen, size, truesize); @@ -745,11 +912,18 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, * @rx_ring: Rx descriptor ring to transact packets on * @rx_buf: Rx buffer to pull data from * - * This function will clean up the contents of the rx_buf. It will - * either recycle the buffer or unmap it and free the associated resources. + * This function will update next_to_clean and then clean up the contents + * of the rx_buf. It will either recycle the buffer or unmap it and free + * the associated resources. */ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) { + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + if (!rx_buf) return; @@ -759,8 +933,9 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) rx_ring->rx_stats.page_reuse_count++; } else { /* we are not reusing the buffer so unmap it */ - dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE, - DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); + dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, + ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + ICE_RX_DMA_ATTR); __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); } @@ -770,227 +945,31 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) } /** - * ice_cleanup_headers - Correct empty headers - * @skb: pointer to current skb being fixed - * - * Also address the case where we are pulling data in on pages only - * and as such no data is present in the skb header. - * - * In addition if skb is not at least 60 bytes we need to pad it so that - * it is large enough to qualify as a valid Ethernet frame. - * - * Returns true if an error was encountered and skb was freed. 
- */ -static bool ice_cleanup_headers(struct sk_buff *skb) -{ - /* if eth_skb_pad returns an error the skb was freed */ - if (eth_skb_pad(skb)) - return true; - - return false; -} - -/** - * ice_test_staterr - tests bits in Rx descriptor status and error fields - * @rx_desc: pointer to receive descriptor (in le64 format) - * @stat_err_bits: value to mask - * - * This function does some fast chicanery in order to return the - * value of the mask which is really only used for boolean tests. - * The status_error_len doesn't need to be shifted because it begins - * at offset zero. - */ -static bool -ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits) -{ - return !!(rx_desc->wb.status_error0 & - cpu_to_le16(stat_err_bits)); -} - -/** * ice_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed * @rx_desc: Rx descriptor for current buffer * @skb: Current socket buffer containing buffer in progress * - * This function updates next to clean. If the buffer is an EOP buffer - * this function exits returning false, otherwise it will place the - * sk_buff in the next buffer to be chained and return true indicating - * that this is in fact a non-EOP buffer. + * If the buffer is an EOP buffer, this function exits returning false, + * otherwise return true indicating that this is in fact a non-EOP buffer. */ static bool ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { - u32 ntc = rx_ring->next_to_clean + 1; - - /* fetch, update, and store next to clean */ - ntc = (ntc < rx_ring->count) ? ntc : 0; - rx_ring->next_to_clean = ntc; - - prefetch(ICE_RX_DESC(rx_ring, ntc)); - /* if we are the last buffer then there is nothing else to do */ #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF))) return false; /* place skb in next buffer to be received */ - rx_ring->rx_buf[ntc].skb = skb; + rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb; rx_ring->rx_stats.non_eop_descs++; return true; } /** - * ice_ptype_to_htype - get a hash type - * @ptype: the ptype value from the descriptor - * - * Returns a hash type to be used by skb_set_hash - */ -static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype) -{ - return PKT_HASH_TYPE_NONE; -} - -/** - * ice_rx_hash - set the hash value in the skb - * @rx_ring: descriptor ring - * @rx_desc: specific descriptor - * @skb: pointer to current skb - * @rx_ptype: the ptype value from the descriptor - */ -static void -ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb, u8 rx_ptype) -{ - struct ice_32b_rx_flex_desc_nic *nic_mdid; - u32 hash; - - if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) - return; - - if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC) - return; - - nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc; - hash = le32_to_cpu(nic_mdid->rss_hash); - skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); -} - -/** - * ice_rx_csum - Indicate in skb if checksum is good - * @ring: the ring we care about - * @skb: skb currently being received and modified - * @rx_desc: the receive descriptor - * @ptype: the packet type decoded by hardware - * - * skb->protocol must be set before this function is called - */ -static void -ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, - union ice_32b_rx_flex_desc *rx_desc, u8 ptype) -{ - struct ice_rx_ptype_decoded decoded; - u32 rx_error, rx_status; - bool ipv4, ipv6; - - rx_status = 
le16_to_cpu(rx_desc->wb.status_error0); - rx_error = rx_status; - - decoded = ice_decode_rx_desc_ptype(ptype); - - /* Start with CHECKSUM_NONE and by default csum_level = 0 */ - skb->ip_summed = CHECKSUM_NONE; - skb_checksum_none_assert(skb); - - /* check if Rx checksum is enabled */ - if (!(ring->netdev->features & NETIF_F_RXCSUM)) - return; - - /* check if HW has decoded the packet and checksum */ - if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) - return; - - if (!(decoded.known && decoded.outer_ip)) - return; - - ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4); - ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6); - - if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | - BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) - goto checksum_fail; - else if (ipv6 && (rx_status & - (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S)))) - goto checksum_fail; - - /* check for L4 errors and handle packets that were not able to be - * checksummed due to arrival speed - */ - if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)) - goto checksum_fail; - - /* Only report checksum unnecessary for TCP, UDP, or SCTP */ - switch (decoded.inner_prot) { - case ICE_RX_PTYPE_INNER_PROT_TCP: - case ICE_RX_PTYPE_INNER_PROT_UDP: - case ICE_RX_PTYPE_INNER_PROT_SCTP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - default: - break; - } - return; - -checksum_fail: - ring->vsi->back->hw_csum_rx_error++; -} - -/** - * ice_process_skb_fields - Populate skb header fields from Rx descriptor - * @rx_ring: Rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor - * @skb: pointer to current skb being populated - * @ptype: the packet type decoded by hardware - * - * This function checks the ring, descriptor, and packet information in - * order to populate the hash, checksum, VLAN, protocol, and - * other fields within the skb. - */ -static void -ice_process_skb_fields(struct ice_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb, u8 ptype) -{ - ice_rx_hash(rx_ring, rx_desc, skb, ptype); - - /* modifies the skb - consumes the enet header */ - skb->protocol = eth_type_trans(skb, rx_ring->netdev); - - ice_rx_csum(rx_ring, skb, rx_desc, ptype); -} - -/** - * ice_receive_skb - Send a completed packet up the stack - * @rx_ring: Rx ring in play - * @skb: packet to send up - * @vlan_tag: VLAN tag for packet - * - * This function sends the completed packet (via. 
skb) up the stack using - * gro receive functions (with/without VLAN tag) - */ -static void -ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) -{ - if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && - (vlan_tag & VLAN_VID_MASK)) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - napi_gro_receive(&rx_ring->q_vector->napi, skb); -} - -/** * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: Rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process @@ -1006,8 +985,13 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0; u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); + unsigned int xdp_res, xdp_xmit = 0; + struct bpf_prog *xdp_prog = NULL; + struct xdp_buff xdp; bool failure; + xdp.rxq = &rx_ring->xdp_rxq; + /* start the loop to process Rx packets bounded by 'budget' */ while (likely(total_rx_pkts < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; @@ -1042,10 +1026,57 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) /* retrieve a buffer from the ring */ rx_buf = ice_get_rx_buf(rx_ring, &skb, size); + if (!size) { + xdp.data = NULL; + xdp.data_end = NULL; + xdp.data_hard_start = NULL; + xdp.data_meta = NULL; + goto construct_skb; + } + + xdp.data = page_address(rx_buf->page) + rx_buf->page_offset; + xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring); + xdp.data_meta = xdp.data; + xdp.data_end = xdp.data + size; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + if (!xdp_prog) { + rcu_read_unlock(); + goto construct_skb; + } + + xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog); + rcu_read_unlock(); + if (!xdp_res) + goto construct_skb; + if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = ice_rx_pg_size(rx_ring) / 2; +#else + truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + + size); +#endif + xdp_xmit |= xdp_res; + ice_rx_buf_adjust_pg_offset(rx_buf, truesize); + } else { + rx_buf->pagecnt_bias++; + } + total_rx_bytes += size; + total_rx_pkts++; + + cleaned_count++; + ice_put_rx_buf(rx_ring, rx_buf); + continue; +construct_skb: if (skb) - ice_add_rx_frag(rx_buf, skb, size); + ice_add_rx_frag(rx_ring, rx_buf, skb, size); + else if (ice_ring_uses_build_skb(rx_ring)) + skb = ice_build_skb(rx_ring, rx_buf, &xdp); else - skb = ice_construct_skb(rx_ring, rx_buf, size); + skb = ice_construct_skb(rx_ring, rx_buf, &xdp); /* exit if we failed to retrieve a buffer */ if (!skb) { @@ -1072,10 +1103,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) if (ice_test_staterr(rx_desc, stat_err_bits)) vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); - /* correct empty headers and pad skb if needed (to make valid - * ethernet frame - */ - if (ice_cleanup_headers(skb)) { + /* pad the skb if needed, to make a valid ethernet frame */ + if (eth_skb_pad(skb)) { skb = NULL; continue; } @@ -1099,13 +1128,10 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) /* return up to cleaned_count buffers to hardware */ failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); - /* update queue and vector specific stats */ - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->stats.pkts += total_rx_pkts; - rx_ring->stats.bytes += total_rx_bytes; - u64_stats_update_end(&rx_ring->syncp); - rx_ring->q_vector->rx.total_pkts += total_rx_pkts; - rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + if (xdp_prog) + 
ice_finalize_xdp_rx(rx_ring, xdp_xmit); + + ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); /* guarantee a trip back through this routine if there was a failure */ return failure ? budget : (int)total_rx_pkts; @@ -1483,9 +1509,14 @@ int ice_napi_poll(struct napi_struct *napi, int budget) /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ - ice_for_each_ring(ring, q_vector->tx) - if (!ice_clean_tx_irq(ring, budget)) + ice_for_each_ring(ring, q_vector->tx) { + bool wd = ring->xsk_umem ? + ice_clean_tx_irq_zc(ring, budget) : + ice_clean_tx_irq(ring, budget); + + if (!wd) clean_complete = false; + } /* Handle case where we are called by netpoll with a budget of 0 */ if (unlikely(budget <= 0)) @@ -1505,7 +1536,13 @@ int ice_napi_poll(struct napi_struct *napi, int budget) ice_for_each_ring(ring, q_vector->rx) { int cleaned; - cleaned = ice_clean_rx_irq(ring, budget_per_ring); + /* A dedicated path for zero-copy allows making a single + * comparison in the irq context instead of many inside the + * ice_clean_rx_irq function and makes the codebase cleaner. + */ + cleaned = ring->xsk_umem ? + ice_clean_rx_irq_zc(ring, budget_per_ring) : + ice_clean_rx_irq(ring, budget_per_ring); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ if (cleaned >= budget_per_ring) @@ -1527,17 +1564,6 @@ int ice_napi_poll(struct napi_struct *napi, int budget) return min_t(int, work_done, budget - 1); } -/* helper function for building cmd/type/offset */ -static __le64 -build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) -{ - return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA | - (td_cmd << ICE_TXD_QW1_CMD_S) | - (td_offset << ICE_TXD_QW1_OFFSET_S) | - ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) | - (td_tag << ICE_TXD_QW1_L2TAG1_S)); -} - /** * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions * @tx_ring: the ring to be checked @@ -1689,9 +1715,9 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, i = 0; /* write last descriptor with RS and EOP bits */ - td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS); - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag); + td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD; + tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, + td_tag); /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 94a9280193e2..a84cc0e6dd27 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -4,8 +4,12 @@ #ifndef _ICE_TXRX_H_ #define _ICE_TXRX_H_ +#include "ice_type.h" + #define ICE_DFLT_IRQ_WORK 256 +#define ICE_RXBUF_3072 3072 #define ICE_RXBUF_2048 2048 +#define ICE_RXBUF_1536 1536 #define ICE_MAX_CHAINED_RX_BUFS 5 #define ICE_MAX_BUF_TXD 8 #define ICE_MIN_TX_LEN 17 @@ -22,6 +26,71 @@ #define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */ #define ICE_MAX_TXQ_PER_TXQG 128 +/* Attempt to maximize the headroom available for incoming frames. We use a 2K + * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame. + * This leaves us with 512 bytes of room. From that we need to deduct the + * space needed for the shared info and the padding needed to IP align the + * frame. + * + * Note: For cache line sizes 256 or larger this value is going to end + * up negative. 
In these cases we should fall back to the legacy + * receive path. + */ +#if (PAGE_SIZE < 8192) +#define ICE_2K_TOO_SMALL_WITH_PADDING \ +((NET_SKB_PAD + ICE_RXBUF_1536) > SKB_WITH_OVERHEAD(ICE_RXBUF_2048)) + +/** + * ice_compute_pad - compute the padding + * rx_buf_len: buffer length + * + * Figure out the size of half page based on given buffer length and + * then subtract the skb_shared_info followed by subtraction of the + * actual buffer length; this in turn results in the actual space that + * is left for padding usage + */ +static inline int ice_compute_pad(int rx_buf_len) +{ + int half_page_size; + + half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len; +} + +/** + * ice_skb_pad - determine the padding that we can supply + * + * Figure out the right Rx buffer size and based on that calculate the + * padding + */ +static inline int ice_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. + */ + if (ICE_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = ICE_RXBUF_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + + return ice_compute_pad(rx_buf_len); +} + +#define ICE_SKB_PAD ice_skb_pad() +#else +#define ICE_2K_TOO_SMALL_WITH_PADDING false +#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif + /* We are assuming that the cache line is always 64 Bytes here for ice. * In order to make sure that is a correct assumption there is a check in probe * to print a warning if the read from GLPCI_CNF2 tells us that the cache line @@ -49,12 +118,24 @@ #define ICE_TX_FLAGS_VLAN_PR_S 29 #define ICE_TX_FLAGS_VLAN_S 16 +#define ICE_XDP_PASS 0 +#define ICE_XDP_CONSUMED BIT(0) +#define ICE_XDP_TX BIT(1) +#define ICE_XDP_REDIR BIT(2) + #define ICE_RX_DMA_ATTR \ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#define ICE_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) + +#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS) + struct ice_tx_buf { struct ice_tx_desc *next_to_watch; - struct sk_buff *skb; + union { + struct sk_buff *skb; + void *raw_buf; /* used for XDP */ + }; unsigned int bytecount; unsigned short gso_segs; u32 tx_flags; @@ -76,9 +157,17 @@ struct ice_tx_offload_params { struct ice_rx_buf { struct sk_buff *skb; dma_addr_t dma; - struct page *page; - unsigned int page_offset; - u16 pagecnt_bias; + union { + struct { + struct page *page; + unsigned int page_offset; + u16 pagecnt_bias; + }; + struct { + void *addr; + u64 handle; + }; + }; }; struct ice_q_stats { @@ -198,18 +287,44 @@ struct ice_ring { }; struct rcu_head rcu; /* to avoid race on free */ + struct bpf_prog *xdp_prog; + struct xdp_umem *xsk_umem; + struct zero_copy_allocator zca; + /* CL3 - 3rd cacheline starts here */ + struct xdp_rxq_info xdp_rxq; /* CLX - the below items are only accessed infrequently and should be * in their own cache line if possible */ +#define ICE_TX_FLAGS_RING_XDP BIT(0) +#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1) + u8 flags; dma_addr_t dma; /* physical address of ring */ unsigned int size; /* length of descriptor ring in bytes */ u32 txq_teid; /* Added Tx queue TEID */ u16 rx_buf_len; -#ifdef CONFIG_DCB u8 dcb_tc; /* Traffic class of ring */ -#endif /* 
CONFIG_DCB */ } ____cacheline_internodealigned_in_smp; +static inline bool ice_ring_uses_build_skb(struct ice_ring *ring) +{ + return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB); +} + +static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring) +{ + ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB; +} + +static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring) +{ + ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB; +} + +static inline bool ice_ring_is_xdp(struct ice_ring *ring) +{ + return !!(ring->flags & ICE_TX_FLAGS_RING_XDP); +} + struct ice_ring_container { /* head of linked-list of rings */ struct ice_ring *ring; @@ -230,6 +345,19 @@ struct ice_ring_container { #define ice_for_each_ring(pos, head) \ for (pos = (head).ring; pos; pos = pos->next) +static inline unsigned int ice_rx_pg_order(struct ice_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring->rx_buf_len > (PAGE_SIZE / 2)) + return 1; +#endif + return 0; +} + +#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring)) + +union ice_32b_rx_flex_desc; + bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count); netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev); void ice_clean_tx_ring(struct ice_ring *tx_ring); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c new file mode 100644 index 000000000000..35bbc4ff603c --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019, Intel Corporation. */ + +#include "ice_txrx_lib.h" + +/** + * ice_release_rx_desc - Store the new tail and head values + * @rx_ring: ring to bump + * @val: new head index + */ +void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) +{ + u16 prev_ntu = rx_ring->next_to_use; + + rx_ring->next_to_use = val; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + + /* QRX_TAIL will be updated with any tail value, but hardware ignores + * the lower 3 bits. This makes it so we only bump tail on meaningful + * boundaries. Also, this allows us to bump tail on intervals of 8 up to + * the budget depending on the current traffic load. + */ + val &= ~0x7; + if (prev_ntu != val) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + writel(val, rx_ring->tail); + } +} + +/** + * ice_ptype_to_htype - get a hash type + * @ptype: the ptype value from the descriptor + * + * Returns a hash type to be used by skb_set_hash + */ +static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype) +{ + return PKT_HASH_TYPE_NONE; +} + +/** + * ice_rx_hash - set the hash value in the skb + * @rx_ring: descriptor ring + * @rx_desc: specific descriptor + * @skb: pointer to current skb + * @rx_ptype: the ptype value from the descriptor + */ +static void +ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb, u8 rx_ptype) +{ + struct ice_32b_rx_flex_desc_nic *nic_mdid; + u32 hash; + + if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) + return; + + if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC) + return; + + nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc; + hash = le32_to_cpu(nic_mdid->rss_hash); + skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); +} + +/** + * ice_rx_csum - Indicate in skb if checksum is good + * @ring: the ring we care about + * @skb: skb currently being received and modified + * @rx_desc: the receive descriptor + * @ptype: the packet type decoded by hardware + * + * skb->protocol must be set before this function is called + */ +static void +ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb, + union ice_32b_rx_flex_desc *rx_desc, u8 ptype) +{ + struct ice_rx_ptype_decoded decoded; + u32 rx_error, rx_status; + bool ipv4, ipv6; + + rx_status = le16_to_cpu(rx_desc->wb.status_error0); + rx_error = rx_status; + + decoded = ice_decode_rx_desc_ptype(ptype); + + /* Start with CHECKSUM_NONE and by default csum_level = 0 */ + skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); + + /* check if Rx checksum is enabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* check if HW has decoded the packet and checksum */ + if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) + return; + + if (!(decoded.known && decoded.outer_ip)) + return; + + ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4); + ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6); + + if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | + BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) + goto checksum_fail; + else if (ipv6 && (rx_status & + (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S)))) + goto checksum_fail; + + /* check for L4 errors and handle packets that were not able to be + * checksummed due to arrival speed + */ + if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)) + goto checksum_fail; + + /* Only report checksum unnecessary for TCP, UDP, or SCTP */ + switch (decoded.inner_prot) { + case ICE_RX_PTYPE_INNER_PROT_TCP: + case ICE_RX_PTYPE_INNER_PROT_UDP: + case ICE_RX_PTYPE_INNER_PROT_SCTP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + default: + break; + } + return; + +checksum_fail: + ring->vsi->back->hw_csum_rx_error++; +} + +/** + * ice_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: Rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * @ptype: the packet type decoded by hardware + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, protocol, and + * other fields within the skb. 
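The tail-bump policy in ice_release_rx_desc() above only touches QRX_TAIL when the masked value changes, because the hardware ignores the low three bits of the register. Below is a minimal standalone sketch of that masking logic, not driver code: mmio_write() is a stand-in for writel() and the memory barrier is omitted.

#include <stdint.h>
#include <stdio.h>

static void mmio_write(uint32_t val)		/* stand-in for writel() */
{
	printf("QRX_TAIL <- %u\n", val);
}

static void release_rx_desc(uint32_t *next_to_use, uint32_t val)
{
	uint32_t prev_ntu = *next_to_use;

	*next_to_use = val;
	val &= ~0x7u;			/* hardware ignores bits 2:0 of the tail */
	if (prev_ntu != val)		/* bump only on meaningful boundaries */
		mmio_write(val);
}

int main(void)
{
	uint32_t ntu = 0;
	uint32_t steps[] = { 3, 8, 11, 16, 19, 24 };	/* writes fire at 8, 16, 24 */

	for (unsigned int i = 0; i < sizeof(steps) / sizeof(steps[0]); i++)
		release_rx_desc(&ntu, steps[i]);
	return 0;
}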
+ */ +void +ice_process_skb_fields(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb, u8 ptype) +{ + ice_rx_hash(rx_ring, rx_desc, skb, ptype); + + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + ice_rx_csum(rx_ring, skb, rx_desc, ptype); +} + +/** + * ice_receive_skb - Send a completed packet up the stack + * @rx_ring: Rx ring in play + * @skb: packet to send up + * @vlan_tag: VLAN tag for packet + * + * This function sends the completed packet (via. skb) up the stack using + * gro receive functions (with/without VLAN tag) + */ +void +ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) +{ + if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (vlan_tag & VLAN_VID_MASK)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + napi_gro_receive(&rx_ring->q_vector->napi, skb); +} + +/** + * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission + * @data: packet data pointer + * @size: packet data size + * @xdp_ring: XDP ring for transmission + */ +int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring) +{ + u16 i = xdp_ring->next_to_use; + struct ice_tx_desc *tx_desc; + struct ice_tx_buf *tx_buf; + dma_addr_t dma; + + if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) { + xdp_ring->tx_stats.tx_busy++; + return ICE_XDP_CONSUMED; + } + + dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE); + if (dma_mapping_error(xdp_ring->dev, dma)) + return ICE_XDP_CONSUMED; + + tx_buf = &xdp_ring->tx_buf[i]; + tx_buf->bytecount = size; + tx_buf->gso_segs = 1; + tx_buf->raw_buf = data; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buf, len, size); + dma_unmap_addr_set(tx_buf, dma, dma); + + tx_desc = ICE_TX_DESC(xdp_ring, i); + tx_desc->buf_addr = cpu_to_le64(dma); + tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0, + size, 0); + + /* Make certain all of the status bits have been updated + * before next_to_watch is written. + */ + smp_wmb(); + + i++; + if (i == xdp_ring->count) + i = 0; + + tx_buf->next_to_watch = tx_desc; + xdp_ring->next_to_use = i; + + return ICE_XDP_TX; +} + +/** + * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it + * @xdp: XDP buffer + * @xdp_ring: XDP Tx ring + * + * Returns negative on failure, 0 on success. + */ +int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring) +{ + struct xdp_frame *xdpf = convert_to_xdp_frame(xdp); + + if (unlikely(!xdpf)) + return ICE_XDP_CONSUMED; + + return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring); +} + +/** + * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map + * @rx_ring: Rx ring + * @xdp_res: Result of the receive batch + * + * This function bumps XDP Tx tail and/or flush redirect map, and + * should be called when a batch of packets has been processed in the + * napi loop. + */ +void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res) +{ + if (xdp_res & ICE_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_res & ICE_XDP_TX) { + struct ice_ring *xdp_ring = + rx_ring->vsi->xdp_rings[rx_ring->q_index]; + + ice_xdp_ring_update_tail(xdp_ring); + } +} diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h new file mode 100644 index 000000000000..ba9164dad9ae --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Intel Corporation. 
*/ + +#ifndef _ICE_TXRX_LIB_H_ +#define _ICE_TXRX_LIB_H_ +#include "ice.h" + +/** + * ice_test_staterr - tests bits in Rx descriptor status and error fields + * @rx_desc: pointer to receive descriptor (in le64 format) + * @stat_err_bits: value to mask + * + * This function does some fast chicanery in order to return the + * value of the mask which is really only used for boolean tests. + * The status_error_len doesn't need to be shifted because it begins + * at offset zero. + */ +static inline bool +ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits) +{ + return !!(rx_desc->wb.status_error0 & cpu_to_le16(stat_err_bits)); +} + +static inline __le64 +build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) +{ + return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA | + (td_cmd << ICE_TXD_QW1_CMD_S) | + (td_offset << ICE_TXD_QW1_OFFSET_S) | + ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) | + (td_tag << ICE_TXD_QW1_L2TAG1_S)); +} + +/** + * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register + * @xdp_ring: XDP Tx ring + * + * This function updates the XDP Tx ring tail register. + */ +static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring) +{ + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. + */ + wmb(); + writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); +} + +void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res); +int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring); +int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring); +void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val); +void +ice_process_skb_fields(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb, u8 ptype); +void +ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag); +#endif /* !_ICE_TXRX_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 6667d17a4206..eba8b04b8cbd 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -19,6 +19,17 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) return test_bit(tc, &bitmap); } +static inline u64 round_up_64bit(u64 a, u32 b) +{ + return div64_long(((a) + (b) / 2), (b)); +} + +static inline u32 ice_round_to_num(u32 N, u32 R) +{ + return ((((N) % (R)) < ((R) / 2)) ? 
(((N) / (R)) * (R)) : + ((((N) + (R) - 1) / (R)) * (R))); +} + /* Driver always calls main vsi_handle first */ #define ICE_MAIN_VSI_HANDLE 0 @@ -35,6 +46,8 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) #define ICE_DBG_PKG BIT_ULL(16) #define ICE_DBG_RES BIT_ULL(17) #define ICE_DBG_AQ_MSG BIT_ULL(24) +#define ICE_DBG_AQ_DESC BIT_ULL(25) +#define ICE_DBG_AQ_DESC_BUF BIT_ULL(26) #define ICE_DBG_AQ_CMD BIT_ULL(27) #define ICE_DBG_USER BIT_ULL(31) @@ -272,10 +285,56 @@ enum ice_agg_type { ICE_AGG_TYPE_QG }; +/* Rate limit types */ +enum ice_rl_type { + ICE_UNKNOWN_BW = 0, + ICE_MIN_BW, /* for CIR profile */ + ICE_MAX_BW, /* for EIR profile */ + ICE_SHARED_BW /* for shared profile */ +}; + +#define ICE_SCHED_MIN_BW 500 /* in Kbps */ +#define ICE_SCHED_MAX_BW 100000000 /* in Kbps */ +#define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */ #define ICE_SCHED_DFLT_RL_PROF_ID 0 +#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF #define ICE_SCHED_DFLT_BW_WT 1 +#define ICE_SCHED_INVAL_PROF_ID 0xFFFF +#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */ -/* VSI type list entry to locate corresponding VSI/ag nodes */ + /* Data structure for saving BW information */ +enum ice_bw_type { + ICE_BW_TYPE_PRIO, + ICE_BW_TYPE_CIR, + ICE_BW_TYPE_CIR_WT, + ICE_BW_TYPE_EIR, + ICE_BW_TYPE_EIR_WT, + ICE_BW_TYPE_SHARED, + ICE_BW_TYPE_CNT /* This must be last */ +}; + +struct ice_bw { + u32 bw; + u16 bw_alloc; +}; + +struct ice_bw_type_info { + DECLARE_BITMAP(bw_t_bitmap, ICE_BW_TYPE_CNT); + u8 generic; + struct ice_bw cir_bw; + struct ice_bw eir_bw; + u32 shared_bw; +}; + +/* VSI queue context structure for given TC */ +struct ice_q_ctx { + u16 q_handle; + u32 q_teid; + /* bw_t_info saves queue BW information */ + struct ice_bw_type_info bw_t_info; +}; + +/* VSI type list entry to locate corresponding VSI/aggregator nodes */ struct ice_sched_vsi_info { struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS]; struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS]; @@ -364,6 +423,8 @@ struct ice_port_info { struct mutex sched_lock; /* protect access to TXSched tree */ struct ice_sched_node * sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM]; + /* List contain profile ID(s) and other params per layer */ + struct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM]; struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */ /* DCBX info */ struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */ @@ -415,6 +476,8 @@ struct ice_hw { u8 pf_id; /* device profile info */ + u16 max_burst_size; /* driver sets this value */ + /* Tx Scheduler values */ u16 num_tx_sched_layers; u16 num_tx_sched_phys_layers; @@ -555,6 +618,8 @@ struct ice_hw_port_stats { }; /* Checksum and Shadow RAM pointers */ +#define ICE_SR_BOOT_CFG_PTR 0x132 +#define ICE_NVM_OEM_VER_OFF 0x02 #define ICE_SR_NVM_DEV_STARTER_VER 0x18 #define ICE_SR_NVM_EETRACK_LO 0x2D #define ICE_SR_NVM_EETRACK_HI 0x2E @@ -568,6 +633,7 @@ struct ice_hw_port_stats { #define ICE_OEM_VER_BUILD_MASK (0xffff << ICE_OEM_VER_BUILD_SHIFT) #define ICE_OEM_VER_SHIFT 24 #define ICE_OEM_VER_MASK (0xff << ICE_OEM_VER_SHIFT) +#define ICE_SR_PFA_PTR 0x40 #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 #define ICE_SR_WORDS_IN_1KB 512 diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index b45797f39b2f..2ac83ad3d1a6 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -2,6 +2,7 @@ /* Copyright (c) 2018, Intel Corporation. 
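round_up_64bit() and ice_round_to_num() above both implement round-to-nearest: the former adds half the divisor before dividing, the latter rounds N down to a multiple of R when the remainder is below R/2 and up otherwise. A quick self-contained check of that arithmetic, with plain 64-bit division standing in for div64_long():

#include <stdint.h>
#include <stdio.h>

/* stand-in for round_up_64bit(): a/b rounded to the nearest integer */
static uint64_t round_up_64bit(uint64_t a, uint32_t b)
{
	return (a + b / 2) / b;
}

/* stand-in for ice_round_to_num(): N rounded to the nearest multiple of R */
static uint32_t round_to_num(uint32_t n, uint32_t r)
{
	return (n % r < r / 2) ? (n / r) * r : ((n + r - 1) / r) * r;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)round_up_64bit(1049, 100)); /* 10 */
	printf("%llu\n", (unsigned long long)round_up_64bit(1050, 100)); /* 11 */
	printf("%u\n", round_to_num(1012, 100));	/* 1000: remainder 12 < 50 */
	printf("%u\n", round_to_num(1062, 100));	/* 1100: remainder 62 >= 50 */
	return 0;
}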
*/ #include "ice.h" +#include "ice_base.h" #include "ice_lib.h" /** @@ -389,7 +390,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) * by the time we get here. */ if (!is_pfr) - wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0); + wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0); /* In the case of a VFLR, the HW has already reset the VF and we * just need to clean up, so don't hit the VFRTRIG register. @@ -1151,6 +1152,25 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) } /** + * ice_is_vf_disabled + * @vf: pointer to the VF info + * + * Returns true if the PF or VF is disabled, false otherwise. + */ +static bool ice_is_vf_disabled(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + + /* If the PF has been disabled, there is no need resetting VF until + * PF is active again. Similarly, if the VF has been disabled, this + * means something else is resetting the VF, so we shouldn't continue. + * Otherwise, set disable VF state bit for actual reset, and continue. + */ + return (test_bit(__ICE_VF_DIS, pf->state) || + test_bit(ICE_VF_STATE_DIS, vf->vf_states)); +} + +/** * ice_reset_vf - Reset a particular VF * @vf: pointer to the VF structure * @is_vflr: true if VFLR was issued, false if not @@ -1167,19 +1187,15 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) u32 reg; int i; - /* If the PF has been disabled, there is no need resetting VF until - * PF is active again. - */ - if (test_bit(__ICE_VF_DIS, pf->state)) - return false; - - /* If the VF has been disabled, this means something else is - * resetting the VF, so we shouldn't continue. Otherwise, set - * disable VF state bit for actual reset, and continue. - */ - if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states)) - return false; + if (ice_is_vf_disabled(vf)) { + dev_dbg(&pf->pdev->dev, + "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", + vf->vf_id); + return true; + } + /* Set VF disable bit state here, before triggering reset */ + set_bit(ICE_VF_STATE_DIS, vf->vf_states); ice_trigger_vf_reset(vf, is_vflr, false); vsi = pf->vsi[vf->lan_vsi_idx]; @@ -1407,7 +1423,7 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { dev_err(dev, "This device is not capable of SR-IOV\n"); - return -ENODEV; + return -EOPNOTSUPP; } if (pre_existing_vfs && pre_existing_vfs != num_vfs) @@ -1495,12 +1511,10 @@ void ice_process_vflr_event(struct ice_pf *pf) } /** - * ice_vc_dis_vf - Disable a given VF via SW reset + * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF * @vf: pointer to the VF info - * - * Disable the VF through a SW reset */ -static void ice_vc_dis_vf(struct ice_vf *vf) +static void ice_vc_reset_vf(struct ice_vf *vf) { ice_vc_notify_vf_reset(vf); ice_reset_vf(vf, false); @@ -2159,9 +2173,11 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) vector_id = map->vector_id; vsi_id = map->vsi_id; - /* validate msg params */ - if (!(vector_id < pf->hw.func_caps.common_cap - .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id) || + /* vector_id is always 0-based for each VF, and can never be + * larger than or equal to the max allowed interrupts per VF + */ + if (!(vector_id < ICE_MAX_INTR_PER_VF) || + !ice_vc_isvalid_vsi_id(vf, vsi_id) || (!vector_id && (map->rxq_map || map->txq_map))) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -2540,7 +2556,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) } else { /* request is successful, then reset VF */ 
vf->num_req_qs = req_queues; - ice_vc_dis_vf(vf); + ice_vc_reset_vf(vf); dev_info(&pf->pdev->dev, "VF %d granted request of %u queues.\n", vf->vf_id, req_queues); @@ -3124,6 +3140,23 @@ out: } /** + * ice_wait_on_vf_reset + * @vf: The VF being resseting + * + * Poll to make sure a given VF is ready after reset + */ +static void ice_wait_on_vf_reset(struct ice_vf *vf) +{ + int i; + + for (i = 0; i < ICE_MAX_VF_RESET_WAIT; i++) { + if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) + break; + msleep(20); + } +} + +/** * ice_set_vf_mac * @netdev: network interface device structure * @vf_id: VF identifier @@ -3146,6 +3179,15 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) } vf = &pf->vf[vf_id]; + /* Don't set MAC on disabled VF */ + if (ice_is_vf_disabled(vf)) + return -EINVAL; + + /* In case VF is in reset mode, wait until it is completed. Depending + * on factors like queue disabling routine, this could take ~250ms + */ + ice_wait_on_vf_reset(vf); + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id); return -EBUSY; @@ -3167,7 +3209,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) "MAC on VF %d set to %pM. VF driver will be reinitialized\n", vf_id, mac); - ice_vc_dis_vf(vf); + ice_vc_reset_vf(vf); return ret; } @@ -3193,6 +3235,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) } vf = &pf->vf[vf_id]; + /* Don't set Trusted Mode on disabled VF */ + if (ice_is_vf_disabled(vf)) + return -EINVAL; + + /* In case VF is in reset mode, wait until it is completed. Depending + * on factors like queue disabling routine, this could take ~250ms + */ + ice_wait_on_vf_reset(vf); + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id); return -EBUSY; @@ -3203,7 +3254,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) return 0; vf->trusted = trusted; - ice_vc_dis_vf(vf); + ice_vc_reset_vf(vf); dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", vf_id, trusted ? "" : "un"); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 0d9880c8bba3..2e867ad2e81d 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -38,6 +38,7 @@ #define ICE_MAX_POLICY_INTR_PER_VF 33 #define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) #define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) +#define ICE_MAX_VF_RESET_WAIT 15 /* Specific VF states */ enum ice_vf_states { diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c new file mode 100644 index 000000000000..fcffad0069d6 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -0,0 +1,1181 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019, Intel Corporation. 
*/ + +#include <linux/bpf_trace.h> +#include <net/xdp_sock.h> +#include <net/xdp.h> +#include "ice.h" +#include "ice_base.h" +#include "ice_type.h" +#include "ice_xsk.h" +#include "ice_txrx.h" +#include "ice_txrx_lib.h" +#include "ice_lib.h" + +/** + * ice_qp_reset_stats - Resets all stats for rings of given index + * @vsi: VSI that contains rings of interest + * @q_idx: ring index in array + */ +static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx) +{ + memset(&vsi->rx_rings[q_idx]->rx_stats, 0, + sizeof(vsi->rx_rings[q_idx]->rx_stats)); + memset(&vsi->tx_rings[q_idx]->stats, 0, + sizeof(vsi->tx_rings[q_idx]->stats)); + if (ice_is_xdp_ena_vsi(vsi)) + memset(&vsi->xdp_rings[q_idx]->stats, 0, + sizeof(vsi->xdp_rings[q_idx]->stats)); +} + +/** + * ice_qp_clean_rings - Cleans all the rings of a given index + * @vsi: VSI that contains rings of interest + * @q_idx: ring index in array + */ +static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) +{ + ice_clean_tx_ring(vsi->tx_rings[q_idx]); + if (ice_is_xdp_ena_vsi(vsi)) + ice_clean_tx_ring(vsi->xdp_rings[q_idx]); + ice_clean_rx_ring(vsi->rx_rings[q_idx]); +} + +/** + * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector + * @vsi: VSI that has netdev + * @q_vector: q_vector that has NAPI context + * @enable: true for enable, false for disable + */ +static void +ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector, + bool enable) +{ + if (!vsi->netdev || !q_vector) + return; + + if (enable) + napi_enable(&q_vector->napi); + else + napi_disable(&q_vector->napi); +} + +/** + * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring + * @vsi: the VSI that contains queue vector being un-configured + * @rx_ring: Rx ring that will have its IRQ disabled + * @q_vector: queue vector + */ +static void +ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring, + struct ice_q_vector *q_vector) +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + int base = vsi->base_vector; + u16 reg; + u32 val; + + /* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so handle + * here only QINT_RQCTL + */ + reg = rx_ring->reg_idx; + val = rd32(hw, QINT_RQCTL(reg)); + val &= ~QINT_RQCTL_CAUSE_ENA_M; + wr32(hw, QINT_RQCTL(reg), val); + + if (q_vector) { + u16 v_idx = q_vector->v_idx; + + wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0); + ice_flush(hw); + synchronize_irq(pf->msix_entries[v_idx + base].vector); + } +} + +/** + * ice_qvec_cfg_msix - Enable IRQ for given queue vector + * @vsi: the VSI that contains queue vector + * @q_vector: queue vector + */ +static void +ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector) +{ + u16 reg_idx = q_vector->reg_idx; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + struct ice_ring *ring; + + ice_cfg_itr(hw, q_vector); + + wr32(hw, GLINT_RATE(reg_idx), + ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); + + ice_for_each_ring(ring, q_vector->tx) + ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx, + q_vector->tx.itr_idx); + + ice_for_each_ring(ring, q_vector->rx) + ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx, + q_vector->rx.itr_idx); + + ice_flush(hw); +} + +/** + * ice_qvec_ena_irq - Enable IRQ for given queue vector + * @vsi: the VSI that contains queue vector + * @q_vector: queue vector + */ +static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector) +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + + ice_irq_dynamic_ena(hw, vsi, q_vector); + + ice_flush(hw); 
+} + +/** + * ice_qp_dis - Disables a queue pair + * @vsi: VSI of interest + * @q_idx: ring index in array + * + * Returns 0 on success, negative on failure. + */ +static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) +{ + struct ice_txq_meta txq_meta = { }; + struct ice_ring *tx_ring, *rx_ring; + struct ice_q_vector *q_vector; + int timeout = 50; + int err; + + if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq) + return -EINVAL; + + tx_ring = vsi->tx_rings[q_idx]; + rx_ring = vsi->rx_rings[q_idx]; + q_vector = rx_ring->q_vector; + + while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(1000, 2000); + } + netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); + + ice_qvec_dis_irq(vsi, rx_ring, q_vector); + + ice_fill_txq_meta(vsi, tx_ring, &txq_meta); + err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta); + if (err) + return err; + if (ice_is_xdp_ena_vsi(vsi)) { + struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx]; + + memset(&txq_meta, 0, sizeof(txq_meta)); + ice_fill_txq_meta(vsi, xdp_ring, &txq_meta); + err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring, + &txq_meta); + if (err) + return err; + } + err = ice_vsi_ctrl_rx_ring(vsi, false, q_idx); + if (err) + return err; + + ice_qvec_toggle_napi(vsi, q_vector, false); + ice_qp_clean_rings(vsi, q_idx); + ice_qp_reset_stats(vsi, q_idx); + + return 0; +} + +/** + * ice_qp_ena - Enables a queue pair + * @vsi: VSI of interest + * @q_idx: ring index in array + * + * Returns 0 on success, negative on failure. + */ +static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) +{ + struct ice_aqc_add_tx_qgrp *qg_buf; + struct ice_ring *tx_ring, *rx_ring; + struct ice_q_vector *q_vector; + int err; + + if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq) + return -EINVAL; + + qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL); + if (!qg_buf) + return -ENOMEM; + + qg_buf->num_txqs = 1; + + tx_ring = vsi->tx_rings[q_idx]; + rx_ring = vsi->rx_rings[q_idx]; + q_vector = rx_ring->q_vector; + + err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf); + if (err) + goto free_buf; + + if (ice_is_xdp_ena_vsi(vsi)) { + struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx]; + + memset(qg_buf, 0, sizeof(*qg_buf)); + qg_buf->num_txqs = 1; + err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf); + if (err) + goto free_buf; + ice_set_ring_xdp(xdp_ring); + xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring); + } + + err = ice_setup_rx_ctx(rx_ring); + if (err) + goto free_buf; + + ice_qvec_cfg_msix(vsi, q_vector); + + err = ice_vsi_ctrl_rx_ring(vsi, true, q_idx); + if (err) + goto free_buf; + + clear_bit(__ICE_CFG_BUSY, vsi->state); + ice_qvec_toggle_napi(vsi, q_vector, true); + ice_qvec_ena_irq(vsi, q_vector); + + netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); +free_buf: + kfree(qg_buf); + return err; +} + +/** + * ice_xsk_alloc_umems - allocate a UMEM region for an XDP socket + * @vsi: VSI to allocate the UMEM on + * + * Returns 0 on success, negative on error + */ +static int ice_xsk_alloc_umems(struct ice_vsi *vsi) +{ + if (vsi->xsk_umems) + return 0; + + vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems), + GFP_KERNEL); + + if (!vsi->xsk_umems) { + vsi->num_xsk_umems = 0; + return -ENOMEM; + } + + return 0; +} + +/** + * ice_xsk_add_umem - add a UMEM region for XDP sockets + * @vsi: VSI to which the UMEM will be added + * @umem: pointer to a requested UMEM region + * @qid: queue ID + * + * Returns 0 on success, negative on error + */ +static int ice_xsk_add_umem(struct ice_vsi 
*vsi, struct xdp_umem *umem, u16 qid) +{ + int err; + + err = ice_xsk_alloc_umems(vsi); + if (err) + return err; + + vsi->xsk_umems[qid] = umem; + vsi->num_xsk_umems_used++; + + return 0; +} + +/** + * ice_xsk_remove_umem - Remove an UMEM for a certain ring/qid + * @vsi: VSI from which the VSI will be removed + * @qid: Ring/qid associated with the UMEM + */ +static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid) +{ + vsi->xsk_umems[qid] = NULL; + vsi->num_xsk_umems_used--; + + if (vsi->num_xsk_umems_used == 0) { + kfree(vsi->xsk_umems); + vsi->xsk_umems = NULL; + vsi->num_xsk_umems = 0; + } +} + +/** + * ice_xsk_umem_dma_map - DMA map UMEM region for XDP sockets + * @vsi: VSI to map the UMEM region + * @umem: UMEM to map + * + * Returns 0 on success, negative on error + */ +static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem) +{ + struct ice_pf *pf = vsi->back; + struct device *dev; + unsigned int i; + + dev = &pf->pdev->dev; + for (i = 0; i < umem->npgs; i++) { + dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0, + PAGE_SIZE, + DMA_BIDIRECTIONAL, + ICE_RX_DMA_ATTR); + if (dma_mapping_error(dev, dma)) { + dev_dbg(dev, + "XSK UMEM DMA mapping error on page num %d", i); + goto out_unmap; + } + + umem->pages[i].dma = dma; + } + + return 0; + +out_unmap: + for (; i > 0; i--) { + dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR); + umem->pages[i].dma = 0; + } + + return -EFAULT; +} + +/** + * ice_xsk_umem_dma_unmap - DMA unmap UMEM region for XDP sockets + * @vsi: VSI from which the UMEM will be unmapped + * @umem: UMEM to unmap + */ +static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem) +{ + struct ice_pf *pf = vsi->back; + struct device *dev; + unsigned int i; + + dev = &pf->pdev->dev; + for (i = 0; i < umem->npgs; i++) { + dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR); + + umem->pages[i].dma = 0; + } +} + +/** + * ice_xsk_umem_disable - disable a UMEM region + * @vsi: Current VSI + * @qid: queue ID + * + * Returns 0 on success, negative on failure + */ +static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid) +{ + if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems || + !vsi->xsk_umems[qid]) + return -EINVAL; + + ice_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]); + ice_xsk_remove_umem(vsi, qid); + + return 0; +} + +/** + * ice_xsk_umem_enable - enable a UMEM region + * @vsi: Current VSI + * @umem: pointer to a requested UMEM region + * @qid: queue ID + * + * Returns 0 on success, negative on failure + */ +static int +ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid) +{ + struct xdp_umem_fq_reuse *reuseq; + int err; + + if (vsi->type != ICE_VSI_PF) + return -EINVAL; + + vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq); + if (qid >= vsi->num_xsk_umems) + return -EINVAL; + + if (vsi->xsk_umems && vsi->xsk_umems[qid]) + return -EBUSY; + + reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count); + if (!reuseq) + return -ENOMEM; + + xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq)); + + err = ice_xsk_umem_dma_map(vsi, umem); + if (err) + return err; + + err = ice_xsk_add_umem(vsi, umem, qid); + if (err) + return err; + + return 0; +} + +/** + * ice_xsk_umem_setup - enable/disable a UMEM region depending on its state + * @vsi: Current VSI + * @umem: UMEM to enable/associate to a ring, NULL to disable + * @qid: queue ID + * + * Returns 0 on success, negative on failure + */ +int ice_xsk_umem_setup(struct ice_vsi *vsi, 
struct xdp_umem *umem, u16 qid) +{ + bool if_running, umem_present = !!umem; + int ret = 0, umem_failure = 0; + + if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi); + + if (if_running) { + ret = ice_qp_dis(vsi, qid); + if (ret) { + netdev_err(vsi->netdev, "ice_qp_dis error = %d", ret); + goto xsk_umem_if_up; + } + } + + umem_failure = umem_present ? ice_xsk_umem_enable(vsi, umem, qid) : + ice_xsk_umem_disable(vsi, qid); + +xsk_umem_if_up: + if (if_running) { + ret = ice_qp_ena(vsi, qid); + if (!ret && umem_present) + napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi); + else if (ret) + netdev_err(vsi->netdev, "ice_qp_ena error = %d", ret); + } + + if (umem_failure) { + netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d", + umem_present ? "en" : "dis", umem_failure); + return umem_failure; + } + + return ret; +} + +/** + * ice_zca_free - Callback for MEM_TYPE_ZERO_COPY allocations + * @zca: zero-cpoy allocator + * @handle: Buffer handle + */ +void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle) +{ + struct ice_rx_buf *rx_buf; + struct ice_ring *rx_ring; + struct xdp_umem *umem; + u64 hr, mask; + u16 nta; + + rx_ring = container_of(zca, struct ice_ring, zca); + umem = rx_ring->xsk_umem; + hr = umem->headroom + XDP_PACKET_HEADROOM; + + mask = umem->chunk_mask; + + nta = rx_ring->next_to_alloc; + rx_buf = &rx_ring->rx_buf[nta]; + + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + handle &= mask; + + rx_buf->dma = xdp_umem_get_dma(umem, handle); + rx_buf->dma += hr; + + rx_buf->addr = xdp_umem_get_data(umem, handle); + rx_buf->addr += hr; + + rx_buf->handle = (u64)handle + umem->headroom; +} + +/** + * ice_alloc_buf_fast_zc - Retrieve buffer address from XDP umem + * @rx_ring: ring with an xdp_umem bound to it + * @rx_buf: buffer to which xsk page address will be assigned + * + * This function allocates an Rx buffer in the hot path. + * The buffer can come from fill queue or recycle queue. + * + * Returns true if an assignment was successful, false if not. + */ +static __always_inline bool +ice_alloc_buf_fast_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) +{ + struct xdp_umem *umem = rx_ring->xsk_umem; + void *addr = rx_buf->addr; + u64 handle, hr; + + if (addr) { + rx_ring->rx_stats.page_reuse_count++; + return true; + } + + if (!xsk_umem_peek_addr(umem, &handle)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + rx_buf->dma = xdp_umem_get_dma(umem, handle); + rx_buf->dma += hr; + + rx_buf->addr = xdp_umem_get_data(umem, handle); + rx_buf->addr += hr; + + rx_buf->handle = handle + umem->headroom; + + xsk_umem_discard_addr(umem); + return true; +} + +/** + * ice_alloc_buf_slow_zc - Retrieve buffer address from XDP umem + * @rx_ring: ring with an xdp_umem bound to it + * @rx_buf: buffer to which xsk page address will be assigned + * + * This function allocates an Rx buffer in the slow path. + * The buffer can come from fill queue or recycle queue. + * + * Returns true if an assignment was successful, false if not. 
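ice_zca_free() and the zero-copy allocation helpers above derive buffer addresses from a UMEM handle with the same arithmetic: mask a recycled handle back to its chunk boundary, add umem->headroom plus XDP_PACKET_HEADROOM to both the DMA and virtual addresses, and keep handle + umem->headroom for later reuse. The sketch below models only that address math in user-space C; the base addresses are made up, and xdp_umem_get_dma()/xdp_umem_get_data() are approximated as simple base-plus-offset lookups.

#include <stdint.h>
#include <stdio.h>

#define XDP_PACKET_HEADROOM	256
#define CHUNK_SIZE		2048			/* assumed UMEM chunk size */
#define CHUNK_MASK		(~((uint64_t)CHUNK_SIZE - 1))

struct fake_umem {
	uint64_t dma_base;	/* stand-in for the UMEM's DMA mapping */
	uint64_t va_base;	/* stand-in for the UMEM's kernel mapping */
	uint64_t headroom;
};

int main(void)
{
	struct fake_umem umem = { 0x100000, 0x7f0000000000, 128 };
	uint64_t handle = 3 * CHUNK_SIZE + 77;	/* arbitrary offset into chunk 3 */
	uint64_t hr = umem.headroom + XDP_PACKET_HEADROOM;

	handle &= CHUNK_MASK;			/* snap back to the chunk start */

	uint64_t dma  = umem.dma_base + handle + hr;
	uint64_t addr = umem.va_base  + handle + hr;
	uint64_t recycled_handle = handle + umem.headroom;

	printf("dma=%#llx addr=%#llx handle=%#llx\n",
	       (unsigned long long)dma, (unsigned long long)addr,
	       (unsigned long long)recycled_handle);
	return 0;
}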
+ */ +static __always_inline bool +ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) +{ + struct xdp_umem *umem = rx_ring->xsk_umem; + u64 handle, headroom; + + if (!xsk_umem_peek_addr_rq(umem, &handle)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + handle &= umem->chunk_mask; + headroom = umem->headroom + XDP_PACKET_HEADROOM; + + rx_buf->dma = xdp_umem_get_dma(umem, handle); + rx_buf->dma += headroom; + + rx_buf->addr = xdp_umem_get_data(umem, handle); + rx_buf->addr += headroom; + + rx_buf->handle = handle + umem->headroom; + + xsk_umem_discard_addr_rq(umem); + return true; +} + +/** + * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers + * @rx_ring: Rx ring + * @count: The number of buffers to allocate + * @alloc: the function pointer to call for allocation + * + * This function allocates a number of Rx buffers from the fill ring + * or the internal recycle mechanism and places them on the Rx ring. + * + * Returns false if all allocations were successful, true if any fail. + */ +static bool +ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count, + bool alloc(struct ice_ring *, struct ice_rx_buf *)) +{ + union ice_32b_rx_flex_desc *rx_desc; + u16 ntu = rx_ring->next_to_use; + struct ice_rx_buf *rx_buf; + bool ret = false; + + if (!count) + return false; + + rx_desc = ICE_RX_DESC(rx_ring, ntu); + rx_buf = &rx_ring->rx_buf[ntu]; + + do { + if (!alloc(rx_ring, rx_buf)) { + ret = true; + break; + } + + dma_sync_single_range_for_device(rx_ring->dev, rx_buf->dma, 0, + rx_ring->rx_buf_len, + DMA_BIDIRECTIONAL); + + rx_desc->read.pkt_addr = cpu_to_le64(rx_buf->dma); + rx_desc->wb.status_error0 = 0; + + rx_desc++; + rx_buf++; + ntu++; + + if (unlikely(ntu == rx_ring->count)) { + rx_desc = ICE_RX_DESC(rx_ring, 0); + rx_buf = rx_ring->rx_buf; + ntu = 0; + } + } while (--count); + + if (rx_ring->next_to_use != ntu) + ice_release_rx_desc(rx_ring, ntu); + + return ret; +} + +/** + * ice_alloc_rx_bufs_fast_zc - allocate zero copy bufs in the hot path + * @rx_ring: Rx ring + * @count: number of bufs to allocate + * + * Returns false on success, true on failure. + */ +static bool ice_alloc_rx_bufs_fast_zc(struct ice_ring *rx_ring, u16 count) +{ + return ice_alloc_rx_bufs_zc(rx_ring, count, + ice_alloc_buf_fast_zc); +} + +/** + * ice_alloc_rx_bufs_slow_zc - allocate zero copy bufs in the slow path + * @rx_ring: Rx ring + * @count: number of bufs to allocate + * + * Returns false on success, true on failure. + */ +bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count) +{ + return ice_alloc_rx_bufs_zc(rx_ring, count, + ice_alloc_buf_slow_zc); +} + +/** + * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring + * @rx_ring: Rx ring + */ +static void ice_bump_ntc(struct ice_ring *rx_ring) +{ + int ntc = rx_ring->next_to_clean + 1; + + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + prefetch(ICE_RX_DESC(rx_ring, ntc)); +} + +/** + * ice_get_rx_buf_zc - Fetch the current Rx buffer + * @rx_ring: Rx ring + * @size: size of a buffer + * + * This function returns the current, received Rx buffer and does + * DMA synchronization. + * + * Returns a pointer to the received Rx buffer. 
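ice_alloc_rx_bufs_zc() above walks the descriptor and buffer arrays in lock-step, wraps both back to index 0 at the end of the ring, and returns true as soon as the allocator callback fails (note the inverted convention: false means every allocation succeeded). A stripped-down, self-contained model of that fill loop follows; the allocator callback and ring layout are simplified stand-ins, not driver structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_COUNT 8

struct ring {
	uint64_t desc[RING_COUNT];	/* stand-in for the Rx descriptors */
	uint16_t next_to_use;
};

/* allocator callback, mirroring the bool alloc(ring, buf) parameter above */
typedef bool (*alloc_fn)(uint64_t *buf_addr);

static bool fill_ring(struct ring *r, int count, alloc_fn alloc)
{
	uint16_t ntu = r->next_to_use;
	bool failed = false;

	if (!count)
		return false;

	do {
		uint64_t addr;

		if (!alloc(&addr)) {
			failed = true;		/* stop early, report failure */
			break;
		}
		r->desc[ntu] = addr;		/* post the buffer address */
		if (++ntu == RING_COUNT)
			ntu = 0;		/* wrap back to the ring start */
	} while (--count);

	r->next_to_use = ntu;			/* the driver also bumps tail here */
	return failed;
}

static bool always_ok(uint64_t *buf_addr)
{
	static uint64_t next = 0x1000;

	*buf_addr = next;
	next += 0x800;
	return true;
}

int main(void)
{
	struct ring r = { .next_to_use = 6 };

	/* fills slots 6, 7, 0, 1, 2 and leaves next_to_use at 3 */
	printf("failed=%d ntu=%u\n", fill_ring(&r, 5, always_ok), r.next_to_use);
	return 0;
}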
+ */ +static struct ice_rx_buf *ice_get_rx_buf_zc(struct ice_ring *rx_ring, int size) +{ + struct ice_rx_buf *rx_buf; + + rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; + + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 0, + size, DMA_BIDIRECTIONAL); + + return rx_buf; +} + +/** + * ice_reuse_rx_buf_zc - reuse an Rx buffer + * @rx_ring: Rx ring + * @old_buf: The buffer to recycle + * + * This function recycles a finished Rx buffer, and places it on the recycle + * queue (next_to_alloc). + */ +static void +ice_reuse_rx_buf_zc(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) +{ + unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; + u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; + u16 nta = rx_ring->next_to_alloc; + struct ice_rx_buf *new_buf; + + new_buf = &rx_ring->rx_buf[nta++]; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + new_buf->dma = old_buf->dma & mask; + new_buf->dma += hr; + + new_buf->addr = (void *)((unsigned long)old_buf->addr & mask); + new_buf->addr += hr; + + new_buf->handle = old_buf->handle & mask; + new_buf->handle += rx_ring->xsk_umem->headroom; + + old_buf->addr = NULL; +} + +/** + * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer + * @rx_ring: Rx ring + * @rx_buf: zero-copy Rx buffer + * @xdp: XDP buffer + * + * This function allocates a new skb from a zero-copy Rx buffer. + * + * Returns the skb on success, NULL on failure. + */ +static struct sk_buff * +ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + unsigned int datasize_hard = xdp->data_end - + xdp->data_hard_start; + struct sk_buff *skb; + + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp->data, datasize); + if (metasize) + skb_metadata_set(skb, metasize); + + ice_reuse_rx_buf_zc(rx_ring, rx_buf); + + return skb; +} + +/** + * ice_run_xdp_zc - Executes an XDP program in zero-copy path + * @rx_ring: Rx ring + * @xdp: xdp_buff used as input to the XDP program + * + * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} + */ +static int +ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) +{ + int err, result = ICE_XDP_PASS; + struct bpf_prog *xdp_prog; + struct ice_ring *xdp_ring; + u32 act; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + if (!xdp_prog) { + rcu_read_unlock(); + return ICE_XDP_PASS; + } + + act = bpf_prog_run_xdp(xdp_prog, xdp); + xdp->handle += xdp->data - xdp->data_hard_start; + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index]; + result = ice_xmit_xdp_buff(xdp, xdp_ring); + break; + case XDP_REDIRECT: + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? 
ICE_XDP_REDIR : ICE_XDP_CONSUMED; + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fallthrough -- not supported action */ + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + /* fallthrough -- handle aborts by dropping frame */ + case XDP_DROP: + result = ICE_XDP_CONSUMED; + break; + } + + rcu_read_unlock(); + return result; +} + +/** + * ice_clean_rx_irq_zc - consumes packets from the hardware ring + * @rx_ring: AF_XDP Rx ring + * @budget: NAPI budget + * + * Returns number of processed packets on success, remaining budget on failure. + */ +int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); + unsigned int xdp_xmit = 0; + struct xdp_buff xdp; + bool failure = 0; + + xdp.rxq = &rx_ring->xdp_rxq; + + while (likely(total_rx_packets < (unsigned int)budget)) { + union ice_32b_rx_flex_desc *rx_desc; + unsigned int size, xdp_res = 0; + struct ice_rx_buf *rx_buf; + struct sk_buff *skb; + u16 stat_err_bits; + u16 vlan_tag = 0; + u8 rx_ptype; + + if (cleaned_count >= ICE_RX_BUF_WRITE) { + failure |= ice_alloc_rx_bufs_fast_zc(rx_ring, + cleaned_count); + cleaned_count = 0; + } + + rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); + + stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); + if (!ice_test_staterr(rx_desc, stat_err_bits)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we have + * verified the descriptor has been written back. + */ + dma_rmb(); + + size = le16_to_cpu(rx_desc->wb.pkt_len) & + ICE_RX_FLX_DESC_PKT_LEN_M; + if (!size) + break; + + rx_buf = ice_get_rx_buf_zc(rx_ring, size); + if (!rx_buf->addr) + break; + + xdp.data = rx_buf->addr; + xdp.data_meta = xdp.data; + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; + xdp.data_end = xdp.data + size; + xdp.handle = rx_buf->handle; + + xdp_res = ice_run_xdp_zc(rx_ring, &xdp); + if (xdp_res) { + if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { + xdp_xmit |= xdp_res; + rx_buf->addr = NULL; + } else { + ice_reuse_rx_buf_zc(rx_ring, rx_buf); + } + + total_rx_bytes += size; + total_rx_packets++; + cleaned_count++; + + ice_bump_ntc(rx_ring); + continue; + } + + /* XDP_PASS path */ + skb = ice_construct_skb_zc(rx_ring, rx_buf, &xdp); + if (!skb) { + rx_ring->rx_stats.alloc_buf_failed++; + break; + } + + cleaned_count++; + ice_bump_ntc(rx_ring); + + if (eth_skb_pad(skb)) { + skb = NULL; + continue; + } + + total_rx_bytes += skb->len; + total_rx_packets++; + + stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); + if (ice_test_staterr(rx_desc, stat_err_bits)) + vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); + + rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & + ICE_RX_FLEX_DESC_PTYPE_M; + + ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); + ice_receive_skb(rx_ring, skb, vlan_tag); + } + + ice_finalize_xdp_rx(rx_ring, xdp_xmit); + ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); + + return failure ? budget : (int)total_rx_packets; +} + +/** + * ice_xmit_zc - Completes AF_XDP entries, and cleans XDP entries + * @xdp_ring: XDP Tx ring + * @budget: max number of frames to xmit + * + * Returns true if cleanup/transmission is done. 
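Each per-packet verdict from the XDP program is folded into a small bitmask: ICE_XDP_TX and ICE_XDP_REDIR are OR-ed into xdp_xmit across the whole NAPI poll so that the XDP Tx tail bump and xdp_do_flush_map() happen once per batch, which is what ice_finalize_xdp_rx() does. A compact standalone illustration of that accumulate-then-finalize pattern; the verdict values mirror the ICE_XDP_* defines added in ice_txrx.h above, and the flush/tail helpers are just prints.

#include <stdio.h>

#define ICE_XDP_PASS		0
#define ICE_XDP_CONSUMED	(1u << 0)
#define ICE_XDP_TX		(1u << 1)
#define ICE_XDP_REDIR		(1u << 2)

static void flush_redirect_map(void)	{ printf("xdp_do_flush_map()\n"); }
static void bump_xdp_tx_tail(void)	{ printf("update XDP Tx tail\n"); }

int main(void)
{
	/* per-packet verdicts as they might come back from the XDP program */
	unsigned int verdicts[] = { ICE_XDP_PASS, ICE_XDP_TX, ICE_XDP_CONSUMED,
				    ICE_XDP_REDIR, ICE_XDP_TX };
	unsigned int xdp_xmit = 0;

	for (unsigned int i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++)
		if (verdicts[i] & (ICE_XDP_TX | ICE_XDP_REDIR))
			xdp_xmit |= verdicts[i];

	/* finalize once per batch, as ice_finalize_xdp_rx() does */
	if (xdp_xmit & ICE_XDP_REDIR)
		flush_redirect_map();
	if (xdp_xmit & ICE_XDP_TX)
		bump_xdp_tx_tail();
	return 0;
}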
+ */ +static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget) +{ + struct ice_tx_desc *tx_desc = NULL; + bool work_done = true; + struct xdp_desc desc; + dma_addr_t dma; + + while (likely(budget-- > 0)) { + struct ice_tx_buf *tx_buf; + + if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) { + xdp_ring->tx_stats.tx_busy++; + work_done = false; + break; + } + + tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use]; + + if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc)) + break; + + dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr); + + dma_sync_single_for_device(xdp_ring->dev, dma, desc.len, + DMA_BIDIRECTIONAL); + + tx_buf->bytecount = desc.len; + + tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use); + tx_desc->buf_addr = cpu_to_le64(dma); + tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, + 0, desc.len, 0); + + xdp_ring->next_to_use++; + if (xdp_ring->next_to_use == xdp_ring->count) + xdp_ring->next_to_use = 0; + } + + if (tx_desc) { + ice_xdp_ring_update_tail(xdp_ring); + xsk_umem_consume_tx_done(xdp_ring->xsk_umem); + } + + return budget > 0 && work_done; +} + +/** + * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer + * @xdp_ring: XDP Tx ring + * @tx_buf: Tx buffer to clean + */ +static void +ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf) +{ + xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf); + dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); +} + +/** + * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries + * @xdp_ring: XDP Tx ring + * @budget: NAPI budget + * + * Returns true if cleanup/tranmission is done. + */ +bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget) +{ + int total_packets = 0, total_bytes = 0; + s16 ntc = xdp_ring->next_to_clean; + struct ice_tx_desc *tx_desc; + struct ice_tx_buf *tx_buf; + bool xmit_done = true; + u32 xsk_frames = 0; + + tx_desc = ICE_TX_DESC(xdp_ring, ntc); + tx_buf = &xdp_ring->tx_buf[ntc]; + ntc -= xdp_ring->count; + + do { + if (!(tx_desc->cmd_type_offset_bsz & + cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) + break; + + total_bytes += tx_buf->bytecount; + total_packets++; + + if (tx_buf->raw_buf) { + ice_clean_xdp_tx_buf(xdp_ring, tx_buf); + tx_buf->raw_buf = NULL; + } else { + xsk_frames++; + } + + tx_desc->cmd_type_offset_bsz = 0; + tx_buf++; + tx_desc++; + ntc++; + + if (unlikely(!ntc)) { + ntc -= xdp_ring->count; + tx_buf = xdp_ring->tx_buf; + tx_desc = ICE_TX_DESC(xdp_ring, 0); + } + + prefetch(tx_desc); + + } while (likely(--budget)); + + ntc += xdp_ring->count; + xdp_ring->next_to_clean = ntc; + + if (xsk_frames) + xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames); + + ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes); + xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK); + + return budget > 0 && xmit_done; +} + +/** + * ice_xsk_wakeup - Implements ndo_xsk_wakeup + * @netdev: net_device + * @queue_id: queue to wake up + * @flags: ignored in our case, since we have Rx and Tx in the same NAPI + * + * Returns negative on error, zero otherwise. 
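The cleanup loop in ice_clean_tx_irq_zc() above uses the driver's usual signed-index trick: next_to_clean is biased by -count so the wrap point is simply the moment the index increments to zero, at which point the buffer/descriptor pointers reset to the ring base and count is subtracted again. A tiny self-contained demo of that indexing idiom; the values are arbitrary and the real loop terminates on descriptor write-backs rather than a fixed budget.

#include <stdint.h>
#include <stdio.h>

#define RING_COUNT 8

int main(void)
{
	int16_t ntc = 6;		/* next_to_clean before the loop */
	int budget = 5;

	ntc -= RING_COUNT;		/* bias into [-RING_COUNT, 0) */

	do {
		/* the actual ring slot being cleaned is ntc + RING_COUNT */
		printf("cleaning slot %d\n", ntc + RING_COUNT);

		ntc++;
		if (!ntc)		/* hit the end of the ring: re-bias */
			ntc -= RING_COUNT;
	} while (--budget);

	ntc += RING_COUNT;		/* back to an unsigned ring index */
	printf("next_to_clean = %d\n", ntc);	/* 3 after cleaning 6,7,0,1,2 */
	return 0;
}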
+ */ +int +ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, + u32 __always_unused flags) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_q_vector *q_vector; + struct ice_vsi *vsi = np->vsi; + struct ice_ring *ring; + + if (test_bit(__ICE_DOWN, vsi->state)) + return -ENETDOWN; + + if (!ice_is_xdp_ena_vsi(vsi)) + return -ENXIO; + + if (queue_id >= vsi->num_txq) + return -ENXIO; + + if (!vsi->xdp_rings[queue_id]->xsk_umem) + return -ENXIO; + + ring = vsi->xdp_rings[queue_id]; + + /* The idea here is that if NAPI is running, mark a miss, so + * it will run again. If not, trigger an interrupt and + * schedule the NAPI from interrupt context. If NAPI would be + * scheduled here, the interrupt affinity would not be + * honored. + */ + q_vector = ring->q_vector; + if (!napi_if_scheduled_mark_missed(&q_vector->napi)) + ice_trigger_sw_intr(&vsi->back->hw, q_vector); + + return 0; +} + +/** + * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP UMEM attached + * @vsi: VSI to be checked + * + * Returns true if any of the Rx rings has an AF_XDP UMEM attached + */ +bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi) +{ + int i; + + if (!vsi->xsk_umems) + return false; + + for (i = 0; i < vsi->num_xsk_umems; i++) { + if (vsi->xsk_umems[i]) + return true; + } + + return false; +} + +/** + * ice_xsk_clean_rx_ring - clean UMEM queues connected to a given Rx ring + * @rx_ring: ring to be cleaned + */ +void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) +{ + u16 i; + + for (i = 0; i < rx_ring->count; i++) { + struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; + + if (!rx_buf->addr) + continue; + + xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_buf->handle); + rx_buf->addr = NULL; + } +} + +/** + * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its UMEM queues + * @xdp_ring: XDP_Tx ring + */ +void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring) +{ + u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use; + u32 xsk_frames = 0; + + while (ntc != ntu) { + struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc]; + + if (tx_buf->raw_buf) + ice_clean_xdp_tx_buf(xdp_ring, tx_buf); + else + xsk_frames++; + + tx_buf->raw_buf = NULL; + + ntc++; + if (ntc >= xdp_ring->count) + ntc = 0; + } + + if (xsk_frames) + xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames); +} diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h new file mode 100644 index 000000000000..3479e1de98fe --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_xsk.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Intel Corporation. 
*/ + +#ifndef _ICE_XSK_H_ +#define _ICE_XSK_H_ +#include "ice_txrx.h" +#include "ice.h" + +struct ice_vsi; + +#ifdef CONFIG_XDP_SOCKETS +int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid); +void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle); +int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget); +bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget); +int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags); +bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count); +bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi); +void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring); +void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring); +#else +static inline int +ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi, + struct xdp_umem __always_unused *umem, + u16 __always_unused qid) +{ + return -ENOTSUPP; +} + +static inline void +ice_zca_free(struct zero_copy_allocator __always_unused *zca, + unsigned long __always_unused handle) +{ +} + +static inline int +ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring, + int __always_unused budget) +{ + return 0; +} + +static inline bool +ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring, + int __always_unused budget) +{ + return false; +} + +static inline bool +ice_alloc_rx_bufs_slow_zc(struct ice_ring __always_unused *rx_ring, + u16 __always_unused count) +{ + return false; +} + +static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi) +{ + return false; +} + +static inline int +ice_xsk_wakeup(struct net_device __always_unused *netdev, + u32 __always_unused queue_id, u32 __always_unused flags) +{ + return -ENOTSUPP; +} + +#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0) +#define ice_xsk_clean_xdp_ring(xdp_ring) do {} while (0) +#endif /* CONFIG_XDP_SOCKETS */ +#endif /* !_ICE_XSK_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 48a40e4132f9..98346eb064d5 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -5677,8 +5677,8 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, * should have been handled by the upper layers. 
*/ if (tx_ring->launchtime_enable) { - ts = ns_to_timespec64(first->skb->tstamp); - first->skb->tstamp = 0; + ts = ktime_to_timespec64(first->skb->tstamp); + first->skb->tstamp = ktime_set(0, 0); context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); } else { context_desc->seqnum_seed = 0; @@ -6236,7 +6236,6 @@ static void igb_get_stats64(struct net_device *netdev, static int igb_change_mtu(struct net_device *netdev, int new_mtu) { struct igb_adapter *adapter = netdev_priv(netdev); - struct pci_dev *pdev = adapter->pdev; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; /* adjust max frame to be at least the size of a standard frame */ @@ -6252,8 +6251,8 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) if (netif_running(netdev)) igb_down(adapter); - dev_info(&pdev->dev, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index fd3071f55bd3..c39e921757ba 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -521,6 +521,19 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, switch (rq->type) { case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests failing to enable both edges. */ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + if (on) { pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS, rq->extts.index); @@ -551,6 +564,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, return 0; case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + if (on) { pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT, rq->perout.index); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 0f2b68f4bb0f..6003dc3ff5fd 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2437,8 +2437,8 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; - dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 6105c6d1f3c9..9700527dd797 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -862,8 +862,8 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, * should have been handled by the upper layers. 
*/ if (tx_ring->launchtime_enable) { - ts = ns_to_timespec64(first->skb->tstamp); - first->skb->tstamp = 0; + ts = ktime_to_timespec64(first->skb->tstamp); + first->skb->tstamp = ktime_set(0, 0); context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32); } else { context_desc->launch_time = 0; @@ -2272,7 +2272,6 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu) { int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; struct igc_adapter *adapter = netdev_priv(netdev); - struct pci_dev *pdev = adapter->pdev; /* adjust max frame to be at least the size of a standard frame */ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) @@ -2287,8 +2286,8 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu) if (netif_running(netdev)) igc_down(adapter); - dev_info(&pdev->dev, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index cc3196ae5aea..fd9f5d41b594 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -832,9 +832,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int xdp_count, int xdp_idx, int rxr_count, int rxr_idx) { + int node = dev_to_node(&adapter->pdev->dev); struct ixgbe_q_vector *q_vector; struct ixgbe_ring *ring; - int node = NUMA_NO_NODE; int cpu = -1; int ring_count; u8 tcs = adapter->hw_tcs; @@ -845,10 +845,8 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; if (rss_i > 1 && adapter->atr_sample_rate) { - if (cpu_online(v_idx)) { - cpu = v_idx; - node = cpu_to_node(cpu); - } + cpu = cpumask_local_spread(v_idx, node); + node = cpu_to_node(cpu); } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b22baea9d39b..25c097cd8100 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6725,7 +6725,8 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) (new_mtu > ETH_DATA_LEN)) e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); - e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; @@ -8649,7 +8650,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && adapter->ptp_clock) { - if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, + if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && + !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IXGBE_TX_FLAGS_TSTAMP; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 100ac89b345d..d6feaacfbf89 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -622,8 +622,6 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) if (tx_desc) { ixgbe_xdp_ring_update_tail(xdp_ring); xsk_umem_consume_tx_done(xdp_ring->xsk_umem); - if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) - 
xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); } return !!budget && work_done; @@ -691,12 +689,8 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, if (xsk_frames) xsk_umem_complete_tx(umem, xsk_frames); - if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { - if (tx_ring->next_to_clean == tx_ring->next_to_use) - xsk_set_tx_need_wakeup(tx_ring->xsk_umem); - else - xsk_clear_tx_need_wakeup(tx_ring->xsk_umem); - } + if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) + xsk_set_tx_need_wakeup(tx_ring->xsk_umem); return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); } diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 82ea55ae5053..d5b644131cff 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2959,15 +2959,16 @@ static void set_params(struct mv643xx_eth_private *mp, static int get_phy_mode(struct mv643xx_eth_private *mp) { struct device *dev = mp->dev->dev.parent; - int iface = -1; + phy_interface_t iface; + int err; if (dev->of_node) - iface = of_get_phy_mode(dev->of_node); + err = of_get_phy_mode(dev->of_node, &iface); /* Historical default if unspecified. We could also read/write * the interface state in the PSC1 */ - if (iface < 0) + if (!dev->of_node || err) iface = PHY_INTERFACE_MODE_GMII; return iface; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 8f9df6efda61..a06d109c9e80 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1846,7 +1846,6 @@ static int mvneta_rx_refill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, gfp_t gfp_mask) { - enum dma_data_direction dma_dir; dma_addr_t phys_addr; struct page *page; @@ -1856,9 +1855,6 @@ static int mvneta_rx_refill(struct mvneta_port *pp, return -ENOMEM; phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; - dma_dir = page_pool_get_dma_dir(rxq->page_pool); - dma_sync_single_for_device(pp->dev->dev.parent, phys_addr, - MVNETA_MAX_RX_BUF_SIZE, dma_dir); mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq); return 0; @@ -2097,7 +2093,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, err = xdp_do_redirect(pp->dev, xdp, prog); if (err) { ret = MVNETA_XDP_DROPPED; - xdp_return_buff(xdp); + __page_pool_put_page(rxq->page_pool, + virt_to_head_page(xdp->data), + xdp->data_end - xdp->data_hard_start, + true); } else { ret = MVNETA_XDP_REDIR; } @@ -2106,7 +2105,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, case XDP_TX: ret = mvneta_xdp_xmit_back(pp, xdp); if (ret != MVNETA_XDP_TX) - xdp_return_buff(xdp); + __page_pool_put_page(rxq->page_pool, + virt_to_head_page(xdp->data), + xdp->data_end - xdp->data_hard_start, + true); break; default: bpf_warn_invalid_xdp_action(act); @@ -2115,8 +2117,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, trace_xdp_exception(pp->dev, prog, act); /* fall through */ case XDP_DROP: - page_pool_recycle_direct(rxq->page_pool, - virt_to_head_page(xdp->data)); + __page_pool_put_page(rxq->page_pool, + virt_to_head_page(xdp->data), + xdp->data_end - xdp->data_hard_start, + true); ret = MVNETA_XDP_DROPPED; break; } @@ -2154,7 +2158,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, prefetch(data); xdp->data_hard_start = data; - xdp->data = data + MVNETA_SKB_HEADROOM + MVNETA_MH_SIZE; + xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE; xdp->data_end = xdp->data + data_len; xdp_set_data_meta_invalid(xdp); @@ 
-2219,7 +2223,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, /* refill descriptor with new buffer later */ skb_add_rx_frag(rxq->skb, skb_shinfo(rxq->skb)->nr_frags, - page, MVNETA_SKB_HEADROOM, data_len, + page, pp->rx_offset_correction, data_len, PAGE_SIZE); } page_pool_release_page(rxq->page_pool, page); @@ -3065,11 +3069,13 @@ static int mvneta_create_page_pool(struct mvneta_port *pp, struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); struct page_pool_params pp_params = { .order = 0, - .flags = PP_FLAG_DMA_MAP, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, .pool_size = size, .nid = cpu_to_node(0), .dev = pp->dev->dev.parent, .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, + .offset = pp->rx_offset_correction, + .max_len = MVNETA_MAX_RX_BUF_SIZE, }; int err; @@ -4797,9 +4803,9 @@ static int mvneta_probe(struct platform_device *pdev) struct phy *comphy; const char *dt_mac_addr; char hw_mac_addr[ETH_ALEN]; + phy_interface_t phy_mode; const char *mac_from; int tx_csum_limit; - int phy_mode; int err; int cpu; @@ -4812,10 +4818,9 @@ static int mvneta_probe(struct platform_device *pdev) if (dev->irq == 0) return -EINVAL; - phy_mode = of_get_phy_mode(dn); - if (phy_mode < 0) { + err = of_get_phy_mode(dn, &phy_mode); + if (err) { dev_err(&pdev->dev, "incorrect phy-mode\n"); - err = -EINVAL; goto err_free_irq; } diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig index 711ada7139d3..fb34fbd62088 100644 --- a/drivers/net/ethernet/marvell/octeontx2/Kconfig +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -16,3 +16,12 @@ config OCTEONTX2_AF Unit's admin function manager which manages all RVU HW resources and provides a medium to other PF/VFs to configure HW. Should be enabled for other RVU device drivers to work. + +config NDC_DIS_DYNAMIC_CACHING + bool "Disable caching of dynamic entries in NDC" + depends on OCTEONTX2_AF + default n + ---help--- + This config option disables caching of dynamic entries such as NIX SQEs + , NPA stack pages etc in NDC. Also locks down NIX SQ/CQ/RQ/RSS and + NPA Aura/Pool contexts. diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index 06329acf9c2c..1b25948c662b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -8,4 +8,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o octeontx2_mbox-y := mbox.o octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ - rvu_reg.o rvu_npc.o + rvu_reg.o rvu_npc.o rvu_debugfs.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 6d55e3d0b7ea..5ca788691911 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -138,6 +138,16 @@ void *cgx_get_pdata(int cgx_id) } EXPORT_SYMBOL(cgx_get_pdata); +int cgx_get_cgxid(void *cgxd) +{ + struct cgx *cgx = cgxd; + + if (!cgx) + return -EINVAL; + + return cgx->cgx_id; +} + /* Ensure the required lock for event queue(where asynchronous events are * posted) is acquired before calling this API. 
Else an asynchronous event(with * latest link status) can reach the destination before this function returns @@ -281,6 +291,35 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable) } EXPORT_SYMBOL(cgx_lmac_promisc_config); +/* Enable or disable forwarding received pause frames to Tx block */ +void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable) +{ + struct cgx *cgx = cgxd; + u64 cfg; + + if (!cgx) + return; + + if (enable) { + cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); + cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; + cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); + + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); + cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK; + cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); + } else { + cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); + cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; + cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); + + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); + cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK; + cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); + } +} +EXPORT_SYMBOL(cgx_lmac_enadis_rx_pause_fwding); + int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat) { struct cgx *cgx = cgxd; @@ -321,6 +360,27 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable) } EXPORT_SYMBOL(cgx_lmac_rx_tx_enable); +int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable) +{ + struct cgx *cgx = cgxd; + u64 cfg, last; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + + cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); + last = cfg; + if (enable) + cfg |= DATA_PKT_TX_EN; + else + cfg &= ~DATA_PKT_TX_EN; + + if (cfg != last) + cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); + return !!(last & DATA_PKT_TX_EN); +} +EXPORT_SYMBOL(cgx_lmac_tx_enable); + /* CGX Firmware interface low level support */ static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 206dc5dc1df8..9343bf39cfac 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Marvell OcteonTx2 CGX driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 CGX driver * * Copyright (C) 2018 Marvell International Ltd. 
* @@ -56,6 +56,11 @@ #define CGXX_GMP_PCS_MRX_CTL 0x30000 #define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14) +#define CGXX_SMUX_RX_FRM_CTL 0x20020 +#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3) +#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028 +#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3) + #define CGX_COMMAND_REG CGXX_SCRATCH1_REG #define CGX_EVENT_REG CGXX_SCRATCH0_REG #define CGX_CMD_TIMEOUT 2200 /* msecs */ @@ -63,6 +68,11 @@ #define CGX_NVEC 37 #define CGX_LMAC_FWI 0 +enum cgx_nix_stat_type { + NIX_STATS_RX, + NIX_STATS_TX, +}; + enum LMAC_TYPE { LMAC_MODE_SGMII = 0, LMAC_MODE_XAUI = 1, @@ -96,6 +106,7 @@ struct cgx_event_cb { extern struct pci_driver cgx_driver; int cgx_get_cgxcnt_max(void); +int cgx_get_cgxid(void *cgxd); int cgx_get_lmac_cnt(void *cgxd); void *cgx_get_pdata(int cgx_id); int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind); @@ -104,9 +115,11 @@ int cgx_lmac_evh_unregister(void *cgxd, int lmac_id); int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat); int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat); int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable); +int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr); u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id); void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable); +void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable); int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable); int cgx_get_link_info(void *cgxd, int lmac_id, struct cgx_link_user_info *linfo); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h index fb3ba4968a9b..473d9751601f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Marvell OcteonTx2 CGX driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 CGX driver * * Copyright (C) 2018 Marvell International Ltd. * diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index e332e82fc066..784207bae5f8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Marvell OcteonTx2 RVU Admin Function driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver * * Copyright (C) 2018 Marvell International Ltd. 
* @@ -196,4 +196,20 @@ enum nix_scheduler { #define DEFAULT_RSS_CONTEXT_GROUP 0 #define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */ +/* NDC info */ +enum ndc_idx_e { + NIX0_RX = 0x0, + NIX0_TX = 0x1, + NPA0_U = 0x2, +}; + +enum ndc_ctype_e { + CACHING = 0x0, + BYPASS = 0x1, +}; + +#define NDC_MAX_PORT 6 +#define NDC_READ_TRANS 0 +#define NDC_WRITE_TRANS 1 + #endif /* COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c index d6f9ed8ea966..387e33fa417a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -19,17 +19,20 @@ static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); void otx2_mbox_reset(struct otx2_mbox *mbox, int devid) { + void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE); struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_hdr *tx_hdr, *rx_hdr; - tx_hdr = mdev->mbase + mbox->tx_start; - rx_hdr = mdev->mbase + mbox->rx_start; + tx_hdr = hw_mbase + mbox->tx_start; + rx_hdr = hw_mbase + mbox->rx_start; spin_lock(&mdev->mbox_lock); mdev->msg_size = 0; mdev->rsp_size = 0; tx_hdr->num_msgs = 0; + tx_hdr->msg_size = 0; rx_hdr->num_msgs = 0; + rx_hdr->msg_size = 0; spin_unlock(&mdev->mbox_lock); } EXPORT_SYMBOL(otx2_mbox_reset); @@ -133,16 +136,17 @@ EXPORT_SYMBOL(otx2_mbox_init); int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid) { + unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT); struct otx2_mbox_dev *mdev = &mbox->dev[devid]; - int timeout = 0, sleep = 1; + struct device *sender = &mbox->pdev->dev; - while (mdev->num_msgs != mdev->msgs_acked) { - msleep(sleep); - timeout += sleep; - if (timeout >= MBOX_RSP_TIMEOUT) - return -EIO; + while (!time_after(jiffies, timeout)) { + if (mdev->num_msgs == mdev->msgs_acked) + return 0; + usleep_range(800, 1000); } - return 0; + dev_dbg(sender, "timed out while waiting for rsp\n"); + return -EIO; } EXPORT_SYMBOL(otx2_mbox_wait_for_rsp); @@ -162,13 +166,25 @@ EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp); void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid) { + void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE); struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_hdr *tx_hdr, *rx_hdr; - tx_hdr = mdev->mbase + mbox->tx_start; - rx_hdr = mdev->mbase + mbox->rx_start; + tx_hdr = hw_mbase + mbox->tx_start; + rx_hdr = hw_mbase + mbox->rx_start; + + /* If bounce buffer is implemented copy mbox messages from + * bounce buffer to hw mbox memory. 
+ */ + if (mdev->mbase != hw_mbase) + memcpy(hw_mbase + mbox->tx_start + msgs_offset, + mdev->mbase + mbox->tx_start + msgs_offset, + mdev->msg_size); spin_lock(&mdev->mbox_lock); + + tx_hdr->msg_size = mdev->msg_size; + /* Reset header for next messages */ mdev->msg_size = 0; mdev->rsp_size = 0; @@ -215,7 +231,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size; /* Clear the whole msg region */ - memset(msghdr, 0, sizeof(*msghdr) + size); + memset(msghdr, 0, size); /* Init message header with reset values */ msghdr->ver = OTX2_MBOX_VERSION; mdev->msg_size += size; @@ -236,8 +252,10 @@ struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, struct otx2_mbox_dev *mdev = &mbox->dev[devid]; u16 msgs; + spin_lock(&mdev->mbox_lock); + if (mdev->num_msgs != mdev->msgs_acked) - return ERR_PTR(-ENODEV); + goto error; for (msgs = 0; msgs < mdev->msgs_acked; msgs++) { struct mbox_msghdr *pmsg = mdev->mbase + imsg; @@ -245,18 +263,55 @@ struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, if (msg == pmsg) { if (pmsg->id != prsp->id) - return ERR_PTR(-ENODEV); + goto error; + spin_unlock(&mdev->mbox_lock); return prsp; } - imsg = pmsg->next_msgoff; - irsp = prsp->next_msgoff; + imsg = mbox->tx_start + pmsg->next_msgoff; + irsp = mbox->rx_start + prsp->next_msgoff; } +error: + spin_unlock(&mdev->mbox_lock); return ERR_PTR(-ENODEV); } EXPORT_SYMBOL(otx2_mbox_get_rsp); +int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid) +{ + unsigned long ireq = mbox->tx_start + msgs_offset; + unsigned long irsp = mbox->rx_start + msgs_offset; + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + int rc = -ENODEV; + u16 msgs; + + spin_lock(&mdev->mbox_lock); + + if (mdev->num_msgs != mdev->msgs_acked) + goto exit; + + for (msgs = 0; msgs < mdev->msgs_acked; msgs++) { + struct mbox_msghdr *preq = mdev->mbase + ireq; + struct mbox_msghdr *prsp = mdev->mbase + irsp; + + if (preq->id != prsp->id) + goto exit; + if (prsp->rc) { + rc = prsp->rc; + goto exit; + } + + ireq = mbox->tx_start + preq->next_msgoff; + irsp = mbox->rx_start + prsp->next_msgoff; + } + rc = 0; +exit: + spin_unlock(&mdev->mbox_lock); + return rc; +} +EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs); + int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 76a4575d18ff..a589748f1240 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Marvell OcteonTx2 RVU Admin Function driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver * * Copyright (C) 2018 Marvell International Ltd. * @@ -36,7 +36,7 @@ #define INTR_MASK(pfvfs) ((pfvfs < 64) ? 
(BIT_ULL(pfvfs) - 1) : (~0ull)) -#define MBOX_RSP_TIMEOUT 1000 /* in ms, Time to wait for mbox response */ +#define MBOX_RSP_TIMEOUT 2000 /* Time(ms) to wait for mbox response */ #define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */ @@ -75,6 +75,7 @@ struct otx2_mbox { /* Header which preceeds all mbox messages */ struct mbox_hdr { + u64 msg_size; /* Total msgs size embedded */ u16 num_msgs; /* No of msgs embedded */ }; @@ -103,6 +104,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, int size, int size_rsp); struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, struct mbox_msghdr *msg); +int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid); int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id); bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid); @@ -125,6 +127,7 @@ M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \ M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \ M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \ M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \ +M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \ /* CGX mbox IDs (range 0x200 - 0x3FF) */ \ M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \ M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \ @@ -300,6 +303,12 @@ struct msix_offset_rsp { u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT]; }; +struct get_hw_cap_rsp { + struct mbox_msghdr hdr; + u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */ + u8 nix_shaping; /* Is shaping and coloring supported */ +}; + /* CGX mbox message formats */ struct cgx_stats_rsp { @@ -352,6 +361,7 @@ struct npa_lf_alloc_req { int node; int aura_sz; /* No of auras */ u32 nr_pools; /* No of pools */ + u64 way_mask; }; struct npa_lf_alloc_rsp { @@ -442,6 +452,7 @@ struct nix_lf_alloc_req { u16 npa_func; u16 sso_func; u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */ + u64 way_mask; }; struct nix_lf_alloc_rsp { @@ -512,6 +523,9 @@ struct nix_txsch_alloc_rsp { /* Scheduler queue list allocated at each level */ u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; + u8 aggr_level; /* Traffic aggregation scheduler level */ + u8 aggr_lvl_rr_prio; /* Aggregation lvl's RR_PRIO config */ + u8 link_cfg_lvl; /* LINKX_CFG CSRs mapped to TL3 or TL2's index ? 
*/ }; struct nix_txsch_free_req { @@ -578,6 +592,18 @@ struct nix_rss_flowkey_cfg { #define NIX_FLOW_KEY_TYPE_TCP BIT(3) #define NIX_FLOW_KEY_TYPE_UDP BIT(4) #define NIX_FLOW_KEY_TYPE_SCTP BIT(5) +#define NIX_FLOW_KEY_TYPE_NVGRE BIT(6) +#define NIX_FLOW_KEY_TYPE_VXLAN BIT(7) +#define NIX_FLOW_KEY_TYPE_GENEVE BIT(8) +#define NIX_FLOW_KEY_TYPE_ETH_DMAC BIT(9) +#define NIX_FLOW_KEY_TYPE_IPV6_EXT BIT(10) +#define NIX_FLOW_KEY_TYPE_GTPU BIT(11) +#define NIX_FLOW_KEY_TYPE_INNR_IPV4 BIT(12) +#define NIX_FLOW_KEY_TYPE_INNR_IPV6 BIT(13) +#define NIX_FLOW_KEY_TYPE_INNR_TCP BIT(14) +#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15) +#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16) +#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17) u32 flowkey_cfg; /* Flowkey types selected */ u8 group; /* RSS context or group */ }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 8d6d90fdfb73..3803af9231c6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Marvell OcteonTx2 RVU Admin Function driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver * * Copyright (C) 2018 Marvell International Ltd. * @@ -27,26 +27,45 @@ enum NPC_LID_E { enum npc_kpu_la_ltype { NPC_LT_LA_8023 = 1, NPC_LT_LA_ETHER, + NPC_LT_LA_IH_NIX_ETHER, + NPC_LT_LA_IH_8_ETHER, + NPC_LT_LA_IH_4_ETHER, + NPC_LT_LA_IH_2_ETHER, + NPC_LT_LA_HIGIG2_ETHER, + NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_LT_LA_CUSTOM0 = 0xE, + NPC_LT_LA_CUSTOM1 = 0xF, }; enum npc_kpu_lb_ltype { NPC_LT_LB_ETAG = 1, NPC_LT_LB_CTAG, - NPC_LT_LB_STAG, + NPC_LT_LB_STAG_QINQ, NPC_LT_LB_BTAG, - NPC_LT_LB_QINQ, NPC_LT_LB_ITAG, + NPC_LT_LB_DSA, + NPC_LT_LB_DSA_VLAN, + NPC_LT_LB_EDSA, + NPC_LT_LB_EDSA_VLAN, + NPC_LT_LB_EXDSA, + NPC_LT_LB_EXDSA_VLAN, + NPC_LT_LB_CUSTOM0 = 0xE, + NPC_LT_LB_CUSTOM1 = 0xF, }; enum npc_kpu_lc_ltype { NPC_LT_LC_IP = 1, + NPC_LT_LC_IP_OPT, NPC_LT_LC_IP6, + NPC_LT_LC_IP6_EXT, NPC_LT_LC_ARP, NPC_LT_LC_RARP, NPC_LT_LC_MPLS, NPC_LT_LC_NSH, NPC_LT_LC_PTP, NPC_LT_LC_FCOE, + NPC_LT_LC_CUSTOM0 = 0xE, + NPC_LT_LC_CUSTOM1 = 0xF, }; /* Don't modify Ltypes upto SCTP, otherwise it will @@ -57,49 +76,67 @@ enum npc_kpu_ld_ltype { NPC_LT_LD_UDP, NPC_LT_LD_ICMP, NPC_LT_LD_SCTP, - NPC_LT_LD_IGMP, NPC_LT_LD_ICMP6, + NPC_LT_LD_IGMP = 8, NPC_LT_LD_ESP, NPC_LT_LD_AH, NPC_LT_LD_GRE, - NPC_LT_LD_GRE_MPLS, - NPC_LT_LD_GRE_NSH, - NPC_LT_LD_TU_MPLS, + NPC_LT_LD_NVGRE, + NPC_LT_LD_NSH, + NPC_LT_LD_TU_MPLS_IN_NSH, + NPC_LT_LD_TU_MPLS_IN_IP, + NPC_LT_LD_CUSTOM0 = 0xE, + NPC_LT_LD_CUSTOM1 = 0xF, }; enum npc_kpu_le_ltype { - NPC_LT_LE_TU_ETHER = 1, - NPC_LT_LE_TU_PPP, - NPC_LT_LE_TU_MPLS_IN_NSH, - NPC_LT_LE_TU_3RD_NSH, + NPC_LT_LE_VXLAN = 1, + NPC_LT_LE_GENEVE, + NPC_LT_LE_GTPU = 4, + NPC_LT_LE_VXLANGPE, + NPC_LT_LE_GTPC, + NPC_LT_LE_NSH, + NPC_LT_LE_TU_MPLS_IN_GRE, + NPC_LT_LE_TU_NSH_IN_GRE, + NPC_LT_LE_TU_MPLS_IN_UDP, + NPC_LT_LE_CUSTOM0 = 0xE, + NPC_LT_LE_CUSTOM1 = 0xF, }; enum npc_kpu_lf_ltype { - NPC_LT_LF_TU_IP = 1, - NPC_LT_LF_TU_IP6, - NPC_LT_LF_TU_ARP, - NPC_LT_LF_TU_MPLS_IP, - NPC_LT_LF_TU_MPLS_IP6, - NPC_LT_LF_TU_MPLS_ETHER, + NPC_LT_LF_TU_ETHER = 1, + NPC_LT_LF_TU_PPP, + NPC_LT_LF_TU_MPLS_IN_VXLANGPE, + NPC_LT_LF_TU_NSH_IN_VXLANGPE, + NPC_LT_LF_TU_MPLS_IN_NSH, + NPC_LT_LF_TU_3RD_NSH, + NPC_LT_LF_CUSTOM0 = 0xE, + NPC_LT_LF_CUSTOM1 = 0xF, }; enum npc_kpu_lg_ltype { - NPC_LT_LG_TU_TCP = 1, - NPC_LT_LG_TU_UDP, - NPC_LT_LG_TU_SCTP, - NPC_LT_LG_TU_ICMP, - NPC_LT_LG_TU_IGMP, - 
NPC_LT_LG_TU_ICMP6, - NPC_LT_LG_TU_ESP, - NPC_LT_LG_TU_AH, + NPC_LT_LG_TU_IP = 1, + NPC_LT_LG_TU_IP6, + NPC_LT_LG_TU_ARP, + NPC_LT_LG_TU_ETHER_IN_NSH, + NPC_LT_LG_CUSTOM0 = 0xE, + NPC_LT_LG_CUSTOM1 = 0xF, }; +/* Don't modify Ltypes upto SCTP, otherwise it will + * effect flow tag calculation and thus RSS. + */ enum npc_kpu_lh_ltype { - NPC_LT_LH_TCP_DATA = 1, - NPC_LT_LH_HTTP_DATA, - NPC_LT_LH_HTTPS_DATA, - NPC_LT_LH_PPTP_DATA, - NPC_LT_LH_UDP_DATA, + NPC_LT_LH_TU_TCP = 1, + NPC_LT_LH_TU_UDP, + NPC_LT_LH_TU_ICMP, + NPC_LT_LH_TU_SCTP, + NPC_LT_LH_TU_ICMP6, + NPC_LT_LH_TU_IGMP = 8, + NPC_LT_LH_TU_ESP, + NPC_LT_LH_TU_AH, + NPC_LT_LH_CUSTOM0 = 0xE, + NPC_LT_LH_CUSTOM1 = 0xF, }; struct npc_kpu_profile_cam { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h index b2ce957605bb..aa2727e6211a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Marvell OcteonTx2 RVU Admin Function driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver * * Copyright (C) 2018 Marvell International Ltd. * @@ -11,6 +11,11 @@ #ifndef NPC_PROFILE_H #define NPC_PROFILE_H +#define NPC_KPU_PROFILE_VER 0x0000000100050000 + +#define NPC_IH_W 0x8000 +#define NPC_IH_UTAG 0x2000 + #define NPC_ETYPE_IP 0x0800 #define NPC_ETYPE_IP6 0x86dd #define NPC_ETYPE_ARP 0x0806 @@ -27,6 +32,7 @@ #define NPC_ETYPE_TRANS_ETH_BR 0x6558 #define NPC_ETYPE_PPP 0x880b #define NPC_ETYPE_NSH 0x894f +#define NPC_ETYPE_DSA 0xdada #define NPC_IPNH_HOP 0 #define NPC_IPNH_ICMP 1 @@ -44,13 +50,19 @@ #define NPC_IPNH_NONH 59 #define NPC_IPNH_DEST 60 #define NPC_IPNH_SCTP 132 +#define NPC_IPNH_MOBILITY 135 #define NPC_IPNH_MPLS 137 +#define NPC_IPNH_HOSTID 139 +#define NPC_IPNH_SHIM6 140 +#define NPC_UDP_PORT_PTP_E 319 +#define NPC_UDP_PORT_PTP_G 320 #define NPC_UDP_PORT_GTPC 2123 #define NPC_UDP_PORT_GTPU 2152 #define NPC_UDP_PORT_VXLAN 4789 #define NPC_UDP_PORT_VXLANGPE 4790 #define NPC_UDP_PORT_GENEVE 6081 +#define NPC_UDP_PORT_MPLS 6635 #define NPC_VXLANGPE_NP_IP 0x1 #define NPC_VXLANGPE_NP_IP6 0x2 @@ -72,11 +84,17 @@ #define NPC_MPLS_S 0x0100 +#define NPC_IP_TTL_MASK 0xff00 #define NPC_IP_VER_4 0x4000 #define NPC_IP_VER_6 0x6000 #define NPC_IP_VER_MASK 0xf000 #define NPC_IP_HDR_LEN_5 0x0500 #define NPC_IP_HDR_LEN_MASK 0x0f00 +#define NPC_IP_HDR_MF 0x2000 +#define NPC_IP_HDR_FRAGOFF 0x1fff + +#define NPC_IP6_HOP_MASK 0x00ff +#define NPC_IP6_FRAG_FRAGOFF 0xfff8 #define NPC_GRE_F_CSUM (0x1 << 15) #define NPC_GRE_F_ROUTE (0x1 << 14) @@ -108,22 +126,44 @@ #define NPC_GTP_MT_G_PDU 0xff #define NPC_GTP_MT_MASK 0xff +#define NPC_TCP_FLAGS_FIN 0x0001 +#define NPC_TCP_FLAGS_SYN 0x0002 +#define NPC_TCP_FLAGS_RST 0x0004 +#define NPC_TCP_FLAGS_PSH 0x0008 +#define NPC_TCP_FLAGS_ACK 0x0010 +#define NPC_TCP_FLAGS_URG 0x0020 +#define NPC_TCP_FLAGS_MASK 0x003f + #define NPC_TCP_DATA_OFFSET_5 0x5000 #define NPC_TCP_DATA_OFFSET_MASK 0xf000 +#define NPC_DSA_EXTEND 0x1000 +#define NPC_DSA_EDSA 0x8000 + enum npc_kpu_parser_state { NPC_S_NA = 0, NPC_S_KPU1_ETHER, - NPC_S_KPU1_PKI, + NPC_S_KPU1_IH_NIX, + NPC_S_KPU1_IH, + NPC_S_KPU1_EXDSA, + NPC_S_KPU1_HIGIG2, + NPC_S_KPU1_IH_NIX_HIGIG2, NPC_S_KPU2_CTAG, + NPC_S_KPU2_CTAG2, NPC_S_KPU2_SBTAG, NPC_S_KPU2_QINQ, NPC_S_KPU2_ETAG, NPC_S_KPU2_ITAG, + NPC_S_KPU2_PREHEADER, + NPC_S_KPU2_EXDSA, NPC_S_KPU3_CTAG, NPC_S_KPU3_STAG, NPC_S_KPU3_QINQ, NPC_S_KPU3_ITAG, + NPC_S_KPU3_CTAG_C, + 
NPC_S_KPU3_STAG_C, + NPC_S_KPU3_QINQ_C, + NPC_S_KPU3_DSA, NPC_S_KPU4_MPLS, NPC_S_KPU4_NSH, NPC_S_KPU5_IP, @@ -136,7 +176,12 @@ enum npc_kpu_parser_state { NPC_S_KPU5_MPLS_PL, NPC_S_KPU5_NSH, NPC_S_KPU6_IP6_EXT, + NPC_S_KPU6_IP6_HOP_DEST, + NPC_S_KPU6_IP6_ROUT, + NPC_S_KPU6_IP6_FRAG, NPC_S_KPU7_IP6_EXT, + NPC_S_KPU7_IP6_ROUT, + NPC_S_KPU7_IP6_FRAG, NPC_S_KPU8_TCP, NPC_S_KPU8_UDP, NPC_S_KPU8_SCTP, @@ -146,16 +191,26 @@ enum npc_kpu_parser_state { NPC_S_KPU8_GRE, NPC_S_KPU8_ESP, NPC_S_KPU8_AH, - NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, - NPC_S_KPU9_TU_MPLS, - NPC_S_KPU9_TU_NSH, + NPC_S_KPU9_TU_MPLS_IN_GRE, + NPC_S_KPU9_TU_MPLS_IN_NSH, + NPC_S_KPU9_TU_MPLS_IN_IP, + NPC_S_KPU9_TU_MPLS_IN_UDP, + NPC_S_KPU9_TU_NSH_IN_GRE, + NPC_S_KPU9_VXLAN, + NPC_S_KPU9_VXLANGPE, + NPC_S_KPU9_GENEVE, + NPC_S_KPU9_GTPC, + NPC_S_KPU9_GTPU, + NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, NPC_S_KPU10_TU_MPLS_PL, NPC_S_KPU10_TU_MPLS, - NPC_S_KPU10_TU_NSH, + NPC_S_KPU10_TU_NSH_IN_VXLANGPE, NPC_S_KPU11_TU_ETHER, NPC_S_KPU11_TU_PPP, NPC_S_KPU11_TU_MPLS_IN_NSH, - NPC_S_KPU11_TU_3RD_NSH, + NPC_S_KPU11_TU_MPLS_PL, + NPC_S_KPU11_TU_MPLS, + NPC_S_KPU11_TU_ETHER_IN_NSH, NPC_S_KPU12_TU_IP, NPC_S_KPU12_TU_IP6, NPC_S_KPU12_TU_ARP, @@ -174,135 +229,172 @@ enum npc_kpu_parser_state { NPC_S_KPU16_PPTP_DATA, NPC_S_KPU16_TCP_DATA, NPC_S_KPU16_UDP_DATA, + NPC_S_KPU16_UDP_PTP, NPC_S_LAST /* has to be the last item */ }; -enum npc_kpu_parser_flag { - NPC_F_NA = 0, - NPC_F_PKI, - NPC_F_PKI_VLAN, - NPC_F_PKI_ETAG, - NPC_F_PKI_ITAG, - NPC_F_PKI_MPLS, - NPC_F_PKI_NSH, - NPC_F_ETYPE_UNK, - NPC_F_ETHER_VLAN, - NPC_F_ETHER_ETAG, - NPC_F_ETHER_ITAG, - NPC_F_ETHER_MPLS, - NPC_F_ETHER_NSH, - NPC_F_STAG_CTAG, - NPC_F_STAG_CTAG_UNK, - NPC_F_STAG_STAG_CTAG, - NPC_F_STAG_STAG_STAG, - NPC_F_QINQ_CTAG, - NPC_F_QINQ_CTAG_UNK, - NPC_F_QINQ_QINQ_CTAG, - NPC_F_QINQ_QINQ_QINQ, - NPC_F_BTAG_ITAG, - NPC_F_BTAG_ITAG_STAG, - NPC_F_BTAG_ITAG_CTAG, - NPC_F_BTAG_ITAG_UNK, - NPC_F_ETAG_CTAG, - NPC_F_ETAG_BTAG_ITAG, - NPC_F_ETAG_STAG, - NPC_F_ETAG_QINQ, - NPC_F_ETAG_ITAG, - NPC_F_ETAG_ITAG_STAG, - NPC_F_ETAG_ITAG_CTAG, - NPC_F_ETAG_ITAG_UNK, - NPC_F_ITAG_STAG_CTAG, - NPC_F_ITAG_STAG, - NPC_F_ITAG_CTAG, - NPC_F_MPLS_4_LABELS, - NPC_F_MPLS_3_LABELS, - NPC_F_MPLS_2_LABELS, - NPC_F_IP_HAS_OPTIONS, - NPC_F_IP_IP_IN_IP, - NPC_F_IP_6TO4, - NPC_F_IP_MPLS_IN_IP, - NPC_F_IP_UNK_PROTO, - NPC_F_IP_IP_IN_IP_HAS_OPTIONS, - NPC_F_IP_6TO4_HAS_OPTIONS, - NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS, - NPC_F_IP_UNK_PROTO_HAS_OPTIONS, - NPC_F_IP6_HAS_EXT, - NPC_F_IP6_TUN_IP6, - NPC_F_IP6_MPLS_IN_IP, - NPC_F_TCP_HAS_OPTIONS, - NPC_F_TCP_HTTP, - NPC_F_TCP_HTTPS, - NPC_F_TCP_PPTP, - NPC_F_TCP_UNK_PORT, - NPC_F_TCP_HTTP_HAS_OPTIONS, - NPC_F_TCP_HTTPS_HAS_OPTIONS, - NPC_F_TCP_PPTP_HAS_OPTIONS, - NPC_F_TCP_UNK_PORT_HAS_OPTIONS, - NPC_F_UDP_VXLAN, - NPC_F_UDP_VXLAN_NOVNI, - NPC_F_UDP_VXLAN_NOVNI_NSH, - NPC_F_UDP_VXLANGPE, - NPC_F_UDP_VXLANGPE_NSH, - NPC_F_UDP_VXLANGPE_MPLS, - NPC_F_UDP_VXLANGPE_NOVNI, - NPC_F_UDP_VXLANGPE_NOVNI_NSH, - NPC_F_UDP_VXLANGPE_NOVNI_MPLS, - NPC_F_UDP_VXLANGPE_UNK, - NPC_F_UDP_VXLANGPE_NONP, - NPC_F_UDP_GTP_GTPC, - NPC_F_UDP_GTP_GTPU_G_PDU, - NPC_F_UDP_GTP_GTPU_UNK, - NPC_F_UDP_UNK_PORT, - NPC_F_UDP_GENEVE, - NPC_F_UDP_GENEVE_OAM, - NPC_F_UDP_GENEVE_CRI_OPT, - NPC_F_UDP_GENEVE_OAM_CRI_OPT, - NPC_F_GRE_NVGRE, - NPC_F_GRE_HAS_SRE, - NPC_F_GRE_HAS_CSUM, - NPC_F_GRE_HAS_KEY, - NPC_F_GRE_HAS_SEQ, - NPC_F_GRE_HAS_CSUM_KEY, - NPC_F_GRE_HAS_CSUM_SEQ, - NPC_F_GRE_HAS_KEY_SEQ, - NPC_F_GRE_HAS_CSUM_KEY_SEQ, - NPC_F_GRE_HAS_ROUTE, - NPC_F_GRE_UNK_PROTO, - NPC_F_GRE_VER1, - NPC_F_GRE_VER1_HAS_SEQ, - 
NPC_F_GRE_VER1_HAS_ACK, - NPC_F_GRE_VER1_HAS_SEQ_ACK, - NPC_F_GRE_VER1_UNK_PROTO, - NPC_F_TU_ETHER_UNK, - NPC_F_TU_ETHER_CTAG, - NPC_F_TU_ETHER_CTAG_UNK, - NPC_F_TU_ETHER_STAG_CTAG, - NPC_F_TU_ETHER_STAG_CTAG_UNK, - NPC_F_TU_ETHER_STAG, - NPC_F_TU_ETHER_STAG_UNK, - NPC_F_TU_ETHER_QINQ_CTAG, - NPC_F_TU_ETHER_QINQ_CTAG_UNK, - NPC_F_TU_ETHER_QINQ, - NPC_F_TU_ETHER_QINQ_UNK, - NPC_F_LAST /* has to be the last item */ +enum npc_kpu_la_uflag { + NPC_F_LA_U_HAS_TAG = 0x10, + NPC_F_LA_U_HAS_IH_NIX = 0x20, + NPC_F_LA_U_HAS_HIGIG2 = 0x40, +}; +enum npc_kpu_la_lflag { + NPC_F_LA_L_UNK_ETYPE = 1, + NPC_F_LA_L_WITH_VLAN, + NPC_F_LA_L_WITH_ETAG, + NPC_F_LA_L_WITH_ITAG, + NPC_F_LA_L_WITH_MPLS, + NPC_F_LA_L_WITH_NSH, +}; + +enum npc_kpu_lb_uflag { + NPC_F_LB_U_UNK_ETYPE = 0x80, + NPC_F_LB_U_MORE_TAG = 0x40, +}; +enum npc_kpu_lb_lflag { + NPC_F_LB_L_WITH_CTAG = 1, + NPC_F_LB_L_WITH_CTAG_UNK, + NPC_F_LB_L_WITH_STAG_CTAG, + NPC_F_LB_L_WITH_STAG_STAG, + NPC_F_LB_L_WITH_QINQ_CTAG, + NPC_F_LB_L_WITH_QINQ_QINQ, + NPC_F_LB_L_WITH_ITAG, + NPC_F_LB_L_WITH_ITAG_STAG, + NPC_F_LB_L_WITH_ITAG_CTAG, + NPC_F_LB_L_WITH_ITAG_UNK, + NPC_F_LB_L_WITH_BTAG_ITAG, + NPC_F_LB_L_WITH_STAG, + NPC_F_LB_L_WITH_QINQ, + NPC_F_LB_L_DSA, + NPC_F_LB_L_DSA_VLAN, + NPC_F_LB_L_EDSA, + NPC_F_LB_L_EDSA_VLAN, + NPC_F_LB_L_EXDSA, + NPC_F_LB_L_EXDSA_VLAN, +}; + +enum npc_kpu_lc_uflag { + NPC_F_LC_U_UNK_PROTO = 0x10, + NPC_F_LC_U_IP_FRAG = 0x20, + NPC_F_LC_U_IP6_FRAG = 0x40, +}; +enum npc_kpu_lc_lflag { + NPC_F_LC_L_IP_IN_IP = 1, + NPC_F_LC_L_6TO4, + NPC_F_LC_L_MPLS_IN_IP, + NPC_F_LC_L_IP6_TUN_IP6, + NPC_F_LC_L_IP6_MPLS_IN_IP, + NPC_F_LC_L_MPLS_4_LABELS, + NPC_F_LC_L_MPLS_3_LABELS, + NPC_F_LC_L_MPLS_2_LABELS, + NPC_F_LC_L_EXT_HOP, + NPC_F_LC_L_EXT_DEST, + NPC_F_LC_L_EXT_ROUT, + NPC_F_LC_L_EXT_MOBILITY, + NPC_F_LC_L_EXT_HOSTID, + NPC_F_LC_L_EXT_SHIM6, +}; + +enum npc_kpu_ld_lflag { + NPC_F_LD_L_TCP_UNK_PORT = 1, + NPC_F_LD_L_TCP_HAS_OPTIONS, + NPC_F_LD_L_TCP_UNK_PORT_HAS_OPTIONS, + NPC_F_LD_L_UDP_UNK_PORT, + NPC_F_LD_L_GRE_NVGRE, + NPC_F_LD_L_GRE_HAS_SRE, + NPC_F_LD_L_GRE_HAS_CSUM, + NPC_F_LD_L_GRE_HAS_KEY, + NPC_F_LD_L_GRE_HAS_SEQ, + NPC_F_LD_L_GRE_HAS_CSUM_KEY, + NPC_F_LD_L_GRE_HAS_CSUM_SEQ, + NPC_F_LD_L_GRE_HAS_KEY_SEQ, + NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ, + NPC_F_LD_L_GRE_HAS_ROUTE, + NPC_F_LD_L_GRE_UNK_PROTO, + NPC_F_LD_L_GRE_VER1, + NPC_F_LD_L_GRE_VER1_HAS_SEQ, + NPC_F_LD_L_GRE_VER1_HAS_ACK, + NPC_F_LD_L_GRE_VER1_HAS_SEQ_ACK, + NPC_F_LD_L_GRE_VER1_UNK_PROTO, + NPC_F_LD_L_MPLS_4_LABELS, + NPC_F_LD_L_MPLS_3_LABELS, + NPC_F_LD_L_MPLS_2_LABELS, +}; + +enum npc_kpu_le_lflag { + NPC_F_LE_L_VXLAN_NOVNI, + NPC_F_LE_L_VXLANGPE_NOVNI, + NPC_F_LE_L_VXLANGPE_UNK, + NPC_F_LE_L_VXLANGPE_NONP, + NPC_F_LE_L_GENEVE_OAM, + NPC_F_LE_L_GENEVE_CRI_OPT, + NPC_F_LE_L_GENEVE_OAM_CRI_OPT, + NPC_F_LE_L_GTPU_G_PDU, + NPC_F_LE_L_GTPU_UNK, +}; + +enum npc_kpu_lf_uflag { + NPC_F_LF_U_UNK_ETYPE = 0x10, + NPC_F_LF_U_HAS_TAG = 0x20, +}; + +enum npc_kpu_lf_lflag { + NPC_F_LF_L_WITH_CTAG = 1, + NPC_F_LF_L_WITH_STAG_CTAG, + NPC_F_LF_L_WITH_STAG, + NPC_F_LF_L_WITH_QINQ_CTAG, + NPC_F_LF_L_WITH_QINQ, +}; + +enum npc_kpu_lg_uflag { + NPC_F_LG_U_UNK_IP_PROTO = 0x10, + NPC_F_LG_U_IP_HAS_OPTIONS = 0x20, + NPC_F_LG_U_IP6_HAS_EXT = 0x40, +}; + +enum npc_kpu_lh_uflag { + NPC_F_LH_U_TCP_HAS_OPTIONS = 0x80, +}; + +enum npc_kpu_lh_lflag { + NPC_F_LH_L_TCP_HTTP = 1, + NPC_F_LH_L_TCP_HTTPS, + NPC_F_LH_L_TCP_PPTP, + NPC_F_LH_L_TCP_UNK_PORT, + NPC_F_LH_L_UDP_UNK_PORT, }; enum npc_kpu_err_code { NPC_EC_NOERR = 0, /* has to be zero */ NPC_EC_UNK, + NPC_EC_IH_LENGTH, + NPC_EC_EDSA_UNK, NPC_EC_L2_K1, 
NPC_EC_L2_K2, NPC_EC_L2_K3, NPC_EC_L2_K3_ETYPE_UNK, - NPC_EC_L2_MPLS_2MANY, NPC_EC_L2_K4, + NPC_EC_MPLS_2MANY, + NPC_EC_MPLS_UNK, + NPC_EC_NSH_UNK, + NPC_EC_IP_TTL_0, + NPC_EC_IP_FRAG_OFFSET_1, NPC_EC_IP_VER, + NPC_EC_IP6_HOP_0, NPC_EC_IP6_VER, + NPC_EC_TCP_FLAGS_FIN_ONLY, + NPC_EC_TCP_FLAGS_ZERO, + NPC_EC_TCP_FLAGS_RST_FIN, + NPC_EC_TCP_FLAGS_URG_SYN, + NPC_EC_TCP_FLAGS_RST_SYN, + NPC_EC_TCP_FLAGS_SYN_FIN, NPC_EC_VXLAN, NPC_EC_NVGRE, NPC_EC_GRE, NPC_EC_GRE_VER1, NPC_EC_L4, + NPC_EC_OIP4_CSUM, + NPC_EC_IIP4_CSUM, NPC_EC_LAST /* has to be the last item */ }; @@ -328,5282 +420,12598 @@ enum NPC_ERRLEV_E { static struct npc_kpu_profile_action ikpu_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + 
NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 
0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + 
NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 
0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_ETHER, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 36, 40, 44, 0, 0, + NPC_S_KPU1_IH_NIX_HIGIG2, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 28, 32, 36, 0, 0, + NPC_S_KPU1_HIGIG2, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU1_EXDSA, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16, - 0, 0, NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 1, 0xff, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 20, 24, 28, 0, 0, + NPC_S_KPU1_IH_NIX, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, }; static struct npc_kpu_profile_cam kpu1_cam_entries[] = { { - NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_RARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_PTP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_FCOE, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ETAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSU, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSM, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_NSH, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, 
0x0000, 0xfc00, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, 0x0400, 0xfe00, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_ETHER, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP6, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_RARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_PTP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_FCOE, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_CTAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_SBTAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_QINQ, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ETAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSU, 0xffff, - 0x0010, 0x0010, 0x0000, 0xffff, - }, - { - NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSM, 0xffff, - 0x0010, 0x0010, 0x0000, 0xffff, + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_ETAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + NPC_ETYPE_DSA, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + 0x0000, + 0xfc00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + 0x0400, + 0xfe00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_ETHER, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + 
NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_ETAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH, 0xff, + NPC_IH_W|NPC_IH_UTAG, + NPC_IH_W|NPC_IH_UTAG, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH, 0xff, + NPC_IH_W, + NPC_IH_W|NPC_IH_UTAG, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH, 0xff, + 0x0000, + NPC_IH_W|NPC_IH_UTAG, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_EXDSA, 0xff, + NPC_DSA_EXTEND, + NPC_DSA_EXTEND, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_EXDSA, 0xff, + 0x0000, + NPC_DSA_EXTEND, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_ETAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + 
{ + NPC_S_KPU1_HIGIG2, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_HIGIG2, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_ETAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU1_IH_NIX_HIGIG2, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static struct npc_kpu_profile_cam kpu2_cam_entries[] = { { - NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_NSH, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + 
NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_ARP, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_RARP, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_PTP, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_FCOE, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_MPLSU, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_MPLSM, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_NSH, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_SBTAG, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_CTAG, + 0xffff, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_SBTAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 
NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_QINQ, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_ARP, + 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_SBTAG, + 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + NPC_ETYPE_CTAG, + 0xffff, + }, + { + NPC_S_KPU2_ETAG, 0xff, + NPC_ETYPE_ITAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ETAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 
0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_ITAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CTAG2, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_PREHEADER, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + NPC_ETYPE_ARP, + 0xffff, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + NPC_ETYPE_RARP, + 0xffff, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + 
NPC_ETYPE_PTP, + 0xffff, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + NPC_ETYPE_FCOE, + 0xffff, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + NPC_ETYPE_CTAG, + 0xffff, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + NPC_DSA_EDSA, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_EXDSA, 0xff, + 0x0000, + NPC_DSA_EDSA, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static struct npc_kpu_profile_cam kpu3_cam_entries[] = { { - NPC_S_KPU1_PKI, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + 
NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, 
+ }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_ITAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_CTAG_C, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + 
NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_STAG_C, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_QINQ_C, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_RARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_PTP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_FCOE, + 0xffff, + 0x0000, + 0x0000, + }, + 
{ + NPC_S_KPU3_DSA, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU3_DSA, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static struct npc_kpu_profile_cam kpu4_cam_entries[] = { { - NPC_S_NA, 0X00, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU4_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_MPLS, 0xff, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_MPLS, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + }, + { + NPC_S_KPU4_MPLS, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + }, + { + NPC_S_KPU4_NSH, 0xff, + NPC_NSH_NP_IP, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_NSH, 0xff, + NPC_NSH_NP_IP6, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_NSH, 0xff, + NPC_NSH_NP_ETH, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_NSH, 0xff, + NPC_NSH_NP_MPLS, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU4_NSH, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, }; -static struct npc_kpu_profile_cam kpu2_cam_entries[] = { +static struct npc_kpu_profile_cam kpu5_cam_entries[] = { { - NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU5_IP, 0xff, + 0x0000, + NPC_IP_TTL_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0001, + NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_TCP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_UDP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_SCTP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_ICMP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_IGMP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_ESP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_AH, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_GRE, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_IP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_IP6, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + 
NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_MPLS, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_TCP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_UDP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_SCTP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_ICMP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_IGMP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_ESP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_AH, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_GRE, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_IP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_IP6, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + NPC_IPNH_MPLS, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF, + }, + { + NPC_S_KPU5_IP, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_ARP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_RARP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_PTP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_FCOE, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + 0x0000, + NPC_IP6_HOP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + 
NPC_IPNH_GRE << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_HOP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_DEST << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_ROUT << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_FRAG << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_MOBILITY << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_HOSTID << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_SHIM6 << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_IP6, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS_PL, 0xff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS_PL, 0xff, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS_PL, 0xff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_MPLS_PL, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static struct npc_kpu_profile_cam kpu6_cam_entries[] = { { - NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU6_IP6_EXT, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + 
NPC_IPNH_ESP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_ROUT << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_FRAG << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_FRAG << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static struct 
npc_kpu_profile_cam kpu7_cam_entries[] = { { - NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU7_IP6_EXT, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_GRE << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_IP6 << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_MPLS << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static struct npc_kpu_profile_cam kpu8_cam_entries[] = { { - NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + 
NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + NPC_TCP_PORT_HTTP, + 0xffff, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + NPC_TCP_PORT_HTTPS, + 0xffff, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + NPC_TCP_PORT_PPTP, + 0xffff, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + NPC_TCP_PORT_HTTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + NPC_TCP_PORT_HTTPS, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + NPC_TCP_PORT_PPTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_TCP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_VXLAN, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_VXLANGPE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_GENEVE, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_GTPC, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_GTPU, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_PTP_E, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_PTP_G, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_MPLS, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_UDP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_SCTP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_ICMP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_IGMP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_ICMP6, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_ESP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_AH, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_TRANS_ETH_BR, + 0xffff, + NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_TRANS_ETH_BR, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + NPC_GRE_F_CSUM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + 
NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSU, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + NPC_GRE_F_CSUM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_MPLSM, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + NPC_GRE_F_CSUM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_NSH, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + NPC_GRE_F_CSUM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + NPC_GRE_F_CSUM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + 
NPC_ETYPE_IP6, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_IP6, + 0xffff, + NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + 0x0000, + 0xffff, + NPC_GRE_F_ROUTE, + 0x4fff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + 0x0000, + 0xffff, + 0x0000, + 0x4fff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + 0x0000, + 0xffff, + 0x0000, + 0x0003, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_PPP, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_VER_1, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_PPP, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_VER_1, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_PPP, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_ACK|NPC_GRE_VER_1, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_PPP, + 0xffff, + NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_F_ACK|NPC_GRE_VER_1, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + 0x0000, + 0xffff, + 0x2001, + 0xef7f, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + 0x0000, + 0xffff, + 0x0001, + 0x0003, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static struct npc_kpu_profile_cam kpu9_cam_entries[] = { { - NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS_IN_NSH, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS_IN_IP, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_IP, 0xff, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_IP, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS_IN_IP, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_NSH_IN_GRE, 0xff, + NPC_NSH_NP_IP, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_NSH_IN_GRE, 0xff, + NPC_NSH_NP_IP6, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_NSH_IN_GRE, 0xff, + NPC_NSH_NP_ETH, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_NSH_IN_GRE, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 
0x0000, + }, + { + NPC_S_KPU9_VXLAN, 0xff, + 0x0000, + 0x0000, + NPC_VXLAN_I, + NPC_VXLAN_I, + 0x0000, + 0xffff, + }, + { + NPC_S_KPU9_VXLAN, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0xffff, + 0x0000, + 0xffff, + }, + { + NPC_S_KPU9_VXLAN, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_IP, + NPC_VXLANGPE_NP_MASK, }, { - NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_IP6, + NPC_VXLANGPE_NP_MASK, }, { - NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_ETH, + NPC_VXLANGPE_NP_MASK, }, { - NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_NSH, + NPC_VXLANGPE_NP_MASK, }, { - NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_MPLS, + NPC_VXLANGPE_NP_MASK, }, { - NPC_S_KPU2_CTAG, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_IP, + NPC_VXLANGPE_NP_MASK, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_IP6, + NPC_VXLANGPE_NP_MASK, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_ETH, + NPC_VXLANGPE_NP_MASK, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_NSH, + NPC_VXLANGPE_NP_MASK, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000, + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P, + NPC_VXLANGPE_P | NPC_VXLANGPE_I, + NPC_VXLANGPE_NP_MPLS, + NPC_VXLANGPE_NP_MASK, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000, + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + NPC_VXLANGPE_P, + NPC_VXLANGPE_P, + 0x0000, + 0x0000, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000, + NPC_S_KPU9_VXLANGPE, 0xff, + 0x0000, + 0x0000, + 0x0000, + NPC_VXLANGPE_P, + 0x0000, + 0x0000, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000, + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_TRANS_ETH_BR, + 0xffff, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000, + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + 
NPC_ETYPE_TRANS_ETH_BR, + 0xffff, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000, + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_TRANS_ETH_BR, + 0xffff, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_TRANS_ETH_BR, + 0xffff, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000, + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP, + 0xffff, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_SBTAG, 0xffff, 0x0000, 0x0000, + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP, + 0xffff, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff, + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP, + 0xffff, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff, + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP, + 0xffff, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff, + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP6, + 0xffff, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_RARP, 0xffff, + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP6, + 0xffff, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_PTP, 0xffff, + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP6, + 0xffff, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_FCOE, 0xffff, + NPC_S_KPU9_GENEVE, 0xff, + 0x0000, + 0x0000, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, + NPC_ETYPE_IP6, + 0xffff, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_MPLSU, 0xffff, + NPC_S_KPU9_GTPC, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_MPLSM, 0xffff, + NPC_S_KPU9_GTPU, 0xff, + 0x0000, + 0x0000, + NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU, + NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_GTPU, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + }, + { + NPC_S_KPU9_TU_MPLS_IN_UDP, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static 
struct npc_kpu_profile_cam kpu10_cam_entries[] = { { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_NSH, 0xffff, + NPC_S_KPU10_TU_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_PL, 0xff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_PL, 0xff, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_PL, 0xff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_PL, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_MPLS_S, + }, + { + NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + 0x0000, + NPC_MPLS_S, + }, + { + NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff, + NPC_NSH_NP_IP, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff, + NPC_NSH_NP_IP6, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff, + NPC_NSH_NP_ETH, + NPC_NSH_NP_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static struct npc_kpu_profile_cam kpu11_cam_entries[] = { { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff, + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 
NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_SBTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + NPC_ETYPE_ARP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + NPC_ETYPE_QINQ, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_PPP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS, 0xff, + NPC_MPLS_S, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS, 0xff, + 0x0000, + NPC_MPLS_S, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS_PL, 0xff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS_PL, 0xff, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_MPLS_PL, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU11_TU_ETHER_IN_NSH, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static struct npc_kpu_profile_cam kpu12_cam_entries[] = { { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff, + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_TCP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_UDP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_SCTP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_ICMP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_IGMP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 
0xff, + NPC_IPNH_ESP, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_AH, + 0x00ff, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_4|NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_TCP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_UDP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_SCTP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_ICMP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_IGMP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_ESP, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + NPC_IPNH_AH, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_ARP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + NPC_IPNH_TCP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + NPC_IPNH_UDP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + NPC_IPNH_SCTP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + NPC_IPNH_ICMP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + NPC_IPNH_ICMP6 << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + NPC_IPNH_ESP << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + NPC_IPNH_AH << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + 0x0000, + 0x0000, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU12_TU_IP6, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static struct npc_kpu_profile_cam kpu13_cam_entries[] = { { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU13_TU_IP6_EXT, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static struct npc_kpu_profile_cam kpu14_cam_entries[] = { { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU14_TU_IP6_EXT, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static struct npc_kpu_profile_cam kpu15_cam_entries[] = { { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP6, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_MASK, + 
0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN, + NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN, + NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + NPC_TCP_PORT_HTTP, + 0xffff, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + NPC_TCP_PORT_HTTPS, + 0xffff, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + NPC_TCP_PORT_PPTP, + 0xffff, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + NPC_TCP_DATA_OFFSET_5, + NPC_TCP_DATA_OFFSET_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + NPC_TCP_PORT_HTTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + NPC_TCP_PORT_HTTPS, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + NPC_TCP_PORT_PPTP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_TCP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_UDP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_SCTP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_ICMP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_IGMP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_ICMP6, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_ESP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU15_TU_AH, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_NA, 0X00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static struct npc_kpu_profile_cam kpu16_cam_entries[] = { { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_S_KPU16_TCP_DATA, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU16_HTTP_DATA, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU16_HTTPS_DATA, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU16_PPTP_DATA, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU16_UDP_DATA, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU16_UDP_PTP, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, }, +}; + +static struct npc_kpu_profile_action kpu1_action_entries[] = { { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_RARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU5_IP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_PTP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + 
NPC_S_KPU5_IP6, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_FCOE, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_ARP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_RARP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_PTP, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_NSH, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_FCOE, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU2_SBTAG, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 0, 0, 0, + NPC_S_KPU2_CTAG2, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_CTAG, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 22, 0, 0, + NPC_S_KPU2_SBTAG, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_QINQ, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 26, 0, 0, + NPC_S_KPU2_ETAG, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 18, 22, 26, 0, 0, + NPC_S_KPU2_ITAG, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU4_NSH, 14, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_L_WITH_NSH, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 0, 1, 0, + NPC_S_KPU3_DSA, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + 
NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_8023, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff, - NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_8023, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff, - NPC_ETYPE_QINQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_L_UNK_ETYPE, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU5_IP, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU5_IP6, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_ARP, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_RARP, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_PTP, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_FCOE, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 0, 0, 0, + NPC_S_KPU2_CTAG2, 20, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_CTAG, 20, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 22, 0, 0, + NPC_S_KPU2_SBTAG, 20, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU2_QINQ, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_QINQ, 20, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 26, 0, 0, + NPC_S_KPU2_ETAG, 20, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_ETAG, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP6, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 18, 22, 26, 0, 0, + NPC_S_KPU2_ITAG, 20, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 
+ NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_RARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_PTP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU4_NSH, 22, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_WITH_NSH, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_FCOE, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_L_UNK_ETYPE, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSU, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 14, 16, 0, 0, + NPC_S_KPU2_PREHEADER, 8, 1, + NPC_LID_LA, NPC_LT_LA_IH_8_ETHER, + 0, + 1, 0xff, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSM, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 14, 16, 0, 0, + NPC_S_KPU2_PREHEADER, 4, 1, + NPC_LID_LA, NPC_LT_LA_IH_4_ETHER, + 0, + 1, 0xff, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_NSH, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 14, 16, 0, 0, + NPC_S_KPU2_PREHEADER, 2, 1, + NPC_LID_LA, NPC_LT_LA_IH_2_ETHER, + 0, + 1, 0xff, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_LA, NPC_EC_IH_LENGTH, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + NPC_F_LA_L_UNK_ETYPE, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_ITAG, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 16, 0, 0, + NPC_S_KPU2_EXDSA, 12, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_LA, NPC_EC_EDSA_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_ETHER, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_QINQ, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU5_IP, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU5_IP6, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_ARP, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_RARP, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff, + NPC_ERRLEV_RE, 
NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_PTP, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_FCOE, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 0, 0, 0, + NPC_S_KPU2_CTAG2, 28, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ETAG, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_CTAG, 28, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 22, 0, 0, + NPC_S_KPU2_SBTAG, 28, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_QINQ, 28, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 26, 0, 0, + NPC_S_KPU2_ETAG, 28, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_ETAG, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 18, 22, 26, 0, 0, + NPC_S_KPU2_ITAG, 28, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG + | NPC_F_LA_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU4_NSH, 30, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_WITH_NSH, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, + NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_L_UNK_ETYPE, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU5_IP, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, 
+ NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU5_IP6, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_ARP, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_RARP, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_PTP, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 3, 0, + NPC_S_KPU5_FCOE, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 0, 0, 0, + NPC_S_KPU2_CTAG2, 36, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_CTAG, 36, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU2_ITAG, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 22, 0, 0, + NPC_S_KPU2_SBTAG, 36, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, { - NPC_S_NA, 0X00, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU2_QINQ, 36, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_cam kpu3_cam_entries[] = { { - NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 12, 26, 0, 0, + NPC_S_KPU2_ETAG, 36, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 18, 22, 26, 0, 0, + NPC_S_KPU2_ITAG, 36, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, }, { - NPC_S_KPU3_CTAG, 
0xff, NPC_ETYPE_RARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU4_MPLS, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_L_WITH_MPLS, + 0, 0, 0, 0, }, { - NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU4_NSH, 38, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_L_WITH_NSH, + 0, 0, 0, 0, }, { - NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, + NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2 + | NPC_F_LA_L_UNK_ETYPE, + 0, 0, 0, 0, }, { - NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_LA, NPC_EC_L2_K1, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LA, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, +}; + +static struct npc_kpu_profile_action kpu2_action_entries[] = { { - NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_CTAG, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 1, 0, + NPC_S_KPU4_NSH, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + NPC_F_LB_U_UNK_ETYPE, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - 
NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP6, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_ARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_RARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSU, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSM, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_NSH, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 1, 0, + NPC_S_KPU4_NSH, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_STAG, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG_UNK, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_CTAG, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_STAG, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_STAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + 
NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 1, 0, + NPC_S_KPU4_NSH, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU3_STAG, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU3_CTAG, 24, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_BTAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_UNK_ETYPE, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_QINQ, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | 
NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 1, 0, + NPC_S_KPU4_NSH, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG_UNK, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_CTAG, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_QINQ, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_QINQ, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_UNK_ETYPE, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_ARP, 0xffff, 0x0000, 
0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 1, + 0, 0, 0, 0, }, { - NPC_S_KPU3_ITAG, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 2, + 0, 0, 0, 0, }, { - NPC_S_NA, 0X00, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_NSH, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + 2, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_cam kpu4_cam_entries[] = { { - NPC_S_KPU4_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU3_CTAG, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S, - NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 16, 20, 24, 0, 0, + NPC_S_KPU3_ITAG, 14, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_BTAG_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S, - 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_STAG, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG, + 0, 0, 0, 0, }, { - NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S, - 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_QINQ, 10, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_QINQ, + 0, 0, 0, 0, }, { - NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 28, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 28, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 28, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG, + 0, 0, 0, 0, }, { - NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU3_STAG, 28, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG, + 0, 0, 0, 0, }, { - NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU3_CTAG, 28, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG, + 0, 0, 0, 0, }, { - NPC_S_NA, 0X00, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_cam kpu5_cam_entries[] = { { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 
0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_ETAG, + NPC_F_LB_U_UNK_ETYPE, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 20, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 20, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 20, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 20, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 28, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 28, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 28, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 24, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 24, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 24, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, 
NPC_IPNH_UDP, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 24, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 24, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 24, 1, + NPC_LID_LB, NPC_LT_LB_ITAG, + NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_ARP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_RARP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 1, 0, + NPC_S_KPU4_NSH, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_PTP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_QINQ, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ_QINQ, + 0, 0, 0, 0, }, { - NPC_S_KPU5_FCOE, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + 
NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_UNK_ETYPE, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_CTAG_C, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP6, 0xff, NPC_IPNH_GRE << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 20, 0, 0, + NPC_S_KPU3_STAG_C, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP6, 0xff, NPC_IPNH_IP6 << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 0, 0, 0, + NPC_S_KPU3_QINQ_C, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP6, 0xff, NPC_IPNH_MPLS << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU4_MPLS, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 1, 0, + NPC_S_KPU4_NSH, 14, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 18, 1, + NPC_LID_LB, NPC_LT_LB_EDSA, + NPC_F_LB_L_EDSA, + 0, 0, 0, 0, }, { - NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 18, 1, + NPC_LID_LB, NPC_LT_LB_EDSA, + NPC_F_LB_L_EDSA, + 0, 0, 0, 0, }, { - NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, - 0x0000, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 18, 1, + NPC_LID_LB, NPC_LT_LB_EDSA, + NPC_F_LB_L_EDSA, + 0, 0, 0, 0, }, { - NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, 
+ 8, 0, 6, 2, 0, + NPC_S_KPU5_RARP, 18, 1, + NPC_LID_LB, NPC_LT_LB_EDSA, + NPC_F_LB_L_EDSA, + 0, 0, 0, 0, }, { - NPC_S_KPU5_MPLS, 0xff, 0x0000, NPC_MPLS_S, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 18, 1, + NPC_LID_LB, NPC_LT_LB_EDSA, + NPC_F_LB_L_EDSA, + 0, 0, 0, 0, }, { - NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 18, 1, + NPC_LID_LB, NPC_LT_LB_EDSA, + NPC_F_LB_L_EDSA, + 0, 0, 0, 0, }, { - NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU3_CTAG, 16, 1, + NPC_LID_LB, NPC_LT_LB_EDSA_VLAN, + NPC_F_LB_L_EDSA_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_EDSA, + NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EDSA, + 0, 0, 0, 0, }, { - NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA, + NPC_F_LB_L_EXDSA, + 0, 0, 0, 0, }, { - NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA, + NPC_F_LB_L_EXDSA, + 0, 0, 0, 0, }, { - NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA, + NPC_F_LB_L_EXDSA, + 0, 0, 0, 0, }, { - NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_RARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA, + NPC_F_LB_L_EXDSA, + 0, 0, 0, 0, }, { - NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_PTP, 10, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA, + NPC_F_LB_L_EXDSA, + 0, 0, 0, 0, }, { - NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_FCOE, 10, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA, + NPC_F_LB_L_EXDSA, + 0, 0, 0, 0, }, { - NPC_S_NA, 0X00, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 8, 0, 0, 0, + NPC_S_KPU3_CTAG, 8, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA_VLAN, + NPC_F_LB_L_EXDSA_VLAN, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_cam kpu6_cam_entries[] = { { - NPC_S_KPU6_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_EXDSA, + NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_EXDSA, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_cam kpu7_cam_entries[] = { { - NPC_S_KPU7_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K3, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, }; -static struct npc_kpu_profile_cam kpu8_cam_entries[] = { - { - NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff, - NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff, - NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, - }, +static struct 
npc_kpu_profile_action kpu3_action_entries[] = { { - NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff, - NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000, - NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff, - NPC_VXLAN_I, NPC_VXLAN_I, 0x0000, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff, - 0x0000, 0xffff, 0x0000, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, - NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK, + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, - NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, - NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, - NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, - NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, - NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_NP_IP, 
NPC_VXLANGPE_NP_MASK, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, - NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, - NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, - NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, - NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I, - NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, - NPC_VXLANGPE_P, NPC_VXLANGPE_P, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff, - 0x0000, NPC_VXLANGPE_P, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, - 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, - NPC_ETYPE_TRANS_ETH_BR, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, - NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, - NPC_ETYPE_TRANS_ETH_BR, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, - NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, - NPC_ETYPE_TRANS_ETH_BR, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, - NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, - NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, - NPC_ETYPE_TRANS_ETH_BR, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, - 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, - NPC_ETYPE_IP, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, - NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, - NPC_ETYPE_IP, 0xffff, + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, - NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, - NPC_ETYPE_IP, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { 
- NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, - NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, - NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, - 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, - NPC_ETYPE_IP6, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, - NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, - NPC_ETYPE_IP6, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, - NPC_GENEVE_F_CRI_OPT, - NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff, - NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, - NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPC, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff, - NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU, - NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK, - 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_UDP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_SCTP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_ICMP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_IGMP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_ICMP6, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_ESP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_AH, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff, - NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, 
+ 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, - 0x0000, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, - NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_IP, 18, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, - NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_IP6, 18, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, - NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_ARP, 18, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU5_RARP, 18, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 26, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, - NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 26, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, - 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 26, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, - 0x0000, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 22, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, - NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 22, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, - NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 22, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, - NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 22, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, - NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + 
NPC_S_KPU5_IP6, 22, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, - 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 22, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, - 0x0000, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, - NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, - NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, - NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, - NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, - 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, - 0x0000, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, - NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, - NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 4, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, - NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, - NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, 
NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, - 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, - 0x0000, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, - NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, - NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, - NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, - NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff, - NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, - 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff, - NPC_GRE_F_ROUTE, 0x4fff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff, - 0x0000, 0x4fff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff, - 0x0000, 0x0003, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff, - NPC_GRE_F_KEY | NPC_GRE_VER_1, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff, - NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1, - 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff, - NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1, - 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 
0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff, - NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1, - 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff, - 0x2001, 0xef7f, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff, - 0x0001, 0x0003, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_NA, 0X00, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_cam kpu9_cam_entries[] = { { - NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, NPC_MPLS_S, NPC_MPLS_S, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S, - NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S, - 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S, - 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 8, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU9_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S, - NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S, - 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S, - 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 
0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU4_MPLS, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU4_NSH, 4, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, { - NPC_S_NA, 0X00, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + 0, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_cam kpu10_cam_entries[] = { { - NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_DSA, + NPC_F_LB_L_DSA, + 0, 0, 0, 0, }, { - NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_DSA, + NPC_F_LB_L_DSA, + 0, 0, 0, 0, }, { - NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, - 0x0000, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_DSA, + NPC_F_LB_L_DSA, + 0, 0, 0, 0, }, { - NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 10, 1, + NPC_LID_LB, NPC_LT_LB_DSA, + NPC_F_LB_L_DSA, + 0, 0, 0, 0, }, { - NPC_S_KPU10_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 10, 1, + NPC_LID_LB, NPC_LT_LB_DSA, + NPC_F_LB_L_DSA, + 0, 0, 0, 0, }, { - NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 10, 1, + NPC_LID_LB, NPC_LT_LB_DSA, + NPC_F_LB_L_DSA, + 0, 0, 0, 0, }, { - NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU5_IP, 14, 1, + NPC_LID_LB, NPC_LT_LB_DSA_VLAN, + NPC_F_LB_L_DSA_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU5_IP6, 14, 1, + NPC_LID_LB, NPC_LT_LB_DSA_VLAN, + NPC_F_LB_L_DSA_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_ARP, 14, 1, + NPC_LID_LB, NPC_LT_LB_DSA_VLAN, + NPC_F_LB_L_DSA_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_RARP, 14, 1, + NPC_LID_LB, NPC_LT_LB_DSA_VLAN, + NPC_F_LB_L_DSA_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_PTP, 14, 1, + NPC_LID_LB, NPC_LT_LB_DSA_VLAN, + NPC_F_LB_L_DSA_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU5_FCOE, 14, 1, + NPC_LID_LB, NPC_LT_LB_DSA_VLAN, + NPC_F_LB_L_DSA_VLAN, + 0, 0, 0, 0, }, { - NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 
0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_DSA_VLAN, + NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_DSA, + 0, 0, 0, 0, }, { - NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LB, NPC_LT_LB_DSA, + NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_DSA_VLAN, + 0, 0, 0, 0, }, { - NPC_S_NA, 0X00, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K3, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, }; -static struct npc_kpu_profile_cam kpu11_cam_entries[] = { - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, - NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, - NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, - NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff, - }, - { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, - NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff, - }, +static struct npc_kpu_profile_action kpu4_action_entries[] = { { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, - NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU5_MPLS_PL, 4, 1, + NPC_LID_LC, NPC_LT_LC_MPLS, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, - NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU5_MPLS_PL, 8, 1, + NPC_LID_LC, NPC_LT_LC_MPLS, + NPC_F_LC_L_MPLS_2_LABELS, + 0, 0, 0, 0, }, { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, - NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU5_MPLS_PL, 12, 1, + NPC_LID_LC, NPC_LT_LC_MPLS, + NPC_F_LC_L_MPLS_3_LABELS, + 0, 0, 0, 0, }, { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, - NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 4, 0, 0, 0, + NPC_S_KPU5_MPLS, 12, 1, + NPC_LID_LC, NPC_LT_LC_MPLS, + 
NPC_F_LC_L_MPLS_4_LABELS, + 0, 0, 0, 0, }, { - NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 7, 0, + NPC_S_KPU12_TU_IP, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, + 0, + 1, 0x3f, 0, 2, }, { - NPC_S_KPU11_TU_ETHER, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 7, 0, + NPC_S_KPU12_TU_IP6, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, + 0, + 1, 0x3f, 0, 2, }, { - NPC_S_KPU11_TU_PPP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 6, 0, + NPC_S_KPU11_TU_ETHER, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, + 0, + 1, 0x3f, 0, 2, }, { - NPC_S_KPU11_TU_MPLS_IN_NSH, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 4, 0, + NPC_S_KPU9_TU_MPLS_IN_NSH, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, + 0, + 1, 0x3f, 0, 2, }, { - NPC_S_KPU11_TU_3RD_NSH, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_LC, NPC_EC_NSH_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_NSH, + 0, + 0, 0, 0, 0, }, { - NPC_S_NA, 0X00, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_LB, NPC_EC_L2_K4, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, }; -static struct npc_kpu_profile_cam kpu12_cam_entries[] = { - { - NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000, - NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, - NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000, - NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_ARP, 0xff, 0x0000, 0x0000, - 0x0000, 
0x0000, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, - }, - { - NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, - }, +static struct npc_kpu_profile_action kpu5_action_entries[] = { { - NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_LC, NPC_EC_IP_TTL_0, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_U_IP_FRAG, + 0, 0, 0, 0, }, { - NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU8_TCP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU8_UDP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000, - NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_SCTP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ICMP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, }, { - NPC_S_NA, 0X00, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_IGMP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_cam kpu13_cam_entries[] = { { - NPC_S_KPU13_TU_IP6_EXT, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ESP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_cam kpu14_cam_entries[] = { { - NPC_S_KPU14_TU_IP6_EXT, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_AH, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_cam kpu15_cam_entries[] = { { - NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff, - NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU8_GRE, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff, - NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 6, 0, + NPC_S_KPU12_TU_IP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_L_IP_IN_IP, + 0, 0, 0, 0, }, { - NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff, - NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 6, 0, + NPC_S_KPU12_TU_IP6, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_L_6TO4, + 0, 0, 0, 0, }, { - NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000, - NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 3, 0, + 
NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_L_MPLS_IN_IP, + 0, 0, 0, 0, }, { - NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_U_UNK_PROTO, + 0, 0, 0, 0, }, { - NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, + NPC_F_LC_U_IP_FRAG, + 0, 0, 0, 0, }, { - NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU8_TCP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, }, { - NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 2, 0, + NPC_S_KPU8_UDP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, }, { - NPC_S_KPU15_TU_UDP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_SCTP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, }, { - NPC_S_KPU15_TU_SCTP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ICMP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, }, { - NPC_S_KPU15_TU_ICMP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_IGMP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, }, { - NPC_S_KPU15_TU_IGMP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ESP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, }, { - NPC_S_KPU15_TU_ICMP6, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_AH, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, }, { - NPC_S_KPU15_TU_ESP, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU8_GRE, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, }, { - NPC_S_KPU15_TU_AH, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 6, 0, + NPC_S_KPU12_TU_IP, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + NPC_F_LC_L_IP_IN_IP, + 0, 0xf, 0, 2, }, { - NPC_S_NA, 0X00, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 6, 0, + NPC_S_KPU12_TU_IP6, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + NPC_F_LC_L_6TO4, + 0, 0xf, 0, 2, }, -}; - -static struct npc_kpu_profile_cam kpu16_cam_entries[] = { { - NPC_S_KPU16_TCP_DATA, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 3, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + NPC_F_LC_L_MPLS_IN_IP, + 0, 0xf, 0, 2, }, { - NPC_S_KPU16_HTTP_DATA, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + NPC_F_LC_U_UNK_PROTO, + 0, 0, 0, 0, }, { - NPC_S_KPU16_HTTPS_DATA, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + NPC_F_LC_U_IP_FRAG, + 0, 0, 0, 0, }, { - NPC_S_KPU16_PPTP_DATA, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_LC, NPC_EC_IP_VER, + 0, 0, 
0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, }, { - NPC_S_KPU16_UDP_DATA, 0xff, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_ARP, + 0, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_action kpu1_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU5_IP, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_RARP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU5_IP6, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_PTP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 3, 0, NPC_S_KPU5_ARP, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_FCOE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 3, 0, NPC_S_KPU5_RARP, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 3, 0, NPC_S_KPU5_PTP, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU8_TCP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 3, 0, NPC_S_KPU5_FCOE, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU8_UDP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, - 0, 0, NPC_S_KPU2_CTAG, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_SCTP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20, - 0, 0, NPC_S_KPU2_SBTAG, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ICMP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, - 0, 0, NPC_S_KPU2_QINQ, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ICMP6, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24, - 0, 0, NPC_S_KPU2_ETAG, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ETAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_GRE, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24, - 0, 0, NPC_S_KPU2_ITAG, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ITAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 6, 0, + NPC_S_KPU12_TU_IP6, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + NPC_F_LC_L_IP6_TUN_IP6, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 2, 0, NPC_S_KPU4_MPLS, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 3, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + NPC_F_LC_L_IP6_MPLS_IN_IP, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 2, 0, NPC_S_KPU4_MPLS, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, 
NPC_F_ETHER_MPLS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU6_IP6_HOP_DEST, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_HOP, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 2, 0, NPC_S_KPU4_NSH, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_NSH, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU6_IP6_HOP_DEST, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_DEST, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU6_IP6_ROUT, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_ROUT, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 2, 0, 0, 0, + NPC_S_KPU6_IP6_FRAG, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_U_IP6_FRAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_ESP, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU5_IP, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_AH, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU5_IP6, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_MOBILITY, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 3, 0, NPC_S_KPU5_ARP, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_HOSTID, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 3, 0, NPC_S_KPU5_RARP, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_EXT_SHIM6, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 3, 0, NPC_S_KPU5_PTP, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + NPC_F_LC_U_UNK_PROTO, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 3, 0, NPC_S_KPU5_FCOE, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0, - 0, 0, + NPC_ERRLEV_LC, NPC_EC_IP6_VER, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, - 0, 0, NPC_S_KPU2_CTAG, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 6, 0, + NPC_S_KPU12_TU_IP, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20, - 0, 0, NPC_S_KPU2_SBTAG, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 6, 0, + NPC_S_KPU12_TU_IP6, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, - 0, 0, NPC_S_KPU2_QINQ, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 5, 0, + 
NPC_S_KPU11_TU_ETHER, 8, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24, - 0, 0, NPC_S_KPU2_ETAG, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ETAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 5, 0, + NPC_S_KPU11_TU_ETHER, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24, - 0, 0, NPC_S_KPU2_ITAG, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ITAG, 0, 0, - 0, 0, + NPC_ERRLEV_LB, NPC_EC_MPLS_2MANY, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 2, 0, NPC_S_KPU4_MPLS, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 6, 0, + NPC_S_KPU12_TU_IP, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 2, 0, NPC_S_KPU4_MPLS, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 6, 0, + NPC_S_KPU12_TU_IP6, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 2, 0, NPC_S_KPU4_NSH, 14, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_NSH, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 5, 0, + NPC_S_KPU11_TU_ETHER, 4, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 5, 0, + NPC_S_KPU11_TU_ETHER, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LA, NPC_EC_L2_K1, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LA, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LC, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, }; -static struct npc_kpu_profile_action kpu2_action_entries[] = { - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 2, 0, NPC_S_KPU5_IP, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 2, 0, NPC_S_KPU5_IP6, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_ARP, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_RARP, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_PTP, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_FCOE, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_MPLS, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_MPLS, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 1, 0, NPC_S_KPU4_NSH, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, NPC_F_ETYPE_UNK, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 2, 0, NPC_S_KPU5_IP, 8, 1, - NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 2, 0, NPC_S_KPU5_IP6, 8, 1, - NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_ARP, 8, 1, - 
NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_RARP, 8, 1, - NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_PTP, 8, 1, - NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_FCOE, 8, 1, - NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_MPLS, 8, 1, - NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_MPLS, 8, 1, - NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 1, 0, NPC_S_KPU4_NSH, 8, 1, - NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG_UNK, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, - 0, 0, NPC_S_KPU3_CTAG, 8, 1, - NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, - 0, 0, NPC_S_KPU3_STAG, 8, 1, - NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_STAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 2, 0, NPC_S_KPU5_IP, 22, 1, - NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 2, 0, NPC_S_KPU5_IP6, 22, 1, - NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_ARP, 22, 1, - NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_RARP, 22, 1, - NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_PTP, 22, 1, - NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_FCOE, 22, 1, - NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_MPLS, 22, 1, - NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_MPLS, 22, 1, - NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 1, 0, NPC_S_KPU4_NSH, 22, 1, - NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU3_STAG, 22, 1, - NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_STAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU3_CTAG, 22, 1, - NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_UNK, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 2, 0, NPC_S_KPU5_IP, 4, 1, - NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 2, 0, NPC_S_KPU5_IP6, 4, 1, - NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_ARP, 4, 1, - NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_RARP, 4, 1, - NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, - 0, 0, - }, - { 
- NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_PTP, 4, 1, - NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_FCOE, 4, 1, - NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_MPLS, 4, 1, - NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_MPLS, 4, 1, - NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, - 0, 0, - }, +static struct npc_kpu_profile_action kpu6_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 1, 0, NPC_S_KPU4_NSH, 4, 1, - NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_ETYPE_UNK, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 1, 0, + NPC_S_KPU8_TCP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 2, 0, NPC_S_KPU5_IP, 8, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 1, 0, + NPC_S_KPU8_UDP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 2, 0, NPC_S_KPU5_IP6, 8, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_SCTP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_ARP, 8, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_RARP, 8, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_PTP, 8, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ESP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_FCOE, 8, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_AH, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_MPLS, 8, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_GRE, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_MPLS, 8, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 5, 0, + NPC_S_KPU12_TU_IP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 1, 0, NPC_S_KPU4_NSH, 8, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG_UNK, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, 
NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, - 0, 0, NPC_S_KPU3_CTAG, 8, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 1, 0, + NPC_S_KPU8_TCP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, - 0, 0, NPC_S_KPU3_QINQ, 8, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_QINQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 1, 0, + NPC_S_KPU8_UDP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 2, 0, NPC_S_KPU5_IP, 4, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_SCTP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 2, 0, NPC_S_KPU5_IP6, 4, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_ARP, 4, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_RARP, 4, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ESP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_PTP, 4, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_AH, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_FCOE, 4, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_GRE, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_MPLS, 4, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 5, 0, + NPC_S_KPU12_TU_IP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_MPLS, 4, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 1, 0, NPC_S_KPU4_NSH, 4, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU7_IP6_ROUT, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_ETYPE_UNK, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 2, 0, 0, 0, + NPC_S_KPU7_IP6_FRAG, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 2, 0, NPC_S_KPU5_IP, 8, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 2, 0, NPC_S_KPU5_IP6, 8, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 1, 0, + NPC_S_KPU8_TCP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_ARP, 8, 1, - 
NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 1, 0, + NPC_S_KPU8_UDP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_RARP, 8, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_SCTP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_PTP, 8, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_FCOE, 8, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ICMP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_MPLS, 8, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, 1, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_ESP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_MPLS, 8, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_AH, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 1, 0, NPC_S_KPU4_NSH, 8, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_GRE, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU3_CTAG, 8, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 5, 0, + NPC_S_KPU12_TU_IP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24, - 0, 0, NPC_S_KPU3_ITAG, 12, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_BTAG_ITAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 2, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, - 0, 0, NPC_S_KPU3_STAG, 8, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_STAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 2, 0, 0, 0, + NPC_S_KPU7_IP6_FRAG, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0, - 0, 0, NPC_S_KPU3_QINQ, 8, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_QINQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 2, 0, NPC_S_KPU5_IP, 26, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0, - 0, 0, + NPC_ERRLEV_LC, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, +}; + +static struct npc_kpu_profile_action kpu7_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 2, 0, NPC_S_KPU5_IP6, 26, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_ARP, 26, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 0, 0, + NPC_S_KPU8_TCP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU3_STAG, 
26, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_STAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 0, 0, + NPC_S_KPU8_UDP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU3_CTAG, 26, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_SCTP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_UNK, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_ICMP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETYPE_UNK, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_ICMP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 2, 0, NPC_S_KPU5_IP, 18, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_ESP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 2, 0, NPC_S_KPU5_IP6, 18, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_AH, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_ARP, 18, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_GRE, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_RARP, 18, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 4, 0, + NPC_S_KPU12_TU_IP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 2, 0, NPC_S_KPU5_IP, 26, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 2, 0, NPC_S_KPU5_IP6, 26, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_ARP, 26, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 0, 0, + NPC_S_KPU8_TCP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 8, 10, 0, 0, + NPC_S_KPU8_UDP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 2, 0, NPC_S_KPU5_IP, 22, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_SCTP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 2, 0, NPC_S_KPU5_IP6, 22, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_ICMP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_ARP, 22, 1, - NPC_LID_LB, 
NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_ICMP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_ESP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 2, 0, NPC_S_KPU5_IP, 22, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_AH, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 2, 0, NPC_S_KPU5_IP6, 22, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_GRE, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_ARP, 22, 1, - NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 4, 0, + NPC_S_KPU12_TU_IP6, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 1, 0, + NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LC, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, }; -static struct npc_kpu_profile_action kpu3_action_entries[] = { +static struct npc_kpu_profile_action kpu8_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 1, 0, NPC_S_KPU5_IP, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 1, 0, NPC_S_KPU5_IP6, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_ZERO, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_ARP, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_RST_FIN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_RARP, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_URG_SYN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_PTP, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_RST_SYN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_FCOE, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_SYN_FIN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU4_MPLS, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 
0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_HTTP_DATA, 20, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU4_MPLS, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_HTTPS_DATA, 20, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU4_NSH, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_PPTP_DATA, 20, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_TCP_DATA, 20, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + NPC_F_LD_L_TCP_UNK_PORT, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 1, 0, NPC_S_KPU5_IP, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_HTTP_DATA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + NPC_F_LD_L_TCP_HAS_OPTIONS, + 12, 0xf0, 1, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 1, 0, NPC_S_KPU5_IP6, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_HTTPS_DATA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + NPC_F_LD_L_TCP_HAS_OPTIONS, + 12, 0xf0, 1, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_ARP, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_PPTP_DATA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + NPC_F_LD_L_TCP_HAS_OPTIONS, + 12, 0xf0, 1, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_RARP, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_TCP_DATA, 0, 1, + NPC_LID_LD, NPC_LT_LD_TCP, + NPC_F_LD_L_TCP_UNK_PORT_HAS_OPTIONS, + 12, 0xf0, 1, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_PTP, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 2, 0, 0, + NPC_S_KPU9_VXLAN, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_FCOE, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 2, 0, 0, + NPC_S_KPU9_VXLANGPE, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU4_MPLS, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 2, 0, 0, + NPC_S_KPU9_GENEVE, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU4_MPLS, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 2, 0, 0, + NPC_S_KPU9_GTPC, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU4_NSH, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 2, 0, 0, + NPC_S_KPU9_GTPU, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 1, 0, NPC_S_KPU5_IP, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_KPU16_UDP_PTP, 0, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 1, 0, NPC_S_KPU5_IP6, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 
0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_KPU16_UDP_PTP, 0, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_ARP, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_UDP, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_RARP, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 7, 0, + NPC_S_KPU16_UDP_DATA, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU4_MPLS, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_SCTP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU4_MPLS, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_ICMP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU4_NSH, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_IGMP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_ICMP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 1, 0, NPC_S_KPU5_IP, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_ESP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 1, 0, NPC_S_KPU5_IP6, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_AH, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_ARP, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 2, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LD, NPC_LT_LD_NVGRE, + NPC_F_LD_L_GRE_NVGRE, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_RARP, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LD, NPC_EC_NVGRE, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_PTP, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_FCOE, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU4_MPLS, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU4_MPLS, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + 
NPC_F_LD_L_GRE_HAS_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU4_NSH, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 1, 0, NPC_S_KPU5_IP, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 1, 0, NPC_S_KPU5_IP6, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_ARP, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_RARP, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_PTP, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_FCOE, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU4_MPLS, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU4_MPLS, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU4_NSH, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 2, 0, NPC_S_KPU5_IP, 18, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU9_TU_MPLS_IN_GRE, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 2, 0, NPC_S_KPU5_IP6, 18, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + 0, 
+ 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_ARP, 18, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU5_RARP, 18, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 1, 0, NPC_S_KPU5_IP, 26, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 1, 0, NPC_S_KPU5_IP6, 26, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_ARP, 26, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 1, 0, NPC_S_KPU5_IP, 22, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 1, 0, NPC_S_KPU5_IP6, 22, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU9_TU_NSH_IN_GRE, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_ARP, 22, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 1, 0, NPC_S_KPU5_IP, 22, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 1, 0, NPC_S_KPU5_IP6, 22, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU5_ARP, 22, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, - 0, 
1, NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 3, 0, + NPC_S_KPU12_TU_IP, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 4, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + 0, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_action kpu4_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU5_MPLS_PL, 4, 1, - NPC_LID_LC, NPC_LT_LC_MPLS, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU5_MPLS_PL, 8, 1, - NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_2_LABELS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU5_MPLS_PL, 12, 1, - NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_3_LABELS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0, - 0, 0, NPC_S_KPU5_MPLS, 12, 1, - NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_4_LABELS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 7, 0, NPC_S_KPU12_TU_IP, 0, 1, - NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 7, 0, NPC_S_KPU12_TU_IP6, 0, 1, - NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 6, 0, NPC_S_KPU11_TU_ETHER, 0, 1, - NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 3, 0, + NPC_S_KPU12_TU_IP6, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU5_NSH, 0, 1, - NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_ROUTE, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 4, 0, NPC_S_KPU9_TU_MPLS, 0, 1, - NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_UNK_PROTO, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_K4, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LC, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LD, NPC_EC_GRE, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, -}; - -static struct 
npc_kpu_profile_action kpu5_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0, - 2, 0, NPC_S_KPU8_TCP, 20, 1, - NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU11_TU_PPP, 8, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_VER1, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10, - 2, 0, NPC_S_KPU8_UDP, 20, 1, - NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU11_TU_PPP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_VER1_HAS_SEQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU8_SCTP, 20, 1, - NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU11_TU_PPP, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_VER1_HAS_ACK, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU8_ICMP, 20, 1, - NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU11_TU_PPP, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_VER1_HAS_SEQ_ACK, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU8_IGMP, 20, 1, - NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_VER1_UNK_PROTO, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU8_ESP, 20, 1, - NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LD, NPC_EC_GRE_VER1, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU8_AH, 20, 1, - NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LD, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LD, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, +}; + +static struct npc_kpu_profile_action kpu9_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 2, 0, NPC_S_KPU8_GRE, 20, 1, - NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 4, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 6, 0, NPC_S_KPU12_TU_IP, 20, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 8, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 6, 0, NPC_S_KPU12_TU_IP6, 20, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 12, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 4, 0, 0, 0, + NPC_S_KPU10_TU_MPLS, 12, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_GRE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 4, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0, - 2, 0, NPC_S_KPU8_TCP, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 8, 1, + NPC_LID_LD, 
NPC_LT_LD_TU_MPLS_IN_NSH, + NPC_F_LD_L_MPLS_2_LABELS, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10, - 2, 0, NPC_S_KPU8_UDP, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 12, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH, + NPC_F_LD_L_MPLS_3_LABELS, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU8_SCTP, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 4, 0, 0, 0, + NPC_S_KPU10_TU_MPLS, 12, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_NSH, + NPC_F_LD_L_MPLS_4_LABELS, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU8_ICMP, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 4, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU8_IGMP, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 8, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP, + NPC_F_LD_L_MPLS_2_LABELS, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU8_ESP, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 12, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP, + NPC_F_LD_L_MPLS_3_LABELS, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU8_AH, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 4, 0, 0, 0, + NPC_S_KPU10_TU_MPLS, 12, 1, + NPC_LID_LD, NPC_LT_LD_TU_MPLS_IN_IP, + NPC_F_LD_L_MPLS_4_LABELS, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 2, 0, NPC_S_KPU8_GRE, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE, + 0, + 1, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 6, 0, NPC_S_KPU12_TU_IP, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE, + 0, + 1, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 6, 0, NPC_S_KPU12_TU_IP6, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4_HAS_OPTIONS, 0, 0xf, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE, + 0, + 1, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS, - 0, 0xf, 0, 2, + NPC_ERRLEV_LE, NPC_EC_NSH_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_TU_NSH_IN_GRE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLAN, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LC, NPC_EC_IP_VER, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLAN, + NPC_F_LE_L_VXLAN_NOVNI, + 0, 0, 0, 0, }, { - 
NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LC, NPC_LT_LC_ARP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LE, NPC_EC_VXLAN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LE, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LC, NPC_LT_LC_RARP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LC, NPC_LT_LC_PTP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LC, NPC_LT_LC_FCOE, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0, - 2, 0, NPC_S_KPU8_TCP, 40, 1, - NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10, - 2, 0, NPC_S_KPU8_UDP, 40, 1, - NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU8_SCTP, 40, 1, - NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + NPC_F_LE_L_VXLANGPE_NOVNI, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU8_ICMP, 40, 1, - NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + NPC_F_LE_L_VXLANGPE_NOVNI, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU8_ICMP6, 40, 1, - NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + NPC_F_LE_L_VXLANGPE_NOVNI, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU8_ESP, 40, 1, - NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU10_TU_NSH_IN_VXLANGPE, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + NPC_F_LE_L_VXLANGPE_NOVNI, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU8_AH, 40, 1, - NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 6, 10, 0, 0, + NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, 8, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + NPC_F_LE_L_VXLANGPE_NOVNI, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU8_GRE, 40, 1, - NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + NPC_F_LE_L_VXLANGPE_UNK, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 6, 0, NPC_S_KPU12_TU_IP6, 40, 1, - NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_TUN_IP6, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_VXLANGPE, + NPC_F_LE_L_VXLANGPE_NONP, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 3, 0, NPC_S_KPU9_TU_MPLS, 40, 1, - NPC_LID_LC, NPC_LT_LC_IP6, 
NPC_F_IP6_MPLS_IN_IP, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + 0, + 0, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU6_IP6_EXT, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_HAS_EXT, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_OAM, + 0, 0x3f, 0, 2, }, { - NPC_ERRLEV_LC, NPC_EC_IP6_VER, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_CRI_OPT, + 0, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 6, 0, NPC_S_KPU12_TU_IP, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 1, 0, + NPC_S_KPU11_TU_ETHER, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_OAM_CRI_OPT, + 0, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 6, 0, NPC_S_KPU12_TU_IP6, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + 0, + 0, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 5, 0, NPC_S_KPU11_TU_ETHER, 8, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_OAM, + 0, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_CRI_OPT, + 0, 0x3f, 0, 2, }, { - NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_OAM_CRI_OPT, + 0, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 6, 0, NPC_S_KPU12_TU_IP, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + 0, + 0, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 6, 0, NPC_S_KPU12_TU_IP6, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_OAM, + 0, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_CRI_OPT, + 0, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0, - NPC_LID_LB, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 8, 1, + NPC_LID_LE, NPC_LT_LE_GENEVE, + NPC_F_LE_L_GENEVE_OAM_CRI_OPT, + 0, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 6, 0, NPC_S_KPU12_TU_IP, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_GTPC, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 6, 0, NPC_S_KPU12_TU_IP6, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + 
NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 8, 1, + NPC_LID_LE, NPC_LT_LE_GTPU, + NPC_F_LE_L_GTPU_G_PDU, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LE, NPC_LT_LE_GTPU, + NPC_F_LE_L_GTPU_UNK, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 5, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 4, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 3, 0, NPC_S_KPU9_TU_MPLS, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 8, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LC, NPC_EC_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LC, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU10_TU_MPLS_PL, 12, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP, + 0, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_action kpu6_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LC, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 4, 0, 0, 0, + NPC_S_KPU10_TU_MPLS, 12, 1, + NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_UDP, + 0, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_action kpu7_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LC, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LE, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LE, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, }; -static struct npc_kpu_profile_action kpu8_action_entries[] = { - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 7, 0, NPC_S_KPU16_HTTP_DATA, 20, 1, - NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 7, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1, - NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 7, 0, NPC_S_KPU16_PPTP_DATA, 20, 1, - NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 7, 0, NPC_S_KPU16_TCP_DATA, 20, 1, - NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 7, 0, NPC_S_KPU16_HTTP_DATA, 0, 1, - NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS, - 12, 0xf0, 1, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 7, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1, - NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS, - 12, 0xf0, 1, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 7, 0, NPC_S_KPU16_PPTP_DATA, 0, 1, - NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS, - 12, 0xf0, 1, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 7, 0, NPC_S_KPU16_TCP_DATA, 0, 1, - NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS, - 12, 0xf0, 1, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN_NOVNI, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_LD, NPC_EC_VXLAN, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 
0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP6, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU9_TU_NSH, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NSH, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_MPLS, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP6, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU9_TU_NSH, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_NSH, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_MPLS, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_UNK, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NONP, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f, - 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f, - 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT, 8, 0x3f, - 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT, - 8, 0x3f, 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f, - 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, - 8, 0x3f, 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT, - 8, 0x3f, 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT, - 8, 0x3f, 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP6, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f, - 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP6, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f, - 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP6, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT, - 8, 0x3f, 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, 
NPC_S_KPU12_TU_IP6, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT, - 8, 0x3f, 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPC, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 16, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_G_PDU, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_UNK, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 7, 0, NPC_S_KPU16_UDP_DATA, 8, 1, - NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_UNK_PORT, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LD, NPC_LT_LD_SCTP, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LD, NPC_LT_LD_ICMP, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LD, NPC_LT_LD_IGMP, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LD, NPC_LT_LD_ICMP6, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LD, NPC_LT_LD_ESP, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LD, NPC_LT_LD_AH, 0, 0, 0, - 0, 0, - }, +static struct npc_kpu_profile_action kpu10_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 2, 0, NPC_S_KPU11_TU_ETHER, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_NVGRE, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU12_TU_IP, 4, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LD, NPC_EC_NVGRE, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU12_TU_IP6, 4, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU11_TU_ETHER, 8, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU11_TU_ETHER, 4, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0, - 0, 0, + NPC_ERRLEV_LE, NPC_EC_MPLS_2MANY, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU12_TU_IP, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU12_TU_IP6, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 
12, 16, 20, 0, 0, + NPC_S_KPU11_TU_ETHER, 4, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 0, 0, + NPC_S_KPU11_TU_ETHER, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU11_TU_MPLS_PL, 4, 1, + NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE, + 0, 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU11_TU_MPLS_PL, 8, 1, + NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU11_TU_MPLS_PL, 12, 1, + NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 4, 0, 0, 0, + NPC_S_KPU11_TU_MPLS, 12, 1, + NPC_LID_LF, NPC_LT_LF_TU_MPLS_IN_VXLANGPE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 1, 0, + NPC_S_KPU12_TU_IP, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE, + 0, + 1, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 1, 0, + NPC_S_KPU12_TU_IP6, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE, + 0, + 1, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU11_TU_ETHER_IN_NSH, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE, + 0, + 1, 0x3f, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_LF, NPC_EC_NSH_UNK, + 6, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_NSH_IN_VXLANGPE, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, - 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1, - NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ, + NPC_ERRLEV_LE, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, 0, 0, 0, 0, }, +}; + +static struct npc_kpu_profile_action kpu11_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU9_TU_NSH, 4, 1, - NPC_LID_LD, NPC_LT_LD_GRE_NSH, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 14, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU9_TU_NSH, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM, 0, 0, - 0, 0, + NPC_ERRLEV_RE, 
NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 14, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU9_TU_NSH, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU12_TU_ARP, 14, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU9_TU_NSH, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU9_TU_NSH, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU9_TU_NSH, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU12_TU_ARP, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU9_TU_NSH, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU9_TU_NSH, 16, 1, - NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 22, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_STAG_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 4, 1, - NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 22, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_STAG_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU12_TU_ARP, 22, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_STAG_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_STAG_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + 
NPC_S_KPU12_TU_ARP, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP, 16, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 22, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_QINQ_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP6, 4, 1, - NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 22, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_QINQ_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP6, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU12_TU_ARP, 22, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_QINQ_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP6, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_QINQ_CTAG, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP6, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_QINQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP6, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_QINQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP6, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU12_TU_ARP, 18, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_L_WITH_QINQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP6, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_U_UNK_ETYPE | NPC_F_LF_L_WITH_QINQ, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 3, 0, NPC_S_KPU12_TU_IP6, 16, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_ETHER, + NPC_F_LF_U_UNK_ETYPE, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_ROUTE, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LF, NPC_LT_LF_TU_PPP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_UNK_PROTO, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 4, 0, + NPC_LID_LF, NPC_LT_NA, + 0, 
+ 0, 0, 0, 0, }, { - NPC_ERRLEV_LD, NPC_EC_GRE, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 4, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU11_TU_PPP, 8, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1, 0, 0, - 0, 0, + NPC_ERRLEV_LF, NPC_EC_MPLS_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU11_TU_PPP, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ, 0, 0, - 0, 0, + NPC_ERRLEV_LF, NPC_EC_MPLS_2MANY, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU11_TU_PPP, 12, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_ACK, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 0, 0, + NPC_S_KPU12_TU_IP, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU11_TU_PPP, 16, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ_ACK, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 0, 0, + NPC_S_KPU12_TU_IP6, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_UNK_PROTO, 0, 0, - 0, 0, + NPC_ERRLEV_LF, NPC_EC_MPLS_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LD, NPC_EC_GRE_VER1, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_ETHER_IN_NSH, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LD, NPC_EC_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LF, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LF, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, }; -static struct npc_kpu_profile_action kpu9_action_entries[] = { - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, - }, +static struct npc_kpu_profile_action kpu12_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 0, - NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_2_LABELS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU15_TU_TCP, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 0, - NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_3_LABELS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU15_TU_UDP, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0, - 0, 0, NPC_S_KPU10_TU_MPLS, 12, 0, - NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_4_LABELS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_SCTP, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 1, - NPC_LID_LD, NPC_LT_LD_TU_MPLS, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_ICMP, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 1, - NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_2_LABELS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + 
NPC_S_KPU15_TU_IGMP, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 1, - NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_3_LABELS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_ESP, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0, - 0, 0, NPC_S_KPU10_TU_MPLS, 12, 1, - NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_4_LABELS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_AH, 20, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 2, 0, NPC_S_KPU12_TU_IP, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_UNK_IP_PROTO, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 2, 0, NPC_S_KPU12_TU_IP6, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU15_TU_TCP, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 1, 0, NPC_S_KPU11_TU_ETHER, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU15_TU_UDP, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU10_TU_NSH, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_SCTP, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 1, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_ICMP, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, }, { - NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_IGMP, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, }, -}; - -static struct npc_kpu_profile_action kpu10_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 1, 0, NPC_S_KPU12_TU_IP, 4, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_ESP, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 1, 0, NPC_S_KPU12_TU_IP6, 4, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_AH, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS, + 0, 0xf, 0, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 0, 0, NPC_S_KPU11_TU_ETHER, 8, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + NPC_F_LG_U_IP_HAS_OPTIONS | NPC_F_LG_U_UNK_IP_PROTO, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LF, NPC_EC_IP_VER, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + 
NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_ARP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 1, 0, NPC_S_KPU12_TU_IP, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 12, 0, 2, 0, + NPC_S_KPU15_TU_TCP, 40, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 1, 0, NPC_S_KPU12_TU_IP6, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 2, 0, + NPC_S_KPU15_TU_UDP, 40, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_SCTP, 40, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_ICMP, 40, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 1, 0, NPC_S_KPU12_TU_IP, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_ICMP6, 40, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 1, 0, NPC_S_KPU12_TU_IP6, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_ESP, 40, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, - 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU15_TU_AH, 40, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 2, 0, 0, 0, 0, + NPC_S_KPU13_TU_IP6_EXT, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + NPC_F_LG_U_IP6_HAS_EXT, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f, - 0, 2, + NPC_ERRLEV_LF, NPC_EC_IP6_VER, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LG, NPC_LT_LG_TU_IP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LD, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LF, NPC_EC_UNK, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LG, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, }; -static struct npc_kpu_profile_action kpu11_action_entries[] = { - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 0, 0, NPC_S_KPU12_TU_IP, 14, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 0, 0, NPC_S_KPU12_TU_IP6, 14, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU12_TU_ARP, 14, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 0, 0, NPC_S_KPU12_TU_IP, 18, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 0, 0, NPC_S_KPU12_TU_IP6, 18, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU12_TU_ARP, 18, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0, - 
0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG_UNK, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 0, 0, NPC_S_KPU12_TU_IP, 22, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 0, 0, NPC_S_KPU12_TU_IP6, 22, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU12_TU_ARP, 22, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, - NPC_F_TU_ETHER_STAG_CTAG_UNK, 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 0, 0, NPC_S_KPU12_TU_IP, 18, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 0, 0, NPC_S_KPU12_TU_IP6, 18, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU12_TU_ARP, 18, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_UNK, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 0, 0, NPC_S_KPU12_TU_IP, 22, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 0, 0, NPC_S_KPU12_TU_IP6, 22, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU12_TU_ARP, 22, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, - NPC_F_TU_ETHER_QINQ_CTAG_UNK, 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0, - 0, 0, NPC_S_KPU12_TU_IP, 18, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, - 0, 0, NPC_S_KPU12_TU_IP6, 18, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU12_TU_ARP, 18, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_UNK, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_UNK, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LE, NPC_LT_LE_TU_PPP, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_NSH, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LE, NPC_LT_LE_TU_3RD_NSH, 0, 0, 0, - 0, 0, - }, +static struct npc_kpu_profile_action kpu13_action_entries[] = { { - NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LE, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, }; -static struct npc_kpu_profile_action kpu12_action_entries[] = { - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0, - 2, 0, 
NPC_S_KPU15_TU_TCP, 20, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 2, 0, NPC_S_KPU15_TU_UDP, 20, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_SCTP, 20, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_ICMP, 20, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_IGMP, 20, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_ESP, 20, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_AH, 20, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_UNK_PROTO, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0, - 2, 0, NPC_S_KPU15_TU_TCP, 0, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 2, 0, NPC_S_KPU15_TU_UDP, 0, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_SCTP, 0, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_ICMP, 0, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_IGMP, 0, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_ESP, 0, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_AH, 0, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf, - 0, 2, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, - NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0, 0, 0, - }, - { - NPC_ERRLEV_LF, NPC_EC_IP_VER, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LF, NPC_LT_LF_TU_ARP, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0, - 2, 0, NPC_S_KPU15_TU_TCP, 40, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 2, 0, NPC_S_KPU15_TU_UDP, 40, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_SCTP, 40, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0, - 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_ICMP, 40, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0, - 0, 0, - }, +static struct npc_kpu_profile_action kpu14_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_ICMP6, 40, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, +}; + +static struct npc_kpu_profile_action kpu15_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_ESP, 40, 1, - NPC_LID_LC, NPC_LT_LF_TU_IP6, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY, 
+ 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 2, 0, NPC_S_KPU15_TU_AH, 40, 1, - NPC_LID_LC, NPC_LT_LF_TU_IP6, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_ZERO, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, - 0, 0, NPC_S_KPU13_TU_IP6_EXT, 0, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP6, NPC_F_IP6_HAS_EXT, 0, 0, - 0, 0, + NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_RST_FIN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LF, NPC_EC_IP6_VER, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_URG_SYN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LF, NPC_EC_UNK, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LF, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_RST_SYN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + 0, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_action kpu13_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LC, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_SYN_FIN, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + 0, + 0, 0, 0, 0, }, -}; - -static struct npc_kpu_profile_action kpu14_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LC, NPC_LT_NA, 0, 0, 0, - 0, 0, - }, -}; - -static struct npc_kpu_profile_action kpu15_action_entries[] = { - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU16_HTTP_DATA, 20, 1, - NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_HTTP_DATA, 20, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_L_TCP_HTTP, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1, - NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_HTTPS_DATA, 20, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_L_TCP_HTTP, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU16_PPTP_DATA, 20, 1, - NPC_LID_LD, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_PPTP_DATA, 20, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_L_TCP_PPTP, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU16_TCP_DATA, 20, 1, - NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_TCP_DATA, 20, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_L_TCP_UNK_PORT, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU16_HTTP_DATA, 0, 1, - NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_HTTP_DATA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_HTTP, 12, 0xf0, 1, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1, - NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_HTTPS_DATA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_HTTPS, 12, 0xf0, 1, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 
0, NPC_S_KPU16_PPTP_DATA, 0, 1, - NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_PPTP_DATA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_PPTP, 12, 0xf0, 1, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU16_TCP_DATA, 0, 1, - NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_TCP_DATA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_TCP, + NPC_F_LH_U_TCP_HAS_OPTIONS | NPC_F_LH_L_TCP_UNK_PORT, 12, 0xf0, 1, 2, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 0, NPC_S_KPU16_UDP_DATA, 8, 1, - NPC_LID_LG, NPC_LT_LG_TU_UDP, NPC_F_UDP_UNK_PORT, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU16_UDP_DATA, 8, 1, + NPC_LID_LH, NPC_LT_LH_TU_UDP, + NPC_F_LH_L_UDP_UNK_PORT, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LG, NPC_LT_LG_TU_SCTP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_SCTP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LG, NPC_LT_LG_TU_ICMP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_ICMP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LG, NPC_LT_LG_TU_IGMP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_IGMP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LG, NPC_LT_LG_TU_ICMP6, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_ICMP6, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LG, NPC_LT_LG_TU_ESP, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_ESP, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LG, NPC_LT_LG_TU_AH, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 1, + NPC_LID_LH, NPC_LT_LH_TU_AH, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_LG, NPC_EC_L4, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 0, - NPC_LID_LG, NPC_LT_NA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_LG, NPC_EC_L4, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LH, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, }; static struct npc_kpu_profile_action kpu16_action_entries[] = { { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LH, NPC_LT_LH_TCP_DATA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LH, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LH, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LH, NPC_LT_LH_HTTP_DATA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LH, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LH, NPC_LT_LH_HTTPS_DATA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LH, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LH, NPC_LT_LH_PPTP_DATA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, 
NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LH, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, { - NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, - 0, 1, NPC_S_NA, 0, 1, - NPC_LID_LH, NPC_LT_LH_UDP_DATA, 0, 0, 0, - 0, 0, + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LH, NPC_LT_NA, + 0, + 0, 0, 0, 0, }, }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index e581091c09c4..5c190c3ce898 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -56,12 +56,34 @@ static char *mkex_profile; /* MKEX profile name */ module_param(mkex_profile, charp, 0000); MODULE_PARM_DESC(mkex_profile, "MKEX profile name string"); +static void rvu_setup_hw_capabilities(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + + hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1; + hw->cap.nix_fixed_txschq_mapping = false; + hw->cap.nix_shaping = true; + hw->cap.nix_tx_link_bp = true; + hw->cap.nix_rx_multicast = true; + + if (is_rvu_96xx_B0(rvu)) { + hw->cap.nix_fixed_txschq_mapping = true; + hw->cap.nix_txsch_per_cgx_lmac = 4; + hw->cap.nix_txsch_per_lbk_lmac = 132; + hw->cap.nix_txsch_per_sdp_lmac = 76; + hw->cap.nix_shaping = false; + hw->cap.nix_tx_link_bp = false; + if (is_rvu_96xx_A0(rvu)) + hw->cap.nix_rx_multicast = false; + } +} + /* Poll a RVU block's register 'offset', for a 'zero' * or 'nonzero' at bits specified by 'mask' */ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero) { - unsigned long timeout = jiffies + usecs_to_jiffies(100); + unsigned long timeout = jiffies + usecs_to_jiffies(10000); void __iomem *reg; u64 reg_val; @@ -73,7 +95,6 @@ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero) if (!zero && (reg_val & mask)) return 0; usleep_range(1, 5); - timeout--; } return -EBUSY; } @@ -433,9 +454,9 @@ static void rvu_reset_all_blocks(struct rvu *rvu) rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST); - rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST); - rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST); - rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST); } static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block) @@ -877,8 +898,8 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, return 0; } -static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req, - struct ready_msg_rsp *rsp) +int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req, + struct ready_msg_rsp *rsp) { return 0; } @@ -1023,9 +1044,9 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach, return 0; } -static int rvu_mbox_handler_detach_resources(struct rvu *rvu, - struct rsrc_detach *detach, - struct msg_rsp *rsp) +int rvu_mbox_handler_detach_resources(struct rvu *rvu, + struct rsrc_detach *detach, + struct msg_rsp *rsp) { return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); } @@ -1171,9 +1192,9 @@ fail: return -ENOSPC; } -static int rvu_mbox_handler_attach_resources(struct rvu *rvu, - struct rsrc_attach *attach, - struct msg_rsp *rsp) +int rvu_mbox_handler_attach_resources(struct rvu *rvu, + struct rsrc_attach *attach, + struct msg_rsp *rsp) { u16 pcifunc = attach->hdr.pcifunc; int err; @@ 
-1294,8 +1315,8 @@ static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset); } -static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, - struct msix_offset_rsp *rsp) +int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, + struct msix_offset_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; @@ -1343,8 +1364,8 @@ static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, return 0; } -static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp) +int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; u16 vf, numvfs; @@ -1363,6 +1384,17 @@ static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, return 0; } +int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req, + struct get_hw_cap_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + + rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping; + rsp->nix_shaping = hw->cap.nix_shaping; + + return 0; +} + static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, struct mbox_msghdr *req) { @@ -1440,12 +1472,12 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type) /* Process received mbox messages */ req_hdr = mdev->mbase + mbox->rx_start; - if (req_hdr->num_msgs == 0) + if (mw->mbox_wrk[devid].num_msgs == 0) return; offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); - for (id = 0; id < req_hdr->num_msgs; id++) { + for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) { msg = mdev->mbase + offset; /* Set which PF/VF sent this message based on mbox IRQ */ @@ -1471,13 +1503,14 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type) if (msg->pcifunc & RVU_PFVF_FUNC_MASK) dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n", err, otx2_mbox_id2name(msg->id), - msg->id, devid, + msg->id, rvu_get_pf(msg->pcifunc), (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1); else dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n", err, otx2_mbox_id2name(msg->id), msg->id, devid); } + mw->mbox_wrk[devid].num_msgs = 0; /* Send mbox responses to VF/PF */ otx2_mbox_msg_send(mbox, devid); @@ -1523,14 +1556,14 @@ static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type) mdev = &mbox->dev[devid]; rsp_hdr = mdev->mbase + mbox->rx_start; - if (rsp_hdr->num_msgs == 0) { + if (mw->mbox_wrk_up[devid].up_num_msgs == 0) { dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n"); return; } offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); - for (id = 0; id < rsp_hdr->num_msgs; id++) { + for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) { msg = mdev->mbase + offset; if (msg->id >= MBOX_MSG_MAX) { @@ -1560,6 +1593,7 @@ end: offset = mbox->rx_start + msg->next_msgoff; mdev->msgs_acked++; } + mw->mbox_wrk_up[devid].up_num_msgs = 0; otx2_mbox_reset(mbox, devid); } @@ -1697,14 +1731,28 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first, mbox = &mw->mbox; mdev = &mbox->dev[i]; hdr = mdev->mbase + mbox->rx_start; - if (hdr->num_msgs) - queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work); + /*The hdr->num_msgs is set to zero immediately in the interrupt + * handler to ensure that it holds a correct value next time + * when the interrupt handler is called. 
+ * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler + * pf->mbox.up_num_msgs holds the data for use in + * pfaf_mbox_up_handler. */ + + if (hdr->num_msgs) { + mw->mbox_wrk[i].num_msgs = hdr->num_msgs; + hdr->num_msgs = 0; + queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work); + } mbox = &mw->mbox_up; mdev = &mbox->dev[i]; hdr = mdev->mbase + mbox->rx_start; - if (hdr->num_msgs) + if (hdr->num_msgs) { + mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs; + hdr->num_msgs = 0; queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work); + } } } @@ -2316,18 +2364,6 @@ static int rvu_enable_sriov(struct rvu *rvu) if (vfs > chans) vfs = chans; - /* AF's VFs work in pairs and talk over consecutive loopback channels. - * Thus we want to enable maximum even number of VFs. In case - * odd number of VFs are available then the last VF on the list - * remains disabled. - */ - if (vfs & 0x1) { - dev_warn(&pdev->dev, - "Number of VFs should be even. Enabling %d out of %d.\n", - vfs - 1, vfs); - vfs--; - } - if (!vfs) return 0; @@ -2432,6 +2468,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) rvu_reset_all_blocks(rvu); + rvu_setup_hw_capabilities(rvu); + err = rvu_setup_hw_resources(rvu); if (err) goto err_release_regions; @@ -2456,6 +2494,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_irq; + /* Initialize debugfs */ + rvu_dbg_init(rvu); + return 0; err_irq: rvu_unregister_interrupts(rvu); @@ -2482,6 +2523,7 @@ static void rvu_remove(struct pci_dev *pdev) { struct rvu *rvu = pci_get_drvdata(pdev); + rvu_dbg_exit(rvu); rvu_unregister_interrupts(rvu); rvu_flr_wq_destroy(rvu); rvu_cgx_exit(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index c9d60b0554c0..51c206f4fe6f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Marvell OcteonTx2 RVU Admin Function driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver * * Copyright (C) 2018 Marvell International Ltd. * @@ -35,9 +35,36 @@ #define RVU_PFVF_FUNC_SHIFT 0 #define RVU_PFVF_FUNC_MASK 0x3FF +#ifdef CONFIG_DEBUG_FS +struct dump_ctx { + int lf; + int id; + bool all; +}; + +struct rvu_debugfs { + struct dentry *root; + struct dentry *cgx_root; + struct dentry *cgx; + struct dentry *lmac; + struct dentry *npa; + struct dentry *nix; + struct dentry *npc; + struct dump_ctx npa_aura_ctx; + struct dump_ctx npa_pool_ctx; + struct dump_ctx nix_cq_ctx; + struct dump_ctx nix_rq_ctx; + struct dump_ctx nix_sq_ctx; + int npa_qsize_id; + int nix_qsize_id; +}; +#endif + struct rvu_work { struct work_struct work; struct rvu *rvu; + int num_msgs; + int up_num_msgs; }; struct rsrc_bmap { @@ -99,6 +126,7 @@ struct npc_mcam { u16 lprio_start; u16 hprio_count; u16 hprio_end; + u16 rx_miss_act_cntr; /* Counter for RX MISS action */ }; /* Structure for per RVU func info ie PF/VF */ @@ -151,15 +179,20 @@ struct rvu_pfvf { struct mcam_entry entry; int rxvlan_index; bool rxvlan; + + bool cgx_in_use; /* Is this PF/VF using CGX?
*/ + int cgx_users; /* number of cgx users - used only by PFs */ }; struct nix_txsch { struct rsrc_bmap schq; u8 lvl; -#define NIX_TXSCHQ_TL1_CFG_DONE BIT_ULL(0) +#define NIX_TXSCHQ_FREE BIT_ULL(1) +#define NIX_TXSCHQ_CFG_DONE BIT_ULL(0) #define TXSCH_MAP_FUNC(__pfvf_map) ((__pfvf_map) & 0xFFFF) #define TXSCH_MAP_FLAGS(__pfvf_map) ((__pfvf_map) >> 16) #define TXSCH_MAP(__func, __flags) (((__func) & 0xFFFF) | ((__flags) << 16)) +#define TXSCH_SET_FLAG(__pfvf_map, flag) ((__pfvf_map) | ((flag) << 16)) u32 *pfvf_map; }; @@ -193,6 +226,21 @@ struct nix_hw { struct nix_lso lso; }; +/* RVU block's capabilities or functionality, + * which vary by silicon version/skew. + */ +struct hw_cap { + /* Transmit side supported functionality */ + u8 nix_tx_aggr_lvl; /* Tx link's traffic aggregation level */ + u16 nix_txsch_per_cgx_lmac; /* Max Q's transmitting to CGX LMAC */ + u16 nix_txsch_per_lbk_lmac; /* Max Q's transmitting to LBK LMAC */ + u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */ + bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */ + bool nix_shaping; /* Is shaping and coloring supported */ + bool nix_tx_link_bp; /* Can link backpressure TL queues ? */ + bool nix_rx_multicast; /* Rx packet replication support */ +}; + struct rvu_hwinfo { u8 total_pfs; /* MAX RVU PFs HW supports */ u16 total_vfs; /* Max RVU VFs HW supports */ @@ -204,7 +252,7 @@ struct rvu_hwinfo { u8 sdp_links; u8 npc_kpus; /* No of parser units */ - + struct hw_cap cap; struct rvu_block block[BLK_COUNT]; /* Block info */ struct nix_hw *nix0; struct npc_pkind pkind; @@ -261,8 +309,13 @@ struct rvu { struct workqueue_struct *cgx_evh_wq; spinlock_t cgx_evq_lock; /* cgx event queue lock */ struct list_head cgx_evq_head; /* cgx event queue head */ + struct mutex cgx_cfg_lock; /* serialize cgx configuration */ char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */ + +#ifdef CONFIG_DEBUG_FS + struct rvu_debugfs rvu_dbg; +#endif }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -285,7 +338,8 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset) return readq(rvu->pfreg_base + offset); } -static inline bool is_rvu_9xxx_A0(struct rvu *rvu) +/* Silicon revisions */ +static inline bool is_rvu_96xx_A0(struct rvu *rvu) { struct pci_dev *pdev = rvu->pdev; @@ -293,6 +347,14 @@ static inline bool is_rvu_9xxx_A0(struct rvu *rvu) (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX); } +static inline bool is_rvu_96xx_B0(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) && + (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX); +} + /* Function Prototypes * RVU */ @@ -342,52 +404,25 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) *lmac_id = (map & 0xF); } +#define M(_name, _id, fn_name, req, rsp) \ +int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *); +MBOX_MESSAGES +#undef M + int rvu_cgx_init(struct rvu *rvu); int rvu_cgx_exit(struct rvu *rvu); void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu); int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start); -int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req, - struct cgx_stats_rsp *rsp); -int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, - struct cgx_mac_addr_set_or_get 
*req, - struct cgx_mac_addr_set_or_get *rsp); -int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, - struct cgx_mac_addr_set_or_get *req, - struct cgx_mac_addr_set_or_get *rsp); -int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req, - struct cgx_link_info_msg *rsp); -int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp); - +void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable); +int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start); +int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index, + int rxtxflag, u64 *stat); /* NPA APIs */ int rvu_npa_init(struct rvu *rvu); void rvu_npa_freemem(struct rvu *rvu); void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf); -int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu, - struct npa_aq_enq_req *req, - struct npa_aq_enq_rsp *rsp); -int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu, - struct hwctx_disable_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu, - struct npa_lf_alloc_req *req, - struct npa_lf_alloc_rsp *rsp); -int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp); +int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req, + struct npa_aq_enq_rsp *rsp); /* NIX APIs */ bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc); @@ -397,55 +432,7 @@ int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, void rvu_nix_freemem(struct rvu *rvu); int rvu_get_nixlf_count(struct rvu *rvu); void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf); -int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, - struct nix_lf_alloc_req *req, - struct nix_lf_alloc_rsp *rsp); -int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, - struct nix_aq_enq_req *req, - struct nix_aq_enq_rsp *rsp); -int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, - struct hwctx_disable_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, - struct nix_txsch_alloc_req *req, - struct nix_txsch_alloc_rsp *rsp); -int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, - struct nix_txsch_free_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, - struct nix_txschq_config *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, - struct nix_vtag_config *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, - struct nix_rss_flowkey_cfg *req, - struct nix_rss_flowkey_cfg_rsp *rsp); -int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, - struct nix_set_mac_addr *req, - struct msg_rsp *rsp); -int 
rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, - struct nix_mark_format_cfg *req, - struct nix_mark_format_cfg_rsp *rsp); -int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, - struct nix_lso_format_cfg *req, - struct nix_lso_format_cfg_rsp *rsp); +int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf); /* NPC APIs */ int rvu_npc_init(struct rvu *rvu); @@ -460,45 +447,25 @@ void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan); +void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc); int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, int group, int alg_idx, int mcam_index); -int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, - struct npc_mcam_alloc_entry_req *req, - struct npc_mcam_alloc_entry_rsp *rsp); -int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, - struct npc_mcam_free_entry_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, - struct npc_mcam_write_entry_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu, - struct npc_mcam_ena_dis_entry_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu, - struct npc_mcam_ena_dis_entry_req *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu, - struct npc_mcam_shift_entry_req *req, - struct npc_mcam_shift_entry_rsp *rsp); -int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, - struct npc_mcam_alloc_counter_req *req, - struct npc_mcam_alloc_counter_rsp *rsp); -int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, - struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu, - struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu, - struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu, - struct npc_mcam_oper_counter_req *req, - struct npc_mcam_oper_counter_rsp *rsp); -int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, - struct npc_mcam_alloc_and_write_entry_req *req, - struct npc_mcam_alloc_and_write_entry_rsp *rsp); -int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, - struct npc_get_kex_cfg_rsp *rsp); +void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, + int blkaddr, int *alloc_cnt, + int *enable_cnt); +void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, + int 
blkaddr, int *alloc_cnt, + int *enable_cnt); + +#ifdef CONFIG_DEBUG_FS +void rvu_dbg_init(struct rvu *rvu); +void rvu_dbg_exit(struct rvu *rvu); +#else +static inline void rvu_dbg_init(struct rvu *rvu) {} +static inline void rvu_dbg_exit(struct rvu *rvu) {} +#endif #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 7d7133c5f799..0bbb2eb1446e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -14,6 +14,7 @@ #include "rvu.h" #include "cgx.h" +#include "rvu_reg.h" struct cgx_evq_entry { struct list_head evq_node; @@ -40,12 +41,25 @@ MBOX_UP_CGX_MESSAGES #undef M /* Returns bitmap of mapped PFs */ -static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) +static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) { return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id]; } -static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id) +static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) +{ + unsigned long pfmap; + + pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id); + + /* Assumes only one pf mapped to a cgx lmac port */ + if (!pfmap) + return -ENODEV; + else + return find_first_bit(&pfmap, 16); +} + +static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id) { return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF); } @@ -294,6 +308,8 @@ int rvu_cgx_init(struct rvu *rvu) if (err) return err; + mutex_init(&rvu->cgx_cfg_lock); + /* Ensure event handler registration is completed, before * we turn on the links */ @@ -334,6 +350,24 @@ int rvu_cgx_exit(struct rvu *rvu) return 0; } +void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable) +{ + u8 cgx_id, lmac_id; + void *cgxd; + + if (!is_pf_cgxmapped(rvu, pf)) + return; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + + /* Set / clear CTL_BCK to control pause frame forwarding to NIX */ + if (enable) + cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true); + else + cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false); +} + int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) { int pf = rvu_get_pf(pcifunc); @@ -562,3 +596,94 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req, rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false); return 0; } + +/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those + * from its VFs as well. ie. 
NIX rx/tx counters at the CGX port level + */ +int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, + int index, int rxtxflag, u64 *stat) +{ + struct rvu_block *block; + int blkaddr; + u16 pcifunc; + int pf, lf; + + if (!cgxd || !rvu) + return -EINVAL; + + pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id); + if (pf < 0) + return pf; + + /* Assumes LF of a PF and all of its VF belongs to the same + * NIX block + */ + pcifunc = pf << RVU_PFVF_PF_SHIFT; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return 0; + block = &rvu->hw->block[blkaddr]; + + *stat = 0; + for (lf = 0; lf < block->lf.max; lf++) { + /* Check if a lf is attached to this PF or one of its VFs */ + if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc & + ~RVU_PFVF_FUNC_MASK))) + continue; + if (rxtxflag == NIX_STATS_RX) + *stat += rvu_read64(rvu, blkaddr, + NIX_AF_LFX_RX_STATX(lf, index)); + else + *stat += rvu_read64(rvu, blkaddr, + NIX_AF_LFX_TX_STATX(lf, index)); + } + + return 0; +} + +int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start) +{ + struct rvu_pfvf *parent_pf, *pfvf; + int cgx_users, err = 0; + + if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + return 0; + + parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; + pfvf = rvu_get_pfvf(rvu, pcifunc); + + mutex_lock(&rvu->cgx_cfg_lock); + + if (start && pfvf->cgx_in_use) + goto exit; /* CGX is already started hence nothing to do */ + if (!start && !pfvf->cgx_in_use) + goto exit; /* CGX is already stopped hence nothing to do */ + + if (start) { + cgx_users = parent_pf->cgx_users; + parent_pf->cgx_users++; + } else { + parent_pf->cgx_users--; + cgx_users = parent_pf->cgx_users; + } + + /* Start CGX when first of all NIXLFs is started. + * Stop CGX when last of all NIXLFs is stopped. + */ + if (!cgx_users) { + err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK, + start); + if (err) { + dev_err(rvu->dev, "Unable to %s CGX\n", + start ? "start" : "stop"); + /* Revert the usage count in case of error */ + parent_pf->cgx_users = start ? parent_pf->cgx_users - 1 + : parent_pf->cgx_users + 1; + goto exit; + } + } + pfvf->cgx_in_use = start; +exit: + mutex_unlock(&rvu->cgx_cfg_lock); + return err; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c new file mode 100644 index 000000000000..77adad4adb1b --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -0,0 +1,1711 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2019 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifdef CONFIG_DEBUG_FS + +#include <linux/fs.h> +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu_struct.h" +#include "rvu_reg.h" +#include "rvu.h" +#include "cgx.h" +#include "npc.h" + +#define DEBUGFS_DIR_NAME "octeontx2" + +enum { + CGX_STAT0, + CGX_STAT1, + CGX_STAT2, + CGX_STAT3, + CGX_STAT4, + CGX_STAT5, + CGX_STAT6, + CGX_STAT7, + CGX_STAT8, + CGX_STAT9, + CGX_STAT10, + CGX_STAT11, + CGX_STAT12, + CGX_STAT13, + CGX_STAT14, + CGX_STAT15, + CGX_STAT16, + CGX_STAT17, + CGX_STAT18, +}; + +/* NIX TX stats */ +enum nix_stat_lf_tx { + TX_UCAST = 0x0, + TX_BCAST = 0x1, + TX_MCAST = 0x2, + TX_DROP = 0x3, + TX_OCTS = 0x4, + TX_STATS_ENUM_LAST, +}; + +/* NIX RX stats */ +enum nix_stat_lf_rx { + RX_OCTS = 0x0, + RX_UCAST = 0x1, + RX_BCAST = 0x2, + RX_MCAST = 0x3, + RX_DROP = 0x4, + RX_DROP_OCTS = 0x5, + RX_FCS = 0x6, + RX_ERR = 0x7, + RX_DRP_BCAST = 0x8, + RX_DRP_MCAST = 0x9, + RX_DRP_L3BCAST = 0xa, + RX_DRP_L3MCAST = 0xb, + RX_STATS_ENUM_LAST, +}; + +static char *cgx_rx_stats_fields[] = { + [CGX_STAT0] = "Received packets", + [CGX_STAT1] = "Octets of received packets", + [CGX_STAT2] = "Received PAUSE packets", + [CGX_STAT3] = "Received PAUSE and control packets", + [CGX_STAT4] = "Filtered DMAC0 (NIX-bound) packets", + [CGX_STAT5] = "Filtered DMAC0 (NIX-bound) octets", + [CGX_STAT6] = "Packets dropped due to RX FIFO full", + [CGX_STAT7] = "Octets dropped due to RX FIFO full", + [CGX_STAT8] = "Error packets", + [CGX_STAT9] = "Filtered DMAC1 (NCSI-bound) packets", + [CGX_STAT10] = "Filtered DMAC1 (NCSI-bound) octets", + [CGX_STAT11] = "NCSI-bound packets dropped", + [CGX_STAT12] = "NCSI-bound octets dropped", +}; + +static char *cgx_tx_stats_fields[] = { + [CGX_STAT0] = "Packets dropped due to excessive collisions", + [CGX_STAT1] = "Packets dropped due to excessive deferral", + [CGX_STAT2] = "Multiple collisions before successful transmission", + [CGX_STAT3] = "Single collisions before successful transmission", + [CGX_STAT4] = "Total octets sent on the interface", + [CGX_STAT5] = "Total frames sent on the interface", + [CGX_STAT6] = "Packets sent with an octet count < 64", + [CGX_STAT7] = "Packets sent with an octet count == 64", + [CGX_STAT8] = "Packets sent with an octet count of 65–127", + [CGX_STAT9] = "Packets sent with an octet count of 128-255", + [CGX_STAT10] = "Packets sent with an octet count of 256-511", + [CGX_STAT11] = "Packets sent with an octet count of 512-1023", + [CGX_STAT12] = "Packets sent with an octet count of 1024-1518", + [CGX_STAT13] = "Packets sent with an octet count of > 1518", + [CGX_STAT14] = "Packets sent to a broadcast DMAC", + [CGX_STAT15] = "Packets sent to the multicast DMAC", + [CGX_STAT16] = "Transmit underflow and were truncated", + [CGX_STAT17] = "Control/PAUSE packets sent", +}; + +#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \ + blk_addr, NDC_AF_CONST) & 0xFF) + +#define rvu_dbg_NULL NULL +#define rvu_dbg_open_NULL NULL + +#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \ +static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, rvu_dbg_##read_op, inode->i_private); \ +} \ +static const struct file_operations rvu_dbg_##name##_fops = { \ + .owner = THIS_MODULE, \ + .open = rvu_dbg_open_##name, \ + .read = seq_read, \ + .write = rvu_dbg_##write_op, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +#define RVU_DEBUG_FOPS(name, read_op, write_op) \ +static const struct file_operations rvu_dbg_##name##_fops = { \ + .owner = 
THIS_MODULE, \ + .open = simple_open, \ + .read = rvu_dbg_##read_op, \ + .write = rvu_dbg_##write_op \ +} + +static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf); + +/* Dumps current provisioning status of all RVU block LFs */ +static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + int index, off = 0, flag = 0, go_back = 0, off_prev; + struct rvu *rvu = filp->private_data; + int lf, pf, vf, pcifunc; + struct rvu_block block; + int bytes_not_copied; + int buf_size = 2048; + char *buf; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOSPC; + off += scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t"); + for (index = 0; index < BLK_COUNT; index++) + if (strlen(rvu->hw->block[index].name)) + off += scnprintf(&buf[off], buf_size - 1 - off, + "%*s\t", (index - 1) * 2, + rvu->hw->block[index].name); + off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { + pcifunc = pf << 10 | vf; + if (!pcifunc) + continue; + + if (vf) { + go_back = scnprintf(&buf[off], + buf_size - 1 - off, + "PF%d:VF%d\t\t", pf, + vf - 1); + } else { + go_back = scnprintf(&buf[off], + buf_size - 1 - off, + "PF%d\t\t", pf); + } + + off += go_back; + for (index = 0; index < BLKTYPE_MAX; index++) { + block = rvu->hw->block[index]; + if (!strlen(block.name)) + continue; + off_prev = off; + for (lf = 0; lf < block.lf.max; lf++) { + if (block.fn_map[lf] != pcifunc) + continue; + flag = 1; + off += scnprintf(&buf[off], buf_size - 1 + - off, "%3d,", lf); + } + if (flag && off_prev != off) + off--; + else + go_back++; + off += scnprintf(&buf[off], buf_size - 1 - off, + "\t"); + } + if (!flag) + off -= go_back; + else + flag = 0; + off--; + off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); + } + } + + bytes_not_copied = copy_to_user(buffer, buf, off); + kfree(buf); + + if (bytes_not_copied) + return -EFAULT; + + *ppos = off; + return off; +} + +RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL); + +static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf, + u16 *pcifunc) +{ + struct rvu_block *block; + struct rvu_hwinfo *hw; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, blktype, 0); + if (blkaddr < 0) { + dev_warn(rvu->dev, "Invalid blktype\n"); + return false; + } + + hw = rvu->hw; + block = &hw->block[blkaddr]; + + if (lf < 0 || lf >= block->lf.max) { + dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n", + block->lf.max - 1); + return false; + } + + *pcifunc = block->fn_map[lf]; + if (!*pcifunc) { + dev_warn(rvu->dev, + "This LF is not attached to any RVU PFFUNC\n"); + return false; + } + return true; +} + +static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf) +{ + char *buf; + + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return; + + if (!pfvf->aura_ctx) { + seq_puts(m, "Aura context is not initialized\n"); + } else { + bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap, + pfvf->aura_ctx->qsize); + seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize); + seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf); + } + + if (!pfvf->pool_ctx) { + seq_puts(m, "Pool context is not initialized\n"); + } else { + bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap, + pfvf->pool_ctx->qsize); + seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize); + seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf); + } + 
kfree(buf); +} + +/* The 'qsize' entry dumps current Aura/Pool context Qsize + * and each context's current enable/disable status in a bitmap. + */ +static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused, + int blktype) +{ + void (*print_qsize)(struct seq_file *filp, + struct rvu_pfvf *pfvf) = NULL; + struct rvu_pfvf *pfvf; + struct rvu *rvu; + int qsize_id; + u16 pcifunc; + + rvu = filp->private; + switch (blktype) { + case BLKTYPE_NPA: + qsize_id = rvu->rvu_dbg.npa_qsize_id; + print_qsize = print_npa_qsize; + break; + + case BLKTYPE_NIX: + qsize_id = rvu->rvu_dbg.nix_qsize_id; + print_qsize = print_nix_qsize; + break; + + default: + return -EINVAL; + } + + if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + print_qsize(filp, pfvf); + + return 0; +} + +static ssize_t rvu_dbg_qsize_write(struct file *filp, + const char __user *buffer, size_t count, + loff_t *ppos, int blktype) +{ + char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix"; + struct seq_file *seqfile = filp->private_data; + char *cmd_buf, *cmd_buf_tmp, *subtoken; + struct rvu *rvu = seqfile->private; + u16 pcifunc; + int ret, lf; + + cmd_buf = memdup_user(buffer, count); + if (IS_ERR(cmd_buf)) + return -ENOMEM; + + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + cmd_buf_tmp = cmd_buf; + subtoken = strsep(&cmd_buf, " "); + ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL; + if (cmd_buf) + ret = -EINVAL; + + if (!strncmp(subtoken, "help", 4) || ret < 0) { + dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string); + goto qsize_write_done; + } + + if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) { + ret = -EINVAL; + goto qsize_write_done; + } + if (blktype == BLKTYPE_NPA) + rvu->rvu_dbg.npa_qsize_id = lf; + else + rvu->rvu_dbg.nix_qsize_id = lf; + +qsize_write_done: + kfree(cmd_buf_tmp); + return ret ? 
ret : count; +} + +static ssize_t rvu_dbg_npa_qsize_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return rvu_dbg_qsize_write(filp, buffer, count, ppos, + BLKTYPE_NPA); +} + +static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA); +} + +RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write); + +/* Dumps given NPA Aura's context */ +static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) +{ + struct npa_aura_s *aura = &rsp->aura; + + seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr); + + seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n", + aura->ena, aura->pool_caching); + seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n", + aura->pool_way_mask, aura->avg_con); + seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n", + aura->pool_drop_ena, aura->aura_drop_ena); + seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n", + aura->bp_ena, aura->aura_drop); + seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n", + aura->shift, aura->avg_level); + + seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n", + (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid); + + seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n", + (u64)aura->limit, aura->bp, aura->fc_ena); + seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n", + aura->fc_up_crossing, aura->fc_stype); + seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits); + + seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr); + + seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n", + aura->pool_drop, aura->update_time); + seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n", + aura->err_int, aura->err_int_ena); + seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n", + aura->thresh_int, aura->thresh_int_ena); + seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n", + aura->thresh_up, aura->thresh_qint_idx); + seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx); + + seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh); +} + +/* Dumps given NPA Pool's context */ +static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) +{ + struct npa_pool_s *pool = &rsp->pool; + + seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base); + + seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n", + pool->ena, pool->nat_align); + seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n", + pool->stack_caching, pool->stack_way_mask); + seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n", + pool->buf_offset, pool->buf_size); + + seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n", + pool->stack_max_pages, pool->stack_pages); + + seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc); + + seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n", + pool->stack_offset, pool->shift, pool->avg_level); + seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n", + pool->avg_con, pool->fc_ena, pool->fc_stype); + seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n", + pool->fc_hyst_bits, pool->fc_up_crossing); + seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time); + + seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr); + + seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start); + + seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end); + + seq_printf(m, "W8: err_int\t\t%d\nW8: 
err_int_ena\t\t%d\n", + pool->err_int, pool->err_int_ena); + seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int); + seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n", + pool->thresh_int_ena, pool->thresh_up); + seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n", + pool->thresh_qint_idx, pool->err_qint_idx); +} + +/* Reads aura/pool's ctx from admin queue */ +static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype) +{ + void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp); + struct npa_aq_enq_req aq_req; + struct npa_aq_enq_rsp rsp; + struct rvu_pfvf *pfvf; + int aura, rc, max_id; + int npalf, id, all; + struct rvu *rvu; + u16 pcifunc; + + rvu = m->private; + + switch (ctype) { + case NPA_AQ_CTYPE_AURA: + npalf = rvu->rvu_dbg.npa_aura_ctx.lf; + id = rvu->rvu_dbg.npa_aura_ctx.id; + all = rvu->rvu_dbg.npa_aura_ctx.all; + break; + + case NPA_AQ_CTYPE_POOL: + npalf = rvu->rvu_dbg.npa_pool_ctx.lf; + id = rvu->rvu_dbg.npa_pool_ctx.id; + all = rvu->rvu_dbg.npa_pool_ctx.all; + break; + default: + return -EINVAL; + } + + if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) { + seq_puts(m, "Aura context is not initialized\n"); + return -EINVAL; + } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) { + seq_puts(m, "Pool context is not initialized\n"); + return -EINVAL; + } + + memset(&aq_req, 0, sizeof(struct npa_aq_enq_req)); + aq_req.hdr.pcifunc = pcifunc; + aq_req.ctype = ctype; + aq_req.op = NPA_AQ_INSTOP_READ; + if (ctype == NPA_AQ_CTYPE_AURA) { + max_id = pfvf->aura_ctx->qsize; + print_npa_ctx = print_npa_aura_ctx; + } else { + max_id = pfvf->pool_ctx->qsize; + print_npa_ctx = print_npa_pool_ctx; + } + + if (id < 0 || id >= max_id) { + seq_printf(m, "Invalid %s, valid range is 0-%d\n", + (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool", + max_id - 1); + return -EINVAL; + } + + if (all) + id = 0; + else + max_id = id + 1; + + for (aura = id; aura < max_id; aura++) { + aq_req.aura_id = aura; + seq_printf(m, "======%s : %d=======\n", + (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL", + aq_req.aura_id); + rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp); + if (rc) { + seq_puts(m, "Failed to read context\n"); + return -EINVAL; + } + print_npa_ctx(m, &rsp); + } + return 0; +} + +static int write_npa_ctx(struct rvu *rvu, bool all, + int npalf, int id, int ctype) +{ + struct rvu_pfvf *pfvf; + int max_id = 0; + u16 pcifunc; + + if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + + if (ctype == NPA_AQ_CTYPE_AURA) { + if (!pfvf->aura_ctx) { + dev_warn(rvu->dev, "Aura context is not initialized\n"); + return -EINVAL; + } + max_id = pfvf->aura_ctx->qsize; + } else if (ctype == NPA_AQ_CTYPE_POOL) { + if (!pfvf->pool_ctx) { + dev_warn(rvu->dev, "Pool context is not initialized\n"); + return -EINVAL; + } + max_id = pfvf->pool_ctx->qsize; + } + + if (id < 0 || id >= max_id) { + dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n", + (ctype == NPA_AQ_CTYPE_AURA) ? 
"aura" : "pool", + max_id - 1); + return -EINVAL; + } + + switch (ctype) { + case NPA_AQ_CTYPE_AURA: + rvu->rvu_dbg.npa_aura_ctx.lf = npalf; + rvu->rvu_dbg.npa_aura_ctx.id = id; + rvu->rvu_dbg.npa_aura_ctx.all = all; + break; + + case NPA_AQ_CTYPE_POOL: + rvu->rvu_dbg.npa_pool_ctx.lf = npalf; + rvu->rvu_dbg.npa_pool_ctx.id = id; + rvu->rvu_dbg.npa_pool_ctx.all = all; + break; + default: + return -EINVAL; + } + return 0; +} + +static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count, + const char __user *buffer, int *npalf, + int *id, bool *all) +{ + int bytes_not_copied; + char *cmd_buf_tmp; + char *subtoken; + int ret; + + bytes_not_copied = copy_from_user(cmd_buf, buffer, *count); + if (bytes_not_copied) + return -EFAULT; + + cmd_buf[*count] = '\0'; + cmd_buf_tmp = strchr(cmd_buf, '\n'); + + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + *count = cmd_buf_tmp - cmd_buf + 1; + } + + subtoken = strsep(&cmd_buf, " "); + ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL; + if (ret < 0) + return ret; + subtoken = strsep(&cmd_buf, " "); + if (subtoken && strcmp(subtoken, "all") == 0) { + *all = true; + } else { + ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL; + if (ret < 0) + return ret; + } + if (cmd_buf) + return -EINVAL; + return ret; +} + +static ssize_t rvu_dbg_npa_ctx_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos, int ctype) +{ + char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ? + "aura" : "pool"; + struct seq_file *seqfp = filp->private_data; + struct rvu *rvu = seqfp->private; + int npalf, id = 0, ret; + bool all = false; + + if ((*ppos != 0) || !count) + return -EINVAL; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return count; + ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer, + &npalf, &id, &all); + if (ret < 0) { + dev_info(rvu->dev, + "Usage: echo <npalf> [%s number/all] > %s_ctx\n", + ctype_string, ctype_string); + goto done; + } else { + ret = write_npa_ctx(rvu, all, npalf, id, ctype); + } +done: + kfree(cmd_buf); + return ret ? 
ret : count; +} + +static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos, + NPA_AQ_CTYPE_AURA); +} + +static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA); +} + +RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write); + +static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos, + NPA_AQ_CTYPE_POOL); +} + +static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL); +} + +RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write); + +static void ndc_cache_stats(struct seq_file *s, int blk_addr, + int ctype, int transaction) +{ + u64 req, out_req, lat, cant_alloc; + struct rvu *rvu = s->private; + int port; + + for (port = 0; port < NDC_MAX_PORT; port++) { + req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC + (port, ctype, transaction)); + lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC + (port, ctype, transaction)); + out_req = rvu_read64(rvu, blk_addr, + NDC_AF_PORTX_RTX_RWX_OSTDN_PC + (port, ctype, transaction)); + cant_alloc = rvu_read64(rvu, blk_addr, + NDC_AF_PORTX_RTX_CANT_ALLOC_PC + (port, transaction)); + seq_printf(s, "\nPort:%d\n", port); + seq_printf(s, "\tTotal Requests:\t\t%lld\n", req); + seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat); + seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req); + seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req); + seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc); + } +} + +static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr) +{ + seq_puts(s, "\n***** CACHE mode read stats *****\n"); + ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS); + seq_puts(s, "\n***** CACHE mode write stats *****\n"); + ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS); + seq_puts(s, "\n***** BY-PASS mode read stats *****\n"); + ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS); + seq_puts(s, "\n***** BY-PASS mode write stats *****\n"); + ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS); + return 0; +} + +static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused) +{ + return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0); +} + +RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL); + +static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr) +{ + struct rvu *rvu = s->private; + int bank, max_bank; + + max_bank = NDC_MAX_BANK(rvu, blk_addr); + for (bank = 0; bank < max_bank; bank++) { + seq_printf(s, "BANK:%d\n", bank); + seq_printf(s, "\tHits:\t%lld\n", + (u64)rvu_read64(rvu, blk_addr, + NDC_AF_BANKX_HIT_PC(bank))); + seq_printf(s, "\tMiss:\t%lld\n", + (u64)rvu_read64(rvu, blk_addr, + NDC_AF_BANKX_MISS_PC(bank))); + } + return 0; +} + +static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused) +{ + return ndc_blk_cache_stats(filp, NIX0_RX, + BLKADDR_NDC_NIX0_RX); +} + +RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL); + +static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused) +{ + return ndc_blk_cache_stats(filp, NIX0_TX, + BLKADDR_NDC_NIX0_TX); +} + +RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, 
nix_ndc_tx_cache_display, NULL); + +static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp, + void *unused) +{ + return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0); +} + +RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL); + +static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp, + void *unused) +{ + return ndc_blk_hits_miss_stats(filp, + NPA0_U, BLKADDR_NDC_NIX0_RX); +} + +RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL); + +static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp, + void *unused) +{ + return ndc_blk_hits_miss_stats(filp, + NPA0_U, BLKADDR_NDC_NIX0_TX); +} + +RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL); + +/* Dumps given nix_sq's context */ +static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) +{ + struct nix_sq_ctx_s *sq_ctx = &rsp->sq; + + seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n", + sq_ctx->sqe_way_mask, sq_ctx->cq); + seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n", + sq_ctx->sdp_mcast, sq_ctx->substream); + seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n", + sq_ctx->qint_idx, sq_ctx->ena); + + seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n", + sq_ctx->sqb_count, sq_ctx->default_chan); + seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n", + sq_ctx->smq_rr_quantum, sq_ctx->sso_ena); + seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n", + sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq); + + seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n", + sq_ctx->sqe_stype, sq_ctx->sq_int_ena); + seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n", + sq_ctx->sq_int, sq_ctx->sqb_aura); + seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count); + + seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n", + sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend); + seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n", + sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset); + seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n", + sq_ctx->smenq_offset, sq_ctx->tail_offset); + seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n", + sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq); + seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n", + sq_ctx->mnq_dis, sq_ctx->lmt_dis); + seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n", + sq_ctx->cq_limit, sq_ctx->max_sqe_size); + + seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb); + seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb); + seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb); + seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n", + sq_ctx->smenq_next_sqb); + + seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb); + + seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n", + sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena); + seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n", + sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps); + seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n", + sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1); + seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total); + + seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n", + (u64)sq_ctx->scm_lso_rem); + seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs); + seq_printf(m, "W12: pkts \t\t\t%llu\n\n", 
(u64)sq_ctx->pkts); + seq_printf(m, "W14: dropped_octs \t\t%llu\n\n", + (u64)sq_ctx->dropped_octs); + seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n", + (u64)sq_ctx->dropped_pkts); +} + +/* Dumps given nix_rq's context */ +static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) +{ + struct nix_rq_ctx_s *rq_ctx = &rsp->rq; + + seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n", + rq_ctx->wqe_aura, rq_ctx->substream); + seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n", + rq_ctx->cq, rq_ctx->ena_wqwd); + seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n", + rq_ctx->ipsech_ena, rq_ctx->sso_ena); + seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena); + + seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n", + rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena); + seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n", + rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching); + seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n", + rq_ctx->pb_caching, rq_ctx->sso_tt); + seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n", + rq_ctx->sso_grp, rq_ctx->lpb_aura); + seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura); + + seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n", + rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy); + seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n", + rq_ctx->xqe_imm_size, rq_ctx->later_skip); + seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n", + rq_ctx->first_skip, rq_ctx->lpb_sizem1); + seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n", + rq_ctx->spb_ena, rq_ctx->wqe_skip); + seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1); + + seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n", + rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop); + seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n", + rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop); + seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n", + rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop); + seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n", + rq_ctx->xqe_pass, rq_ctx->xqe_drop); + + seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n", + rq_ctx->qint_idx, rq_ctx->rq_int_ena); + seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n", + rq_ctx->rq_int, rq_ctx->lpb_pool_pass); + seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n", + rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass); + seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop); + + seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n", + rq_ctx->flow_tagw, rq_ctx->bad_utag); + seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n", + rq_ctx->good_utag, rq_ctx->ltag); + + seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs); + seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts); + seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs); + seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts); + seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts); +} + +/* Dumps given nix_cq's context */ +static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) +{ + struct nix_cq_ctx_s *cq_ctx = &rsp->cq; + + seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base); + + seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr); + seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n", + cq_ctx->avg_con, 
cq_ctx->cint_idx); + seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n", + cq_ctx->cq_err, cq_ctx->qint_idx); + seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n", + cq_ctx->bpid, cq_ctx->bp_ena); + + seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n", + cq_ctx->update_time, cq_ctx->avg_level); + seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n", + cq_ctx->head, cq_ctx->tail); + + seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n", + cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int); + seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n", + cq_ctx->qsize, cq_ctx->caching); + seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n", + cq_ctx->substream, cq_ctx->ena); + seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n", + cq_ctx->drop_ena, cq_ctx->drop); + seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp); +} + +static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp, + void *unused, int ctype) +{ + void (*print_nix_ctx)(struct seq_file *filp, + struct nix_aq_enq_rsp *rsp) = NULL; + struct rvu *rvu = filp->private; + struct nix_aq_enq_req aq_req; + struct nix_aq_enq_rsp rsp; + char *ctype_string = NULL; + int qidx, rc, max_id = 0; + struct rvu_pfvf *pfvf; + int nixlf, id, all; + u16 pcifunc; + + switch (ctype) { + case NIX_AQ_CTYPE_CQ: + nixlf = rvu->rvu_dbg.nix_cq_ctx.lf; + id = rvu->rvu_dbg.nix_cq_ctx.id; + all = rvu->rvu_dbg.nix_cq_ctx.all; + break; + + case NIX_AQ_CTYPE_SQ: + nixlf = rvu->rvu_dbg.nix_sq_ctx.lf; + id = rvu->rvu_dbg.nix_sq_ctx.id; + all = rvu->rvu_dbg.nix_sq_ctx.all; + break; + + case NIX_AQ_CTYPE_RQ: + nixlf = rvu->rvu_dbg.nix_rq_ctx.lf; + id = rvu->rvu_dbg.nix_rq_ctx.id; + all = rvu->rvu_dbg.nix_rq_ctx.all; + break; + + default: + return -EINVAL; + } + + if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) { + seq_puts(filp, "SQ context is not initialized\n"); + return -EINVAL; + } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) { + seq_puts(filp, "RQ context is not initialized\n"); + return -EINVAL; + } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) { + seq_puts(filp, "CQ context is not initialized\n"); + return -EINVAL; + } + + if (ctype == NIX_AQ_CTYPE_SQ) { + max_id = pfvf->sq_ctx->qsize; + ctype_string = "sq"; + print_nix_ctx = print_nix_sq_ctx; + } else if (ctype == NIX_AQ_CTYPE_RQ) { + max_id = pfvf->rq_ctx->qsize; + ctype_string = "rq"; + print_nix_ctx = print_nix_rq_ctx; + } else if (ctype == NIX_AQ_CTYPE_CQ) { + max_id = pfvf->cq_ctx->qsize; + ctype_string = "cq"; + print_nix_ctx = print_nix_cq_ctx; + } + + memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); + aq_req.hdr.pcifunc = pcifunc; + aq_req.ctype = ctype; + aq_req.op = NIX_AQ_INSTOP_READ; + if (all) + id = 0; + else + max_id = id + 1; + for (qidx = id; qidx < max_id; qidx++) { + aq_req.qidx = qidx; + seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n", + ctype_string, nixlf, aq_req.qidx); + rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp); + if (rc) { + seq_puts(filp, "Failed to read the context\n"); + return -EINVAL; + } + print_nix_ctx(filp, &rsp); + } + return 0; +} + +static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf, + int id, int ctype, char *ctype_string) +{ + struct rvu_pfvf *pfvf; + int max_id = 0; + u16 pcifunc; + + if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + + if (ctype == 
NIX_AQ_CTYPE_SQ) { + if (!pfvf->sq_ctx) { + dev_warn(rvu->dev, "SQ context is not initialized\n"); + return -EINVAL; + } + max_id = pfvf->sq_ctx->qsize; + } else if (ctype == NIX_AQ_CTYPE_RQ) { + if (!pfvf->rq_ctx) { + dev_warn(rvu->dev, "RQ context is not initialized\n"); + return -EINVAL; + } + max_id = pfvf->rq_ctx->qsize; + } else if (ctype == NIX_AQ_CTYPE_CQ) { + if (!pfvf->cq_ctx) { + dev_warn(rvu->dev, "CQ context is not initialized\n"); + return -EINVAL; + } + max_id = pfvf->cq_ctx->qsize; + } + + if (id < 0 || id >= max_id) { + dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n", + ctype_string, max_id - 1); + return -EINVAL; + } + switch (ctype) { + case NIX_AQ_CTYPE_CQ: + rvu->rvu_dbg.nix_cq_ctx.lf = nixlf; + rvu->rvu_dbg.nix_cq_ctx.id = id; + rvu->rvu_dbg.nix_cq_ctx.all = all; + break; + + case NIX_AQ_CTYPE_SQ: + rvu->rvu_dbg.nix_sq_ctx.lf = nixlf; + rvu->rvu_dbg.nix_sq_ctx.id = id; + rvu->rvu_dbg.nix_sq_ctx.all = all; + break; + + case NIX_AQ_CTYPE_RQ: + rvu->rvu_dbg.nix_rq_ctx.lf = nixlf; + rvu->rvu_dbg.nix_rq_ctx.id = id; + rvu->rvu_dbg.nix_rq_ctx.all = all; + break; + default: + return -EINVAL; + } + return 0; +} + +static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos, + int ctype) +{ + struct seq_file *m = filp->private_data; + struct rvu *rvu = m->private; + char *cmd_buf, *ctype_string; + int nixlf, id = 0, ret; + bool all = false; + + if ((*ppos != 0) || !count) + return -EINVAL; + + switch (ctype) { + case NIX_AQ_CTYPE_SQ: + ctype_string = "sq"; + break; + case NIX_AQ_CTYPE_RQ: + ctype_string = "rq"; + break; + case NIX_AQ_CTYPE_CQ: + ctype_string = "cq"; + break; + default: + return -EINVAL; + } + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + + if (!cmd_buf) + return count; + + ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer, + &nixlf, &id, &all); + if (ret < 0) { + dev_info(rvu->dev, + "Usage: echo <nixlf> [%s number/all] > %s_ctx\n", + ctype_string, ctype_string); + goto done; + } else { + ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype, + ctype_string); + } +done: + kfree(cmd_buf); + return ret ? 
ret : count; +} + +static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, + NIX_AQ_CTYPE_SQ); +} + +static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ); +} + +RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write); + +static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, + NIX_AQ_CTYPE_RQ); +} + +static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ); +} + +RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write); + +static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, + NIX_AQ_CTYPE_CQ); +} + +static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ); +} + +RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write); + +static void print_nix_qctx_qsize(struct seq_file *filp, int qsize, + unsigned long *bmap, char *qtype) +{ + char *buf; + + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return; + + bitmap_print_to_pagebuf(false, buf, bmap, qsize); + seq_printf(filp, "%s context count : %d\n", qtype, qsize); + seq_printf(filp, "%s context ena/dis bitmap : %s\n", + qtype, buf); + kfree(buf); +} + +static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf) +{ + if (!pfvf->cq_ctx) + seq_puts(filp, "cq context is not initialized\n"); + else + print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap, + "cq"); + + if (!pfvf->rq_ctx) + seq_puts(filp, "rq context is not initialized\n"); + else + print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap, + "rq"); + + if (!pfvf->sq_ctx) + seq_puts(filp, "sq context is not initialized\n"); + else + print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap, + "sq"); +} + +static ssize_t rvu_dbg_nix_qsize_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + return rvu_dbg_qsize_write(filp, buffer, count, ppos, + BLKTYPE_NIX); +} + +static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused) +{ + return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX); +} + +RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write); + +static void rvu_dbg_nix_init(struct rvu *rvu) +{ + const struct device *dev = &rvu->pdev->dev; + struct dentry *pfile; + + rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root); + if (!rvu->rvu_dbg.nix) { + dev_err(rvu->dev, "create debugfs dir failed for nix\n"); + return; + } + + pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu, + &rvu_dbg_nix_sq_ctx_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu, + &rvu_dbg_nix_rq_ctx_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu, + &rvu_dbg_nix_cq_ctx_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, rvu, + &rvu_dbg_nix_ndc_tx_cache_fops); + if (!pfile) + goto create_failed; + + pfile = 
debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, rvu, + &rvu_dbg_nix_ndc_rx_cache_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, + rvu, &rvu_dbg_nix_ndc_tx_hits_miss_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, + rvu, &rvu_dbg_nix_ndc_rx_hits_miss_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu, + &rvu_dbg_nix_qsize_fops); + if (!pfile) + goto create_failed; + + return; +create_failed: + dev_err(dev, "Failed to create debugfs dir/file for NIX\n"); + debugfs_remove_recursive(rvu->rvu_dbg.nix); +} + +static void rvu_dbg_npa_init(struct rvu *rvu) +{ + const struct device *dev = &rvu->pdev->dev; + struct dentry *pfile; + + rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root); + if (!rvu->rvu_dbg.npa) + return; + + pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu, + &rvu_dbg_npa_qsize_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu, + &rvu_dbg_npa_aura_ctx_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu, + &rvu_dbg_npa_pool_ctx_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu, + &rvu_dbg_npa_ndc_cache_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, + rvu, &rvu_dbg_npa_ndc_hits_miss_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_err(dev, "Failed to create debugfs dir/file for NPA\n"); + debugfs_remove_recursive(rvu->rvu_dbg.npa); +} + +#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \ + ({ \ + u64 cnt; \ + err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \ + NIX_STATS_RX, &(cnt)); \ + if (!err) \ + seq_printf(s, "%s: %llu\n", name, cnt); \ + cnt; \ + }) + +#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \ + ({ \ + u64 cnt; \ + err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \ + NIX_STATS_TX, &(cnt)); \ + if (!err) \ + seq_printf(s, "%s: %llu\n", name, cnt); \ + cnt; \ + }) + +static int cgx_print_stats(struct seq_file *s, int lmac_id) +{ + struct cgx_link_user_info linfo; + void *cgxd = s->private; + u64 ucast, mcast, bcast; + int stat = 0, err = 0; + u64 tx_stat, rx_stat; + struct rvu *rvu; + + rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); + if (!rvu) + return -ENODEV; + + /* Link status */ + seq_puts(s, "\n=======Link Status======\n\n"); + err = cgx_get_link_info(cgxd, lmac_id, &linfo); + if (err) + seq_puts(s, "Failed to read link status\n"); + seq_printf(s, "\nLink is %s %d Mbps\n\n", + linfo.link_up ? 
"UP" : "DOWN", linfo.speed); + + /* Rx stats */ + seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n"); + ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames"); + if (err) + return err; + mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames"); + if (err) + return err; + bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames"); + if (err) + return err; + seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast); + PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes"); + if (err) + return err; + PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops"); + if (err) + return err; + PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors"); + if (err) + return err; + + /* Tx stats */ + seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n"); + ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames"); + if (err) + return err; + mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames"); + if (err) + return err; + bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames"); + if (err) + return err; + seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast); + PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes"); + if (err) + return err; + PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops"); + if (err) + return err; + + /* Rx stats */ + seq_puts(s, "\n=======CGX RX_STATS======\n\n"); + while (stat < CGX_RX_STATS_COUNT) { + err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat); + if (err) + return err; + seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat); + stat++; + } + + /* Tx stats */ + stat = 0; + seq_puts(s, "\n=======CGX TX_STATS======\n\n"); + while (stat < CGX_TX_STATS_COUNT) { + err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat); + if (err) + return err; + seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat); + stat++; + } + + return err; +} + +static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +{ + struct dentry *current_dir; + int err, lmac_id; + char *buf; + + current_dir = filp->file->f_path.dentry->d_parent; + buf = strrchr(current_dir->d_name.name, 'c'); + if (!buf) + return -EINVAL; + + err = kstrtoint(buf + 1, 10, &lmac_id); + if (!err) { + err = cgx_print_stats(filp, lmac_id); + if (err) + return err; + } + return err; +} + +RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); + +static void rvu_dbg_cgx_init(struct rvu *rvu) +{ + const struct device *dev = &rvu->pdev->dev; + struct dentry *pfile; + int i, lmac_id; + char dname[20]; + void *cgx; + + rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root); + + for (i = 0; i < cgx_get_cgxcnt_max(); i++) { + cgx = rvu_cgx_pdata(i, rvu); + if (!cgx) + continue; + /* cgx debugfs dir */ + sprintf(dname, "cgx%d", i); + rvu->rvu_dbg.cgx = debugfs_create_dir(dname, + rvu->rvu_dbg.cgx_root); + for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) { + /* lmac debugfs dir */ + sprintf(dname, "lmac%d", lmac_id); + rvu->rvu_dbg.lmac = + debugfs_create_dir(dname, rvu->rvu_dbg.cgx); + + pfile = debugfs_create_file("stats", 0600, + rvu->rvu_dbg.lmac, cgx, + &rvu_dbg_cgx_stat_fops); + if (!pfile) + goto create_failed; + } + } + return; + +create_failed: + dev_err(dev, "Failed to create debugfs dir/file for CGX\n"); + debugfs_remove_recursive(rvu->rvu_dbg.cgx_root); +} + +/* NPC debugfs APIs */ +static void rvu_print_npc_mcam_info(struct seq_file *s, + u16 pcifunc, int blkaddr) +{ + struct rvu *rvu = s->private; + int entry_acnt, entry_ecnt; + int cntr_acnt, cntr_ecnt; + + /* Skip PF0 */ + if (!pcifunc) + return; + 
rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr, + &entry_acnt, &entry_ecnt); + rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr, + &cntr_acnt, &cntr_ecnt); + if (!entry_acnt && !cntr_acnt) + return; + + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) + seq_printf(s, "\n\t\t Device \t\t: PF%d\n", + rvu_get_pf(pcifunc)); + else + seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n", + rvu_get_pf(pcifunc), + (pcifunc & RVU_PFVF_FUNC_MASK) - 1); + + if (entry_acnt) { + seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt); + seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt); + } + if (cntr_acnt) { + seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt); + seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt); + } +} + +static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued) +{ + struct rvu *rvu = filp->private; + int pf, vf, numvfs, blkaddr; + struct npc_mcam *mcam; + u16 pcifunc; + u64 cfg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return -ENODEV; + + mcam = &rvu->hw->mcam; + + seq_puts(filp, "\nNPC MCAM info:\n"); + /* MCAM keywidth on receive and transmit sides */ + cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX)); + cfg = (cfg >> 32) & 0x07; + seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ? + "112bits" : ((cfg == NPC_MCAM_KEY_X2) ? + "224bits" : "448bits")); + cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX)); + cfg = (cfg >> 32) & 0x07; + seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ? + "112bits" : ((cfg == NPC_MCAM_KEY_X2) ? + "224bits" : "448bits")); + + mutex_lock(&mcam->lock); + /* MCAM entries */ + seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries); + seq_printf(filp, "\t\t Reserved \t: %d\n", + mcam->total_entries - mcam->bmap_entries); + seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt); + + /* MCAM counters */ + cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST); + cfg = (cfg >> 48) & 0xFFFF; + seq_printf(filp, "\n\t\t MCAM counters \t: %lld\n", cfg); + seq_printf(filp, "\t\t Reserved \t: %lld\n", cfg - mcam->counters.max); + seq_printf(filp, "\t\t Available \t: %d\n", + rvu_rsrc_free_count(&mcam->counters)); + + if (mcam->bmap_entries == mcam->bmap_fcnt) { + mutex_unlock(&mcam->lock); + return 0; + } + + seq_puts(filp, "\n\t\t Current allocation\n"); + seq_puts(filp, "\t\t====================\n"); + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + pcifunc = (pf << RVU_PFVF_PF_SHIFT); + rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); + + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + numvfs = (cfg >> 12) & 0xFF; + for (vf = 0; vf < numvfs; vf++) { + pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1); + rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); + } + } + + mutex_unlock(&mcam->lock); + return 0; +} + +RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL); + +static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp, + void *unused) +{ + struct rvu *rvu = filp->private; + struct npc_mcam *mcam; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return -ENODEV; + + mcam = &rvu->hw->mcam; + + seq_puts(filp, "\nNPC MCAM RX miss action stats\n"); + seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr, + rvu_read64(rvu, blkaddr, + NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr))); + + return 0; +} + +RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL); + +static void rvu_dbg_npc_init(struct rvu *rvu) +{ + 
const struct device *dev = &rvu->pdev->dev; + struct dentry *pfile; + + rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root); + if (!rvu->rvu_dbg.npc) + return; + + pfile = debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, + rvu, &rvu_dbg_npc_mcam_info_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, + rvu, &rvu_dbg_npc_rx_miss_act_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_err(dev, "Failed to create debugfs dir/file for NPC\n"); + debugfs_remove_recursive(rvu->rvu_dbg.npc); +} + +void rvu_dbg_init(struct rvu *rvu) +{ + struct device *dev = &rvu->pdev->dev; + struct dentry *pfile; + + rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL); + if (!rvu->rvu_dbg.root) { + dev_err(rvu->dev, "%s failed\n", __func__); + return; + } + pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu, + &rvu_dbg_rsrc_status_fops); + if (!pfile) + goto create_failed; + + rvu_dbg_npa_init(rvu); + rvu_dbg_nix_init(rvu); + rvu_dbg_cgx_init(rvu); + rvu_dbg_npc_init(rvu); + + return; + +create_failed: + dev_err(dev, "Failed to create debugfs dir\n"); + debugfs_remove_recursive(rvu->rvu_dbg.root); +} + +void rvu_dbg_exit(struct rvu *rvu) +{ + debugfs_remove_recursive(rvu->rvu_dbg.root); +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 4a7609fd6dd0..8a59f7d53fbf 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -64,7 +64,6 @@ enum nix_makr_fmt_indexes { struct mce { struct hlist_node node; - u16 idx; u16 pcifunc; }; @@ -127,17 +126,12 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr) err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); if (err) dev_err(rvu->dev, "NIX RX software sync failed\n"); - - /* As per a HW errata in 9xxx A0 silicon, HW may clear SW_SYNC[ENA] - * bit too early. Hence wait for 50us more. - */ - if (is_rvu_9xxx_A0(rvu)) - usleep_range(50, 60); } static bool is_valid_txschq(struct rvu *rvu, int blkaddr, int lvl, u16 pcifunc, u16 schq) { + struct rvu_hwinfo *hw = rvu->hw; struct nix_txsch *txsch; struct nix_hw *nix_hw; u16 map_func; @@ -155,13 +149,15 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); mutex_unlock(&rvu->rsrc_lock); - /* For TL1 schq, sharing across VF's of same PF is ok */ - if (lvl == NIX_TXSCH_LVL_TL1 && - rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) - return false; + /* TLs aggegating traffic are shared across PF and VFs */ + if (lvl >= hw->cap.nix_tx_aggr_lvl) { + if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) + return false; + else + return true; + } - if (lvl != NIX_TXSCH_LVL_TL1 && - map_func != pcifunc) + if (map_func != pcifunc) return false; return true; @@ -198,6 +194,11 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) break; case NIX_INTF_TYPE_LBK: vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; + + /* Note that AF's VFs work in pairs and talk over consecutive + * loopback channels.Therefore if odd number of AF VFs are + * enabled then the last VF remains with no pair. + */ pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf); pfvf->tx_chan_base = vf & 0x1 ? 
NIX_CHAN_LBK_CHX(0, vf - 1) : NIX_CHAN_LBK_CHX(0, vf + 1); @@ -382,7 +383,8 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, struct rvu_pfvf *pfvf, int nixlf, - int rss_sz, int rss_grps, int hwctx_size) + int rss_sz, int rss_grps, int hwctx_size, + u64 way_mask) { int err, grp, num_indices; @@ -402,7 +404,8 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, /* Config full RSS table size, enable RSS and caching */ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), BIT_ULL(36) | BIT_ULL(4) | - ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE)); + ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) | + way_mask << 20); /* Config RSS group offset and sizes */ for (grp = 0; grp < rss_grps; grp++) rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), @@ -663,6 +666,21 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, return 0; } +static const char *nix_get_ctx_name(int ctype) +{ + switch (ctype) { + case NIX_AQ_CTYPE_CQ: + return "CQ"; + case NIX_AQ_CTYPE_SQ: + return "SQ"; + case NIX_AQ_CTYPE_RQ: + return "RQ"; + case NIX_AQ_CTYPE_RSS: + return "RSS"; + } + return ""; +} + static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); @@ -707,21 +725,60 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) if (rc) { err = rc; dev_err(rvu->dev, "Failed to disable %s:%d context\n", - (req->ctype == NIX_AQ_CTYPE_CQ) ? - "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ? - "RQ" : "SQ"), qidx); + nix_get_ctx_name(req->ctype), qidx); } } return err; } +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING +static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) +{ + struct nix_aq_enq_req lock_ctx_req; + int err; + + if (req->op != NIX_AQ_INSTOP_INIT) + return 0; + + if (req->ctype == NIX_AQ_CTYPE_MCE || + req->ctype == NIX_AQ_CTYPE_DYNO) + return 0; + + memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); + lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; + lock_ctx_req.ctype = req->ctype; + lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; + lock_ctx_req.qidx = req->qidx; + err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); + if (err) + dev_err(rvu->dev, + "PFUNC 0x%x: Failed to lock NIX %s:%d context\n", + req->hdr.pcifunc, + nix_get_ctx_name(req->ctype), req->qidx); + return err; +} + +int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, + struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) +{ + int err; + + err = rvu_nix_aq_enq_inst(rvu, req, rsp); + if (!err) + err = nix_lf_hwctx_lockdown(rvu, req); + return err; +} +#else + int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) { return rvu_nix_aq_enq_inst(rvu, req, rsp); } +#endif int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req, @@ -745,6 +802,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) return NIX_AF_ERR_PARAM; + if (req->way_mask) + req->way_mask &= 0xFFFF; + pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (!pfvf->nixlf || blkaddr < 0) @@ -810,7 +870,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, (u64)pfvf->rq_ctx->iova); /* Set caching and queue count in HW */ - cfg = BIT_ULL(36) | (req->rq_cnt - 1); + cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); /* Alloc NIX SQ 
HW context memory and config the base */ @@ -825,7 +885,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), (u64)pfvf->sq_ctx->iova); - cfg = BIT_ULL(36) | (req->sq_cnt - 1); + + cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); /* Alloc NIX CQ HW context memory and config the base */ @@ -840,13 +901,14 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), (u64)pfvf->cq_ctx->iova); - cfg = BIT_ULL(36) | (req->cq_cnt - 1); + + cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); /* Initialize receive side scaling (RSS) */ hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); - err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, - req->rss_sz, req->rss_grps, hwctx_size); + err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, + req->rss_grps, hwctx_size, req->way_mask); if (err) goto free_mem; @@ -860,7 +922,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), (u64)pfvf->cq_ints_ctx->iova); - rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36)); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), + BIT_ULL(36) | req->way_mask << 20); /* Alloc memory for QINT's HW contexts */ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); @@ -872,7 +936,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), (u64)pfvf->nix_qints_ctx->iova); - rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36)); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), + BIT_ULL(36) | req->way_mask << 20); /* Setup VLANX TPID's. * Use VLAN1 for 802.1Q @@ -1048,6 +1113,9 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, struct rvu_hwinfo *hw = rvu->hw; int link; + if (lvl >= hw->cap.nix_tx_aggr_lvl) + return; + /* Reset TL4's SDP link config */ if (lvl == NIX_TXSCH_LVL_TL4) rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); @@ -1061,83 +1129,185 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); } -static int -rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc, - u16 *schq_list, u16 *schq_cnt) +static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) { - struct nix_txsch *txsch; - struct nix_hw *nix_hw; - struct rvu_pfvf *pfvf; - u8 cgx_id, lmac_id; - u16 schq_base; - u32 *pfvf_map; - int pf, intf; + struct rvu_hwinfo *hw = rvu->hw; + int pf = rvu_get_pf(pcifunc); + u8 cgx_id = 0, lmac_id = 0; - nix_hw = get_nix_hw(rvu->hw, blkaddr); - if (!nix_hw) - return -ENODEV; + if (is_afvf(pcifunc)) {/* LBK links */ + return hw->cgx_links; + } else if (is_pf_cgxmapped(rvu, pf)) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return (cgx_id * hw->lmac_per_cgx) + lmac_id; + } - pfvf = rvu_get_pfvf(rvu, pcifunc); - txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; - pfvf_map = txsch->pfvf_map; - pf = rvu_get_pf(pcifunc); + /* SDP link */ + return hw->cgx_links + hw->lbk_links; +} - /* static allocation as two TL1's per link */ - intf = is_afvf(pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; +static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, + int link, int *start, int *end) +{ + struct rvu_hwinfo *hw = rvu->hw; + int pf = rvu_get_pf(pcifunc); - switch (intf) { - case NIX_INTF_TYPE_CGX: - rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); - schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2; - break; - case NIX_INTF_TYPE_LBK: - schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2; - break; - default: - return -ENODEV; + if (is_afvf(pcifunc)) { /* LBK links */ + *start = hw->cap.nix_txsch_per_cgx_lmac * link; + *end = *start + hw->cap.nix_txsch_per_lbk_lmac; + } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ + *start = hw->cap.nix_txsch_per_cgx_lmac * link; + *end = *start + hw->cap.nix_txsch_per_cgx_lmac; + } else { /* SDP link */ + *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) + + (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links); + *end = *start + hw->cap.nix_txsch_per_sdp_lmac; } +} - if (schq_base + 1 > txsch->schq.max) - return -ENODEV; +static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, + struct nix_hw *nix_hw, + struct nix_txsch_alloc_req *req) +{ + struct rvu_hwinfo *hw = rvu->hw; + int schq, req_schq, free_cnt; + struct nix_txsch *txsch; + int link, start, end; - /* init pfvf_map as we store flags */ - if (pfvf_map[schq_base] == U32_MAX) { - pfvf_map[schq_base] = - TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0); - pfvf_map[schq_base + 1] = - TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0); + txsch = &nix_hw->txsch[lvl]; + req_schq = req->schq_contig[lvl] + req->schq[lvl]; - /* Onetime reset for TL1 */ - nix_reset_tx_linkcfg(rvu, blkaddr, - NIX_TXSCH_LVL_TL1, schq_base); - nix_reset_tx_shaping(rvu, blkaddr, - NIX_TXSCH_LVL_TL1, schq_base); + if (!req_schq) + return 0; - nix_reset_tx_linkcfg(rvu, blkaddr, - NIX_TXSCH_LVL_TL1, schq_base + 1); - nix_reset_tx_shaping(rvu, blkaddr, - NIX_TXSCH_LVL_TL1, schq_base + 1); + link = nix_get_tx_link(rvu, pcifunc); + + /* For traffic aggregating scheduler level, one queue is enough */ + if (lvl >= hw->cap.nix_tx_aggr_lvl) { + if (req_schq != 1) + return NIX_AF_ERR_TLX_ALLOC_FAIL; + return 0; } - if (schq_list && schq_cnt) { - schq_list[0] = schq_base; - schq_list[1] = schq_base + 1; - *schq_cnt = 2; + /* Get free SCHQ count and check if request can be accomodated */ + if (hw->cap.nix_fixed_txschq_mapping) { + nix_get_txschq_range(rvu, pcifunc, link, &start, &end); + schq = start + (pcifunc & RVU_PFVF_FUNC_MASK); + if (end <= txsch->schq.max && schq < end && + !test_bit(schq, txsch->schq.bmap)) + free_cnt = 1; + else + free_cnt = 0; + } else { + free_cnt = rvu_rsrc_free_count(&txsch->schq); } + if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC) + return NIX_AF_ERR_TLX_ALLOC_FAIL; + + /* If contiguous queues are needed, check for availability */ + if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] && + !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) + return NIX_AF_ERR_TLX_ALLOC_FAIL; + return 0; } +static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch, + struct nix_txsch_alloc_rsp *rsp, + int lvl, int start, int end) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = rsp->hdr.pcifunc; + int idx, schq; + + /* For traffic aggregating levels, queue alloc is based + * on transmit link to which PF_FUNC is mapped to. 
+ */ + if (lvl >= hw->cap.nix_tx_aggr_lvl) { + /* A single TL queue is allocated */ + if (rsp->schq_contig[lvl]) { + rsp->schq_contig[lvl] = 1; + rsp->schq_contig_list[lvl][0] = start; + } + + /* Both contig and non-contig reqs doesn't make sense here */ + if (rsp->schq_contig[lvl]) + rsp->schq[lvl] = 0; + + if (rsp->schq[lvl]) { + rsp->schq[lvl] = 1; + rsp->schq_list[lvl][0] = start; + } + return; + } + + /* Adjust the queue request count if HW supports + * only one queue per level configuration. + */ + if (hw->cap.nix_fixed_txschq_mapping) { + idx = pcifunc & RVU_PFVF_FUNC_MASK; + schq = start + idx; + if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) { + rsp->schq_contig[lvl] = 0; + rsp->schq[lvl] = 0; + return; + } + + if (rsp->schq_contig[lvl]) { + rsp->schq_contig[lvl] = 1; + set_bit(schq, txsch->schq.bmap); + rsp->schq_contig_list[lvl][0] = schq; + rsp->schq[lvl] = 0; + } else if (rsp->schq[lvl]) { + rsp->schq[lvl] = 1; + set_bit(schq, txsch->schq.bmap); + rsp->schq_list[lvl][0] = schq; + } + return; + } + + /* Allocate contiguous queue indices requesty first */ + if (rsp->schq_contig[lvl]) { + schq = bitmap_find_next_zero_area(txsch->schq.bmap, + txsch->schq.max, start, + rsp->schq_contig[lvl], 0); + if (schq >= end) + rsp->schq_contig[lvl] = 0; + for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) { + set_bit(schq, txsch->schq.bmap); + rsp->schq_contig_list[lvl][idx] = schq; + schq++; + } + } + + /* Allocate non-contiguous queue indices */ + if (rsp->schq[lvl]) { + idx = 0; + for (schq = start; schq < end; schq++) { + if (!test_bit(schq, txsch->schq.bmap)) { + set_bit(schq, txsch->schq.bmap); + rsp->schq_list[lvl][idx++] = schq; + } + if (idx == rsp->schq[lvl]) + break; + } + /* Update how many were allocated */ + rsp->schq[lvl] = idx; + } +} + int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, struct nix_txsch_alloc_req *req, struct nix_txsch_alloc_rsp *rsp) { + struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; + int link, blkaddr, rc = 0; + int lvl, idx, start, end; struct nix_txsch *txsch; - int lvl, idx, req_schq; struct rvu_pfvf *pfvf; struct nix_hw *nix_hw; - int blkaddr, rc = 0; u32 *pfvf_map; u16 schq; @@ -1151,83 +1321,66 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, return -EINVAL; mutex_lock(&rvu->rsrc_lock); - for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { - txsch = &nix_hw->txsch[lvl]; - req_schq = req->schq_contig[lvl] + req->schq[lvl]; - pfvf_map = txsch->pfvf_map; - - if (!req_schq) - continue; - /* There are only 28 TL1s */ - if (lvl == NIX_TXSCH_LVL_TL1) { - if (req->schq_contig[lvl] || - req->schq[lvl] > 2 || - rvu_get_tl1_schqs(rvu, blkaddr, - pcifunc, NULL, NULL)) - goto err; - continue; - } - - /* Check if request is valid */ - if (req_schq > MAX_TXSCHQ_PER_FUNC) - goto err; - - /* If contiguous queues are needed, check for availability */ - if (req->schq_contig[lvl] && - !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) - goto err; - - /* Check if full request can be accommodated */ - if (req_schq >= rvu_rsrc_free_count(&txsch->schq)) + /* Check if request is valid as per HW capabilities + * and can be accomodated. 
+ */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); + if (rc) goto err; } + /* Allocate requested Tx scheduler queues */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { txsch = &nix_hw->txsch[lvl]; - rsp->schq_contig[lvl] = req->schq_contig[lvl]; pfvf_map = txsch->pfvf_map; - rsp->schq[lvl] = req->schq[lvl]; if (!req->schq[lvl] && !req->schq_contig[lvl]) continue; - /* Handle TL1 specially as it is - * allocation is restricted to 2 TL1's - * per link - */ + rsp->schq[lvl] = req->schq[lvl]; + rsp->schq_contig[lvl] = req->schq_contig[lvl]; - if (lvl == NIX_TXSCH_LVL_TL1) { - rsp->schq_contig[lvl] = 0; - rvu_get_tl1_schqs(rvu, blkaddr, pcifunc, - &rsp->schq_list[lvl][0], - &rsp->schq[lvl]); - continue; + link = nix_get_tx_link(rvu, pcifunc); + + if (lvl >= hw->cap.nix_tx_aggr_lvl) { + start = link; + end = link; + } else if (hw->cap.nix_fixed_txschq_mapping) { + nix_get_txschq_range(rvu, pcifunc, link, &start, &end); + } else { + start = 0; + end = txsch->schq.max; } - /* Alloc contiguous queues first */ - if (req->schq_contig[lvl]) { - schq = rvu_alloc_rsrc_contig(&txsch->schq, - req->schq_contig[lvl]); + nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); - for (idx = 0; idx < req->schq_contig[lvl]; idx++) { + /* Reset queue config */ + for (idx = 0; idx < req->schq_contig[lvl]; idx++) { + schq = rsp->schq_contig_list[lvl][idx]; + if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & + NIX_TXSCHQ_CFG_DONE)) pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); - nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); - nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); - rsp->schq_contig_list[lvl][idx] = schq; - schq++; - } + nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); } - /* Alloc non-contiguous queues */ for (idx = 0; idx < req->schq[lvl]; idx++) { - schq = rvu_alloc_rsrc(&txsch->schq); - pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); + schq = rsp->schq_list[lvl][idx]; + if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & + NIX_TXSCHQ_CFG_DONE)) + pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); - rsp->schq_list[lvl][idx] = schq; } } + + rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; + rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; + rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, + NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? + NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; goto exit; err: rc = NIX_AF_ERR_TLX_ALLOC_FAIL; @@ -1236,13 +1389,50 @@ exit: return rc; } +static void nix_smq_flush(struct rvu *rvu, int blkaddr, + int smq, u16 pcifunc, int nixlf) +{ + int pf = rvu_get_pf(pcifunc); + u8 cgx_id = 0, lmac_id = 0; + int err, restore_tx_en = 0; + u64 cfg; + + /* enable cgx tx if disabled */ + if (is_pf_cgxmapped(rvu, pf)) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), + lmac_id, true); + } + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); + /* Do SMQ flush and set enqueue xoff */ + cfg |= BIT_ULL(50) | BIT_ULL(49); + rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); + + /* Disable backpressure from physical link, + * otherwise SMQ flush may stall. 
+ */ + rvu_cgx_enadis_rx_bp(rvu, pf, false); + + /* Wait for flush to complete */ + err = rvu_poll_reg(rvu, blkaddr, + NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); + if (err) + dev_err(rvu->dev, + "NIXLF%d: SMQ%d flush failed\n", nixlf, smq); + + rvu_cgx_enadis_rx_bp(rvu, pf, true); + /* restore cgx tx state */ + if (restore_tx_en) + cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); +} + static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) { int blkaddr, nixlf, lvl, schq, err; struct rvu_hwinfo *hw = rvu->hw; struct nix_txsch *txsch; struct nix_hw *nix_hw; - u64 cfg; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -1275,26 +1465,15 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) for (schq = 0; schq < txsch->schq.max; schq++) { if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; - cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); - /* Do SMQ flush and set enqueue xoff */ - cfg |= BIT_ULL(50) | BIT_ULL(49); - rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); - - /* Wait for flush to complete */ - err = rvu_poll_reg(rvu, blkaddr, - NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true); - if (err) { - dev_err(rvu->dev, - "NIXLF%d: SMQ%d flush failed\n", nixlf, schq); - } + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); } /* Now free scheduler queues to free pool */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { - /* Free all SCHQ's except TL1 as - * TL1 is shared across all VF's for a RVU PF - */ - if (lvl == NIX_TXSCH_LVL_TL1) + /* TLs above aggregation level are shared across all PF + * and it's VFs, hence skip freeing them. + */ + if (lvl >= hw->cap.nix_tx_aggr_lvl) continue; txsch = &nix_hw->txsch[lvl]; @@ -1302,7 +1481,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; rvu_free_rsrc(&txsch->schq, schq); - txsch->pfvf_map[schq] = 0; + txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); } } mutex_unlock(&rvu->rsrc_lock); @@ -1319,13 +1498,12 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) static int nix_txschq_free_one(struct rvu *rvu, struct nix_txsch_free_req *req) { - int lvl, schq, nixlf, blkaddr, rc; struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; + int lvl, schq, nixlf, blkaddr; struct nix_txsch *txsch; struct nix_hw *nix_hw; u32 *pfvf_map; - u64 cfg; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -1343,10 +1521,8 @@ static int nix_txschq_free_one(struct rvu *rvu, schq = req->schq; txsch = &nix_hw->txsch[lvl]; - /* Don't allow freeing TL1 */ - if (lvl > NIX_TXSCH_LVL_TL2 || - schq >= txsch->schq.max) - goto err; + if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) + return 0; pfvf_map = txsch->pfvf_map; mutex_lock(&rvu->rsrc_lock); @@ -1359,24 +1535,12 @@ static int nix_txschq_free_one(struct rvu *rvu, /* Flush if it is a SMQ. 
Onus of disabling * TL2/3 queue links before SMQ flush is on user */ - if (lvl == NIX_TXSCH_LVL_SMQ) { - cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); - /* Do SMQ flush and set enqueue xoff */ - cfg |= BIT_ULL(50) | BIT_ULL(49); - rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); - - /* Wait for flush to complete */ - rc = rvu_poll_reg(rvu, blkaddr, - NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true); - if (rc) { - dev_err(rvu->dev, - "NIXLF%d: SMQ%d flush failed\n", nixlf, schq); - } - } + if (lvl == NIX_TXSCH_LVL_SMQ) + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); /* Free the resource */ rvu_free_rsrc(&txsch->schq, schq); - txsch->pfvf_map[schq] = 0; + txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); mutex_unlock(&rvu->rsrc_lock); return 0; err: @@ -1393,8 +1557,8 @@ int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, return nix_txschq_free_one(rvu, req); } -static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, - int lvl, u64 reg, u64 regval) +static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, + int lvl, u64 reg, u64 regval) { u64 regbase = reg & 0xFFFF; u16 schq, parent; @@ -1431,79 +1595,82 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, return true; } -static int -nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc) +static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) { - u16 schq_list[2], schq_cnt, schq; - int blkaddr, idx, err = 0; - u16 map_func, map_flags; - struct nix_hw *nix_hw; - u64 reg, regval; - u32 *pfvf_map; - - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; + u64 regbase; - nix_hw = get_nix_hw(rvu->hw, blkaddr); - if (!nix_hw) - return -EINVAL; - - pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; - - mutex_lock(&rvu->rsrc_lock); - - err = rvu_get_tl1_schqs(rvu, blkaddr, - pcifunc, schq_list, &schq_cnt); - if (err) - goto unlock; + if (hw->cap.nix_shaping) + return true; - for (idx = 0; idx < schq_cnt; idx++) { - schq = schq_list[idx]; - map_func = TXSCH_MAP_FUNC(pfvf_map[schq]); - map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]); + /* If shaping and coloring is not supported, then + * *_CIR and *_PIR registers should not be configured. 
+ */ + regbase = reg & 0xFFFF; - /* check if config is already done or this is pf */ - if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE) - continue; + switch (lvl) { + case NIX_TXSCH_LVL_TL1: + if (regbase == NIX_AF_TL1X_CIR(0)) + return false; + break; + case NIX_TXSCH_LVL_TL2: + if (regbase == NIX_AF_TL2X_CIR(0) || + regbase == NIX_AF_TL2X_PIR(0)) + return false; + break; + case NIX_TXSCH_LVL_TL3: + if (regbase == NIX_AF_TL3X_CIR(0) || + regbase == NIX_AF_TL3X_PIR(0)) + return false; + break; + case NIX_TXSCH_LVL_TL4: + if (regbase == NIX_AF_TL4X_CIR(0) || + regbase == NIX_AF_TL4X_PIR(0)) + return false; + break; + } + return true; +} - /* default configuration */ - reg = NIX_AF_TL1X_TOPOLOGY(schq); - regval = (TXSCH_TL1_DFLT_RR_PRIO << 1); - rvu_write64(rvu, blkaddr, reg, regval); - reg = NIX_AF_TL1X_SCHEDULE(schq); - regval = TXSCH_TL1_DFLT_RR_QTM; - rvu_write64(rvu, blkaddr, reg, regval); - reg = NIX_AF_TL1X_CIR(schq); - regval = 0; - rvu_write64(rvu, blkaddr, reg, regval); +static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, + u16 pcifunc, int blkaddr) +{ + u32 *pfvf_map; + int schq; - map_flags |= NIX_TXSCHQ_TL1_CFG_DONE; - pfvf_map[schq] = TXSCH_MAP(map_func, map_flags); - } -unlock: - mutex_unlock(&rvu->rsrc_lock); - return err; + schq = nix_get_tx_link(rvu, pcifunc); + pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; + /* Skip if PF has already done the config */ + if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) + return; + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), + (TXSCH_TL1_DFLT_RR_PRIO << 1)); + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), + TXSCH_TL1_DFLT_RR_QTM); + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); + pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); } int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, struct msg_rsp *rsp) { - u16 schq, pcifunc = req->hdr.pcifunc; struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; u64 reg, regval, schq_regbase; struct nix_txsch *txsch; - u16 map_func, map_flags; struct nix_hw *nix_hw; int blkaddr, idx, err; + int nixlf, schq; u32 *pfvf_map; - int nixlf; if (req->lvl >= NIX_TXSCH_LVL_CNT || req->num_regs > MAX_REGS_PER_MBOX_MSG) return NIX_AF_INVAL_TXSCHQ_CFG; + err = nix_get_nixlf(rvu, pcifunc, &nixlf); + if (err) + return NIX_AF_ERR_AF_LF_INVALID; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; @@ -1512,19 +1679,16 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, if (!nix_hw) return -EINVAL; - nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); - if (nixlf < 0) - return NIX_AF_ERR_AF_LF_INVALID; - txsch = &nix_hw->txsch[req->lvl]; pfvf_map = txsch->pfvf_map; - /* VF is only allowed to trigger - * setting default cfg on TL1 - */ - if (pcifunc & RVU_PFVF_FUNC_MASK && - req->lvl == NIX_TXSCH_LVL_TL1) { - return nix_tl1_default_cfg(rvu, pcifunc); + if (req->lvl >= hw->cap.nix_tx_aggr_lvl && + pcifunc & RVU_PFVF_FUNC_MASK) { + mutex_lock(&rvu->rsrc_lock); + if (req->lvl == NIX_TXSCH_LVL_TL1) + nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); + mutex_unlock(&rvu->rsrc_lock); + return 0; } for (idx = 0; idx < req->num_regs; idx++) { @@ -1532,10 +1696,14 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, regval = req->regval[idx]; schq_regbase = reg & 0xFFFF; - if (!is_txschq_config_valid(rvu, pcifunc, blkaddr, - txsch->lvl, reg, regval)) + if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, + txsch->lvl, reg, regval)) return 
NIX_AF_INVAL_TXSCHQ_CFG; + /* Check if shaping and coloring is supported */ + if (!is_txschq_shaping_valid(hw, req->lvl, reg)) + continue; + /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ if (schq_regbase == NIX_AF_SMQX_CFG(0)) { nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], @@ -1544,32 +1712,36 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, regval |= ((u64)nixlf << 24); } + /* Clear 'BP_ENA' config, if it's not allowed */ + if (!hw->cap.nix_tx_link_bp) { + if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || + (schq_regbase & 0xFF00) == + NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) + regval &= ~BIT_ULL(13); + } + /* Mark config as done for TL1 by PF */ if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); - mutex_lock(&rvu->rsrc_lock); - - map_func = TXSCH_MAP_FUNC(pfvf_map[schq]); - map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]); - - map_flags |= NIX_TXSCHQ_TL1_CFG_DONE; - pfvf_map[schq] = TXSCH_MAP(map_func, map_flags); + pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], + NIX_TXSCHQ_CFG_DONE); mutex_unlock(&rvu->rsrc_lock); } - rvu_write64(rvu, blkaddr, reg, regval); - - /* Check for SMQ flush, if so, poll for its completion */ + /* SMQ flush is special hence split register writes such + * that flush first and write rest of the bits later. + */ if (schq_regbase == NIX_AF_SMQX_CFG(0) && (regval & BIT_ULL(49))) { - err = rvu_poll_reg(rvu, blkaddr, - reg, BIT_ULL(49), true); - if (err) - return NIX_AF_SMQ_FLUSH_FAILED; + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); + regval &= ~BIT_ULL(49); } + rvu_write64(rvu, blkaddr, reg, regval); } + return 0; } @@ -1650,7 +1822,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op, } static int nix_update_mce_list(struct nix_mce_list *mce_list, - u16 pcifunc, int idx, bool add) + u16 pcifunc, bool add) { struct mce *mce, *tail = NULL; bool delete = false; @@ -1679,7 +1851,6 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list, mce = kzalloc(sizeof(*mce), GFP_KERNEL); if (!mce) return -ENOMEM; - mce->idx = idx; mce->pcifunc = pcifunc; if (!tail) hlist_add_head(&mce->node, &mce_list->head); @@ -1691,12 +1862,12 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list, static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) { - int err = 0, idx, next_idx, count; + int err = 0, idx, next_idx, last_idx; struct nix_mce_list *mce_list; - struct mce *mce, *next_mce; struct nix_mcast *mcast; struct nix_hw *nix_hw; struct rvu_pfvf *pfvf; + struct mce *mce; int blkaddr; /* Broadcast pkt replication is not needed for AF's VFs, hence skip */ @@ -1728,31 +1899,31 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) mutex_lock(&mcast->mce_lock); - err = nix_update_mce_list(mce_list, pcifunc, idx, add); + err = nix_update_mce_list(mce_list, pcifunc, add); if (err) goto end; /* Disable MCAM entry in NPC */ - - if (!mce_list->count) + if (!mce_list->count) { + rvu_npc_disable_bcast_entry(rvu, pcifunc); goto end; - count = mce_list->count; + } /* Dump the updated list to HW */ + idx = pfvf->bcast_mce_idx; + last_idx = idx + mce_list->count - 1; hlist_for_each_entry(mce, &mce_list->head, node) { - next_idx = 0; - count--; - if (count) { - next_mce = hlist_entry(mce->node.next, - struct mce, node); - next_idx = next_mce->idx; - } + if (idx > last_idx) + break; + + next_idx = idx + 1; /* EOL should be set in last MCE */ - err = nix_setup_mce(rvu, mce->idx, - 
NIX_AQ_INSTOP_WRITE, mce->pcifunc, - next_idx, count ? false : true); + err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE, + mce->pcifunc, next_idx, + (next_idx > last_idx) ? true : false); if (err) goto end; + idx++; } end: @@ -1849,8 +2020,8 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) { struct nix_txsch *txsch; + int err, lvl, schq; u64 cfg, reg; - int err, lvl; /* Get scheduler queue count of each type and alloc * bitmap for each for alloc/free/attach operations. @@ -1888,7 +2059,8 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) sizeof(u32), GFP_KERNEL); if (!txsch->pfvf_map) return -ENOMEM; - memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32)); + for (schq = 0; schq < txsch->schq.max; schq++) + txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); } return 0; } @@ -2032,51 +2204,82 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) if (field_marker) memset(&tmp, 0, sizeof(tmp)); + field_marker = true; + keyoff_marker = true; switch (key_type) { case NIX_FLOW_KEY_TYPE_PORT: field->sel_chan = true; /* This should be set to 1, when SEL_CHAN is set */ field->bytesm1 = 1; - field_marker = true; - keyoff_marker = true; break; case NIX_FLOW_KEY_TYPE_IPV4: + case NIX_FLOW_KEY_TYPE_INNR_IPV4: field->lid = NPC_LID_LC; field->ltype_match = NPC_LT_LC_IP; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { + field->lid = NPC_LID_LG; + field->ltype_match = NPC_LT_LG_TU_IP; + } field->hdr_offset = 12; /* SIP offset */ field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ field->ltype_mask = 0xF; /* Match only IPv4 */ - field_marker = true; keyoff_marker = false; break; case NIX_FLOW_KEY_TYPE_IPV6: + case NIX_FLOW_KEY_TYPE_INNR_IPV6: field->lid = NPC_LID_LC; field->ltype_match = NPC_LT_LC_IP6; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { + field->lid = NPC_LID_LG; + field->ltype_match = NPC_LT_LG_TU_IP6; + } field->hdr_offset = 8; /* SIP offset */ field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ field->ltype_mask = 0xF; /* Match only IPv6 */ - field_marker = true; - keyoff_marker = true; break; case NIX_FLOW_KEY_TYPE_TCP: case NIX_FLOW_KEY_TYPE_UDP: case NIX_FLOW_KEY_TYPE_SCTP: + case NIX_FLOW_KEY_TYPE_INNR_TCP: + case NIX_FLOW_KEY_TYPE_INNR_UDP: + case NIX_FLOW_KEY_TYPE_INNR_SCTP: field->lid = NPC_LID_LD; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || + key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || + key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) + field->lid = NPC_LID_LH; field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ - if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) { + + /* Enum values for NPC_LID_LD and NPC_LID_LG are same, + * so no need to change the ltype_match, just change + * the lid for inner protocols + */ + BUILD_BUG_ON((int)NPC_LT_LD_TCP != + (int)NPC_LT_LH_TU_TCP); + BUILD_BUG_ON((int)NPC_LT_LD_UDP != + (int)NPC_LT_LH_TU_UDP); + BUILD_BUG_ON((int)NPC_LT_LD_SCTP != + (int)NPC_LT_LH_TU_SCTP); + + if ((key_type == NIX_FLOW_KEY_TYPE_TCP || + key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && + valid_key) { field->ltype_match |= NPC_LT_LD_TCP; group_member = true; - } else if (key_type == NIX_FLOW_KEY_TYPE_UDP && + } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || + key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && valid_key) { field->ltype_match |= NPC_LT_LD_UDP; group_member = true; - } else if (key_type == NIX_FLOW_KEY_TYPE_SCTP && + } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || + key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) 
&& valid_key) { field->ltype_match |= NPC_LT_LD_SCTP; group_member = true; } field->ltype_mask = ~field->ltype_match; - if (key_type == NIX_FLOW_KEY_TYPE_SCTP) { + if (key_type == NIX_FLOW_KEY_TYPE_SCTP || + key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { /* Handle the case where any of the group item * is enabled in the group but not the final one */ @@ -2084,13 +2287,73 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) valid_key = true; group_member = false; } - field_marker = true; - keyoff_marker = true; } else { field_marker = false; keyoff_marker = false; } break; + case NIX_FLOW_KEY_TYPE_NVGRE: + field->lid = NPC_LID_LD; + field->hdr_offset = 4; /* VSID offset */ + field->bytesm1 = 2; + field->ltype_match = NPC_LT_LD_NVGRE; + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_VXLAN: + case NIX_FLOW_KEY_TYPE_GENEVE: + field->lid = NPC_LID_LE; + field->bytesm1 = 2; + field->hdr_offset = 4; + field->ltype_mask = 0xF; + field_marker = false; + keyoff_marker = false; + + if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { + field->ltype_match |= NPC_LT_LE_VXLAN; + group_member = true; + } + + if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { + field->ltype_match |= NPC_LT_LE_GENEVE; + group_member = true; + } + + if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { + if (group_member) { + field->ltype_mask = ~field->ltype_match; + field_marker = true; + keyoff_marker = true; + valid_key = true; + group_member = false; + } + } + break; + case NIX_FLOW_KEY_TYPE_ETH_DMAC: + case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: + field->lid = NPC_LID_LA; + field->ltype_match = NPC_LT_LA_ETHER; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { + field->lid = NPC_LID_LF; + field->ltype_match = NPC_LT_LF_TU_ETHER; + } + field->hdr_offset = 0; + field->bytesm1 = 5; /* DMAC 6 Byte */ + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_IPV6_EXT: + field->lid = NPC_LID_LC; + field->hdr_offset = 40; /* IPV6 hdr */ + field->bytesm1 = 0; /* 1 Byte ext hdr*/ + field->ltype_match = NPC_LT_LC_IP6_EXT; + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_GTPU: + field->lid = NPC_LID_LE; + field->hdr_offset = 4; + field->bytesm1 = 3; /* 4 bytes TID*/ + field->ltype_match = NPC_LT_LE_GTPU; + field->ltype_mask = 0xF; + break; } field->ena = 1; @@ -2449,8 +2712,6 @@ linkcfg: cfg &= ~(0xFFFFFULL << 12); cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); - rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg); - return 0; } @@ -2591,9 +2852,6 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); - rvu_write64(rvu, blkaddr, - NIX_AF_TX_LINKX_EXPR_CREDIT(link), - tx_credits); } } @@ -2605,8 +2863,6 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); - rvu_write64(rvu, blkaddr, - NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits); } } @@ -2674,6 +2930,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) /* Do not bypass NDC cache */ cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); cfg &= ~0x3FFEULL; +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING + /* Disable caching of SQB aka SQEs */ + cfg |= 0x04ULL; +#endif rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); /* Result structure can be followed by RQ/SQ/CQ context at @@ -2704,13 +2964,25 @@ int rvu_nix_init(struct rvu 
*rvu) return 0; block = &hw->block[blkaddr]; - /* As per a HW errata in 9xxx A0 silicon, NIX may corrupt - * internal state when conditional clocks are turned off. - * Hence enable them. - */ - if (is_rvu_9xxx_A0(rvu)) + if (is_rvu_96xx_B0(rvu)) { + /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt + * internal state when conditional clocks are turned off. + * Hence enable them. + */ rvu_write64(rvu, blkaddr, NIX_AF_CFG, - rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL); + rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); + + /* Set chan/link to backpressure TL3 instead of TL2 */ + rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); + + /* Disable SQ manager's sticky mode operation (set TM6 = 0) + * This sticky mode is known to cause SQ stalls when multiple + * SQs are mapped to same SMQ and transmitting pkts at a time. + */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); + cfg &= ~BIT_ULL(15); + rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); + } /* Calibrate X2P bus to check if CGX/LBK links are fine */ err = nix_calibrate_x2p(rvu, blkaddr); @@ -2763,23 +3035,23 @@ int rvu_nix_init(struct rvu *rvu) rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, - (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F); + (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, - (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F); + (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP6 << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, - (NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F); + (NPC_LID_LH << 8) | (NPC_LT_LH_TU_TCP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, - (NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F); + (NPC_LID_LH << 8) | (NPC_LT_LH_TU_UDP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, - (NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) | + (NPC_LID_LH << 8) | (NPC_LT_LH_TU_SCTP << 4) | 0x0F); err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); @@ -2825,7 +3097,7 @@ void rvu_nix_freemem(struct rvu *rvu) } } -static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf) +int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct rvu_hwinfo *hw = rvu->hw; @@ -2853,7 +3125,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, return err; rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); - return 0; + + return rvu_cgx_start_stop_io(rvu, pcifunc, true); } int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, @@ -2867,7 +3140,8 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, return err; rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); - return 0; + + return rvu_cgx_start_stop_io(rvu, pcifunc, false); } void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) @@ -2883,6 +3157,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) nix_rx_sync(rvu, blkaddr); nix_txschq_free(rvu, pcifunc); + rvu_cgx_start_stop_io(rvu, pcifunc, 
false); + if (pfvf->sq_ctx) { ctx_req.ctype = NIX_AQ_CTYPE_SQ; err = nix_lf_hwctx_disable(rvu, &ctx_req); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c index c0e165dfc403..6e7c7f459f74 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c @@ -52,8 +52,8 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, return 0; } -static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req, - struct npa_aq_enq_rsp *rsp) +int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req, + struct npa_aq_enq_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; @@ -241,12 +241,50 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) return err; } +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING +static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req) +{ + struct npa_aq_enq_req lock_ctx_req; + int err; + + if (req->op != NPA_AQ_INSTOP_INIT) + return 0; + + memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req)); + lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; + lock_ctx_req.ctype = req->ctype; + lock_ctx_req.op = NPA_AQ_INSTOP_LOCK; + lock_ctx_req.aura_id = req->aura_id; + err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL); + if (err) + dev_err(rvu->dev, + "PFUNC 0x%x: Failed to lock NPA context %s:%d\n", + req->hdr.pcifunc, + (req->ctype == NPA_AQ_CTYPE_AURA) ? + "Aura" : "Pool", req->aura_id); + return err; +} + +int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu, + struct npa_aq_enq_req *req, + struct npa_aq_enq_rsp *rsp) +{ + int err; + + err = rvu_npa_aq_enq_inst(rvu, req, rsp); + if (!err) + err = npa_lf_hwctx_lockdown(rvu, req); + return err; +} +#else + int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu, struct npa_aq_enq_req *req, struct npa_aq_enq_rsp *rsp) { return rvu_npa_aq_enq_inst(rvu, req, rsp); } +#endif int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req, @@ -289,6 +327,9 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu, req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools) return NPA_AF_ERR_PARAM; + if (req->way_mask) + req->way_mask &= 0xFFFF; + pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); if (!pfvf->npalf || blkaddr < 0) @@ -345,7 +386,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu, /* Clear way partition mask and set aura offset to '0' */ cfg &= ~(BIT_ULL(34) - 1); /* Set aura size & enable caching of contexts */ - cfg |= (req->aura_sz << 16) | BIT_ULL(34); + cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask; + rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg); /* Configure aura HW context's base */ @@ -353,7 +395,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu, (u64)pfvf->aura_ctx->iova); /* Enable caching of qints hw context */ - rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36)); + rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), + BIT_ULL(36) | req->way_mask << 20); rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf), (u64)pfvf->npa_qints_ctx->iova); @@ -422,6 +465,10 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) /* Do not bypass NDC cache */ cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG); cfg &= ~0x03DULL; +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING + /* Disable caching of stack pages */ + cfg |= 0x10ULL; +#endif rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); /* Result structure can 
be followed by Aura/Pool context at diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 15f70273e29c..40e431debbe9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -120,6 +120,31 @@ static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, } } +static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index) +{ + int bank = npc_get_bank(mcam, index); + int actbank = bank; + + index &= (mcam->banksize - 1); + for (; bank < (actbank + mcam->banks_per_entry); bank++) { + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0); + + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0); + + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0); + } +} + static void npc_get_keyword(struct mcam_entry *entry, int idx, u64 *cam0, u64 *cam1) { @@ -211,6 +236,12 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, actindex = index; index &= (mcam->banksize - 1); + /* Disable before mcam entry update */ + npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false); + + /* Clear mcam entry to avoid writes being suppressed by NPC */ + npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex); + /* CAM1 takes the comparison value and * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'. * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0 @@ -251,8 +282,6 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, /* Enable the entry */ if (enable) npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true); - else - npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false); } static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, @@ -354,8 +383,8 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, NIX_INTF_RX, &entry, true); /* add VLAN matching, setup action and save entry back for later */ - entry.kw[0] |= (NPC_LT_LB_STAG | NPC_LT_LB_CTAG) << 20; - entry.kw_mask[0] |= (NPC_LT_LB_STAG & NPC_LT_LB_CTAG) << 20; + entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20; + entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20; entry.vtag_action = VTAG0_VALID_BIT | FIELD_PREP(VTAG0_TYPE_MASK, 0) | @@ -448,68 +477,75 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, { struct npc_mcam *mcam = &rvu->hw->mcam; struct mcam_entry entry = { {0} }; + struct rvu_hwinfo *hw = rvu->hw; struct nix_rx_action action; -#ifdef MCAST_MCE struct rvu_pfvf *pfvf; -#endif int blkaddr, index; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; - /* Only PF can add a bcast match entry */ - if (pcifunc & RVU_PFVF_FUNC_MASK) + /* Skip LBK VFs */ + if (is_afvf(pcifunc)) return; -#ifdef MCAST_MCE - pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); -#endif + /* If pkt replication is not supported, + * then only PF is allowed to add a bcast match entry. 
+ */ + if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK) + return; + + /* Get 'pcifunc' of PF device */ + pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_BCAST_ENTRY); - /* Check for L2B bit and LMAC channel - * NOTE: Since MKEX default profile(a reduced version intended to - * accommodate more capability but igoring few bits) a stap-gap - * approach. - * Since we care for L2B which by HRM NPC_PARSE_KEX_S at BIT_POS[25], So - * moved to BIT_POS[13], ignoring ERRCODE, ERRLEV as we'll loose out - * on capability features needed for CoS (/from ODP PoV) e.g: VLAN, - * DSCP. - * - * Reduced layout of MKEX default profile - - * Includes following are (i.e.CHAN, L2/3{B/M}, LA, LB, LC, LD): - * - * BIT_POS[31:28] : LD - * BIT_POS[27:24] : LC - * BIT_POS[23:20] : LB - * BIT_POS[19:16] : LA - * BIT_POS[15:12] : L3B, L3M, L2B, L2M - * BIT_POS[11:00] : CHAN - * + /* Match ingress channel */ + entry.kw[0] = chan; + entry.kw_mask[0] = 0xfffull; + + /* Match broadcast MAC address. + * DMAC is extracted at 0th bit of PARSE_KEX::KW1 */ - entry.kw[0] = BIT_ULL(13) | chan; - entry.kw_mask[0] = BIT_ULL(13) | 0xFFFULL; + entry.kw[1] = 0xffffffffffffull; + entry.kw_mask[1] = 0xffffffffffffull; *(u64 *)&action = 0x00; -#ifdef MCAST_MCE - /* Early silicon doesn't support pkt replication, - * so install entry with UCAST action, so that PF - * receives all broadcast packets. - */ - action.op = NIX_RX_ACTIONOP_MCAST; - action.pf_func = pcifunc; - action.index = pfvf->bcast_mce_idx; -#else - action.op = NIX_RX_ACTIONOP_UCAST; - action.pf_func = pcifunc; -#endif + if (!hw->cap.nix_rx_multicast) { + /* Early silicon doesn't support pkt replication, + * so install entry with UCAST action, so that PF + * receives all broadcast packets. + */ + action.op = NIX_RX_ACTIONOP_UCAST; + action.pf_func = pcifunc; + } else { + pfvf = rvu_get_pfvf(rvu, pcifunc); + action.index = pfvf->bcast_mce_idx; + action.op = NIX_RX_ACTIONOP_MCAST; + } entry.action = *(u64 *)&action; npc_config_mcam_entry(rvu, mcam, blkaddr, index, NIX_INTF_RX, &entry, true); } +void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr, index; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + /* Get 'pcifunc' of PF device */ + pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; + + index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY); + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false); +} + void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, int group, int alg_idx, int mcam_index) { @@ -704,8 +740,7 @@ static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr) /* Layer B: Stacked VLAN (STAG|QinQ) */ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */ cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4); - SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG, 0, cfg); - SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_QINQ, 0, cfg); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, cfg); /* Layer C: IPv4 */ /* SIP+DIP: 8 bytes, KW2[63:0] */ @@ -806,11 +841,11 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr) /* Compare with mkex mod_param name string */ if (mcam_kex->mkex_sign == MKEX_SIGN && !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) { - /* Due to an errata (35786) in A0 pass silicon, + /* Due to an errata (35786) in A0/B0 pass silicon, * parse nibble enable configuration has to be * identical for both Rx and Tx interfaces. 
*/ - if (is_rvu_9xxx_A0(rvu) && + if (is_rvu_96xx_B0(rvu) && mcam_kex->keyx_cfg[NIX_INTF_RX] != mcam_kex->keyx_cfg[NIX_INTF_TX]) goto load_default; @@ -1064,6 +1099,13 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) mcam->hprio_count = mcam->lprio_count; mcam->hprio_end = mcam->hprio_count; + /* Reserve last counter for MCAM RX miss action which is set to + * drop pkt. This way we will know how many pkts didn't match + * any MCAM entry. + */ + mcam->counters.max--; + mcam->rx_miss_act_cntr = mcam->counters.max; + /* Allocate bitmap for managing MCAM counters and memory * for saving counter to RVU PFFUNC allocation mapping. */ @@ -1101,6 +1143,7 @@ free_mem: int rvu_npc_init(struct rvu *rvu) { struct npc_pkind *pkind = &rvu->hw->pkind; + struct npc_mcam *mcam = &rvu->hw->mcam; u64 keyz = NPC_MCAM_KEY_X2; int blkaddr, entry, bank, err; u64 cfg, nibble_ena; @@ -1143,7 +1186,7 @@ int rvu_npc_init(struct rvu *rvu) /* Config Inner IPV4 NPC layer info */ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4, - (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F); + (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F); /* Enable below for Rx pkts. * - Outer IPv4 header checksum validation. @@ -1165,7 +1208,7 @@ int rvu_npc_init(struct rvu *rvu) /* Due to an errata (35786) in A0 pass silicon, parse nibble enable * configuration has to be identical for both Rx and Tx interfaces. */ - if (!is_rvu_9xxx_A0(rvu)) + if (!is_rvu_96xx_B0(rvu)) nibble_ena = (1ULL << 19) - 1; rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), ((keyz & 0x3) << 32) | nibble_ena); @@ -1183,9 +1226,13 @@ int rvu_npc_init(struct rvu *rvu) rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX), NIX_TX_ACTIONOP_UCAST_DEFAULT); - /* If MCAM lookup doesn't result in a match, drop the received packet */ + /* If MCAM lookup doesn't result in a match, drop the received packet. + * And map this action to a counter to count dropped pkts. 
+ */ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX), NIX_RX_ACTIONOP_DROP); + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX), + BIT_ULL(9) | mcam->rx_miss_act_cntr); return 0; } @@ -1200,6 +1247,44 @@ void rvu_npc_freemem(struct rvu *rvu) mutex_destroy(&mcam->lock); } +void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, + int blkaddr, int *alloc_cnt, + int *enable_cnt) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int entry; + + *alloc_cnt = 0; + *enable_cnt = 0; + + for (entry = 0; entry < mcam->bmap_entries; entry++) { + if (mcam->entry2pfvf_map[entry] == pcifunc) { + (*alloc_cnt)++; + if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry)) + (*enable_cnt)++; + } + } +} + +void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, + int blkaddr, int *alloc_cnt, + int *enable_cnt) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int cntr; + + *alloc_cnt = 0; + *enable_cnt = 0; + + for (cntr = 0; cntr < mcam->counters.max; cntr++) { + if (mcam->cntr2pfvf_map[cntr] == pcifunc) { + (*alloc_cnt)++; + if (mcam->cntr_refcnt[cntr]) + (*enable_cnt)++; + } + } +} + static int npc_mcam_verify_entry(struct npc_mcam *mcam, u16 pcifunc, int entry) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 09a8d61f3144..7ca599b973c0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Marvell OcteonTx2 RVU Admin Function driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver * * Copyright (C) 2018 Marvell International Ltd. * @@ -246,6 +246,7 @@ #define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3) #define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16) +#define NIX_AF_SQM_DBG_CTL_STATUS (0x750) #define NIX_AF_PSE_CHANNEL_LEVEL (0x800) #define NIX_AF_PSE_SHAPER_CFG (0x810) #define NIX_AF_TX_EXPR_CREDIT (0x830) @@ -435,7 +436,6 @@ #define CPT_AF_LF_RST (0x44000) #define CPT_AF_BLK_RST (0x46000) -#define NDC_AF_BLK_RST (0x002F0) #define NPC_AF_BLK_RST (0x00040) /* NPC */ @@ -499,4 +499,30 @@ #define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4) #define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4) +/* NDC */ +#define NDC_AF_CONST (0x00000) +#define NDC_AF_CLK_EN (0x00020) +#define NDC_AF_CTL (0x00030) +#define NDC_AF_BANK_CTL (0x00040) +#define NDC_AF_BANK_CTL_DONE (0x00048) +#define NDC_AF_INTR (0x00058) +#define NDC_AF_INTR_W1S (0x00060) +#define NDC_AF_INTR_ENA_W1S (0x00068) +#define NDC_AF_INTR_ENA_W1C (0x00070) +#define NDC_AF_ACTIVE_PC (0x00078) +#define NDC_AF_BP_TEST_ENABLE (0x001F8) +#define NDC_AF_BP_TEST(a) (0x00200 | (a) << 3) +#define NDC_AF_BLK_RST (0x002F0) +#define NDC_PRIV_AF_INT_CFG (0x002F8) +#define NDC_AF_HASHX(a) (0x00300 | (a) << 3) +#define NDC_AF_PORTX_RTX_RWX_REQ_PC(a, b, c) \ + (0x00C00 | (a) << 5 | (b) << 4 | (c) << 3) +#define NDC_AF_PORTX_RTX_RWX_OSTDN_PC(a, b, c) \ + (0x00D00 | (a) << 5 | (b) << 4 | (c) << 3) +#define NDC_AF_PORTX_RTX_RWX_LAT_PC(a, b, c) \ + (0x00E00 | (a) << 5 | (b) << 4 | (c) << 3) +#define NDC_AF_PORTX_RTX_CANT_ALLOC_PC(a, b) \ + (0x00F00 | (a) << 5 | (b) << 4) +#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3) +#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3) #endif /* RVU_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index f920dac74e6c..9d8942acc232 100644 --- 
a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Marvell OcteonTx2 RVU Admin Function driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Admin Function driver * * Copyright (C) 2018 Marvell International Ltd. * @@ -13,22 +13,22 @@ /* RVU Block Address Enumeration */ enum rvu_block_addr_e { - BLKADDR_RVUM = 0x0ULL, - BLKADDR_LMT = 0x1ULL, - BLKADDR_MSIX = 0x2ULL, - BLKADDR_NPA = 0x3ULL, - BLKADDR_NIX0 = 0x4ULL, - BLKADDR_NIX1 = 0x5ULL, - BLKADDR_NPC = 0x6ULL, - BLKADDR_SSO = 0x7ULL, - BLKADDR_SSOW = 0x8ULL, - BLKADDR_TIM = 0x9ULL, - BLKADDR_CPT0 = 0xaULL, - BLKADDR_CPT1 = 0xbULL, - BLKADDR_NDC0 = 0xcULL, - BLKADDR_NDC1 = 0xdULL, - BLKADDR_NDC2 = 0xeULL, - BLK_COUNT = 0xfULL, + BLKADDR_RVUM = 0x0ULL, + BLKADDR_LMT = 0x1ULL, + BLKADDR_MSIX = 0x2ULL, + BLKADDR_NPA = 0x3ULL, + BLKADDR_NIX0 = 0x4ULL, + BLKADDR_NIX1 = 0x5ULL, + BLKADDR_NPC = 0x6ULL, + BLKADDR_SSO = 0x7ULL, + BLKADDR_SSOW = 0x8ULL, + BLKADDR_TIM = 0x9ULL, + BLKADDR_CPT0 = 0xaULL, + BLKADDR_CPT1 = 0xbULL, + BLKADDR_NDC_NIX0_RX = 0xcULL, + BLKADDR_NDC_NIX0_TX = 0xdULL, + BLKADDR_NDC_NPA0 = 0xeULL, + BLK_COUNT = 0xfULL, }; /* RVU Block Type Enumeration */ @@ -474,9 +474,9 @@ struct nix_cq_ctx_s { u64 ena : 1; u64 drop_ena : 1; u64 drop : 8; - u64 dp : 8; + u64 bp : 8; #else - u64 dp : 8; + u64 bp : 8; u64 drop : 8; u64 drop_ena : 1; u64 ena : 1; diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 51b77c2de400..3fb7ee3d4d13 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1489,8 +1489,10 @@ static int pxa168_eth_probe(struct platform_device *pdev) goto err_netdev; } of_property_read_u32(np, "reg", &pep->phy_addr); - pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); of_node_put(np); + err = of_get_phy_mode(pdev->dev.of_node, &pep->phy_intf); + if (err && err != -ENODEV) + goto err_netdev; } /* Hardware supports only 3 ports */ diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 703adb96429e..1923ba76a1ec 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2180,6 +2180,31 @@ static int mtk_start_dma(struct mtk_eth *eth) return 0; } +static void mtk_gdm_config(struct mtk_eth *eth, u32 config) +{ + int i; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + return; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); + + /* default setup the forward port to send frame to PDMA */ + val &= ~0xffff; + + /* Enable RX checksum */ + val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; + + val |= config; + + mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); + } + /* Reset and enable PSE */ + mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); + mtk_w32(eth, 0, MTK_RST_GL); +} + static int mtk_open(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); @@ -2200,6 +2225,8 @@ static int mtk_open(struct net_device *dev) if (err) return err; + mtk_gdm_config(eth, MTK_GDMA_TO_PDMA); + napi_enable(&eth->tx_napi); napi_enable(&eth->rx_napi); mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); @@ -2252,6 +2279,8 @@ static int mtk_stop(struct net_device *dev) if (!refcount_dec_and_test(&eth->dma_refcnt)) return 0; + mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); + mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); napi_disable(&eth->tx_napi); @@
-2375,8 +2404,6 @@ static int mtk_hw_init(struct mtk_eth *eth) mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); mtk_tx_irq_disable(eth, ~0); mtk_rx_irq_disable(eth, ~0); - mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); - mtk_w32(eth, 0, MTK_RST_GL); /* FE int grouping */ mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1); @@ -2385,19 +2412,6 @@ static int mtk_hw_init(struct mtk_eth *eth) mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2); mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); - for (i = 0; i < MTK_MAC_COUNT; i++) { - u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); - - /* setup the forward port to send frame to PDMA */ - val &= ~0xffff; - - /* Enable RX checksum */ - val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; - - /* setup the mac dma */ - mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); - } - return 0; err_disable_pm: @@ -2758,9 +2772,10 @@ static const struct net_device_ops mtk_netdev_ops = { static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) { const __be32 *_id = of_get_property(np, "reg", NULL); + phy_interface_t phy_mode; struct phylink *phylink; - int phy_mode, id, err; struct mtk_mac *mac; + int id, err; if (!_id) { dev_err(eth->dev, "missing mac id\n"); @@ -2805,10 +2820,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; /* phylink create */ - phy_mode = of_get_phy_mode(np); - if (phy_mode < 0) { + err = of_get_phy_mode(np, &phy_mode); + if (err) { dev_err(eth->dev, "incorrect phy-mode\n"); - err = -EINVAL; goto free_netdev; } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 76bd12cb8150..85830fe14a1b 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -84,6 +84,8 @@ #define MTK_GDMA_ICS_EN BIT(22) #define MTK_GDMA_TCS_EN BIT(21) #define MTK_GDMA_UCS_EN BIT(20) +#define MTK_GDMA_TO_PDMA 0x0 +#define MTK_GDMA_DROP_ALL 0x7777 /* Unicast Filter MAC Address Register - Low */ #define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000)) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 22c72fb7206a..5716c3d2bb86 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -514,8 +514,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; /* * Subtract 1 from the limit because we need to allocate a - * spare CQE so the HCA HW can tell the difference between an - * empty CQ and a full CQ. + * spare CQE to enable resizing the CQ. 
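+ * (Editor's worked example, illustrative only: if the device reported
+ * max_cq_sz == 4194304 (2^22), max_cqes would be capped at 4194303 so
+ * that one slot always stays free for the resize operation; the figure
+ * is hypothetical.)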
*/ dev->caps.max_cqes = dev_cap->max_cq_sz - 1; dev->caps.reserved_cqs = dev_cap->reserved_cqs; @@ -4015,6 +4014,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) goto err_params_unregister; devlink_params_publish(devlink); + devlink_reload_enable(devlink); pci_save_state(pdev); return 0; @@ -4126,6 +4126,8 @@ static void mlx4_remove_one(struct pci_dev *pdev) struct devlink *devlink = priv_to_devlink(priv); int active_vfs = 0; + devlink_reload_disable(devlink); + if (mlx4_is_slave(dev)) persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 381925c90d94..ac108f1e5bd6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -85,6 +85,22 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, return 0; } +static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + return mlx5_unload_one(dev, false); +} + +static int mlx5_devlink_reload_up(struct devlink *devlink, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + return mlx5_load_one(dev, false); +} + static const struct devlink_ops mlx5_devlink_ops = { #ifdef CONFIG_MLX5_ESWITCH .eswitch_mode_set = mlx5_devlink_eswitch_mode_set, @@ -96,6 +112,8 @@ static const struct devlink_ops mlx5_devlink_ops = { #endif .flash_update = mlx5_devlink_flash_update, .info_get = mlx5_devlink_info_get, + .reload_down = mlx5_devlink_reload_down, + .reload_up = mlx5_devlink_reload_up, }; struct devlink *mlx5_devlink_alloc(void) @@ -177,12 +195,29 @@ enum mlx5_devlink_param_id { MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE, }; +static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + bool new_state = val.vbool; + + if (new_state && !MLX5_CAP_GEN(dev, roce)) { + NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE"); + return -EOPNOTSUPP; + } + + return 0; +} + static const struct devlink_param mlx5_devlink_params[] = { DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE, "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING, BIT(DEVLINK_PARAM_CMODE_RUNTIME), mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set, mlx5_devlink_fs_mode_validate), + DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, mlx5_devlink_enable_roce_validate), }; static void mlx5_devlink_set_params_init_values(struct devlink *devlink) @@ -197,6 +232,11 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink) devlink_param_driverinit_value_set(devlink, MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE, value); + + value.vbool = MLX5_CAP_GEN(dev, roce); + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, + value); } int mlx5_devlink_register(struct devlink *devlink, struct device *dev) @@ -213,6 +253,7 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev) goto params_reg_err; mlx5_devlink_set_params_init_values(devlink); devlink_params_publish(devlink); + devlink_reload_enable(devlink); return 0; params_reg_err: @@ -222,6 +263,7 @@ params_reg_err: void mlx5_devlink_unregister(struct devlink *devlink) { + devlink_reload_disable(devlink); 
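+	/* (Editor's note, not part of this patch: with the reload ops and the
+	 * generic enable_roce parameter registered in mlx5_devlink_register()
+	 * above, userspace can exercise them with the devlink tool, e.g.
+	 *   devlink dev reload pci/0000:03:00.0
+	 *   devlink dev param set pci/0000:03:00.0 name enable_roce \
+	 *           value false cmode driverinit
+	 * where the PCI address is hypothetical.)
+	 */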
devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); devlink_unregister(devlink); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 13af72556987..5316cedd78bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -77,8 +77,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, struct neighbour **out_n, u8 *out_ttl) { + struct neighbour *n; struct rtable *rt; - struct neighbour *n = NULL; #if IS_ENABLED(CONFIG_INET) struct mlx5_core_dev *mdev = priv->mdev; @@ -138,8 +138,8 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, struct neighbour **out_n, u8 *out_ttl) { - struct neighbour *n = NULL; struct dst_entry *dst; + struct neighbour *n; #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) int ret; @@ -212,8 +212,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); const struct ip_tunnel_key *tun_key = &e->tun_info->key; struct net_device *out_dev, *route_dev; - struct neighbour *n = NULL; struct flowi4 fl4 = {}; + struct neighbour *n; int ipv4_encap_size; char *encap_header; u8 nud_state, ttl; @@ -239,12 +239,15 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, if (max_encap_size < ipv4_encap_size) { mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", ipv4_encap_size, max_encap_size); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto out; } encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL); - if (!encap_header) - return -ENOMEM; + if (!encap_header) { + err = -ENOMEM; + goto out; + } /* used by mlx5e_detach_encap to lookup a neigh hash table * entry in the neigh hash table when a user deletes a rule @@ -328,9 +331,9 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); const struct ip_tunnel_key *tun_key = &e->tun_info->key; struct net_device *out_dev, *route_dev; - struct neighbour *n = NULL; struct flowi6 fl6 = {}; struct ipv6hdr *ip6h; + struct neighbour *n; int ipv6_encap_size; char *encap_header; u8 nud_state, ttl; @@ -355,12 +358,15 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, if (max_encap_size < ipv6_encap_size) { mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", ipv6_encap_size, max_encap_size); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto out; } encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL); - if (!encap_header) - return -ENOMEM; + if (!encap_header) { + err = -ENOMEM; + goto out; + } /* used by mlx5e_detach_encap to lookup a neigh hash table * entry in the neigh hash table when a user deletes a rule diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 1d4a66fb466a..e8d799c0dfda 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -63,6 +63,7 @@ #include "en/xsk/rx.h" #include "en/xsk/tx.h" #include "en/hv_vhca_stats.h" +#include "lib/mlx5.h" bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) @@ -5419,6 +5420,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) return NULL; } + dev_net_set(netdev, mlx5_core_net(mdev)); priv = netdev_priv(netdev); err = mlx5e_attach(mdev, priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index cd9bb7c7b341..f175cb24bb67 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -47,6 +47,7 @@ #include "en/tc_tun.h" #include "fs_core.h" #include "lib/port_tun.h" +#include "lib/mlx5.h" #define CREATE_TRACE_POINTS #include "diag/en_rep_tracepoint.h" @@ -1243,21 +1244,60 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, } } -static LIST_HEAD(mlx5e_rep_block_cb_list); +static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct flow_cls_offload *f = type_data; + struct flow_cls_offload cls_flower; + struct mlx5e_priv *priv = cb_priv; + struct mlx5_eswitch *esw; + unsigned long flags; + int err; + + flags = MLX5_TC_FLAG(INGRESS) | + MLX5_TC_FLAG(ESW_OFFLOAD) | + MLX5_TC_FLAG(FT_OFFLOAD); + esw = priv->mdev->priv.eswitch; + switch (type) { + case TC_SETUP_CLSFLOWER: + if (!mlx5_eswitch_prios_supported(esw) || f->common.chain_index) + return -EOPNOTSUPP; + + /* Re-use tc offload path by moving the ft flow to the + * reserved ft chain. + */ + memcpy(&cls_flower, f, sizeof(*f)); + cls_flower.common.chain_index = FDB_FT_CHAIN; + err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags); + memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats)); + return err; + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(mlx5e_rep_block_tc_cb_list); +static LIST_HEAD(mlx5e_rep_block_ft_cb_list); static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { struct mlx5e_priv *priv = netdev_priv(dev); struct flow_block_offload *f = type_data; + f->unlocked_driver_cb = true; + switch (type) { case TC_SETUP_BLOCK: - f->unlocked_driver_cb = true; return flow_block_cb_setup_simple(type_data, - &mlx5e_rep_block_cb_list, + &mlx5e_rep_block_tc_cb_list, mlx5e_rep_setup_tc_cb, priv, priv, true); + case TC_SETUP_FT: + return flow_block_cb_setup_simple(type_data, + &mlx5e_rep_block_ft_cb_list, + mlx5e_rep_setup_ft_cb, + priv, priv, true); default: return -EOPNOTSUPP; } @@ -1877,6 +1917,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) return -EINVAL; } + dev_net_set(netdev, mlx5_core_net(dev)); rpriv->netdev = netdev; rep->rep_data[REP_ETH].priv = rpriv; INIT_LIST_HEAD(&rpriv->vport_sqs_list); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 82cffb3a9964..9e9960146e5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1386,6 +1386,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return 0; + if (rq->page_pool) + page_pool_nid_changed(rq->page_pool, numa_mem_id()); + if (rq->cqd.left) { work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget); if (rq->cqd.left || work_done >= budget) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index bb970b2ebf8a..3a707d788022 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -74,6 +74,7 @@ enum { MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT, MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT, MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, + MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT, MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT, 
MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE, MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1, @@ -276,6 +277,11 @@ static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow) return flow_flag_test(flow, ESWITCH); } +static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow) +{ + return flow_flag_test(flow, FT); +} + static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow) { return flow_flag_test(flow, OFFLOADED); @@ -1074,7 +1080,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; slow_attr->split_count = 0; - slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN; + slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN; rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr); if (!IS_ERR(rule)) @@ -1091,7 +1097,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; slow_attr->split_count = 0; - slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN; + slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN; mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr); flow_flag_clear(flow, SLOW); } @@ -1168,7 +1174,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - if (attr->chain > max_chain) { + /* We check chain range only for tc flows. + * For ft flows, we checked attr->chain was originally 0 and set it to + * FDB_FT_CHAIN which is outside tc range. + * See mlx5e_rep_setup_ft_cb(). + */ + if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) { NL_SET_ERR_MSG(extack, "Requested chain is out of supported range"); return -EOPNOTSUPP; } @@ -3217,6 +3228,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; struct mlx5e_rep_priv *rpriv = priv->ppriv; const struct ip_tunnel_info *info = NULL; + bool ft_flow = mlx5e_is_ft_flow(flow); const struct flow_action_entry *act; bool encap = false; u32 action = 0; @@ -3261,6 +3273,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return -EINVAL; } + if (ft_flow && out_dev == priv->netdev) { + /* Ignore forward to self rules generated + * by adding both mlx5 devs to the flow table + * block on a normal nft offload setup. 
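+ * (Editor's illustration, hedged: such a rule typically shows up when
+ * both mlx5 representors are attached to one nft flowtable, e.g.
+ *   nft add flowtable inet filter ft \
+ *       '{ hook ingress priority 0; devices = { ens1f0_0, ens1f0_1 }; }'
+ * with hypothetical device names; nft then offloads a forward rule
+ * whose destination equals the ingress device, which is skipped here.)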
+ */ + return -EOPNOTSUPP; + } + if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) { NL_SET_ERR_MSG_MOD(extack, "can't support more output ports, can't offload forwarding"); @@ -3385,6 +3405,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, u32 dest_chain = act->chain_index; u32 max_chain = mlx5_eswitch_get_chain_range(esw); + if (ft_flow) { + NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported"); + return -EOPNOTSUPP; + } if (dest_chain <= attr->chain) { NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported"); return -EOPNOTSUPP; @@ -3475,6 +3499,8 @@ static void get_flags(int flags, unsigned long *flow_flags) __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH); if (flags & MLX5_TC_FLAG(NIC_OFFLOAD)) __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC); + if (flags & MLX5_TC_FLAG(FT_OFFLOAD)) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT); *flow_flags = __flow_flags; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 924c6ef86a14..262cdb7b69b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -44,7 +44,8 @@ enum { MLX5E_TC_FLAG_EGRESS_BIT, MLX5E_TC_FLAG_NIC_OFFLOAD_BIT, MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, - MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, + MLX5E_TC_FLAG_FT_OFFLOAD_BIT, + MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT, }; #define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 89a2806eceb8..f2e400a23a59 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -111,42 +111,32 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, } /* E-Switch vport context HW commands */ -static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, - void *in, int inlen) +int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, + bool other_vport, + void *in, int inlen) { u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0}; MLX5_SET(modify_esw_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); - MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); + MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport); return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } -int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport, - void *in, int inlen) -{ - return modify_esw_vport_context_cmd(esw->dev, vport, in, inlen); -} - -static int query_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, - void *out, int outlen) +int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, + bool other_vport, + void *out, int outlen) { u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {}; MLX5_SET(query_esw_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT); MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); - MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); + MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport); return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } -int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport, - void *out, int outlen) -{ - return query_esw_vport_context_cmd(esw->dev, vport, out, outlen); -} - static int 
modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport, u16 vlan, u8 qos, u8 set_flags) { @@ -179,7 +169,8 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport, MLX5_SET(modify_esw_vport_context_in, in, field_select.vport_cvlan_insert, 1); - return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in)); + return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, + in, sizeof(in)); } /* E-Switch FDB */ @@ -452,6 +443,13 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw) return err; } +static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) +{ + esw_cleanup_vepa_rules(esw); + esw_destroy_legacy_fdb_table(esw); + esw_destroy_legacy_vepa_table(esw); +} + #define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \ MLX5_VPORT_MC_ADDR_CHANGE | \ MLX5_VPORT_PROMISC_CHANGE) @@ -464,15 +462,10 @@ static int esw_legacy_enable(struct mlx5_eswitch *esw) if (ret) return ret; - mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); - return 0; -} - -static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) -{ - esw_cleanup_vepa_rules(esw); - esw_destroy_legacy_fdb_table(esw); - esw_destroy_legacy_vepa_table(esw); + ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); + if (ret) + esw_destroy_legacy_table(esw); + return ret; } static void esw_legacy_disable(struct mlx5_eswitch *esw) @@ -501,7 +494,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) /* Skip mlx5_mpfs_add_mac for eswitch_managers, * it is already done by its netdev in mlx5e_execute_l2_action */ - if (esw->manager_vport == vport) + if (mlx5_esw_is_manager_vport(esw, vport)) goto fdb_add; err = mlx5_mpfs_add_mac(esw->dev, mac); @@ -530,10 +523,10 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) u16 vport = vaddr->vport; int err = 0; - /* Skip mlx5_mpfs_del_mac for eswitch managerss, + /* Skip mlx5_mpfs_del_mac for eswitch managers, * it is already done by its netdev in mlx5e_execute_l2_action */ - if (!vaddr->mpfs || esw->manager_vport == vport) + if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport)) goto fdb_del; err = mlx5_mpfs_del_mac(esw->dev, mac); @@ -1040,14 +1033,15 @@ out: void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { - if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) + if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) { mlx5_del_flow_rules(vport->egress.allowed_vlan); + vport->egress.allowed_vlan = NULL; + } - if (!IS_ERR_OR_NULL(vport->egress.drop_rule)) - mlx5_del_flow_rules(vport->egress.drop_rule); - - vport->egress.allowed_vlan = NULL; - vport->egress.drop_rule = NULL; + if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) { + mlx5_del_flow_rules(vport->egress.legacy.drop_rule); + vport->egress.legacy.drop_rule = NULL; + } } void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, @@ -1067,57 +1061,21 @@ void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, vport->egress.acl = NULL; } -int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +static int +esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_core_dev *dev = esw->dev; - struct mlx5_flow_namespace *root_ns; - struct mlx5_flow_table *acl; struct mlx5_flow_group *g; void *match_criteria; u32 *flow_group_in; - /* The ingress acl table contains 4 groups - * (2 active rules at the same time - - * 1 allow 
rule from one of the first 3 groups. - * 1 drop rule from the last group): - * 1)Allow untagged traffic with smac=original mac. - * 2)Allow untagged traffic. - * 3)Allow traffic with smac=original mac. - * 4)Drop all other traffic. - */ - int table_size = 4; - int err = 0; - - if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) - return -EOPNOTSUPP; - - if (!IS_ERR_OR_NULL(vport->ingress.acl)) - return 0; - - esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", - vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); - - root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, - mlx5_eswitch_vport_num_to_index(esw, vport->vport)); - if (!root_ns) { - esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport); - return -EOPNOTSUPP; - } + int err; flow_group_in = kvzalloc(inlen, GFP_KERNEL); if (!flow_group_in) return -ENOMEM; - acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); - if (IS_ERR(acl)) { - err = PTR_ERR(acl); - esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", - vport->vport, err); - goto out; - } - vport->ingress.acl = acl; - match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); @@ -1127,14 +1085,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); - g = mlx5_create_flow_group(acl, flow_group_in); + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); if (IS_ERR(g)) { err = PTR_ERR(g); - esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", + esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n", vport->vport, err); - goto out; + goto spoof_err; } - vport->ingress.allow_untagged_spoofchk_grp = g; + vport->ingress.legacy.allow_untagged_spoofchk_grp = g; memset(flow_group_in, 0, inlen); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); @@ -1142,14 +1100,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); - g = mlx5_create_flow_group(acl, flow_group_in); + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); if (IS_ERR(g)) { err = PTR_ERR(g); - esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", + esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n", vport->vport, err); - goto out; + goto untagged_err; } - vport->ingress.allow_untagged_only_grp = g; + vport->ingress.legacy.allow_untagged_only_grp = g; memset(flow_group_in, 0, inlen); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); @@ -1158,108 +1116,178 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); - g = mlx5_create_flow_group(acl, flow_group_in); + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); if (IS_ERR(g)) { err = PTR_ERR(g); - esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", + esw_warn(dev, "vport[%d] ingress create spoofchk flow 
group, err(%d)\n", vport->vport, err); - goto out; + goto allow_spoof_err; } - vport->ingress.allow_spoofchk_only_grp = g; + vport->ingress.legacy.allow_spoofchk_only_grp = g; memset(flow_group_in, 0, inlen); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); - g = mlx5_create_flow_group(acl, flow_group_in); + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); if (IS_ERR(g)) { err = PTR_ERR(g); - esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", + esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n", vport->vport, err); - goto out; + goto drop_err; } - vport->ingress.drop_grp = g; + vport->ingress.legacy.drop_grp = g; + kvfree(flow_group_in); + return 0; -out: - if (err) { - if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp)) - mlx5_destroy_flow_group( - vport->ingress.allow_spoofchk_only_grp); - if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp)) - mlx5_destroy_flow_group( - vport->ingress.allow_untagged_only_grp); - if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp)) - mlx5_destroy_flow_group( - vport->ingress.allow_untagged_spoofchk_grp); - if (!IS_ERR_OR_NULL(vport->ingress.acl)) - mlx5_destroy_flow_table(vport->ingress.acl); +drop_err: + if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) { + mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp); + vport->ingress.legacy.allow_spoofchk_only_grp = NULL; } - +allow_spoof_err: + if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) { + mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp); + vport->ingress.legacy.allow_untagged_only_grp = NULL; + } +untagged_err: + if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) { + mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp); + vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL; + } +spoof_err: kvfree(flow_group_in); return err; } +int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, int table_size) +{ + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *acl; + int vport_index; + int err; + + if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) + return -EOPNOTSUPP; + + esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", + vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); + + vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport); + root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, + vport_index); + if (!root_ns) { + esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", + vport->vport); + return -EOPNOTSUPP; + } + + acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); + if (IS_ERR(acl)) { + err = PTR_ERR(acl); + esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n", + vport->vport, err); + return err; + } + vport->ingress.acl = acl; + return 0; +} + +void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport) +{ + if (!vport->ingress.acl) + return; + + mlx5_destroy_flow_table(vport->ingress.acl); + vport->ingress.acl = NULL; +} + void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { - if (!IS_ERR_OR_NULL(vport->ingress.drop_rule)) - mlx5_del_flow_rules(vport->ingress.drop_rule); + if (vport->ingress.legacy.drop_rule) { + 
mlx5_del_flow_rules(vport->ingress.legacy.drop_rule); + vport->ingress.legacy.drop_rule = NULL; + } - if (!IS_ERR_OR_NULL(vport->ingress.allow_rule)) + if (vport->ingress.allow_rule) { mlx5_del_flow_rules(vport->ingress.allow_rule); - - vport->ingress.drop_rule = NULL; - vport->ingress.allow_rule = NULL; - - esw_vport_del_ingress_acl_modify_metadata(esw, vport); + vport->ingress.allow_rule = NULL; + } } -void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { - if (IS_ERR_OR_NULL(vport->ingress.acl)) + if (!vport->ingress.acl) return; esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport); esw_vport_cleanup_ingress_rules(esw, vport); - mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp); - mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp); - mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp); - mlx5_destroy_flow_group(vport->ingress.drop_grp); - mlx5_destroy_flow_table(vport->ingress.acl); - vport->ingress.acl = NULL; - vport->ingress.drop_grp = NULL; - vport->ingress.allow_spoofchk_only_grp = NULL; - vport->ingress.allow_untagged_only_grp = NULL; - vport->ingress.allow_untagged_spoofchk_grp = NULL; + if (vport->ingress.legacy.allow_spoofchk_only_grp) { + mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp); + vport->ingress.legacy.allow_spoofchk_only_grp = NULL; + } + if (vport->ingress.legacy.allow_untagged_only_grp) { + mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp); + vport->ingress.legacy.allow_untagged_only_grp = NULL; + } + if (vport->ingress.legacy.allow_untagged_spoofchk_grp) { + mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp); + vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL; + } + if (vport->ingress.legacy.drop_grp) { + mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp); + vport->ingress.legacy.drop_grp = NULL; + } + esw_vport_destroy_ingress_acl_table(vport); } static int esw_vport_ingress_config(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { - struct mlx5_fc *counter = vport->ingress.drop_counter; + struct mlx5_fc *counter = vport->ingress.legacy.drop_counter; struct mlx5_flow_destination drop_ctr_dst = {0}; struct mlx5_flow_destination *dst = NULL; struct mlx5_flow_act flow_act = {0}; - struct mlx5_flow_spec *spec; + struct mlx5_flow_spec *spec = NULL; int dest_num = 0; int err = 0; u8 *smac_v; + /* The ingress acl table contains 4 groups + * (2 active rules at the same time - + * 1 allow rule from one of the first 3 groups. + * 1 drop rule from the last group): + * 1)Allow untagged traffic with smac=original mac. + * 2)Allow untagged traffic. + * 3)Allow traffic with smac=original mac. + * 4)Drop all other traffic. 
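+ * (Editor's note, illustrative only: with both spoofchk and a VLAN
+ * configured the active pair is the group-1 allow rule plus the
+ * group-4 drop rule; spoofchk alone uses group 3, a VLAN alone uses
+ * group 2, each together with the group-4 drop rule.)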
+ */ + int table_size = 4; + esw_vport_cleanup_ingress_rules(esw, vport); if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) { - esw_vport_disable_ingress_acl(esw, vport); + esw_vport_disable_legacy_ingress_acl(esw, vport); return 0; } - err = esw_vport_enable_ingress_acl(esw, vport); - if (err) { - mlx5_core_warn(esw->dev, - "failed to enable ingress acl (%d) on vport[%d]\n", - err, vport->vport); - return err; + if (!vport->ingress.acl) { + err = esw_vport_create_ingress_acl_table(esw, vport, table_size); + if (err) { + esw_warn(esw->dev, + "vport[%d] enable ingress acl err (%d)\n", + err, vport->vport); + return err; + } + + err = esw_vport_create_legacy_ingress_acl_groups(esw, vport); + if (err) + goto out; } esw_debug(esw->dev, @@ -1309,21 +1337,59 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, dst = &drop_ctr_dst; dest_num++; } - vport->ingress.drop_rule = + vport->ingress.legacy.drop_rule = mlx5_add_flow_rules(vport->ingress.acl, spec, &flow_act, dst, dest_num); - if (IS_ERR(vport->ingress.drop_rule)) { - err = PTR_ERR(vport->ingress.drop_rule); + if (IS_ERR(vport->ingress.legacy.drop_rule)) { + err = PTR_ERR(vport->ingress.legacy.drop_rule); esw_warn(esw->dev, "vport[%d] configure ingress drop rule, err(%d)\n", vport->vport, err); - vport->ingress.drop_rule = NULL; + vport->ingress.legacy.drop_rule = NULL; goto out; } + kvfree(spec); + return 0; out: - if (err) - esw_vport_cleanup_ingress_rules(esw, vport); + esw_vport_disable_legacy_ingress_acl(esw, vport); + kvfree(spec); + return err; +} + +int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + u16 vlan_id, u32 flow_action) +{ + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_spec *spec; + int err = 0; + + if (vport->egress.allowed_vlan) + return -EEXIST; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id); + + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + flow_act.action = flow_action; + vport->egress.allowed_vlan = + mlx5_add_flow_rules(vport->egress.acl, spec, + &flow_act, NULL, 0); + if (IS_ERR(vport->egress.allowed_vlan)) { + err = PTR_ERR(vport->egress.allowed_vlan); + esw_warn(esw->dev, + "vport[%d] configure egress vlan rule failed, err(%d)\n", + vport->vport, err); + vport->egress.allowed_vlan = NULL; + } + kvfree(spec); return err; } @@ -1331,7 +1397,7 @@ out: static int esw_vport_egress_config(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { - struct mlx5_fc *counter = vport->egress.drop_counter; + struct mlx5_fc *counter = vport->egress.legacy.drop_counter; struct mlx5_flow_destination drop_ctr_dst = {0}; struct mlx5_flow_destination *dst = NULL; struct mlx5_flow_act flow_act = {0}; @@ -1358,34 +1424,17 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", vport->vport, vport->info.vlan, vport->info.qos); - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) { - err = -ENOMEM; - goto out; - } - /* Allowed vlan rule */ - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); - MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); - 
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan); + err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan, + MLX5_FLOW_CONTEXT_ACTION_ALLOW); + if (err) + return err; - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; - vport->egress.allowed_vlan = - mlx5_add_flow_rules(vport->egress.acl, spec, - &flow_act, NULL, 0); - if (IS_ERR(vport->egress.allowed_vlan)) { - err = PTR_ERR(vport->egress.allowed_vlan); - esw_warn(esw->dev, - "vport[%d] configure egress allowed vlan rule failed, err(%d)\n", - vport->vport, err); - vport->egress.allowed_vlan = NULL; + /* Drop others rule (star rule) */ + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) goto out; - } - /* Drop others rule (star rule) */ - memset(spec, 0, sizeof(*spec)); flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; /* Attach egress drop flow counter */ @@ -1396,15 +1445,15 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, dst = &drop_ctr_dst; dest_num++; } - vport->egress.drop_rule = + vport->egress.legacy.drop_rule = mlx5_add_flow_rules(vport->egress.acl, spec, &flow_act, dst, dest_num); - if (IS_ERR(vport->egress.drop_rule)) { - err = PTR_ERR(vport->egress.drop_rule); + if (IS_ERR(vport->egress.legacy.drop_rule)) { + err = PTR_ERR(vport->egress.legacy.drop_rule); esw_warn(esw->dev, "vport[%d] configure egress drop rule failed, err(%d)\n", vport->vport, err); - vport->egress.drop_rule = NULL; + vport->egress.legacy.drop_rule = NULL; } out: kvfree(spec); @@ -1619,7 +1668,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw, u16 vport_num = vport->vport; int flags; - if (esw->manager_vport == vport_num) + if (mlx5_esw_is_manager_vport(esw, vport_num)) return; mlx5_modify_vport_admin_state(esw->dev, @@ -1639,66 +1688,112 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw, SET_VLAN_STRIP | SET_VLAN_INSERT : 0; modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos, flags); - - /* Only legacy mode needs ACLs */ - if (esw->mode == MLX5_ESWITCH_LEGACY) { - esw_vport_ingress_config(esw, vport); - esw_vport_egress_config(esw, vport); - } } -static void esw_vport_create_drop_counters(struct mlx5_vport *vport) +static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { - struct mlx5_core_dev *dev = vport->dev; + int ret; - if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) { - vport->ingress.drop_counter = mlx5_fc_create(dev, false); - if (IS_ERR(vport->ingress.drop_counter)) { - esw_warn(dev, + /* Only non manager vports need ACL in legacy mode */ + if (mlx5_esw_is_manager_vport(esw, vport->vport)) + return 0; + + if (!mlx5_esw_is_manager_vport(esw, vport->vport) && + MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) { + vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false); + if (IS_ERR(vport->ingress.legacy.drop_counter)) { + esw_warn(esw->dev, "vport[%d] configure ingress drop rule counter failed\n", vport->vport); - vport->ingress.drop_counter = NULL; + vport->ingress.legacy.drop_counter = NULL; } } - if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) { - vport->egress.drop_counter = mlx5_fc_create(dev, false); - if (IS_ERR(vport->egress.drop_counter)) { - esw_warn(dev, + ret = esw_vport_ingress_config(esw, vport); + if (ret) + goto ingress_err; + + if (!mlx5_esw_is_manager_vport(esw, vport->vport) && + 
MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) { + vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false); + if (IS_ERR(vport->egress.legacy.drop_counter)) { + esw_warn(esw->dev, "vport[%d] configure egress drop rule counter failed\n", vport->vport); - vport->egress.drop_counter = NULL; + vport->egress.legacy.drop_counter = NULL; } } + + ret = esw_vport_egress_config(esw, vport); + if (ret) + goto egress_err; + + return 0; + +egress_err: + esw_vport_disable_legacy_ingress_acl(esw, vport); + mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter); + vport->egress.legacy.drop_counter = NULL; + +ingress_err: + mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter); + vport->ingress.legacy.drop_counter = NULL; + return ret; } -static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport) +static int esw_vport_setup_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { - struct mlx5_core_dev *dev = vport->dev; + if (esw->mode == MLX5_ESWITCH_LEGACY) + return esw_vport_create_legacy_acl_tables(esw, vport); + else + return esw_vport_create_offloads_acl_tables(esw, vport); +} + +static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) - if (vport->ingress.drop_counter) - mlx5_fc_destroy(dev, vport->ingress.drop_counter); - if (vport->egress.drop_counter) - mlx5_fc_destroy(dev, vport->egress.drop_counter); +{ + if (mlx5_esw_is_manager_vport(esw, vport->vport)) + return; + + esw_vport_disable_egress_acl(esw, vport); + mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter); + vport->egress.legacy.drop_counter = NULL; + + esw_vport_disable_legacy_ingress_acl(esw, vport); + mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter); + vport->ingress.legacy.drop_counter = NULL; +} + +static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (esw->mode == MLX5_ESWITCH_LEGACY) + esw_vport_destroy_legacy_acl_tables(esw, vport); + else + esw_vport_destroy_offloads_acl_tables(esw, vport); } -static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, - enum mlx5_eswitch_vport_event enabled_events) +static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + enum mlx5_eswitch_vport_event enabled_events) { u16 vport_num = vport->vport; + int ret; mutex_lock(&esw->state_lock); WARN_ON(vport->enabled); esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); - /* Create steering drop counters for ingress and egress ACLs */ - if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY) - esw_vport_create_drop_counters(vport); - /* Restore old vport configuration */ esw_apply_vport_conf(esw, vport); + ret = esw_vport_setup_acl(esw, vport); + if (ret) + goto done; + /* Attach vport to the eswitch rate limiter */ if (esw_vport_enable_qos(esw, vport, vport->info.max_rate, vport->qos.bw_share)) @@ -1711,7 +1806,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well * in smartNIC as it's a vport group manager. 
*/ - if (esw->manager_vport == vport_num || + if (mlx5_esw_is_manager_vport(esw, vport_num) || (!vport_num && mlx5_core_is_ecpf(esw->dev))) vport->info.trusted = true; @@ -1719,7 +1814,9 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, esw->enabled_vports++; esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); +done: mutex_unlock(&esw->state_lock); + return ret; } static void esw_disable_vport(struct mlx5_eswitch *esw, @@ -1727,18 +1824,16 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, { u16 vport_num = vport->vport; + mutex_lock(&esw->state_lock); if (!vport->enabled) - return; + goto done; esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num); /* Mark this vport as disabled to discard new events */ vport->enabled = false; - /* Wait for current already scheduled events to complete */ - flush_workqueue(esw->work_queue); /* Disable events from this vport */ arm_vport_context_events_cmd(esw->dev, vport->vport, 0); - mutex_lock(&esw->state_lock); /* We don't assume VFs will cleanup after themselves. * Calling vport change handler while vport is disabled will cleanup * the vport resources. @@ -1746,17 +1841,18 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, esw_vport_change_handle_locked(vport); vport->enabled_events = 0; esw_vport_disable_qos(esw, vport); - if (esw->manager_vport != vport_num && - esw->mode == MLX5_ESWITCH_LEGACY) { + + if (!mlx5_esw_is_manager_vport(esw, vport->vport) && + esw->mode == MLX5_ESWITCH_LEGACY) mlx5_modify_vport_admin_state(esw->dev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, vport_num, 1, MLX5_VPORT_ADMIN_STATE_DOWN); - esw_vport_disable_egress_acl(esw, vport); - esw_vport_disable_ingress_acl(esw, vport); - esw_vport_destroy_drop_counters(vport); - } + + esw_vport_cleanup_acl(esw, vport); esw->enabled_vports--; + +done: mutex_unlock(&esw->state_lock); } @@ -1770,12 +1866,8 @@ static int eswitch_vport_event(struct notifier_block *nb, vport_num = be16_to_cpu(eqe->data.vport_change.vport_num); vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return NOTIFY_OK; - - if (vport->enabled) + if (!IS_ERR(vport)) queue_work(esw->work_queue, &vport->vport_change_handler); - return NOTIFY_OK; } @@ -1846,26 +1938,51 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) /* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs * whichever are present on the eswitch. 
*/ -void +int mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, enum mlx5_eswitch_vport_event enabled_events) { struct mlx5_vport *vport; + int num_vfs; + int ret; int i; /* Enable PF vport */ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); - esw_enable_vport(esw, vport, enabled_events); + ret = esw_enable_vport(esw, vport, enabled_events); + if (ret) + return ret; - /* Enable ECPF vports */ + /* Enable ECPF vport */ if (mlx5_ecpf_vport_exists(esw->dev)) { vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); - esw_enable_vport(esw, vport, enabled_events); + ret = esw_enable_vport(esw, vport, enabled_events); + if (ret) + goto ecpf_err; } /* Enable VF vports */ - mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) - esw_enable_vport(esw, vport, enabled_events); + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { + ret = esw_enable_vport(esw, vport, enabled_events); + if (ret) + goto vf_err; + } + return 0; + +vf_err: + num_vfs = i - 1; + mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, num_vfs) + esw_disable_vport(esw, vport); + + if (mlx5_ecpf_vport_exists(esw->dev)) { + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); + esw_disable_vport(esw, vport); + } + +ecpf_err: + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); + esw_disable_vport(esw, vport); + return ret; } /* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs @@ -2485,12 +2602,12 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY) return 0; - if (vport->egress.drop_counter) - mlx5_fc_query(dev, vport->egress.drop_counter, + if (vport->egress.legacy.drop_counter) + mlx5_fc_query(dev, vport->egress.legacy.drop_counter, &stats->rx_dropped, &bytes); - if (vport->ingress.drop_counter) - mlx5_fc_query(dev, vport->ingress.drop_counter, + if (vport->ingress.legacy.drop_counter) + mlx5_fc_query(dev, vport->ingress.legacy.drop_counter, &stats->tx_dropped, &bytes); if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 804a7ed2b969..962888a7c3c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -43,6 +43,16 @@ #include <linux/mlx5/fs.h> #include "lib/mpfs.h" +#define FDB_TC_MAX_CHAIN 3 +#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1) +#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1) + +/* The index of the last real chain (FT) + 1 as chain zero is valid as well */ +#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1) + +#define FDB_TC_MAX_PRIO 16 +#define FDB_TC_LEVELS_PER_PRIO 2 + #ifdef CONFIG_MLX5_ESWITCH #define MLX5_MAX_UC_PER_VPORT(dev) \ @@ -59,21 +69,22 @@ #define mlx5_esw_has_fwd_fdb(dev) \ MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table) -#define FDB_MAX_CHAIN 3 -#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1) -#define FDB_MAX_PRIO 16 - struct vport_ingress { struct mlx5_flow_table *acl; - struct mlx5_flow_group *allow_untagged_spoofchk_grp; - struct mlx5_flow_group *allow_spoofchk_only_grp; - struct mlx5_flow_group *allow_untagged_only_grp; - struct mlx5_flow_group *drop_grp; - struct mlx5_modify_hdr *modify_metadata; - struct mlx5_flow_handle *modify_metadata_rule; - struct mlx5_flow_handle *allow_rule; - struct mlx5_flow_handle *drop_rule; - struct mlx5_fc *drop_counter; + struct mlx5_flow_handle *allow_rule; + struct { + struct mlx5_flow_group *allow_spoofchk_only_grp; + struct mlx5_flow_group 
*allow_untagged_spoofchk_grp; + struct mlx5_flow_group *allow_untagged_only_grp; + struct mlx5_flow_group *drop_grp; + struct mlx5_flow_handle *drop_rule; + struct mlx5_fc *drop_counter; + } legacy; + struct { + struct mlx5_flow_group *metadata_grp; + struct mlx5_modify_hdr *modify_metadata; + struct mlx5_flow_handle *modify_metadata_rule; + } offloads; }; struct vport_egress { @@ -81,8 +92,10 @@ struct vport_egress { struct mlx5_flow_group *allowed_vlans_grp; struct mlx5_flow_group *drop_grp; struct mlx5_flow_handle *allowed_vlan; - struct mlx5_flow_handle *drop_rule; - struct mlx5_fc *drop_counter; + struct { + struct mlx5_flow_handle *drop_rule; + struct mlx5_fc *drop_counter; + } legacy; }; struct mlx5_vport_drop_stats { @@ -139,7 +152,6 @@ enum offloads_fdb_flags { extern const unsigned int ESW_POOLS[4]; -#define PRIO_LEVELS 2 struct mlx5_eswitch_fdb { union { struct legacy_fdb { @@ -166,7 +178,7 @@ struct mlx5_eswitch_fdb { struct { struct mlx5_flow_table *fdb; u32 num_rules; - } fdb_prio[FDB_MAX_CHAIN + 1][FDB_MAX_PRIO + 1][PRIO_LEVELS]; + } fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO]; /* Protects fdb_prio table */ struct mutex fdb_prio_lock; @@ -217,8 +229,8 @@ enum { struct mlx5_eswitch { struct mlx5_core_dev *dev; struct mlx5_nb nb; - /* legacy data structures */ struct mlx5_eswitch_fdb fdb_table; + /* legacy data structures */ struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE]; struct esw_mc_addr mc_promisc; /* end of legacy */ @@ -251,18 +263,16 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); int esw_offloads_init_reps(struct mlx5_eswitch *esw); void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, struct mlx5_vport *vport); -int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport); +int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + int table_size); +void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport); void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, struct mlx5_vport *vport); int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, struct mlx5_vport *vport); void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, struct mlx5_vport *vport); -void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, - struct mlx5_vport *vport); -void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, - struct mlx5_vport *vport); int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps); @@ -292,9 +302,11 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, struct ifla_vf_stats *vf_stats); void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule); -int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport, +int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, + bool other_vport, void *in, int inlen); -int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport, +int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, + bool other_vport, void *out, int outlen); struct mlx5_flow_spec; @@ -421,6 +433,10 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, u16 vport, u16 vlan, u8 qos, u8 set_flags); +int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + u16 vlan_id, u32 flow_action); + static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev, u8 vlan_depth) 
{ @@ -459,6 +475,12 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev) MLX5_VPORT_ECPF : MLX5_VPORT_PF; } +static inline bool +mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num) +{ + return esw->manager_vport == vport_num; +} + static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev) { return mlx5_core_is_ecpf_esw_manager(dev) ? @@ -593,11 +615,18 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num); void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs); int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data); -void +int mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, enum mlx5_eswitch_vport_event enabled_events); void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw); +int +esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); +void +esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); + #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } @@ -613,10 +642,6 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {} -#define FDB_MAX_CHAIN 1 -#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1) -#define FDB_MAX_PRIO 1 - #endif /* CONFIG_MLX5_ESWITCH */ #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 2276bb183705..8ba59a21a163 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -75,7 +75,7 @@ bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw) u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw) { if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) - return FDB_MAX_CHAIN; + return FDB_TC_MAX_CHAIN; return 0; } @@ -83,7 +83,7 @@ u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw) u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw) { if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) - return FDB_MAX_PRIO; + return FDB_TC_MAX_PRIO; return 1; } @@ -599,7 +599,7 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable) if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) return 0; - err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport, + err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false, out, sizeof(out)); if (err) return err; @@ -618,7 +618,7 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable) MLX5_SET(modify_esw_vport_context_in, in, field_select.fdb_to_vport_reg_c_id, 1); - return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport, + return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in, sizeof(in)); } @@ -927,7 +927,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level) int table_prio, l = 0; u32 flags = 0; - if (chain == FDB_SLOW_PATH_CHAIN) + if (chain == FDB_TC_SLOW_PATH_CHAIN) return esw->fdb_table.offloads.slow_fdb; mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock); @@ -952,7 +952,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level) flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); - 
table_prio = (chain * FDB_MAX_PRIO) + prio - 1; + table_prio = prio - 1; /* create earlier levels for correct fs_core lookup when * connecting tables @@ -989,7 +989,7 @@ esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level) { int l; - if (chain == FDB_SLOW_PATH_CHAIN) + if (chain == FDB_TC_SLOW_PATH_CHAIN) return; mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock); @@ -1079,7 +1079,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) MLX5_CAP_GEN(dev, max_flow_counter_15_0); fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size); - esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n", + esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n", MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, fdb_max); @@ -1777,9 +1777,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw, flow_act.vlan[0].vid = 0; flow_act.vlan[0].prio = 0; - if (vport->ingress.modify_metadata_rule) { + if (vport->ingress.offloads.modify_metadata_rule) { flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - flow_act.modify_hdr = vport->ingress.modify_metadata; + flow_act.modify_hdr = vport->ingress.offloads.modify_metadata; } vport->ingress.allow_rule = @@ -1815,11 +1815,11 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, MLX5_SET(set_action_in, action, data, mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport)); - vport->ingress.modify_metadata = + vport->ingress.offloads.modify_metadata = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, 1, action); - if (IS_ERR(vport->ingress.modify_metadata)) { - err = PTR_ERR(vport->ingress.modify_metadata); + if (IS_ERR(vport->ingress.offloads.modify_metadata)) { + err = PTR_ERR(vport->ingress.offloads.modify_metadata); esw_warn(esw->dev, "failed to alloc modify header for vport %d ingress acl (%d)\n", vport->vport, err); @@ -1827,100 +1827,76 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, } flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW; - flow_act.modify_hdr = vport->ingress.modify_metadata; - vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl, - &spec, &flow_act, NULL, 0); - if (IS_ERR(vport->ingress.modify_metadata_rule)) { - err = PTR_ERR(vport->ingress.modify_metadata_rule); + flow_act.modify_hdr = vport->ingress.offloads.modify_metadata; + vport->ingress.offloads.modify_metadata_rule = + mlx5_add_flow_rules(vport->ingress.acl, + &spec, &flow_act, NULL, 0); + if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) { + err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule); esw_warn(esw->dev, "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n", vport->vport, err); - vport->ingress.modify_metadata_rule = NULL; + vport->ingress.offloads.modify_metadata_rule = NULL; goto out; } out: if (err) - mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata); + mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); return err; } -void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { - if (vport->ingress.modify_metadata_rule) { - 
mlx5_del_flow_rules(vport->ingress.modify_metadata_rule); - mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata); + if (vport->ingress.offloads.modify_metadata_rule) { + mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule); + mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); - vport->ingress.modify_metadata_rule = NULL; + vport->ingress.offloads.modify_metadata_rule = NULL; } } -static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { - struct mlx5_flow_act flow_act = {0}; - struct mlx5_flow_spec *spec; - int err = 0; - - if (!MLX5_CAP_GEN(esw->dev, prio_tag_required)) - return 0; - - /* For prio tag mode, there is only 1 FTEs: - * 1) prio tag packets - pop the prio tag VLAN, allow - * Unmatched traffic is allowed by default - */ - - esw_vport_cleanup_egress_rules(esw, vport); - - err = esw_vport_enable_egress_acl(esw, vport); - if (err) { - mlx5_core_warn(esw->dev, - "failed to enable egress acl (%d) on vport[%d]\n", - err, vport->vport); - return err; - } - - esw_debug(esw->dev, - "vport[%d] configure prio tag egress rules\n", vport->vport); + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *g; + u32 *flow_group_in; + int ret = 0; - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) { - err = -ENOMEM; - goto out_no_mem; - } + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; - /* prio tag vlan rule - pop it so VF receives untagged packets */ - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); - MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0); + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | - MLX5_FLOW_CONTEXT_ACTION_ALLOW; - vport->egress.allowed_vlan = - mlx5_add_flow_rules(vport->egress.acl, spec, - &flow_act, NULL, 0); - if (IS_ERR(vport->egress.allowed_vlan)) { - err = PTR_ERR(vport->egress.allowed_vlan); + g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); + if (IS_ERR(g)) { + ret = PTR_ERR(g); esw_warn(esw->dev, - "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n", - vport->vport, err); - vport->egress.allowed_vlan = NULL; - goto out; + "Failed to create vport[%d] ingress metadata group, err(%d)\n", + vport->vport, ret); + goto grp_err; } + vport->ingress.offloads.metadata_grp = g; +grp_err: + kvfree(flow_group_in); + return ret; +} -out: - kvfree(spec); -out_no_mem: - if (err) - esw_vport_cleanup_egress_rules(esw, vport); - return err; +static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport) +{ + if (vport->ingress.offloads.metadata_grp) { + mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp); + vport->ingress.offloads.metadata_grp = NULL; + } } -static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { int err; @@ -1929,8 +1905,7 @@ static int 
esw_vport_ingress_common_config(struct mlx5_eswitch *esw, return 0; esw_vport_cleanup_ingress_rules(esw, vport); - - err = esw_vport_enable_ingress_acl(esw, vport); + err = esw_vport_create_ingress_acl_table(esw, vport, 1); if (err) { esw_warn(esw->dev, "failed to enable ingress acl (%d) on vport[%d]\n", @@ -1938,25 +1913,65 @@ static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw, return err; } + err = esw_vport_create_ingress_acl_group(esw, vport); + if (err) + goto group_err; + esw_debug(esw->dev, "vport[%d] configure ingress rules\n", vport->vport); if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { err = esw_vport_add_ingress_acl_modify_metadata(esw, vport); if (err) - goto out; + goto metadata_err; } if (MLX5_CAP_GEN(esw->dev, prio_tag_required) && mlx5_eswitch_is_vf_vport(esw, vport->vport)) { err = esw_vport_ingress_prio_tag_config(esw, vport); if (err) - goto out; + goto prio_tag_err; } + return 0; -out: +prio_tag_err: + esw_vport_del_ingress_acl_modify_metadata(esw, vport); +metadata_err: + esw_vport_cleanup_ingress_rules(esw, vport); + esw_vport_destroy_ingress_acl_group(vport); +group_err: + esw_vport_destroy_ingress_acl_table(vport); + return err; +} + +static int esw_vport_egress_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + int err; + + if (!MLX5_CAP_GEN(esw->dev, prio_tag_required)) + return 0; + + esw_vport_cleanup_egress_rules(esw, vport); + + err = esw_vport_enable_egress_acl(esw, vport); + if (err) + return err; + + /* For prio tag mode, there is only 1 FTEs: + * 1) prio tag packets - pop the prio tag VLAN, allow + * Unmatched traffic is allowed by default + */ + esw_debug(esw->dev, + "vport[%d] configure prio tag egress rules\n", vport->vport); + + /* prio tag vlan rule - pop it so VF receives untagged packets */ + err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0, + MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | + MLX5_FLOW_CONTEXT_ACTION_ALLOW); if (err) - esw_vport_disable_ingress_acl(esw, vport); + esw_vport_disable_egress_acl(esw, vport); + return err; } @@ -1980,54 +1995,59 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw) return true; } -static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw) +int +esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { - struct mlx5_vport *vport; - int i, j; int err; - if (esw_check_vport_match_metadata_supported(esw)) - esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; - - mlx5_esw_for_all_vports(esw, i, vport) { - err = esw_vport_ingress_common_config(esw, vport); - if (err) - goto err_ingress; + err = esw_vport_ingress_config(esw, vport); + if (err) + return err; - if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) { - err = esw_vport_egress_prio_tag_config(esw, vport); - if (err) - goto err_egress; + if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) { + err = esw_vport_egress_config(esw, vport); + if (err) { + esw_vport_del_ingress_acl_modify_metadata(esw, vport); + esw_vport_cleanup_ingress_rules(esw, vport); + esw_vport_destroy_ingress_acl_table(vport); } } + return err; +} - if (mlx5_eswitch_vport_match_metadata_enabled(esw)) - esw_info(esw->dev, "Use metadata reg_c as source vport to match\n"); +void +esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + esw_vport_disable_egress_acl(esw, vport); + esw_vport_del_ingress_acl_modify_metadata(esw, vport); + esw_vport_cleanup_ingress_rules(esw, vport); + esw_vport_destroy_ingress_acl_group(vport); + 
esw_vport_destroy_ingress_acl_table(vport); +} - return 0; +static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) +{ + struct mlx5_vport *vport; + int err; -err_egress: - esw_vport_disable_ingress_acl(esw, vport); -err_ingress: - for (j = MLX5_VPORT_PF; j < i; j++) { - vport = &esw->vports[j]; - esw_vport_disable_egress_acl(esw, vport); - esw_vport_disable_ingress_acl(esw, vport); - } + if (esw_check_vport_match_metadata_supported(esw)) + esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); + err = esw_vport_create_offloads_acl_tables(esw, vport); + if (err) + esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; return err; } -static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw) +static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) { struct mlx5_vport *vport; - int i; - - mlx5_esw_for_all_vports(esw, i, vport) { - esw_vport_disable_egress_acl(esw, vport); - esw_vport_disable_ingress_acl(esw, vport); - } + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); + esw_vport_destroy_offloads_acl_tables(esw, vport); esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; } @@ -2045,7 +2065,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); - err = esw_create_offloads_acl_tables(esw); + err = esw_create_uplink_offloads_acl_tables(esw); if (err) return err; @@ -2070,7 +2090,7 @@ create_ft_err: esw_destroy_offloads_fdb_tables(esw); create_fdb_err: - esw_destroy_offloads_acl_tables(esw); + esw_destroy_uplink_offloads_acl_tables(esw); return err; } @@ -2080,7 +2100,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) esw_destroy_vport_rx_group(esw); esw_destroy_offloads_table(esw); esw_destroy_offloads_fdb_tables(esw); - esw_destroy_offloads_acl_tables(esw); + esw_destroy_uplink_offloads_acl_tables(esw); } static void @@ -2169,7 +2189,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_vport_metadata; - mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); + err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); + if (err) + goto err_vports; err = esw_offloads_load_all_reps(esw); if (err) @@ -2182,6 +2204,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) err_reps: mlx5_eswitch_disable_pf_vf_vports(esw); +err_vports: esw_set_passing_vport_metadata(esw, false); err_vport_metadata: esw_offloads_steering_cleanup(esw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c index 7879e1746297..366bda1bb1c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c @@ -183,7 +183,8 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw, u32 port_mask, port_value; if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) - return spec->flow_context.flow_source == MLX5_VPORT_UPLINK; + return spec->flow_context.flow_source == + MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; port_mask = MLX5_GET(fte_match_param, spec->match_criteria, misc_parameters.source_port); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 0246f5cdd355..2c1dcb5084ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2400,9 +2400,17 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level) int acc_level_ns = acc_level; prio->start_level = acc_level; - fs_for_each_ns(ns, prio) + fs_for_each_ns(ns, prio) { /* This updates start_level and num_levels of ns's priority descendants */ acc_level_ns = set_prio_attrs_in_ns(ns, acc_level); + + /* If this a prio with chains, and we can jump from one chain + * (namepsace) to another, so we accumulate the levels + */ + if (prio->node.type == FS_TYPE_PRIO_CHAINS) + acc_level = acc_level_ns; + } + if (!prio->num_levels) prio->num_levels = acc_level_ns - prio->start_level; WARN_ON(prio->num_levels < acc_level_ns - prio->start_level); @@ -2591,58 +2599,109 @@ out_err: steering->rdma_rx_root_ns = NULL; return err; } -static int init_fdb_root_ns(struct mlx5_flow_steering *steering) + +/* FT and tc chains are stored in the same array so we can re-use the + * mlx5_get_fdb_sub_ns() and tc api for FT chains. + * When creating a new ns for each chain store it in the first available slot. + * Assume tc chains are created and stored first and only then the FT chain. + */ +static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering, + struct mlx5_flow_namespace *ns) +{ + int chain = 0; + + while (steering->fdb_sub_ns[chain]) + ++chain; + + steering->fdb_sub_ns[chain] = ns; +} + +static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering, + struct fs_prio *maj_prio) { struct mlx5_flow_namespace *ns; - struct fs_prio *maj_prio; struct fs_prio *min_prio; + int prio; + + ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF); + if (IS_ERR(ns)) + return PTR_ERR(ns); + + for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) { + min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO); + if (IS_ERR(min_prio)) + return PTR_ERR(min_prio); + } + + store_fdb_sub_ns_prio_chain(steering, ns); + + return 0; +} + +static int create_fdb_chains(struct mlx5_flow_steering *steering, + int fs_prio, + int chains) +{ + struct fs_prio *maj_prio; int levels; int chain; - int prio; int err; - steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB); - if (!steering->fdb_root_ns) - return -ENOMEM; + levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains; + maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, + fs_prio, + levels); + if (IS_ERR(maj_prio)) + return PTR_ERR(maj_prio); + + for (chain = 0; chain < chains; chain++) { + err = create_fdb_sub_ns_prio_chain(steering, maj_prio); + if (err) + return err; + } + + return 0; +} - steering->fdb_sub_ns = kzalloc(sizeof(steering->fdb_sub_ns) * - (FDB_MAX_CHAIN + 1), GFP_KERNEL); +static int create_fdb_fast_path(struct mlx5_flow_steering *steering) +{ + int err; + + steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS, + sizeof(*steering->fdb_sub_ns), + GFP_KERNEL); if (!steering->fdb_sub_ns) return -ENOMEM; + err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1); + if (err) + return err; + + err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1); + if (err) + return err; + + return 0; +} + +static int init_fdb_root_ns(struct mlx5_flow_steering *steering) +{ + struct fs_prio *maj_prio; + int err; + + steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB); + if (!steering->fdb_root_ns) + return -ENOMEM; + maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 1); if (IS_ERR(maj_prio)) { err = PTR_ERR(maj_prio); goto out_err; } - - levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1); - maj_prio = 
fs_create_prio_chained(&steering->fdb_root_ns->ns, - FDB_FAST_PATH, - levels); - if (IS_ERR(maj_prio)) { - err = PTR_ERR(maj_prio); + err = create_fdb_fast_path(steering); + if (err) goto out_err; - } - - for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) { - ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF); - if (IS_ERR(ns)) { - err = PTR_ERR(ns); - goto out_err; - } - - for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) { - min_prio = fs_create_prio(ns, prio, 2); - if (IS_ERR(min_prio)) { - err = PTR_ERR(min_prio); - goto out_err; - } - } - - steering->fdb_sub_ns[chain] = ns; - } maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1); if (IS_ERR(maj_prio)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index e718170a80c3..d9f4e8c59c1f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -555,7 +555,6 @@ mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter, return mlx5_health_try_recover(dev); } -#define MLX5_CR_DUMP_CHUNK_SIZE 256 static int mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *priv_ctx, @@ -564,8 +563,6 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter); u32 crdump_size = dev->priv.health.crdump_size; u32 *cr_data; - u32 data_size; - u32 offset; int err; if (!mlx5_core_is_pf(dev)) @@ -586,20 +583,7 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, goto free_data; } - err = devlink_fmsg_arr_pair_nest_start(fmsg, "crdump_data"); - if (err) - goto free_data; - for (offset = 0; offset < crdump_size; offset += data_size) { - if (crdump_size - offset < MLX5_CR_DUMP_CHUNK_SIZE) - data_size = crdump_size - offset; - else - data_size = MLX5_CR_DUMP_CHUNK_SIZE; - err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset, - data_size); - if (err) - goto free_data; - } - err = devlink_fmsg_arr_pair_nest_end(fmsg); + err = devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size); free_data: kvfree(cr_data); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 0059b290e095..43f97601b500 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -236,6 +236,19 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp, if (!MLX5_PPS_CAP(mdev)) return -EOPNOTSUPP; + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests to enable time stamping on both edges. 
*/ + if ((rq->extts.flags & PTP_STRICT_FLAGS) && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + if (rq->extts.index >= clock->ptp_info.n_pins) return -EINVAL; @@ -290,6 +303,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, if (!MLX5_PPS_CAP(mdev)) return -EOPNOTSUPP; + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + if (rq->perout.index >= clock->ptp_info.n_pins) return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h index b99d469e4e64..249539247e2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h @@ -84,4 +84,9 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev, void *key, u32 sz_bytes, u32 *p_key_id); void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id); +static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev) +{ + return devlink_net(priv_to_devlink(dev)); +} + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c9a091d3226c..31fbfd6e8bb9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1168,7 +1168,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev) mlx5_put_uars_page(dev, dev->priv.uar); } -static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) +int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) { int err = 0; @@ -1226,7 +1226,7 @@ function_teardown: return err; } -static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) +int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) { if (cleanup) { mlx5_unregister_device(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index b100489dc85c..da67b28d6e23 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -243,4 +243,7 @@ enum { u8 mlx5_get_nic_state(struct mlx5_core_dev *dev); void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state); + +int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup); +int mlx5_load_one(struct mlx5_core_dev *dev, bool boot); #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index f641f1336402..03f037811f1d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -108,10 +108,10 @@ enable_vfs_hca: return 0; } -static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev, bool clear_vf) +static void +mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf) { struct mlx5_core_sriov *sriov = &dev->priv.sriov; - int num_vfs = pci_num_vf(dev->pdev); int err; int vf; @@ -147,7 +147,7 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs) err = pci_enable_sriov(pdev, num_vfs); if (err) { mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err); - mlx5_device_disable_sriov(dev, true); + mlx5_device_disable_sriov(dev, num_vfs, true); } return err; } @@ -155,9 +155,10 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs) static void mlx5_sriov_disable(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + int num_vfs = pci_num_vf(dev->pdev); 
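As a companion to the mlx5 clock.c hunk above, here is a minimal userspace sketch of how the new extts flag validation is exercised through the standard PTP character device. It assumes uapi headers new enough to provide PTP_EXTTS_REQUEST2 and PTP_STRICT_FLAGS; /dev/ptp0 and pin index 0 are placeholders for the clock and connector under test, not values taken from the patch.

/* Hypothetical test program, not part of the patch: ask for rising-edge
 * external timestamps with PTP_STRICT_FLAGS so a driver that cannot honour
 * the exact edge selection (as mlx5 now checks above) fails the ioctl
 * instead of silently timestamping both edges.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	struct ptp_extts_event ev;
	int fd;

	fd = open("/dev/ptp0", O_RDWR);	/* placeholder clock device */
	if (fd < 0) {
		perror("open /dev/ptp0");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.index = 0;			/* placeholder pin index */
	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE | PTP_STRICT_FLAGS;

	if (ioctl(fd, PTP_EXTTS_REQUEST2, &req)) {
		/* e.g. EOPNOTSUPP from the flag checks in the hunk above */
		perror("PTP_EXTTS_REQUEST2");
		return 1;
	}

	/* read() blocks until the first edge is timestamped */
	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("extts[%u] at %lld.%09u\n", ev.index,
		       (long long)ev.t.sec, ev.t.nsec);

	req.flags = 0;			/* disable the pin again */
	ioctl(fd, PTP_EXTTS_REQUEST2, &req);
	close(fd);
	return 0;
}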
pci_disable_sriov(pdev); - mlx5_device_disable_sriov(dev, true); + mlx5_device_disable_sriov(dev, num_vfs, true); } int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) @@ -192,7 +193,7 @@ void mlx5_sriov_detach(struct mlx5_core_dev *dev) if (!mlx5_core_is_pf(dev)) return; - mlx5_device_disable_sriov(dev, false); + mlx5_device_disable_sriov(dev, pci_num_vf(dev->pdev), false); } static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index b74b7d0f6590..004c56c2fc0c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -1577,6 +1577,7 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action) break; case DR_ACTION_TYP_MODIFY_HDR: mlx5dr_icm_free_chunk(action->rewrite.chunk); + kfree(action->rewrite.data); refcount_dec(&action->rewrite.dmn->refcount); break; default: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 5db947df8763..c6548980daf0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -154,7 +154,7 @@ int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher, nic_matcher->num_of_builders = nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv]; - if (!nic_matcher->ste_builder) { + if (!nic_matcher->num_of_builders) { mlx5dr_dbg(matcher->tbl->dmn, "Rule not supported on this matcher due to IP related fields\n"); return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index 90c79a133692..8560460d97fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -1097,6 +1097,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, if (htbl) mlx5dr_htbl_put(htbl); + kfree(hw_ste_arr); + return 0; free_ste: diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index e1a90f5bddd0..e9f791c43f20 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -71,6 +71,7 @@ struct mlxsw_core { struct list_head trans_list; spinlock_t trans_list_lock; /* protects trans_list writes */ bool use_emad; + bool enable_string_tlv; } emad; struct { u8 *mapping; /* lag_id+port_index to local_port mapping */ @@ -249,6 +250,25 @@ MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8); */ MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64); +/* emad_string_tlv_type + * Type of the TLV. + * Must be set to 0x2 (string TLV). + */ +MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5); + +/* emad_string_tlv_len + * Length of the string TLV in u32. + */ +MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11); + +#define MLXSW_EMAD_STRING_TLV_STRING_LEN 128 + +/* emad_string_tlv_string + * String provided by the device's firmware in case of erroneous register access + */ +MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04, + MLXSW_EMAD_STRING_TLV_STRING_LEN); + /* emad_reg_tlv_type * Type of the TLV. * Must be set to 0x3 (register TLV). 
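For reference, a standalone sketch of the TLV walk that the new mlxsw_emad_tlv_parse() performs, using the bit layout from the MLXSW_ITEM32 definitions above (TLV type in bits 31:27 of the first big-endian word, length in u32 units in bits 26:16). The frame below is fabricated purely to exercise the "skip the optional string TLV" logic; it omits the Ethernet header, and the 3-word register TLV length is only illustrative.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define TLV_TYPE_OP	1
#define TLV_TYPE_STRING	2
#define TLV_TYPE_REG	3

/* First u32 of every TLV: type in bits 31:27, length (in u32 words,
 * including this header word) in bits 26:16 -- mirrors the MLXSW_ITEM32
 * definitions in the hunk above.
 */
static uint32_t tlv_hdr(unsigned int type, unsigned int len_u32)
{
	return (type & 0x1f) << 27 | (len_u32 & 0x7ff) << 16;
}

static unsigned int tlv_type(const uint32_t *p) { return ntohl(*p) >> 27 & 0x1f; }
static unsigned int tlv_len(const uint32_t *p)  { return ntohl(*p) >> 16 & 0x7ff; }

int main(void)
{
	/* 4-word op TLV (like MLXSW_EMAD_OP_TLV_LEN), optional 33-word string
	 * TLV (MLXSW_EMAD_STRING_TLV_LEN above), then a register TLV.
	 */
	uint32_t frame[4 + 33 + 3] = {0};
	const uint32_t *p = frame;

	frame[0] = htonl(tlv_hdr(TLV_TYPE_OP, 4));
	frame[4] = htonl(tlv_hdr(TLV_TYPE_STRING, 33));
	frame[4 + 33] = htonl(tlv_hdr(TLV_TYPE_REG, 3));

	p += tlv_len(p);			/* skip the op TLV */
	if (tlv_type(p) == TLV_TYPE_STRING) {	/* string TLV is optional */
		printf("string TLV present, %u words\n", tlv_len(p));
		p += tlv_len(p);
	}
	printf("reg TLV type=%u len=%u words\n", tlv_type(p), tlv_len(p));
	return 0;
}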
@@ -304,6 +324,12 @@ static void mlxsw_emad_pack_reg_tlv(char *reg_tlv, memcpy(reg_tlv + sizeof(u32), payload, reg->len); } +static void mlxsw_emad_pack_string_tlv(char *string_tlv) +{ + mlxsw_emad_string_tlv_type_set(string_tlv, MLXSW_EMAD_TLV_TYPE_STRING); + mlxsw_emad_string_tlv_len_set(string_tlv, MLXSW_EMAD_STRING_TLV_LEN); +} + static void mlxsw_emad_pack_op_tlv(char *op_tlv, const struct mlxsw_reg_info *reg, enum mlxsw_core_reg_access_type type, @@ -345,7 +371,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb, const struct mlxsw_reg_info *reg, char *payload, enum mlxsw_core_reg_access_type type, - u64 tid) + u64 tid, bool enable_string_tlv) { char *buf; @@ -355,26 +381,82 @@ static void mlxsw_emad_construct(struct sk_buff *skb, buf = skb_push(skb, reg->len + sizeof(u32)); mlxsw_emad_pack_reg_tlv(buf, reg, payload); + if (enable_string_tlv) { + buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32)); + mlxsw_emad_pack_string_tlv(buf); + } + buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)); mlxsw_emad_pack_op_tlv(buf, reg, type, tid); mlxsw_emad_construct_eth_hdr(skb); } +struct mlxsw_emad_tlv_offsets { + u16 op_tlv; + u16 string_tlv; + u16 reg_tlv; +}; + +static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv) +{ + u8 tlv_type = mlxsw_emad_string_tlv_type_get(tlv); + + return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING; +} + +static void mlxsw_emad_tlv_parse(struct sk_buff *skb) +{ + struct mlxsw_emad_tlv_offsets *offsets = + (struct mlxsw_emad_tlv_offsets *) skb->cb; + + offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN; + offsets->string_tlv = 0; + offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN + + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32); + + /* If string TLV is present, it must come after the operation TLV. */ + if (mlxsw_emad_tlv_is_string_tlv(skb->data + offsets->reg_tlv)) { + offsets->string_tlv = offsets->reg_tlv; + offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32); + } +} + static char *mlxsw_emad_op_tlv(const struct sk_buff *skb) { - return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN)); + struct mlxsw_emad_tlv_offsets *offsets = + (struct mlxsw_emad_tlv_offsets *) skb->cb; + + return ((char *) (skb->data + offsets->op_tlv)); +} + +static char *mlxsw_emad_string_tlv(const struct sk_buff *skb) +{ + struct mlxsw_emad_tlv_offsets *offsets = + (struct mlxsw_emad_tlv_offsets *) skb->cb; + + if (!offsets->string_tlv) + return NULL; + + return ((char *) (skb->data + offsets->string_tlv)); } static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb) { - return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN + - MLXSW_EMAD_OP_TLV_LEN * sizeof(u32))); + struct mlxsw_emad_tlv_offsets *offsets = + (struct mlxsw_emad_tlv_offsets *) skb->cb; + + return ((char *) (skb->data + offsets->reg_tlv)); +} + +static char *mlxsw_emad_reg_payload(const char *reg_tlv) +{ + return ((char *) (reg_tlv + sizeof(u32))); } -static char *mlxsw_emad_reg_payload(const char *op_tlv) +static char *mlxsw_emad_reg_payload_cmd(const char *mbox) { - return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32))); + return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32))); } static u64 mlxsw_emad_get_tid(const struct sk_buff *skb) @@ -440,10 +522,31 @@ struct mlxsw_reg_trans { const struct mlxsw_reg_info *reg; enum mlxsw_core_reg_access_type type; int err; + char *emad_err_string; enum mlxsw_emad_op_tlv_status emad_status; struct rcu_head rcu; }; +static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb, + struct mlxsw_reg_trans *trans) +{ + char *string_tlv; + char *string; + 
+ string_tlv = mlxsw_emad_string_tlv(skb); + if (!string_tlv) + return; + + trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN, + GFP_ATOMIC); + if (!trans->emad_err_string) + return; + + string = mlxsw_emad_string_tlv_string_data(string_tlv); + strlcpy(trans->emad_err_string, string, + MLXSW_EMAD_STRING_TLV_STRING_LEN); +} + #define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000 #define MLXSW_EMAD_TIMEOUT_MS 200 @@ -535,12 +638,14 @@ static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core, mlxsw_emad_transmit_retry(mlxsw_core, trans); } else { if (err == 0) { - char *op_tlv = mlxsw_emad_op_tlv(skb); + char *reg_tlv = mlxsw_emad_reg_tlv(skb); if (trans->cb) trans->cb(mlxsw_core, - mlxsw_emad_reg_payload(op_tlv), + mlxsw_emad_reg_payload(reg_tlv), trans->reg->len, trans->cb_priv); + } else { + mlxsw_emad_process_string_tlv(skb, trans); } mlxsw_emad_trans_finish(trans, err); } @@ -556,6 +661,8 @@ static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port, trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0, skb->data, skb->len); + mlxsw_emad_tlv_parse(skb); + if (!mlxsw_emad_is_resp(skb)) goto free_skb; @@ -631,7 +738,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core) } static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, - u16 reg_len) + u16 reg_len, bool enable_string_tlv) { struct sk_buff *skb; u16 emad_len; @@ -639,6 +746,8 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN + (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) * sizeof(u32) + mlxsw_core->driver->txhdr_len); + if (enable_string_tlv) + emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32); if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN) return NULL; @@ -660,6 +769,7 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core, mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv, u64 tid) { + bool enable_string_tlv; struct sk_buff *skb; int err; @@ -667,7 +777,12 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core, tid, reg->id, mlxsw_reg_id_str(reg->id), mlxsw_core_reg_access_type_str(type)); - skb = mlxsw_emad_alloc(mlxsw_core, reg->len); + /* Since this can be changed during emad_reg_access, read it once and + * use the value all the way. 
+ */ + enable_string_tlv = mlxsw_core->emad.enable_string_tlv; + + skb = mlxsw_emad_alloc(mlxsw_core, reg->len, enable_string_tlv); if (!skb) return -ENOMEM; @@ -684,7 +799,8 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core, trans->reg = reg; trans->type = type; - mlxsw_emad_construct(skb, reg, payload, type, trans->tid); + mlxsw_emad_construct(skb, reg, payload, type, trans->tid, + enable_string_tlv); mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info); spin_lock_bh(&mlxsw_core->emad.trans_list_lock); @@ -1201,6 +1317,9 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (mlxsw_driver->params_register) devlink_params_publish(devlink); + if (!reload) + devlink_reload_enable(devlink); + return 0; err_thermal_init: @@ -1263,6 +1382,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, { struct devlink *devlink = priv_to_devlink(mlxsw_core); + if (!reload) + devlink_reload_disable(devlink); if (devlink_is_reload_failed(devlink)) { if (!reload) /* Only the parts that were not de-initialized in the @@ -1390,12 +1511,16 @@ static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port, struct mlxsw_event_listener_item *event_listener_item = priv; struct mlxsw_reg_info reg; char *payload; - char *op_tlv = mlxsw_emad_op_tlv(skb); - char *reg_tlv = mlxsw_emad_reg_tlv(skb); + char *reg_tlv; + char *op_tlv; + + mlxsw_emad_tlv_parse(skb); + op_tlv = mlxsw_emad_op_tlv(skb); + reg_tlv = mlxsw_emad_reg_tlv(skb); reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv); reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32); - payload = mlxsw_emad_reg_payload(op_tlv); + payload = mlxsw_emad_reg_payload(reg_tlv); event_listener_item->el.func(®, payload, event_listener_item->priv); dev_kfree_skb(skb); } @@ -1613,8 +1738,11 @@ int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_reg_trans_write); +#define MLXSW_REG_TRANS_ERR_STRING_SIZE 256 + static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans) { + char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE]; struct mlxsw_core *mlxsw_core = trans->core; int err; @@ -1632,9 +1760,17 @@ static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans) mlxsw_core_reg_access_type_str(trans->type), trans->emad_status, mlxsw_emad_op_tlv_status_str(trans->emad_status)); + + snprintf(err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE, + "(tid=%llx,reg_id=%x(%s)) %s (%s)\n", trans->tid, + trans->reg->id, mlxsw_reg_id_str(trans->reg->id), + mlxsw_emad_op_tlv_status_str(trans->emad_status), + trans->emad_err_string ? 
trans->emad_err_string : ""); + trace_devlink_hwerr(priv_to_devlink(mlxsw_core), - trans->emad_status, - mlxsw_emad_op_tlv_status_str(trans->emad_status)); + trans->emad_status, err_string); + + kfree(trans->emad_err_string); } list_del(&trans->bulk_list); @@ -1708,7 +1844,7 @@ retry: } if (!err) - memcpy(payload, mlxsw_emad_reg_payload(out_mbox), + memcpy(payload, mlxsw_emad_reg_payload_cmd(out_mbox), reg->len); mlxsw_cmd_mbox_free(out_mbox); @@ -2205,6 +2341,12 @@ u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core) } EXPORT_SYMBOL(mlxsw_core_read_frc_l); +void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core) +{ + mlxsw_core->emad.enable_string_tlv = true; +} +EXPORT_SYMBOL(mlxsw_core_emad_string_tlv_enable); + static int __init mlxsw_core_module_init(void) { int err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 0d18bee6d140..543476a2e503 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -347,6 +347,8 @@ void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core); u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core); u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core); +void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core); + bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core, enum mlxsw_res_id res_id); diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h index a33b896f4bb8..acfbbec52424 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/emad.h +++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h @@ -19,10 +19,8 @@ enum { MLXSW_EMAD_TLV_TYPE_END, MLXSW_EMAD_TLV_TYPE_OP, - MLXSW_EMAD_TLV_TYPE_DR, + MLXSW_EMAD_TLV_TYPE_STRING, MLXSW_EMAD_TLV_TYPE_REG, - MLXSW_EMAD_TLV_TYPE_USERDATA, - MLXSW_EMAD_TLV_TYPE_OOBETH, }; /* OP TLV */ @@ -89,6 +87,9 @@ enum { MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5, }; +/* STRING TLV */ +#define MLXSW_EMAD_STRING_TLV_LEN 33 /* Length in u32 */ + /* END TLV */ #define MLXSW_EMAD_END_TLV_LEN 1 /* Length in u32 */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index bec035ee5349..5294a1622643 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5480,6 +5480,7 @@ enum mlxsw_reg_htgt_trap_group { enum mlxsw_reg_htgt_discard_trap_group { MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX, MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS, + MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS, }; /* reg_htgt_trap_group diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index ea4cc2aa99e0..556dca328bb5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4079,8 +4079,10 @@ static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping, sizeof(port_mapping), GFP_KERNEL); - if (!mlxsw_sp->port_mapping[i]) + if (!mlxsw_sp->port_mapping[i]) { + err = -ENOMEM; goto err_port_module_info_dup; + } } return 0; @@ -4510,8 +4512,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, false), /* L3 traps */ - MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false), MLXSW_SP_RXL_MARK(IP2ME, 
TRAP_TO_CPU, IP2ME, false), MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, @@ -4538,8 +4538,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false), - MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false), MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), @@ -4554,7 +4552,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { /* Multicast Router Traps */ MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false), - MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), /* NVE traps */ @@ -4897,6 +4894,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, if (err) return err; + mlxsw_core_emad_string_tlv_enable(mlxsw_core); + err = mlxsw_sp_base_mac_get(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 0e99b64450ca..517cb8b14b1d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -77,6 +77,8 @@ struct mlxsw_sp_router { struct notifier_block inet6addr_nb; const struct mlxsw_sp_rif_ops **rif_ops_arr; const struct mlxsw_sp_ipip_ops **ipip_ops_arr; + u32 adj_discard_index; + bool adj_discard_index_valid; }; struct mlxsw_sp_rif { @@ -367,6 +369,7 @@ enum mlxsw_sp_fib_entry_type { MLXSW_SP_FIB_ENTRY_TYPE_LOCAL, MLXSW_SP_FIB_ENTRY_TYPE_TRAP, MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE, + MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE, /* This is a special case of local delivery, where a packet should be * decapsulated on reception. 
Note that there is no corresponding ENCAP, @@ -4196,15 +4199,51 @@ mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl, } } +static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index) +{ + u32 adj_discard_index = mlxsw_sp->router->adj_discard_index; + enum mlxsw_reg_ratr_trap_action trap_action; + char ratr_pl[MLXSW_REG_RATR_LEN]; + int err; + + if (mlxsw_sp->router->adj_discard_index_valid) + return 0; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + &mlxsw_sp->router->adj_discard_index); + if (err) + return err; + + trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS; + mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true, + MLXSW_REG_RATR_TYPE_ETHERNET, adj_discard_index, + rif_index); + mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); + if (err) + goto err_ratr_write; + + mlxsw_sp->router->adj_discard_index_valid = true; + + return 0; + +err_ratr_write: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + mlxsw_sp->router->adj_discard_index); + return err; +} + static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, enum mlxsw_reg_ralue_op op) { + struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group; char ralue_pl[MLXSW_REG_RALUE_LEN]; enum mlxsw_reg_ralue_trap_action trap_action; u16 trap_id = 0; u32 adjacency_index = 0; u16 ecmp_size = 0; + int err; /* In case the nexthop group adjacency index is valid, use it * with provided ECMP size. Otherwise, setup trap and pass @@ -4214,6 +4253,15 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; adjacency_index = fib_entry->nh_group->adj_index; ecmp_size = fib_entry->nh_group->ecmp_size; + } else if (!nh_group->adj_index_valid && nh_group->count && + nh_group->nh_rif) { + err = mlxsw_sp_adj_discard_write(mlxsw_sp, + nh_group->nh_rif->rif_index); + if (err) + return err; + trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; + adjacency_index = mlxsw_sp->router->adj_discard_index; + ecmp_size = 1; } else { trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; @@ -4274,6 +4322,23 @@ static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp, } static int +mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + enum mlxsw_reg_ralue_trap_action trap_action; + char ralue_pl[MLXSW_REG_RALUE_LEN]; + u16 trap_id; + + trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; + trap_id = MLXSW_TRAP_ID_RTR_INGRESS1; + + mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); + mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); +} + +static int mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, enum mlxsw_reg_ralue_op op) @@ -4314,6 +4379,9 @@ static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE: return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op); + case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE: + return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry, + op); case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, fib_entry, op); @@ -4391,7 +4459,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp 
*mlxsw_sp, * can do so with a lower priority than packets directed * at the host, so use action type local instead of trap. */ - fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE; return 0; case RTN_UNICAST: if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi)) @@ -5351,7 +5419,7 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, else if (rt->fib6_type == RTN_BLACKHOLE) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE; else if (rt->fib6_flags & RTF_REJECT) - fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE; else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt)) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; else @@ -5909,6 +5977,16 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) continue; mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); } + + /* After flushing all the routes, it is not possible anyone is still + * using the adjacency index that is discarding packets, so free it in + * case it was allocated. + */ + if (!mlxsw_sp->router->adj_discard_index_valid) + return; + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + mlxsw_sp->router->adj_discard_index); + mlxsw_sp->router->adj_discard_index_valid = false; } static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 7c03b661ae7e..e0d7c49ffae0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -13,16 +13,27 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port, void *priv); +static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port, + void *trap_ctx); #define MLXSW_SP_TRAP_DROP(_id, _group_id) \ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \ MLXSW_SP_TRAP_METADATA) +#define MLXSW_SP_TRAP_EXCEPTION(_id, _group_id) \ + DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \ + DEVLINK_TRAP_GROUP_GENERIC(_group_id), \ + MLXSW_SP_TRAP_METADATA) + #define MLXSW_SP_RXL_DISCARD(_id, _group_id) \ MLXSW_RXL(mlxsw_sp_rx_drop_listener, DISCARD_##_id, SET_FW_DEFAULT, \ false, SP_##_group_id, DISCARD) +#define MLXSW_SP_RXL_EXCEPTION(_id, _group_id, _action) \ + MLXSW_RXL(mlxsw_sp_rx_exception_listener, _id, \ + _action, false, SP_##_group_id, DISCARD) + static struct devlink_trap mlxsw_sp_traps_arr[] = { MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS), MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS), @@ -30,6 +41,23 @@ static struct devlink_trap mlxsw_sp_traps_arr[] = { MLXSW_SP_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS), MLXSW_SP_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS), MLXSW_SP_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS), + MLXSW_SP_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS), + MLXSW_SP_TRAP_DROP(NON_IP_PACKET, L3_DROPS), + MLXSW_SP_TRAP_DROP(UC_DIP_MC_DMAC, L3_DROPS), + MLXSW_SP_TRAP_DROP(DIP_LB, L3_DROPS), + MLXSW_SP_TRAP_DROP(SIP_MC, L3_DROPS), + MLXSW_SP_TRAP_DROP(SIP_LB, L3_DROPS), + MLXSW_SP_TRAP_DROP(CORRUPTED_IP_HDR, L3_DROPS), + MLXSW_SP_TRAP_DROP(IPV4_SIP_BC, L3_DROPS), + MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_RESERVED_SCOPE, L3_DROPS), + MLXSW_SP_TRAP_DROP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, L3_DROPS), + MLXSW_SP_TRAP_EXCEPTION(MTU_ERROR, L3_DROPS), + MLXSW_SP_TRAP_EXCEPTION(TTL_ERROR, L3_DROPS), + MLXSW_SP_TRAP_EXCEPTION(RPF, L3_DROPS), + MLXSW_SP_TRAP_EXCEPTION(REJECT_ROUTE, L3_DROPS), + MLXSW_SP_TRAP_EXCEPTION(UNRESOLVED_NEIGH, L3_DROPS), + 
MLXSW_SP_TRAP_EXCEPTION(IPV4_LPM_UNICAST_MISS, L3_DROPS), + MLXSW_SP_TRAP_EXCEPTION(IPV6_LPM_UNICAST_MISS, L3_DROPS), }; static struct mlxsw_listener mlxsw_sp_listeners_arr[] = { @@ -40,6 +68,28 @@ static struct mlxsw_listener mlxsw_sp_listeners_arr[] = { MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_UC, L2_DISCARDS), MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_MC_NULL, L2_DISCARDS), MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_LB, L2_DISCARDS), + MLXSW_SP_RXL_DISCARD(ROUTER2, L3_DISCARDS), + MLXSW_SP_RXL_DISCARD(ING_ROUTER_NON_IP_PACKET, L3_DISCARDS), + MLXSW_SP_RXL_DISCARD(ING_ROUTER_UC_DIP_MC_DMAC, L3_DISCARDS), + MLXSW_SP_RXL_DISCARD(ING_ROUTER_DIP_LB, L3_DISCARDS), + MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_MC, L3_DISCARDS), + MLXSW_SP_RXL_DISCARD(ING_ROUTER_SIP_LB, L3_DISCARDS), + MLXSW_SP_RXL_DISCARD(ING_ROUTER_CORRUPTED_IP_HDR, L3_DISCARDS), + MLXSW_SP_RXL_DISCARD(ING_ROUTER_IPV4_SIP_BC, L3_DISCARDS), + MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_RESERVED_SCOPE, L3_DISCARDS), + MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, L3_DISCARDS), + MLXSW_SP_RXL_EXCEPTION(MTUERROR, ROUTER_EXP, TRAP_TO_CPU), + MLXSW_SP_RXL_EXCEPTION(TTLERROR, ROUTER_EXP, TRAP_TO_CPU), + MLXSW_SP_RXL_EXCEPTION(RPF, RPF, TRAP_TO_CPU), + MLXSW_SP_RXL_EXCEPTION(RTR_INGRESS1, REMOTE_ROUTE, TRAP_TO_CPU), + MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV4, HOST_MISS, TRAP_TO_CPU), + MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, HOST_MISS, TRAP_TO_CPU), + MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, REMOTE_ROUTE, + TRAP_EXCEPTION_TO_CPU), + MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM4, ROUTER_EXP, + TRAP_EXCEPTION_TO_CPU), + MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6, ROUTER_EXP, + TRAP_EXCEPTION_TO_CPU), }; /* Mapping between hardware trap and devlink trap. Multiple hardware traps can @@ -54,6 +104,25 @@ static u16 mlxsw_sp_listener_devlink_map[] = { DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST, DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST, DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER, + DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE, + DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET, + DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC, + DEVLINK_TRAP_GENERIC_ID_DIP_LB, + DEVLINK_TRAP_GENERIC_ID_SIP_MC, + DEVLINK_TRAP_GENERIC_ID_SIP_LB, + DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR, + DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC, + DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE, + DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, + DEVLINK_TRAP_GENERIC_ID_MTU_ERROR, + DEVLINK_TRAP_GENERIC_ID_TTL_ERROR, + DEVLINK_TRAP_GENERIC_ID_RPF, + DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE, + DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH, + DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH, + DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH, + DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS, + DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS, }; static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, @@ -104,6 +173,30 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port, consume_skb(skb); } +static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port, + void *trap_ctx) +{ + struct devlink_port *in_devlink_port; + struct mlxsw_sp_port *mlxsw_sp_port; + struct mlxsw_sp *mlxsw_sp; + struct devlink *devlink; + + mlxsw_sp = devlink_trap_ctx_priv(trap_ctx); + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + + if (mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port)) + return; + + devlink = priv_to_devlink(mlxsw_sp->core); + in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core, + local_port); + skb_push(skb, ETH_HLEN); + devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port); + 
skb_pull(skb, ETH_HLEN); + skb->offload_fwd_mark = 1; + netif_receive_skb(skb); +} + int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp) { struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); @@ -211,6 +304,7 @@ mlxsw_sp_trap_group_policer_init(struct mlxsw_sp *mlxsw_sp, u32 rate; switch (group->id) { + case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS:/* fall through */ case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS: policer_id = MLXSW_SP_DISCARD_POLICER_ID; ir_units = MLXSW_REG_QPCR_IR_UNITS_M; @@ -242,6 +336,12 @@ __mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp, priority = 0; tc = 1; break; + case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS: + group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS; + policer_id = MLXSW_SP_DISCARD_POLICER_ID; + priority = 0; + tc = 1; + break; default: return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 7618f084cae9..0c1c142bb6b0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -49,6 +49,7 @@ enum { MLXSW_TRAP_ID_IPV6_DHCP = 0x69, MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F, MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, + MLXSW_TRAP_ID_RTR_INGRESS1 = 0x71, MLXSW_TRAP_ID_IPV6_PIM = 0x79, MLXSW_TRAP_ID_IPV6_VRRP = 0x7A, MLXSW_TRAP_ID_IPV4_BGP = 0x88, @@ -66,6 +67,8 @@ enum { MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD, MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7, + MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130, + MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131, MLXSW_TRAP_ID_DISCARD_ING_PACKET_SMAC_MC = 0x140, MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VTAG_ALLOW = 0x148, MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VLAN = 0x149, @@ -73,6 +76,18 @@ enum { MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_UC = 0x150, MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_MC_NULL = 0x151, MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_LB = 0x152, + MLXSW_TRAP_ID_DISCARD_ING_ROUTER_NON_IP_PACKET = 0x160, + MLXSW_TRAP_ID_DISCARD_ING_ROUTER_UC_DIP_MC_DMAC = 0x161, + MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LB = 0x162, + MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_MC = 0x163, + MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LB = 0x165, + MLXSW_TRAP_ID_DISCARD_ING_ROUTER_CORRUPTED_IP_HDR = 0x167, + MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A, + MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B, + MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B, + MLXSW_TRAP_ID_DISCARD_ROUTER_LPM6 = 0x17C, + MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_RESERVED_SCOPE = 0x1B0, + MLXSW_TRAP_ID_DISCARD_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE = 0x1B1, MLXSW_TRAP_ID_ACL0 = 0x1C0, /* Multicast trap used for routes with trap action */ MLXSW_TRAP_ID_ACL1 = 0x1C1, diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 57b26c2acf87..afe52463dc57 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -11,7 +11,9 @@ #include "lan743x_ptp.h" -#define LAN743X_NUMBER_OF_GPIO (12) +#define LAN743X_LED0_ENABLE 20 /* LED0 offset in HW_CFG */ +#define LAN743X_LED_ENABLE(pin) BIT(LAN743X_LED0_ENABLE + (pin)) + #define LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB (31249999) #define LAN743X_PTP_MAX_FINE_ADJ_IN_SCALED_PPM (2047999934) @@ -139,19 +141,20 @@ done: spin_unlock_bh(&ptp->tx_ts_lock); } -static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter) +static int lan743x_ptp_reserve_event_ch(struct lan743x_adapter *adapter, + int event_channel) { struct lan743x_ptp *ptp = &adapter->ptp; int result = -ENODEV; - int index = 0; 
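A tiny self-contained model of the LED/GPIO mux bookkeeping introduced above: a snapshot of which pins were LED-enabled in HW_CFG at probe time decides whether the mux bit (bit 20 + pin) gets cleared when a pin is claimed for 1588 output and set again on release. The register value and the four-LED count are illustrative assumptions, not values read from a LAN7430.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LED0_ENABLE	20			/* LED0 offset in HW_CFG, as above */
#define LED_ENABLE(pin)	(1u << (LED0_ENABLE + (pin)))
#define N_LED		4			/* assumed LED count, for illustration */

static uint32_t hw_cfg = 0x00f00000;		/* made-up value: all four LEDs enabled */
static bool led_enabled[N_LED];			/* snapshot taken once at probe time */

static void led_mux_save(void)
{
	for (int pin = 0; pin < N_LED; pin++)
		led_enabled[pin] = hw_cfg & LED_ENABLE(pin);
}

/* Borrow/return a pin: only touch HW_CFG for pins that were LEDs to begin with */
static void led_mux_enable(int pin, bool enable)
{
	if (pin >= N_LED || !led_enabled[pin])
		return;
	if (enable)
		hw_cfg |= LED_ENABLE(pin);
	else
		hw_cfg &= ~LED_ENABLE(pin);
}

int main(void)
{
	led_mux_save();
	led_mux_enable(2, false);		/* claim pin 2 as a 1588 GPIO output */
	printf("HW_CFG while pin 2 is a GPIO: 0x%08x\n", hw_cfg);
	led_mux_enable(2, true);		/* release it back to the LED function */
	printf("HW_CFG after release:        0x%08x\n", hw_cfg);
	return 0;
}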
mutex_lock(&ptp->command_lock); - for (index = 0; index < LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS; index++) { - if (!(test_bit(index, &ptp->used_event_ch))) { - ptp->used_event_ch |= BIT(index); - result = index; - break; - } + if (!(test_bit(event_channel, &ptp->used_event_ch))) { + ptp->used_event_ch |= BIT(event_channel); + result = event_channel; + } else { + netif_warn(adapter, drv, adapter->netdev, + "attempted to reserved a used event_channel = %d\n", + event_channel); } mutex_unlock(&ptp->command_lock); return result; @@ -179,12 +182,62 @@ static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter, static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter, s64 time_step_ns); +static void lan743x_led_mux_enable(struct lan743x_adapter *adapter, + int pin, bool enable) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + + if (ptp->leds_multiplexed && + ptp->led_enabled[pin]) { + u32 val = lan743x_csr_read(adapter, HW_CFG); + + if (enable) + val |= LAN743X_LED_ENABLE(pin); + else + val &= ~LAN743X_LED_ENABLE(pin); + + lan743x_csr_write(adapter, HW_CFG, val); + } +} + +static void lan743x_led_mux_save(struct lan743x_adapter *adapter) +{ + struct lan743x_ptp *ptp = &adapter->ptp; + u32 id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_; + + if (id_rev == ID_REV_ID_LAN7430_) { + int i; + u32 val = lan743x_csr_read(adapter, HW_CFG); + + for (i = 0; i < LAN7430_N_LED; i++) { + bool led_enabled = (val & LAN743X_LED_ENABLE(i)) != 0; + + ptp->led_enabled[i] = led_enabled; + } + ptp->leds_multiplexed = true; + } else { + ptp->leds_multiplexed = false; + } +} + +static void lan743x_led_mux_restore(struct lan743x_adapter *adapter) +{ + u32 id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_; + + if (id_rev == ID_REV_ID_LAN7430_) { + int i; + + for (i = 0; i < LAN7430_N_LED; i++) + lan743x_led_mux_enable(adapter, i, true); + } +} + static int lan743x_gpio_rsrv_ptp_out(struct lan743x_adapter *adapter, - int bit, int ptp_channel) + int pin, int event_channel) { struct lan743x_gpio *gpio = &adapter->gpio; unsigned long irq_flags = 0; - int bit_mask = BIT(bit); + int bit_mask = BIT(pin); int ret = -EBUSY; spin_lock_irqsave(&gpio->gpio_lock, irq_flags); @@ -194,41 +247,44 @@ static int lan743x_gpio_rsrv_ptp_out(struct lan743x_adapter *adapter, gpio->output_bits |= bit_mask; gpio->ptp_bits |= bit_mask; + /* assign pin to GPIO function */ + lan743x_led_mux_enable(adapter, pin, false); + /* set as output, and zero initial value */ - gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(bit); - gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit); + gpio->gpio_cfg0 |= GPIO_CFG0_GPIO_DIR_BIT_(pin); + gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(pin); lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0); /* enable gpio, and set buffer type to push pull */ - gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOEN_BIT_(bit); - gpio->gpio_cfg1 |= GPIO_CFG1_GPIOBUF_BIT_(bit); + gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOEN_BIT_(pin); + gpio->gpio_cfg1 |= GPIO_CFG1_GPIOBUF_BIT_(pin); lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1); /* set 1588 polarity to high */ - gpio->gpio_cfg2 |= GPIO_CFG2_1588_POL_BIT_(bit); + gpio->gpio_cfg2 |= GPIO_CFG2_1588_POL_BIT_(pin); lan743x_csr_write(adapter, GPIO_CFG2, gpio->gpio_cfg2); - if (!ptp_channel) { + if (event_channel == 0) { /* use channel A */ - gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_CH_SEL_BIT_(bit); + gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_CH_SEL_BIT_(pin); } else { /* use channel B */ - gpio->gpio_cfg3 |= GPIO_CFG3_1588_CH_SEL_BIT_(bit); + gpio->gpio_cfg3 |= GPIO_CFG3_1588_CH_SEL_BIT_(pin); } - 
gpio->gpio_cfg3 |= GPIO_CFG3_1588_OE_BIT_(bit); + gpio->gpio_cfg3 |= GPIO_CFG3_1588_OE_BIT_(pin); lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3); - ret = bit; + ret = pin; } spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags); return ret; } -static void lan743x_gpio_release(struct lan743x_adapter *adapter, int bit) +static void lan743x_gpio_release(struct lan743x_adapter *adapter, int pin) { struct lan743x_gpio *gpio = &adapter->gpio; unsigned long irq_flags = 0; - int bit_mask = BIT(bit); + int bit_mask = BIT(pin); spin_lock_irqsave(&gpio->gpio_lock, irq_flags); if (gpio->used_bits & bit_mask) { @@ -239,21 +295,24 @@ static void lan743x_gpio_release(struct lan743x_adapter *adapter, int bit) if (gpio->ptp_bits & bit_mask) { gpio->ptp_bits &= ~bit_mask; /* disable ptp output */ - gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_OE_BIT_(bit); + gpio->gpio_cfg3 &= ~GPIO_CFG3_1588_OE_BIT_(pin); lan743x_csr_write(adapter, GPIO_CFG3, gpio->gpio_cfg3); } /* release gpio output */ /* disable gpio */ - gpio->gpio_cfg1 |= GPIO_CFG1_GPIOEN_BIT_(bit); - gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOBUF_BIT_(bit); + gpio->gpio_cfg1 |= GPIO_CFG1_GPIOEN_BIT_(pin); + gpio->gpio_cfg1 &= ~GPIO_CFG1_GPIOBUF_BIT_(pin); lan743x_csr_write(adapter, GPIO_CFG1, gpio->gpio_cfg1); /* reset back to input */ - gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DIR_BIT_(bit); - gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(bit); + gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DIR_BIT_(pin); + gpio->gpio_cfg0 &= ~GPIO_CFG0_GPIO_DATA_BIT_(pin); lan743x_csr_write(adapter, GPIO_CFG0, gpio->gpio_cfg0); + + /* assign pin to original function */ + lan743x_led_mux_enable(adapter, pin, true); } } spin_unlock_irqrestore(&gpio->gpio_lock, irq_flags); @@ -391,89 +450,95 @@ static int lan743x_ptpci_settime64(struct ptp_clock_info *ptpci, return 0; } -static void lan743x_ptp_perout_off(struct lan743x_adapter *adapter) +static void lan743x_ptp_perout_off(struct lan743x_adapter *adapter, + unsigned int index) { struct lan743x_ptp *ptp = &adapter->ptp; u32 general_config = 0; + struct lan743x_ptp_perout *perout = &ptp->perout[index]; - if (ptp->perout_gpio_bit >= 0) { - lan743x_gpio_release(adapter, ptp->perout_gpio_bit); - ptp->perout_gpio_bit = -1; + if (perout->gpio_pin >= 0) { + lan743x_gpio_release(adapter, perout->gpio_pin); + perout->gpio_pin = -1; } - if (ptp->perout_event_ch >= 0) { + if (perout->event_ch >= 0) { /* set target to far in the future, effectively disabling it */ lan743x_csr_write(adapter, - PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch), + PTP_CLOCK_TARGET_SEC_X(perout->event_ch), 0xFFFF0000); lan743x_csr_write(adapter, - PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch), + PTP_CLOCK_TARGET_NS_X(perout->event_ch), 0); general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG); general_config |= PTP_GENERAL_CONFIG_RELOAD_ADD_X_ - (ptp->perout_event_ch); + (perout->event_ch); lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config); - lan743x_ptp_release_event_ch(adapter, ptp->perout_event_ch); - ptp->perout_event_ch = -1; + lan743x_ptp_release_event_ch(adapter, perout->event_ch); + perout->event_ch = -1; } } static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on, - struct ptp_perout_request *perout) + struct ptp_perout_request *perout_request) { struct lan743x_ptp *ptp = &adapter->ptp; u32 period_sec = 0, period_nsec = 0; u32 start_sec = 0, start_nsec = 0; u32 general_config = 0; int pulse_width = 0; - int perout_bit = 0; - - if (!on) { - lan743x_ptp_perout_off(adapter); + int perout_pin = 0; + unsigned int index = perout_request->index; + 
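The reworked periodic-output path in this hunk no longer hard-codes one GPIO per board: it maps the requested channel to whichever pin userspace assigned to PTP_PF_PEROUT via ptp_find_pin(), and it rejects requests carrying flags it cannot honour. A hedged userspace sketch of the ABI this serves is below; the device path, pin index and period are assumptions for illustration, not values taken from the driver.

/* Userspace sketch: route periodic-output channel 0 to pin 2, then start
 * a 1 Hz signal.  PTP_PIN_SETFUNC ends up in the driver's .verify
 * callback, PTP_PEROUT_REQUEST in its .enable callback.  A real user
 * would also fill req.start from the current PTP time.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_pin_desc pin = { .index = 2, .func = PTP_PF_PEROUT, .chan = 0 };
	struct ptp_perout_request req = { .index = 0 };
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, PTP_PIN_SETFUNC, &pin))
		return 1;

	req.period.sec = 1;	/* one pulse per second */
	if (ioctl(fd, PTP_PEROUT_REQUEST, &req))
		return 1;
	return 0;
}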
struct lan743x_ptp_perout *perout = &ptp->perout[index]; + + /* Reject requests with unsupported flags */ + if (perout_request->flags) + return -EOPNOTSUPP; + + if (on) { + perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, + perout_request->index); + if (perout_pin < 0) + return -EBUSY; + } else { + lan743x_ptp_perout_off(adapter, index); return 0; } - if (ptp->perout_event_ch >= 0 || - ptp->perout_gpio_bit >= 0) { + if (perout->event_ch >= 0 || + perout->gpio_pin >= 0) { /* already on, turn off first */ - lan743x_ptp_perout_off(adapter); + lan743x_ptp_perout_off(adapter, index); } - ptp->perout_event_ch = lan743x_ptp_reserve_event_ch(adapter); - if (ptp->perout_event_ch < 0) { + perout->event_ch = lan743x_ptp_reserve_event_ch(adapter, index); + + if (perout->event_ch < 0) { netif_warn(adapter, drv, adapter->netdev, - "Failed to reserve event channel for PEROUT\n"); + "Failed to reserve event channel %d for PEROUT\n", + index); goto failed; } - switch (adapter->csr.id_rev & ID_REV_ID_MASK_) { - case ID_REV_ID_LAN7430_: - perout_bit = 2;/* GPIO 2 is preferred on EVB LAN7430 */ - break; - case ID_REV_ID_LAN7431_: - perout_bit = 4;/* GPIO 4 is preferred on EVB LAN7431 */ - break; - } + perout->gpio_pin = lan743x_gpio_rsrv_ptp_out(adapter, + perout_pin, + perout->event_ch); - ptp->perout_gpio_bit = lan743x_gpio_rsrv_ptp_out(adapter, - perout_bit, - ptp->perout_event_ch); - - if (ptp->perout_gpio_bit < 0) { + if (perout->gpio_pin < 0) { netif_warn(adapter, drv, adapter->netdev, "Failed to reserve gpio %d for PEROUT\n", - perout_bit); + perout_pin); goto failed; } - start_sec = perout->start.sec; - start_sec += perout->start.nsec / 1000000000; - start_nsec = perout->start.nsec % 1000000000; + start_sec = perout_request->start.sec; + start_sec += perout_request->start.nsec / 1000000000; + start_nsec = perout_request->start.nsec % 1000000000; - period_sec = perout->period.sec; - period_sec += perout->period.nsec / 1000000000; - period_nsec = perout->period.nsec % 1000000000; + period_sec = perout_request->period.sec; + period_sec += perout_request->period.nsec / 1000000000; + period_nsec = perout_request->period.nsec % 1000000000; if (period_sec == 0) { if (period_nsec >= 400000000) { @@ -499,41 +564,41 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on, /* turn off by setting target far in future */ lan743x_csr_write(adapter, - PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch), + PTP_CLOCK_TARGET_SEC_X(perout->event_ch), 0xFFFF0000); lan743x_csr_write(adapter, - PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch), 0); + PTP_CLOCK_TARGET_NS_X(perout->event_ch), 0); /* Configure to pulse every period */ general_config = lan743x_csr_read(adapter, PTP_GENERAL_CONFIG); general_config &= ~(PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_ - (ptp->perout_event_ch)); + (perout->event_ch)); general_config |= PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_ - (ptp->perout_event_ch, pulse_width); + (perout->event_ch, pulse_width); general_config &= ~PTP_GENERAL_CONFIG_RELOAD_ADD_X_ - (ptp->perout_event_ch); + (perout->event_ch); lan743x_csr_write(adapter, PTP_GENERAL_CONFIG, general_config); /* set the reload to one toggle cycle */ lan743x_csr_write(adapter, - PTP_CLOCK_TARGET_RELOAD_SEC_X(ptp->perout_event_ch), + PTP_CLOCK_TARGET_RELOAD_SEC_X(perout->event_ch), period_sec); lan743x_csr_write(adapter, - PTP_CLOCK_TARGET_RELOAD_NS_X(ptp->perout_event_ch), + PTP_CLOCK_TARGET_RELOAD_NS_X(perout->event_ch), period_nsec); /* set the start time */ lan743x_csr_write(adapter, - 
PTP_CLOCK_TARGET_SEC_X(ptp->perout_event_ch), + PTP_CLOCK_TARGET_SEC_X(perout->event_ch), start_sec); lan743x_csr_write(adapter, - PTP_CLOCK_TARGET_NS_X(ptp->perout_event_ch), + PTP_CLOCK_TARGET_NS_X(perout->event_ch), start_nsec); return 0; failed: - lan743x_ptp_perout_off(adapter); + lan743x_ptp_perout_off(adapter, index); return -ENODEV; } @@ -550,7 +615,7 @@ static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci, case PTP_CLK_REQ_EXTTS: return -EINVAL; case PTP_CLK_REQ_PEROUT: - if (request->perout.index == 0) + if (request->perout.index < ptpci->n_per_out) return lan743x_ptp_perout(adapter, on, &request->perout); return -EINVAL; @@ -568,6 +633,29 @@ static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci, return 0; } +static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp, + unsigned int pin, + enum ptp_pin_function func, + unsigned int chan) +{ + int result = 0; + + /* Confirm the requested function is supported. Parameter + * validation is done by the caller. + */ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_PEROUT: + break; + case PTP_PF_EXTTS: + case PTP_PF_PHYSYNC: + default: + result = -1; + break; + } + return result; +} + static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci) { struct lan743x_ptp *ptp = @@ -861,12 +949,19 @@ void lan743x_ptp_update_latency(struct lan743x_adapter *adapter, int lan743x_ptp_init(struct lan743x_adapter *adapter) { struct lan743x_ptp *ptp = &adapter->ptp; + int i; mutex_init(&ptp->command_lock); spin_lock_init(&ptp->tx_ts_lock); ptp->used_event_ch = 0; - ptp->perout_event_ch = -1; - ptp->perout_gpio_bit = -1; + + for (i = 0; i < LAN743X_PTP_N_EVENT_CHAN; i++) { + ptp->perout[i].event_ch = -1; + ptp->perout[i].gpio_pin = -1; + } + + lan743x_led_mux_save(adapter); + return 0; } @@ -875,6 +970,8 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter) struct lan743x_ptp *ptp = &adapter->ptp; int ret = -ENODEV; u32 temp; + int i; + int n_pins; lan743x_ptp_reset(adapter); lan743x_ptp_sync_to_system_clock(adapter); @@ -890,10 +987,32 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter) if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK)) return 0; - snprintf(ptp->pin_config[0].name, 32, "lan743x_ptp_pin_0"); - ptp->pin_config[0].index = 0; - ptp->pin_config[0].func = PTP_PF_PEROUT; - ptp->pin_config[0].chan = 0; + switch (adapter->csr.id_rev & ID_REV_ID_MASK_) { + case ID_REV_ID_LAN7430_: + n_pins = LAN7430_N_GPIO; + break; + case ID_REV_ID_LAN7431_: + n_pins = LAN7431_N_GPIO; + break; + default: + netif_warn(adapter, drv, adapter->netdev, + "Unknown LAN743x (%08x). 
Assuming no GPIO\n", + adapter->csr.id_rev); + n_pins = 0; + break; + } + + if (n_pins > LAN743X_PTP_N_GPIO) + n_pins = LAN743X_PTP_N_GPIO; + + for (i = 0; i < n_pins; i++) { + struct ptp_pin_desc *ptp_pin = &ptp->pin_config[i]; + + snprintf(ptp_pin->name, + sizeof(ptp_pin->name), "lan743x_ptp_pin_%02d", i); + ptp_pin->index = i; + ptp_pin->func = PTP_PF_NONE; + } ptp->ptp_clock_info.owner = THIS_MODULE; snprintf(ptp->ptp_clock_info.name, 16, "%pm", @@ -901,10 +1020,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter) ptp->ptp_clock_info.max_adj = LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB; ptp->ptp_clock_info.n_alarm = 0; ptp->ptp_clock_info.n_ext_ts = 0; - ptp->ptp_clock_info.n_per_out = 1; - ptp->ptp_clock_info.n_pins = 0; + ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN; + ptp->ptp_clock_info.n_pins = n_pins; ptp->ptp_clock_info.pps = 0; - ptp->ptp_clock_info.pin_config = NULL; + ptp->ptp_clock_info.pin_config = ptp->pin_config; ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine; ptp->ptp_clock_info.adjfreq = lan743x_ptpci_adjfreq; ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime; @@ -913,7 +1032,7 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter) ptp->ptp_clock_info.settime64 = lan743x_ptpci_settime64; ptp->ptp_clock_info.enable = lan743x_ptpci_enable; ptp->ptp_clock_info.do_aux_work = lan743x_ptpci_do_aux_work; - ptp->ptp_clock_info.verify = NULL; + ptp->ptp_clock_info.verify = lan743x_ptpci_verify_pin_config; ptp->ptp_clock = ptp_clock_register(&ptp->ptp_clock_info, &adapter->pdev->dev); @@ -939,7 +1058,7 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter) int index; if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) && - ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED) { + (ptp->flags & PTP_FLAG_PTP_CLOCK_REGISTERED)) { ptp_clock_unregister(ptp->ptp_clock); ptp->ptp_clock = NULL; ptp->flags &= ~PTP_FLAG_PTP_CLOCK_REGISTERED; @@ -973,6 +1092,8 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter) ptp->pending_tx_timestamps = 0; spin_unlock_bh(&ptp->tx_ts_lock); + lan743x_led_mux_restore(adapter); + lan743x_ptp_disable(adapter); } diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h index 5fc1b3cd5e33..7663bf5d2e33 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.h +++ b/drivers/net/ethernet/microchip/lan743x_ptp.h @@ -7,6 +7,18 @@ #include "linux/ptp_clock_kernel.h" #include "linux/netdevice.h" +#define LAN7430_N_LED 4 +#define LAN7430_N_GPIO 4 /* multiplexed with PHY LEDs */ +#define LAN7431_N_GPIO 12 + +#define LAN743X_PTP_N_GPIO LAN7431_N_GPIO + +/* the number of periodic outputs is limited by number of + * PTP clock event channels + */ +#define LAN743X_PTP_N_EVENT_CHAN 2 +#define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN + struct lan743x_adapter; /* GPIO */ @@ -40,9 +52,14 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); #define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4) -#define PTP_FLAG_PTP_CLOCK_REGISTERED BIT(1) +#define PTP_FLAG_PTP_CLOCK_REGISTERED BIT(1) #define PTP_FLAG_ISR_ENABLED BIT(2) +struct lan743x_ptp_perout { + int event_ch; /* PTP event channel (0=channel A, 1=channel B) */ + int gpio_pin; /* GPIO pin where output appears */ +}; + struct lan743x_ptp { int flags; @@ -51,13 +68,13 @@ struct lan743x_ptp { struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; - struct ptp_pin_desc pin_config[1]; + struct ptp_pin_desc pin_config[LAN743X_PTP_N_GPIO]; -#define LAN743X_PTP_NUMBER_OF_EVENT_CHANNELS (2) unsigned long used_event_ch; + struct 
lan743x_ptp_perout perout[LAN743X_PTP_N_PEROUT]; - int perout_event_ch; - int perout_gpio_bit; + bool leds_multiplexed; + bool led_enabled[LAN7430_N_LED]; /* tx_ts_lock: used to prevent concurrent access to timestamp arrays */ spinlock_t tx_ts_lock; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 344539c0d3aa..90c46ba763d7 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -132,11 +132,11 @@ static void ocelot_mact_init(struct ocelot *ocelot) ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS); } -static void ocelot_vcap_enable(struct ocelot *ocelot, struct ocelot_port *port) +static void ocelot_vcap_enable(struct ocelot *ocelot, int port) { ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA | ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa), - ANA_PORT_VCAP_S2_CFG, port->chip_port); + ANA_PORT_VCAP_S2_CFG, port); } static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot) @@ -169,117 +169,178 @@ static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask) return ocelot_vlant_wait_for_completion(ocelot); } -static void ocelot_vlan_mode(struct ocelot_port *port, +static void ocelot_vlan_mode(struct ocelot *ocelot, int port, netdev_features_t features) { - struct ocelot *ocelot = port->ocelot; - u8 p = port->chip_port; u32 val; /* Filtering */ val = ocelot_read(ocelot, ANA_VLANMASK); if (features & NETIF_F_HW_VLAN_CTAG_FILTER) - val |= BIT(p); + val |= BIT(port); else - val &= ~BIT(p); + val &= ~BIT(port); ocelot_write(ocelot, val, ANA_VLANMASK); } -static void ocelot_vlan_port_apply(struct ocelot *ocelot, - struct ocelot_port *port) +void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, + bool vlan_aware) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; u32 val; - /* Ingress clasification (ANA_PORT_VLAN_CFG) */ - /* Default vlan to clasify for untagged frames (may be zero) */ - val = ANA_PORT_VLAN_CFG_VLAN_VID(port->pvid); - if (port->vlan_aware) - val |= ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | - ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1); - + if (vlan_aware) + val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1); + else + val = 0; ocelot_rmw_gix(ocelot, val, - ANA_PORT_VLAN_CFG_VLAN_VID_M | ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M, - ANA_PORT_VLAN_CFG, port->chip_port); + ANA_PORT_VLAN_CFG, port); - /* Drop frames with multicast source address */ - val = ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA; - if (port->vlan_aware && !port->vid) + if (vlan_aware && !ocelot_port->vid) /* If port is vlan-aware and tagged, drop untagged and priority * tagged frames. */ - val |= ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA | + val = ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA; + else + val = 0; + ocelot_rmw_gix(ocelot, val, + ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA | ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | - ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA; - ocelot_write_gix(ocelot, val, ANA_PORT_DROP_CFG, port->chip_port); - - /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q. */ - val = REW_TAG_CFG_TAG_TPID_CFG(0); + ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, + ANA_PORT_DROP_CFG, port); - if (port->vlan_aware) { - if (port->vid) + if (vlan_aware) { + if (ocelot_port->vid) /* Tag all frames except when VID == DEFAULT_VLAN */ val |= REW_TAG_CFG_TAG_CFG(1); else /* Tag all frames */ val |= REW_TAG_CFG_TAG_CFG(3); + } else { + /* Port tagging disabled. 
*/ + val = REW_TAG_CFG_TAG_CFG(0); } ocelot_rmw_gix(ocelot, val, - REW_TAG_CFG_TAG_TPID_CFG_M | REW_TAG_CFG_TAG_CFG_M, - REW_TAG_CFG, port->chip_port); + REW_TAG_CFG, port); +} +EXPORT_SYMBOL(ocelot_port_vlan_filtering); - /* Set default VLAN and tag type to 8021Q. */ - val = REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q) | - REW_PORT_VLAN_CFG_PORT_VID(port->vid); - ocelot_rmw_gix(ocelot, val, - REW_PORT_VLAN_CFG_PORT_TPID_M | +static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port, + u16 vid) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (ocelot_port->vid != vid) { + /* Always permit deleting the native VLAN (vid = 0) */ + if (ocelot_port->vid && vid) { + dev_err(ocelot->dev, + "Port already has a native VLAN: %d\n", + ocelot_port->vid); + return -EBUSY; + } + ocelot_port->vid = vid; + } + + ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(vid), REW_PORT_VLAN_CFG_PORT_VID_M, - REW_PORT_VLAN_CFG, port->chip_port); + REW_PORT_VLAN_CFG, port); + + return 0; } -static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, - bool untagged) +/* Default vlan to clasify for untagged frames (may be zero) */ +static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, u16 pvid) { - struct ocelot_port *port = netdev_priv(dev); - struct ocelot *ocelot = port->ocelot; - int ret; + struct ocelot_port *ocelot_port = ocelot->ports[port]; - /* Add the port MAC address to with the right VLAN information */ - ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid, - ENTRYTYPE_LOCKED); + ocelot_rmw_gix(ocelot, + ANA_PORT_VLAN_CFG_VLAN_VID(pvid), + ANA_PORT_VLAN_CFG_VLAN_VID_M, + ANA_PORT_VLAN_CFG, port); + + ocelot_port->pvid = pvid; +} + +int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, + bool untagged) +{ + int ret; /* Make the port a member of the VLAN */ - ocelot->vlan_mask[vid] |= BIT(port->chip_port); + ocelot->vlan_mask[vid] |= BIT(port); ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); if (ret) return ret; /* Default ingress vlan classification */ if (pvid) - port->pvid = vid; + ocelot_port_set_pvid(ocelot, port, vid); /* Untagged egress vlan clasification */ - if (untagged && port->vid != vid) { - if (port->vid) { - dev_err(ocelot->dev, - "Port already has a native VLAN: %d\n", - port->vid); - return -EBUSY; - } - port->vid = vid; + if (untagged) { + ret = ocelot_port_set_native_vlan(ocelot, port, vid); + if (ret) + return ret; } - ocelot_vlan_port_apply(ocelot, port); + return 0; +} +EXPORT_SYMBOL(ocelot_vlan_add); + +static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, + bool untagged) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + int ret; + + ret = ocelot_vlan_add(ocelot, port, vid, pvid, untagged); + if (ret) + return ret; + + /* Add the port MAC address to with the right VLAN information */ + ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid, + ENTRYTYPE_LOCKED); + + return 0; +} + +int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int ret; + + /* Stop the port from being a member of the vlan */ + ocelot->vlan_mask[vid] &= ~BIT(port); + ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); + if (ret) + return ret; + + /* Ingress */ + if (ocelot_port->pvid == vid) + ocelot_port_set_pvid(ocelot, port, 0); + + /* Egress */ + if (ocelot_port->vid == vid) + 
ocelot_port_set_native_vlan(ocelot, port, 0); return 0; } +EXPORT_SYMBOL(ocelot_vlan_del); static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid) { - struct ocelot_port *port = netdev_priv(dev); - struct ocelot *ocelot = port->ocelot; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; int ret; /* 8021q removes VID 0 on module unload for all interfaces @@ -289,24 +350,12 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid) if (vid == 0) return 0; - /* Del the port MAC address to with the right VLAN information */ - ocelot_mact_forget(ocelot, dev->dev_addr, vid); - - /* Stop the port from being a member of the vlan */ - ocelot->vlan_mask[vid] &= ~BIT(port->chip_port); - ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); + ret = ocelot_vlan_del(ocelot, port, vid); if (ret) return ret; - /* Ingress */ - if (port->pvid == vid) - port->pvid = 0; - - /* Egress */ - if (port->vid == vid) - port->vid = 0; - - ocelot_vlan_port_apply(ocelot, port); + /* Del the port MAC address to with the right VLAN information */ + ocelot_mact_forget(ocelot, dev->dev_addr, vid); return 0; } @@ -333,16 +382,11 @@ static void ocelot_vlan_init(struct ocelot *ocelot) ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0); ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]); - /* Configure the CPU port to be VLAN aware */ - ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) | - ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | - ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1), - ANA_PORT_VLAN_CFG, ocelot->num_phys_ports); - /* Set vlan ingress filter mask to all ports but the CPU port by * default. */ - ocelot_write(ocelot, GENMASK(9, 0), ANA_VLANMASK); + ocelot_write(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), + ANA_VLANMASK); for (port = 0; port < ocelot->num_phys_ports; port++) { ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port); @@ -362,14 +406,13 @@ static u16 ocelot_wm_enc(u16 value) return value; } -static void ocelot_port_adjust_link(struct net_device *dev) +void ocelot_adjust_link(struct ocelot *ocelot, int port, + struct phy_device *phydev) { - struct ocelot_port *port = netdev_priv(dev); - struct ocelot *ocelot = port->ocelot; - u8 p = port->chip_port; - int speed, atop_wm, mode = 0; + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int speed, mode = 0; - switch (dev->phydev->speed) { + switch (phydev->speed) { case SPEED_10: speed = OCELOT_SPEED_10; break; @@ -385,87 +428,41 @@ static void ocelot_port_adjust_link(struct net_device *dev) mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; break; default: - netdev_err(dev, "Unsupported PHY speed: %d\n", - dev->phydev->speed); + dev_err(ocelot->dev, "Unsupported PHY speed on port %d: %d\n", + port, phydev->speed); return; } - phy_print_status(dev->phydev); + phy_print_status(phydev); - if (!dev->phydev->link) + if (!phydev->link) return; /* Only full duplex supported for now */ - ocelot_port_writel(port, DEV_MAC_MODE_CFG_FDX_ENA | + ocelot_port_writel(ocelot_port, DEV_MAC_MODE_CFG_FDX_ENA | mode, DEV_MAC_MODE_CFG); - /* Set MAC IFG Gaps - * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0 - * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5 - */ - ocelot_port_writel(port, DEV_MAC_IFG_CFG_TX_IFG(5), DEV_MAC_IFG_CFG); - - /* Load seed (0) and set MAC HDX late collision */ - ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) | - DEV_MAC_HDX_CFG_SEED_LOAD, - DEV_MAC_HDX_CFG); - mdelay(1); - ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67), - DEV_MAC_HDX_CFG); - - 
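With these hunks converting ocelot.c to take (struct ocelot *, int port) and exporting its helpers, the VLAN and link handling can now be driven without going through a net_device. A minimal hypothetical caller is sketched below, assuming the ocelot driver's headers are available; my_port_setup is an assumption for illustration, while the ocelot_* calls and signatures are the ones exported in this series.

/* Hedged sketch of an external user of the newly exported helpers. */
static int my_port_setup(struct ocelot *ocelot, int port,
			 struct phy_device *phydev)
{
	int err;

	/* Make the port VLAN-aware, then join VID 100 as pvid, untagged */
	ocelot_port_vlan_filtering(ocelot, port, true);
	err = ocelot_vlan_add(ocelot, port, 100, true, true);
	if (err)
		return err;

	/* Program MAC/PCS speed when the PHY reports a link change */
	ocelot_adjust_link(ocelot, port, phydev);

	return 0;
}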
/* Disable HDX fast control */ - ocelot_port_writel(port, DEV_PORT_MISC_HDX_FAST_DIS, DEV_PORT_MISC); - - /* SGMII only for now */ - ocelot_port_writel(port, PCS1G_MODE_CFG_SGMII_MODE_ENA, PCS1G_MODE_CFG); - ocelot_port_writel(port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG); - - /* Enable PCS */ - ocelot_port_writel(port, PCS1G_CFG_PCS_ENA, PCS1G_CFG); - - /* No aneg on SGMII */ - ocelot_port_writel(port, 0, PCS1G_ANEG_CFG); - - /* No loopback */ - ocelot_port_writel(port, 0, PCS1G_LB_CFG); - - /* Set Max Length and maximum tags allowed */ - ocelot_port_writel(port, VLAN_ETH_FRAME_LEN, DEV_MAC_MAXLEN_CFG); - ocelot_port_writel(port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) | - DEV_MAC_TAGS_CFG_VLAN_AWR_ENA | - DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, - DEV_MAC_TAGS_CFG); + if (ocelot->ops->pcs_init) + ocelot->ops->pcs_init(ocelot, port); /* Enable MAC module */ - ocelot_port_writel(port, DEV_MAC_ENA_CFG_RX_ENA | + ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA | DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); /* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of * reset */ - ocelot_port_writel(port, DEV_CLOCK_CFG_LINK_SPEED(speed), + ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed), DEV_CLOCK_CFG); - /* Set SMAC of Pause frame (00:00:00:00:00:00) */ - ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_HIGH_CFG); - ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_LOW_CFG); - /* No PFC */ ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed), - ANA_PFC_PFC_CFG, p); - - /* Set Pause WM hysteresis - * 152 = 6 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ - * 101 = 4 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ - */ - ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA | - SYS_PAUSE_CFG_PAUSE_STOP(101) | - SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, p); + ANA_PFC_PFC_CFG, port); /* Core: Enable port for frame transfer */ ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, p); + QSYS_SWITCH_PORT_MODE, port); /* Flow control */ ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | @@ -473,64 +470,88 @@ static void ocelot_port_adjust_link(struct net_device *dev) SYS_MAC_FC_CFG_ZERO_PAUSE_ENA | SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | SYS_MAC_FC_CFG_FC_LINK_SPEED(speed), - SYS_MAC_FC_CFG, p); - ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, p); - - /* Tail dropping watermark */ - atop_wm = (ocelot->shared_queue_sz - 9 * VLAN_ETH_FRAME_LEN) / OCELOT_BUFFER_CELL_SZ; - ocelot_write_rix(ocelot, ocelot_wm_enc(9 * VLAN_ETH_FRAME_LEN), - SYS_ATOP, p); - ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG); + SYS_MAC_FC_CFG, port); + ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); } +EXPORT_SYMBOL(ocelot_adjust_link); -static int ocelot_port_open(struct net_device *dev) +static void ocelot_port_adjust_link(struct net_device *dev) { - struct ocelot_port *port = netdev_priv(dev); - struct ocelot *ocelot = port->ocelot; - int err; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + ocelot_adjust_link(ocelot, port, dev->phydev); +} +void ocelot_port_enable(struct ocelot *ocelot, int port, + struct phy_device *phy) +{ /* Enable receiving frames on the port, and activate auto-learning of * MAC addresses. 
*/ ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO | ANA_PORT_PORT_CFG_RECV_ENA | - ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port), - ANA_PORT_PORT_CFG, port->chip_port); + ANA_PORT_PORT_CFG_PORTID_VAL(port), + ANA_PORT_PORT_CFG, port); +} +EXPORT_SYMBOL(ocelot_port_enable); - if (port->serdes) { - err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, - port->phy_mode); +static int ocelot_port_open(struct net_device *dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + int err; + + if (priv->serdes) { + err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET, + priv->phy_mode); if (err) { netdev_err(dev, "Could not set mode of SerDes\n"); return err; } } - err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link, - port->phy_mode); + err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link, + priv->phy_mode); if (err) { netdev_err(dev, "Could not attach to PHY\n"); return err; } - dev->phydev = port->phy; + dev->phydev = priv->phy; + + phy_attached_info(priv->phy); + phy_start(priv->phy); + + ocelot_port_enable(ocelot, port, priv->phy); - phy_attached_info(port->phy); - phy_start(port->phy); return 0; } +void ocelot_port_disable(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG); + ocelot_rmw_rix(ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA, + QSYS_SWITCH_PORT_MODE, port); +} +EXPORT_SYMBOL(ocelot_port_disable); + static int ocelot_port_stop(struct net_device *dev) { - struct ocelot_port *port = netdev_priv(dev); + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; - phy_disconnect(port->phy); + phy_disconnect(priv->phy); dev->phydev = NULL; - ocelot_port_writel(port, 0, DEV_MAC_ENA_CFG); - ocelot_rmw_rix(port->ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, port->chip_port); + ocelot_port_disable(ocelot, port); + return 0; } @@ -556,13 +577,15 @@ static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info) static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) { + struct ocelot_port_private *priv = netdev_priv(dev); struct skb_shared_info *shinfo = skb_shinfo(skb); - struct ocelot_port *port = netdev_priv(dev); - struct ocelot *ocelot = port->ocelot; - u32 val, ifh[IFH_LEN]; + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + u32 val, ifh[OCELOT_TAG_LEN / 4]; struct frame_info info = {}; u8 grp = 0; /* Send everything on CPU group 0 */ unsigned int i, count, last; + int port = priv->chip_port; val = ocelot_read(ocelot, QS_INJ_STATUS); if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) || @@ -572,20 +595,20 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp); - info.port = BIT(port->chip_port); + info.port = BIT(port); info.tag_type = IFH_TAG_TYPE_C; info.vid = skb_vlan_tag_get(skb); /* Check if timestamping is needed */ if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) { - info.rew_op = port->ptp_cmd; - if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) - info.rew_op |= (port->ts_id % 4) << 3; + info.rew_op = ocelot_port->ptp_cmd; + if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) + info.rew_op |= (ocelot_port->ts_id % 4) << 3; } ocelot_gen_ifh(ifh, &info); - for (i = 0; i < IFH_LEN; i++) + for (i = 0; i < OCELOT_TAG_LEN / 
4; i++) ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]), QS_INJ_WR, grp); @@ -615,7 +638,7 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_bytes += skb->len; if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP && - port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { + ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { struct ocelot_skb *oskb = kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC); @@ -625,10 +648,10 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; oskb->skb = skb; - oskb->id = port->ts_id % 4; - port->ts_id++; + oskb->id = ocelot_port->ts_id % 4; + ocelot_port->ts_id++; - list_add_tail(&oskb->head, &port->skbs); + list_add_tail(&oskb->head, &ocelot_port->skbs); return NETDEV_TX_OK; } @@ -667,25 +690,29 @@ EXPORT_SYMBOL(ocelot_get_hwtimestamp); static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr) { - struct ocelot_port *port = netdev_priv(dev); + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; - return ocelot_mact_forget(port->ocelot, addr, port->pvid); + return ocelot_mact_forget(ocelot, addr, ocelot_port->pvid); } static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr) { - struct ocelot_port *port = netdev_priv(dev); + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; - return ocelot_mact_learn(port->ocelot, PGID_CPU, addr, port->pvid, + return ocelot_mact_learn(ocelot, PGID_CPU, addr, ocelot_port->pvid, ENTRYTYPE_LOCKED); } static void ocelot_set_rx_mode(struct net_device *dev) { - struct ocelot_port *port = netdev_priv(dev); - struct ocelot *ocelot = port->ocelot; - int i; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; u32 val; + int i; /* This doesn't handle promiscuous mode because the bridge core is * setting IFF_PROMISC on all slave interfaces and all frames would be @@ -701,10 +728,11 @@ static void ocelot_set_rx_mode(struct net_device *dev) static int ocelot_port_get_phys_port_name(struct net_device *dev, char *buf, size_t len) { - struct ocelot_port *port = netdev_priv(dev); + struct ocelot_port_private *priv = netdev_priv(dev); + int port = priv->chip_port; int ret; - ret = snprintf(buf, len, "p%d", port->chip_port); + ret = snprintf(buf, len, "p%d", port); if (ret >= len) return -EINVAL; @@ -713,15 +741,16 @@ static int ocelot_port_get_phys_port_name(struct net_device *dev, static int ocelot_port_set_mac_address(struct net_device *dev, void *p) { - struct ocelot_port *port = netdev_priv(dev); - struct ocelot *ocelot = port->ocelot; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; const struct sockaddr *addr = p; /* Learn the new net device MAC address in the mac table. */ - ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, port->pvid, + ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, ocelot_port->pvid, ENTRYTYPE_LOCKED); /* Then forget the previous one. 
*/ - ocelot_mact_forget(ocelot, dev->dev_addr, port->pvid); + ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid); ether_addr_copy(dev->dev_addr, addr->sa_data); return 0; @@ -730,11 +759,12 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p) static void ocelot_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { - struct ocelot_port *port = netdev_priv(dev); - struct ocelot *ocelot = port->ocelot; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; /* Configure the port to read the stats from */ - ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port->chip_port), + ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG); /* Get Rx stats */ @@ -765,21 +795,18 @@ static void ocelot_get_stats64(struct net_device *dev, stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION); } -static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, const unsigned char *addr, - u16 vid, u16 flags, - struct netlink_ext_ack *extack) +int ocelot_fdb_add(struct ocelot *ocelot, int port, + const unsigned char *addr, u16 vid, bool vlan_aware) { - struct ocelot_port *port = netdev_priv(dev); - struct ocelot *ocelot = port->ocelot; + struct ocelot_port *ocelot_port = ocelot->ports[port]; if (!vid) { - if (!port->vlan_aware) + if (!vlan_aware) /* If the bridge is not VLAN aware and no VID was * provided, set it to pvid to ensure the MAC entry * matches incoming untagged packets */ - vid = port->pvid; + vid = ocelot_port->pvid; else /* If the bridge is VLAN aware a VID must be provided as * otherwise the learnt entry wouldn't match any frame. @@ -787,19 +814,40 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return -EINVAL; } - return ocelot_mact_learn(ocelot, port->chip_port, addr, vid, - ENTRYTYPE_LOCKED); + return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED); } +EXPORT_SYMBOL(ocelot_fdb_add); -static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, u16 vid) +static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid, u16 flags, + struct netlink_ext_ack *extack) { - struct ocelot_port *port = netdev_priv(dev); - struct ocelot *ocelot = port->ocelot; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + return ocelot_fdb_add(ocelot, port, addr, vid, priv->vlan_aware); +} +int ocelot_fdb_del(struct ocelot *ocelot, int port, + const unsigned char *addr, u16 vid) +{ return ocelot_mact_forget(ocelot, addr, vid); } +EXPORT_SYMBOL(ocelot_fdb_del); + +static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + return ocelot_fdb_del(ocelot, port, addr, vid); +} struct ocelot_dump_ctx { struct net_device *dev; @@ -808,9 +856,10 @@ struct ocelot_dump_ctx { int idx; }; -static int ocelot_fdb_do_dump(struct ocelot_mact_entry *entry, - struct ocelot_dump_ctx *dump) +static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, + bool is_static, void *data) { + struct ocelot_dump_ctx *dump = data; u32 portid = NETLINK_CB(dump->cb->skb).portid; u32 seq = dump->cb->nlh->nlmsg_seq; struct 
nlmsghdr *nlh; @@ -831,12 +880,12 @@ static int ocelot_fdb_do_dump(struct ocelot_mact_entry *entry, ndm->ndm_flags = NTF_SELF; ndm->ndm_type = 0; ndm->ndm_ifindex = dump->dev->ifindex; - ndm->ndm_state = NUD_REACHABLE; + ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE; - if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac)) + if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr)) goto nla_put_failure; - if (entry->vid && nla_put_u16(dump->skb, NDA_VLAN, entry->vid)) + if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid)) goto nla_put_failure; nlmsg_end(dump->skb, nlh); @@ -850,12 +899,11 @@ nla_put_failure: return -EMSGSIZE; } -static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col, - struct ocelot_mact_entry *entry) +static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col, + struct ocelot_mact_entry *entry) { - struct ocelot *ocelot = port->ocelot; - char mac[ETH_ALEN]; u32 val, dst, macl, mach; + char mac[ETH_ALEN]; /* Set row and column to read from */ ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row); @@ -878,7 +926,7 @@ static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col, * do not report it. */ dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3; - if (dst != port->chip_port) + if (dst != port) return -EINVAL; /* Get the entry's MAC address and VLAN id */ @@ -898,43 +946,61 @@ static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col, return 0; } -static int ocelot_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, - struct net_device *dev, - struct net_device *filter_dev, int *idx) +int ocelot_fdb_dump(struct ocelot *ocelot, int port, + dsa_fdb_dump_cb_t *cb, void *data) { - struct ocelot_port *port = netdev_priv(dev); - int i, j, ret = 0; - struct ocelot_dump_ctx dump = { - .dev = dev, - .skb = skb, - .cb = cb, - .idx = *idx, - }; - - struct ocelot_mact_entry entry; + int i, j; /* Loop through all the mac tables entries. There are 1024 rows of 4 * entries. */ for (i = 0; i < 1024; i++) { for (j = 0; j < 4; j++) { - ret = ocelot_mact_read(port, i, j, &entry); + struct ocelot_mact_entry entry; + bool is_static; + int ret; + + ret = ocelot_mact_read(ocelot, port, i, j, &entry); /* If the entry is invalid (wrong port, invalid...), * skip it. 
*/ if (ret == -EINVAL) continue; else if (ret) - goto end; + return ret; - ret = ocelot_fdb_do_dump(&entry, &dump); + is_static = (entry.type == ENTRYTYPE_LOCKED); + + ret = cb(entry.mac, entry.vid, is_static, data); if (ret) - goto end; + return ret; } } -end: + return 0; +} +EXPORT_SYMBOL(ocelot_fdb_dump); + +static int ocelot_port_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + struct net_device *filter_dev, int *idx) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + struct ocelot_dump_ctx dump = { + .dev = dev, + .skb = skb, + .cb = cb, + .idx = *idx, + }; + int port = priv->chip_port; + int ret; + + ret = ocelot_fdb_dump(ocelot, port, ocelot_port_fdb_do_dump, &dump); + *idx = dump.idx; + return ret; } @@ -953,18 +1019,20 @@ static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, static int ocelot_set_features(struct net_device *dev, netdev_features_t features) { - struct ocelot_port *port = netdev_priv(dev); netdev_features_t changed = dev->features ^ features; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && - port->tc.offload_cnt) { + priv->tc.offload_cnt) { netdev_err(dev, "Cannot disable HW TC offload while offloads active\n"); return -EBUSY; } if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) - ocelot_vlan_mode(port, features); + ocelot_vlan_mode(ocelot, port, features); return 0; } @@ -972,8 +1040,8 @@ static int ocelot_set_features(struct net_device *dev, static int ocelot_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid) { - struct ocelot_port *ocelot_port = netdev_priv(dev); - struct ocelot *ocelot = ocelot_port->ocelot; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; ppid->id_len = sizeof(ocelot->base_mac); memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len); @@ -981,17 +1049,17 @@ static int ocelot_get_port_parent_id(struct net_device *dev, return 0; } -static int ocelot_hwstamp_get(struct ocelot_port *port, struct ifreq *ifr) +static int ocelot_hwstamp_get(struct ocelot *ocelot, int port, + struct ifreq *ifr) { - struct ocelot *ocelot = port->ocelot; - return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config, sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0; } -static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr) +static int ocelot_hwstamp_set(struct ocelot *ocelot, int port, + struct ifreq *ifr) { - struct ocelot *ocelot = port->ocelot; + struct ocelot_port *ocelot_port = ocelot->ports[port]; struct hwtstamp_config cfg; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) @@ -1004,16 +1072,16 @@ static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr) /* Tx type sanity check */ switch (cfg.tx_type) { case HWTSTAMP_TX_ON: - port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; + ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; break; case HWTSTAMP_TX_ONESTEP_SYNC: /* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we * need to update the origin time. 
*/ - port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP; + ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP; break; case HWTSTAMP_TX_OFF: - port->ptp_cmd = 0; + ocelot_port->ptp_cmd = 0; break; default: return -ERANGE; @@ -1055,8 +1123,9 @@ static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr) static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct ocelot_port *port = netdev_priv(dev); - struct ocelot *ocelot = port->ocelot; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; /* The function is only used for PTP operations for now */ if (!ocelot->ptp) @@ -1064,9 +1133,9 @@ static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCSHWTSTAMP: - return ocelot_hwstamp_set(port, ifr); + return ocelot_hwstamp_set(ocelot, port, ifr); case SIOCGHWTSTAMP: - return ocelot_hwstamp_get(port, ifr); + return ocelot_hwstamp_get(ocelot, port, ifr); default: return -EOPNOTSUPP; } @@ -1080,9 +1149,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_get_phys_port_name = ocelot_port_get_phys_port_name, .ndo_set_mac_address = ocelot_port_set_mac_address, .ndo_get_stats64 = ocelot_get_stats64, - .ndo_fdb_add = ocelot_fdb_add, - .ndo_fdb_del = ocelot_fdb_del, - .ndo_fdb_dump = ocelot_fdb_dump, + .ndo_fdb_add = ocelot_port_fdb_add, + .ndo_fdb_del = ocelot_port_fdb_del, + .ndo_fdb_dump = ocelot_port_fdb_dump, .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid, .ndo_set_features = ocelot_set_features, @@ -1091,10 +1160,8 @@ static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_do_ioctl = ocelot_ioctl, }; -static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data) +void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data) { - struct ocelot_port *port = netdev_priv(netdev); - struct ocelot *ocelot = port->ocelot; int i; if (sset != ETH_SS_STATS) @@ -1104,6 +1171,17 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data) memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name, ETH_GSTRING_LEN); } +EXPORT_SYMBOL(ocelot_get_strings); + +static void ocelot_port_get_strings(struct net_device *netdev, u32 sset, + u8 *data) +{ + struct ocelot_port_private *priv = netdev_priv(netdev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + ocelot_get_strings(ocelot, port, sset, data); +} static void ocelot_update_stats(struct ocelot *ocelot) { @@ -1145,11 +1223,8 @@ static void ocelot_check_stats_work(struct work_struct *work) OCELOT_STATS_CHECK_DELAY); } -static void ocelot_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) +void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data) { - struct ocelot_port *port = netdev_priv(dev); - struct ocelot *ocelot = port->ocelot; int i; /* check and update now */ @@ -1157,28 +1232,42 @@ static void ocelot_get_ethtool_stats(struct net_device *dev, /* Copy all counters */ for (i = 0; i < ocelot->num_stats; i++) - *data++ = ocelot->stats[port->chip_port * ocelot->num_stats + i]; + *data++ = ocelot->stats[port * ocelot->num_stats + i]; } +EXPORT_SYMBOL(ocelot_get_ethtool_stats); -static int ocelot_get_sset_count(struct net_device *dev, int sset) +static void ocelot_port_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) { - struct ocelot_port *port = netdev_priv(dev); - struct ocelot 
*ocelot = port->ocelot; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + ocelot_get_ethtool_stats(ocelot, port, data); +} + +int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset) +{ if (sset != ETH_SS_STATS) return -EOPNOTSUPP; + return ocelot->num_stats; } +EXPORT_SYMBOL(ocelot_get_sset_count); -static int ocelot_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) +static int ocelot_port_get_sset_count(struct net_device *dev, int sset) { - struct ocelot_port *ocelot_port = netdev_priv(dev); - struct ocelot *ocelot = ocelot_port->ocelot; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; - if (!ocelot->ptp) - return ethtool_op_get_ts_info(dev, info); + return ocelot_get_sset_count(ocelot, port, sset); +} +int ocelot_get_ts_info(struct ocelot *ocelot, int port, + struct ethtool_ts_info *info) +{ info->phc_index = ocelot->ptp_clock ? ptp_clock_index(ocelot->ptp_clock) : -1; info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | @@ -1193,36 +1282,43 @@ static int ocelot_get_ts_info(struct net_device *dev, return 0; } +EXPORT_SYMBOL(ocelot_get_ts_info); + +static int ocelot_port_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + if (!ocelot->ptp) + return ethtool_op_get_ts_info(dev, info); + + return ocelot_get_ts_info(ocelot, port, info); +} static const struct ethtool_ops ocelot_ethtool_ops = { - .get_strings = ocelot_get_strings, - .get_ethtool_stats = ocelot_get_ethtool_stats, - .get_sset_count = ocelot_get_sset_count, + .get_strings = ocelot_port_get_strings, + .get_ethtool_stats = ocelot_port_get_ethtool_stats, + .get_sset_count = ocelot_port_get_sset_count, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, - .get_ts_info = ocelot_get_ts_info, + .get_ts_info = ocelot_port_get_ts_info, }; -static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port, - struct switchdev_trans *trans, - u8 state) +void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) { - struct ocelot *ocelot = ocelot_port->ocelot; u32 port_cfg; - int port, i; + int p, i; - if (switchdev_trans_ph_prepare(trans)) - return 0; - - if (!(BIT(ocelot_port->chip_port) & ocelot->bridge_mask)) - return 0; + if (!(BIT(port) & ocelot->bridge_mask)) + return; - port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, - ocelot_port->chip_port); + port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port); switch (state) { case BR_STATE_FORWARDING: - ocelot->bridge_fwd_mask |= BIT(ocelot_port->chip_port); + ocelot->bridge_fwd_mask |= BIT(port); /* Fallthrough */ case BR_STATE_LEARNING: port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA; @@ -1230,19 +1326,18 @@ static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port, default: port_cfg &= ~ANA_PORT_PORT_CFG_LEARN_ENA; - ocelot->bridge_fwd_mask &= ~BIT(ocelot_port->chip_port); + ocelot->bridge_fwd_mask &= ~BIT(port); break; } - ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG, - ocelot_port->chip_port); + ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG, port); /* Apply FWD mask. The loop is needed to add/remove the current port as * a source for the other ports. 
*/ - for (port = 0; port < ocelot->num_phys_ports; port++) { - if (ocelot->bridge_fwd_mask & BIT(port)) { - unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(port); + for (p = 0; p < ocelot->num_phys_ports; p++) { + if (p == ocelot->cpu || (ocelot->bridge_fwd_mask & BIT(p))) { + unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(p); for (i = 0; i < ocelot->num_phys_ports; i++) { unsigned long bond_mask = ocelot->lags[i]; @@ -1250,78 +1345,93 @@ static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port, if (!bond_mask) continue; - if (bond_mask & BIT(port)) { + if (bond_mask & BIT(p)) { mask &= ~bond_mask; break; } } - ocelot_write_rix(ocelot, - BIT(ocelot->num_phys_ports) | mask, - ANA_PGID_PGID, PGID_SRC + port); + /* Avoid the NPI port from looping back to itself */ + if (p != ocelot->cpu) + mask |= BIT(ocelot->cpu); + + ocelot_write_rix(ocelot, mask, + ANA_PGID_PGID, PGID_SRC + p); } else { /* Only the CPU port, this is compatible with link * aggregation. */ ocelot_write_rix(ocelot, - BIT(ocelot->num_phys_ports), - ANA_PGID_PGID, PGID_SRC + port); + BIT(ocelot->cpu), + ANA_PGID_PGID, PGID_SRC + p); } } +} +EXPORT_SYMBOL(ocelot_bridge_stp_state_set); - return 0; +static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port, + struct switchdev_trans *trans, + u8 state) +{ + if (switchdev_trans_ph_prepare(trans)) + return; + + ocelot_bridge_stp_state_set(ocelot, port, state); } -static void ocelot_port_attr_ageing_set(struct ocelot_port *ocelot_port, +void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs) +{ + ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(msecs / 2), + ANA_AUTOAGE); +} +EXPORT_SYMBOL(ocelot_set_ageing_time); + +static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port, unsigned long ageing_clock_t) { - struct ocelot *ocelot = ocelot_port->ocelot; unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; - ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(ageing_time / 2), - ANA_AUTOAGE); + ocelot_set_ageing_time(ocelot, ageing_time); } -static void ocelot_port_attr_mc_set(struct ocelot_port *port, bool mc) +static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc) { - struct ocelot *ocelot = port->ocelot; - u32 val = ocelot_read_gix(ocelot, ANA_PORT_CPU_FWD_CFG, - port->chip_port); + u32 cpu_fwd_mcast = ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA | + ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA | + ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA; + u32 val = 0; if (mc) - val |= ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA | - ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA | - ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA; - else - val &= ~(ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA | - ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA | - ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA); + val = cpu_fwd_mcast; - ocelot_write_gix(ocelot, val, ANA_PORT_CPU_FWD_CFG, port->chip_port); + ocelot_rmw_gix(ocelot, val, cpu_fwd_mcast, + ANA_PORT_CPU_FWD_CFG, port); } static int ocelot_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, struct switchdev_trans *trans) { - struct ocelot_port *ocelot_port = netdev_priv(dev); + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; int err = 0; switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_STP_STATE: - ocelot_port_attr_stp_state_set(ocelot_port, trans, + ocelot_port_attr_stp_state_set(ocelot, port, trans, attr->u.stp_state); break; case 
SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: - ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time); + ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time); break; case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: - ocelot_port->vlan_aware = attr->u.vlan_filtering; - ocelot_vlan_port_apply(ocelot_port->ocelot, ocelot_port); + priv->vlan_aware = attr->u.vlan_filtering; + ocelot_port_vlan_filtering(ocelot, port, priv->vlan_aware); break; case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: - ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled); + ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled); break; default: err = -EOPNOTSUPP; @@ -1383,15 +1493,17 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev, const struct switchdev_obj_port_mdb *mdb, struct switchdev_trans *trans) { - struct ocelot_port *port = netdev_priv(dev); - struct ocelot *ocelot = port->ocelot; - struct ocelot_multicast *mc; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; unsigned char addr[ETH_ALEN]; + struct ocelot_multicast *mc; + int port = priv->chip_port; u16 vid = mdb->vid; bool new = false; if (!vid) - vid = port->pvid; + vid = ocelot_port->pvid; mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) { @@ -1415,7 +1527,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev, ocelot_mact_forget(ocelot, addr, vid); } - mc->ports |= BIT(port->chip_port); + mc->ports |= BIT(port); addr[2] = mc->ports << 0; addr[1] = mc->ports << 8; @@ -1425,14 +1537,16 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev, static int ocelot_port_obj_del_mdb(struct net_device *dev, const struct switchdev_obj_port_mdb *mdb) { - struct ocelot_port *port = netdev_priv(dev); - struct ocelot *ocelot = port->ocelot; - struct ocelot_multicast *mc; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; unsigned char addr[ETH_ALEN]; + struct ocelot_multicast *mc; + int port = priv->chip_port; u16 vid = mdb->vid; if (!vid) - vid = port->pvid; + vid = ocelot_port->pvid; mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) @@ -1444,7 +1558,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev, addr[0] = 0; ocelot_mact_forget(ocelot, addr, vid); - mc->ports &= ~BIT(port->chip_port); + mc->ports &= ~BIT(port); if (!mc->ports) { list_del(&mc->list); devm_kfree(ocelot->dev, mc); @@ -1501,11 +1615,9 @@ static int ocelot_port_obj_del(struct net_device *dev, return ret; } -static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port, - struct net_device *bridge) +int ocelot_port_bridge_join(struct ocelot *ocelot, int port, + struct net_device *bridge) { - struct ocelot *ocelot = ocelot_port->ocelot; - if (!ocelot->bridge_mask) { ocelot->hw_bridge_dev = bridge; } else { @@ -1515,26 +1627,25 @@ static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port, return -ENODEV; } - ocelot->bridge_mask |= BIT(ocelot_port->chip_port); + ocelot->bridge_mask |= BIT(port); return 0; } +EXPORT_SYMBOL(ocelot_port_bridge_join); -static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port, - struct net_device *bridge) +int ocelot_port_bridge_leave(struct ocelot *ocelot, int port, + struct net_device *bridge) { - struct ocelot *ocelot = ocelot_port->ocelot; - - ocelot->bridge_mask &= ~BIT(ocelot_port->chip_port); + ocelot->bridge_mask &= ~BIT(port); if (!ocelot->bridge_mask) ocelot->hw_bridge_dev = 
NULL; - /* Clear bridge vlan settings before calling ocelot_vlan_port_apply */ - ocelot_port->vlan_aware = 0; - ocelot_port->pvid = 0; - ocelot_port->vid = 0; + ocelot_port_vlan_filtering(ocelot, port, 0); + ocelot_port_set_pvid(ocelot, port, 0); + return ocelot_port_set_native_vlan(ocelot, port, 0); } +EXPORT_SYMBOL(ocelot_port_bridge_leave); static void ocelot_set_aggr_pgids(struct ocelot *ocelot) { @@ -1594,20 +1705,18 @@ static void ocelot_setup_lag(struct ocelot *ocelot, int lag) } } -static int ocelot_port_lag_join(struct ocelot_port *ocelot_port, +static int ocelot_port_lag_join(struct ocelot *ocelot, int port, struct net_device *bond) { - struct ocelot *ocelot = ocelot_port->ocelot; - int p = ocelot_port->chip_port; - int lag, lp; struct net_device *ndev; u32 bond_mask = 0; + int lag, lp; rcu_read_lock(); for_each_netdev_in_bond_rcu(bond, ndev) { - struct ocelot_port *port = netdev_priv(ndev); + struct ocelot_port_private *priv = netdev_priv(ndev); - bond_mask |= BIT(port->chip_port); + bond_mask |= BIT(priv->chip_port); } rcu_read_unlock(); @@ -1616,17 +1725,17 @@ static int ocelot_port_lag_join(struct ocelot_port *ocelot_port, /* If the new port is the lowest one, use it as the logical port from * now on */ - if (p == lp) { - lag = p; - ocelot->lags[p] = bond_mask; - bond_mask &= ~BIT(p); + if (port == lp) { + lag = port; + ocelot->lags[port] = bond_mask; + bond_mask &= ~BIT(port); if (bond_mask) { lp = __ffs(bond_mask); ocelot->lags[lp] = 0; } } else { lag = lp; - ocelot->lags[lp] |= BIT(p); + ocelot->lags[lp] |= BIT(port); } ocelot_setup_lag(ocelot, lag); @@ -1635,34 +1744,32 @@ static int ocelot_port_lag_join(struct ocelot_port *ocelot_port, return 0; } -static void ocelot_port_lag_leave(struct ocelot_port *ocelot_port, +static void ocelot_port_lag_leave(struct ocelot *ocelot, int port, struct net_device *bond) { - struct ocelot *ocelot = ocelot_port->ocelot; - int p = ocelot_port->chip_port; u32 port_cfg; int i; /* Remove port from any lag */ for (i = 0; i < ocelot->num_phys_ports; i++) - ocelot->lags[i] &= ~BIT(ocelot_port->chip_port); + ocelot->lags[i] &= ~BIT(port); /* if it was the logical port of the lag, move the lag config to the * next port */ - if (ocelot->lags[p]) { - int n = __ffs(ocelot->lags[p]); + if (ocelot->lags[port]) { + int n = __ffs(ocelot->lags[port]); - ocelot->lags[n] = ocelot->lags[p]; - ocelot->lags[p] = 0; + ocelot->lags[n] = ocelot->lags[port]; + ocelot->lags[port] = 0; ocelot_setup_lag(ocelot, n); } - port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p); + port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port); port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M; - ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(p), - ANA_PORT_PORT_CFG, p); + ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(port), + ANA_PORT_PORT_CFG, port); ocelot_set_aggr_pgids(ocelot); } @@ -1677,31 +1784,30 @@ static int ocelot_netdevice_port_event(struct net_device *dev, unsigned long event, struct netdev_notifier_changeupper_info *info) { - struct ocelot_port *ocelot_port = netdev_priv(dev); + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; int err = 0; - if (!ocelot_netdevice_dev_check(dev)) - return 0; - switch (event) { case NETDEV_CHANGEUPPER: if (netif_is_bridge_master(info->upper_dev)) { - if (info->linking) - err = ocelot_port_bridge_join(ocelot_port, + if (info->linking) { + err = 
ocelot_port_bridge_join(ocelot, port, info->upper_dev); - else - ocelot_port_bridge_leave(ocelot_port, - info->upper_dev); - - ocelot_vlan_port_apply(ocelot_port->ocelot, - ocelot_port); + } else { + err = ocelot_port_bridge_leave(ocelot, port, + info->upper_dev); + priv->vlan_aware = false; + } } if (netif_is_lag_master(info->upper_dev)) { if (info->linking) - err = ocelot_port_lag_join(ocelot_port, + err = ocelot_port_lag_join(ocelot, port, info->upper_dev); else - ocelot_port_lag_leave(ocelot_port, + ocelot_port_lag_leave(ocelot, port, info->upper_dev); } break; @@ -1719,12 +1825,16 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct net_device *dev = netdev_notifier_info_to_dev(ptr); int ret = 0; + if (!ocelot_netdevice_dev_check(dev)) + return 0; + if (event == NETDEV_PRECHANGEUPPER && netif_is_lag_master(info->upper_dev)) { struct netdev_lag_upper_info *lag_upper_info = info->upper_info; struct netlink_ext_ack *extack; - if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + if (lag_upper_info && + lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { extack = netdev_notifier_info_to_extack(&info->info); NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); @@ -2000,24 +2110,97 @@ static int ocelot_init_timestamp(struct ocelot *ocelot) return 0; } +static void ocelot_port_set_mtu(struct ocelot *ocelot, int port, size_t mtu) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int atop_wm; + + ocelot_port_writel(ocelot_port, mtu, DEV_MAC_MAXLEN_CFG); + + /* Set Pause WM hysteresis + * 152 = 6 * mtu / OCELOT_BUFFER_CELL_SZ + * 101 = 4 * mtu / OCELOT_BUFFER_CELL_SZ + */ + ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA | + SYS_PAUSE_CFG_PAUSE_STOP(101) | + SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, port); + + /* Tail dropping watermark */ + atop_wm = (ocelot->shared_queue_sz - 9 * mtu) / OCELOT_BUFFER_CELL_SZ; + ocelot_write_rix(ocelot, ocelot_wm_enc(9 * mtu), + SYS_ATOP, port); + ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG); +} + +void ocelot_init_port(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + INIT_LIST_HEAD(&ocelot_port->skbs); + + /* Basic L2 initialization */ + + /* Set MAC IFG Gaps + * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0 + * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5 + */ + ocelot_port_writel(ocelot_port, DEV_MAC_IFG_CFG_TX_IFG(5), + DEV_MAC_IFG_CFG); + + /* Load seed (0) and set MAC HDX late collision */ + ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) | + DEV_MAC_HDX_CFG_SEED_LOAD, + DEV_MAC_HDX_CFG); + mdelay(1); + ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67), + DEV_MAC_HDX_CFG); + + /* Set Max Length and maximum tags allowed */ + ocelot_port_set_mtu(ocelot, port, VLAN_ETH_FRAME_LEN); + ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) | + DEV_MAC_TAGS_CFG_VLAN_AWR_ENA | + DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, + DEV_MAC_TAGS_CFG); + + /* Set SMAC of Pause frame (00:00:00:00:00:00) */ + ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG); + ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG); + + /* Drop frames with multicast source address */ + ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA, + ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA, + ANA_PORT_DROP_CFG, port); + + /* Set default VLAN and tag type to 8021Q. 
*/ + ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q), + REW_PORT_VLAN_CFG_PORT_TPID_M, + REW_PORT_VLAN_CFG, port); + + /* Enable vcap lookups */ + ocelot_vcap_enable(ocelot, port); +} +EXPORT_SYMBOL(ocelot_init_port); + int ocelot_probe_port(struct ocelot *ocelot, u8 port, void __iomem *regs, struct phy_device *phy) { + struct ocelot_port_private *priv; struct ocelot_port *ocelot_port; struct net_device *dev; int err; - dev = alloc_etherdev(sizeof(struct ocelot_port)); + dev = alloc_etherdev(sizeof(struct ocelot_port_private)); if (!dev) return -ENOMEM; SET_NETDEV_DEV(dev, ocelot->dev); - ocelot_port = netdev_priv(dev); - ocelot_port->dev = dev; + priv = netdev_priv(dev); + priv->dev = dev; + priv->phy = phy; + priv->chip_port = port; + ocelot_port = &priv->port; ocelot_port->ocelot = ocelot; ocelot_port->regs = regs; - ocelot_port->chip_port = port; - ocelot_port->phy = phy; ocelot->ports[port] = ocelot_port; dev->netdev_ops = &ocelot_port_netdev_ops; @@ -2032,33 +2215,81 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid, ENTRYTYPE_LOCKED); - INIT_LIST_HEAD(&ocelot_port->skbs); + ocelot_init_port(ocelot, port); err = register_netdev(dev); if (err) { dev_err(ocelot->dev, "register_netdev failed\n"); - goto err_register_netdev; + free_netdev(dev); } - /* Basic L2 initialization */ - ocelot_vlan_port_apply(ocelot, ocelot_port); + return err; +} +EXPORT_SYMBOL(ocelot_probe_port); - /* Enable vcap lookups */ - ocelot_vcap_enable(ocelot, ocelot_port); +void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu, + enum ocelot_tag_prefix injection, + enum ocelot_tag_prefix extraction) +{ + /* Configure and enable the CPU port. */ + ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu); + ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU); + ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA | + ANA_PORT_PORT_CFG_PORTID_VAL(cpu), + ANA_PORT_PORT_CFG, cpu); - return 0; + /* If the CPU port is a physical port, set up the port in Node + * Processor Interface (NPI) mode. This is the mode through which + * frames can be injected from and extracted to an external CPU. + * Only one port can be an NPI at the same time. 
+ */ + if (cpu < ocelot->num_phys_ports) { + int mtu = VLAN_ETH_FRAME_LEN + OCELOT_TAG_LEN; -err_register_netdev: - free_netdev(dev); - return err; + ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M | + QSYS_EXT_CPU_CFG_EXT_CPU_PORT(cpu), + QSYS_EXT_CPU_CFG); + + if (injection == OCELOT_TAG_PREFIX_SHORT) + mtu += OCELOT_SHORT_PREFIX_LEN; + else if (injection == OCELOT_TAG_PREFIX_LONG) + mtu += OCELOT_LONG_PREFIX_LEN; + + ocelot_port_set_mtu(ocelot, cpu, mtu); + } + + /* CPU port Injection/Extraction configuration */ + ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | + QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | + QSYS_SWITCH_PORT_MODE_PORT_ENA, + QSYS_SWITCH_PORT_MODE, cpu); + ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(extraction) | + SYS_PORT_MODE_INCL_INJ_HDR(injection), + SYS_PORT_MODE, cpu); + + /* Configure the CPU port to be VLAN aware */ + ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) | + ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1), + ANA_PORT_VLAN_CFG, cpu); + + ocelot->cpu = cpu; } -EXPORT_SYMBOL(ocelot_probe_port); +EXPORT_SYMBOL(ocelot_set_cpu_port); int ocelot_init(struct ocelot *ocelot) { - u32 port; - int i, ret, cpu = ocelot->num_phys_ports; char queue_name[32]; + int i, ret; + u32 port; + + if (ocelot->ops->reset) { + ret = ocelot->ops->reset(ocelot); + if (ret) { + dev_err(ocelot->dev, "Switch reset failed\n"); + return ret; + } + } ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports, sizeof(u32), GFP_KERNEL); @@ -2080,6 +2311,7 @@ int ocelot_init(struct ocelot *ocelot) if (!ocelot->stats_queue) return -ENOMEM; + INIT_LIST_HEAD(&ocelot->multicast); ocelot_mact_init(ocelot); ocelot_vlan_init(ocelot); ocelot_ace_init(ocelot); @@ -2137,13 +2369,6 @@ int ocelot_init(struct ocelot *ocelot) ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port); } - /* Configure and enable the CPU port. */ - ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu); - ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU); - ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA | - ANA_PORT_PORT_CFG_PORTID_VAL(cpu), - ANA_PORT_PORT_CFG, cpu); - /* Allow broadcast MAC frames. */ for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) { u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0)); @@ -2156,13 +2381,6 @@ int ocelot_init(struct ocelot *ocelot) ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4); ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6); - /* CPU port Injection/Extraction configuration */ - ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | - QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | - QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, cpu); - ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(1) | - SYS_PORT_MODE_INCL_INJ_HDR(1), SYS_PORT_MODE, cpu); /* Allow manual injection via DEVCPU_QS registers, and byte swap these * registers endianness. 
*/ diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index e40773c01a44..32fef4f495aa 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -18,11 +18,12 @@ #include <linux/ptp_clock_kernel.h> #include <linux/regmap.h> +#include <soc/mscc/ocelot_sys.h> +#include <soc/mscc/ocelot.h> #include "ocelot_ana.h" #include "ocelot_dev.h" #include "ocelot_qsys.h" #include "ocelot_rew.h" -#include "ocelot_sys.h" #include "ocelot_qs.h" #include "ocelot_tc.h" #include "ocelot_ptp.h" @@ -43,8 +44,6 @@ #define OCELOT_PTP_QUEUE_SZ 128 -#define IFH_LEN 4 - struct frame_info { u32 len; u16 port; @@ -54,372 +53,6 @@ struct frame_info { u32 timestamp; /* rew_val */ }; -#define IFH_INJ_BYPASS BIT(31) -#define IFH_INJ_POP_CNT_DISABLE (3 << 28) - -#define IFH_TAG_TYPE_C 0 -#define IFH_TAG_TYPE_S 1 - -#define IFH_REW_OP_NOOP 0x0 -#define IFH_REW_OP_DSCP 0x1 -#define IFH_REW_OP_ONE_STEP_PTP 0x2 -#define IFH_REW_OP_TWO_STEP_PTP 0x3 -#define IFH_REW_OP_ORIGIN_PTP 0x5 - -#define OCELOT_SPEED_2500 0 -#define OCELOT_SPEED_1000 1 -#define OCELOT_SPEED_100 2 -#define OCELOT_SPEED_10 3 - -#define TARGET_OFFSET 24 -#define REG_MASK GENMASK(TARGET_OFFSET - 1, 0) -#define REG(reg, offset) [reg & REG_MASK] = offset - -enum ocelot_target { - ANA = 1, - QS, - QSYS, - REW, - SYS, - S2, - HSIO, - PTP, - TARGET_MAX, -}; - -enum ocelot_reg { - ANA_ADVLEARN = ANA << TARGET_OFFSET, - ANA_VLANMASK, - ANA_PORT_B_DOMAIN, - ANA_ANAGEFIL, - ANA_ANEVENTS, - ANA_STORMLIMIT_BURST, - ANA_STORMLIMIT_CFG, - ANA_ISOLATED_PORTS, - ANA_COMMUNITY_PORTS, - ANA_AUTOAGE, - ANA_MACTOPTIONS, - ANA_LEARNDISC, - ANA_AGENCTRL, - ANA_MIRRORPORTS, - ANA_EMIRRORPORTS, - ANA_FLOODING, - ANA_FLOODING_IPMC, - ANA_SFLOW_CFG, - ANA_PORT_MODE, - ANA_CUT_THRU_CFG, - ANA_PGID_PGID, - ANA_TABLES_ANMOVED, - ANA_TABLES_MACHDATA, - ANA_TABLES_MACLDATA, - ANA_TABLES_STREAMDATA, - ANA_TABLES_MACACCESS, - ANA_TABLES_MACTINDX, - ANA_TABLES_VLANACCESS, - ANA_TABLES_VLANTIDX, - ANA_TABLES_ISDXACCESS, - ANA_TABLES_ISDXTIDX, - ANA_TABLES_ENTRYLIM, - ANA_TABLES_PTP_ID_HIGH, - ANA_TABLES_PTP_ID_LOW, - ANA_TABLES_STREAMACCESS, - ANA_TABLES_STREAMTIDX, - ANA_TABLES_SEQ_HISTORY, - ANA_TABLES_SEQ_MASK, - ANA_TABLES_SFID_MASK, - ANA_TABLES_SFIDACCESS, - ANA_TABLES_SFIDTIDX, - ANA_MSTI_STATE, - ANA_OAM_UPM_LM_CNT, - ANA_SG_ACCESS_CTRL, - ANA_SG_CONFIG_REG_1, - ANA_SG_CONFIG_REG_2, - ANA_SG_CONFIG_REG_3, - ANA_SG_CONFIG_REG_4, - ANA_SG_CONFIG_REG_5, - ANA_SG_GCL_GS_CONFIG, - ANA_SG_GCL_TI_CONFIG, - ANA_SG_STATUS_REG_1, - ANA_SG_STATUS_REG_2, - ANA_SG_STATUS_REG_3, - ANA_PORT_VLAN_CFG, - ANA_PORT_DROP_CFG, - ANA_PORT_QOS_CFG, - ANA_PORT_VCAP_CFG, - ANA_PORT_VCAP_S1_KEY_CFG, - ANA_PORT_VCAP_S2_CFG, - ANA_PORT_PCP_DEI_MAP, - ANA_PORT_CPU_FWD_CFG, - ANA_PORT_CPU_FWD_BPDU_CFG, - ANA_PORT_CPU_FWD_GARP_CFG, - ANA_PORT_CPU_FWD_CCM_CFG, - ANA_PORT_PORT_CFG, - ANA_PORT_POL_CFG, - ANA_PORT_PTP_CFG, - ANA_PORT_PTP_DLY1_CFG, - ANA_PORT_PTP_DLY2_CFG, - ANA_PORT_SFID_CFG, - ANA_PFC_PFC_CFG, - ANA_PFC_PFC_TIMER, - ANA_IPT_OAM_MEP_CFG, - ANA_IPT_IPT, - ANA_PPT_PPT, - ANA_FID_MAP_FID_MAP, - ANA_AGGR_CFG, - ANA_CPUQ_CFG, - ANA_CPUQ_CFG2, - ANA_CPUQ_8021_CFG, - ANA_DSCP_CFG, - ANA_DSCP_REWR_CFG, - ANA_VCAP_RNG_TYPE_CFG, - ANA_VCAP_RNG_VAL_CFG, - ANA_VRAP_CFG, - ANA_VRAP_HDR_DATA, - ANA_VRAP_HDR_MASK, - ANA_DISCARD_CFG, - ANA_FID_CFG, - ANA_POL_PIR_CFG, - ANA_POL_CIR_CFG, - ANA_POL_MODE_CFG, - ANA_POL_PIR_STATE, - ANA_POL_CIR_STATE, - ANA_POL_STATE, - ANA_POL_FLOWC, - ANA_POL_HYST, - ANA_POL_MISC_CFG, - QS_XTR_GRP_CFG = QS << 
TARGET_OFFSET, - QS_XTR_RD, - QS_XTR_FRM_PRUNING, - QS_XTR_FLUSH, - QS_XTR_DATA_PRESENT, - QS_XTR_CFG, - QS_INJ_GRP_CFG, - QS_INJ_WR, - QS_INJ_CTRL, - QS_INJ_STATUS, - QS_INJ_ERR, - QS_INH_DBG, - QSYS_PORT_MODE = QSYS << TARGET_OFFSET, - QSYS_SWITCH_PORT_MODE, - QSYS_STAT_CNT_CFG, - QSYS_EEE_CFG, - QSYS_EEE_THRES, - QSYS_IGR_NO_SHARING, - QSYS_EGR_NO_SHARING, - QSYS_SW_STATUS, - QSYS_EXT_CPU_CFG, - QSYS_PAD_CFG, - QSYS_CPU_GROUP_MAP, - QSYS_QMAP, - QSYS_ISDX_SGRP, - QSYS_TIMED_FRAME_ENTRY, - QSYS_TFRM_MISC, - QSYS_TFRM_PORT_DLY, - QSYS_TFRM_TIMER_CFG_1, - QSYS_TFRM_TIMER_CFG_2, - QSYS_TFRM_TIMER_CFG_3, - QSYS_TFRM_TIMER_CFG_4, - QSYS_TFRM_TIMER_CFG_5, - QSYS_TFRM_TIMER_CFG_6, - QSYS_TFRM_TIMER_CFG_7, - QSYS_TFRM_TIMER_CFG_8, - QSYS_RED_PROFILE, - QSYS_RES_QOS_MODE, - QSYS_RES_CFG, - QSYS_RES_STAT, - QSYS_EGR_DROP_MODE, - QSYS_EQ_CTRL, - QSYS_EVENTS_CORE, - QSYS_QMAXSDU_CFG_0, - QSYS_QMAXSDU_CFG_1, - QSYS_QMAXSDU_CFG_2, - QSYS_QMAXSDU_CFG_3, - QSYS_QMAXSDU_CFG_4, - QSYS_QMAXSDU_CFG_5, - QSYS_QMAXSDU_CFG_6, - QSYS_QMAXSDU_CFG_7, - QSYS_PREEMPTION_CFG, - QSYS_CIR_CFG, - QSYS_EIR_CFG, - QSYS_SE_CFG, - QSYS_SE_DWRR_CFG, - QSYS_SE_CONNECT, - QSYS_SE_DLB_SENSE, - QSYS_CIR_STATE, - QSYS_EIR_STATE, - QSYS_SE_STATE, - QSYS_HSCH_MISC_CFG, - QSYS_TAG_CONFIG, - QSYS_TAS_PARAM_CFG_CTRL, - QSYS_PORT_MAX_SDU, - QSYS_PARAM_CFG_REG_1, - QSYS_PARAM_CFG_REG_2, - QSYS_PARAM_CFG_REG_3, - QSYS_PARAM_CFG_REG_4, - QSYS_PARAM_CFG_REG_5, - QSYS_GCL_CFG_REG_1, - QSYS_GCL_CFG_REG_2, - QSYS_PARAM_STATUS_REG_1, - QSYS_PARAM_STATUS_REG_2, - QSYS_PARAM_STATUS_REG_3, - QSYS_PARAM_STATUS_REG_4, - QSYS_PARAM_STATUS_REG_5, - QSYS_PARAM_STATUS_REG_6, - QSYS_PARAM_STATUS_REG_7, - QSYS_PARAM_STATUS_REG_8, - QSYS_PARAM_STATUS_REG_9, - QSYS_GCL_STATUS_REG_1, - QSYS_GCL_STATUS_REG_2, - REW_PORT_VLAN_CFG = REW << TARGET_OFFSET, - REW_TAG_CFG, - REW_PORT_CFG, - REW_DSCP_CFG, - REW_PCP_DEI_QOS_MAP_CFG, - REW_PTP_CFG, - REW_PTP_DLY1_CFG, - REW_RED_TAG_CFG, - REW_DSCP_REMAP_DP1_CFG, - REW_DSCP_REMAP_CFG, - REW_STAT_CFG, - REW_REW_STICKY, - REW_PPT, - SYS_COUNT_RX_OCTETS = SYS << TARGET_OFFSET, - SYS_COUNT_RX_UNICAST, - SYS_COUNT_RX_MULTICAST, - SYS_COUNT_RX_BROADCAST, - SYS_COUNT_RX_SHORTS, - SYS_COUNT_RX_FRAGMENTS, - SYS_COUNT_RX_JABBERS, - SYS_COUNT_RX_CRC_ALIGN_ERRS, - SYS_COUNT_RX_SYM_ERRS, - SYS_COUNT_RX_64, - SYS_COUNT_RX_65_127, - SYS_COUNT_RX_128_255, - SYS_COUNT_RX_256_1023, - SYS_COUNT_RX_1024_1526, - SYS_COUNT_RX_1527_MAX, - SYS_COUNT_RX_PAUSE, - SYS_COUNT_RX_CONTROL, - SYS_COUNT_RX_LONGS, - SYS_COUNT_RX_CLASSIFIED_DROPS, - SYS_COUNT_TX_OCTETS, - SYS_COUNT_TX_UNICAST, - SYS_COUNT_TX_MULTICAST, - SYS_COUNT_TX_BROADCAST, - SYS_COUNT_TX_COLLISION, - SYS_COUNT_TX_DROPS, - SYS_COUNT_TX_PAUSE, - SYS_COUNT_TX_64, - SYS_COUNT_TX_65_127, - SYS_COUNT_TX_128_511, - SYS_COUNT_TX_512_1023, - SYS_COUNT_TX_1024_1526, - SYS_COUNT_TX_1527_MAX, - SYS_COUNT_TX_AGING, - SYS_RESET_CFG, - SYS_SR_ETYPE_CFG, - SYS_VLAN_ETYPE_CFG, - SYS_PORT_MODE, - SYS_FRONT_PORT_MODE, - SYS_FRM_AGING, - SYS_STAT_CFG, - SYS_SW_STATUS, - SYS_MISC_CFG, - SYS_REW_MAC_HIGH_CFG, - SYS_REW_MAC_LOW_CFG, - SYS_TIMESTAMP_OFFSET, - SYS_CMID, - SYS_PAUSE_CFG, - SYS_PAUSE_TOT_CFG, - SYS_ATOP, - SYS_ATOP_TOT_CFG, - SYS_MAC_FC_CFG, - SYS_MMGT, - SYS_MMGT_FAST, - SYS_EVENTS_DIF, - SYS_EVENTS_CORE, - SYS_CNT, - SYS_PTP_STATUS, - SYS_PTP_TXSTAMP, - SYS_PTP_NXT, - SYS_PTP_CFG, - SYS_RAM_INIT, - SYS_CM_ADDR, - SYS_CM_DATA_WR, - SYS_CM_DATA_RD, - SYS_CM_OP, - SYS_CM_DATA, - S2_CORE_UPDATE_CTRL = S2 << TARGET_OFFSET, - S2_CORE_MV_CFG, - S2_CACHE_ENTRY_DAT, - S2_CACHE_MASK_DAT, - 
S2_CACHE_ACTION_DAT, - S2_CACHE_CNT_DAT, - S2_CACHE_TG_DAT, - PTP_PIN_CFG = PTP << TARGET_OFFSET, - PTP_PIN_TOD_SEC_MSB, - PTP_PIN_TOD_SEC_LSB, - PTP_PIN_TOD_NSEC, - PTP_CFG_MISC, - PTP_CLK_CFG_ADJ_CFG, - PTP_CLK_CFG_ADJ_FREQ, -}; - -enum ocelot_regfield { - ANA_ADVLEARN_VLAN_CHK, - ANA_ADVLEARN_LEARN_MIRROR, - ANA_ANEVENTS_FLOOD_DISCARD, - ANA_ANEVENTS_MSTI_DROP, - ANA_ANEVENTS_ACLKILL, - ANA_ANEVENTS_ACLUSED, - ANA_ANEVENTS_AUTOAGE, - ANA_ANEVENTS_VS2TTL1, - ANA_ANEVENTS_STORM_DROP, - ANA_ANEVENTS_LEARN_DROP, - ANA_ANEVENTS_AGED_ENTRY, - ANA_ANEVENTS_CPU_LEARN_FAILED, - ANA_ANEVENTS_AUTO_LEARN_FAILED, - ANA_ANEVENTS_LEARN_REMOVE, - ANA_ANEVENTS_AUTO_LEARNED, - ANA_ANEVENTS_AUTO_MOVED, - ANA_ANEVENTS_DROPPED, - ANA_ANEVENTS_CLASSIFIED_DROP, - ANA_ANEVENTS_CLASSIFIED_COPY, - ANA_ANEVENTS_VLAN_DISCARD, - ANA_ANEVENTS_FWD_DISCARD, - ANA_ANEVENTS_MULTICAST_FLOOD, - ANA_ANEVENTS_UNICAST_FLOOD, - ANA_ANEVENTS_DEST_KNOWN, - ANA_ANEVENTS_BUCKET3_MATCH, - ANA_ANEVENTS_BUCKET2_MATCH, - ANA_ANEVENTS_BUCKET1_MATCH, - ANA_ANEVENTS_BUCKET0_MATCH, - ANA_ANEVENTS_CPU_OPERATION, - ANA_ANEVENTS_DMAC_LOOKUP, - ANA_ANEVENTS_SMAC_LOOKUP, - ANA_ANEVENTS_SEQ_GEN_ERR_0, - ANA_ANEVENTS_SEQ_GEN_ERR_1, - ANA_TABLES_MACACCESS_B_DOM, - ANA_TABLES_MACTINDX_BUCKET, - ANA_TABLES_MACTINDX_M_INDEX, - QSYS_TIMED_FRAME_ENTRY_TFRM_VLD, - QSYS_TIMED_FRAME_ENTRY_TFRM_FP, - QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO, - QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL, - QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T, - SYS_RESET_CFG_CORE_ENA, - SYS_RESET_CFG_MEM_ENA, - SYS_RESET_CFG_MEM_INIT, - REGFIELD_MAX -}; - -enum ocelot_clk_pins { - ALT_PPS_PIN = 1, - EXT_CLK_PIN, - ALT_LDST_PIN, - TOD_ACC_PIN -}; - struct ocelot_multicast { struct list_head list; unsigned char addr[ETH_ALEN]; @@ -427,82 +60,18 @@ struct ocelot_multicast { u16 ports; }; -struct ocelot_port; - -struct ocelot_stat_layout { - u32 offset; - char name[ETH_GSTRING_LEN]; -}; - -struct ocelot { - struct device *dev; - - struct regmap *targets[TARGET_MAX]; - struct regmap_field *regfields[REGFIELD_MAX]; - const u32 *const *map; - const struct ocelot_stat_layout *stats_layout; - unsigned int num_stats; - - u8 base_mac[ETH_ALEN]; - - struct net_device *hw_bridge_dev; - u16 bridge_mask; - u16 bridge_fwd_mask; - - struct workqueue_struct *ocelot_owq; - - int shared_queue_sz; - - u8 num_phys_ports; - u8 num_cpu_ports; - struct ocelot_port **ports; - - u32 *lags; - - /* Keep track of the vlan port masks */ - u32 vlan_mask[VLAN_N_VID]; - - struct list_head multicast; - - /* Workqueue to check statistics for overflow with its lock */ - struct mutex stats_lock; - u64 *stats; - struct delayed_work stats_work; - struct workqueue_struct *stats_queue; - - u8 ptp:1; - struct ptp_clock *ptp_clock; - struct ptp_clock_info ptp_info; - struct hwtstamp_config hwtstamp_config; - struct mutex ptp_lock; /* Protects the PTP interface state */ - spinlock_t ptp_clock_lock; /* Protects the PTP clock */ -}; - -struct ocelot_port { +struct ocelot_port_private { + struct ocelot_port port; struct net_device *dev; - struct ocelot *ocelot; struct phy_device *phy; - void __iomem *regs; u8 chip_port; - /* Ingress default VLAN (pvid) */ - u16 pvid; - - /* Egress default VLAN (vid) */ - u16 vid; - u8 vlan_aware; - u64 *stats; - phy_interface_t phy_mode; struct phy *serdes; struct ocelot_port_tc tc; - - u8 ptp_cmd; - struct list_head skbs; - u8 ts_id; }; struct ocelot_skb { @@ -511,49 +80,26 @@ struct ocelot_skb { u8 id; }; -u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset); -#define ocelot_read_ix(ocelot, reg, gi, ri) 
__ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) -#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi)) -#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri)) -#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0) - -void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset); -#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) -#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi)) -#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri)) -#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0) - -void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 mask, - u32 offset); -#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) -#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi)) -#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri)) -#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0) - u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); -int ocelot_regfields_init(struct ocelot *ocelot, - const struct reg_field *const regfields); -struct regmap *ocelot_io_platform_init(struct ocelot *ocelot, - struct platform_device *pdev, - const char *name); - #define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val)) #define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val)) -int ocelot_init(struct ocelot *ocelot); -void ocelot_deinit(struct ocelot *ocelot); -int ocelot_chip_init(struct ocelot *ocelot); +int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops); int ocelot_probe_port(struct ocelot *ocelot, u8 port, void __iomem *regs, struct phy_device *phy); +void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu, + enum ocelot_tag_prefix injection, + enum ocelot_tag_prefix extraction); + extern struct notifier_block ocelot_netdevice_nb; extern struct notifier_block ocelot_switchdev_nb; extern struct notifier_block ocelot_switchdev_blocking_nb; -int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts); -void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts); +#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val)) +#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val)) #endif diff --git a/drivers/net/ethernet/mscc/ocelot_ace.h b/drivers/net/ethernet/mscc/ocelot_ace.h index e98944c87259..c08e3e8482e7 100644 --- a/drivers/net/ethernet/mscc/ocelot_ace.h +++ b/drivers/net/ethernet/mscc/ocelot_ace.h @@ -224,9 +224,9 @@ int ocelot_ace_rule_stats_update(struct ocelot_ace_rule *rule); int ocelot_ace_init(struct ocelot *ocelot); void ocelot_ace_deinit(void); -int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port, +int ocelot_setup_tc_block_flower_bind(struct ocelot_port_private *priv, struct flow_block_offload *f); -void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port, +void ocelot_setup_tc_block_flower_unbind(struct ocelot_port_private *priv, struct flow_block_offload *f); #endif /* 
_MSCC_OCELOT_ACE_H_ */ diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index aac115136720..5541ec26f953 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -95,6 +95,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) do { struct skb_shared_hwtstamps *shhwtstamps; + struct ocelot_port_private *priv; + struct ocelot_port *ocelot_port; u64 tod_in_ns, full_ts_in_ns; struct frame_info info = {}; struct net_device *dev; @@ -103,7 +105,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) int sz, len, buf_len; struct sk_buff *skb; - for (i = 0; i < IFH_LEN; i++) { + for (i = 0; i < OCELOT_TAG_LEN / 4; i++) { err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]); if (err != 4) break; @@ -114,7 +116,10 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) ocelot_parse_ifh(ifh, &info); - dev = ocelot->ports[info.port]->dev; + ocelot_port = ocelot->ports[info.port]; + priv = container_of(ocelot_port, struct ocelot_port_private, + port); + dev = priv->dev; skb = netdev_alloc_skb(dev, info.len); @@ -249,6 +254,57 @@ static const struct of_device_id mscc_ocelot_match[] = { }; MODULE_DEVICE_TABLE(of, mscc_ocelot_match); +static void ocelot_port_pcs_init(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + /* Disable HDX fast control */ + ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS, + DEV_PORT_MISC); + + /* SGMII only for now */ + ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA, + PCS1G_MODE_CFG); + ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG); + + /* Enable PCS */ + ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG); + + /* No aneg on SGMII */ + ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG); + + /* No loopback */ + ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG); +} + +static int ocelot_reset(struct ocelot *ocelot) +{ + int retries = 100; + u32 val; + + regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1); + regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); + + do { + msleep(1); + regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], + &val); + } while (val && --retries); + + if (!retries) + return -ETIMEDOUT; + + regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); + regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1); + + return 0; +} + +static const struct ocelot_ops ocelot_ops = { + .pcs_init = ocelot_port_pcs_init, + .reset = ocelot_reset, +}; + static int mscc_ocelot_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -257,13 +313,12 @@ static int mscc_ocelot_probe(struct platform_device *pdev) struct ocelot *ocelot; struct regmap *hsio; unsigned int i; - u32 val; struct { enum ocelot_target id; char *name; u8 optional:1; - } res[] = { + } io_target[] = { { SYS, "sys" }, { REW, "rew" }, { QSYS, "qsys" }, @@ -283,20 +338,23 @@ static int mscc_ocelot_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ocelot); ocelot->dev = &pdev->dev; - for (i = 0; i < ARRAY_SIZE(res); i++) { + for (i = 0; i < ARRAY_SIZE(io_target); i++) { struct regmap *target; + struct resource *res; - target = ocelot_io_platform_init(ocelot, pdev, res[i].name); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + io_target[i].name); + + target = ocelot_regmap_init(ocelot, res); if (IS_ERR(target)) { - if (res[i].optional) { - ocelot->targets[res[i].id] = NULL; + if 
(io_target[i].optional) { + ocelot->targets[io_target[i].id] = NULL; continue; } - return PTR_ERR(target); } - ocelot->targets[res[i].id] = target; + ocelot->targets[io_target[i].id] = target; } hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio"); @@ -307,7 +365,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) ocelot->targets[HSIO] = hsio; - err = ocelot_chip_init(ocelot); + err = ocelot_chip_init(ocelot, &ocelot_ops); if (err) return err; @@ -334,18 +392,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev) ocelot->ptp = 1; } - regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1); - regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); - - do { - msleep(1); - regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], - &val); - } while (val); - - regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); - regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1); - ocelot->num_cpu_ports = 1; /* 1 port on the switch, two groups */ ports = of_get_child_by_name(np, "ethernet-ports"); @@ -359,17 +405,20 @@ static int mscc_ocelot_probe(struct platform_device *pdev) ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports, sizeof(struct ocelot_port *), GFP_KERNEL); - INIT_LIST_HEAD(&ocelot->multicast); ocelot_init(ocelot); + ocelot_set_cpu_port(ocelot, ocelot->num_phys_ports, + OCELOT_TAG_PREFIX_NONE, OCELOT_TAG_PREFIX_NONE); for_each_available_child_of_node(ports, portnp) { + struct ocelot_port_private *priv; + struct ocelot_port *ocelot_port; struct device_node *phy_node; + phy_interface_t phy_mode; struct phy_device *phy; struct resource *res; struct phy *serdes; void __iomem *regs; char res_name[8]; - int phy_mode; u32 port; if (of_property_read_u32(portnp, "reg", &port)) @@ -398,13 +447,15 @@ static int mscc_ocelot_probe(struct platform_device *pdev) goto out_put_ports; } - phy_mode = of_get_phy_mode(portnp); - if (phy_mode < 0) - ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA; - else - ocelot->ports[port]->phy_mode = phy_mode; + ocelot_port = ocelot->ports[port]; + priv = container_of(ocelot_port, struct ocelot_port_private, + port); + + of_get_phy_mode(portnp, &phy_mode); + + priv->phy_mode = phy_mode; - switch (ocelot->ports[port]->phy_mode) { + switch (priv->phy_mode) { case PHY_INTERFACE_MODE_NA: continue; case PHY_INTERFACE_MODE_SGMII: @@ -413,7 +464,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) /* Ensure clock signals and speed is set on all * QSGMII links */ - ocelot_port_writel(ocelot->ports[port], + ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED (OCELOT_SPEED_1000), DEV_CLOCK_CFG); @@ -441,7 +492,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) goto out_put_ports; } - ocelot->ports[port]->serdes = serdes; + priv->serdes = serdes; } register_netdevice_notifier(&ocelot_netdevice_nb); diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index b894bc0c9c16..3d65b99b9734 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -10,7 +10,7 @@ struct ocelot_port_block { struct ocelot_acl_block *block; - struct ocelot_port *port; + struct ocelot_port_private *priv; }; static int ocelot_flower_parse_action(struct flow_cls_offload *f, @@ -177,8 +177,8 @@ struct ocelot_ace_rule *ocelot_ace_rule_create(struct flow_cls_offload *f, if (!rule) return NULL; - rule->port = block->port; - rule->chip_port = block->port->chip_port; + rule->port = &block->priv->port; + 
rule->chip_port = block->priv->chip_port; return rule; } @@ -202,7 +202,7 @@ static int ocelot_flower_replace(struct flow_cls_offload *f, if (ret) return ret; - port_block->port->tc.offload_cnt++; + port_block->priv->tc.offload_cnt++; return 0; } @@ -213,14 +213,14 @@ static int ocelot_flower_destroy(struct flow_cls_offload *f, int ret; rule.prio = f->common.prio; - rule.port = port_block->port; + rule.port = &port_block->priv->port; rule.id = f->cookie; ret = ocelot_ace_rule_offload_del(&rule); if (ret) return ret; - port_block->port->tc.offload_cnt--; + port_block->priv->tc.offload_cnt--; return 0; } @@ -231,7 +231,7 @@ static int ocelot_flower_stats_update(struct flow_cls_offload *f, int ret; rule.prio = f->common.prio; - rule.port = port_block->port; + rule.port = &port_block->priv->port; rule.id = f->cookie; ret = ocelot_ace_rule_stats_update(&rule); if (ret) @@ -261,7 +261,7 @@ static int ocelot_setup_tc_block_cb_flower(enum tc_setup_type type, { struct ocelot_port_block *port_block = cb_priv; - if (!tc_cls_can_offload_and_chain0(port_block->port->dev, type_data)) + if (!tc_cls_can_offload_and_chain0(port_block->priv->dev, type_data)) return -EOPNOTSUPP; switch (type) { @@ -275,7 +275,7 @@ static int ocelot_setup_tc_block_cb_flower(enum tc_setup_type type, } static struct ocelot_port_block* -ocelot_port_block_create(struct ocelot_port *port) +ocelot_port_block_create(struct ocelot_port_private *priv) { struct ocelot_port_block *port_block; @@ -283,7 +283,7 @@ ocelot_port_block_create(struct ocelot_port *port) if (!port_block) return NULL; - port_block->port = port; + port_block->priv = priv; return port_block; } @@ -300,7 +300,7 @@ static void ocelot_tc_block_unbind(void *cb_priv) ocelot_port_block_destroy(port_block); } -int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port, +int ocelot_setup_tc_block_flower_bind(struct ocelot_port_private *priv, struct flow_block_offload *f) { struct ocelot_port_block *port_block; @@ -311,14 +311,14 @@ int ocelot_setup_tc_block_flower_bind(struct ocelot_port *port, return -EOPNOTSUPP; block_cb = flow_block_cb_lookup(f->block, - ocelot_setup_tc_block_cb_flower, port); + ocelot_setup_tc_block_cb_flower, priv); if (!block_cb) { - port_block = ocelot_port_block_create(port); + port_block = ocelot_port_block_create(priv); if (!port_block) return -ENOMEM; block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower, - port, port_block, + priv, port_block, ocelot_tc_block_unbind); if (IS_ERR(block_cb)) { ret = PTR_ERR(block_cb); @@ -339,13 +339,13 @@ err_cb_register: return ret; } -void ocelot_setup_tc_block_flower_unbind(struct ocelot_port *port, +void ocelot_setup_tc_block_flower_unbind(struct ocelot_port_private *priv, struct flow_block_offload *f) { struct flow_block_cb *block_cb; block_cb = flow_block_cb_lookup(f->block, - ocelot_setup_tc_block_cb_flower, port); + ocelot_setup_tc_block_cb_flower, priv); if (!block_cb) return; diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c index c6db8ad31fdf..b229b1cb68ef 100644 --- a/drivers/net/ethernet/mscc/ocelot_io.c +++ b/drivers/net/ethernet/mscc/ocelot_io.c @@ -97,20 +97,16 @@ static struct regmap_config ocelot_regmap_config = { .reg_stride = 4, }; -struct regmap *ocelot_io_platform_init(struct ocelot *ocelot, - struct platform_device *pdev, - const char *name) +struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res) { - struct resource *res; void __iomem *regs; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); regs = 
devm_ioremap_resource(ocelot->dev, res); if (IS_ERR(regs)) return ERR_CAST(regs); - ocelot_regmap_config.name = name; - return devm_regmap_init_mmio(ocelot->dev, regs, - &ocelot_regmap_config); + ocelot_regmap_config.name = res->name; + + return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config); } -EXPORT_SYMBOL(ocelot_io_platform_init); +EXPORT_SYMBOL(ocelot_regmap_init); diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c index 701e82dd749a..faddce43f2e3 100644 --- a/drivers/net/ethernet/mscc/ocelot_police.c +++ b/drivers/net/ethernet/mscc/ocelot_police.c @@ -40,13 +40,12 @@ struct qos_policer_conf { u8 ipg; /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */ }; -static int qos_policer_conf_set(struct ocelot_port *port, u32 pol_ix, +static int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix, struct qos_policer_conf *conf) { u32 cf = 0, cir_ena = 0, frm_mode = POL_MODE_LINERATE; u32 cir = 0, cbs = 0, pir = 0, pbs = 0; bool cir_discard = 0, pir_discard = 0; - struct ocelot *ocelot = port->ocelot; u32 pbs_max = 0, cbs_max = 0; u8 ipg = 20; u32 value; @@ -123,22 +122,26 @@ static int qos_policer_conf_set(struct ocelot_port *port, u32 pol_ix, /* Check limits */ if (pir > GENMASK(15, 0)) { - netdev_err(port->dev, "Invalid pir\n"); + dev_err(ocelot->dev, "Invalid pir for port %d: %u (max %lu)\n", + port, pir, GENMASK(15, 0)); return -EINVAL; } if (cir > GENMASK(15, 0)) { - netdev_err(port->dev, "Invalid cir\n"); + dev_err(ocelot->dev, "Invalid cir for port %d: %u (max %lu)\n", + port, cir, GENMASK(15, 0)); return -EINVAL; } if (pbs > pbs_max) { - netdev_err(port->dev, "Invalid pbs\n"); + dev_err(ocelot->dev, "Invalid pbs for port %d: %u (max %u)\n", + port, pbs, pbs_max); return -EINVAL; } if (cbs > cbs_max) { - netdev_err(port->dev, "Invalid cbs\n"); + dev_err(ocelot->dev, "Invalid cbs for port %d: %u (max %u)\n", + port, cbs, cbs_max); return -EINVAL; } @@ -171,10 +174,9 @@ static int qos_policer_conf_set(struct ocelot_port *port, u32 pol_ix, return 0; } -int ocelot_port_policer_add(struct ocelot_port *port, +int ocelot_port_policer_add(struct ocelot *ocelot, int port, struct ocelot_policer *pol) { - struct ocelot *ocelot = port->ocelot; struct qos_policer_conf pp = { 0 }; int err; @@ -185,11 +187,10 @@ int ocelot_port_policer_add(struct ocelot_port *port, pp.pir = pol->rate; pp.pbs = pol->burst; - netdev_dbg(port->dev, - "%s: port %u pir %u kbps, pbs %u bytes\n", - __func__, port->chip_port, pp.pir, pp.pbs); + dev_dbg(ocelot->dev, "%s: port %u pir %u kbps, pbs %u bytes\n", + __func__, port, pp.pir, pp.pbs); - err = qos_policer_conf_set(port, POL_IX_PORT + port->chip_port, &pp); + err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp); if (err) return err; @@ -198,22 +199,21 @@ int ocelot_port_policer_add(struct ocelot_port *port, ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER), ANA_PORT_POL_CFG_PORT_POL_ENA | ANA_PORT_POL_CFG_POL_ORDER_M, - ANA_PORT_POL_CFG, port->chip_port); + ANA_PORT_POL_CFG, port); return 0; } -int ocelot_port_policer_del(struct ocelot_port *port) +int ocelot_port_policer_del(struct ocelot *ocelot, int port) { - struct ocelot *ocelot = port->ocelot; struct qos_policer_conf pp = { 0 }; int err; - netdev_dbg(port->dev, "%s: port %u\n", __func__, port->chip_port); + dev_dbg(ocelot->dev, "%s: port %u\n", __func__, port); pp.mode = MSCC_QOS_RATE_MODE_DISABLED; - err = qos_policer_conf_set(port, POL_IX_PORT + port->chip_port, &pp); + err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, 
&pp); if (err) return err; @@ -221,7 +221,7 @@ int ocelot_port_policer_del(struct ocelot_port *port) ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER), ANA_PORT_POL_CFG_PORT_POL_ENA | ANA_PORT_POL_CFG_POL_ORDER_M, - ANA_PORT_POL_CFG, port->chip_port); + ANA_PORT_POL_CFG, port); return 0; } diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h index d1137f79efda..ae9509229463 100644 --- a/drivers/net/ethernet/mscc/ocelot_police.h +++ b/drivers/net/ethernet/mscc/ocelot_police.h @@ -14,9 +14,9 @@ struct ocelot_policer { u32 burst; /* bytes */ }; -int ocelot_port_policer_add(struct ocelot_port *port, +int ocelot_port_policer_add(struct ocelot *ocelot, int port, struct ocelot_policer *pol); -int ocelot_port_policer_del(struct ocelot_port *port); +int ocelot_port_policer_del(struct ocelot *ocelot, int port); #endif /* _MSCC_OCELOT_POLICE_H_ */ diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c index e59977d20400..b88b5899b227 100644 --- a/drivers/net/ethernet/mscc/ocelot_regs.c +++ b/drivers/net/ethernet/mscc/ocelot_regs.c @@ -423,7 +423,7 @@ static void ocelot_pll5_init(struct ocelot *ocelot) HSIO_PLL5G_CFG2_AMPC_SEL(0x10)); } -int ocelot_chip_init(struct ocelot *ocelot) +int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops) { int ret; @@ -431,6 +431,7 @@ int ocelot_chip_init(struct ocelot *ocelot) ocelot->stats_layout = ocelot_stats_layout; ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout); ocelot->shared_queue_sz = 224 * 1024; + ocelot->ops = ops; ret = ocelot_regfields_init(ocelot, ocelot_regfields); if (ret) diff --git a/drivers/net/ethernet/mscc/ocelot_sys.h b/drivers/net/ethernet/mscc/ocelot_sys.h deleted file mode 100644 index 16f91e172bcb..000000000000 --- a/drivers/net/ethernet/mscc/ocelot_sys.h +++ /dev/null @@ -1,144 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ -/* - * Microsemi Ocelot Switch driver - * - * Copyright (c) 2017 Microsemi Corporation - */ - -#ifndef _MSCC_OCELOT_SYS_H_ -#define _MSCC_OCELOT_SYS_H_ - -#define SYS_COUNT_RX_OCTETS_RSZ 0x4 - -#define SYS_COUNT_TX_OCTETS_RSZ 0x4 - -#define SYS_PORT_MODE_RSZ 0x4 - -#define SYS_PORT_MODE_DATA_WO_TS(x) (((x) << 5) & GENMASK(6, 5)) -#define SYS_PORT_MODE_DATA_WO_TS_M GENMASK(6, 5) -#define SYS_PORT_MODE_DATA_WO_TS_X(x) (((x) & GENMASK(6, 5)) >> 5) -#define SYS_PORT_MODE_INCL_INJ_HDR(x) (((x) << 3) & GENMASK(4, 3)) -#define SYS_PORT_MODE_INCL_INJ_HDR_M GENMASK(4, 3) -#define SYS_PORT_MODE_INCL_INJ_HDR_X(x) (((x) & GENMASK(4, 3)) >> 3) -#define SYS_PORT_MODE_INCL_XTR_HDR(x) (((x) << 1) & GENMASK(2, 1)) -#define SYS_PORT_MODE_INCL_XTR_HDR_M GENMASK(2, 1) -#define SYS_PORT_MODE_INCL_XTR_HDR_X(x) (((x) & GENMASK(2, 1)) >> 1) -#define SYS_PORT_MODE_INJ_HDR_ERR BIT(0) - -#define SYS_FRONT_PORT_MODE_RSZ 0x4 - -#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(0) - -#define SYS_FRM_AGING_AGE_TX_ENA BIT(20) -#define SYS_FRM_AGING_MAX_AGE(x) ((x) & GENMASK(19, 0)) -#define SYS_FRM_AGING_MAX_AGE_M GENMASK(19, 0) - -#define SYS_STAT_CFG_STAT_CLEAR_SHOT(x) (((x) << 10) & GENMASK(16, 10)) -#define SYS_STAT_CFG_STAT_CLEAR_SHOT_M GENMASK(16, 10) -#define SYS_STAT_CFG_STAT_CLEAR_SHOT_X(x) (((x) & GENMASK(16, 10)) >> 10) -#define SYS_STAT_CFG_STAT_VIEW(x) ((x) & GENMASK(9, 0)) -#define SYS_STAT_CFG_STAT_VIEW_M GENMASK(9, 0) - -#define SYS_SW_STATUS_RSZ 0x4 - -#define SYS_SW_STATUS_PORT_RX_PAUSED BIT(0) - -#define SYS_MISC_CFG_PTP_RSRV_CLR BIT(1) -#define SYS_MISC_CFG_PTP_DIS_NEG_RO BIT(0) - -#define SYS_REW_MAC_HIGH_CFG_RSZ 0x4 - -#define 
SYS_REW_MAC_LOW_CFG_RSZ 0x4 - -#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG(x) (((x) << 6) & GENMASK(21, 6)) -#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG_M GENMASK(21, 6) -#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG_X(x) (((x) & GENMASK(21, 6)) >> 6) -#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET(x) ((x) & GENMASK(5, 0)) -#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET_M GENMASK(5, 0) - -#define SYS_PAUSE_CFG_RSZ 0x4 - -#define SYS_PAUSE_CFG_PAUSE_START(x) (((x) << 10) & GENMASK(18, 10)) -#define SYS_PAUSE_CFG_PAUSE_START_M GENMASK(18, 10) -#define SYS_PAUSE_CFG_PAUSE_START_X(x) (((x) & GENMASK(18, 10)) >> 10) -#define SYS_PAUSE_CFG_PAUSE_STOP(x) (((x) << 1) & GENMASK(9, 1)) -#define SYS_PAUSE_CFG_PAUSE_STOP_M GENMASK(9, 1) -#define SYS_PAUSE_CFG_PAUSE_STOP_X(x) (((x) & GENMASK(9, 1)) >> 1) -#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0) - -#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START(x) (((x) << 9) & GENMASK(17, 9)) -#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_M GENMASK(17, 9) -#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_X(x) (((x) & GENMASK(17, 9)) >> 9) -#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_STOP(x) ((x) & GENMASK(8, 0)) -#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_STOP_M GENMASK(8, 0) - -#define SYS_ATOP_RSZ 0x4 - -#define SYS_MAC_FC_CFG_RSZ 0x4 - -#define SYS_MAC_FC_CFG_FC_LINK_SPEED(x) (((x) << 26) & GENMASK(27, 26)) -#define SYS_MAC_FC_CFG_FC_LINK_SPEED_M GENMASK(27, 26) -#define SYS_MAC_FC_CFG_FC_LINK_SPEED_X(x) (((x) & GENMASK(27, 26)) >> 26) -#define SYS_MAC_FC_CFG_FC_LATENCY_CFG(x) (((x) << 20) & GENMASK(25, 20)) -#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_M GENMASK(25, 20) -#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_X(x) (((x) & GENMASK(25, 20)) >> 20) -#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18) -#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17) -#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16) -#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG(x) ((x) & GENMASK(15, 0)) -#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_M GENMASK(15, 0) - -#define SYS_MMGT_RELCNT(x) (((x) << 16) & GENMASK(31, 16)) -#define SYS_MMGT_RELCNT_M GENMASK(31, 16) -#define SYS_MMGT_RELCNT_X(x) (((x) & GENMASK(31, 16)) >> 16) -#define SYS_MMGT_FREECNT(x) ((x) & GENMASK(15, 0)) -#define SYS_MMGT_FREECNT_M GENMASK(15, 0) - -#define SYS_MMGT_FAST_FREEVLD(x) (((x) << 4) & GENMASK(7, 4)) -#define SYS_MMGT_FAST_FREEVLD_M GENMASK(7, 4) -#define SYS_MMGT_FAST_FREEVLD_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define SYS_MMGT_FAST_RELVLD(x) ((x) & GENMASK(3, 0)) -#define SYS_MMGT_FAST_RELVLD_M GENMASK(3, 0) - -#define SYS_EVENTS_DIF_RSZ 0x4 - -#define SYS_EVENTS_DIF_EV_DRX(x) (((x) << 6) & GENMASK(8, 6)) -#define SYS_EVENTS_DIF_EV_DRX_M GENMASK(8, 6) -#define SYS_EVENTS_DIF_EV_DRX_X(x) (((x) & GENMASK(8, 6)) >> 6) -#define SYS_EVENTS_DIF_EV_DTX(x) ((x) & GENMASK(5, 0)) -#define SYS_EVENTS_DIF_EV_DTX_M GENMASK(5, 0) - -#define SYS_EVENTS_CORE_EV_FWR BIT(2) -#define SYS_EVENTS_CORE_EV_ANA(x) ((x) & GENMASK(1, 0)) -#define SYS_EVENTS_CORE_EV_ANA_M GENMASK(1, 0) - -#define SYS_CNT_GSZ 0x4 - -#define SYS_PTP_STATUS_PTP_TXSTAMP_OAM BIT(29) -#define SYS_PTP_STATUS_PTP_OVFL BIT(28) -#define SYS_PTP_STATUS_PTP_MESS_VLD BIT(27) -#define SYS_PTP_STATUS_PTP_MESS_ID(x) (((x) << 21) & GENMASK(26, 21)) -#define SYS_PTP_STATUS_PTP_MESS_ID_M GENMASK(26, 21) -#define SYS_PTP_STATUS_PTP_MESS_ID_X(x) (((x) & GENMASK(26, 21)) >> 21) -#define SYS_PTP_STATUS_PTP_MESS_TXPORT(x) (((x) << 16) & GENMASK(20, 16)) -#define SYS_PTP_STATUS_PTP_MESS_TXPORT_M GENMASK(20, 16) -#define SYS_PTP_STATUS_PTP_MESS_TXPORT_X(x) (((x) & GENMASK(20, 16)) >> 16) -#define SYS_PTP_STATUS_PTP_MESS_SEQ_ID(x) ((x) & GENMASK(15, 0)) -#define 
SYS_PTP_STATUS_PTP_MESS_SEQ_ID_M GENMASK(15, 0) - -#define SYS_PTP_TXSTAMP_PTP_TXSTAMP(x) ((x) & GENMASK(29, 0)) -#define SYS_PTP_TXSTAMP_PTP_TXSTAMP_M GENMASK(29, 0) -#define SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC BIT(31) - -#define SYS_PTP_NXT_PTP_NXT BIT(0) - -#define SYS_PTP_CFG_PTP_STAMP_WID(x) (((x) << 2) & GENMASK(7, 2)) -#define SYS_PTP_CFG_PTP_STAMP_WID_M GENMASK(7, 2) -#define SYS_PTP_CFG_PTP_STAMP_WID_X(x) (((x) & GENMASK(7, 2)) >> 2) -#define SYS_PTP_CFG_PTP_CF_ROLL_MODE(x) ((x) & GENMASK(1, 0)) -#define SYS_PTP_CFG_PTP_CF_ROLL_MODE_M GENMASK(1, 0) - -#define SYS_RAM_INIT_RAM_INIT BIT(1) -#define SYS_RAM_INIT_RAM_CFG_HOOK BIT(0) - -#endif diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c index 16a6db71ca5e..a4f7fbd76507 100644 --- a/drivers/net/ethernet/mscc/ocelot_tc.c +++ b/drivers/net/ethernet/mscc/ocelot_tc.c @@ -9,17 +9,19 @@ #include "ocelot_ace.h" #include <net/pkt_cls.h> -static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port, +static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv, struct tc_cls_matchall_offload *f, bool ingress) { struct netlink_ext_ack *extack = f->common.extack; + struct ocelot *ocelot = priv->port.ocelot; struct ocelot_policer pol = { 0 }; struct flow_action_entry *action; + int port = priv->chip_port; int err; - netdev_dbg(port->dev, "%s: port %u command %d cookie %lu\n", - __func__, port->chip_port, f->command, f->cookie); + netdev_dbg(priv->dev, "%s: port %u command %d cookie %lu\n", + __func__, port, f->command, f->cookie); if (!ingress) { NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported"); @@ -34,7 +36,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port, return -EOPNOTSUPP; } - if (port->tc.block_shared) { + if (priv->tc.block_shared) { NL_SET_ERR_MSG_MOD(extack, "Rate limit is not supported on shared blocks"); return -EOPNOTSUPP; @@ -47,7 +49,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port, return -EOPNOTSUPP; } - if (port->tc.police_id && port->tc.police_id != f->cookie) { + if (priv->tc.police_id && priv->tc.police_id != f->cookie) { NL_SET_ERR_MSG_MOD(extack, "Only one policer per port is supported\n"); return -EEXIST; @@ -58,27 +60,27 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port *port, PSCHED_NS2TICKS(action->police.burst), PSCHED_TICKS_PER_SEC); - err = ocelot_port_policer_add(port, &pol); + err = ocelot_port_policer_add(ocelot, port, &pol); if (err) { NL_SET_ERR_MSG_MOD(extack, "Could not add policer\n"); return err; } - port->tc.police_id = f->cookie; - port->tc.offload_cnt++; + priv->tc.police_id = f->cookie; + priv->tc.offload_cnt++; return 0; case TC_CLSMATCHALL_DESTROY: - if (port->tc.police_id != f->cookie) + if (priv->tc.police_id != f->cookie) return -ENOENT; - err = ocelot_port_policer_del(port); + err = ocelot_port_policer_del(ocelot, port); if (err) { NL_SET_ERR_MSG_MOD(extack, "Could not delete policer\n"); return err; } - port->tc.police_id = 0; - port->tc.offload_cnt--; + priv->tc.police_id = 0; + priv->tc.offload_cnt--; return 0; case TC_CLSMATCHALL_STATS: /* fall through */ default: @@ -90,21 +92,21 @@ static int ocelot_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv, bool ingress) { - struct ocelot_port *port = cb_priv; + struct ocelot_port_private *priv = cb_priv; - if (!tc_cls_can_offload_and_chain0(port->dev, type_data)) + if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) return -EOPNOTSUPP; switch (type) { case TC_SETUP_CLSMATCHALL: - netdev_dbg(port->dev, 
"tc_block_cb: TC_SETUP_CLSMATCHALL %s\n", + netdev_dbg(priv->dev, "tc_block_cb: TC_SETUP_CLSMATCHALL %s\n", ingress ? "ingress" : "egress"); - return ocelot_setup_tc_cls_matchall(port, type_data, ingress); + return ocelot_setup_tc_cls_matchall(priv, type_data, ingress); case TC_SETUP_CLSFLOWER: return 0; default: - netdev_dbg(port->dev, "tc_block_cb: type %d %s\n", + netdev_dbg(priv->dev, "tc_block_cb: type %d %s\n", type, ingress ? "ingress" : "egress"); @@ -130,19 +132,19 @@ static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type, static LIST_HEAD(ocelot_block_cb_list); -static int ocelot_setup_tc_block(struct ocelot_port *port, +static int ocelot_setup_tc_block(struct ocelot_port_private *priv, struct flow_block_offload *f) { struct flow_block_cb *block_cb; flow_setup_cb_t *cb; int err; - netdev_dbg(port->dev, "tc_block command %d, binder_type %d\n", + netdev_dbg(priv->dev, "tc_block command %d, binder_type %d\n", f->command, f->binder_type); if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { cb = ocelot_setup_tc_block_cb_ig; - port->tc.block_shared = f->block_shared; + priv->tc.block_shared = f->block_shared; } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { cb = ocelot_setup_tc_block_cb_eg; } else { @@ -153,14 +155,14 @@ static int ocelot_setup_tc_block(struct ocelot_port *port, switch (f->command) { case FLOW_BLOCK_BIND: - if (flow_block_cb_is_busy(cb, port, &ocelot_block_cb_list)) + if (flow_block_cb_is_busy(cb, priv, &ocelot_block_cb_list)) return -EBUSY; - block_cb = flow_block_cb_alloc(cb, port, port, NULL); + block_cb = flow_block_cb_alloc(cb, priv, priv, NULL); if (IS_ERR(block_cb)) return PTR_ERR(block_cb); - err = ocelot_setup_tc_block_flower_bind(port, f); + err = ocelot_setup_tc_block_flower_bind(priv, f); if (err < 0) { flow_block_cb_free(block_cb); return err; @@ -169,11 +171,11 @@ static int ocelot_setup_tc_block(struct ocelot_port *port, list_add_tail(&block_cb->driver_list, f->driver_block_list); return 0; case FLOW_BLOCK_UNBIND: - block_cb = flow_block_cb_lookup(f->block, cb, port); + block_cb = flow_block_cb_lookup(f->block, cb, priv); if (!block_cb) return -ENOENT; - ocelot_setup_tc_block_flower_unbind(port, f); + ocelot_setup_tc_block_flower_unbind(priv, f); flow_block_cb_remove(block_cb, f); list_del(&block_cb->driver_list); return 0; @@ -185,11 +187,11 @@ static int ocelot_setup_tc_block(struct ocelot_port *port, int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { - struct ocelot_port *port = netdev_priv(dev); + struct ocelot_port_private *priv = netdev_priv(dev); switch (type) { case TC_SETUP_BLOCK: - return ocelot_setup_tc_block(port, type_data); + return ocelot_setup_tc_block(priv, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 2761f3a3ae50..49c7987c2abd 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -1346,10 +1346,9 @@ static int nixge_probe(struct platform_device *pdev) } } - priv->phy_mode = of_get_phy_mode(pdev->dev.of_node); - if ((int)priv->phy_mode < 0) { + err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode); + if (err) { netdev_err(ndev, "not find \"phy-mode\" property\n"); - err = -EINVAL; goto unregister_mdio; } diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 05d2b478c99b..6b54cb3b681d 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -2225,6 +2225,7 
@@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) struct nv_skb_map *prev_tx_ctx; struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL; unsigned long flags; + netdev_tx_t ret = NETDEV_TX_OK; /* add fragments to entries count */ for (i = 0; i < fragments; i++) { @@ -2240,7 +2241,12 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); np->tx_stop = 1; spin_unlock_irqrestore(&np->lock, flags); - return NETDEV_TX_BUSY; + + /* When normal packets and/or xmit_more packets fill up + * tx_desc, it is necessary to trigger NIC tx reg. + */ + ret = NETDEV_TX_BUSY; + goto txkick; } spin_unlock_irqrestore(&np->lock, flags); @@ -2259,7 +2265,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) u64_stats_update_begin(&np->swstats_tx_syncp); nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); - return NETDEV_TX_OK; + + ret = NETDEV_TX_OK; + + goto dma_error; } np->put_tx_ctx->dma_len = bcnt; np->put_tx_ctx->dma_single = 1; @@ -2305,7 +2314,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) u64_stats_update_begin(&np->swstats_tx_syncp); nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); - return NETDEV_TX_OK; + + ret = NETDEV_TX_OK; + + goto dma_error; } np->put_tx_ctx->dma_len = bcnt; @@ -2357,8 +2369,15 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_unlock_irqrestore(&np->lock, flags); - writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); - return NETDEV_TX_OK; +txkick: + if (netif_queue_stopped(dev) || !netdev_xmit_more()) { + u32 txrxctl_kick; +dma_error: + txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits; + writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl); + } + + return ret; } static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, @@ -2381,6 +2400,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, struct nv_skb_map *start_tx_ctx = NULL; struct nv_skb_map *tmp_tx_ctx = NULL; unsigned long flags; + netdev_tx_t ret = NETDEV_TX_OK; /* add fragments to entries count */ for (i = 0; i < fragments; i++) { @@ -2396,7 +2416,13 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, netif_stop_queue(dev); np->tx_stop = 1; spin_unlock_irqrestore(&np->lock, flags); - return NETDEV_TX_BUSY; + + /* When normal packets and/or xmit_more packets fill up + * tx_desc, it is necessary to trigger NIC tx reg. 
+ */ + ret = NETDEV_TX_BUSY; + + goto txkick; } spin_unlock_irqrestore(&np->lock, flags); @@ -2416,7 +2442,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, u64_stats_update_begin(&np->swstats_tx_syncp); nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); - return NETDEV_TX_OK; + + ret = NETDEV_TX_OK; + + goto dma_error; } np->put_tx_ctx->dma_len = bcnt; np->put_tx_ctx->dma_single = 1; @@ -2463,7 +2492,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, u64_stats_update_begin(&np->swstats_tx_syncp); nv_txrx_stats_inc(stat_tx_dropped); u64_stats_update_end(&np->swstats_tx_syncp); - return NETDEV_TX_OK; + + ret = NETDEV_TX_OK; + + goto dma_error; } np->put_tx_ctx->dma_len = bcnt; np->put_tx_ctx->dma_single = 0; @@ -2542,8 +2574,15 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, spin_unlock_irqrestore(&np->lock, flags); - writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); - return NETDEV_TX_OK; +txkick: + if (netif_queue_stopped(dev) || !netdev_xmit_more()) { + u32 txrxctl_kick; +dma_error: + txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits; + writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl); + } + + return ret; } static inline void nv_tx_flip_ownership(struct net_device *dev) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 1e26964fe4e9..481b096e984d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1208,8 +1208,16 @@ enum qede_remove_mode { static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) { struct net_device *ndev = pci_get_drvdata(pdev); - struct qede_dev *edev = netdev_priv(ndev); - struct qed_dev *cdev = edev->cdev; + struct qede_dev *edev; + struct qed_dev *cdev; + + if (!ndev) { + dev_info(&pdev->dev, "Device has already been removed\n"); + return; + } + + edev = netdev_priv(ndev); + cdev = edev->cdev; DP_INFO(edev, "Starting qede_remove\n"); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index c84ab052ef26..98f92268cbaa 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -213,9 +213,9 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu) { struct emac_adapter *adpt = netdev_priv(netdev); - netif_info(adpt, hw, adpt->netdev, - "changing MTU from %d to %d\n", netdev->mtu, - new_mtu); + netif_dbg(adpt, hw, adpt->netdev, + "changing MTU from %d to %d\n", netdev->mtu, + new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 9c54b715228e..06de59521fc4 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -57,10 +57,10 @@ static int rmnet_unregister_real_device(struct net_device *real_dev, if (port->nr_rmnet_devs) return -EINVAL; - kfree(port); - netdev_rx_handler_unregister(real_dev); + kfree(port); + /* release reference on real_dev */ dev_put(real_dev); diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c index 8f54a2c832eb..355cc810e322 100644 --- a/drivers/net/ethernet/realtek/r8169_firmware.c +++ b/drivers/net/ethernet/realtek/r8169_firmware.c @@ -37,7 +37,7 @@ struct fw_info { u8 chksum; } __packed; -#define FW_OPCODE_SIZE 
sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code)) +#define FW_OPCODE_SIZE FIELD_SIZEOF(struct rtl_fw_phy_action, code[0]) static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw) { @@ -92,19 +92,24 @@ static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw) for (index = 0; index < pa->size; index++) { u32 action = le32_to_cpu(pa->code[index]); + u32 val = action & 0x0000ffff; u32 regno = (action & 0x0fff0000) >> 16; switch (action >> 28) { case PHY_READ: case PHY_DATA_OR: case PHY_DATA_AND: - case PHY_MDIO_CHG: case PHY_CLEAR_READCOUNT: case PHY_WRITE: case PHY_WRITE_PREVIOUS: case PHY_DELAY_MS: break; + case PHY_MDIO_CHG: + if (val > 1) + goto out; + break; + case PHY_BJMPN: if (regno > index) goto out; @@ -164,12 +169,12 @@ void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) index -= (regno + 1); break; case PHY_MDIO_CHG: - if (data == 0) { - fw_write = rtl_fw->phy_write; - fw_read = rtl_fw->phy_read; - } else if (data == 1) { + if (data) { fw_write = rtl_fw->mac_mcu_write; fw_read = rtl_fw->mac_mcu_read; + } else { + fw_write = rtl_fw->phy_write; + fw_read = rtl_fw->phy_read; } break; @@ -198,7 +203,7 @@ void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) index += regno; break; case PHY_DELAY_MS: - mdelay(data); + msleep(data); break; } } diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 0704f8bd1df4..d8fcdb9db8d1 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -52,6 +52,7 @@ #define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" #define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw" #define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw" #define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" #define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" #define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" @@ -135,6 +136,7 @@ enum mac_version { RTL_GIGA_MAC_VER_49, RTL_GIGA_MAC_VER_50, RTL_GIGA_MAC_VER_51, + RTL_GIGA_MAC_VER_52, RTL_GIGA_MAC_VER_60, RTL_GIGA_MAC_VER_61, RTL_GIGA_MAC_NONE @@ -202,6 +204,7 @@ static const struct { [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" }, [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" }, [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, + [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3}, [RTL_GIGA_MAC_VER_60] = {"RTL8125" }, [RTL_GIGA_MAC_VER_61] = {"RTL8125", FIRMWARE_8125A_3}, }; @@ -680,6 +683,7 @@ struct rtl8169_private { struct rtl8169_counters *counters; struct rtl8169_tc_offsets tc_offset; u32 saved_wolopts; + int eee_adv; const char *fw_name; struct rtl_fw *rtl_fw; @@ -712,6 +716,7 @@ MODULE_FIRMWARE(FIRMWARE_8168G_2); MODULE_FIRMWARE(FIRMWARE_8168G_3); MODULE_FIRMWARE(FIRMWARE_8168H_1); MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8168FP_3); MODULE_FIRMWARE(FIRMWARE_8107E_1); MODULE_FIRMWARE(FIRMWARE_8107E_2); MODULE_FIRMWARE(FIRMWARE_8125A_3); @@ -750,7 +755,7 @@ static bool rtl_is_8168evl_up(struct rtl8169_private *tp) { return tp->mac_version >= RTL_GIGA_MAC_VER_34 && tp->mac_version != RTL_GIGA_MAC_VER_39 && - tp->mac_version <= RTL_GIGA_MAC_VER_51; + tp->mac_version <= RTL_GIGA_MAC_VER_52; } static bool rtl_supports_eee(struct rtl8169_private *tp) @@ -910,6 +915,9 @@ static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) { + if (reg == 0x1f) + return tp->ocp_base == OCP_STD_PHY_BASE ? 
0 : tp->ocp_base >> 4; + if (tp->ocp_base != OCP_STD_PHY_BASE) reg -= 0x10; @@ -1083,6 +1091,39 @@ static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) rtl_writephy(tp, reg_addr, (val & ~m) | p); } +static void r8168d_modify_extpage(struct phy_device *phydev, int extpage, + int reg, u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0007); + + __phy_write(phydev, 0x1e, extpage); + __phy_modify(phydev, reg, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168d_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0005); + + __phy_write(phydev, 0x05, parm); + __phy_modify(phydev, 0x06, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + +static void r8168g_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + int oldpage = phy_select_page(phydev, 0x0a43); + + __phy_write(phydev, 0x13, parm); + __phy_modify(phydev, 0x14, mask, val); + + phy_restore_page(phydev, oldpage, 0); +} + DECLARE_RTL_COND(rtl_ephyar_cond) { return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; @@ -1253,9 +1294,7 @@ static void rtl8168_driver_start(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_31: rtl8168dp_driver_start(tp); break; - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52: rtl8168ep_driver_start(tp); break; default: @@ -1287,9 +1326,7 @@ static void rtl8168_driver_stop(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_31: rtl8168dp_driver_stop(tp); break; - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52: rtl8168ep_driver_stop(tp); break; default: @@ -1317,9 +1354,7 @@ static bool r8168_check_dash(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_check_dash(tp); - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52: return r8168ep_check_dash(tp); default: return false; @@ -1494,7 +1529,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) break; case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_39 ... 
RTL_GIGA_MAC_VER_52: options = RTL_R8(tp, Config2) & ~PME_SIGNAL; if (wolopts) options |= PME_SIGNAL; @@ -2065,6 +2100,10 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) } ret = phy_ethtool_set_eee(tp->phydev, data); + + if (!ret) + tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN, + MDIO_AN_EEE_ADV); out: pm_runtime_put_noidle(d); return ret; @@ -2095,10 +2134,16 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { static void rtl_enable_eee(struct rtl8169_private *tp) { struct phy_device *phydev = tp->phydev; - int supported = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + int adv; - if (supported > 0) - phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, supported); + /* respect EEE advertisement the user may have set */ + if (tp->eee_adv >= 0) + adv = tp->eee_adv; + else + adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + + if (adv >= 0) + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); } static void rtl8169_get_mac_version(struct rtl8169_private *tp) @@ -2123,6 +2168,9 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp) { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, + /* RTL8117 */ + { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 }, + /* 8168EP family. */ { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 }, { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 }, @@ -2251,14 +2299,6 @@ static void rtl_apply_firmware(struct rtl8169_private *tp) rtl_fw_write_firmware(tp, tp->rtl_fw); } -static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) -{ - if (rtl_readphy(tp, reg) != val) - netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n"); - else - rtl_apply_firmware(tp); -} - static void rtl8168_config_eee_mac(struct rtl8169_private *tp) { /* Adjust EEE LED frequency */ @@ -2278,15 +2318,8 @@ static void rtl8168f_config_eee_phy(struct rtl8169_private *tp) { struct phy_device *phydev = tp->phydev; - phy_write(phydev, 0x1f, 0x0007); - phy_write(phydev, 0x1e, 0x0020); - phy_set_bits(phydev, 0x15, BIT(8)); - - phy_write(phydev, 0x1f, 0x0005); - phy_write(phydev, 0x05, 0x8b85); - phy_set_bits(phydev, 0x06, BIT(13)); - - phy_write(phydev, 0x1f, 0x0000); + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0, BIT(8)); + r8168d_phy_param(phydev, 0x8b85, 0, BIT(13)); } static void rtl8168g_config_eee_phy(struct rtl8169_private *tp) @@ -2383,13 +2416,7 @@ static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp) { - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0002 }, - { 0x01, 0x90d0 }, - { 0x1f, 0x0000 } - }; - - rtl_writephy_batch(tp, phy_reg_init); + phy_write_paged(tp->phydev, 0x0002, 0x01, 0x90d0); } static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp) @@ -2400,9 +2427,7 @@ static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp) (pdev->subsystem_device != 0xe000)) return; - rtl_writephy(tp, 0x1f, 0x0001); - rtl_writephy(tp, 0x10, 0xf01b); - rtl_writephy(tp, 0x1f, 0x0000); + phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b); } static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp) @@ -2507,54 +2532,28 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp) static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp) { - static const struct phy_reg phy_reg_init[] = { - { 0x10, 0xf41b }, - { 0x1f, 0x0000 } - }; - rtl_writephy(tp, 0x1f, 0x0001); rtl_patchphy(tp, 0x16, 1 << 0); - - rtl_writephy_batch(tp, phy_reg_init); + rtl_writephy(tp, 0x10, 0xf41b); + 
rtl_writephy(tp, 0x1f, 0x0000); } static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp) { - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0001 }, - { 0x10, 0xf41b }, - { 0x1f, 0x0000 } - }; - - rtl_writephy_batch(tp, phy_reg_init); + phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf41b); } static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp) { - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0000 }, - { 0x1d, 0x0f00 }, - { 0x1f, 0x0002 }, - { 0x0c, 0x1ec8 }, - { 0x1f, 0x0000 } - }; - - rtl_writephy_batch(tp, phy_reg_init); + phy_write(tp->phydev, 0x1d, 0x0f00); + phy_write_paged(tp->phydev, 0x0002, 0x0c, 0x1ec8); } static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp) { - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0001 }, - { 0x1d, 0x3d98 }, - { 0x1f, 0x0000 } - }; - - rtl_writephy(tp, 0x1f, 0x0000); - rtl_patchphy(tp, 0x14, 1 << 5); - rtl_patchphy(tp, 0x0d, 1 << 5); - - rtl_writephy_batch(tp, phy_reg_init); + phy_set_bits(tp->phydev, 0x14, BIT(5)); + phy_set_bits(tp->phydev, 0x0d, BIT(5)); + phy_write_paged(tp->phydev, 0x0001, 0x1d, 0x3d98); } static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp) @@ -2636,11 +2635,6 @@ static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0000); } -static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp) -{ - rtl8168c_3_hw_phy_config(tp); -} - static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = { /* Channel Estimation */ { 0x1f, 0x0001 }, @@ -2691,6 +2685,21 @@ static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = { { 0x1f, 0x0002 } }; +static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp, u16 val) +{ + u16 reg_val; + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x001b); + reg_val = rtl_readphy(tp, 0x06); + rtl_writephy(tp, 0x1f, 0x0000); + + if (reg_val != val) + netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n"); + else + rtl_apply_firmware(tp); +} + static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) { rtl_writephy_batch(tp, rtl8168d_1_phy_reg_init_0); @@ -2724,15 +2733,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x0d, val | set[i]); } } else { - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0002 }, - { 0x05, 0x6662 }, - { 0x1f, 0x0005 }, - { 0x05, 0x8330 }, - { 0x06, 0x6662 } - }; - - rtl_writephy_batch(tp, phy_reg_init); + phy_write_paged(tp->phydev, 0x0002, 0x05, 0x6662); + r8168d_phy_param(tp->phydev, 0x8330, 0xffff, 0x6662); } /* RSET couple improve */ @@ -2744,13 +2746,9 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0002); rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600); rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000); - - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x001b); - - rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00); - rtl_writephy(tp, 0x1f, 0x0000); + + rtl8168d_apply_firmware_cond(tp, 0xbf00); } static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) @@ -2777,15 +2775,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x0d, val | set[i]); } } else { - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0002 }, - { 0x05, 0x2642 }, - { 0x1f, 0x0005 }, - { 0x05, 0x8330 }, - { 0x06, 0x2642 } - }; - - rtl_writephy_batch(tp, phy_reg_init); + phy_write_paged(tp->phydev, 0x0002, 0x05, 0x2642); + r8168d_phy_param(tp->phydev, 0x8330, 0xffff, 0x2642); } /* Fine tune PLL performance */ @@ -2796,13 +2787,9 
@@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) /* Switching regulator Slew rate */ rtl_writephy(tp, 0x1f, 0x0002); rtl_patchphy(tp, 0x0f, 0x0017); - - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x001b); - - rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300); - rtl_writephy(tp, 0x1f, 0x0000); + + rtl8168d_apply_firmware_cond(tp, 0xb300); } static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp) @@ -2856,41 +2843,23 @@ static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp) { 0x04, 0xf800 }, { 0x04, 0xf000 }, { 0x1f, 0x0000 }, - - { 0x1f, 0x0007 }, - { 0x1e, 0x0023 }, - { 0x16, 0x0000 }, - { 0x1f, 0x0000 } }; rtl_writephy_batch(tp, phy_reg_init); + + r8168d_modify_extpage(tp->phydev, 0x0023, 0x16, 0xffff, 0x0000); } static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp) { - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0001 }, - { 0x17, 0x0cc0 }, - - { 0x1f, 0x0007 }, - { 0x1e, 0x002d }, - { 0x18, 0x0040 }, - { 0x1f, 0x0000 } - }; - - rtl_writephy_batch(tp, phy_reg_init); - rtl_patchphy(tp, 0x0d, 1 << 5); + phy_write_paged(tp->phydev, 0x0001, 0x17, 0x0cc0); + r8168d_modify_extpage(tp->phydev, 0x002d, 0x18, 0xffff, 0x0040); + phy_set_bits(tp->phydev, 0x0d, BIT(5)); } static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp) { static const struct phy_reg phy_reg_init[] = { - /* Enable Delay cap */ - { 0x1f, 0x0005 }, - { 0x05, 0x8b80 }, - { 0x06, 0xc896 }, - { 0x1f, 0x0000 }, - /* Channel estimation fine tune */ { 0x1f, 0x0001 }, { 0x0b, 0x6c20 }, @@ -2899,60 +2868,38 @@ static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp) { 0x1f, 0x0003 }, { 0x14, 0x6420 }, { 0x1f, 0x0000 }, - - /* Update PFM & 10M TX idle timer */ - { 0x1f, 0x0007 }, - { 0x1e, 0x002f }, - { 0x15, 0x1919 }, - { 0x1f, 0x0000 }, - - { 0x1f, 0x0007 }, - { 0x1e, 0x00ac }, - { 0x18, 0x0006 }, - { 0x1f, 0x0000 } }; + struct phy_device *phydev = tp->phydev; rtl_apply_firmware(tp); + /* Enable Delay cap */ + r8168d_phy_param(phydev, 0x8b80, 0xffff, 0xc896); + rtl_writephy_batch(tp, phy_reg_init); + /* Update PFM & 10M TX idle timer */ + r8168d_modify_extpage(phydev, 0x002f, 0x15, 0xffff, 0x1919); + + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); + /* DCO enable for 10M IDLE Power */ - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x0023); - rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + r8168d_modify_extpage(phydev, 0x0023, 0x17, 0x0000, 0x0006); /* For impedance matching */ - rtl_writephy(tp, 0x1f, 0x0002); - rtl_w0w1_phy(tp, 0x08, 0x8000, 0x7f00); - rtl_writephy(tp, 0x1f, 0x0000); + phy_modify_paged(phydev, 0x0002, 0x08, 0x7f00, 0x8000); /* PHY auto speed down */ - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x002d); - rtl_w0w1_phy(tp, 0x18, 0x0050, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0050); + phy_set_bits(phydev, 0x14, BIT(15)); - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b86); - rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); + r8168d_phy_param(phydev, 0x8b85, 0x2000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x0020); - rtl_w0w1_phy(tp, 0x15, 0x0000, 0x1100); - rtl_writephy(tp, 0x1f, 0x0006); - rtl_writephy(tp, 0x00, 0x5a00); - 
rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x0d, 0x0007); - rtl_writephy(tp, 0x0e, 0x003c); - rtl_writephy(tp, 0x0d, 0x4007); - rtl_writephy(tp, 0x0e, 0x0000); - rtl_writephy(tp, 0x0d, 0x0000); + r8168d_modify_extpage(phydev, 0x0020, 0x15, 0x1100, 0x0000); + phy_write_paged(phydev, 0x0006, 0x00, 0x5a00); + + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0000); } static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr) @@ -2971,36 +2918,20 @@ static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr) static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) { - static const struct phy_reg phy_reg_init[] = { - /* Enable Delay cap */ - { 0x1f, 0x0004 }, - { 0x1f, 0x0007 }, - { 0x1e, 0x00ac }, - { 0x18, 0x0006 }, - { 0x1f, 0x0002 }, - { 0x1f, 0x0000 }, - { 0x1f, 0x0000 }, + struct phy_device *phydev = tp->phydev; - /* Channel estimation fine tune */ - { 0x1f, 0x0003 }, - { 0x09, 0xa20f }, - { 0x1f, 0x0000 }, - { 0x1f, 0x0000 }, + rtl_apply_firmware(tp); - /* Green Setting */ - { 0x1f, 0x0005 }, - { 0x05, 0x8b5b }, - { 0x06, 0x9222 }, - { 0x05, 0x8b6d }, - { 0x06, 0x8000 }, - { 0x05, 0x8b76 }, - { 0x06, 0x8000 }, - { 0x1f, 0x0000 } - }; + /* Enable Delay cap */ + r8168d_modify_extpage(phydev, 0x00ac, 0x18, 0xffff, 0x0006); - rtl_apply_firmware(tp); + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); - rtl_writephy_batch(tp, phy_reg_init); + /* Green Setting */ + r8168d_phy_param(phydev, 0x8b5b, 0xffff, 0x9222); + r8168d_phy_param(phydev, 0x8b6d, 0xffff, 0x8000); + r8168d_phy_param(phydev, 0x8b76, 0xffff, 0x8000); /* For 4-corner performance improve */ rtl_writephy(tp, 0x1f, 0x0005); @@ -3009,25 +2940,14 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0000); /* PHY auto speed down */ - rtl_writephy(tp, 0x1f, 0x0004); - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x002d); - rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000); - rtl_writephy(tp, 0x1f, 0x0002); - rtl_writephy(tp, 0x1f, 0x0000); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); /* improve 10M EEE waveform */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b86); - rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); /* Improve 2-pair detection performance */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); rtl8168f_config_eee_phy(tp); rtl_enable_eee(tp); @@ -3047,24 +2967,17 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) static void rtl8168f_hw_phy_config(struct rtl8169_private *tp) { + struct phy_device *phydev = tp->phydev; + /* For 4-corner performance improve */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b80); - rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + r8168d_phy_param(phydev, 0x8b80, 0x0000, 0x0006); /* PHY auto speed down */ - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x002d); - rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + r8168d_modify_extpage(phydev, 0x002d, 0x18, 0x0000, 0x0010); + phy_set_bits(phydev, 0x14, BIT(15)); /* Improve 10M EEE waveform */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b86); - 
rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + r8168d_phy_param(phydev, 0x8b86, 0x0000, 0x0001); rtl8168f_config_eee_phy(tp); rtl_enable_eee(tp); @@ -3072,52 +2985,31 @@ static void rtl8168f_hw_phy_config(struct rtl8169_private *tp) static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) { - static const struct phy_reg phy_reg_init[] = { - /* Channel estimation fine tune */ - { 0x1f, 0x0003 }, - { 0x09, 0xa20f }, - { 0x1f, 0x0000 }, + struct phy_device *phydev = tp->phydev; - /* Modify green table for giga & fnet */ - { 0x1f, 0x0005 }, - { 0x05, 0x8b55 }, - { 0x06, 0x0000 }, - { 0x05, 0x8b5e }, - { 0x06, 0x0000 }, - { 0x05, 0x8b67 }, - { 0x06, 0x0000 }, - { 0x05, 0x8b70 }, - { 0x06, 0x0000 }, - { 0x1f, 0x0000 }, - { 0x1f, 0x0007 }, - { 0x1e, 0x0078 }, - { 0x17, 0x0000 }, - { 0x19, 0x00fb }, - { 0x1f, 0x0000 }, + rtl_apply_firmware(tp); - /* Modify green table for 10M */ - { 0x1f, 0x0005 }, - { 0x05, 0x8b79 }, - { 0x06, 0xaa00 }, - { 0x1f, 0x0000 }, + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); - /* Disable hiimpedance detection (RTCT) */ - { 0x1f, 0x0003 }, - { 0x01, 0x328a }, - { 0x1f, 0x0000 } - }; + /* Modify green table for giga & fnet */ + r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00fb); - rtl_apply_firmware(tp); + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); - rtl_writephy_batch(tp, phy_reg_init); + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); rtl8168f_hw_phy_config(tp); /* Improve 2-pair detection performance */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); } static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) @@ -3129,77 +3021,43 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) static void rtl8411_hw_phy_config(struct rtl8169_private *tp) { - static const struct phy_reg phy_reg_init[] = { - /* Channel estimation fine tune */ - { 0x1f, 0x0003 }, - { 0x09, 0xa20f }, - { 0x1f, 0x0000 }, - - /* Modify green table for giga & fnet */ - { 0x1f, 0x0005 }, - { 0x05, 0x8b55 }, - { 0x06, 0x0000 }, - { 0x05, 0x8b5e }, - { 0x06, 0x0000 }, - { 0x05, 0x8b67 }, - { 0x06, 0x0000 }, - { 0x05, 0x8b70 }, - { 0x06, 0x0000 }, - { 0x1f, 0x0000 }, - { 0x1f, 0x0007 }, - { 0x1e, 0x0078 }, - { 0x17, 0x0000 }, - { 0x19, 0x00aa }, - { 0x1f, 0x0000 }, - - /* Modify green table for 10M */ - { 0x1f, 0x0005 }, - { 0x05, 0x8b79 }, - { 0x06, 0xaa00 }, - { 0x1f, 0x0000 }, - - /* Disable hiimpedance detection (RTCT) */ - { 0x1f, 0x0003 }, - { 0x01, 0x328a }, - { 0x1f, 0x0000 } - }; - + struct phy_device *phydev = tp->phydev; rtl_apply_firmware(tp); rtl8168f_hw_phy_config(tp); /* Improve 2-pair detection performance */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x4000); - rtl_writephy_batch(tp, phy_reg_init); + /* Channel estimation fine tune */ + phy_write_paged(phydev, 0x0003, 0x09, 0xa20f); + + /* Modify green table for giga & fnet */ + 
r8168d_phy_param(phydev, 0x8b55, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b5e, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b67, 0xffff, 0x0000); + r8168d_phy_param(phydev, 0x8b70, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x17, 0xffff, 0x0000); + r8168d_modify_extpage(phydev, 0x0078, 0x19, 0xffff, 0x00aa); + + /* Modify green table for 10M */ + r8168d_phy_param(phydev, 0x8b79, 0xffff, 0xaa00); + + /* Disable hiimpedance detection (RTCT) */ + phy_write_paged(phydev, 0x0003, 0x01, 0x328a); /* Modify green table for giga */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b54); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800); - rtl_writephy(tp, 0x05, 0x8b5d); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800); - rtl_writephy(tp, 0x05, 0x8a7c); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); - rtl_writephy(tp, 0x05, 0x8a7f); - rtl_w0w1_phy(tp, 0x06, 0x0100, 0x0000); - rtl_writephy(tp, 0x05, 0x8a82); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); - rtl_writephy(tp, 0x05, 0x8a85); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); - rtl_writephy(tp, 0x05, 0x8a88); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); - rtl_writephy(tp, 0x1f, 0x0000); + r8168d_phy_param(phydev, 0x8b54, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8b5d, 0x0800, 0x0000); + r8168d_phy_param(phydev, 0x8a7c, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a7f, 0x0000, 0x0100); + r8168d_phy_param(phydev, 0x8a82, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a85, 0x0100, 0x0000); + r8168d_phy_param(phydev, 0x8a88, 0x0100, 0x0000); /* uc same-seed solution */ - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + r8168d_phy_param(phydev, 0x8b85, 0x0000, 0x8000); /* Green feature */ rtl_writephy(tp, 0x1f, 0x0003); @@ -3219,12 +3077,8 @@ static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp) phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0); phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(7) | BIT(6)); - phy_write(phydev, 0x1f, 0x0a43); - phy_write(phydev, 0x13, 0x8084); - phy_clear_bits(phydev, 0x14, BIT(14) | BIT(13)); - phy_set_bits(phydev, 0x10, BIT(12) | BIT(1) | BIT(0)); - - phy_write(phydev, 0x1f, 0x0000); + r8168g_phy_param(phydev, 0x8084, 0x6000, 0x0000); + phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x1003); } static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) @@ -3254,9 +3108,7 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2)); /* Enable UC LPF tune function */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x8012); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + r8168g_phy_param(tp->phydev, 0x8012, 0x0000, 0x8000); phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14)); @@ -3286,73 +3138,48 @@ static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp) static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp) { + struct phy_device *phydev = tp->phydev; u16 dout_tapbin; u32 data; rtl_apply_firmware(tp); /* CHN EST parameters adjust - giga master */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x809b); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0xf800); - rtl_writephy(tp, 0x13, 0x80a2); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0xff00); - rtl_writephy(tp, 0x13, 0x80a4); - rtl_w0w1_phy(tp, 0x14, 0x8500, 0xff00); - rtl_writephy(tp, 0x13, 0x809c); - rtl_w0w1_phy(tp, 0x14, 0xbd00, 0xff00); - rtl_writephy(tp, 0x1f, 0x0000); + r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 
0x8000); + r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00); /* CHN EST parameters adjust - giga slave */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x80ad); - rtl_w0w1_phy(tp, 0x14, 0x7000, 0xf800); - rtl_writephy(tp, 0x13, 0x80b4); - rtl_w0w1_phy(tp, 0x14, 0x5000, 0xff00); - rtl_writephy(tp, 0x13, 0x80ac); - rtl_w0w1_phy(tp, 0x14, 0x4000, 0xff00); - rtl_writephy(tp, 0x1f, 0x0000); + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000); + r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000); /* CHN EST parameters adjust - fnet */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x808e); - rtl_w0w1_phy(tp, 0x14, 0x1200, 0xff00); - rtl_writephy(tp, 0x13, 0x8090); - rtl_w0w1_phy(tp, 0x14, 0xe500, 0xff00); - rtl_writephy(tp, 0x13, 0x8092); - rtl_w0w1_phy(tp, 0x14, 0x9f00, 0xff00); - rtl_writephy(tp, 0x1f, 0x0000); + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00); /* enable R-tune & PGA-retune function */ dout_tapbin = 0; - rtl_writephy(tp, 0x1f, 0x0a46); - data = rtl_readphy(tp, 0x13); + data = phy_read_paged(phydev, 0x0a46, 0x13); data &= 3; data <<= 2; dout_tapbin |= data; - data = rtl_readphy(tp, 0x12); + data = phy_read_paged(phydev, 0x0a46, 0x12); data &= 0xc000; data >>= 14; dout_tapbin |= data; dout_tapbin = ~(dout_tapbin^0x08); dout_tapbin <<= 12; dout_tapbin &= 0xf000; - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x827a); - rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); - rtl_writephy(tp, 0x13, 0x827b); - rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); - rtl_writephy(tp, 0x13, 0x827c); - rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); - rtl_writephy(tp, 0x13, 0x827d); - rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); - - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x0811); - rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000); - rtl_writephy(tp, 0x1f, 0x0a42); - rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + + r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin); + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); /* enable GPHY 10M */ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11)); @@ -3360,22 +3187,13 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp) /* SAR ADC performance */ phy_modify_paged(tp->phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14)); - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x803f); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); - rtl_writephy(tp, 0x13, 0x8047); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); - rtl_writephy(tp, 0x13, 0x804f); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); - rtl_writephy(tp, 0x13, 0x8057); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); - rtl_writephy(tp, 0x13, 0x805f); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); - rtl_writephy(tp, 0x13, 0x8067); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); - rtl_writephy(tp, 0x13, 0x806f); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); - rtl_writephy(tp, 0x1f, 0x0000); + r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x805f, 
0x3000, 0x0000); + r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000); + r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000); /* disable phy pfm mode */ phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0); @@ -3388,24 +3206,18 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp) static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) { u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0; + struct phy_device *phydev = tp->phydev; u16 rlen; u32 data; rtl_apply_firmware(tp); /* CHIN EST parameter update */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x808a); - rtl_w0w1_phy(tp, 0x14, 0x000a, 0x003f); - rtl_writephy(tp, 0x1f, 0x0000); + r8168g_phy_param(phydev, 0x808a, 0x003f, 0x000a); /* enable R-tune & PGA-retune function */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x0811); - rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000); - rtl_writephy(tp, 0x1f, 0x0a42); - rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800); + phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002); /* enable GPHY 10M */ phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11)); @@ -3425,26 +3237,20 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0); if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) || - (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f)) { - rtl_writephy(tp, 0x1f, 0x0bcf); - rtl_writephy(tp, 0x16, data); - rtl_writephy(tp, 0x1f, 0x0000); - } + (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f)) + phy_write_paged(phydev, 0x0bcf, 0x16, data); /* Modify rlen (TX LPF corner frequency) level */ - rtl_writephy(tp, 0x1f, 0x0bcd); - data = rtl_readphy(tp, 0x16); + data = phy_read_paged(phydev, 0x0bcd, 0x16); data &= 0x000f; rlen = 0; if (data > 3) rlen = data - 3; data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12); - rtl_writephy(tp, 0x17, data); - rtl_writephy(tp, 0x1f, 0x0bcd); - rtl_writephy(tp, 0x1f, 0x0000); + phy_write_paged(phydev, 0x0bcd, 0x17, data); /* disable phy pfm mode */ - phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0); + phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0); rtl8168g_disable_aldps(tp); rtl8168g_config_eee_phy(tp); @@ -3453,22 +3259,21 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp) { + struct phy_device *phydev = tp->phydev; + /* Enable PHY auto speed down */ - phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); + phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2)); rtl8168g_phy_adjust_10m_aldps(tp); /* Enable EEE auto-fallback function */ - phy_modify_paged(tp->phydev, 0x0a4b, 0x11, 0, BIT(2)); + phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2)); /* Enable UC LPF tune function */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x8012); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); /* set rg_sel_sdm_rate */ - phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14)); + phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14)); rtl8168g_disable_aldps(tp); rtl8168g_config_eee_phy(tp); @@ -3477,63 +3282,38 @@ static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp) static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp) { + struct phy_device *phydev = tp->phydev; + rtl8168g_phy_adjust_10m_aldps(tp); /* Enable UC LPF tune function */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 
0x13, 0x8012); - rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000); /* Set rg_sel_sdm_rate */ phy_modify_paged(tp->phydev, 0x0c42, 0x11, BIT(13), BIT(14)); /* Channel estimation parameters */ - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x80f3); - rtl_w0w1_phy(tp, 0x14, 0x8b00, ~0x8bff); - rtl_writephy(tp, 0x13, 0x80f0); - rtl_w0w1_phy(tp, 0x14, 0x3a00, ~0x3aff); - rtl_writephy(tp, 0x13, 0x80ef); - rtl_w0w1_phy(tp, 0x14, 0x0500, ~0x05ff); - rtl_writephy(tp, 0x13, 0x80f6); - rtl_w0w1_phy(tp, 0x14, 0x6e00, ~0x6eff); - rtl_writephy(tp, 0x13, 0x80ec); - rtl_w0w1_phy(tp, 0x14, 0x6800, ~0x68ff); - rtl_writephy(tp, 0x13, 0x80ed); - rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff); - rtl_writephy(tp, 0x13, 0x80f2); - rtl_w0w1_phy(tp, 0x14, 0xf400, ~0xf4ff); - rtl_writephy(tp, 0x13, 0x80f4); - rtl_w0w1_phy(tp, 0x14, 0x8500, ~0x85ff); - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x8110); - rtl_w0w1_phy(tp, 0x14, 0xa800, ~0xa8ff); - rtl_writephy(tp, 0x13, 0x810f); - rtl_w0w1_phy(tp, 0x14, 0x1d00, ~0x1dff); - rtl_writephy(tp, 0x13, 0x8111); - rtl_w0w1_phy(tp, 0x14, 0xf500, ~0xf5ff); - rtl_writephy(tp, 0x13, 0x8113); - rtl_w0w1_phy(tp, 0x14, 0x6100, ~0x61ff); - rtl_writephy(tp, 0x13, 0x8115); - rtl_w0w1_phy(tp, 0x14, 0x9200, ~0x92ff); - rtl_writephy(tp, 0x13, 0x810e); - rtl_w0w1_phy(tp, 0x14, 0x0400, ~0x04ff); - rtl_writephy(tp, 0x13, 0x810c); - rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff); - rtl_writephy(tp, 0x13, 0x810b); - rtl_w0w1_phy(tp, 0x14, 0x5a00, ~0x5aff); - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x80d1); - rtl_w0w1_phy(tp, 0x14, 0xff00, ~0xffff); - rtl_writephy(tp, 0x13, 0x80cd); - rtl_w0w1_phy(tp, 0x14, 0x9e00, ~0x9eff); - rtl_writephy(tp, 0x13, 0x80d3); - rtl_w0w1_phy(tp, 0x14, 0x0e00, ~0x0eff); - rtl_writephy(tp, 0x13, 0x80d5); - rtl_w0w1_phy(tp, 0x14, 0xca00, ~0xcaff); - rtl_writephy(tp, 0x13, 0x80d7); - rtl_w0w1_phy(tp, 0x14, 0x8400, ~0x84ff); + r8168g_phy_param(phydev, 0x80f3, 0xff00, 0x8b00); + r8168g_phy_param(phydev, 0x80f0, 0xff00, 0x3a00); + r8168g_phy_param(phydev, 0x80ef, 0xff00, 0x0500); + r8168g_phy_param(phydev, 0x80f6, 0xff00, 0x6e00); + r8168g_phy_param(phydev, 0x80ec, 0xff00, 0x6800); + r8168g_phy_param(phydev, 0x80ed, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x80f2, 0xff00, 0xf400); + r8168g_phy_param(phydev, 0x80f4, 0xff00, 0x8500); + r8168g_phy_param(phydev, 0x8110, 0xff00, 0xa800); + r8168g_phy_param(phydev, 0x810f, 0xff00, 0x1d00); + r8168g_phy_param(phydev, 0x8111, 0xff00, 0xf500); + r8168g_phy_param(phydev, 0x8113, 0xff00, 0x6100); + r8168g_phy_param(phydev, 0x8115, 0xff00, 0x9200); + r8168g_phy_param(phydev, 0x810e, 0xff00, 0x0400); + r8168g_phy_param(phydev, 0x810c, 0xff00, 0x7c00); + r8168g_phy_param(phydev, 0x810b, 0xff00, 0x5a00); + r8168g_phy_param(phydev, 0x80d1, 0xff00, 0xff00); + r8168g_phy_param(phydev, 0x80cd, 0xff00, 0x9e00); + r8168g_phy_param(phydev, 0x80d3, 0xff00, 0x0e00); + r8168g_phy_param(phydev, 0x80d5, 0xff00, 0xca00); + r8168g_phy_param(phydev, 0x80d7, 0xff00, 0x8400); /* Force PWM-mode */ rtl_writephy(tp, 0x1f, 0x0bcd); @@ -3552,6 +3332,46 @@ static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp) rtl_enable_eee(tp); } +static void rtl8117_hw_phy_config(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + + /* CHN EST parameters adjust - fnet */ + r8168g_phy_param(phydev, 0x808e, 0xff00, 0x4800); + r8168g_phy_param(phydev, 0x8090, 0xff00, 0xcc00); + r8168g_phy_param(phydev, 0x8092, 0xff00, 0xb000); + + 
r8168g_phy_param(phydev, 0x8088, 0xff00, 0x6000); + r8168g_phy_param(phydev, 0x808b, 0x3f00, 0x0b00); + r8168g_phy_param(phydev, 0x808d, 0x1f00, 0x0600); + r8168g_phy_param(phydev, 0x808c, 0xff00, 0xb000); + r8168g_phy_param(phydev, 0x80a0, 0xff00, 0x2800); + r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x809b, 0xf800, 0xb000); + r8168g_phy_param(phydev, 0x809a, 0xff00, 0x4b00); + r8168g_phy_param(phydev, 0x809d, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80a1, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x809f, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x809e, 0xff00, 0x8800); + r8168g_phy_param(phydev, 0x80b2, 0xff00, 0x2200); + r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x9800); + r8168g_phy_param(phydev, 0x80af, 0x3f00, 0x0800); + r8168g_phy_param(phydev, 0x80b3, 0xff00, 0x6f00); + r8168g_phy_param(phydev, 0x80b1, 0x1f00, 0x0300); + r8168g_phy_param(phydev, 0x80b0, 0xff00, 0x9300); + + r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800); + + /* enable GPHY 10M */ + phy_modify_paged(tp->phydev, 0x0a44, 0x11, 0, BIT(11)); + + r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400); + + rtl8168g_disable_aldps(tp); + rtl8168h_config_eee_phy(tp); + rtl_enable_eee(tp); +} + static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) { static const struct phy_reg phy_reg_init[] = { @@ -3571,35 +3391,21 @@ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) static void rtl8105e_hw_phy_config(struct rtl8169_private *tp) { - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0005 }, - { 0x1a, 0x0000 }, - { 0x1f, 0x0000 }, - - { 0x1f, 0x0004 }, - { 0x1c, 0x0000 }, - { 0x1f, 0x0000 }, - - { 0x1f, 0x0001 }, - { 0x15, 0x7701 }, - { 0x1f, 0x0000 } - }; - /* Disable ALDPS before ram code */ - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x18, 0x0310); + phy_write(tp->phydev, 0x18, 0x0310); msleep(100); rtl_apply_firmware(tp); - rtl_writephy_batch(tp, phy_reg_init); + phy_write_paged(tp->phydev, 0x0005, 0x1a, 0x0000); + phy_write_paged(tp->phydev, 0x0004, 0x1c, 0x0000); + phy_write_paged(tp->phydev, 0x0001, 0x15, 0x7701); } static void rtl8402_hw_phy_config(struct rtl8169_private *tp) { /* Disable ALDPS before setting firmware */ - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x18, 0x0310); + phy_write(tp->phydev, 0x18, 0x0310); msleep(20); rtl_apply_firmware(tp); @@ -3622,8 +3428,7 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) }; /* Disable ALDPS before ram code */ - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x18, 0x0310); + phy_write(tp->phydev, 0x18, 0x0310); msleep(100); rtl_apply_firmware(tp); @@ -3648,38 +3453,22 @@ static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp) phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff); phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff); - phy_write(phydev, 0x1f, 0x0a43); - phy_write(phydev, 0x13, 0x80ea); - phy_modify(phydev, 0x14, 0xff00, 0xc400); - phy_write(phydev, 0x13, 0x80eb); - phy_modify(phydev, 0x14, 0x0700, 0x0300); - phy_write(phydev, 0x13, 0x80f8); - phy_modify(phydev, 0x14, 0xff00, 0x1c00); - phy_write(phydev, 0x13, 0x80f1); - phy_modify(phydev, 0x14, 0xff00, 0x3000); - phy_write(phydev, 0x13, 0x80fe); - phy_modify(phydev, 0x14, 0xff00, 0xa500); - phy_write(phydev, 0x13, 0x8102); - phy_modify(phydev, 0x14, 0xff00, 0x5000); - phy_write(phydev, 0x13, 0x8105); - phy_modify(phydev, 0x14, 0xff00, 0x3300); - phy_write(phydev, 0x13, 0x8100); - phy_modify(phydev, 0x14, 0xff00, 0x7000); - phy_write(phydev, 0x13, 0x8104); - phy_modify(phydev, 0x14, 0xff00, 
0xf000); - phy_write(phydev, 0x13, 0x8106); - phy_modify(phydev, 0x14, 0xff00, 0x6500); - phy_write(phydev, 0x13, 0x80dc); - phy_modify(phydev, 0x14, 0xff00, 0xed00); - phy_write(phydev, 0x13, 0x80df); - phy_set_bits(phydev, 0x14, BIT(8)); - phy_write(phydev, 0x13, 0x80e1); - phy_clear_bits(phydev, 0x14, BIT(8)); - phy_write(phydev, 0x1f, 0x0000); + r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400); + r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300); + r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00); + r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000); + r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500); + r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000); + r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300); + r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000); + r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000); + r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500); + r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00); + r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100); + r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000); phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038); - phy_write_paged(phydev, 0xa43, 0x13, 0x819f); - phy_write_paged(phydev, 0xa43, 0x14, 0xd0b6); + r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6); phy_write_paged(phydev, 0xbc3, 0x12, 0x5555); phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00); @@ -3734,22 +3523,16 @@ static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp) phy_write(phydev, 0x14, 0x0002); for (i = 0; i < 25; i++) phy_write(phydev, 0x14, 0x0000); - - phy_write(phydev, 0x13, 0x8257); - phy_write(phydev, 0x14, 0x020F); - - phy_write(phydev, 0x13, 0x80EA); - phy_write(phydev, 0x14, 0x7843); phy_write(phydev, 0x1f, 0x0000); + r8168g_phy_param(phydev, 0x8257, 0xffff, 0x020F); + r8168g_phy_param(phydev, 0x80ea, 0xffff, 0x7843); + rtl_apply_firmware(tp); phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000); - phy_write(phydev, 0x1f, 0x0a43); - phy_write(phydev, 0x13, 0x81a2); - phy_set_bits(phydev, 0x14, BIT(8)); - phy_write(phydev, 0x1f, 0x0000); + r8168g_phy_param(phydev, 0x81a2, 0x0000, 0x0100); phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00); phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000); @@ -3787,7 +3570,7 @@ static void rtl_hw_phy_config(struct net_device *dev) [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config, [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config, [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config, - [RTL_GIGA_MAC_VER_22] = rtl8168c_4_hw_phy_config, + [RTL_GIGA_MAC_VER_22] = rtl8168c_3_hw_phy_config, [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config, [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config, [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config, @@ -3817,6 +3600,7 @@ static void rtl_hw_phy_config(struct net_device *dev) [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config, [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config, [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config, [RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config, [RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config, }; @@ -3910,7 +3694,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_37 ... 
RTL_GIGA_MAC_VER_52: RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; @@ -3946,6 +3730,7 @@ static void rtl_pll_power_down(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_48: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_52: case RTL_GIGA_MAC_VER_60: case RTL_GIGA_MAC_VER_61: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); @@ -3977,6 +3762,7 @@ static void rtl_pll_power_up(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_48: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_52: case RTL_GIGA_MAC_VER_60: case RTL_GIGA_MAC_VER_61: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); @@ -4008,7 +3794,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_38: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61: @@ -4194,7 +3980,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42); break; case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52: RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); break; @@ -5040,6 +4826,71 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) rtl_hw_aspm_clkreq_enable(tp, true); } +static void rtl_hw_start_8117(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8117[] = { + { 0x19, 0x0040, 0x1100 }, + { 0x59, 0x0040, 0x1100 }, + }; + int rg_saw_cnt; + + rtl8168ep_stop_cmac(tp); + + /* disable aspm and clock request before access ephy */ + rtl_hw_aspm_clkreq_enable(tp, false); + rtl_ephy_init(tp, e_info_8117); + + rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06); + rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f); + + rtl_set_def_aspm_entry_latency(tp); + + rtl_reset_packet_filter(tp); + + rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f90); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); + + RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + + rtl8168_config_eee_mac(tp); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); + RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); + + rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12)); + + rtl_pcie_state_l2l3_disable(tp); + + rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff; + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff; + r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini); + } + + r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070); + r8168_mac_ocp_write(tp, 0xea80, 0x0003); + r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009); + r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + /* firmware is for MAC only */ + rtl_apply_firmware(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) { static const struct 
ephy_info e_info_8102e_1[] = { @@ -5327,6 +5178,7 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1, [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2, [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3, + [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117, [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125_1, [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125_2, }; @@ -6873,7 +6725,7 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp) static void rtl_hw_initialize(struct rtl8169_private *tp) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52: rtl8168ep_stop_cmac(tp); /* fall through */ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: @@ -6983,6 +6835,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->pci_dev = pdev; tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1; + tp->eee_adv = -1; /* Get the *optional* external "ether_clk" used on some boards */ rc = rtl_get_ether_clk(tp); diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index a9c89d5d8898..9f88b5db4f89 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -955,6 +955,8 @@ enum RAVB_QUEUE { #define NUM_RX_QUEUE 2 #define NUM_TX_QUEUE 2 +#define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16)) + /* TX descriptors per packet */ #define NUM_TX_DESC_GEN2 2 #define NUM_TX_DESC_GEN3 1 @@ -1018,7 +1020,6 @@ struct ravb_private { u32 dirty_rx[NUM_RX_QUEUE]; /* Producer ring indices */ u32 cur_tx[NUM_TX_QUEUE]; u32 dirty_tx[NUM_TX_QUEUE]; - u32 rx_buf_sz; /* Based on MTU+slack. */ struct napi_struct napi[NUM_RX_QUEUE]; struct work_struct work; /* MII transceiver section. */ diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index de9aa8c47f1c..4b13a184bfc7 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -230,7 +230,7 @@ static void ravb_ring_free(struct net_device *ndev, int q) le32_to_cpu(desc->dptr))) dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), - priv->rx_buf_sz, + RX_BUF_SZ, DMA_FROM_DEVICE); } ring_size = sizeof(struct ravb_ex_rx_desc) * @@ -293,9 +293,9 @@ static void ravb_ring_format(struct net_device *ndev, int q) for (i = 0; i < priv->num_rx_ring[q]; i++) { /* RX descriptor */ rx_desc = &priv->rx_ring[q][i]; - rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz); + rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ); dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, - priv->rx_buf_sz, + RX_BUF_SZ, DMA_FROM_DEVICE); /* We just set the data size to 0 for a failed mapping which * should prevent DMA from happening... @@ -342,9 +342,6 @@ static int ravb_ring_init(struct net_device *ndev, int q) int ring_size; int i; - priv->rx_buf_sz = (ndev->mtu <= 1492 ? 
PKT_BUF_SZ : ndev->mtu) + - ETH_HLEN + VLAN_HLEN + sizeof(__sum16); - /* Allocate RX and TX skb rings */ priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], sizeof(*priv->rx_skb[q]), GFP_KERNEL); @@ -354,7 +351,7 @@ static int ravb_ring_init(struct net_device *ndev, int q) goto error; for (i = 0; i < priv->num_rx_ring[q]; i++) { - skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1); + skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1); if (!skb) goto error; ravb_set_buffer_align(skb); @@ -584,7 +581,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) skb = priv->rx_skb[q][entry]; priv->rx_skb[q][entry] = NULL; dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), - priv->rx_buf_sz, + RX_BUF_SZ, DMA_FROM_DEVICE); get_ts &= (q == RAVB_NC) ? RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : @@ -617,11 +614,11 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; desc = &priv->rx_ring[q][entry]; - desc->ds_cc = cpu_to_le16(priv->rx_buf_sz); + desc->ds_cc = cpu_to_le16(RX_BUF_SZ); if (!priv->rx_skb[q][entry]) { skb = netdev_alloc_skb(ndev, - priv->rx_buf_sz + + RX_BUF_SZ + RAVB_ALIGN - 1); if (!skb) break; /* Better luck next round. */ @@ -1801,10 +1798,15 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) static int ravb_change_mtu(struct net_device *ndev, int new_mtu) { - if (netif_running(ndev)) - return -EBUSY; + struct ravb_private *priv = netdev_priv(ndev); ndev->mtu = new_mtu; + + if (netif_running(ndev)) { + synchronize_irq(priv->emac_irq); + ravb_emac_init(ndev); + } + netdev_update_features(ndev); return 0; @@ -2046,7 +2048,9 @@ static int ravb_probe(struct platform_device *pdev) spin_lock_init(&priv->lock); INIT_WORK(&priv->work, ravb_tx_timeout_work); - priv->phy_interface = of_get_phy_mode(np); + error = of_get_phy_mode(np, &priv->phy_interface); + if (error && error != -ENODEV) + goto out_release; priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link"); priv->avb_link_active_low = diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index 9a42580693cb..6984bd5b7da9 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -182,6 +182,13 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, struct net_device *ndev = priv->ndev; unsigned long flags; + /* Reject requests with unsupported flags */ + if (req->flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + if (req->index) return -EINVAL; @@ -211,6 +218,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, unsigned long flags; int error = 0; + /* Reject requests with unsupported flags */ + if (req->flags) + return -EOPNOTSUPP; + if (req->index) return -EINVAL; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 7ba35a0bdb29..e19b49c4013e 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3183,6 +3183,7 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) { struct device_node *np = dev->of_node; struct sh_eth_plat_data *pdata; + phy_interface_t interface; const char *mac_addr; int ret; @@ -3190,10 +3191,10 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) if (!pdata) return NULL; - ret = of_get_phy_mode(np); - if (ret < 0) + ret = of_get_phy_mode(np, 
&interface); + if (ret) return NULL; - pdata->phy_interface = ret; + pdata->phy_interface = interface; mac_addr = of_get_mac_address(np); if (!IS_ERR(mac_addr)) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index 2412c87561e0..33f79402850d 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -30,12 +30,15 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev, { struct device_node *np = pdev->dev.of_node; struct sxgbe_dma_cfg *dma_cfg; + int err; if (!np) return -ENODEV; *mac = of_get_mac_address(np); - plat->interface = of_get_phy_mode(np); + err = of_get_phy_mode(np, &plat->interface); + if (err && err != -ENODEV) + return err; plat->bus_id = of_alias_get_id(np, "ethernet"); if (plat->bus_id < 0) diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index a7d9841105d8..bec261905530 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -724,6 +724,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, netif_err(efx, rx_err, efx->net_dev, "XDP TX failed (%d)\n", err); channel->n_rx_xdp_bad_drops++; + trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); } else { channel->n_rx_xdp_tx++; } @@ -737,6 +738,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, netif_err(efx, rx_err, efx->net_dev, "XDP redirect failed (%d)\n", err); channel->n_rx_xdp_bad_drops++; + trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); } else { channel->n_rx_xdp_redirect++; } @@ -746,6 +748,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, bpf_warn_invalid_xdp_action(xdp_act); efx_free_rx_buffers(rx_queue, rx_buf, 1); channel->n_rx_xdp_bad_drops++; + trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); break; case XDP_ABORTED: diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index deb636d653f3..d242906ae233 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -48,7 +48,7 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/skbuff.h> -#include <linux/dma-direct.h> +#include <linux/dma-mapping.h> #include <net/ip.h> @@ -89,6 +89,7 @@ struct ioc3_private { struct device *dma_dev; u32 *ssram; unsigned long *rxr; /* pointer to receiver ring */ + void *tx_ring; struct ioc3_etxd *txr; dma_addr_t rxr_dma; dma_addr_t txr_dma; @@ -1173,26 +1174,14 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct ioc3 *ioc3; unsigned long ioc3_base, ioc3_size; u32 vendor, model, rev; - int err, pci_using_dac; + int err; /* Configure DMA attributes. 
*/ - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (!err) { - pci_using_dac = 1; - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err < 0) { - pr_err("%s: Unable to obtain 64 bit DMA for consistent allocations\n", - pci_name(pdev)); - goto out; - } - } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - pr_err("%s: No usable DMA configuration, aborting.\n", - pci_name(pdev)); - goto out; - } - pci_using_dac = 0; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + pr_err("%s: No usable DMA configuration, aborting.\n", + pci_name(pdev)); + goto out; } if (pci_enable_device(pdev)) @@ -1204,9 +1193,6 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_disable; } - if (pci_using_dac) - dev->features |= NETIF_F_HIGHDMA; - err = pci_request_regions(pdev, "ioc3"); if (err) goto out_free; @@ -1242,8 +1228,8 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ioc3_stop(ip); /* Allocate rx ring. 4kb = 512 entries, must be 4kb aligned */ - ip->rxr = dma_direct_alloc_pages(ip->dma_dev, RX_RING_SIZE, - &ip->rxr_dma, GFP_ATOMIC, 0); + ip->rxr = dma_alloc_coherent(ip->dma_dev, RX_RING_SIZE, &ip->rxr_dma, + GFP_KERNEL); if (!ip->rxr) { pr_err("ioc3-eth: rx ring allocation failed\n"); err = -ENOMEM; @@ -1251,14 +1237,16 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } /* Allocate tx rings. 16kb = 128 bufs, must be 16kb aligned */ - ip->txr = dma_direct_alloc_pages(ip->dma_dev, TX_RING_SIZE, - &ip->txr_dma, - GFP_KERNEL | __GFP_ZERO, 0); - if (!ip->txr) { + ip->tx_ring = dma_alloc_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, + &ip->txr_dma, GFP_KERNEL); + if (!ip->tx_ring) { pr_err("ioc3-eth: tx ring allocation failed\n"); err = -ENOMEM; goto out_stop; } + /* Align TX ring */ + ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K); + ip->txr_dma = ALIGN(ip->txr_dma, SZ_16K); ioc3_init(dev); @@ -1288,7 +1276,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev->netdev_ops = &ioc3_netdev_ops; dev->ethtool_ops = &ioc3_ethtool_ops; dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; - dev->features = NETIF_F_IP_CSUM; + dev->features = NETIF_F_IP_CSUM | NETIF_F_HIGHDMA; sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1); sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2); @@ -1313,11 +1301,11 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) out_stop: del_timer_sync(&ip->ioc3_timer); if (ip->rxr) - dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr, - ip->rxr_dma, 0); - if (ip->txr) - dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr, - ip->txr_dma, 0); + dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, + ip->rxr_dma); + if (ip->tx_ring) + dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, + ip->txr_dma); out_res: pci_release_regions(pdev); out_free: @@ -1335,10 +1323,8 @@ static void ioc3_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct ioc3_private *ip = netdev_priv(dev); - dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr, - ip->rxr_dma, 0); - dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr, - ip->txr_dma, 0); + dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma); + dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, ip->txr_dma); unregister_netdev(dev); del_timer_sync(&ip->ioc3_timer); diff --git a/drivers/net/ethernet/socionext/sni_ave.c 
b/drivers/net/ethernet/socionext/sni_ave.c index 6e984d5a729f..f7e927ad67fa 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1565,10 +1565,10 @@ static int ave_probe(struct platform_device *pdev) return -EINVAL; np = dev->of_node; - phy_mode = of_get_phy_mode(np); - if ((int)phy_mode < 0) { + ret = of_get_phy_mode(np, &phy_mode); + if (ret) { dev_err(dev, "phy-mode not found\n"); - return -EINVAL; + return ret; } irq = platform_get_irq(pdev, 0); diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 912bbb6515b2..b210e987a1db 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -248,12 +248,13 @@ struct stmmac_safety_stats { /* Max/Min RI Watchdog Timer count value */ #define MAX_DMA_RIWT 0xff #define MIN_DMA_RIWT 0x10 +#define DEF_DMA_RIWT 0xa0 /* Tx coalesce parameters */ #define STMMAC_COAL_TX_TIMER 1000 #define STMMAC_MAX_COAL_TX_TICK 100000 #define STMMAC_TX_MAX_FRAMES 256 -#define STMMAC_TX_FRAMES 1 -#define STMMAC_RX_FRAMES 25 +#define STMMAC_TX_FRAMES 25 +#define STMMAC_RX_FRAMES 0 /* Packets types */ enum packets_types { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c index 527f93320a5a..d0d2d0fc5f0a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -61,9 +61,10 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv) static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev) { - int phy_mode; - void __iomem *ctl_block; struct anarion_gmac *gmac; + phy_interface_t phy_mode; + void __iomem *ctl_block; + int err; ctl_block = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(ctl_block)) { @@ -78,7 +79,10 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev) gmac->ctl_block = (uintptr_t)ctl_block; - phy_mode = of_get_phy_mode(pdev->dev.of_node); + err = of_get_phy_mode(pdev->dev.of_node, &phy_mode); + if (err) + return ERR_PTR(err); + switch (phy_mode) { case PHY_INTERFACE_MODE_RGMII: /* Fall through */ case PHY_INTERFACE_MODE_RGMII_ID /* Fall through */: diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 0d21082ceb93..6ae13dc19510 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -189,9 +189,10 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed) static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) { struct device *dev = &gmac->pdev->dev; + int ret; - gmac->phy_mode = of_get_phy_mode(dev->of_node); - if ((int)gmac->phy_mode < 0) { + ret = of_get_phy_mode(dev->of_node, &gmac->phy_mode); + if (ret) { dev_err(dev, "missing phy mode property\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index cea7a0c7ce68..bdb80421acac 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -54,7 +54,7 @@ struct mediatek_dwmac_plat_data { struct device_node *np; struct regmap *peri_regmap; struct device *dev; - int phy_mode; + phy_interface_t phy_mode; bool rmii_rxc; }; @@ -243,6 +243,7 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat) { struct 
mac_delay_struct *mac_delay = &plat->mac_delay; u32 tx_delay_ps, rx_delay_ps; + int err; plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg"); if (IS_ERR(plat->peri_regmap)) { @@ -250,10 +251,10 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat) return PTR_ERR(plat->peri_regmap); } - plat->phy_mode = of_get_phy_mode(plat->np); - if (plat->phy_mode < 0) { + err = of_get_phy_mode(plat->np, &plat->phy_mode); + if (err) { dev_err(plat->dev, "not find phy-mode\n"); - return -EINVAL; + return err; } if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 306da8f6b7d5..bd6c01004913 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -338,10 +338,9 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) } dwmac->dev = &pdev->dev; - dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node); - if ((int)dwmac->phy_mode < 0) { + ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode); + if (ret) { dev_err(&pdev->dev, "missing phy-mode property\n"); - ret = -EINVAL; goto err_remove_config_dt; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index e2e469c37a4d..dc50ba13a746 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -37,7 +37,7 @@ struct rk_gmac_ops { struct rk_priv_data { struct platform_device *pdev; - int phy_iface; + phy_interface_t phy_iface; struct regulator *regulator; bool suspended; const struct rk_gmac_ops *ops; @@ -1224,7 +1224,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, if (!bsp_priv) return ERR_PTR(-ENOMEM); - bsp_priv->phy_iface = of_get_phy_mode(dev->of_node); + of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface); bsp_priv->ops = ops; bsp_priv->regulator = devm_regulator_get_optional(dev, "phy"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index e9fd661f7995..e1b63df6f96f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -116,7 +116,7 @@ #define ETH_PHY_SEL_MII 0x0 struct sti_dwmac { - int interface; /* MII interface */ + phy_interface_t interface; /* MII interface */ bool ext_phyclk; /* Clock from external PHY */ u32 tx_retime_src; /* TXCLK Retiming*/ struct clk *clk; /* PHY clock */ @@ -269,7 +269,12 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, return err; } - dwmac->interface = of_get_phy_mode(np); + err = of_get_phy_mode(np, &dwmac->interface); + if (err && err != -ENODEV) { + dev_err(dev, "Can't get phy-mode\n"); + return err; + } + dwmac->regmap = regmap; dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en"); dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 4ef041bdf6a1..9b7be996d07b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -155,18 +155,14 @@ static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare) ret = clk_prepare_enable(dwmac->syscfg_clk); if (ret) return ret; - - if (dwmac->clk_eth_ck) { - ret = clk_prepare_enable(dwmac->clk_eth_ck); - if (ret) { - 
clk_disable_unprepare(dwmac->syscfg_clk); - return ret; - } + ret = clk_prepare_enable(dwmac->clk_eth_ck); + if (ret) { + clk_disable_unprepare(dwmac->syscfg_clk); + return ret; } } else { clk_disable_unprepare(dwmac->syscfg_clk); - if (dwmac->clk_eth_ck) - clk_disable_unprepare(dwmac->clk_eth_ck); + clk_disable_unprepare(dwmac->clk_eth_ck); } return ret; } @@ -175,7 +171,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) { struct stm32_dwmac *dwmac = plat_dat->bsp_priv; u32 reg = dwmac->mode_reg; - int val, ret; + int val; switch (plat_dat->interface) { case PHY_INTERFACE_MODE_MII: @@ -211,8 +207,8 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) } /* Need to update PMCCLRR (clear register) */ - ret = regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET, - dwmac->ops->syscfg_eth_mask); + regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET, + dwmac->ops->syscfg_eth_mask); /* Update PMCSETR (set register) */ return regmap_update_bits(dwmac->regmap, reg, @@ -320,12 +316,10 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac, return PTR_ERR(dwmac->clk_ethstp); } - /* Clock for sysconfig */ + /* Optional Clock for sysconfig */ dwmac->syscfg_clk = devm_clk_get(dev, "syscfg-clk"); - if (IS_ERR(dwmac->syscfg_clk)) { - dev_err(dev, "No syscfg clock provided...\n"); - return PTR_ERR(dwmac->syscfg_clk); - } + if (IS_ERR(dwmac->syscfg_clk)) + dwmac->syscfg_clk = NULL; /* Get IRQ information early to have an ability to ask for deferred * probe if needed before we went too far with resource allocation. @@ -437,8 +431,7 @@ static int stm32mp1_suspend(struct stm32_dwmac *dwmac) clk_disable_unprepare(dwmac->clk_tx); clk_disable_unprepare(dwmac->syscfg_clk); - if (dwmac->clk_eth_ck) - clk_disable_unprepare(dwmac->clk_eth_ck); + clk_disable_unprepare(dwmac->clk_eth_ck); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index ddcc191febdb..1c8d84ed8410 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -1105,6 +1105,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) struct stmmac_resources stmmac_res; struct sunxi_priv_data *gmac; struct device *dev = &pdev->dev; + phy_interface_t interface; int ret; struct stmmac_priv *priv; struct net_device *ndev; @@ -1178,10 +1179,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) return ret; } - ret = of_get_phy_mode(dev->of_node); - if (ret < 0) + ret = of_get_phy_mode(dev->of_node, &interface); + if (ret) return -EINVAL; - plat_dat->interface = ret; + plat_dat->interface = interface; /* platform data specifying hardware features and callbacks. * hardware features were copied from Allwinner drivers. 
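The of_get_phy_mode() conversion repeated across the hunks above and below changes the helper's contract: it now returns 0 or a negative errno and writes the PHY interface mode through a pointer, with -ENODEV meaning the phy-mode/phy-connection-type property is simply absent (several callers above treat that case as non-fatal). A minimal caller following that pattern might look like the sketch below; the function name is illustrative and not part of any of these drivers.

#include <linux/of_net.h>
#include <linux/phy.h>

static int example_parse_phy_mode(struct device_node *np,
				  phy_interface_t *iface)
{
	int err = of_get_phy_mode(np, iface);

	if (err == -ENODEV)
		return 0;	/* property absent: keep the caller's default */

	return err;		/* 0 on success, other errors are fatal */
}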
@@ -1226,7 +1227,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) dwmac_mux: sun8i_dwmac_unset_syscon(gmac); dwmac_exit: - sun8i_dwmac_exit(pdev, plat_dat->bsp_priv); + stmmac_pltfr_remove(pdev); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index a299da3971b4..26353ef616b8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -18,7 +18,7 @@ #include "stmmac_platform.h" struct sunxi_priv_data { - int interface; + phy_interface_t interface; int clk_enabled; struct clk *tx_clk; struct regulator *regulator; @@ -118,7 +118,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } - gmac->interface = of_get_phy_mode(dev->of_node); + ret = of_get_phy_mode(dev->of_node, &gmac->interface); + if (ret && ret != -ENODEV) { + dev_err(dev, "Can't get phy-mode\n"); + goto err_remove_config_dt; + } gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx"); if (IS_ERR(gmac->tx_clk)) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 07e97f45755d..2dc70d104161 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -14,6 +14,7 @@ /* MAC registers */ #define GMAC_CONFIG 0x00000000 +#define GMAC_EXT_CONFIG 0x00000004 #define GMAC_PACKET_FILTER 0x00000008 #define GMAC_HASH_TAB(x) (0x10 + (x) * 4) #define GMAC_VLAN_TAG 0x00000050 @@ -188,6 +189,11 @@ enum power_event { #define GMAC_CONFIG_TE BIT(1) #define GMAC_CONFIG_RE BIT(0) +/* MAC extended config */ +#define GMAC_CONFIG_HDSMS GENMASK(22, 20) +#define GMAC_CONFIG_HDSMS_SHIFT 20 +#define GMAC_CONFIG_HDSMS_256 (0x2 << GMAC_CONFIG_HDSMS_SHIFT) + /* MAC HW features0 bitmap */ #define GMAC_HW_FEAT_SAVLANINS BIT(27) #define GMAC_HW_FEAT_ADDMAC BIT(18) @@ -211,6 +217,7 @@ enum power_event { #define GMAC_HW_HASH_TB_SZ GENMASK(25, 24) #define GMAC_HW_FEAT_AVSEL BIT(20) #define GMAC_HW_TSOEN BIT(18) +#define GMAC_HW_FEAT_SPHEN BIT(17) #define GMAC_HW_ADDR64 GENMASK(15, 14) #define GMAC_HW_TXFIFOSIZE GENMASK(10, 6) #define GMAC_HW_RXFIFOSIZE GENMASK(4, 0) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index bec929daf703..40ca00e596dd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -432,7 +432,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw, * bits used depends on the hardware configuration * selected at core configuration time. 
*/ - int bit_nr = bitrev32(~crc32_le(~0, ha->addr, + u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> (32 - mcbitslog2); /* The most significant bit determines the register to * use (H/L) while the other 5 bits determine the bit @@ -733,7 +733,7 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable) } static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash, - u16 perfect_match, bool is_double) + __le16 perfect_match, bool is_double) { void __iomem *ioaddr = hw->pcsr; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 707ab5eba8da..3e14da69f378 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -83,9 +83,10 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x, if (unlikely(rdes3 & RDES3_OWN)) return dma_own; - /* Verify rx error by looking at the last segment. */ - if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR))) + if (unlikely(rdes3 & RDES3_CONTEXT_DESCRIPTOR)) return discard_frame; + if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR))) + return rx_not_ls; if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) { if (unlikely(rdes3 & RDES3_GIANT_PACKET)) @@ -188,7 +189,7 @@ static void dwmac4_set_tx_owner(struct dma_desc *p) static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic) { - p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR); + p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR); if (!disable_rx_ic) p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN); @@ -492,6 +493,18 @@ static void dwmac4_set_vlan(struct dma_desc *p, u32 type) p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK); } +static int dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len) +{ + *len = le32_to_cpu(p->des2) & RDES2_HL; + return 0; +} + +static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des2 = cpu_to_le32(lower_32_bits(addr)); + p->des3 = cpu_to_le32(upper_32_bits(addr) | RDES3_BUFFER2_VALID_ADDR); +} + const struct stmmac_desc_ops dwmac4_desc_ops = { .tx_status = dwmac4_wrback_get_tx_status, .rx_status = dwmac4_wrback_get_rx_status, @@ -519,6 +532,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = { .set_sarc = dwmac4_set_sarc, .set_vlan_tag = dwmac4_set_vlan_tag, .set_vlan = dwmac4_set_vlan, + .get_rx_header_len = dwmac4_get_rx_header_len, + .set_sec_addr = dwmac4_set_sec_addr, }; const struct stmmac_mode_ops dwmac4_ring_mode_ops = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h index 0d7b3bbcd5a7..6d92109dc9aa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h @@ -109,6 +109,7 @@ #define RDES2_L4_FILTER_MATCH BIT(28) #define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26) #define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26 +#define RDES2_HL GENMASK(9, 0) /* RDES3 (write back format) */ #define RDES3_PACKET_SIZE_MASK GENMASK(14, 0) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index b24c89572745..c15409030710 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -252,19 +252,9 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, rfa = 0x01; /* Full-1.5K */ break; - case 8192: - rfd = 0x06; /* Full-4K */ - rfa = 0x0a; /* Full-6K */ - break; - - case 16384: - rfd = 
0x06; /* Full-4K */ - rfa = 0x12; /* Full-10K */ - break; - default: - rfd = 0x06; /* Full-4K */ - rfa = 0x1e; /* Full-16K */ + rfd = 0x07; /* Full-4.5K */ + rfa = 0x04; /* Full-3K */ break; } @@ -368,6 +358,7 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24; dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20; dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18; + dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17; dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14; switch (dma_cap->addr64) { @@ -460,6 +451,22 @@ static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan)); } +static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan) +{ + u32 value = readl(ioaddr + GMAC_EXT_CONFIG); + + value &= ~GMAC_CONFIG_HDSMS; + value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */ + writel(value, ioaddr + GMAC_EXT_CONFIG); + + value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); + if (en) + value |= DMA_CONTROL_SPH; + else + value &= ~DMA_CONTROL_SPH; + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); +} + const struct stmmac_dma_ops dwmac4_dma_ops = { .reset = dwmac4_dma_reset, .init = dwmac4_dma_init, @@ -486,6 +493,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = { .enable_tso = dwmac4_enable_tso, .qmode = dwmac4_qmode, .set_bfsize = dwmac4_set_bfsize, + .enable_sph = dwmac4_enable_sph, }; const struct stmmac_dma_ops dwmac410_dma_ops = { @@ -514,4 +522,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = { .enable_tso = dwmac4_enable_tso, .qmode = dwmac4_qmode, .set_bfsize = dwmac4_set_bfsize, + .enable_sph = dwmac4_enable_sph, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h index 5299fa1001a3..589931795847 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h @@ -110,6 +110,7 @@ #define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60) /* DMA Control X */ +#define DMA_CONTROL_SPH BIT(24) #define DMA_CONTROL_MSS_MASK GENMASK(13, 0) /* DMA Tx Channel X Control register defines */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h index 775db776b3cc..23fecf68f781 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ // Copyright (c) 2017 Synopsys, Inc. and/or its affiliates. // stmmac Support for 5.xx Ethernet QoS cores diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 99037386080a..3b6e559aa0b9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ /* * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. * stmmac XGMAC definitions. 
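The RFD/RFA watermark values swapped in the two rx-mode hunks (here and in the XGMAC variant further down) follow the encoding implied by the surrounding comments: each step is half a kilobyte below the FIFO-full level, starting at 1 KB, so 0x01 is Full-1.5K, 0x06 is Full-4K, 0x07 is Full-4.5K and 0x04 is Full-3K. A throwaway helper, shown only to make that arithmetic explicit and not part of the driver, would be:

/* Distance below "FIFO full" (in bytes) encoded by an RFD/RFA field value. */
static unsigned int example_watermark_bytes_below_full(unsigned int val)
{
	return 1024 + val * 512;	/* 0x01 -> 1536 (1.5K), 0x07 -> 4608 (4.5K) */
}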
@@ -360,7 +360,7 @@ #define XGMAC_TBUE BIT(2) #define XGMAC_TIE BIT(0) #define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \ - XGMAC_RIE | XGMAC_TBUE | XGMAC_TIE) + XGMAC_RIE | XGMAC_TIE) #define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x))) #define XGMAC_RWT GENMASK(7, 0) #define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x))) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index 5cda360d5d07..082f5ee9e525 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -224,6 +224,7 @@ static void dwxgmac2_config_cbs(struct mac_device_info *hw, writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue)); value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); + value &= ~XGMAC_TSA; value |= XGMAC_CC | XGMAC_CBS; writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); } @@ -463,7 +464,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw, value |= XGMAC_FILTER_HMC; netdev_for_each_mc_addr(ha, dev) { - int nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >> + u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >> (32 - mcbitslog2)); mc_filter[nr >> 5] |= (1 << (nr & 0x1F)); } @@ -555,7 +556,7 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw, } static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash, - u16 perfect_match, bool is_double) + __le16 perfect_match, bool is_double) { void __iomem *ioaddr = hw->pcsr; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index ae48154f933c..bd5838ce1e8a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c @@ -288,7 +288,8 @@ static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash, static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len) { - *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL; + if (le32_to_cpu(p->des3) & XGMAC_RDES3_L34T) + *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL; return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 7cc331996cd8..22a7f0cc1b90 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -183,19 +183,9 @@ static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode, rfa = 0x01; /* Full-1.5K */ break; - case 8192: - rfd = 0x06; /* Full-4K */ - rfa = 0x0a; /* Full-6K */ - break; - - case 16384: - rfd = 0x06; /* Full-4K */ - rfa = 0x12; /* Full-10K */ - break; - default: - rfd = 0x06; /* Full-4K */ - rfa = 0x1e; /* Full-16K */ + rfd = 0x07; /* Full-4.5K */ + rfa = 0x04; /* Full-3K */ break; } @@ -372,7 +362,7 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr, dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13; dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12; dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11; - dma_cap->av &= !(hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10; + dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10); dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9; dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8; dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7; @@ -473,6 +463,7 @@ static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan) static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) { u32 value = readl(ioaddr + 
XGMAC_MTL_TXQ_OPMODE(channel)); + u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL); value &= ~XGMAC_TXQEN; if (qmode != MTL_QUEUE_AVB) { @@ -480,6 +471,7 @@ static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel)); } else { value |= 0x1 << XGMAC_TXQEN_SHIFT; + writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL); } writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 1303d1e9a18f..aa5b917398fe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ // Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. // stmmac HW Interface Callbacks @@ -357,7 +357,7 @@ struct stmmac_ops { struct stmmac_rss *cfg, u32 num_rxq); /* VLAN */ void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash, - u16 perfect_match, bool is_double); + __le16 perfect_match, bool is_double); void (*enable_vlan)(struct mac_device_info *hw, u32 type); /* TX Timestamp */ int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts); diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index a223584f5f9a..252cf48c5816 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -176,6 +176,7 @@ #define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c #define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230 #define MMC_XGMAC_RX_FPE_FRAG 0x234 +#define MMC_XGMAC_RX_IPC_INTR_MASK 0x25c static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode) { @@ -333,8 +334,9 @@ static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode) static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr) { - writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK); - writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK); + writel(0x0, mmcaddr + MMC_RX_INTR_MASK); + writel(0x0, mmcaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK); } static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 654a2b7595b8..8cc4cd0cc515 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -36,6 +36,7 @@ #endif /* CONFIG_DEBUG_FS */ #include <linux/net_tstamp.h> #include <linux/phylink.h> +#include <linux/udp.h> #include <net/pkt_cls.h> #include "stmmac_ptp.h" #include "stmmac.h" @@ -1502,10 +1503,8 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv) rx_q->dma_erx, rx_q->dma_rx_phy); kfree(rx_q->buf_pool); - if (rx_q->page_pool) { - page_pool_request_shutdown(rx_q->page_pool); + if (rx_q->page_pool) page_pool_destroy(rx_q->page_pool); - } } } @@ -2604,9 +2603,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; if (priv->use_riwt) { - ret = stmmac_rx_watchdog(priv, priv->ioaddr, MIN_DMA_RIWT, rx_cnt); - if (!ret) - priv->rx_riwt = MIN_DMA_RIWT; + if (!priv->rx_riwt) + priv->rx_riwt = DEF_DMA_RIWT; + + ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); } if (priv->hw->pcs) @@ -2914,19 +2914,26 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) struct stmmac_priv *priv 
= netdev_priv(dev); int nfrags = skb_shinfo(skb)->nr_frags; u32 queue = skb_get_queue_mapping(skb); + unsigned int first_entry, tx_packets; + int tmp_pay_len = 0, first_tx; struct stmmac_tx_queue *tx_q; - unsigned int first_entry; - int tmp_pay_len = 0; + u8 proto_hdr_len, hdr; + bool has_vlan, set_ic; u32 pay_len, mss; - u8 proto_hdr_len; dma_addr_t des; - bool has_vlan; int i; tx_q = &priv->tx_queue[queue]; + first_tx = tx_q->cur_tx; /* Compute header lengths */ - proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr); + hdr = sizeof(struct udphdr); + } else { + proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + hdr = tcp_hdrlen(skb); + } /* Desc availability based on threshold should be enough safe */ if (unlikely(stmmac_tx_avail(priv, queue) < @@ -2956,8 +2963,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) } if (netif_msg_tx_queued(priv)) { - pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n", - __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss); + pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n", + __func__, hdr, proto_hdr_len, pay_len, mss); pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, skb->data_len); } @@ -2996,6 +3003,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) stmmac_set_desc_addr(priv, first, des); tmp_pay_len = pay_len; des += proto_hdr_len; + pay_len = 0; } stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); @@ -3023,6 +3031,30 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) /* Only the last descriptor gets to point to the skb. */ tx_q->tx_skbuff[tx_q->cur_tx] = skb; + /* Manage tx mitigation */ + tx_packets = (tx_q->cur_tx + 1) - first_tx; + tx_q->tx_count_frames += tx_packets; + + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) + set_ic = true; + else if (!priv->tx_coal_frames) + set_ic = false; + else if (tx_packets > priv->tx_coal_frames) + set_ic = true; + else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + desc = &tx_q->dma_tx[tx_q->cur_tx]; + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, desc); + priv->xstats.tx_set_ic_bit++; + } else { + stmmac_tx_timer_arm(priv, queue); + } + /* We've used all descriptors we need for this skb, however, * advance cur_tx so that it references a fresh descriptor. 
* ndo_start_xmit will fill this descriptor the next time it's @@ -3040,19 +3072,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) priv->xstats.tx_tso_frames++; priv->xstats.tx_tso_nfrags += nfrags; - /* Manage tx mitigation */ - tx_q->tx_count_frames += nfrags + 1; - if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) && - !(priv->synopsys_id >= DWMAC_CORE_4_00 && - (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - priv->hwts_tx_en)) { - stmmac_tx_timer_arm(priv, queue); - } else { - tx_q->tx_count_frames = 0; - stmmac_set_tx_ic(priv, desc); - priv->xstats.tx_set_ic_bit++; - } - if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -3070,7 +3089,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) proto_hdr_len, pay_len, 1, tx_q->tx_skbuff_dma[first_entry].last_segment, - tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); + hdr / 4, (skb->len - proto_hdr_len)); /* If context desc is used to change MSS */ if (mss_desc) { @@ -3124,27 +3143,30 @@ dma_map_err: */ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) { + unsigned int first_entry, tx_packets, enh_desc; struct stmmac_priv *priv = netdev_priv(dev); unsigned int nopaged_len = skb_headlen(skb); int i, csum_insertion = 0, is_jumbo = 0; u32 queue = skb_get_queue_mapping(skb); int nfrags = skb_shinfo(skb)->nr_frags; + int gso = skb_shinfo(skb)->gso_type; struct dma_desc *desc, *first; struct stmmac_tx_queue *tx_q; - unsigned int first_entry; - unsigned int enh_desc; + bool has_vlan, set_ic; + int entry, first_tx; dma_addr_t des; - bool has_vlan; - int entry; tx_q = &priv->tx_queue[queue]; + first_tx = tx_q->cur_tx; if (priv->tx_path_in_lpi_mode) stmmac_disable_eee_mode(priv); /* Manage oversized TCP frames for GMAC4 device */ if (skb_is_gso(skb) && priv->tso) { - if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) + if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) + return stmmac_tso_xmit(skb, dev); + if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) return stmmac_tso_xmit(skb, dev); } @@ -3224,6 +3246,38 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) /* Only the last descriptor gets to point to the skb. */ tx_q->tx_skbuff[entry] = skb; + /* According to the coalesce parameter the IC bit for the latest + * segment is reset and the timer re-started to clean the tx status. + * This approach takes care about the fragments: desc is the first + * element in case of no SG. + */ + tx_packets = (entry + 1) - first_tx; + tx_q->tx_count_frames += tx_packets; + + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) + set_ic = true; + else if (!priv->tx_coal_frames) + set_ic = false; + else if (tx_packets > priv->tx_coal_frames) + set_ic = true; + else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets) + set_ic = true; + else + set_ic = false; + + if (set_ic) { + if (likely(priv->extend_desc)) + desc = &tx_q->dma_etx[entry].basic; + else + desc = &tx_q->dma_tx[entry]; + + tx_q->tx_count_frames = 0; + stmmac_set_tx_ic(priv, desc); + priv->xstats.tx_set_ic_bit++; + } else { + stmmac_tx_timer_arm(priv, queue); + } + /* We've used all descriptors we need for this skb, however, * advance cur_tx so that it references a fresh descriptor. 
* ndo_start_xmit will fill this descriptor the next time it's @@ -3259,23 +3313,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_bytes += skb->len; - /* According to the coalesce parameter the IC bit for the latest - * segment is reset and the timer re-started to clean the tx status. - * This approach takes care about the fragments: desc is the first - * element in case of no SG. - */ - tx_q->tx_count_frames += nfrags + 1; - if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) && - !(priv->synopsys_id >= DWMAC_CORE_4_00 && - (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - priv->hwts_tx_en)) { - stmmac_tx_timer_arm(priv, queue); - } else { - tx_q->tx_count_frames = 0; - stmmac_set_tx_ic(priv, desc); - priv->xstats.tx_set_ic_bit++; - } - if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -3425,7 +3462,11 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) rx_q->rx_count_frames += priv->rx_coal_frames; if (rx_q->rx_count_frames > priv->rx_coal_frames) rx_q->rx_count_frames = 0; - use_rx_wd = priv->use_riwt && rx_q->rx_count_frames; + + use_rx_wd = !priv->rx_coal_frames; + use_rx_wd |= rx_q->rx_count_frames > 0; + if (!priv->use_riwt) + use_rx_wd = false; dma_wmb(); stmmac_set_rx_owner(priv, p, use_rx_wd); @@ -3438,6 +3479,55 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); } +static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, + struct dma_desc *p, + int status, unsigned int len) +{ + int ret, coe = priv->hw->rx_csum; + unsigned int plen = 0, hlen = 0; + + /* Not first descriptor, buffer is always zero */ + if (priv->sph && len) + return 0; + + /* First descriptor, get split header length */ + ret = stmmac_get_rx_header_len(priv, p, &hlen); + if (priv->sph && hlen) { + priv->xstats.rx_split_hdr_pkt_n++; + return hlen; + } + + /* First descriptor, not last descriptor and not split header */ + if (status & rx_not_ls) + return priv->dma_buf_sz; + + plen = stmmac_get_rx_frame_len(priv, p, coe); + + /* First descriptor and last descriptor and not split header */ + return min_t(unsigned int, priv->dma_buf_sz, plen); +} + +static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, + struct dma_desc *p, + int status, unsigned int len) +{ + int coe = priv->hw->rx_csum; + unsigned int plen = 0; + + /* Not split header, buffer is not available */ + if (!priv->sph) + return 0; + + /* Not last descriptor */ + if (status & rx_not_ls) + return priv->dma_buf_sz; + + plen = stmmac_get_rx_frame_len(priv, p, coe); + + /* Last descriptor */ + return plen - len; +} + /** * stmmac_rx - manage the receive process * @priv: driver private structure @@ -3467,11 +3557,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); } while (count < limit) { - unsigned int hlen = 0, prev_len = 0; + unsigned int buf1_len = 0, buf2_len = 0; enum pkt_hash_types hash_type; struct stmmac_rx_buffer *buf; struct dma_desc *np, *p; - unsigned int sec_len; int entry; u32 hash; @@ -3490,7 +3579,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) break; read_again: - sec_len = 0; + buf1_len = 0; + buf2_len = 0; entry = next_entry; buf = &rx_q->buf_pool[entry]; @@ -3506,8 +3596,6 @@ read_again: if (unlikely(status & dma_own)) break; - count++; - rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); next_entry = rx_q->cur_rx; 
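The new stmmac_rx_buf1_len()/stmmac_rx_buf2_len() helpers above decide how much of each received frame lands in the header buffer versus the payload buffer. A short worked trace, assuming split header (sph) is enabled, dma_buf_sz is 1536, a 128-byte header is split off and a 1400-byte frame fits in a single last descriptor (all numbers illustrative):

	unsigned int len = 0, buf1_len, buf2_len;

	buf1_len = 128;		/* first descriptor: stmmac_rx_buf1_len() returns hlen */
	len += buf1_len;	/* header is copied into the skb linear area */
	buf2_len = 1400 - len;	/* last descriptor: stmmac_rx_buf2_len() = plen - len = 1272 */
	len += buf2_len;	/* payload page attached as a frag, len ends at 1400 */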
@@ -3517,7 +3605,6 @@ read_again: np = rx_q->dma_rx + next_entry; prefetch(np); - prefetch(page_address(buf->page)); if (priv->extend_desc) stmmac_rx_extended_status(priv, &priv->dev->stats, @@ -3534,67 +3621,61 @@ read_again: goto read_again; if (unlikely(error)) { dev_kfree_skb(skb); + skb = NULL; + count++; continue; } /* Buffer is good. Go on. */ - if (likely(status & rx_not_ls)) { - len += priv->dma_buf_sz; - } else { - prev_len = len; - len = stmmac_get_rx_frame_len(priv, p, coe); - - /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 - * Type frames (LLC/LLC-SNAP) - * - * llc_snap is never checked in GMAC >= 4, so this ACS - * feature is always disabled and packets need to be - * stripped manually. - */ - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || - unlikely(status != llc_snap)) - len -= ETH_FCS_LEN; + prefetch(page_address(buf->page)); + if (buf->sec_page) + prefetch(page_address(buf->sec_page)); + + buf1_len = stmmac_rx_buf1_len(priv, p, status, len); + len += buf1_len; + buf2_len = stmmac_rx_buf2_len(priv, p, status, len); + len += buf2_len; + + /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 + * Type frames (LLC/LLC-SNAP) + * + * llc_snap is never checked in GMAC >= 4, so this ACS + * feature is always disabled and packets need to be + * stripped manually. + */ + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || + unlikely(status != llc_snap)) { + if (buf2_len) + buf2_len -= ETH_FCS_LEN; + else + buf1_len -= ETH_FCS_LEN; + + len -= ETH_FCS_LEN; } if (!skb) { - int ret = stmmac_get_rx_header_len(priv, p, &hlen); - - if (priv->sph && !ret && (hlen > 0)) { - sec_len = len; - if (!(status & rx_not_ls)) - sec_len = sec_len - hlen; - len = hlen; - - prefetch(page_address(buf->sec_page)); - priv->xstats.rx_split_hdr_pkt_n++; - } - - skb = napi_alloc_skb(&ch->rx_napi, len); + skb = napi_alloc_skb(&ch->rx_napi, buf1_len); if (!skb) { priv->dev->stats.rx_dropped++; - continue; + count++; + goto drain_data; } - dma_sync_single_for_cpu(priv->device, buf->addr, len, - DMA_FROM_DEVICE); + dma_sync_single_for_cpu(priv->device, buf->addr, + buf1_len, DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, page_address(buf->page), - len); - skb_put(skb, len); + buf1_len); + skb_put(skb, buf1_len); /* Data payload copied into SKB, page ready for recycle */ page_pool_recycle_direct(rx_q->page_pool, buf->page); buf->page = NULL; - } else { - unsigned int buf_len = len - prev_len; - - if (likely(status & rx_not_ls)) - buf_len = priv->dma_buf_sz; - + } else if (buf1_len) { dma_sync_single_for_cpu(priv->device, buf->addr, - buf_len, DMA_FROM_DEVICE); + buf1_len, DMA_FROM_DEVICE); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - buf->page, 0, buf_len, + buf->page, 0, buf1_len, priv->dma_buf_sz); /* Data payload appended into SKB */ @@ -3602,22 +3683,23 @@ read_again: buf->page = NULL; } - if (sec_len > 0) { + if (buf2_len) { dma_sync_single_for_cpu(priv->device, buf->sec_addr, - sec_len, DMA_FROM_DEVICE); + buf2_len, DMA_FROM_DEVICE); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - buf->sec_page, 0, sec_len, + buf->sec_page, 0, buf2_len, priv->dma_buf_sz); - len += sec_len; - /* Data payload appended into SKB */ page_pool_release_page(rx_q->page_pool, buf->sec_page); buf->sec_page = NULL; } +drain_data: if (likely(status & rx_not_ls)) goto read_again; + if (!skb) + continue; /* Got entire packet into SKB. Finish it. 
*/ @@ -3635,12 +3717,14 @@ read_again: skb_record_rx_queue(skb, queue); napi_gro_receive(&ch->rx_napi, skb); + skb = NULL; priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += len; + count++; } - if (status & rx_not_ls) { + if (status & rx_not_ls || skb) { rx_q->state_saved = true; rx_q->state.skb = skb; rx_q->state.error = error; @@ -3988,11 +4072,13 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { - if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + int gso = skb_shinfo(skb)->gso_type; + + if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) { /* - * There is no way to determine the number of TSO + * There is no way to determine the number of TSO/USO * capable Queues. Let's use always the Queue 0 - * because if TSO is supported then at least this + * because if TSO/USO is supported then at least this * one will be capable. */ return 0; @@ -4208,6 +4294,7 @@ static u32 stmmac_vid_crc32_le(__le16 vid_le) static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) { u32 crc, hash = 0; + __le16 pmatch = 0; int count = 0; u16 vid = 0; @@ -4222,11 +4309,11 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) if (count > 2) /* VID = 0 always passes filter */ return -EOPNOTSUPP; - vid = cpu_to_le16(vid); + pmatch = cpu_to_le16(vid); hash = 0; } - return stmmac_update_vlan_hash(priv, priv->hw, hash, vid, is_double); + return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); } static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) @@ -4506,6 +4593,8 @@ int stmmac_dvr_probe(struct device *device, if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + if (priv->plat->has_gmac4) + ndev->hw_features |= NETIF_F_GSO_UDP_L4; priv->tso = true; dev_info(priv->device, "TSO feature enabled\n"); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 40c42637ad75..cfe5d8b73142 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -41,20 +41,32 @@ #define MII_XGMAC_BUSY BIT(22) #define MII_XGMAC_MAX_C22ADDR 3 #define MII_XGMAC_C22P_MASK GENMASK(MII_XGMAC_MAX_C22ADDR, 0) +#define MII_XGMAC_PA_SHIFT 16 +#define MII_XGMAC_DA_SHIFT 21 + +static int stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr, + int phyreg, u32 *hw_addr) +{ + u32 tmp; + + /* Set port as Clause 45 */ + tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P); + tmp &= ~BIT(phyaddr); + writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P); + + *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0xffff); + *hw_addr |= (phyreg >> MII_DEVADDR_C45_SHIFT) << MII_XGMAC_DA_SHIFT; + return 0; +} static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr, int phyreg, u32 *hw_addr) { - unsigned int mii_data = priv->hw->mii.data; u32 tmp; /* HW does not support C22 addr >= 4 */ if (phyaddr > MII_XGMAC_MAX_C22ADDR) return -ENODEV; - /* Wait until any existing MII operation is complete */ - if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, - !(tmp & MII_XGMAC_BUSY), 100, 10000)) - return -EBUSY; /* Set port as Clause 22 */ tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P); @@ -62,7 +74,7 @@ static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr, tmp |= BIT(phyaddr); writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P); - 
*hw_addr = (phyaddr << 16) | (phyreg & 0x1f); + *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0x1f); return 0; } @@ -75,17 +87,28 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) u32 tmp, addr, value = MII_XGMAC_BUSY; int ret; + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000)) + return -EBUSY; + if (phyreg & MII_ADDR_C45) { - return -EOPNOTSUPP; + phyreg &= ~MII_ADDR_C45; + + ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); + if (ret) + return ret; } else { ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); if (ret) return ret; + + value |= MII_XGMAC_SADDR; } value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) & priv->hw->mii.clk_csr_mask; - value |= MII_XGMAC_SADDR | MII_XGMAC_READ; + value |= MII_XGMAC_READ; /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, @@ -115,17 +138,28 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, u32 addr, tmp, value = MII_XGMAC_BUSY; int ret; + /* Wait until any existing MII operation is complete */ + if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, + !(tmp & MII_XGMAC_BUSY), 100, 10000)) + return -EBUSY; + if (phyreg & MII_ADDR_C45) { - return -EOPNOTSUPP; + phyreg &= ~MII_ADDR_C45; + + ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr); + if (ret) + return ret; } else { ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr); if (ret) return ret; + + value |= MII_XGMAC_SADDR; } value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift) & priv->hw->mii.clk_csr_mask; - value |= phydata | MII_XGMAC_SADDR; + value |= phydata; value |= MII_XGMAC_WRITE; /* Wait until any existing MII operation is complete */ @@ -363,6 +397,10 @@ int stmmac_mdio_register(struct net_device *ndev) goto bus_register_fail; } + /* Looks like we need a dummy read for XGMAC only and C45 PHYs */ + if (priv->plat->has_xgmac) + stmmac_xgmac2_mdio_read(new_bus, 0, MII_ADDR_C45); + if (priv->plat->phy_node || mdio_node) goto bus_register_done; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 170c3a052b14..bedaff0c13bd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) *mac = NULL; } - plat->phy_interface = of_get_phy_mode(np); - if (plat->phy_interface < 0) - return ERR_PTR(plat->phy_interface); + rc = of_get_phy_mode(np, &plat->phy_interface); + if (rc) + return ERR_PTR(rc); plat->interface = stmmac_of_get_mac_mode(np); if (plat->interface < 0) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index df638b18b72c..0989e2bb6ee3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -140,6 +140,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp, switch (rq->type) { case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + cfg = &priv->pps[rq->perout.index]; cfg->start.tv_sec = rq->perout.start.sec; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index 0b5db52149bc..f3d8b9336b8e 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -6,7 +6,9 @@ * Author: Jose Abreu <joabreu@synopsys.com> */ +#include <linux/bitrev.h> #include <linux/completion.h> +#include <linux/crc32.h> #include <linux/ethtool.h> #include <linux/ip.h> #include <linux/phy.h> @@ -485,12 +487,48 @@ static int stmmac_filter_check(struct stmmac_priv *priv) return -EOPNOTSUPP; } +static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr) +{ + int mc_offset = 32 - priv->hw->mcast_bits_log2; + struct netdev_hw_addr *ha; + u32 hash, hash_nr; + + /* First compute the hash for desired addr */ + hash = bitrev32(~crc32_le(~0, addr, 6)) >> mc_offset; + hash_nr = hash >> 5; + hash = 1 << (hash & 0x1f); + + /* Now, check if it collides with any existing one */ + netdev_for_each_mc_addr(ha, priv->dev) { + u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset; + if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash)) + return false; + } + + /* No collisions, address is good to go */ + return true; +} + +static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr) +{ + struct netdev_hw_addr *ha; + + /* Check if it collides with any existing one */ + netdev_for_each_uc_addr(ha, priv->dev) { + if (!memcmp(ha->addr, addr, ETH_ALEN)) + return false; + } + + /* No collisions, address is good to go */ + return true; +} + static int stmmac_test_hfilt(struct stmmac_priv *priv) { - unsigned char gd_addr[ETH_ALEN] = {0x01, 0xee, 0xdd, 0xcc, 0xbb, 0xaa}; - unsigned char bd_addr[ETH_ALEN] = {0x01, 0x01, 0x02, 0x03, 0x04, 0x05}; + unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa}; + unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff}; struct stmmac_packet_attrs attr = { }; - int ret; + int ret, tries = 256; ret = stmmac_filter_check(priv); if (ret) @@ -499,6 +537,16 @@ static int stmmac_test_hfilt(struct stmmac_priv *priv) if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins) return -EOPNOTSUPP; + while (--tries) { + /* We only need to check the bd_addr for collisions */ + bd_addr[ETH_ALEN - 1] = tries; + if (stmmac_hash_check(priv, bd_addr)) + break; + } + + if (!tries) + return -EOPNOTSUPP; + ret = dev_mc_add(priv->dev, gd_addr); if (ret) return ret; @@ -523,13 +571,25 @@ cleanup: static int stmmac_test_pfilt(struct stmmac_priv *priv) { - unsigned char gd_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; - unsigned char bd_addr[ETH_ALEN] = {0x08, 0x00, 0x22, 0x33, 0x44, 0x55}; + unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77}; + unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff}; struct stmmac_packet_attrs attr = { }; - int ret; + int ret, tries = 256; if (stmmac_filter_check(priv)) return -EOPNOTSUPP; + if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries) + return -EOPNOTSUPP; + + while (--tries) { + /* We only need to check the bd_addr for collisions */ + bd_addr[ETH_ALEN - 1] = tries; + if (stmmac_perfect_check(priv, bd_addr)) + break; + } + + if (!tries) + return -EOPNOTSUPP; ret = dev_uc_add(priv->dev, gd_addr); if (ret) @@ -553,39 +613,31 @@ cleanup: return ret; } -static int stmmac_dummy_sync(struct net_device *netdev, const u8 *addr) -{ - return 0; -} - -static void stmmac_test_set_rx_mode(struct net_device *netdev) -{ - /* As we are in test mode of ethtool we already own the rtnl lock - * so no address will change from user. 
We can just call the - * ndo_set_rx_mode() callback directly */ - if (netdev->netdev_ops->ndo_set_rx_mode) - netdev->netdev_ops->ndo_set_rx_mode(netdev); -} - static int stmmac_test_mcfilt(struct stmmac_priv *priv) { - unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; - unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77}; + unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff}; + unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff}; struct stmmac_packet_attrs attr = { }; - int ret; + int ret, tries = 256; if (stmmac_filter_check(priv)) return -EOPNOTSUPP; - if (!priv->hw->multicast_filter_bins) + if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries) return -EOPNOTSUPP; - /* Remove all MC addresses */ - __dev_mc_unsync(priv->dev, NULL); - stmmac_test_set_rx_mode(priv->dev); + while (--tries) { + /* We only need to check the mc_addr for collisions */ + mc_addr[ETH_ALEN - 1] = tries; + if (stmmac_hash_check(priv, mc_addr)) + break; + } + + if (!tries) + return -EOPNOTSUPP; ret = dev_uc_add(priv->dev, uc_addr); if (ret) - goto cleanup; + return ret; attr.dst = uc_addr; @@ -602,30 +654,34 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv) cleanup: dev_uc_del(priv->dev, uc_addr); - __dev_mc_sync(priv->dev, stmmac_dummy_sync, NULL); - stmmac_test_set_rx_mode(priv->dev); return ret; } static int stmmac_test_ucfilt(struct stmmac_priv *priv) { - unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; - unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77}; + unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff}; + unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff}; struct stmmac_packet_attrs attr = { }; - int ret; + int ret, tries = 256; if (stmmac_filter_check(priv)) return -EOPNOTSUPP; - if (!priv->hw->multicast_filter_bins) + if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins) return -EOPNOTSUPP; - /* Remove all UC addresses */ - __dev_uc_unsync(priv->dev, NULL); - stmmac_test_set_rx_mode(priv->dev); + while (--tries) { + /* We only need to check the uc_addr for collisions */ + uc_addr[ETH_ALEN - 1] = tries; + if (stmmac_perfect_check(priv, uc_addr)) + break; + } + + if (!tries) + return -EOPNOTSUPP; ret = dev_mc_add(priv->dev, mc_addr); if (ret) - goto cleanup; + return ret; attr.dst = mc_addr; @@ -642,8 +698,6 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv) cleanup: dev_mc_del(priv->dev, mc_addr); - __dev_uc_sync(priv->dev, stmmac_dummy_sync, NULL); - stmmac_test_set_rx_mode(priv->dev); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index f9a9a9d82233..7d972e0fd2b0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -321,8 +321,6 @@ static int tc_setup_cbs(struct stmmac_priv *priv, return -EINVAL; if (!priv->dma_cap.av) return -EOPNOTSUPP; - if (priv->speed != SPEED_100 && priv->speed != SPEED_1000) - return -EOPNOTSUPP; mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) { diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 834afca3a019..9170572346b5 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -22,6 +22,7 @@ config TI_DAVINCI_EMAC depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST select TI_DAVINCI_MDIO select PHYLIB 
+ select GENERIC_ALLOCATOR ---help--- This driver supports TI's DaVinci Ethernet . @@ -58,9 +59,24 @@ config TI_CPSW To compile this driver as a module, choose M here: the module will be called cpsw. +config TI_CPSW_SWITCHDEV + tristate "TI CPSW Switch Support with switchdev" + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST + select NET_SWITCHDEV + select TI_DAVINCI_MDIO + select MFD_SYSCON + select REGMAP + select NET_DEVLINK + imply PHY_TI_GMII_SEL + help + This driver supports TI's CPSW Ethernet Switch. + + To compile this driver as a module, choose M here: the module + will be called cpsw_new. + config TI_CPTS bool "TI Common Platform Time Sync (CPTS) Support" - depends on TI_CPSW || TI_KEYSTONE_NETCP || COMPILE_TEST + depends on TI_CPSW || TI_KEYSTONE_NETCP || TI_CPSW_SWITCHDEV || COMPILE_TEST depends on COMMON_CLK depends on POSIX_TIMERS ---help--- @@ -72,7 +88,7 @@ config TI_CPTS config TI_CPTS_MOD tristate depends on TI_CPTS - default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y + default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y select NET_PTP_CLASSIFY imply PTP_1588_CLOCK default m diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index ed12e1e5df2f..d34df8e5cf94 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -15,6 +15,8 @@ obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o obj-$(CONFIG_TI_CPTS_MOD) += cpts.o obj-$(CONFIG_TI_CPSW) += ti_cpsw.o ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o +obj-$(CONFIG_TI_CPSW_SWITCHDEV) += ti_cpsw_new.o +ti_cpsw_new-y := cpsw_switchdev.o cpsw_new.o davinci_cpdma.o cpsw_ale.o cpsw_sl.o cpsw_priv.o cpsw_ethtool.o obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o keystone_netcp-y := netcp_core.o cpsw_ale.o diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index f298d714efd6..6ae4a72e6f43 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -34,7 +34,6 @@ #include <net/page_pool.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> -#include <linux/filter.h> #include <linux/pinctrl/consumer.h> #include <net/pkt_cls.h> @@ -64,10 +63,6 @@ static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT; module_param(descs_pool_size, int, 0444); MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool"); -/* The buf includes headroom compatible with both skb and xdpf */ -#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN) -#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long)) - #define for_each_slave(priv, func, arg...) \ do { \ struct cpsw_slave *slave; \ @@ -82,10 +77,16 @@ MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool"); (func)(slave++, ##arg); \ } while (0) -#define CPSW_XMETA_OFFSET ALIGN(sizeof(struct xdp_frame), sizeof(long)) +static int cpsw_slave_index_priv(struct cpsw_common *cpsw, + struct cpsw_priv *priv) +{ + return cpsw->data.dual_emac ? 
priv->emac_port : cpsw->data.active_slave; +} -#define CPSW_XDP_CONSUMED 1 -#define CPSW_XDP_PASS 0 +static int cpsw_get_slave_port(u32 slave_num) +{ + return slave_num + 1; +} static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid); @@ -332,218 +333,6 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) cpsw_del_mc_addr); } -void cpsw_intr_enable(struct cpsw_common *cpsw) -{ - writel_relaxed(0xFF, &cpsw->wr_regs->tx_en); - writel_relaxed(0xFF, &cpsw->wr_regs->rx_en); - - cpdma_ctlr_int_ctrl(cpsw->dma, true); - return; -} - -void cpsw_intr_disable(struct cpsw_common *cpsw) -{ - writel_relaxed(0, &cpsw->wr_regs->tx_en); - writel_relaxed(0, &cpsw->wr_regs->rx_en); - - cpdma_ctlr_int_ctrl(cpsw->dma, false); - return; -} - -static int cpsw_is_xdpf_handle(void *handle) -{ - return (unsigned long)handle & BIT(0); -} - -static void *cpsw_xdpf_to_handle(struct xdp_frame *xdpf) -{ - return (void *)((unsigned long)xdpf | BIT(0)); -} - -static struct xdp_frame *cpsw_handle_to_xdpf(void *handle) -{ - return (struct xdp_frame *)((unsigned long)handle & ~BIT(0)); -} - -struct __aligned(sizeof(long)) cpsw_meta_xdp { - struct net_device *ndev; - int ch; -}; - -void cpsw_tx_handler(void *token, int len, int status) -{ - struct cpsw_meta_xdp *xmeta; - struct xdp_frame *xdpf; - struct net_device *ndev; - struct netdev_queue *txq; - struct sk_buff *skb; - int ch; - - if (cpsw_is_xdpf_handle(token)) { - xdpf = cpsw_handle_to_xdpf(token); - xmeta = (void *)xdpf + CPSW_XMETA_OFFSET; - ndev = xmeta->ndev; - ch = xmeta->ch; - xdp_return_frame(xdpf); - } else { - skb = token; - ndev = skb->dev; - ch = skb_get_queue_mapping(skb); - cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb); - dev_kfree_skb_any(skb); - } - - /* Check whether the queue is stopped due to stalled tx dma, if the - * queue is stopped then start the queue as we have free desc for tx - */ - txq = netdev_get_tx_queue(ndev, ch); - if (unlikely(netif_tx_queue_stopped(txq))) - netif_tx_wake_queue(txq); - - ndev->stats.tx_packets++; - ndev->stats.tx_bytes += len; -} - -static void cpsw_rx_vlan_encap(struct sk_buff *skb) -{ - struct cpsw_priv *priv = netdev_priv(skb->dev); - struct cpsw_common *cpsw = priv->cpsw; - u32 rx_vlan_encap_hdr = *((u32 *)skb->data); - u16 vtag, vid, prio, pkt_type; - - /* Remove VLAN header encapsulation word */ - skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE); - - pkt_type = (rx_vlan_encap_hdr >> - CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) & - CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK; - /* Ignore unknown & Priority-tagged packets*/ - if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV || - pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG) - return; - - vid = (rx_vlan_encap_hdr >> - CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) & - VLAN_VID_MASK; - /* Ignore vid 0 and pass packet as is */ - if (!vid) - return; - /* Ignore default vlans in dual mac mode */ - if (cpsw->data.dual_emac && - vid == cpsw->slaves[priv->emac_port].port_vlan) - return; - - prio = (rx_vlan_encap_hdr >> - CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) & - CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK; - - vtag = (prio << VLAN_PRIO_SHIFT) | vid; - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); - - /* strip vlan tag for VLAN-tagged packet */ - if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) { - memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); - skb_pull(skb, VLAN_HLEN); - } -} - -static int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf, - struct page *page) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_meta_xdp *xmeta; - 
struct cpdma_chan *txch; - dma_addr_t dma; - int ret, port; - - xmeta = (void *)xdpf + CPSW_XMETA_OFFSET; - xmeta->ndev = priv->ndev; - xmeta->ch = 0; - txch = cpsw->txv[0].ch; - - port = priv->emac_port + cpsw->data.dual_emac; - if (page) { - dma = page_pool_get_dma_addr(page); - dma += xdpf->headroom + sizeof(struct xdp_frame); - ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf), - dma, xdpf->len, port); - } else { - if (sizeof(*xmeta) > xdpf->headroom) { - xdp_return_frame_rx_napi(xdpf); - return -EINVAL; - } - - ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf), - xdpf->data, xdpf->len, port); - } - - if (ret) { - priv->ndev->stats.tx_dropped++; - xdp_return_frame_rx_napi(xdpf); - } - - return ret; -} - -static int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp, - struct page *page) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct net_device *ndev = priv->ndev; - int ret = CPSW_XDP_CONSUMED; - struct xdp_frame *xdpf; - struct bpf_prog *prog; - u32 act; - - rcu_read_lock(); - - prog = READ_ONCE(priv->xdp_prog); - if (!prog) { - ret = CPSW_XDP_PASS; - goto out; - } - - act = bpf_prog_run_xdp(prog, xdp); - switch (act) { - case XDP_PASS: - ret = CPSW_XDP_PASS; - break; - case XDP_TX: - xdpf = convert_to_xdp_frame(xdp); - if (unlikely(!xdpf)) - goto drop; - - cpsw_xdp_tx_frame(priv, xdpf, page); - break; - case XDP_REDIRECT: - if (xdp_do_redirect(ndev, xdp, prog)) - goto drop; - - /* Have to flush here, per packet, instead of doing it in bulk - * at the end of the napi handler. The RX devices on this - * particular hardware is sharing a common queue, so the - * incoming device might change per packet. - */ - xdp_do_flush_map(); - break; - default: - bpf_warn_invalid_xdp_action(act); - /* fall through */ - case XDP_ABORTED: - trace_xdp_exception(ndev, prog, act); - /* fall through -- handle aborts by dropping packet */ - case XDP_DROP: - goto drop; - } -out: - rcu_read_unlock(); - return ret; -drop: - rcu_read_unlock(); - page_pool_recycle_direct(cpsw->page_pool[ch], page); - return ret; -} - static unsigned int cpsw_rxbuf_total_len(unsigned int len) { len += CPSW_HEADROOM; @@ -552,123 +341,6 @@ static unsigned int cpsw_rxbuf_total_len(unsigned int len) return SKB_DATA_ALIGN(len); } -static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw, - int size) -{ - struct page_pool_params pp_params; - struct page_pool *pool; - - pp_params.order = 0; - pp_params.flags = PP_FLAG_DMA_MAP; - pp_params.pool_size = size; - pp_params.nid = NUMA_NO_NODE; - pp_params.dma_dir = DMA_BIDIRECTIONAL; - pp_params.dev = cpsw->dev; - - pool = page_pool_create(&pp_params); - if (IS_ERR(pool)) - dev_err(cpsw->dev, "cannot create rx page pool\n"); - - return pool; -} - -static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct xdp_rxq_info *rxq; - struct page_pool *pool; - int ret; - - pool = cpsw->page_pool[ch]; - rxq = &priv->xdp_rxq[ch]; - - ret = xdp_rxq_info_reg(rxq, priv->ndev, ch); - if (ret) - return ret; - - ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool); - if (ret) - xdp_rxq_info_unreg(rxq); - - return ret; -} - -static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch) -{ - struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch]; - - if (!xdp_rxq_info_is_reg(rxq)) - return; - - xdp_rxq_info_unreg(rxq); -} - -static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch) -{ - struct page_pool *pool; - int ret = 0, pool_size; - - pool_size = 
cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch); - pool = cpsw_create_page_pool(cpsw, pool_size); - if (IS_ERR(pool)) - ret = PTR_ERR(pool); - else - cpsw->page_pool[ch] = pool; - - return ret; -} - -void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw) -{ - struct net_device *ndev; - int i, ch; - - for (ch = 0; ch < cpsw->rx_ch_num; ch++) { - for (i = 0; i < cpsw->data.slaves; i++) { - ndev = cpsw->slaves[i].ndev; - if (!ndev) - continue; - - cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch); - } - - page_pool_destroy(cpsw->page_pool[ch]); - cpsw->page_pool[ch] = NULL; - } -} - -int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw) -{ - struct net_device *ndev; - int i, ch, ret; - - for (ch = 0; ch < cpsw->rx_ch_num; ch++) { - ret = cpsw_create_rx_pool(cpsw, ch); - if (ret) - goto err_cleanup; - - /* using same page pool is allowed as no running rx handlers - * simultaneously for both ndevs - */ - for (i = 0; i < cpsw->data.slaves; i++) { - ndev = cpsw->slaves[i].ndev; - if (!ndev) - continue; - - ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch); - if (ret) - goto err_cleanup; - } - } - - return 0; - -err_cleanup: - cpsw_destroy_xdp_rxqs(cpsw); - - return ret; -} - static void cpsw_rx_handler(void *token, int len, int status) { struct page *new_page, *page = token; @@ -735,7 +407,8 @@ static void cpsw_rx_handler(void *token, int len, int status) xdp.data_hard_start = pa; xdp.rxq = &priv->xdp_rxq[ch]; - ret = cpsw_run_xdp(priv, ch, &xdp, page); + port = priv->emac_port + cpsw->data.dual_emac; + ret = cpsw_run_xdp(priv, ch, &xdp, page, port); if (ret != CPSW_XDP_PASS) goto requeue; @@ -785,274 +458,6 @@ requeue: } } -void cpsw_split_res(struct cpsw_common *cpsw) -{ - u32 consumed_rate = 0, bigest_rate = 0; - struct cpsw_vector *txv = cpsw->txv; - int i, ch_weight, rlim_ch_num = 0; - int budget, bigest_rate_ch = 0; - u32 ch_rate, max_rate; - int ch_budget = 0; - - for (i = 0; i < cpsw->tx_ch_num; i++) { - ch_rate = cpdma_chan_get_rate(txv[i].ch); - if (!ch_rate) - continue; - - rlim_ch_num++; - consumed_rate += ch_rate; - } - - if (cpsw->tx_ch_num == rlim_ch_num) { - max_rate = consumed_rate; - } else if (!rlim_ch_num) { - ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num; - bigest_rate = 0; - max_rate = consumed_rate; - } else { - max_rate = cpsw->speed * 1000; - - /* if max_rate is less then expected due to reduced link speed, - * split proportionally according next potential max speed - */ - if (max_rate < consumed_rate) - max_rate *= 10; - - if (max_rate < consumed_rate) - max_rate *= 10; - - ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate; - ch_budget = (CPSW_POLL_WEIGHT - ch_budget) / - (cpsw->tx_ch_num - rlim_ch_num); - bigest_rate = (max_rate - consumed_rate) / - (cpsw->tx_ch_num - rlim_ch_num); - } - - /* split tx weight/budget */ - budget = CPSW_POLL_WEIGHT; - for (i = 0; i < cpsw->tx_ch_num; i++) { - ch_rate = cpdma_chan_get_rate(txv[i].ch); - if (ch_rate) { - txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate; - if (!txv[i].budget) - txv[i].budget++; - if (ch_rate > bigest_rate) { - bigest_rate_ch = i; - bigest_rate = ch_rate; - } - - ch_weight = (ch_rate * 100) / max_rate; - if (!ch_weight) - ch_weight++; - cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight); - } else { - txv[i].budget = ch_budget; - if (!bigest_rate_ch) - bigest_rate_ch = i; - cpdma_chan_set_weight(cpsw->txv[i].ch, 0); - } - - budget -= txv[i].budget; - } - - if (budget) - txv[bigest_rate_ch].budget += budget; - - /* split rx budget */ - budget = CPSW_POLL_WEIGHT; - ch_budget = budget / cpsw->rx_ch_num; - 
for (i = 0; i < cpsw->rx_ch_num; i++) { - cpsw->rxv[i].budget = ch_budget; - budget -= ch_budget; - } - - if (budget) - cpsw->rxv[0].budget += budget; -} - -static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id) -{ - struct cpsw_common *cpsw = dev_id; - - writel(0, &cpsw->wr_regs->tx_en); - cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX); - - if (cpsw->quirk_irq) { - disable_irq_nosync(cpsw->irqs_table[1]); - cpsw->tx_irq_disabled = true; - } - - napi_schedule(&cpsw->napi_tx); - return IRQ_HANDLED; -} - -static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) -{ - struct cpsw_common *cpsw = dev_id; - - cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); - writel(0, &cpsw->wr_regs->rx_en); - - if (cpsw->quirk_irq) { - disable_irq_nosync(cpsw->irqs_table[0]); - cpsw->rx_irq_disabled = true; - } - - napi_schedule(&cpsw->napi_rx); - return IRQ_HANDLED; -} - -static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget) -{ - u32 ch_map; - int num_tx, cur_budget, ch; - struct cpsw_common *cpsw = napi_to_cpsw(napi_tx); - struct cpsw_vector *txv; - - /* process every unprocessed channel */ - ch_map = cpdma_ctrl_txchs_state(cpsw->dma); - for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) { - if (!(ch_map & 0x80)) - continue; - - txv = &cpsw->txv[ch]; - if (unlikely(txv->budget > budget - num_tx)) - cur_budget = budget - num_tx; - else - cur_budget = txv->budget; - - num_tx += cpdma_chan_process(txv->ch, cur_budget); - if (num_tx >= budget) - break; - } - - if (num_tx < budget) { - napi_complete(napi_tx); - writel(0xff, &cpsw->wr_regs->tx_en); - } - - return num_tx; -} - -static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) -{ - struct cpsw_common *cpsw = napi_to_cpsw(napi_tx); - int num_tx; - - num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget); - if (num_tx < budget) { - napi_complete(napi_tx); - writel(0xff, &cpsw->wr_regs->tx_en); - if (cpsw->tx_irq_disabled) { - cpsw->tx_irq_disabled = false; - enable_irq(cpsw->irqs_table[1]); - } - } - - return num_tx; -} - -static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget) -{ - u32 ch_map; - int num_rx, cur_budget, ch; - struct cpsw_common *cpsw = napi_to_cpsw(napi_rx); - struct cpsw_vector *rxv; - - /* process every unprocessed channel */ - ch_map = cpdma_ctrl_rxchs_state(cpsw->dma); - for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) { - if (!(ch_map & 0x01)) - continue; - - rxv = &cpsw->rxv[ch]; - if (unlikely(rxv->budget > budget - num_rx)) - cur_budget = budget - num_rx; - else - cur_budget = rxv->budget; - - num_rx += cpdma_chan_process(rxv->ch, cur_budget); - if (num_rx >= budget) - break; - } - - if (num_rx < budget) { - napi_complete_done(napi_rx, num_rx); - writel(0xff, &cpsw->wr_regs->rx_en); - } - - return num_rx; -} - -static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) -{ - struct cpsw_common *cpsw = napi_to_cpsw(napi_rx); - int num_rx; - - num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget); - if (num_rx < budget) { - napi_complete_done(napi_rx, num_rx); - writel(0xff, &cpsw->wr_regs->rx_en); - if (cpsw->rx_irq_disabled) { - cpsw->rx_irq_disabled = false; - enable_irq(cpsw->irqs_table[0]); - } - } - - return num_rx; -} - -static inline void soft_reset(const char *module, void __iomem *reg) -{ - unsigned long timeout = jiffies + HZ; - - writel_relaxed(1, reg); - do { - cpu_relax(); - } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies)); - - WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module); -} - -static void cpsw_set_slave_mac(struct cpsw_slave *slave, - struct 
cpsw_priv *priv) -{ - slave_write(slave, mac_hi(priv->mac_addr), SA_HI); - slave_write(slave, mac_lo(priv->mac_addr), SA_LO); -} - -static bool cpsw_shp_is_off(struct cpsw_priv *priv) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave; - u32 shift, mask, val; - - val = readl_relaxed(&cpsw->regs->ptype); - - slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; - shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num; - mask = 7 << shift; - val = val & mask; - - return !val; -} - -static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave; - u32 shift, mask, val; - - val = readl_relaxed(&cpsw->regs->ptype); - - slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; - shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num; - mask = (1 << --fifo) << shift; - val = on ? val | mask : val & ~mask; - - writel_relaxed(val, &cpsw->regs->ptype); -} - static void _cpsw_adjust_link(struct cpsw_slave *slave, struct cpsw_priv *priv, bool *link) { @@ -1118,44 +523,6 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, slave->mac_control = mac_control; } -static int cpsw_get_common_speed(struct cpsw_common *cpsw) -{ - int i, speed; - - for (i = 0, speed = 0; i < cpsw->data.slaves; i++) - if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link) - speed += cpsw->slaves[i].phy->speed; - - return speed; -} - -static int cpsw_need_resplit(struct cpsw_common *cpsw) -{ - int i, rlim_ch_num; - int speed, ch_rate; - - /* re-split resources only in case speed was changed */ - speed = cpsw_get_common_speed(cpsw); - if (speed == cpsw->speed || !speed) - return 0; - - cpsw->speed = speed; - - for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) { - ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch); - if (!ch_rate) - break; - - rlim_ch_num++; - } - - /* cases not dependent on speed */ - if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num) - return 0; - - return 1; -} - static void cpsw_adjust_link(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); @@ -1348,51 +715,6 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) } } -int cpsw_fill_rx_channels(struct cpsw_priv *priv) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_meta_xdp *xmeta; - struct page_pool *pool; - struct page *page; - int ch_buf_num; - int ch, i, ret; - dma_addr_t dma; - - for (ch = 0; ch < cpsw->rx_ch_num; ch++) { - pool = cpsw->page_pool[ch]; - ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch); - for (i = 0; i < ch_buf_num; i++) { - page = page_pool_dev_alloc_pages(pool); - if (!page) { - cpsw_err(priv, ifup, "allocate rx page err\n"); - return -ENOMEM; - } - - xmeta = page_address(page) + CPSW_XMETA_OFFSET; - xmeta->ndev = priv->ndev; - xmeta->ch = ch; - - dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM; - ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch, - page, dma, - cpsw->rx_packet_max, - 0); - if (ret < 0) { - cpsw_err(priv, ifup, - "cannot submit page to channel %d rx, error %d\n", - ch, ret); - page_pool_recycle_direct(pool, page); - return ret; - } - } - - cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n", - ch, ch_buf_num); - } - - return 0; -} - static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw) { u32 slave_port; @@ -1410,221 +732,6 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw) cpsw_sl_ctl_reset(slave->mac_sl); } -static int cpsw_tc_to_fifo(int tc, int num_tc) -{ - if (tc == num_tc - 1) - return 0; - - return 
CPSW_FIFO_SHAPERS_NUM - tc; -} - -static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw) -{ - struct cpsw_common *cpsw = priv->cpsw; - u32 val = 0, send_pct, shift; - struct cpsw_slave *slave; - int pct = 0, i; - - if (bw > priv->shp_cfg_speed * 1000) - goto err; - - /* shaping has to stay enabled for highest fifos linearly - * and fifo bw no more then interface can allow - */ - slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; - send_pct = slave_read(slave, SEND_PERCENT); - for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) { - if (!bw) { - if (i >= fifo || !priv->fifo_bw[i]) - continue; - - dev_warn(priv->dev, "Prev FIFO%d is shaped", i); - continue; - } - - if (!priv->fifo_bw[i] && i > fifo) { - dev_err(priv->dev, "Upper FIFO%d is not shaped", i); - return -EINVAL; - } - - shift = (i - 1) * 8; - if (i == fifo) { - send_pct &= ~(CPSW_PCT_MASK << shift); - val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10); - if (!val) - val = 1; - - send_pct |= val << shift; - pct += val; - continue; - } - - if (priv->fifo_bw[i]) - pct += (send_pct >> shift) & CPSW_PCT_MASK; - } - - if (pct >= 100) - goto err; - - slave_write(slave, send_pct, SEND_PERCENT); - priv->fifo_bw[fifo] = bw; - - dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo, - DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100)); - - return 0; -err: - dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration"); - return -EINVAL; -} - -static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave; - u32 tx_in_ctl_rg, val; - int ret; - - ret = cpsw_set_fifo_bw(priv, fifo, bw); - if (ret) - return ret; - - slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; - tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ? - CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL; - - if (!bw) - cpsw_fifo_shp_on(priv, fifo, bw); - - val = slave_read(slave, tx_in_ctl_rg); - if (cpsw_shp_is_off(priv)) { - /* disable FIFOs rate limited queues */ - val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT); - - /* set type of FIFO queues to normal priority mode */ - val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT); - - /* set type of FIFO queues to be rate limited */ - if (bw) - val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT; - else - priv->shp_cfg_speed = 0; - } - - /* toggle a FIFO rate limited queue */ - if (bw) - val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); - else - val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); - slave_write(slave, val, tx_in_ctl_rg); - - /* FIFO transmit shape enable */ - cpsw_fifo_shp_on(priv, fifo, bw); - return 0; -} - -/* Defaults: - * class A - prio 3 - * class B - prio 2 - * shaping for class A should be set first - */ -static int cpsw_set_cbs(struct net_device *ndev, - struct tc_cbs_qopt_offload *qopt) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave; - int prev_speed = 0; - int tc, ret, fifo; - u32 bw = 0; - - tc = netdev_txq_to_tc(priv->ndev, qopt->queue); - - /* enable channels in backward order, as highest FIFOs must be rate - * limited first and for compliance with CPDMA rate limited channels - * that also used in bacward order. FIFO0 cannot be rate limited. 
- */ - fifo = cpsw_tc_to_fifo(tc, ndev->num_tc); - if (!fifo) { - dev_err(priv->dev, "Last tc%d can't be rate limited", tc); - return -EINVAL; - } - - /* do nothing, it's disabled anyway */ - if (!qopt->enable && !priv->fifo_bw[fifo]) - return 0; - - /* shapers can be set if link speed is known */ - slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; - if (slave->phy && slave->phy->link) { - if (priv->shp_cfg_speed && - priv->shp_cfg_speed != slave->phy->speed) - prev_speed = priv->shp_cfg_speed; - - priv->shp_cfg_speed = slave->phy->speed; - } - - if (!priv->shp_cfg_speed) { - dev_err(priv->dev, "Link speed is not known"); - return -1; - } - - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); - return ret; - } - - bw = qopt->enable ? qopt->idleslope : 0; - ret = cpsw_set_fifo_rlimit(priv, fifo, bw); - if (ret) { - priv->shp_cfg_speed = prev_speed; - prev_speed = 0; - } - - if (bw && prev_speed) - dev_warn(priv->dev, - "Speed was changed, CBS shaper speeds are changed!"); - - pm_runtime_put_sync(cpsw->dev); - return ret; -} - -static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) -{ - int fifo, bw; - - for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) { - bw = priv->fifo_bw[fifo]; - if (!bw) - continue; - - cpsw_set_fifo_rlimit(priv, fifo, bw); - } -} - -static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) -{ - struct cpsw_common *cpsw = priv->cpsw; - u32 tx_prio_map = 0; - int i, tc, fifo; - u32 tx_prio_rg; - - if (!priv->mqprio_hw) - return; - - for (i = 0; i < 8; i++) { - tc = netdev_get_prio_tc_map(priv->ndev, i); - fifo = CPSW_FIFO_SHAPERS_NUM - tc; - tx_prio_map |= fifo << (4 * i); - } - - tx_prio_rg = cpsw->version == CPSW_VERSION_1 ? - CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP; - - slave_write(slave, tx_prio_map, tx_prio_rg); -} - static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg) { struct cpsw_priv *priv = arg; @@ -1853,207 +960,6 @@ fail: return NETDEV_TX_BUSY; } -#if IS_ENABLED(CONFIG_TI_CPTS) - -static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave]; - u32 ts_en, seq_id; - - if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) { - slave_write(slave, 0, CPSW1_TS_CTL); - return; - } - - seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588; - ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS; - - if (priv->tx_ts_enabled) - ts_en |= CPSW_V1_TS_TX_EN; - - if (priv->rx_ts_enabled) - ts_en |= CPSW_V1_TS_RX_EN; - - slave_write(slave, ts_en, CPSW1_TS_CTL); - slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE); -} - -static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) -{ - struct cpsw_slave *slave; - struct cpsw_common *cpsw = priv->cpsw; - u32 ctrl, mtype; - - slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; - - ctrl = slave_read(slave, CPSW2_CONTROL); - switch (cpsw->version) { - case CPSW_VERSION_2: - ctrl &= ~CTRL_V2_ALL_TS_MASK; - - if (priv->tx_ts_enabled) - ctrl |= CTRL_V2_TX_TS_BITS; - - if (priv->rx_ts_enabled) - ctrl |= CTRL_V2_RX_TS_BITS; - break; - case CPSW_VERSION_3: - default: - ctrl &= ~CTRL_V3_ALL_TS_MASK; - - if (priv->tx_ts_enabled) - ctrl |= CTRL_V3_TX_TS_BITS; - - if (priv->rx_ts_enabled) - ctrl |= CTRL_V3_RX_TS_BITS; - break; - } - - mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS; - - slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE); - slave_write(slave, ctrl, CPSW2_CONTROL); - writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype); - 
writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype); -} - -static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) -{ - struct cpsw_priv *priv = netdev_priv(dev); - struct hwtstamp_config cfg; - struct cpsw_common *cpsw = priv->cpsw; - - if (cpsw->version != CPSW_VERSION_1 && - cpsw->version != CPSW_VERSION_2 && - cpsw->version != CPSW_VERSION_3) - return -EOPNOTSUPP; - - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; - - /* reserved for future extensions */ - if (cfg.flags) - return -EINVAL; - - if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) - return -ERANGE; - - switch (cfg.rx_filter) { - case HWTSTAMP_FILTER_NONE: - priv->rx_ts_enabled = 0; - break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_NTP_ALL: - return -ERANGE; - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - break; - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT; - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - break; - default: - return -ERANGE; - } - - priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON; - - switch (cpsw->version) { - case CPSW_VERSION_1: - cpsw_hwtstamp_v1(priv); - break; - case CPSW_VERSION_2: - case CPSW_VERSION_3: - cpsw_hwtstamp_v2(priv); - break; - default: - WARN_ON(1); - } - - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; -} - -static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) -{ - struct cpsw_common *cpsw = ndev_to_cpsw(dev); - struct cpsw_priv *priv = netdev_priv(dev); - struct hwtstamp_config cfg; - - if (cpsw->version != CPSW_VERSION_1 && - cpsw->version != CPSW_VERSION_2 && - cpsw->version != CPSW_VERSION_3) - return -EOPNOTSUPP; - - cfg.flags = 0; - cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - cfg.rx_filter = priv->rx_ts_enabled; - - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; -} -#else -static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) -{ - return -EOPNOTSUPP; -} - -static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) -{ - return -EOPNOTSUPP; -} -#endif /*CONFIG_TI_CPTS*/ - -static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) -{ - struct cpsw_priv *priv = netdev_priv(dev); - struct cpsw_common *cpsw = priv->cpsw; - int slave_no = cpsw_slave_index(cpsw, priv); - - if (!netif_running(dev)) - return -EINVAL; - - switch (cmd) { - case SIOCSHWTSTAMP: - return cpsw_hwtstamp_set(dev, req); - case SIOCGHWTSTAMP: - return cpsw_hwtstamp_get(dev, req); - } - - if (!cpsw->slaves[slave_no].phy) - return -EOPNOTSUPP; - return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd); -} - -static void cpsw_ndo_tx_timeout(struct net_device *ndev) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int ch; - - cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n"); - ndev->stats.tx_errors++; - cpsw_intr_disable(cpsw); - for (ch = 0; ch < cpsw->tx_ch_num; ch++) { - cpdma_chan_stop(cpsw->txv[ch].ch); - cpdma_chan_start(cpsw->txv[ch].ch); - } - - cpsw_intr_enable(cpsw); - netif_trans_update(ndev); - netif_tx_wake_all_queues(ndev); -} - static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) { struct cpsw_priv *priv = netdev_priv(ndev); @@ -2215,168 +1121,13 @@ err: return ret; } -static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave; - u32 min_rate; - u32 ch_rate; - int i, ret; - - ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate; - if (ch_rate == rate) - return 0; - - ch_rate = rate * 1000; - min_rate = cpdma_chan_get_min_rate(cpsw->dma); - if ((ch_rate < min_rate && ch_rate)) { - dev_err(priv->dev, "The channel rate cannot be less than %dMbps", - min_rate); - return -EINVAL; - } - - if (rate > cpsw->speed) { - dev_err(priv->dev, "The channel rate cannot be more than 2Gbps"); - return -EINVAL; - } - - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); - return ret; - } - - ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate); - pm_runtime_put(cpsw->dev); - - if (ret) - return ret; - - /* update rates for slaves tx queues */ - for (i = 0; i < cpsw->data.slaves; i++) { - slave = &cpsw->slaves[i]; - if (!slave->ndev) - continue; - - netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate; - } - - cpsw_split_res(cpsw); - return ret; -} - -static int cpsw_set_mqprio(struct net_device *ndev, void *type_data) -{ - struct tc_mqprio_qopt_offload *mqprio = type_data; - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int fifo, num_tc, count, offset; - struct cpsw_slave *slave; - u32 tx_prio_map = 0; - int i, tc, ret; - - num_tc = mqprio->qopt.num_tc; - if (num_tc > CPSW_TC_NUM) - return -EINVAL; - - if (mqprio->mode != TC_MQPRIO_MODE_DCB) - return -EINVAL; - - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); - return ret; - } - - if (num_tc) { - for (i = 0; i < 8; i++) { - tc = mqprio->qopt.prio_tc_map[i]; - fifo = cpsw_tc_to_fifo(tc, num_tc); - tx_prio_map |= fifo << (4 * i); - } - - netdev_set_num_tc(ndev, num_tc); - for (i = 0; i < num_tc; i++) { - count = mqprio->qopt.count[i]; - offset = mqprio->qopt.offset[i]; - netdev_set_tc_queue(ndev, i, count, offset); - } - } - - if 
(!mqprio->qopt.hw) { - /* restore default configuration */ - netdev_reset_tc(ndev); - tx_prio_map = TX_PRIORITY_MAPPING; - } - - priv->mqprio_hw = mqprio->qopt.hw; - - offset = cpsw->version == CPSW_VERSION_1 ? - CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP; - - slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; - slave_write(slave, tx_prio_map, offset); - - pm_runtime_put_sync(cpsw->dev); - - return 0; -} - -static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, - void *type_data) -{ - switch (type) { - case TC_SETUP_QDISC_CBS: - return cpsw_set_cbs(ndev, type_data); - - case TC_SETUP_QDISC_MQPRIO: - return cpsw_set_mqprio(ndev, type_data); - - default: - return -EOPNOTSUPP; - } -} - -static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf) -{ - struct bpf_prog *prog = bpf->prog; - - if (!priv->xdpi.prog && !prog) - return 0; - - if (!xdp_attachment_flags_ok(&priv->xdpi, bpf)) - return -EBUSY; - - WRITE_ONCE(priv->xdp_prog, prog); - - xdp_attachment_setup(&priv->xdpi, bpf); - - return 0; -} - -static int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - - switch (bpf->command) { - case XDP_SETUP_PROG: - return cpsw_xdp_prog_setup(priv, bpf); - - case XDP_QUERY_PROG: - return xdp_attachment_query(&priv->xdpi, bpf); - - default: - return -EINVAL; - } -} - static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames, u32 flags) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; struct xdp_frame *xdpf; - int i, drops = 0; + int i, drops = 0, port; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; @@ -2389,7 +1140,8 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, continue; } - if (cpsw_xdp_tx_frame(priv, xdpf, NULL)) + port = priv->emac_port + cpsw->data.dual_emac; + if (cpsw_xdp_tx_frame(priv, xdpf, NULL, port)) drops++; } @@ -2619,11 +1371,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, i); goto no_phy_slave; } - slave_data->phy_if = of_get_phy_mode(slave_node); - if (slave_data->phy_if < 0) { + ret = of_get_phy_mode(slave_node, &slave_data->phy_if); + if (ret) { dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", i); - ret = slave_data->phy_if; goto err_node_put; } @@ -2776,6 +1527,8 @@ static int cpsw_probe(struct platform_device *pdev) return -ENOMEM; platform_set_drvdata(pdev, cpsw); + cpsw_slave_index = cpsw_slave_index_priv; + cpsw->dev = dev; mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW); diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 84025dcc78d5..929f3d3354e3 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -5,6 +5,8 @@ * Copyright (C) 2012 Texas Instruments * */ +#include <linux/bitmap.h> +#include <linux/if_vlan.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -382,6 +384,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int mcast_members; int idx; idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? 
vid : 0); @@ -390,11 +393,15 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, cpsw_ale_read(ale, idx, ale_entry); - if (port_mask) - cpsw_ale_set_port_mask(ale_entry, port_mask, + if (port_mask) { + mcast_members = cpsw_ale_get_port_mask(ale_entry, + ale->port_mask_bits); + mcast_members &= ~port_mask; + cpsw_ale_set_port_mask(ale_entry, mcast_members, ale->port_mask_bits); - else + } else { cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); + } cpsw_ale_write(ale, idx, ale_entry); return 0; @@ -415,7 +422,18 @@ static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry, writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx)); } -int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, +static void cpsw_ale_set_vlan_untag(struct cpsw_ale *ale, u32 *ale_entry, + u16 vid, int untag_mask) +{ + cpsw_ale_set_vlan_untag_force(ale_entry, + untag_mask, ale->vlan_field_bits); + if (untag_mask & ALE_PORT_HOST) + bitmap_set(ale->p0_untag_vid_mask, vid, 1); + else + bitmap_clear(ale->p0_untag_vid_mask, vid, 1); +} + +int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag, int reg_mcast, int unreg_mcast) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; @@ -427,8 +445,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN); cpsw_ale_set_vlan_id(ale_entry, vid); + cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag); - cpsw_ale_set_vlan_untag_force(ale_entry, untag, ale->vlan_field_bits); if (!ale->params.nu_switch_ale) { cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast, ale->vlan_field_bits); @@ -437,7 +455,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, } else { cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast); } - cpsw_ale_set_vlan_member_list(ale_entry, port, ale->vlan_field_bits); + cpsw_ale_set_vlan_member_list(ale_entry, port_mask, + ale->vlan_field_bits); if (idx < 0) idx = cpsw_ale_match_free(ale); @@ -450,6 +469,41 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, return 0; } +static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry, + u16 vid, int port_mask) +{ + int reg_mcast, unreg_mcast; + int members, untag; + + members = cpsw_ale_get_vlan_member_list(ale_entry, + ale->vlan_field_bits); + members &= ~port_mask; + + untag = cpsw_ale_get_vlan_untag_force(ale_entry, + ale->vlan_field_bits); + reg_mcast = cpsw_ale_get_vlan_reg_mcast(ale_entry, + ale->vlan_field_bits); + unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry, + ale->vlan_field_bits); + untag &= members; + reg_mcast &= members; + unreg_mcast &= members; + + cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag); + + if (!ale->params.nu_switch_ale) { + cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast, + ale->vlan_field_bits); + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast, + ale->vlan_field_bits); + } else { + cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, + unreg_mcast); + } + cpsw_ale_set_vlan_member_list(ale_entry, members, + ale->vlan_field_bits); +} + int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; @@ -461,16 +515,83 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) cpsw_ale_read(ale, idx, ale_entry); - if (port_mask) - cpsw_ale_set_vlan_member_list(ale_entry, port_mask, - ale->vlan_field_bits); - else + if (port_mask) { + cpsw_ale_del_vlan_modify(ale, ale_entry, vid, port_mask); + 
} else { + cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0); cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); + } cpsw_ale_write(ale, idx, ale_entry); + return 0; } +int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask, + int untag_mask, int reg_mask, int unreg_mask) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int reg_mcast_members, unreg_mcast_members; + int vlan_members, untag_members; + int idx, ret = 0; + + idx = cpsw_ale_match_vlan(ale, vid); + if (idx >= 0) + cpsw_ale_read(ale, idx, ale_entry); + + vlan_members = cpsw_ale_get_vlan_member_list(ale_entry, + ale->vlan_field_bits); + reg_mcast_members = cpsw_ale_get_vlan_reg_mcast(ale_entry, + ale->vlan_field_bits); + unreg_mcast_members = + cpsw_ale_get_vlan_unreg_mcast(ale_entry, + ale->vlan_field_bits); + untag_members = cpsw_ale_get_vlan_untag_force(ale_entry, + ale->vlan_field_bits); + + vlan_members |= port_mask; + untag_members = (untag_members & ~port_mask) | untag_mask; + reg_mcast_members = (reg_mcast_members & ~port_mask) | reg_mask; + unreg_mcast_members = (unreg_mcast_members & ~port_mask) | unreg_mask; + + ret = cpsw_ale_add_vlan(ale, vid, vlan_members, untag_members, + reg_mcast_members, unreg_mcast_members); + if (ret) { + dev_err(ale->params.dev, "Unable to add vlan\n"); + return ret; + } + dev_dbg(ale->params.dev, "port mask 0x%x untag 0x%x\n", vlan_members, + untag_mask); + + return ret; +} + +void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask, + bool add) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int unreg_members = 0; + int type, idx; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + cpsw_ale_read(ale, idx, ale_entry); + type = cpsw_ale_get_entry_type(ale_entry); + if (type != ALE_TYPE_VLAN) + continue; + + unreg_members = + cpsw_ale_get_vlan_unreg_mcast(ale_entry, + ale->vlan_field_bits); + if (add) + unreg_members |= unreg_mcast_mask; + else + unreg_members &= ~unreg_mcast_mask; + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_members, + ale->vlan_field_bits); + cpsw_ale_write(ale, idx, ale_entry); + } +} + void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port) { u32 ale_entry[ALE_ENTRY_WORDS]; @@ -779,6 +900,7 @@ void cpsw_ale_start(struct cpsw_ale *ale) void cpsw_ale_stop(struct cpsw_ale *ale) { del_timer_sync(&ale->timer); + cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); } @@ -791,6 +913,13 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) if (!ale) return NULL; + ale->p0_untag_vid_mask = + devm_kmalloc_array(params->dev, BITS_TO_LONGS(VLAN_N_VID), + sizeof(unsigned long), + GFP_KERNEL); + if (!ale->p0_untag_vid_mask) + return ERR_PTR(-ENOMEM); + ale->params = *params; ale->ageout = ale->params.ale_ageout * HZ; @@ -862,6 +991,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS; } + cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); return ale; } diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 370df254eb12..70d0955c2652 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -35,6 +35,7 @@ struct cpsw_ale { u32 port_mask_bits; u32 port_num_bits; u32 vlan_field_bits; + unsigned long *p0_untag_vid_mask; }; enum cpsw_ale_control { @@ -115,4 +116,14 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control, int value); void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data); +static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid) 
+{ + return test_bit(vid, ale->p0_untag_vid_mask); +} + +int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask, + int untag_mask, int reg_mcast, int unreg_mcast); +void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask, + bool add); + #endif diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c new file mode 100644 index 000000000000..71215db7934b --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -0,0 +1,2048 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Texas Instruments Ethernet Switch Driver + * + * Copyright (C) 2019 Texas Instruments + */ + +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/timer.h> +#include <linux/module.h> +#include <linux/irqreturn.h> +#include <linux/interrupt.h> +#include <linux/if_ether.h> +#include <linux/etherdevice.h> +#include <linux/net_tstamp.h> +#include <linux/phy.h> +#include <linux/phy/phy.h> +#include <linux/delay.h> +#include <linux/pm_runtime.h> +#include <linux/gpio/consumer.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/of_device.h> +#include <linux/if_vlan.h> +#include <linux/kmemleak.h> +#include <linux/sys_soc.h> + +#include <net/page_pool.h> +#include <net/pkt_cls.h> +#include <net/devlink.h> + +#include "cpsw.h" +#include "cpsw_ale.h" +#include "cpsw_priv.h" +#include "cpsw_sl.h" +#include "cpsw_switchdev.h" +#include "cpts.h" +#include "davinci_cpdma.h" + +#include <net/pkt_sched.h> + +static int debug_level; +static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT; +static int rx_packet_max = CPSW_MAX_PACKET_SIZE; +static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT; + +struct cpsw_devlink { + struct cpsw_common *cpsw; +}; + +enum cpsw_devlink_param_id { + CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + CPSW_DL_PARAM_SWITCH_MODE, + CPSW_DL_PARAM_ALE_BYPASS, +}; + +/* struct cpsw_common is not needed, kept here for compatibility + * reasons witrh the old driver + */ +static int cpsw_slave_index_priv(struct cpsw_common *cpsw, + struct cpsw_priv *priv) +{ + if (priv->emac_port == HOST_PORT_NUM) + return -1; + + return priv->emac_port - 1; +} + +static bool cpsw_is_switch_en(struct cpsw_common *cpsw) +{ + return !cpsw->data.dual_emac; +} + +static void cpsw_set_promiscious(struct net_device *ndev, bool enable) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + bool enable_uni = false; + int i; + + if (cpsw_is_switch_en(cpsw)) + return; + + /* Enabling promiscuous mode for one interface will be + * common for both the interface as the interface shares + * the same hardware resource. 
+ */ + for (i = 0; i < cpsw->data.slaves; i++) + if (cpsw->slaves[i].ndev && + (cpsw->slaves[i].ndev->flags & IFF_PROMISC)) + enable_uni = true; + + if (!enable && enable_uni) { + enable = enable_uni; + dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n"); + } + + if (enable) { + /* Enable unknown unicast, reg/unreg mcast */ + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, + ALE_P0_UNI_FLOOD, 1); + + dev_dbg(cpsw->dev, "promiscuity enabled\n"); + } else { + /* Disable unknown unicast */ + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, + ALE_P0_UNI_FLOOD, 0); + dev_dbg(cpsw->dev, "promiscuity disabled\n"); + } +} + +/** + * cpsw_set_mc - adds multicast entry to the table if it's not added or deletes + * if it's not deleted + * @ndev: device to sync + * @addr: address to be added or deleted + * @vid: vlan id, if vid < 0 set/unset address for real device + * @add: add address if the flag is set or remove otherwise + */ +static int cpsw_set_mc(struct net_device *ndev, const u8 *addr, + int vid, int add) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int mask, flags, ret, slave_no; + + slave_no = cpsw_slave_index(cpsw, priv); + if (vid < 0) + vid = cpsw->slaves[slave_no].port_vlan; + + mask = ALE_PORT_HOST; + flags = vid ? ALE_VLAN : 0; + + if (add) + ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0); + else + ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid); + + return ret; +} + +static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx) +{ + struct addr_sync_ctx *sync_ctx = ctx; + struct netdev_hw_addr *ha; + int found = 0, ret = 0; + + if (!vdev || !(vdev->flags & IFF_UP)) + return 0; + + /* vlan address is relevant if its sync_cnt != 0 */ + netdev_for_each_mc_addr(ha, vdev) { + if (ether_addr_equal(ha->addr, sync_ctx->addr)) { + found = ha->sync_cnt; + break; + } + } + + if (found) + sync_ctx->consumed++; + + if (sync_ctx->flush) { + if (!found) + cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0); + return 0; + } + + if (found) + ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1); + + return ret; +} + +static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num) +{ + struct addr_sync_ctx sync_ctx; + int ret; + + sync_ctx.consumed = 0; + sync_ctx.addr = addr; + sync_ctx.ndev = ndev; + sync_ctx.flush = 0; + + ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx); + if (sync_ctx.consumed < num && !ret) + ret = cpsw_set_mc(ndev, addr, -1, 1); + + return ret; +} + +static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num) +{ + struct addr_sync_ctx sync_ctx; + + sync_ctx.consumed = 0; + sync_ctx.addr = addr; + sync_ctx.ndev = ndev; + sync_ctx.flush = 1; + + vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx); + if (sync_ctx.consumed == num) + cpsw_set_mc(ndev, addr, -1, 0); + + return 0; +} + +static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx) +{ + struct addr_sync_ctx *sync_ctx = ctx; + struct netdev_hw_addr *ha; + int found = 0; + + if (!vdev || !(vdev->flags & IFF_UP)) + return 0; + + /* vlan address is relevant if its sync_cnt != 0 */ + netdev_for_each_mc_addr(ha, vdev) { + if (ether_addr_equal(ha->addr, sync_ctx->addr)) { + found = ha->sync_cnt; + break; + } + } + + if (!found) + return 0; + + sync_ctx->consumed++; + cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0); + return 0; +} + +static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num) +{ + struct addr_sync_ctx 
sync_ctx; + + sync_ctx.addr = addr; + sync_ctx.ndev = ndev; + sync_ctx.consumed = 0; + + vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx); + if (sync_ctx.consumed < num) + cpsw_set_mc(ndev, addr, -1, 0); + + return 0; +} + +static void cpsw_ndo_set_rx_mode(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + + if (ndev->flags & IFF_PROMISC) { + /* Enable promiscuous mode */ + cpsw_set_promiscious(ndev, true); + cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port); + return; + } + + /* Disable promiscuous mode */ + cpsw_set_promiscious(ndev, false); + + /* Restore allmulti on vlans if necessary */ + cpsw_ale_set_allmulti(cpsw->ale, + ndev->flags & IFF_ALLMULTI, priv->emac_port); + + /* add/remove mcast address either for real netdev or for vlan */ + __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, + cpsw_del_mc_addr); +} + +static unsigned int cpsw_rxbuf_total_len(unsigned int len) +{ + len += CPSW_HEADROOM; + len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + return SKB_DATA_ALIGN(len); +} + +static void cpsw_rx_handler(void *token, int len, int status) +{ + struct page *new_page, *page = token; + void *pa = page_address(page); + int headroom = CPSW_HEADROOM; + struct cpsw_meta_xdp *xmeta; + struct cpsw_common *cpsw; + struct net_device *ndev; + int port, ch, pkt_size; + struct cpsw_priv *priv; + struct page_pool *pool; + struct sk_buff *skb; + struct xdp_buff xdp; + int ret = 0; + dma_addr_t dma; + + xmeta = pa + CPSW_XMETA_OFFSET; + cpsw = ndev_to_cpsw(xmeta->ndev); + ndev = xmeta->ndev; + pkt_size = cpsw->rx_packet_max; + ch = xmeta->ch; + + if (status >= 0) { + port = CPDMA_RX_SOURCE_PORT(status); + if (port) + ndev = cpsw->slaves[--port].ndev; + } + + priv = netdev_priv(ndev); + pool = cpsw->page_pool[ch]; + + if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { + /* In dual emac mode check for all interfaces */ + if (cpsw->usage_count && status >= 0) { + /* The packet received is for the interface which + * is already down and the other interface is up + * and running, instead of freeing which results + * in reducing of the number of rx descriptor in + * DMA engine, requeue page back to cpdma. 
+ */ + new_page = page; + goto requeue; + } + + /* the interface is going down, pages are purged */ + page_pool_recycle_direct(pool, page); + return; + } + + new_page = page_pool_dev_alloc_pages(pool); + if (unlikely(!new_page)) { + new_page = page; + ndev->stats.rx_dropped++; + goto requeue; + } + + if (priv->xdp_prog) { + if (status & CPDMA_RX_VLAN_ENCAP) { + xdp.data = pa + CPSW_HEADROOM + + CPSW_RX_VLAN_ENCAP_HDR_SIZE; + xdp.data_end = xdp.data + len - + CPSW_RX_VLAN_ENCAP_HDR_SIZE; + } else { + xdp.data = pa + CPSW_HEADROOM; + xdp.data_end = xdp.data + len; + } + + xdp_set_data_meta_invalid(&xdp); + + xdp.data_hard_start = pa; + xdp.rxq = &priv->xdp_rxq[ch]; + + ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port); + if (ret != CPSW_XDP_PASS) + goto requeue; + + /* XDP prog might have changed packet data and boundaries */ + len = xdp.data_end - xdp.data; + headroom = xdp.data - xdp.data_hard_start; + + /* XDP prog can modify vlan tag, so can't use encap header */ + status &= ~CPDMA_RX_VLAN_ENCAP; + } + + /* pass skb to netstack if no XDP prog or returned XDP_PASS */ + skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size)); + if (!skb) { + ndev->stats.rx_dropped++; + page_pool_recycle_direct(pool, page); + goto requeue; + } + + skb->offload_fwd_mark = priv->offload_fwd_mark; + skb_reserve(skb, headroom); + skb_put(skb, len); + skb->dev = ndev; + if (status & CPDMA_RX_VLAN_ENCAP) + cpsw_rx_vlan_encap(skb); + if (priv->rx_ts_enabled) + cpts_rx_timestamp(cpsw->cpts, skb); + skb->protocol = eth_type_trans(skb, ndev); + + /* unmap page as no netstack skb page recycling */ + page_pool_release_page(pool, page); + netif_receive_skb(skb); + + ndev->stats.rx_bytes += len; + ndev->stats.rx_packets++; + +requeue: + xmeta = page_address(new_page) + CPSW_XMETA_OFFSET; + xmeta->ndev = ndev; + xmeta->ch = ch; + + dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM; + ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma, + pkt_size, 0); + if (ret < 0) { + WARN_ON(ret == -ENOMEM); + page_pool_recycle_direct(pool, new_page); + } +} + +static int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, + unsigned short vid) +{ + struct cpsw_common *cpsw = priv->cpsw; + int unreg_mcast_mask = 0; + int mcast_mask; + u32 port_mask; + int ret; + + port_mask = (1 << priv->emac_port) | ALE_PORT_HOST; + + mcast_mask = ALE_PORT_HOST; + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = mcast_mask; + + ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask, + unreg_mcast_mask); + if (ret != 0) + return ret; + + ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN, vid); + if (ret != 0) + goto clean_vid; + + ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, + mcast_mask, ALE_VLAN, vid, 0); + if (ret != 0) + goto clean_vlan_ucast; + return 0; + +clean_vlan_ucast: + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN, vid); +clean_vid: + cpsw_ale_del_vlan(cpsw->ale, vid, 0); + return ret; +} + +static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, + __be16 proto, u16 vid) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ret, i; + + if (cpsw_is_switch_en(cpsw)) { + dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n"); + return 0; + } + + if (vid == cpsw->data.default_vlan) + return 0; + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + /* In dual EMAC, reserved VLAN id should not be used for + * creating VLAN 
interfaces as this can break the dual + * EMAC port separation + */ + for (i = 0; i < cpsw->data.slaves; i++) { + if (cpsw->slaves[i].ndev && + vid == cpsw->slaves[i].port_vlan) { + ret = -EINVAL; + goto err; + } + } + + dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid); + ret = cpsw_add_vlan_ale_entry(priv, vid); +err: + pm_runtime_put(cpsw->dev); + return ret; +} + +static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg) +{ + struct cpsw_priv *priv = arg; + + if (!vdev || !vid) + return 0; + + cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid); + return 0; +} + +/* restore resources after port reset */ +static void cpsw_restore(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + + /* restore vlan configurations */ + vlan_for_each(priv->ndev, cpsw_restore_vlans, priv); + + /* restore MQPRIO offload */ + cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv); + + /* restore CBS offload */ + cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv); +} + +static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw) +{ + char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0}; + + cpsw_ale_add_mcast(cpsw->ale, stpa, + ALE_PORT_HOST, ALE_SUPER, 0, + ALE_MCAST_BLOCK_LEARN_FWD); +} + +static void cpsw_init_host_port_switch(struct cpsw_common *cpsw) +{ + int vlan = cpsw->data.default_vlan; + + writel(CPSW_FIFO_NORMAL_MODE, &cpsw->host_port_regs->tx_in_ctl); + + writel(vlan, &cpsw->host_port_regs->port_vlan); + + cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, + ALE_ALL_PORTS, ALE_ALL_PORTS, + ALE_PORT_1 | ALE_PORT_2); + + cpsw_init_stp_ale_entry(cpsw); + + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1); + dev_dbg(cpsw->dev, "Set P0_UNI_FLOOD\n"); + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0); +} + +static void cpsw_init_host_port_dual_mac(struct cpsw_common *cpsw) +{ + int vlan = cpsw->data.default_vlan; + + writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl); + + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0); + dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n"); + + writel(vlan, &cpsw->host_port_regs->port_vlan); + + cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); + /* learning make no sense in dual_mac mode */ + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1); +} + +static void cpsw_init_host_port(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + u32 control_reg; + + /* soft reset the controller and initialize ale */ + soft_reset("cpsw", &cpsw->regs->soft_reset); + cpsw_ale_start(cpsw->ale); + + /* switch to vlan unaware mode */ + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, + CPSW_ALE_VLAN_AWARE); + control_reg = readl(&cpsw->regs->control); + control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP; + writel(control_reg, &cpsw->regs->control); + + /* setup host port priority mapping */ + writel_relaxed(CPDMA_TX_PRIORITY_MAP, + &cpsw->host_port_regs->cpdma_tx_pri_map); + writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map); + + /* disable priority elevation */ + writel_relaxed(0, &cpsw->regs->ptype); + + /* enable statistics collection only on all ports */ + writel_relaxed(0x7, &cpsw->regs->stat_port_en); + + /* Enable internal fifo flow control */ + writel(0x7, &cpsw->regs->flow_control); + + if (cpsw_is_switch_en(cpsw)) + cpsw_init_host_port_switch(cpsw); + else + cpsw_init_host_port_dual_mac(cpsw); + + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, + ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); +} 
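+/* Host port setup differs between the two modes: in switch mode P0
+ * floods unknown unicast (ALE_P0_UNI_FLOOD), learning stays enabled and
+ * 01:80:c2:00:00:00 - the IEEE 802.1D bridge group address - is
+ * forwarded to the host with learning blocked so STP BPDUs reach the
+ * Linux bridge. In dual-MAC mode flooding and learning are disabled on
+ * the host port and the external ports are kept apart by the per-port
+ * reserved VLANs programmed in cpsw_port_add_dual_emac_def_ale_entries()
+ * below.
+ */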
+ +static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv, + struct cpsw_slave *slave) +{ + u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST; + struct cpsw_common *cpsw = priv->cpsw; + u32 reg; + + reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN : + CPSW2_PORT_VLAN; + slave_write(slave, slave->port_vlan, reg); + + cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask, + port_mask, port_mask, 0); + cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, + ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, + ALE_MCAST_FWD); + cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN | + ALE_SECURE, slave->port_vlan); + cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_DROP_UNKNOWN_VLAN, 1); + /* learning make no sense in dual_mac mode */ + cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_NOLEARN, 1); +} + +static void cpsw_port_add_switch_def_ale_entries(struct cpsw_priv *priv, + struct cpsw_slave *slave) +{ + u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST; + struct cpsw_common *cpsw = priv->cpsw; + u32 reg; + + cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_DROP_UNKNOWN_VLAN, 0); + cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_NOLEARN, 0); + /* disabling SA_UPDATE required to make stp work, without this setting + * Host MAC addresses will jump between ports. + * As per TRM MAC address can be defined as unicast supervisory (super) + * by setting both (ALE_BLOCKED | ALE_SECURE) which should prevent + * SA_UPDATE, but HW seems works incorrectly and setting ALE_SECURE + * causes STP packets to be dropped due to ingress filter + * if (source address found) and (secure) and + * (receive port number != port_number)) + * then discard the packet + */ + cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_NO_SA_UPDATE, 1); + + cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, + port_mask, ALE_VLAN, slave->port_vlan, + ALE_MCAST_FWD_2); + cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN, slave->port_vlan); + + reg = (cpsw->version == CPSW_VERSION_1) ? 
CPSW1_PORT_VLAN : + CPSW2_PORT_VLAN; + slave_write(slave, slave->port_vlan, reg); +} + +static void cpsw_adjust_link(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + struct phy_device *phy; + u32 mac_control = 0; + + slave = &cpsw->slaves[priv->emac_port - 1]; + phy = slave->phy; + + if (!phy) + return; + + if (phy->link) { + mac_control = CPSW_SL_CTL_GMII_EN; + + if (phy->speed == 1000) + mac_control |= CPSW_SL_CTL_GIG; + if (phy->duplex) + mac_control |= CPSW_SL_CTL_FULLDUPLEX; + + /* set speed_in input in case RMII mode is used in 100Mbps */ + if (phy->speed == 100) + mac_control |= CPSW_SL_CTL_IFCTL_A; + /* in band mode only works in 10Mbps RGMII mode */ + else if ((phy->speed == 10) && phy_interface_is_rgmii(phy)) + mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */ + + if (priv->rx_pause) + mac_control |= CPSW_SL_CTL_RX_FLOW_EN; + + if (priv->tx_pause) + mac_control |= CPSW_SL_CTL_TX_FLOW_EN; + + if (mac_control != slave->mac_control) + cpsw_sl_ctl_set(slave->mac_sl, mac_control); + + /* enable forwarding */ + cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); + + netif_tx_wake_all_queues(ndev); + + if (priv->shp_cfg_speed && + priv->shp_cfg_speed != slave->phy->speed && + !cpsw_shp_is_off(priv)) + dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are changed!"); + } else { + netif_tx_stop_all_queues(ndev); + + mac_control = 0; + /* disable forwarding */ + cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); + + cpsw_sl_wait_for_idle(slave->mac_sl, 100); + + cpsw_sl_ctl_reset(slave->mac_sl); + } + + if (mac_control != slave->mac_control) + phy_print_status(phy); + + slave->mac_control = mac_control; + + if (phy->link && cpsw_need_resplit(cpsw)) + cpsw_split_res(cpsw); +} + +static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct phy_device *phy; + + cpsw_sl_reset(slave->mac_sl, 100); + cpsw_sl_ctl_reset(slave->mac_sl); + + /* setup priority mapping */ + cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP, + RX_PRIORITY_MAPPING); + + switch (cpsw->version) { + case CPSW_VERSION_1: + slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); + /* Increase RX FIFO size to 5 for supporting fullduplex + * flow control mode + */ + slave_write(slave, + (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | + CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS); + break; + case CPSW_VERSION_2: + case CPSW_VERSION_3: + case CPSW_VERSION_4: + slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); + /* Increase RX FIFO size to 5 for supporting fullduplex + * flow control mode + */ + slave_write(slave, + (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | + CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS); + break; + } + + /* setup max packet size, and mac address */ + cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN, + cpsw->rx_packet_max); + cpsw_set_slave_mac(slave, priv); + + slave->mac_control = 0; /* no link yet */ + + if (cpsw_is_switch_en(cpsw)) + cpsw_port_add_switch_def_ale_entries(priv, slave); + else + cpsw_port_add_dual_emac_def_ale_entries(priv, slave); + + if (!slave->data->phy_node) + dev_err(priv->dev, "no phy found on slave %d\n", + slave->slave_num); + phy = of_phy_connect(priv->ndev, slave->data->phy_node, + &cpsw_adjust_link, 0, slave->data->phy_if); + if (!phy) { + dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n", + slave->data->phy_node, + 
slave->slave_num); + return; + } + slave->phy = phy; + + phy_attached_info(slave->phy); + + phy_start(slave->phy); + + /* Configure GMII_SEL register */ + phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET, + slave->data->phy_if); +} + +static int cpsw_ndo_stop(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + + cpsw_info(priv, ifdown, "shutting down ndev\n"); + slave = &cpsw->slaves[priv->emac_port - 1]; + if (slave->phy) + phy_stop(slave->phy); + + netif_tx_stop_all_queues(priv->ndev); + + if (slave->phy) { + phy_disconnect(slave->phy); + slave->phy = NULL; + } + + __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc); + + if (cpsw->usage_count <= 1) { + napi_disable(&cpsw->napi_rx); + napi_disable(&cpsw->napi_tx); + cpts_unregister(cpsw->cpts); + cpsw_intr_disable(cpsw); + cpdma_ctlr_stop(cpsw->dma); + cpsw_ale_stop(cpsw->ale); + cpsw_destroy_xdp_rxqs(cpsw); + } + + if (cpsw_need_resplit(cpsw)) + cpsw_split_res(cpsw); + + cpsw->usage_count--; + pm_runtime_put_sync(cpsw->dev); + return 0; +} + +static int cpsw_ndo_open(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ret; + + dev_info(priv->dev, "starting ndev. mode: %s\n", + cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac"); + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + /* Notify the stack of the actual queue counts. */ + ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num); + if (ret) { + dev_err(priv->dev, "cannot set real number of tx queues\n"); + goto pm_cleanup; + } + + ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num); + if (ret) { + dev_err(priv->dev, "cannot set real number of rx queues\n"); + goto pm_cleanup; + } + + /* Initialize host and slave ports */ + if (!cpsw->usage_count) + cpsw_init_host_port(priv); + cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv); + + /* initialize shared resources for every ndev */ + if (!cpsw->usage_count) { + /* create rxqs for both infs in dual mac as they use same pool + * and must be destroyed together when no users. 
+ */ + ret = cpsw_create_xdp_rxqs(cpsw); + if (ret < 0) + goto err_cleanup; + + ret = cpsw_fill_rx_channels(priv); + if (ret < 0) + goto err_cleanup; + + if (cpts_register(cpsw->cpts)) + dev_err(priv->dev, "error registering cpts device\n"); + + napi_enable(&cpsw->napi_rx); + napi_enable(&cpsw->napi_tx); + + if (cpsw->tx_irq_disabled) { + cpsw->tx_irq_disabled = false; + enable_irq(cpsw->irqs_table[1]); + } + + if (cpsw->rx_irq_disabled) { + cpsw->rx_irq_disabled = false; + enable_irq(cpsw->irqs_table[0]); + } + } + + cpsw_restore(priv); + + /* Enable Interrupt pacing if configured */ + if (cpsw->coal_intvl != 0) { + struct ethtool_coalesce coal; + + coal.rx_coalesce_usecs = cpsw->coal_intvl; + cpsw_set_coalesce(ndev, &coal); + } + + cpdma_ctlr_start(cpsw->dma); + cpsw_intr_enable(cpsw); + cpsw->usage_count++; + + return 0; + +err_cleanup: + cpsw_ndo_stop(ndev); + +pm_cleanup: + pm_runtime_put_sync(cpsw->dev); + return ret; +} + +static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpts *cpts = cpsw->cpts; + struct netdev_queue *txq; + struct cpdma_chan *txch; + int ret, q_idx; + + if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { + cpsw_err(priv, tx_err, "packet pad failed\n"); + ndev->stats.tx_dropped++; + return NET_XMIT_DROP; + } + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb)) + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + q_idx = skb_get_queue_mapping(skb); + if (q_idx >= cpsw->tx_ch_num) + q_idx = q_idx % cpsw->tx_ch_num; + + txch = cpsw->txv[q_idx].ch; + txq = netdev_get_tx_queue(ndev, q_idx); + skb_tx_timestamp(skb); + ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, + priv->emac_port); + if (unlikely(ret != 0)) { + cpsw_err(priv, tx_err, "desc submit failed\n"); + goto fail; + } + + /* If there is no more tx desc left free then we need to + * tell the kernel to stop sending us tx frames. 
+ */ + if (unlikely(!cpdma_check_free_tx_desc(txch))) { + netif_tx_stop_queue(txq); + + /* Barrier, so that stop_queue visible to other cpus */ + smp_mb__after_atomic(); + + if (cpdma_check_free_tx_desc(txch)) + netif_tx_wake_queue(txq); + } + + return NETDEV_TX_OK; +fail: + ndev->stats.tx_dropped++; + netif_tx_stop_queue(txq); + + /* Barrier, so that stop_queue visible to other cpus */ + smp_mb__after_atomic(); + + if (cpdma_check_free_tx_desc(txch)) + netif_tx_wake_queue(txq); + + return NETDEV_TX_BUSY; +} + +static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) +{ + struct sockaddr *addr = (struct sockaddr *)p; + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ret, slave_no; + int flags = 0; + u16 vid = 0; + + slave_no = cpsw_slave_index(cpsw, priv); + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + vid = cpsw->slaves[slave_no].port_vlan; + flags = ALE_VLAN | ALE_SECURE; + + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, + flags, vid); + cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM, + flags, vid); + + ether_addr_copy(priv->mac_addr, addr->sa_data); + ether_addr_copy(ndev->dev_addr, priv->mac_addr); + cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv); + + pm_runtime_put(cpsw->dev); + + return 0; +} + +static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, + __be16 proto, u16 vid) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ret; + int i; + + if (cpsw_is_switch_en(cpsw)) { + dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n"); + return 0; + } + + if (vid == cpsw->data.default_vlan) + return 0; + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + for (i = 0; i < cpsw->data.slaves; i++) { + if (cpsw->slaves[i].ndev && + vid == cpsw->slaves[i].port_vlan) + goto err; + } + + dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid); + cpsw_ale_del_vlan(cpsw->ale, vid, 0); + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN, vid); + cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, + 0, ALE_VLAN, vid); + cpsw_ale_flush_multicast(cpsw->ale, 0, vid); +err: + pm_runtime_put(cpsw->dev); + return ret; +} + +static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name, + size_t len) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int err; + + err = snprintf(name, len, "p%d", priv->emac_port); + + if (err >= len) + return -EINVAL; + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cpsw_ndo_poll_controller(struct net_device *ndev) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + cpsw_intr_disable(cpsw); + cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw); + cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw); + cpsw_intr_enable(cpsw); +} +#endif + +static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct xdp_frame *xdpf; + int i, drops = 0; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + for (i = 0; i < n; i++) { + xdpf = frames[i]; + if (xdpf->len < CPSW_MIN_PACKET_SIZE) { + xdp_return_frame_rx_napi(xdpf); + drops++; + continue; + } + + if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port)) + drops++; + } + + return n - drops; +} + +static int 
cpsw_get_port_parent_id(struct net_device *ndev, + struct netdev_phys_item_id *ppid) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + ppid->id_len = sizeof(cpsw->base_mac); + memcpy(&ppid->id, &cpsw->base_mac, ppid->id_len); + + return 0; +} + +static const struct net_device_ops cpsw_netdev_ops = { + .ndo_open = cpsw_ndo_open, + .ndo_stop = cpsw_ndo_stop, + .ndo_start_xmit = cpsw_ndo_start_xmit, + .ndo_set_mac_address = cpsw_ndo_set_mac_address, + .ndo_do_ioctl = cpsw_ndo_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = cpsw_ndo_tx_timeout, + .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, + .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cpsw_ndo_poll_controller, +#endif + .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid, + .ndo_setup_tc = cpsw_ndo_setup_tc, + .ndo_get_phys_port_name = cpsw_ndo_get_phys_port_name, + .ndo_bpf = cpsw_ndo_bpf, + .ndo_xdp_xmit = cpsw_ndo_xdp_xmit, + .ndo_get_port_parent_id = cpsw_get_port_parent_id, +}; + +static void cpsw_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct platform_device *pdev; + + pdev = to_platform_device(cpsw->dev); + strlcpy(info->driver, "cpsw-switch", sizeof(info->driver)); + strlcpy(info->version, "2.0", sizeof(info->version)); + strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info)); +} + +static int cpsw_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no; + + slave_no = cpsw_slave_index(cpsw, priv); + if (!cpsw->slaves[slave_no].phy) + return -EINVAL; + + if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause)) + return -EINVAL; + + priv->rx_pause = pause->rx_pause ? true : false; + priv->tx_pause = pause->tx_pause ? 
true : false; + + phy_set_asym_pause(cpsw->slaves[slave_no].phy, + priv->rx_pause, priv->tx_pause); + + return 0; +} + +static int cpsw_set_channels(struct net_device *ndev, + struct ethtool_channels *chs) +{ + return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler); +} + +static const struct ethtool_ops cpsw_ethtool_ops = { + .get_drvinfo = cpsw_get_drvinfo, + .get_msglevel = cpsw_get_msglevel, + .set_msglevel = cpsw_set_msglevel, + .get_link = ethtool_op_get_link, + .get_ts_info = cpsw_get_ts_info, + .get_coalesce = cpsw_get_coalesce, + .set_coalesce = cpsw_set_coalesce, + .get_sset_count = cpsw_get_sset_count, + .get_strings = cpsw_get_strings, + .get_ethtool_stats = cpsw_get_ethtool_stats, + .get_pauseparam = cpsw_get_pauseparam, + .set_pauseparam = cpsw_set_pauseparam, + .get_wol = cpsw_get_wol, + .set_wol = cpsw_set_wol, + .get_regs_len = cpsw_get_regs_len, + .get_regs = cpsw_get_regs, + .begin = cpsw_ethtool_op_begin, + .complete = cpsw_ethtool_op_complete, + .get_channels = cpsw_get_channels, + .set_channels = cpsw_set_channels, + .get_link_ksettings = cpsw_get_link_ksettings, + .set_link_ksettings = cpsw_set_link_ksettings, + .get_eee = cpsw_get_eee, + .set_eee = cpsw_set_eee, + .nway_reset = cpsw_nway_reset, + .get_ringparam = cpsw_get_ringparam, + .set_ringparam = cpsw_set_ringparam, +}; + +static int cpsw_probe_dt(struct cpsw_common *cpsw) +{ + struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np; + struct cpsw_platform_data *data = &cpsw->data; + struct device *dev = cpsw->dev; + int ret; + u32 prop; + + if (!node) + return -EINVAL; + + tmp_node = of_get_child_by_name(node, "ethernet-ports"); + if (!tmp_node) + return -ENOENT; + data->slaves = of_get_child_count(tmp_node); + if (data->slaves != CPSW_SLAVE_PORTS_NUM) { + of_node_put(tmp_node); + return -ENOENT; + } + + data->active_slave = 0; + data->channels = CPSW_MAX_QUEUES; + data->ale_entries = CPSW_ALE_NUM_ENTRIES; + data->dual_emac = 1; + data->bd_ram_size = CPSW_BD_RAM_SIZE; + data->mac_control = 0; + + data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM, + sizeof(struct cpsw_slave_data), + GFP_KERNEL); + if (!data->slave_data) + return -ENOMEM; + + /* Populate all the child nodes here... 
+ */ + ret = devm_of_platform_populate(dev); + /* We do not want to force this, as in some cases may not have child */ + if (ret) + dev_warn(dev, "Doesn't have any child node\n"); + + for_each_child_of_node(tmp_node, port_np) { + struct cpsw_slave_data *slave_data; + const void *mac_addr; + u32 port_id; + + ret = of_property_read_u32(port_np, "reg", &port_id); + if (ret < 0) { + dev_err(dev, "%pOF error reading port_id %d\n", + port_np, ret); + goto err_node_put; + } + + if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) { + dev_err(dev, "%pOF has invalid port_id %u\n", + port_np, port_id); + ret = -EINVAL; + goto err_node_put; + } + + slave_data = &data->slave_data[port_id - 1]; + + slave_data->disabled = !of_device_is_available(port_np); + if (slave_data->disabled) + continue; + + slave_data->slave_node = port_np; + slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL); + if (IS_ERR(slave_data->ifphy)) { + ret = PTR_ERR(slave_data->ifphy); + dev_err(dev, "%pOF: Error retrieving port phy: %d\n", + port_np, ret); + goto err_node_put; + } + + if (of_phy_is_fixed_link(port_np)) { + ret = of_phy_register_fixed_link(port_np); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(dev, "%pOF failed to register fixed-link phy: %d\n", + port_np, ret); + goto err_node_put; + } + slave_data->phy_node = of_node_get(port_np); + } else { + slave_data->phy_node = + of_parse_phandle(port_np, "phy-handle", 0); + } + + if (!slave_data->phy_node) { + dev_err(dev, "%pOF no phy found\n", port_np); + ret = -ENODEV; + goto err_node_put; + } + + ret = of_get_phy_mode(port_np, &slave_data->phy_if); + if (ret) { + dev_err(dev, "%pOF read phy-mode err %d\n", + port_np, ret); + goto err_node_put; + } + + mac_addr = of_get_mac_address(port_np); + if (!IS_ERR(mac_addr)) { + ether_addr_copy(slave_data->mac_addr, mac_addr); + } else { + ret = ti_cm_get_macid(dev, port_id - 1, + slave_data->mac_addr); + if (ret) + goto err_node_put; + } + + if (of_property_read_u32(port_np, "ti,dual-emac-pvid", + &prop)) { + dev_err(dev, "%pOF Missing dual_emac_res_vlan in DT.\n", + port_np); + slave_data->dual_emac_res_vlan = port_id; + dev_err(dev, "%pOF Using %d as Reserved VLAN\n", + port_np, slave_data->dual_emac_res_vlan); + } else { + slave_data->dual_emac_res_vlan = prop; + } + } + + of_node_put(tmp_node); + return 0; + +err_node_put: + of_node_put(port_np); + return ret; +} + +static void cpsw_remove_dt(struct cpsw_common *cpsw) +{ + struct cpsw_platform_data *data = &cpsw->data; + int i = 0; + + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave_data *slave_data = &data->slave_data[i]; + struct device_node *port_np = slave_data->phy_node; + + if (port_np) { + if (of_phy_is_fixed_link(port_np)) + of_phy_deregister_fixed_link(port_np); + + of_node_put(port_np); + } + } +} + +static int cpsw_create_ports(struct cpsw_common *cpsw) +{ + struct cpsw_platform_data *data = &cpsw->data; + struct net_device *ndev, *napi_ndev = NULL; + struct device *dev = cpsw->dev; + struct cpsw_priv *priv; + int ret = 0, i = 0; + + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave_data *slave_data = &data->slave_data[i]; + + if (slave_data->disabled) + continue; + + ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv), + CPSW_MAX_QUEUES, + CPSW_MAX_QUEUES); + if (!ndev) { + dev_err(dev, "error allocating net_device\n"); + return -ENOMEM; + } + + priv = netdev_priv(ndev); + priv->cpsw = cpsw; + priv->ndev = ndev; + priv->dev = dev; + priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); + priv->emac_port = i + 1; + + if 
(is_valid_ether_addr(slave_data->mac_addr)) { + ether_addr_copy(priv->mac_addr, slave_data->mac_addr); + dev_info(cpsw->dev, "Detected MACID = %pM\n", + priv->mac_addr); + } else { + eth_random_addr(slave_data->mac_addr); + dev_info(cpsw->dev, "Random MACID = %pM\n", + priv->mac_addr); + } + ether_addr_copy(ndev->dev_addr, slave_data->mac_addr); + ether_addr_copy(priv->mac_addr, slave_data->mac_addr); + + cpsw->slaves[i].ndev = ndev; + + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL; + + ndev->netdev_ops = &cpsw_netdev_ops; + ndev->ethtool_ops = &cpsw_ethtool_ops; + SET_NETDEV_DEV(ndev, dev); + + if (!napi_ndev) { + /* CPSW Host port CPDMA interface is shared between + * ports and there is only one TX and one RX IRQs + * available for all possible TX and RX channels + * accordingly. + */ + netif_napi_add(ndev, &cpsw->napi_rx, + cpsw->quirk_irq ? + cpsw_rx_poll : cpsw_rx_mq_poll, + CPSW_POLL_WEIGHT); + netif_tx_napi_add(ndev, &cpsw->napi_tx, + cpsw->quirk_irq ? + cpsw_tx_poll : cpsw_tx_mq_poll, + CPSW_POLL_WEIGHT); + } + + napi_ndev = ndev; + } + + return ret; +} + +static void cpsw_unregister_ports(struct cpsw_common *cpsw) +{ + int i = 0; + + for (i = 0; i < cpsw->data.slaves; i++) { + if (!cpsw->slaves[i].ndev) + continue; + + unregister_netdev(cpsw->slaves[i].ndev); + } +} + +static int cpsw_register_ports(struct cpsw_common *cpsw) +{ + int ret = 0, i = 0; + + for (i = 0; i < cpsw->data.slaves; i++) { + if (!cpsw->slaves[i].ndev) + continue; + + /* register the network device */ + ret = register_netdev(cpsw->slaves[i].ndev); + if (ret) { + dev_err(cpsw->dev, + "cpsw: err registering net device%d\n", i); + cpsw->slaves[i].ndev = NULL; + break; + } + } + + if (ret) + cpsw_unregister_ports(cpsw); + return ret; +} + +bool cpsw_port_dev_check(const struct net_device *ndev) +{ + if (ndev->netdev_ops == &cpsw_netdev_ops) { + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + return !cpsw->data.dual_emac; + } + + return false; +} + +static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw) +{ + int set_val = 0; + int i; + + if (!cpsw->ale_bypass && + (cpsw->br_members == (ALE_PORT_1 | ALE_PORT_2))) + set_val = 1; + + dev_dbg(cpsw->dev, "set offload_fwd_mark %d\n", set_val); + + for (i = 0; i < cpsw->data.slaves; i++) { + struct net_device *sl_ndev = cpsw->slaves[i].ndev; + struct cpsw_priv *priv = netdev_priv(sl_ndev); + + priv->offload_fwd_mark = set_val; + } +} + +static int cpsw_netdevice_port_link(struct net_device *ndev, + struct net_device *br_ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + + if (!cpsw->br_members) { + cpsw->hw_bridge_dev = br_ndev; + } else { + /* This is adding the port to a second bridge, this is + * unsupported + */ + if (cpsw->hw_bridge_dev != br_ndev) + return -EOPNOTSUPP; + } + + cpsw->br_members |= BIT(priv->emac_port); + + cpsw_port_offload_fwd_mark_update(cpsw); + + return NOTIFY_DONE; +} + +static void cpsw_netdevice_port_unlink(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + + cpsw->br_members &= ~BIT(priv->emac_port); + + cpsw_port_offload_fwd_mark_update(cpsw); + + if (!cpsw->br_members) + cpsw->hw_bridge_dev = NULL; +} + +/* netdev notifier */ +static int cpsw_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info; + int ret = NOTIFY_DONE; + 
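+	/* Only NETDEV_CHANGEUPPER events on CPSW ports in switch mode are of
+	 * interest: they tell us when a port joins or leaves a bridge.
+	 * cpsw->br_members tracks the member ports and, via
+	 * cpsw_port_offload_fwd_mark_update(), offload_fwd_mark is set once
+	 * both ports are in the same bridge and ALE bypass is off, so the
+	 * software bridge does not forward packets the ALE has already
+	 * forwarded in hardware.
+	 */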
+ if (!cpsw_port_dev_check(ndev)) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGEUPPER: + info = ptr; + + if (netif_is_bridge_master(info->upper_dev)) { + if (info->linking) + ret = cpsw_netdevice_port_link(ndev, + info->upper_dev); + else + cpsw_netdevice_port_unlink(ndev); + } + break; + default: + return NOTIFY_DONE; + } + + return notifier_from_errno(ret); +} + +static struct notifier_block cpsw_netdevice_nb __read_mostly = { + .notifier_call = cpsw_netdevice_event, +}; + +static int cpsw_register_notifiers(struct cpsw_common *cpsw) +{ + int ret = 0; + + ret = register_netdevice_notifier(&cpsw_netdevice_nb); + if (ret) { + dev_err(cpsw->dev, "can't register netdevice notifier\n"); + return ret; + } + + ret = cpsw_switchdev_register_notifiers(cpsw); + if (ret) + unregister_netdevice_notifier(&cpsw_netdevice_nb); + + return ret; +} + +static void cpsw_unregister_notifiers(struct cpsw_common *cpsw) +{ + cpsw_switchdev_unregister_notifiers(cpsw); + unregister_netdevice_notifier(&cpsw_netdevice_nb); +} + +static const struct devlink_ops cpsw_devlink_ops = { +}; + +static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct cpsw_devlink *dl_priv = devlink_priv(dl); + struct cpsw_common *cpsw = dl_priv->cpsw; + + dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); + + if (id != CPSW_DL_PARAM_SWITCH_MODE) + return -EOPNOTSUPP; + + ctx->val.vbool = !cpsw->data.dual_emac; + + return 0; +} + +static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct cpsw_devlink *dl_priv = devlink_priv(dl); + struct cpsw_common *cpsw = dl_priv->cpsw; + int vlan = cpsw->data.default_vlan; + bool switch_en = ctx->val.vbool; + bool if_running = false; + int i; + + dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); + + if (id != CPSW_DL_PARAM_SWITCH_MODE) + return -EOPNOTSUPP; + + if (switch_en == !cpsw->data.dual_emac) + return 0; + + if (!switch_en && cpsw->br_members) { + dev_err(cpsw->dev, "Remove ports from BR before disabling switch mode\n"); + return -EINVAL; + } + + rtnl_lock(); + + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave *slave = &cpsw->slaves[i]; + struct net_device *sl_ndev = slave->ndev; + + if (!sl_ndev || !netif_running(sl_ndev)) + continue; + + if_running = true; + } + + if (!if_running) { + /* all ndevs are down */ + cpsw->data.dual_emac = !switch_en; + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave *slave = &cpsw->slaves[i]; + struct net_device *sl_ndev = slave->ndev; + struct cpsw_priv *priv; + + if (!sl_ndev) + continue; + + priv = netdev_priv(sl_ndev); + if (switch_en) + vlan = cpsw->data.default_vlan; + else + vlan = slave->data->dual_emac_res_vlan; + slave->port_vlan = vlan; + } + goto exit; + } + + if (switch_en) { + dev_info(cpsw->dev, "Enable switch mode\n"); + + /* enable bypass - no forwarding; all traffic goes to Host */ + cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1); + + /* clean up ALE table */ + cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1); + cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT); + + cpsw_init_host_port_switch(cpsw); + + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave *slave = &cpsw->slaves[i]; + struct net_device *sl_ndev = slave->ndev; + struct cpsw_priv *priv; + + if (!sl_ndev) + continue; + + priv = netdev_priv(sl_ndev); + slave->port_vlan = vlan; + if (netif_running(sl_ndev)) + cpsw_port_add_switch_def_ale_entries(priv, + slave); + } + + cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0); + 
cpsw->data.dual_emac = false; + } else { + dev_info(cpsw->dev, "Disable switch mode\n"); + + /* enable bypass - no forwarding; all traffic goes to Host */ + cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1); + + cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1); + cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT); + + cpsw_init_host_port_dual_mac(cpsw); + + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave *slave = &cpsw->slaves[i]; + struct net_device *sl_ndev = slave->ndev; + struct cpsw_priv *priv; + + if (!sl_ndev) + continue; + + priv = netdev_priv(slave->ndev); + slave->port_vlan = slave->data->dual_emac_res_vlan; + cpsw_port_add_dual_emac_def_ale_entries(priv, slave); + } + + cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0); + cpsw->data.dual_emac = true; + } +exit: + rtnl_unlock(); + + return 0; +} + +static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct cpsw_devlink *dl_priv = devlink_priv(dl); + struct cpsw_common *cpsw = dl_priv->cpsw; + + dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); + + switch (id) { + case CPSW_DL_PARAM_ALE_BYPASS: + ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct cpsw_devlink *dl_priv = devlink_priv(dl); + struct cpsw_common *cpsw = dl_priv->cpsw; + int ret = -EOPNOTSUPP; + + dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); + + switch (id) { + case CPSW_DL_PARAM_ALE_BYPASS: + ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, + ctx->val.vbool); + if (!ret) { + cpsw->ale_bypass = ctx->val.vbool; + cpsw_port_offload_fwd_mark_update(cpsw); + } + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct devlink_param cpsw_devlink_params[] = { + DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_SWITCH_MODE, + "switch_mode", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + cpsw_dl_switch_mode_get, cpsw_dl_switch_mode_set, + NULL), + DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS, + "ale_bypass", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL), +}; + +static int cpsw_register_devlink(struct cpsw_common *cpsw) +{ + struct device *dev = cpsw->dev; + struct cpsw_devlink *dl_priv; + int ret = 0; + + cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv)); + if (!cpsw->devlink) + return -ENOMEM; + + dl_priv = devlink_priv(cpsw->devlink); + dl_priv->cpsw = cpsw; + + ret = devlink_register(cpsw->devlink, dev); + if (ret) { + dev_err(dev, "DL reg fail ret:%d\n", ret); + goto dl_free; + } + + ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params, + ARRAY_SIZE(cpsw_devlink_params)); + if (ret) { + dev_err(dev, "DL params reg fail ret:%d\n", ret); + goto dl_unreg; + } + + devlink_params_publish(cpsw->devlink); + return ret; + +dl_unreg: + devlink_unregister(cpsw->devlink); +dl_free: + devlink_free(cpsw->devlink); + return ret; +} + +static void cpsw_unregister_devlink(struct cpsw_common *cpsw) +{ + devlink_params_unpublish(cpsw->devlink); + devlink_params_unregister(cpsw->devlink, cpsw_devlink_params, + ARRAY_SIZE(cpsw_devlink_params)); + devlink_unregister(cpsw->devlink); + devlink_free(cpsw->devlink); +} + +static const struct of_device_id cpsw_of_mtable[] = { + { .compatible = "ti,cpsw-switch"}, + { .compatible = "ti,am335x-cpsw-switch"}, + { .compatible = "ti,am4372-cpsw-switch"}, + { .compatible = 
"ti,dra7-cpsw-switch"}, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, cpsw_of_mtable); + +static const struct soc_device_attribute cpsw_soc_devices[] = { + { .family = "AM33xx", .revision = "ES1.0"}, + { /* sentinel */ } +}; + +static int cpsw_probe(struct platform_device *pdev) +{ + const struct soc_device_attribute *soc; + struct device *dev = &pdev->dev; + struct cpsw_common *cpsw; + struct resource *ss_res; + struct gpio_descs *mode; + void __iomem *ss_regs; + int ret = 0, ch; + struct clk *clk; + int irq; + + cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL); + if (!cpsw) + return -ENOMEM; + + cpsw_slave_index = cpsw_slave_index_priv; + + cpsw->dev = dev; + + cpsw->slaves = devm_kcalloc(dev, + CPSW_SLAVE_PORTS_NUM, + sizeof(struct cpsw_slave), + GFP_KERNEL); + if (!cpsw->slaves) + return -ENOMEM; + + mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW); + if (IS_ERR(mode)) { + ret = PTR_ERR(mode); + dev_err(dev, "gpio request failed, ret %d\n", ret); + return ret; + } + + clk = devm_clk_get(dev, "fck"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_err(dev, "fck is not found %d\n", ret); + return ret; + } + cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; + + ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ss_regs = devm_ioremap_resource(dev, ss_res); + if (IS_ERR(ss_regs)) { + ret = PTR_ERR(ss_regs); + return ret; + } + cpsw->regs = ss_regs; + + irq = platform_get_irq_byname(pdev, "rx"); + if (irq < 0) + return irq; + cpsw->irqs_table[0] = irq; + + irq = platform_get_irq_byname(pdev, "tx"); + if (irq < 0) + return irq; + cpsw->irqs_table[1] = irq; + + platform_set_drvdata(pdev, cpsw); + /* This may be required here for child devices. */ + pm_runtime_enable(dev); + + /* Need to enable clocks with runtime PM api to access module + * registers + */ + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); + return ret; + } + + ret = cpsw_probe_dt(cpsw); + if (ret) + goto clean_dt_ret; + + soc = soc_device_match(cpsw_soc_devices); + if (soc) + cpsw->quirk_irq = 1; + + cpsw->rx_packet_max = rx_packet_max; + cpsw->descs_pool_size = descs_pool_size; + eth_random_addr(cpsw->base_mac); + + ret = cpsw_init_common(cpsw, ss_regs, ale_ageout, + (u32 __force)ss_res->start + CPSW2_BD_OFFSET, + descs_pool_size); + if (ret) + goto clean_dt_ret; + + cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ? + ss_regs + CPSW1_WR_OFFSET : + ss_regs + CPSW2_WR_OFFSET; + + ch = cpsw->quirk_irq ? 0 : 7; + cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0); + if (IS_ERR(cpsw->txv[0].ch)) { + dev_err(dev, "error initializing tx dma channel\n"); + ret = PTR_ERR(cpsw->txv[0].ch); + goto clean_cpts; + } + + cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1); + if (IS_ERR(cpsw->rxv[0].ch)) { + dev_err(dev, "error initializing rx dma channel\n"); + ret = PTR_ERR(cpsw->rxv[0].ch); + goto clean_cpts; + } + cpsw_split_res(cpsw); + + /* setup netdevs */ + ret = cpsw_create_ports(cpsw); + if (ret) + goto clean_unregister_netdev; + + /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and + * MISC IRQs which are always kept disabled with this driver so + * we will not request them. + * + * If anyone wants to implement support for those, make sure to + * first request and append them to irqs_table array. 
+ */ + + ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt, + 0, dev_name(dev), cpsw); + if (ret < 0) { + dev_err(dev, "error attaching irq (%d)\n", ret); + goto clean_unregister_netdev; + } + + ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt, + 0, dev_name(dev), cpsw); + if (ret < 0) { + dev_err(dev, "error attaching irq (%d)\n", ret); + goto clean_unregister_netdev; + } + + ret = cpsw_register_notifiers(cpsw); + if (ret) + goto clean_unregister_netdev; + + ret = cpsw_register_devlink(cpsw); + if (ret) + goto clean_unregister_notifiers; + + ret = cpsw_register_ports(cpsw); + if (ret) + goto clean_unregister_notifiers; + + dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n", + &ss_res->start, descs_pool_size, + cpsw->version, CPSW_MAJOR_VERSION(cpsw->version), + CPSW_MINOR_VERSION(cpsw->version), + CPSW_RTL_VERSION(cpsw->version)); + + pm_runtime_put(dev); + + return 0; + +clean_unregister_notifiers: + cpsw_unregister_notifiers(cpsw); +clean_unregister_netdev: + cpsw_unregister_ports(cpsw); +clean_cpts: + cpts_release(cpsw->cpts); + cpdma_ctlr_destroy(cpsw->dma); +clean_dt_ret: + cpsw_remove_dt(cpsw); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + return ret; +} + +static int cpsw_remove(struct platform_device *pdev) +{ + struct cpsw_common *cpsw = platform_get_drvdata(pdev); + int ret; + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); + return ret; + } + + cpsw_unregister_notifiers(cpsw); + cpsw_unregister_devlink(cpsw); + cpsw_unregister_ports(cpsw); + + cpts_release(cpsw->cpts); + cpdma_ctlr_destroy(cpsw->dma); + cpsw_remove_dt(cpsw); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return 0; +} + +static struct platform_driver cpsw_driver = { + .driver = { + .name = "cpsw-switch", + .of_match_table = cpsw_of_mtable, + }, + .probe = cpsw_probe, + .remove = cpsw_remove, +}; + +module_platform_driver(cpsw_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver"); diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 476d050a022c..b833cc1d188c 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -5,20 +5,415 @@ * Copyright (C) 2019 Texas Instruments */ +#include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> +#include <linux/kmemleak.h> #include <linux/module.h> #include <linux/netdevice.h> +#include <linux/net_tstamp.h> +#include <linux/of.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/skbuff.h> +#include <net/page_pool.h> +#include <net/pkt_cls.h> +#include "cpsw.h" #include "cpts.h" #include "cpsw_ale.h" #include "cpsw_priv.h" #include "cpsw_sl.h" #include "davinci_cpdma.h" +int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv); + +void cpsw_intr_enable(struct cpsw_common *cpsw) +{ + writel_relaxed(0xFF, &cpsw->wr_regs->tx_en); + writel_relaxed(0xFF, &cpsw->wr_regs->rx_en); + + cpdma_ctlr_int_ctrl(cpsw->dma, true); +} + +void cpsw_intr_disable(struct cpsw_common *cpsw) +{ + writel_relaxed(0, &cpsw->wr_regs->tx_en); + writel_relaxed(0, &cpsw->wr_regs->rx_en); + + cpdma_ctlr_int_ctrl(cpsw->dma, false); +} + +void cpsw_tx_handler(void *token, int len, int status) +{ + struct cpsw_meta_xdp *xmeta; + struct xdp_frame *xdpf; + struct net_device *ndev; + struct netdev_queue *txq; + struct sk_buff *skb; + int 
ch; + + if (cpsw_is_xdpf_handle(token)) { + xdpf = cpsw_handle_to_xdpf(token); + xmeta = (void *)xdpf + CPSW_XMETA_OFFSET; + ndev = xmeta->ndev; + ch = xmeta->ch; + xdp_return_frame(xdpf); + } else { + skb = token; + ndev = skb->dev; + ch = skb_get_queue_mapping(skb); + cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb); + dev_kfree_skb_any(skb); + } + + /* Check whether the queue is stopped due to stalled tx dma, if the + * queue is stopped then start the queue as we have free desc for tx + */ + txq = netdev_get_tx_queue(ndev, ch); + if (unlikely(netif_tx_queue_stopped(txq))) + netif_tx_wake_queue(txq); + + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += len; +} + +irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id) +{ + struct cpsw_common *cpsw = dev_id; + + writel(0, &cpsw->wr_regs->tx_en); + cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX); + + if (cpsw->quirk_irq) { + disable_irq_nosync(cpsw->irqs_table[1]); + cpsw->tx_irq_disabled = true; + } + + napi_schedule(&cpsw->napi_tx); + return IRQ_HANDLED; +} + +irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) +{ + struct cpsw_common *cpsw = dev_id; + + cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); + writel(0, &cpsw->wr_regs->rx_en); + + if (cpsw->quirk_irq) { + disable_irq_nosync(cpsw->irqs_table[0]); + cpsw->rx_irq_disabled = true; + } + + napi_schedule(&cpsw->napi_rx); + return IRQ_HANDLED; +} + +int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget) +{ + struct cpsw_common *cpsw = napi_to_cpsw(napi_tx); + int num_tx, cur_budget, ch; + u32 ch_map; + struct cpsw_vector *txv; + + /* process every unprocessed channel */ + ch_map = cpdma_ctrl_txchs_state(cpsw->dma); + for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) { + if (!(ch_map & 0x80)) + continue; + + txv = &cpsw->txv[ch]; + if (unlikely(txv->budget > budget - num_tx)) + cur_budget = budget - num_tx; + else + cur_budget = txv->budget; + + num_tx += cpdma_chan_process(txv->ch, cur_budget); + if (num_tx >= budget) + break; + } + + if (num_tx < budget) { + napi_complete(napi_tx); + writel(0xff, &cpsw->wr_regs->tx_en); + } + + return num_tx; +} + +int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) +{ + struct cpsw_common *cpsw = napi_to_cpsw(napi_tx); + int num_tx; + + num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget); + if (num_tx < budget) { + napi_complete(napi_tx); + writel(0xff, &cpsw->wr_regs->tx_en); + if (cpsw->tx_irq_disabled) { + cpsw->tx_irq_disabled = false; + enable_irq(cpsw->irqs_table[1]); + } + } + + return num_tx; +} + +int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget) +{ + struct cpsw_common *cpsw = napi_to_cpsw(napi_rx); + int num_rx, cur_budget, ch; + u32 ch_map; + struct cpsw_vector *rxv; + + /* process every unprocessed channel */ + ch_map = cpdma_ctrl_rxchs_state(cpsw->dma); + for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) { + if (!(ch_map & 0x01)) + continue; + + rxv = &cpsw->rxv[ch]; + if (unlikely(rxv->budget > budget - num_rx)) + cur_budget = budget - num_rx; + else + cur_budget = rxv->budget; + + num_rx += cpdma_chan_process(rxv->ch, cur_budget); + if (num_rx >= budget) + break; + } + + if (num_rx < budget) { + napi_complete_done(napi_rx, num_rx); + writel(0xff, &cpsw->wr_regs->rx_en); + } + + return num_rx; +} + +int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) +{ + struct cpsw_common *cpsw = napi_to_cpsw(napi_rx); + int num_rx; + + num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget); + if (num_rx < budget) { + napi_complete_done(napi_rx, num_rx); + writel(0xff, &cpsw->wr_regs->rx_en); + if (cpsw->rx_irq_disabled) { 
+ cpsw->rx_irq_disabled = false; + enable_irq(cpsw->irqs_table[0]); + } + } + + return num_rx; +} + +void cpsw_rx_vlan_encap(struct sk_buff *skb) +{ + struct cpsw_priv *priv = netdev_priv(skb->dev); + u32 rx_vlan_encap_hdr = *((u32 *)skb->data); + struct cpsw_common *cpsw = priv->cpsw; + u16 vtag, vid, prio, pkt_type; + + /* Remove VLAN header encapsulation word */ + skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE); + + pkt_type = (rx_vlan_encap_hdr >> + CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) & + CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK; + /* Ignore unknown & Priority-tagged packets*/ + if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV || + pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG) + return; + + vid = (rx_vlan_encap_hdr >> + CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) & + VLAN_VID_MASK; + /* Ignore vid 0 and pass packet as is */ + if (!vid) + return; + + /* Untag P0 packets if set for vlan */ + if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) { + prio = (rx_vlan_encap_hdr >> + CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) & + CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK; + + vtag = (prio << VLAN_PRIO_SHIFT) | vid; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); + } + + /* strip vlan tag for VLAN-tagged packet */ + if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) { + memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); + skb_pull(skb, VLAN_HLEN); + } +} + +void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + slave_write(slave, mac_hi(priv->mac_addr), SA_HI); + slave_write(slave, mac_lo(priv->mac_addr), SA_LO); +} + +void soft_reset(const char *module, void __iomem *reg) +{ + unsigned long timeout = jiffies + HZ; + + writel_relaxed(1, reg); + do { + cpu_relax(); + } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies)); + + WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module); +} + +void cpsw_ndo_tx_timeout(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ch; + + cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n"); + ndev->stats.tx_errors++; + cpsw_intr_disable(cpsw); + for (ch = 0; ch < cpsw->tx_ch_num; ch++) { + cpdma_chan_stop(cpsw->txv[ch].ch); + cpdma_chan_start(cpsw->txv[ch].ch); + } + + cpsw_intr_enable(cpsw); + netif_trans_update(ndev); + netif_tx_wake_all_queues(ndev); +} + +static int cpsw_get_common_speed(struct cpsw_common *cpsw) +{ + int i, speed; + + for (i = 0, speed = 0; i < cpsw->data.slaves; i++) + if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link) + speed += cpsw->slaves[i].phy->speed; + + return speed; +} + +int cpsw_need_resplit(struct cpsw_common *cpsw) +{ + int i, rlim_ch_num; + int speed, ch_rate; + + /* re-split resources only in case speed was changed */ + speed = cpsw_get_common_speed(cpsw); + if (speed == cpsw->speed || !speed) + return 0; + + cpsw->speed = speed; + + for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) { + ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch); + if (!ch_rate) + break; + + rlim_ch_num++; + } + + /* cases not dependent on speed */ + if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num) + return 0; + + return 1; +} + +void cpsw_split_res(struct cpsw_common *cpsw) +{ + u32 consumed_rate = 0, bigest_rate = 0; + struct cpsw_vector *txv = cpsw->txv; + int i, ch_weight, rlim_ch_num = 0; + int budget, bigest_rate_ch = 0; + u32 ch_rate, max_rate; + int ch_budget = 0; + + for (i = 0; i < cpsw->tx_ch_num; i++) { + ch_rate = cpdma_chan_get_rate(txv[i].ch); + if (!ch_rate) + continue; + + rlim_ch_num++; + consumed_rate += ch_rate; + } + + 
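+	/* The NAPI budget is split proportionally to the requested channel
+	 * rates. E.g. if CPSW_POLL_WEIGHT is 64, the link speed is
+	 * 1000 Mbit/s and one of two TX channels is limited to 500 Mbit/s
+	 * (500000 kbit/s): max_rate = 1000000, the limited channel gets
+	 * 500000 * 64 / 1000000 = 32 budget and weight 50, and the remaining
+	 * 32 budget goes to the unlimited channel. Any leftover from integer
+	 * rounding is given back to a single channel at the end.
+	 */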
if (cpsw->tx_ch_num == rlim_ch_num) { + max_rate = consumed_rate; + } else if (!rlim_ch_num) { + ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num; + bigest_rate = 0; + max_rate = consumed_rate; + } else { + max_rate = cpsw->speed * 1000; + + /* if max_rate is less then expected due to reduced link speed, + * split proportionally according next potential max speed + */ + if (max_rate < consumed_rate) + max_rate *= 10; + + if (max_rate < consumed_rate) + max_rate *= 10; + + ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate; + ch_budget = (CPSW_POLL_WEIGHT - ch_budget) / + (cpsw->tx_ch_num - rlim_ch_num); + bigest_rate = (max_rate - consumed_rate) / + (cpsw->tx_ch_num - rlim_ch_num); + } + + /* split tx weight/budget */ + budget = CPSW_POLL_WEIGHT; + for (i = 0; i < cpsw->tx_ch_num; i++) { + ch_rate = cpdma_chan_get_rate(txv[i].ch); + if (ch_rate) { + txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate; + if (!txv[i].budget) + txv[i].budget++; + if (ch_rate > bigest_rate) { + bigest_rate_ch = i; + bigest_rate = ch_rate; + } + + ch_weight = (ch_rate * 100) / max_rate; + if (!ch_weight) + ch_weight++; + cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight); + } else { + txv[i].budget = ch_budget; + if (!bigest_rate_ch) + bigest_rate_ch = i; + cpdma_chan_set_weight(cpsw->txv[i].ch, 0); + } + + budget -= txv[i].budget; + } + + if (budget) + txv[bigest_rate_ch].budget += budget; + + /* split rx budget */ + budget = CPSW_POLL_WEIGHT; + ch_budget = budget / cpsw->rx_ch_num; + for (i = 0; i < cpsw->rx_ch_num; i++) { + cpsw->rxv[i].budget = ch_budget; + budget -= ch_budget; + } + + if (budget) + cpsw->rxv[0].budget += budget; +} + int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs, int ale_ageout, phys_addr_t desc_mem_phys, int descs_pool_size) @@ -28,6 +423,7 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs, struct cpsw_platform_data *data; struct cpdma_params dma_params; struct device *dev = cpsw->dev; + struct device_node *cpts_node; void __iomem *cpts_regs; int ret = 0, i; @@ -122,11 +518,859 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs, return -ENOMEM; } - cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node); + cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts"); + if (!cpts_node) + cpts_node = cpsw->dev->of_node; + + cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node); if (IS_ERR(cpsw->cpts)) { ret = PTR_ERR(cpsw->cpts); cpdma_ctlr_destroy(cpsw->dma); } + of_node_put(cpts_node); + + return ret; +} + +#if IS_ENABLED(CONFIG_TI_CPTS) + +static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + u32 ts_en, seq_id; + + if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) { + slave_write(slave, 0, CPSW1_TS_CTL); + return; + } + + seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588; + ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS; + + if (priv->tx_ts_enabled) + ts_en |= CPSW_V1_TS_TX_EN; + + if (priv->rx_ts_enabled) + ts_en |= CPSW_V1_TS_RX_EN; + + slave_write(slave, ts_en, CPSW1_TS_CTL); + slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE); +} + +static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 ctrl, mtype; + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + + ctrl = slave_read(slave, CPSW2_CONTROL); + switch (cpsw->version) { + case CPSW_VERSION_2: + ctrl &= ~CTRL_V2_ALL_TS_MASK; + + if 
(priv->tx_ts_enabled) + ctrl |= CTRL_V2_TX_TS_BITS; + + if (priv->rx_ts_enabled) + ctrl |= CTRL_V2_RX_TS_BITS; + break; + case CPSW_VERSION_3: + default: + ctrl &= ~CTRL_V3_ALL_TS_MASK; + + if (priv->tx_ts_enabled) + ctrl |= CTRL_V3_TX_TS_BITS; + + if (priv->rx_ts_enabled) + ctrl |= CTRL_V3_RX_TS_BITS; + break; + } + + mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS; + + slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE); + slave_write(slave, ctrl, CPSW2_CONTROL); + writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype); + writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype); +} + +static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + struct cpsw_priv *priv = netdev_priv(dev); + struct cpsw_common *cpsw = priv->cpsw; + struct hwtstamp_config cfg; + + if (cpsw->version != CPSW_VERSION_1 && + cpsw->version != CPSW_VERSION_2 && + cpsw->version != CPSW_VERSION_3) + return -EOPNOTSUPP; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + /* reserved for future extensions */ + if (cfg.flags) + return -EINVAL; + + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) + return -ERANGE; + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + priv->rx_ts_enabled = 0; + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_NTP_ALL: + return -ERANGE; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + default: + return -ERANGE; + } + + priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON; + + switch (cpsw->version) { + case CPSW_VERSION_1: + cpsw_hwtstamp_v1(priv); + break; + case CPSW_VERSION_2: + case CPSW_VERSION_3: + cpsw_hwtstamp_v2(priv); + break; + default: + WARN_ON(1); + } + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(dev); + struct cpsw_priv *priv = netdev_priv(dev); + struct hwtstamp_config cfg; + + if (cpsw->version != CPSW_VERSION_1 && + cpsw->version != CPSW_VERSION_2 && + cpsw->version != CPSW_VERSION_3) + return -EOPNOTSUPP; + + cfg.flags = 0; + cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + cfg.rx_filter = priv->rx_ts_enabled; + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; +} +#else +static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + return -EOPNOTSUPP; +} + +static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + return -EOPNOTSUPP; +} +#endif /*CONFIG_TI_CPTS*/ + +int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + struct cpsw_priv *priv = netdev_priv(dev); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); + + if (!netif_running(dev)) + return -EINVAL; + + switch (cmd) { + case SIOCSHWTSTAMP: + return cpsw_hwtstamp_set(dev, req); + case SIOCGHWTSTAMP: + return cpsw_hwtstamp_get(dev, req); + } + + if (!cpsw->slaves[slave_no].phy) + return -EOPNOTSUPP; + return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd); +} + +int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 min_rate; + u32 ch_rate; + int i, ret; + + ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate; + if (ch_rate == rate) + return 0; + + ch_rate = rate * 1000; + min_rate = cpdma_chan_get_min_rate(cpsw->dma); + if ((ch_rate < min_rate && ch_rate)) { + dev_err(priv->dev, "The channel rate cannot be less than %dMbps", + min_rate); + return -EINVAL; + } + + if (rate > cpsw->speed) { + dev_err(priv->dev, "The channel rate cannot be more than 2Gbps"); + return -EINVAL; + } + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate); + pm_runtime_put(cpsw->dev); + + if (ret) + return ret; + + /* update rates for slaves tx queues */ + for (i = 0; i < cpsw->data.slaves; i++) { + slave = &cpsw->slaves[i]; + if (!slave->ndev) + continue; + + netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate; + } + + cpsw_split_res(cpsw); + return ret; +} + +static int cpsw_tc_to_fifo(int tc, int num_tc) +{ + if (tc == num_tc - 1) + return 0; + + return CPSW_FIFO_SHAPERS_NUM - tc; +} + +bool cpsw_shp_is_off(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 shift, mask, val; + + val = readl_relaxed(&cpsw->regs->ptype); + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num; + mask = 7 << shift; + val = val & mask; + + return !val; +} + +static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 shift, mask, val; + + val = readl_relaxed(&cpsw->regs->ptype); + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num; + mask = (1 << --fifo) << shift; + val = on ? 
val | mask : val & ~mask; + + writel_relaxed(val, &cpsw->regs->ptype); +} + +static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw) +{ + struct cpsw_common *cpsw = priv->cpsw; + u32 val = 0, send_pct, shift; + struct cpsw_slave *slave; + int pct = 0, i; + + if (bw > priv->shp_cfg_speed * 1000) + goto err; + + /* shaping has to stay enabled for highest fifos linearly + * and fifo bw no more than the interface can allow + */ + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + send_pct = slave_read(slave, SEND_PERCENT); + for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) { + if (!bw) { + if (i >= fifo || !priv->fifo_bw[i]) + continue; + + dev_warn(priv->dev, "Prev FIFO%d is shaped", i); + continue; + } + + if (!priv->fifo_bw[i] && i > fifo) { + dev_err(priv->dev, "Upper FIFO%d is not shaped", i); + return -EINVAL; + } + + shift = (i - 1) * 8; + if (i == fifo) { + send_pct &= ~(CPSW_PCT_MASK << shift); + val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10); + if (!val) + val = 1; + + send_pct |= val << shift; + pct += val; + continue; + } + + if (priv->fifo_bw[i]) + pct += (send_pct >> shift) & CPSW_PCT_MASK; + } + + if (pct >= 100) + goto err; + + slave_write(slave, send_pct, SEND_PERCENT); + priv->fifo_bw[fifo] = bw; + + dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo, + DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100)); + + return 0; +err: + dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration"); + return -EINVAL; +} + +static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 tx_in_ctl_rg, val; + int ret; + + ret = cpsw_set_fifo_bw(priv, fifo, bw); + if (ret) + return ret; + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ? + CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL; + + if (!bw) + cpsw_fifo_shp_on(priv, fifo, bw); + + val = slave_read(slave, tx_in_ctl_rg); + if (cpsw_shp_is_off(priv)) { + /* disable FIFOs rate limited queues */ + val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT); + + /* set type of FIFO queues to normal priority mode */ + val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT); + + /* set type of FIFO queues to be rate limited */ + if (bw) + val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT; + else + priv->shp_cfg_speed = 0; + } + + /* toggle a FIFO rate limited queue */ + if (bw) + val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); + else + val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); + slave_write(slave, val, tx_in_ctl_rg); + + /* FIFO transmit shape enable */ + cpsw_fifo_shp_on(priv, fifo, bw); + return 0; +} + +/* Defaults: + * class A - prio 3 + * class B - prio 2 + * shaping for class A should be set first + */ +static int cpsw_set_cbs(struct net_device *ndev, + struct tc_cbs_qopt_offload *qopt) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int prev_speed = 0; + int tc, ret, fifo; + u32 bw = 0; + + tc = netdev_txq_to_tc(priv->ndev, qopt->queue); + + /* enable channels in backward order, as highest FIFOs must be rate + limited first and for compliance with CPDMA rate limited channels + that are also used in backward order. FIFO0 cannot be rate limited.
+ */ + fifo = cpsw_tc_to_fifo(tc, ndev->num_tc); + if (!fifo) { + dev_err(priv->dev, "Last tc%d can't be rate limited", tc); + return -EINVAL; + } + + /* do nothing, it's disabled anyway */ + if (!qopt->enable && !priv->fifo_bw[fifo]) + return 0; + + /* shapers can be set if link speed is known */ + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + if (slave->phy && slave->phy->link) { + if (priv->shp_cfg_speed && + priv->shp_cfg_speed != slave->phy->speed) + prev_speed = priv->shp_cfg_speed; + + priv->shp_cfg_speed = slave->phy->speed; + } + + if (!priv->shp_cfg_speed) { + dev_err(priv->dev, "Link speed is not known"); + return -1; + } + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + bw = qopt->enable ? qopt->idleslope : 0; + ret = cpsw_set_fifo_rlimit(priv, fifo, bw); + if (ret) { + priv->shp_cfg_speed = prev_speed; + prev_speed = 0; + } + + if (bw && prev_speed) + dev_warn(priv->dev, + "Speed was changed, CBS shaper speeds are changed!"); + + pm_runtime_put_sync(cpsw->dev); + return ret; +} + +static int cpsw_set_mqprio(struct net_device *ndev, void *type_data) +{ + struct tc_mqprio_qopt_offload *mqprio = type_data; + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int fifo, num_tc, count, offset; + struct cpsw_slave *slave; + u32 tx_prio_map = 0; + int i, tc, ret; + + num_tc = mqprio->qopt.num_tc; + if (num_tc > CPSW_TC_NUM) + return -EINVAL; + + if (mqprio->mode != TC_MQPRIO_MODE_DCB) + return -EINVAL; + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + if (num_tc) { + for (i = 0; i < 8; i++) { + tc = mqprio->qopt.prio_tc_map[i]; + fifo = cpsw_tc_to_fifo(tc, num_tc); + tx_prio_map |= fifo << (4 * i); + } + + netdev_set_num_tc(ndev, num_tc); + for (i = 0; i < num_tc; i++) { + count = mqprio->qopt.count[i]; + offset = mqprio->qopt.offset[i]; + netdev_set_tc_queue(ndev, i, count, offset); + } + } + + if (!mqprio->qopt.hw) { + /* restore default configuration */ + netdev_reset_tc(ndev); + tx_prio_map = TX_PRIORITY_MAPPING; + } + + priv->mqprio_hw = mqprio->qopt.hw; + + offset = cpsw->version == CPSW_VERSION_1 ? + CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP; + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + slave_write(slave, tx_prio_map, offset); + + pm_runtime_put_sync(cpsw->dev); + + return 0; +} + +int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_CBS: + return cpsw_set_cbs(ndev, type_data); + + case TC_SETUP_QDISC_MQPRIO: + return cpsw_set_mqprio(ndev, type_data); + + default: + return -EOPNOTSUPP; + } +} + +void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + int fifo, bw; + + for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) { + bw = priv->fifo_bw[fifo]; + if (!bw) + continue; + + cpsw_set_fifo_rlimit(priv, fifo, bw); + } +} + +void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + u32 tx_prio_map = 0; + int i, tc, fifo; + u32 tx_prio_rg; + + if (!priv->mqprio_hw) + return; + + for (i = 0; i < 8; i++) { + tc = netdev_get_prio_tc_map(priv->ndev, i); + fifo = CPSW_FIFO_SHAPERS_NUM - tc; + tx_prio_map |= fifo << (4 * i); + } + + tx_prio_rg = cpsw->version == CPSW_VERSION_1 ? 
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP; + + slave_write(slave, tx_prio_map, tx_prio_rg); +} + +int cpsw_fill_rx_channels(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_meta_xdp *xmeta; + struct page_pool *pool; + struct page *page; + int ch_buf_num; + int ch, i, ret; + dma_addr_t dma; + + for (ch = 0; ch < cpsw->rx_ch_num; ch++) { + pool = cpsw->page_pool[ch]; + ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch); + for (i = 0; i < ch_buf_num; i++) { + page = page_pool_dev_alloc_pages(pool); + if (!page) { + cpsw_err(priv, ifup, "allocate rx page err\n"); + return -ENOMEM; + } + + xmeta = page_address(page) + CPSW_XMETA_OFFSET; + xmeta->ndev = priv->ndev; + xmeta->ch = ch; + + dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM; + ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch, + page, dma, + cpsw->rx_packet_max, + 0); + if (ret < 0) { + cpsw_err(priv, ifup, + "cannot submit page to channel %d rx, error %d\n", + ch, ret); + page_pool_recycle_direct(pool, page); + return ret; + } + } + + cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n", + ch, ch_buf_num); + } + + return 0; +} + +static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw, + int size) +{ + struct page_pool_params pp_params; + struct page_pool *pool; + + pp_params.order = 0; + pp_params.flags = PP_FLAG_DMA_MAP; + pp_params.pool_size = size; + pp_params.nid = NUMA_NO_NODE; + pp_params.dma_dir = DMA_BIDIRECTIONAL; + pp_params.dev = cpsw->dev; + + pool = page_pool_create(&pp_params); + if (IS_ERR(pool)) + dev_err(cpsw->dev, "cannot create rx page pool\n"); + + return pool; +} + +static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch) +{ + struct page_pool *pool; + int ret = 0, pool_size; + + pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch); + pool = cpsw_create_page_pool(cpsw, pool_size); + if (IS_ERR(pool)) + ret = PTR_ERR(pool); + else + cpsw->page_pool[ch] = pool; + + return ret; +} + +static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct xdp_rxq_info *rxq; + struct page_pool *pool; + int ret; + + pool = cpsw->page_pool[ch]; + rxq = &priv->xdp_rxq[ch]; + + ret = xdp_rxq_info_reg(rxq, priv->ndev, ch); + if (ret) + return ret; + + ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool); + if (ret) + xdp_rxq_info_unreg(rxq); + + return ret; +} + +static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch) +{ + struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch]; + + if (!xdp_rxq_info_is_reg(rxq)) + return; + + xdp_rxq_info_unreg(rxq); +} + +void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw) +{ + struct net_device *ndev; + int i, ch; + + for (ch = 0; ch < cpsw->rx_ch_num; ch++) { + for (i = 0; i < cpsw->data.slaves; i++) { + ndev = cpsw->slaves[i].ndev; + if (!ndev) + continue; + + cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch); + } + + page_pool_destroy(cpsw->page_pool[ch]); + cpsw->page_pool[ch] = NULL; + } +} + +int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw) +{ + struct net_device *ndev; + int i, ch, ret; + + for (ch = 0; ch < cpsw->rx_ch_num; ch++) { + ret = cpsw_create_rx_pool(cpsw, ch); + if (ret) + goto err_cleanup; + + /* using the same page pool is allowed as rx handlers never run + simultaneously for both ndevs + */ + for (i = 0; i < cpsw->data.slaves; i++) { + ndev = cpsw->slaves[i].ndev; + if (!ndev) + continue; + + ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch); + if (ret) + goto err_cleanup; + } + } + + return 0; + +err_cleanup: +
cpsw_destroy_xdp_rxqs(cpsw); + + return ret; +} + +static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf) +{ + struct bpf_prog *prog = bpf->prog; + + if (!priv->xdpi.prog && !prog) + return 0; + + if (!xdp_attachment_flags_ok(&priv->xdpi, bpf)) + return -EBUSY; + + WRITE_ONCE(priv->xdp_prog, prog); + + xdp_attachment_setup(&priv->xdpi, bpf); + + return 0; +} + +int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return cpsw_xdp_prog_setup(priv, bpf); + + case XDP_QUERY_PROG: + return xdp_attachment_query(&priv->xdpi, bpf); + + default: + return -EINVAL; + } +} + +int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf, + struct page *page, int port) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_meta_xdp *xmeta; + struct cpdma_chan *txch; + dma_addr_t dma; + int ret; + + xmeta = (void *)xdpf + CPSW_XMETA_OFFSET; + xmeta->ndev = priv->ndev; + xmeta->ch = 0; + txch = cpsw->txv[0].ch; + + if (page) { + dma = page_pool_get_dma_addr(page); + dma += xdpf->headroom + sizeof(struct xdp_frame); + ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf), + dma, xdpf->len, port); + } else { + if (sizeof(*xmeta) > xdpf->headroom) { + xdp_return_frame_rx_napi(xdpf); + return -EINVAL; + } + + ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf), + xdpf->data, xdpf->len, port); + } + + if (ret) { + priv->ndev->stats.tx_dropped++; + xdp_return_frame_rx_napi(xdpf); + } + + return ret; +} + +int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp, + struct page *page, int port) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct net_device *ndev = priv->ndev; + int ret = CPSW_XDP_CONSUMED; + struct xdp_frame *xdpf; + struct bpf_prog *prog; + u32 act; + + rcu_read_lock(); + + prog = READ_ONCE(priv->xdp_prog); + if (!prog) { + ret = CPSW_XDP_PASS; + goto out; + } + + act = bpf_prog_run_xdp(prog, xdp); + switch (act) { + case XDP_PASS: + ret = CPSW_XDP_PASS; + break; + case XDP_TX: + xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpf)) + goto drop; + + cpsw_xdp_tx_frame(priv, xdpf, page, port); + break; + case XDP_REDIRECT: + if (xdp_do_redirect(ndev, xdp, prog)) + goto drop; + + /* Have to flush here, per packet, instead of doing it in bulk + at the end of the napi handler. The RX devices on this + particular hardware are sharing a common queue, so the + incoming device might change per packet.
+ */ + xdp_do_flush_map(); + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fall through */ + case XDP_ABORTED: + trace_xdp_exception(ndev, prog, act); + /* fall through -- handle aborts by dropping packet */ + case XDP_DROP: + goto drop; + } +out: + rcu_read_unlock(); + return ret; +drop: + rcu_read_unlock(); + page_pool_recycle_direct(cpsw->page_pool[ch], page); return ret; } diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index 362c5a986869..bc726356a72c 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -54,6 +54,7 @@ do { \ #define HOST_PORT_NUM 0 #define CPSW_ALE_PORTS_NUM 3 +#define CPSW_SLAVE_PORTS_NUM 2 #define SLIVER_SIZE 0x40 #define CPSW1_HOST_PORT_OFFSET 0x028 @@ -65,6 +66,7 @@ do { \ #define CPSW1_CPTS_OFFSET 0x500 #define CPSW1_ALE_OFFSET 0x600 #define CPSW1_SLIVER_OFFSET 0x700 +#define CPSW1_WR_OFFSET 0x900 #define CPSW2_HOST_PORT_OFFSET 0x108 #define CPSW2_SLAVE_OFFSET 0x200 @@ -76,6 +78,7 @@ do { \ #define CPSW2_ALE_OFFSET 0xd00 #define CPSW2_SLIVER_OFFSET 0xd80 #define CPSW2_BD_OFFSET 0x2000 +#define CPSW2_WR_OFFSET 0x1200 #define CPDMA_RXTHRESH 0x0c0 #define CPDMA_RXFREE 0x0e0 @@ -113,12 +116,15 @@ do { \ #define IRQ_NUM 2 #define CPSW_MAX_QUEUES 8 #define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256 +#define CPSW_ALE_AGEOUT_DEFAULT 10 /* sec */ +#define CPSW_ALE_NUM_ENTRIES 1024 #define CPSW_FIFO_QUEUE_TYPE_SHIFT 16 #define CPSW_FIFO_SHAPE_EN_SHIFT 16 #define CPSW_FIFO_RATE_EN_SHIFT 20 #define CPSW_TC_NUM 4 #define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1) #define CPSW_PCT_MASK 0x7f +#define CPSW_BD_RAM_SIZE 0x2000 #define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29 #define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0) @@ -275,10 +281,11 @@ struct cpsw_slave_data { struct device_node *slave_node; struct device_node *phy_node; char phy_id[MII_BUS_ID_SIZE]; - int phy_if; + phy_interface_t phy_if; u8 mac_addr[ETH_ALEN]; u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */ struct phy *ifphy; + bool disabled; }; struct cpsw_platform_data { @@ -286,9 +293,9 @@ struct cpsw_platform_data { u32 ss_reg_ofs; /* Subsystem control register offset */ u32 channels; /* number of cpdma channels (symmetric) */ u32 slaves; /* number of slave cpgmac ports */ - u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */ + u32 active_slave;/* time stamping, ethtool and SIOCGMIIPHY slave */ u32 ale_entries; /* ale table size */ - u32 bd_ram_size; /*buffer descriptor ram size */ + u32 bd_ram_size; /*buffer descriptor ram size */ u32 mac_control; /* Mac control register */ u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/ bool dual_emac; /* Enable Dual EMAC mode */ @@ -344,10 +351,15 @@ struct cpsw_common { bool tx_irq_disabled; u32 irqs_table[IRQ_NUM]; struct cpts *cpts; + struct devlink *devlink; int rx_ch_num, tx_ch_num; int speed; int usage_count; struct page_pool *page_pool[CPSW_MAX_QUEUES]; + u8 br_members; + struct net_device *hw_bridge_dev; + bool ale_bypass; + u8 base_mac[ETH_ALEN]; }; struct cpsw_priv { @@ -368,19 +380,14 @@ struct cpsw_priv { u32 emac_port; struct cpsw_common *cpsw; + int offload_fwd_mark; }; #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw) #define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi) -#define cpsw_slave_index(cpsw, priv) \ - ((cpsw->data.dual_emac) ? 
priv->emac_port : \ - cpsw->data.active_slave) - -static inline int cpsw_get_slave_port(u32 slave_num) -{ - return slave_num + 1; -} +extern int (*cpsw_slave_index)(struct cpsw_common *cpsw, + struct cpsw_priv *priv); struct addr_sync_ctx { struct net_device *ndev; @@ -389,6 +396,35 @@ struct addr_sync_ctx { int flush; /* flush flag */ }; +#define CPSW_XMETA_OFFSET ALIGN(sizeof(struct xdp_frame), sizeof(long)) + +#define CPSW_XDP_CONSUMED 1 +#define CPSW_XDP_PASS 0 + +struct __aligned(sizeof(long)) cpsw_meta_xdp { + struct net_device *ndev; + int ch; +}; + +/* The buf includes headroom compatible with both skb and xdpf */ +#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN) +#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long)) + +static inline int cpsw_is_xdpf_handle(void *handle) +{ + return (unsigned long)handle & BIT(0); +} + +static inline void *cpsw_xdpf_to_handle(struct xdp_frame *xdpf) +{ + return (void *)((unsigned long)xdpf | BIT(0)); +} + +static inline struct xdp_frame *cpsw_handle_to_xdpf(void *handle) +{ + return (struct xdp_frame *)((unsigned long)handle & ~BIT(0)); +} + int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs, int ale_ageout, phys_addr_t desc_mem_phys, int descs_pool_size); @@ -399,6 +435,29 @@ void cpsw_intr_disable(struct cpsw_common *cpsw); void cpsw_tx_handler(void *token, int len, int status); int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw); void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw); +int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf); +int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf, + struct page *page, int port); +int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp, + struct page *page, int port); +irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id); +irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id); +int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget); +int cpsw_tx_poll(struct napi_struct *napi_tx, int budget); +int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget); +int cpsw_rx_poll(struct napi_struct *napi_rx, int budget); +void cpsw_rx_vlan_encap(struct sk_buff *skb); +void soft_reset(const char *module, void __iomem *reg); +void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv); +void cpsw_ndo_tx_timeout(struct net_device *ndev); +int cpsw_need_resplit(struct cpsw_common *cpsw); +int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd); +int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate); +int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data); +bool cpsw_shp_is_off(struct cpsw_priv *priv); +void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv); +void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv); /* ethtool */ u32 cpsw_get_msglevel(struct net_device *ndev); diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c new file mode 100644 index 000000000000..985a929bb957 --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw_switchdev.c @@ -0,0 +1,589 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Texas Instruments switchdev Driver + * + * Copyright (C) 2019 Texas Instruments + * + */ + +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <linux/netdevice.h> +#include <linux/workqueue.h> +#include <net/switchdev.h> + +#include "cpsw.h" +#include "cpsw_ale.h" +#include "cpsw_priv.h" +#include "cpsw_switchdev.h" + +struct 
cpsw_switchdev_event_work { + struct work_struct work; + struct switchdev_notifier_fdb_info fdb_info; + struct cpsw_priv *priv; + unsigned long event; +}; + +static int cpsw_port_stp_state_set(struct cpsw_priv *priv, + struct switchdev_trans *trans, u8 state) +{ + struct cpsw_common *cpsw = priv->cpsw; + u8 cpsw_state; + int ret = 0; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + switch (state) { + case BR_STATE_FORWARDING: + cpsw_state = ALE_PORT_STATE_FORWARD; + break; + case BR_STATE_LEARNING: + cpsw_state = ALE_PORT_STATE_LEARN; + break; + case BR_STATE_DISABLED: + cpsw_state = ALE_PORT_STATE_DISABLE; + break; + case BR_STATE_LISTENING: + case BR_STATE_BLOCKING: + cpsw_state = ALE_PORT_STATE_BLOCK; + break; + default: + return -EOPNOTSUPP; + } + + ret = cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_STATE, cpsw_state); + dev_dbg(priv->dev, "ale state: %u\n", cpsw_state); + + return ret; +} + +static int cpsw_port_attr_br_flags_set(struct cpsw_priv *priv, + struct switchdev_trans *trans, + struct net_device *orig_dev, + unsigned long brport_flags) +{ + struct cpsw_common *cpsw = priv->cpsw; + bool unreg_mcast_add = false; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + if (brport_flags & BR_MCAST_FLOOD) + unreg_mcast_add = true; + dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n", + unreg_mcast_add, priv->emac_port); + + cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(priv->emac_port), + unreg_mcast_add); + + return 0; +} + +static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev, + struct switchdev_trans *trans, + unsigned long flags) +{ + if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD)) + return -EINVAL; + + return 0; +} + +static int cpsw_port_attr_set(struct net_device *ndev, + const struct switchdev_attr *attr, + struct switchdev_trans *trans) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + dev_dbg(priv->dev, "attr: id %u port: %u\n", attr->id, priv->emac_port); + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + ret = cpsw_port_attr_br_flags_pre_set(ndev, trans, + attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + ret = cpsw_port_stp_state_set(priv, trans, attr->u.stp_state); + dev_dbg(priv->dev, "stp state: %u\n", attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + ret = cpsw_port_attr_br_flags_set(priv, trans, attr->orig_dev, + attr->u.brport_flags); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static u16 cpsw_get_pvid(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + u32 __iomem *port_vlan_reg; + u32 pvid; + + if (priv->emac_port) { + int reg = CPSW2_PORT_VLAN; + + if (cpsw->version == CPSW_VERSION_1) + reg = CPSW1_PORT_VLAN; + pvid = slave_read(cpsw->slaves + (priv->emac_port - 1), reg); + } else { + port_vlan_reg = &cpsw->host_port_regs->port_vlan; + pvid = readl(port_vlan_reg); + } + + pvid = pvid & 0xfff; + + return pvid; +} + +static void cpsw_set_pvid(struct cpsw_priv *priv, u16 vid, bool cfi, u32 cos) +{ + struct cpsw_common *cpsw = priv->cpsw; + void __iomem *port_vlan_reg; + u32 pvid; + + pvid = vid; + pvid |= cfi ? 
BIT(12) : 0; + pvid |= (cos & 0x7) << 13; + + if (priv->emac_port) { + int reg = CPSW2_PORT_VLAN; + + if (cpsw->version == CPSW_VERSION_1) + reg = CPSW1_PORT_VLAN; + /* no barrier */ + slave_write(cpsw->slaves + (priv->emac_port - 1), pvid, reg); + } else { + /* CPU port */ + port_vlan_reg = &cpsw->host_port_regs->port_vlan; + writel(pvid, port_vlan_reg); + } +} + +static int cpsw_port_vlan_add(struct cpsw_priv *priv, bool untag, bool pvid, + u16 vid, struct net_device *orig_dev) +{ + bool cpu_port = netif_is_bridge_master(orig_dev); + struct cpsw_common *cpsw = priv->cpsw; + int unreg_mcast_mask = 0; + int reg_mcast_mask = 0; + int untag_mask = 0; + int port_mask; + int ret = 0; + u32 flags; + + if (cpu_port) { + port_mask = BIT(HOST_PORT_NUM); + flags = orig_dev->flags; + unreg_mcast_mask = port_mask; + } else { + port_mask = BIT(priv->emac_port); + flags = priv->ndev->flags; + } + + if (flags & IFF_MULTICAST) + reg_mcast_mask = port_mask; + + if (untag) + untag_mask = port_mask; + + ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask, + reg_mcast_mask, unreg_mcast_mask); + if (ret) { + dev_err(priv->dev, "Unable to add vlan\n"); + return ret; + } + + if (cpu_port) + cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN, vid); + if (!pvid) + return ret; + + cpsw_set_pvid(priv, vid, 0, 0); + + dev_dbg(priv->dev, "VID add: %s: vid:%u ports:%X\n", + priv->ndev->name, vid, port_mask); + return ret; +} + +static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid, + struct net_device *orig_dev) +{ + bool cpu_port = netif_is_bridge_master(orig_dev); + struct cpsw_common *cpsw = priv->cpsw; + int port_mask; + int ret = 0; + + if (cpu_port) + port_mask = BIT(HOST_PORT_NUM); + else + port_mask = BIT(priv->emac_port); + + ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask); + if (ret != 0) + return ret; + + /* We don't care for the return value here, error is returned only if + * the unicast entry is not present + */ + if (cpu_port) + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN, vid); + + if (vid == cpsw_get_pvid(priv)) + cpsw_set_pvid(priv, 0, 0, 0); + + /* We don't care for the return value here, error is returned only if + * the multicast entry is not present + */ + cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, + port_mask, ALE_VLAN, vid); + dev_dbg(priv->dev, "VID del: %s: vid:%u ports:%X\n", + priv->ndev->name, vid, port_mask); + + return ret; +} + +static int cpsw_port_vlans_add(struct cpsw_priv *priv, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + struct net_device *orig_dev = vlan->obj.orig_dev; + bool cpu_port = netif_is_bridge_master(orig_dev); + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + u16 vid; + + dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n", + priv->ndev->name, vlan->vid_begin, vlan->flags); + + if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY)) + return 0; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + int err; + + err = cpsw_port_vlan_add(priv, untag, pvid, vid, orig_dev); + if (err) + return err; + } + + return 0; +} + +static int cpsw_port_vlans_del(struct cpsw_priv *priv, + const struct switchdev_obj_port_vlan *vlan) + +{ + struct net_device *orig_dev = vlan->obj.orig_dev; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + int err; + + err = cpsw_port_vlan_del(priv, vid, orig_dev); + if (err) + 
return err; + } + + return 0; +} + +static int cpsw_port_mdb_add(struct cpsw_priv *priv, + struct switchdev_obj_port_mdb *mdb, + struct switchdev_trans *trans) + +{ + struct net_device *orig_dev = mdb->obj.orig_dev; + bool cpu_port = netif_is_bridge_master(orig_dev); + struct cpsw_common *cpsw = priv->cpsw; + int port_mask; + int err; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + if (cpu_port) + port_mask = BIT(HOST_PORT_NUM); + else + port_mask = BIT(priv->emac_port); + + err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask, + ALE_VLAN, mdb->vid, 0); + dev_dbg(priv->dev, "MDB add: %s: vid %u:%pM ports: %X\n", + priv->ndev->name, mdb->vid, mdb->addr, port_mask); + + return err; +} + +static int cpsw_port_mdb_del(struct cpsw_priv *priv, + struct switchdev_obj_port_mdb *mdb) + +{ + struct net_device *orig_dev = mdb->obj.orig_dev; + bool cpu_port = netif_is_bridge_master(orig_dev); + struct cpsw_common *cpsw = priv->cpsw; + int del_mask; + int err; + + if (cpu_port) + del_mask = BIT(HOST_PORT_NUM); + else + del_mask = BIT(priv->emac_port); + + err = cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask, + ALE_VLAN, mdb->vid); + dev_dbg(priv->dev, "MDB del: %s: vid %u:%pM ports: %X\n", + priv->ndev->name, mdb->vid, mdb->addr, del_mask); + + return err; +} + +static int cpsw_port_obj_add(struct net_device *ndev, + const struct switchdev_obj *obj, + struct switchdev_trans *trans, + struct netlink_ext_ack *extack) +{ + struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj); + struct cpsw_priv *priv = netdev_priv(ndev); + int err = 0; + + dev_dbg(priv->dev, "obj_add: id %u port: %u\n", + obj->id, priv->emac_port); + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = cpsw_port_vlans_add(priv, vlan, trans); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = cpsw_port_mdb_add(priv, mdb, trans); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int cpsw_port_obj_del(struct net_device *ndev, + const struct switchdev_obj *obj) +{ + struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj); + struct cpsw_priv *priv = netdev_priv(ndev); + int err = 0; + + dev_dbg(priv->dev, "obj_del: id %u port: %u\n", + obj->id, priv->emac_port); + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = cpsw_port_vlans_del(priv, vlan); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = cpsw_port_mdb_del(priv, mdb); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static void cpsw_fdb_offload_notify(struct net_device *ndev, + struct switchdev_notifier_fdb_info *rcv) +{ + struct switchdev_notifier_fdb_info info; + + info.addr = rcv->addr; + info.vid = rcv->vid; + info.offloaded = true; + call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, + ndev, &info.info, NULL); +} + +static void cpsw_switchdev_event_work(struct work_struct *work) +{ + struct cpsw_switchdev_event_work *switchdev_work = + container_of(work, struct cpsw_switchdev_event_work, work); + struct cpsw_priv *priv = switchdev_work->priv; + struct switchdev_notifier_fdb_info *fdb; + struct cpsw_common *cpsw = priv->cpsw; + int port = priv->emac_port; + + rtnl_lock(); + switch (switchdev_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + fdb = &switchdev_work->fdb_info; + + dev_dbg(cpsw->dev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port 
%d\n", + fdb->addr, fdb->vid, fdb->added_by_user, + fdb->offloaded, port); + + if (!fdb->added_by_user) + break; + if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0) + port = HOST_PORT_NUM; + + cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port, + fdb->vid ? ALE_VLAN : 0, fdb->vid); + cpsw_fdb_offload_notify(priv->ndev, fdb); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + fdb = &switchdev_work->fdb_info; + + dev_dbg(cpsw->dev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n", + fdb->addr, fdb->vid, fdb->added_by_user, + fdb->offloaded, port); + + if (!fdb->added_by_user) + break; + if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0) + port = HOST_PORT_NUM; + + cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port, + fdb->vid ? ALE_VLAN : 0, fdb->vid); + break; + default: + break; + } + rtnl_unlock(); + + kfree(switchdev_work->fdb_info.addr); + kfree(switchdev_work); + dev_put(priv->ndev); +} + +/* called under rcu_read_lock() */ +static int cpsw_switchdev_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *ndev = switchdev_notifier_info_to_dev(ptr); + struct switchdev_notifier_fdb_info *fdb_info = ptr; + struct cpsw_switchdev_event_work *switchdev_work; + struct cpsw_priv *priv = netdev_priv(ndev); + int err; + + if (event == SWITCHDEV_PORT_ATTR_SET) { + err = switchdev_handle_port_attr_set(ndev, ptr, + cpsw_port_dev_check, + cpsw_port_attr_set); + return notifier_from_errno(err); + } + + if (!cpsw_port_dev_check(ndev)) + return NOTIFY_DONE; + + switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); + if (WARN_ON(!switchdev_work)) + return NOTIFY_BAD; + + INIT_WORK(&switchdev_work->work, cpsw_switchdev_event_work); + switchdev_work->priv = priv; + switchdev_work->event = event; + + switch (event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + memcpy(&switchdev_work->fdb_info, ptr, + sizeof(switchdev_work->fdb_info)); + switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); + if (!switchdev_work->fdb_info.addr) + goto err_addr_alloc; + ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, + fdb_info->addr); + dev_hold(ndev); + break; + default: + kfree(switchdev_work); + return NOTIFY_DONE; + } + + queue_work(system_long_wq, &switchdev_work->work); + + return NOTIFY_DONE; + +err_addr_alloc: + kfree(switchdev_work); + return NOTIFY_BAD; +} + +static struct notifier_block cpsw_switchdev_notifier = { + .notifier_call = cpsw_switchdev_event, +}; + +static int cpsw_switchdev_blocking_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = switchdev_handle_port_obj_add(dev, ptr, + cpsw_port_dev_check, + cpsw_port_obj_add); + return notifier_from_errno(err); + case SWITCHDEV_PORT_OBJ_DEL: + err = switchdev_handle_port_obj_del(dev, ptr, + cpsw_port_dev_check, + cpsw_port_obj_del); + return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + cpsw_port_dev_check, + cpsw_port_attr_set); + return notifier_from_errno(err); + default: + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block cpsw_switchdev_bl_notifier = { + .notifier_call = cpsw_switchdev_blocking_event, +}; + +int cpsw_switchdev_register_notifiers(struct cpsw_common *cpsw) +{ + int ret = 0; + + ret = register_switchdev_notifier(&cpsw_switchdev_notifier); + if (ret) { + dev_err(cpsw->dev, "register 
switchdev notifier failed, ret:%d\n", + ret); + return ret; + } + + ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier); + if (ret) { + dev_err(cpsw->dev, "register switchdev blocking notifier failed, ret:%d\n", + ret); + unregister_switchdev_notifier(&cpsw_switchdev_notifier); + } + + return ret; +} + +void cpsw_switchdev_unregister_notifiers(struct cpsw_common *cpsw) +{ + unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier); + unregister_switchdev_notifier(&cpsw_switchdev_notifier); +} diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.h b/drivers/net/ethernet/ti/cpsw_switchdev.h new file mode 100644 index 000000000000..04a045dba7d4 --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw_switchdev.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Texas Instruments Ethernet Switch Driver + */ + +#ifndef DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_ +#define DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_ + +#include <net/switchdev.h> + +bool cpsw_port_dev_check(const struct net_device *dev); +int cpsw_switchdev_register_notifiers(struct cpsw_common *cpsw); +void cpsw_switchdev_unregister_notifiers(struct cpsw_common *cpsw); + +#endif /* DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_ */ diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 61136428e2c0..729ce09dded9 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -459,7 +459,7 @@ int cpts_register(struct cpts *cpts) cpts_write32(cpts, CPTS_EN, control); cpts_write32(cpts, TS_PEND_EN, int_enable); - timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real())); + timecounter_init(&cpts->tc, &cpts->cc, ktime_get_real_ns()); cpts->clock = ptp_clock_register(&cpts->info, cpts->dev); if (IS_ERR(cpts->clock)) { diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 2c1fac33136c..86a3f42a3dcc 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2291,6 +2291,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) struct gbe_slave *slave = gbe_intf->slave; phy_interface_t phy_mode; bool has_phy = false; + int err; void (*hndlr)(struct net_device *) = gbe_adjust_link; @@ -2320,11 +2321,11 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) slave->phy_port_t = PORT_MII; } else if (slave->link_interface == RGMII_LINK_MAC_PHY) { has_phy = true; - phy_mode = of_get_phy_mode(slave->node); + err = of_get_phy_mode(slave->node, &phy_mode); /* if phy-mode is not present, default to * PHY_INTERFACE_MODE_RGMII */ - if (phy_mode < 0) + if (err) phy_mode = PHY_INTERFACE_MODE_RGMII; if (!phy_interface_mode_is_rgmii(phy_mode)) { diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig index 8d994cebb6b0..6304ebd8b5c6 100644 --- a/drivers/net/ethernet/xilinx/Kconfig +++ b/drivers/net/ethernet/xilinx/Kconfig @@ -6,7 +6,6 @@ config NET_VENDOR_XILINX bool "Xilinx devices" default y - depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || ARM || COMPILE_TEST ---help--- If you have a network (Ethernet) card belonging to this class, say Y. @@ -26,11 +25,10 @@ config XILINX_EMACLITE config XILINX_AXI_EMAC tristate "Xilinx 10/100/1000 AXI Ethernet support" - depends on MICROBLAZE || X86 || ARM || COMPILE_TEST select PHYLINK ---help--- This driver supports the 10/100/1000 Ethernet from Xilinx for the - AXI bus interface used in Xilinx Virtex FPGAs. + AXI bus interface used in Xilinx Virtex FPGAs and SoCs.
config XILINX_LL_TEMAC tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 676006f32f91..8f32db6d2c45 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1761,11 +1761,9 @@ static int axienet_probe(struct platform_device *pdev) goto free_netdev; } } else { - lp->phy_mode = of_get_phy_mode(pdev->dev.of_node); - if ((int)lp->phy_mode < 0) { - ret = -EINVAL; + ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); + if (ret) goto free_netdev; - } } /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ @@ -1790,10 +1788,6 @@ static int axienet_probe(struct platform_device *pdev) /* Check for these resources directly on the Ethernet node. */ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) { - dev_err(&pdev->dev, "unable to get DMA memory resource\n"); - goto free_netdev; - } lp->dma_regs = devm_ioremap_resource(&pdev->dev, res); lp->rx_irq = platform_get_irq(pdev, 1); lp->tx_irq = platform_get_irq(pdev, 0);