author     Michael Chan <mchan@broadcom.com>        2015-10-22 16:01:17 -0400
committer  David S. Miller <davem@davemloft.net>    2015-10-22 19:30:33 -0700
commit     c0c050c58d840994ba842ad1c338a98e7c12b764 (patch)
tree       2e0b598883e15b64d7e1650bf74a2af21d5992d0 /drivers
parent     0a31adae0b2ab70127b2d5faa6a685e2ea70cc1b (diff)
bnxt_en: New Broadcom ethernet driver.
Broadcom ethernet driver for the new family of NetXtreme-C/E ethernet devices.

v5:
- Removed empty blank lines at end of files (noted by David Miller).
- Moved busy poll helper functions to bnxt.h to at least make the .c file
  look less cluttered with #ifdef (noted by Stephen Hemminger).

v4:
- Broke up 2 long message strings with "\n" (suggested by John Linville).
- Constify an array of strings (suggested by Stephen Hemminger).
- Improve bnxt_vf_pciid() (suggested by Stephen Hemminger).
- Use PCI_VDEVICE() to populate the pci_device_id table for more compact
  source.

v3:
- Fixed 2 more sparse warnings.
- Removed some unused structures in .h files.

v2:
- Fixed all kbuild test robot reported warnings.
- Fixed many of the checkpatch.pl errors and warnings.
- Fixed the Kconfig description (noted by Dmitry Kravkov).

Acked-by: Eddie Wai <eddie.wai@broadcom.com>
Acked-by: Jeffrey Huang <huangjw@broadcom.com>
Signed-off-by: Prashant Sreedharan <prashant@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig                19
-rw-r--r--  drivers/net/ethernet/broadcom/Makefile                1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/Makefile           3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c          5746
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h          1081
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  1149
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h    17
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h    104
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h      4046
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h   59
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c     787
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h      22
12 files changed, 13034 insertions, 0 deletions
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index e930aa9a3cfb..67a7d520d9f5 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -170,4 +170,23 @@ config SYSTEMPORT
Broadcom BCM7xxx Set Top Box family chipset using an internal
Ethernet switch.
+config BNXT
+ tristate "Broadcom NetXtreme-C/E support"
+ depends on PCI
+ select FW_LOADER
+ select LIBCRC32C
+ ---help---
+ This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
+ Ethernet cards. To compile this driver as a module, choose M here:
+ the module will be called bnxt_en. This is recommended.
+
+config BNXT_SRIOV
+ bool "Broadcom NetXtreme-C/E SR-IOV support"
+ depends on BNXT && PCI_IOV
+ default y
+ ---help---
+	  This configuration parameter enables Single Root Input/Output
+	  Virtualization (SR-IOV) support in the NetXtreme-C/E products. This
+	  allows for virtual function acceleration in virtual environments.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index e2a958a657e0..00584d78b3e0 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BGMAC) += bgmac.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
+obj-$(CONFIG_BNXT) += bnxt/
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
new file mode 100644
index 000000000000..97e78e217928
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_BNXT) += bnxt_en.o
+
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
new file mode 100644
index 000000000000..94b11a59e703
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -0,0 +1,5746 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <linux/time.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+#include <net/vxlan.h>
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/cache.h>
+#include <linux/log2.h>
+#include <linux/aer.h>
+#include <linux/bitmap.h>
+#include <linux/cpu_rmap.h>
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#define BNXT_TX_TIMEOUT (5 * HZ)
+
+static const char version[] =
+ "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
+#define BNXT_RX_COPY_THRESH 256
+
+#define BNXT_TX_PUSH_THRESH 92
+
+enum board_idx {
+ BCM57302,
+ BCM57304,
+ BCM57404,
+ BCM57406,
+ BCM57304_VF,
+ BCM57404_VF,
+};
+
+/* indexed by enum above */
+static const struct {
+ char *name;
+} board_info[] = {
+ { "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" },
+ { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
+ { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
+};
+
+static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
+ { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
+ { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
+ { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+#ifdef CONFIG_BNXT_SRIOV
+ { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
+#endif
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
+
+static const u16 bnxt_vf_req_snif[] = {
+ HWRM_FUNC_CFG,
+ HWRM_PORT_PHY_QCFG,
+ HWRM_CFA_L2_FILTER_ALLOC,
+};
+
+static bool bnxt_vf_pciid(enum board_idx idx)
+{
+ return (idx == BCM57304_VF || idx == BCM57404_VF);
+}
+
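+/* Completion ring doorbell helpers: each write combines a key that
+ * selects the ring type, validity/IRQ flags, and the consumer index
+ * masked by RING_CMP(). The REARM variant acks completions with the
+ * IRQ left enabled, while the DB_IRQ_DIS variants mask the ring IRQ.
+ */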
+#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
+#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
+#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
+
+#define BNXT_CP_DB_REARM(db, raw_cons) \
+ writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB(db, raw_cons) \
+ writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB_IRQ_DIS(db) \
+ writel(DB_CP_IRQ_DIS_FLAGS, db)
+
+static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+{
+ /* Tell compiler to fetch tx indices from memory. */
+ barrier();
+
+ return bp->tx_ring_size -
+ ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
+}
+
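+/* TX length hint lookup table for the hardware, indexed by the packet
+ * length in 512-byte units (length >> 9) in bnxt_start_xmit().
+ */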
+static const u16 bnxt_lhint_arr[] = {
+ TX_BD_FLAGS_LHINT_512_AND_SMALLER,
+ TX_BD_FLAGS_LHINT_512_TO_1023,
+ TX_BD_FLAGS_LHINT_1024_TO_2047,
+ TX_BD_FLAGS_LHINT_1024_TO_2047,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+};
+
+static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct tx_bd *txbd;
+ struct tx_bd_ext *txbd1;
+ struct netdev_queue *txq;
+ int i;
+ dma_addr_t mapping;
+ unsigned int length, pad = 0;
+ u32 len, free_size, vlan_tag_flags, cfa_action, flags;
+ u16 prod, last_frag;
+ struct pci_dev *pdev = bp->pdev;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_sw_tx_bd *tx_buf;
+
+ i = skb_get_queue_mapping(skb);
+ if (unlikely(i >= bp->tx_nr_rings)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(dev, i);
+ prod = txr->tx_prod;
+
+ free_size = bnxt_tx_avail(bp, txr);
+ if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
+ netif_tx_stop_queue(txq);
+ return NETDEV_TX_BUSY;
+ }
+
+ length = skb->len;
+ len = skb_headlen(skb);
+ last_frag = skb_shinfo(skb)->nr_frags;
+
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ txbd->tx_bd_opaque = prod;
+
+ tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf->skb = skb;
+ tx_buf->nr_frags = last_frag;
+
+ vlan_tag_flags = 0;
+ cfa_action = 0;
+ if (skb_vlan_tag_present(skb)) {
+ vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
+ skb_vlan_tag_get(skb);
+		/* Currently supports 802.1Q and 802.1ad VLAN offloads;
+		 * the QINQ1, QINQ2 and QINQ3 VLAN headers are deprecated.
+		 */
+ if (skb->vlan_proto == htons(ETH_P_8021Q))
+ vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
+ }
+
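+	/* TX push fast path: when the ring is empty and the packet fits
+	 * under the push threshold, copy the BDs and packet data straight
+	 * into the doorbell BAR with __iowrite64_copy() below, sparing
+	 * the device a separate DMA read of the buffer.
+	 */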
+ if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
+ struct tx_push_bd *push = txr->tx_push;
+ struct tx_bd *tx_push = &push->txbd1;
+ struct tx_bd_ext *tx_push1 = &push->txbd2;
+ void *pdata = tx_push1 + 1;
+ int j;
+
+ /* Set COAL_NOW to be ready quickly for the next push */
+ tx_push->tx_bd_len_flags_type =
+ cpu_to_le32((length << TX_BD_LEN_SHIFT) |
+ TX_BD_TYPE_LONG_TX_BD |
+ TX_BD_FLAGS_LHINT_512_AND_SMALLER |
+ TX_BD_FLAGS_COAL_NOW |
+ TX_BD_FLAGS_PACKET_END |
+ (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_push1->tx_bd_hsize_lflags =
+ cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+ else
+ tx_push1->tx_bd_hsize_lflags = 0;
+
+ tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+ tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+
+ skb_copy_from_linear_data(skb, pdata, len);
+ pdata += len;
+ for (j = 0; j < last_frag; j++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+ void *fptr;
+
+ fptr = skb_frag_address_safe(frag);
+ if (!fptr)
+ goto normal_tx;
+
+ memcpy(pdata, fptr, skb_frag_size(frag));
+ pdata += skb_frag_size(frag);
+ }
+
+ memcpy(txbd, tx_push, sizeof(*txbd));
+ prod = NEXT_TX(prod);
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ memcpy(txbd, tx_push1, sizeof(*txbd));
+ prod = NEXT_TX(prod);
+ push->doorbell =
+ cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
+ txr->tx_prod = prod;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ __iowrite64_copy(txr->tx_doorbell, push,
+ (length + sizeof(*push) + 8) / 8);
+
+ tx_buf->is_push = 1;
+
+ goto tx_done;
+ }
+
+normal_tx:
+ if (length < BNXT_MIN_PKT_SIZE) {
+ pad = BNXT_MIN_PKT_SIZE - length;
+ if (skb_pad(skb, pad)) {
+ /* SKB already freed. */
+ tx_buf->skb = NULL;
+ return NETDEV_TX_OK;
+ }
+ length = BNXT_MIN_PKT_SIZE;
+ }
+
+ mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
+ dev_kfree_skb_any(skb);
+ tx_buf->skb = NULL;
+ return NETDEV_TX_OK;
+ }
+
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
+ ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ prod = NEXT_TX(prod);
+ txbd1 = (struct tx_bd_ext *)
+ &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ txbd1->tx_bd_hsize_lflags = 0;
+ if (skb_is_gso(skb)) {
+ u32 hdr_len;
+
+ if (skb->encapsulation)
+ hdr_len = skb_inner_network_offset(skb) +
+ skb_inner_network_header_len(skb) +
+ inner_tcp_hdrlen(skb);
+ else
+ hdr_len = skb_transport_offset(skb) +
+ tcp_hdrlen(skb);
+
+ txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
+ TX_BD_FLAGS_T_IPID |
+ (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
+ length = skb_shinfo(skb)->gso_size;
+ txbd1->tx_bd_mss = cpu_to_le32(length);
+ length += hdr_len;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ txbd1->tx_bd_hsize_lflags =
+ cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+ txbd1->tx_bd_mss = 0;
+ }
+
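+	/* Convert the length to 512-byte units to index bnxt_lhint_arr. */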
+ length >>= 9;
+ flags |= bnxt_lhint_arr[length];
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+
+ txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+ txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+ for (i = 0; i < last_frag; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ prod = NEXT_TX(prod);
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ len = skb_frag_size(frag);
+ mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+ goto tx_dma_error;
+
+ tx_buf = &txr->tx_buf_ring[prod];
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
+
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ flags = len << TX_BD_LEN_SHIFT;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ }
+
+ flags &= ~TX_BD_LEN;
+ txbd->tx_bd_len_flags_type =
+ cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
+ TX_BD_FLAGS_PACKET_END);
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Sync BD data before updating doorbell */
+ wmb();
+
+ prod = NEXT_TX(prod);
+ txr->tx_prod = prod;
+
+ writel(DB_KEY_TX | prod, txr->tx_doorbell);
+ writel(DB_KEY_TX | prod, txr->tx_doorbell);
+
+tx_done:
+
+ mmiowb();
+
+ if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
+ netif_tx_stop_queue(txq);
+
+ /* netif_tx_stop_queue() must be done before checking
+ * tx index in bnxt_tx_avail() below, because in
+ * bnxt_tx_int(), we update tx index before checking for
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+ if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
+ netif_tx_wake_queue(txq);
+ }
+ return NETDEV_TX_OK;
+
+tx_dma_error:
+ last_frag = i;
+
+ /* start back at beginning and unmap skb */
+ prod = txr->tx_prod;
+ tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf->skb = NULL;
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ prod = NEXT_TX(prod);
+
+ /* unmap remaining mapped pages */
+ for (i = 0; i < last_frag; i++) {
+ prod = NEXT_TX(prod);
+ tx_buf = &txr->tx_buf_ring[prod];
+ dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
+ PCI_DMA_TODEVICE);
+ }
+
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+{
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ int index = bnapi->index;
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
+ u16 cons = txr->tx_cons;
+ struct pci_dev *pdev = bp->pdev;
+ int i;
+ unsigned int tx_bytes = 0;
+
+ for (i = 0; i < nr_pkts; i++) {
+ struct bnxt_sw_tx_bd *tx_buf;
+ struct sk_buff *skb;
+ int j, last;
+
+ tx_buf = &txr->tx_buf_ring[cons];
+ cons = NEXT_TX(cons);
+ skb = tx_buf->skb;
+ tx_buf->skb = NULL;
+
+ if (tx_buf->is_push) {
+ tx_buf->is_push = 0;
+ goto next_tx_int;
+ }
+
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ last = tx_buf->nr_frags;
+
+ for (j = 0; j < last; j++) {
+ cons = NEXT_TX(cons);
+ tx_buf = &txr->tx_buf_ring[cons];
+ dma_unmap_page(
+ &pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(&skb_shinfo(skb)->frags[j]),
+ PCI_DMA_TODEVICE);
+ }
+
+next_tx_int:
+ cons = NEXT_TX(cons);
+
+ tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+ }
+
+ netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
+ txr->tx_cons = cons;
+
+ /* Need to make the tx_cons update visible to bnxt_start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that bnxt_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(txq)) &&
+ (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
+ bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+ txr->dev_state != BNXT_DEV_STATE_CLOSING)
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
+ }
+}
+
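+/* Allocate an RX data buffer with kmalloc() and map it for DMA. The
+ * buffer is sized for build_skb(): headroom precedes the DMA area at
+ * BNXT_RX_DMA_OFFSET and the skb_shared_info lives at the tail.
+ */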
+static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
+ gfp_t gfp)
+{
+ u8 *data;
+ struct pci_dev *pdev = bp->pdev;
+
+ data = kmalloc(bp->rx_buf_size, gfp);
+ if (!data)
+ return NULL;
+
+ *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
+ bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+
+ if (dma_mapping_error(&pdev->dev, *mapping)) {
+ kfree(data);
+ data = NULL;
+ }
+ return data;
+}
+
+static inline int bnxt_alloc_rx_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+ u8 *data;
+ dma_addr_t mapping;
+
+ data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+ if (!data)
+ return -ENOMEM;
+
+ rx_buf->data = data;
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ return 0;
+}
+
+static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
+ u8 *data)
+{
+ u16 prod = rxr->rx_prod;
+ struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_bd *cons_bd, *prod_bd;
+
+ prod_rx_buf = &rxr->rx_buf_ring[prod];
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+ prod_rx_buf->data = data;
+
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
+
+ prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+
+ prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
+}
+
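+/* Find the next free slot in the aggregation buffer bitmap, wrapping
+ * around to the start when the search runs past the end.
+ */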
+static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
+{
+ u16 next, max = rxr->rx_agg_bmap_size;
+
+ next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
+ if (next >= max)
+ next = find_first_zero_bit(rxr->rx_agg_bmap, max);
+ return next;
+}
+
+static inline int bnxt_alloc_rx_page(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct rx_bd *rxbd =
+ &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ struct bnxt_sw_rx_agg_bd *rx_agg_buf;
+ struct pci_dev *pdev = bp->pdev;
+ struct page *page;
+ dma_addr_t mapping;
+ u16 sw_prod = rxr->rx_sw_agg_prod;
+
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+
+ mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&pdev->dev, mapping)) {
+ __free_page(page);
+ return -EIO;
+ }
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
+ rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
+
+ rx_agg_buf->page = page;
+ rx_agg_buf->mapping = mapping;
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+ rxbd->rx_bd_opaque = sw_prod;
+ return 0;
+}
+
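+/* Recycle the aggregation pages referenced by a run of completion
+ * entries back onto the aggregation producer ring, e.g. when a packet
+ * is aborted and its pages cannot be attached to an skb.
+ */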
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
+ u32 agg_bufs)
+{
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ u16 prod = rxr->rx_agg_prod;
+ u16 sw_prod = rxr->rx_sw_agg_prod;
+ u32 i;
+
+ for (i = 0; i < agg_bufs; i++) {
+ u16 cons;
+ struct rx_agg_cmp *agg;
+ struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_bd *prod_bd;
+ struct page *page;
+
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cons = agg->rx_agg_cmp_opaque;
+ __clear_bit(cons, rxr->rx_agg_bmap);
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
+ cons_rx_buf = &rxr->rx_agg_ring[cons];
+
+ /* It is possible for sw_prod to be equal to cons, so
+ * set cons_rx_buf->page to NULL first.
+ */
+ page = cons_rx_buf->page;
+ cons_rx_buf->page = NULL;
+ prod_rx_buf->page = page;
+
+ prod_rx_buf->mapping = cons_rx_buf->mapping;
+
+ prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+ prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
+ prod_bd->rx_bd_opaque = sw_prod;
+
+ prod = NEXT_RX_AGG(prod);
+ sw_prod = NEXT_RX_AGG(sw_prod);
+ cp_cons = NEXT_CMP(cp_cons);
+ }
+ rxr->rx_agg_prod = prod;
+ rxr->rx_sw_agg_prod = sw_prod;
+}
+
+static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr, u16 cons,
+ u16 prod, u8 *data, dma_addr_t dma_addr,
+ unsigned int len)
+{
+ int err;
+ struct sk_buff *skb;
+
+ err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+
+ skb = build_skb(data, 0);
+ dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+ if (!skb) {
+ kfree(data);
+ return NULL;
+ }
+
+ skb_reserve(skb, BNXT_RX_OFFSET);
+ skb_put(skb, len);
+ return skb;
+}
+
+static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct sk_buff *skb, u16 cp_cons,
+ u32 agg_bufs)
+{
+ struct pci_dev *pdev = bp->pdev;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ u16 prod = rxr->rx_agg_prod;
+ u32 i;
+
+ for (i = 0; i < agg_bufs; i++) {
+ u16 cons, frag_len;
+ struct rx_agg_cmp *agg;
+ struct bnxt_sw_rx_agg_bd *cons_rx_buf;
+ struct page *page;
+ dma_addr_t mapping;
+
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cons = agg->rx_agg_cmp_opaque;
+ frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
+ RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
+
+ cons_rx_buf = &rxr->rx_agg_ring[cons];
+ skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+ __clear_bit(cons, rxr->rx_agg_bmap);
+
+ /* It is possible for bnxt_alloc_rx_page() to allocate
+ * a sw_prod index that equals the cons index, so we
+ * need to clear the cons entry now.
+ */
+ mapping = dma_unmap_addr(cons_rx_buf, mapping);
+ page = cons_rx_buf->page;
+ cons_rx_buf->page = NULL;
+
+ if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ struct skb_shared_info *shinfo;
+ unsigned int nr_frags;
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = --shinfo->nr_frags;
+ __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
+
+ dev_kfree_skb(skb);
+
+ cons_rx_buf->page = page;
+
+ /* Update prod since possibly some pages have been
+ * allocated already.
+ */
+ rxr->rx_agg_prod = prod;
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
+ return NULL;
+ }
+
+ dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+
+ skb->data_len += frag_len;
+ skb->len += frag_len;
+ skb->truesize += PAGE_SIZE;
+
+ prod = NEXT_RX_AGG(prod);
+ cp_cons = NEXT_CMP(cp_cons);
+ }
+ rxr->rx_agg_prod = prod;
+ return skb;
+}
+
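+/* Advance the raw consumer index past agg_bufs entries and check that
+ * the last one is valid, i.e. the hardware has written the entire
+ * burst of aggregation completions.
+ */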
+static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ u8 agg_bufs, u32 *raw_cons)
+{
+ u16 last;
+ struct rx_agg_cmp *agg;
+
+ *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
+ last = RING_CMP(*raw_cons);
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
+ return RX_AGG_CMP_VALID(agg, *raw_cons);
+}
+
+static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
+ unsigned int len,
+ dma_addr_t mapping)
+{
+ struct bnxt *bp = bnapi->bp;
+ struct pci_dev *pdev = bp->pdev;
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(&bnapi->napi, len);
+ if (!skb)
+ return NULL;
+
+ dma_sync_single_for_cpu(&pdev->dev, mapping,
+ bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
+
+ memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
+
+ dma_sync_single_for_device(&pdev->dev, mapping,
+ bp->rx_copy_thresh,
+ PCI_DMA_FROMDEVICE);
+
+ skb_put(skb, len);
+ return skb;
+}
+
+static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ u8 agg_id = TPA_START_AGG_ID(tpa_start);
+ u16 cons, prod;
+ struct bnxt_tpa_info *tpa_info;
+ struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_bd *prod_bd;
+ dma_addr_t mapping;
+
+ cons = tpa_start->rx_tpa_start_cmp_opaque;
+ prod = rxr->rx_prod;
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ prod_rx_buf = &rxr->rx_buf_ring[prod];
+ tpa_info = &rxr->rx_tpa[agg_id];
+
+ prod_rx_buf->data = tpa_info->data;
+
+ mapping = tpa_info->mapping;
+ dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+
+ prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+ prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ tpa_info->data = cons_rx_buf->data;
+ cons_rx_buf->data = NULL;
+ tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
+
+ tpa_info->len =
+ le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
+ RX_TPA_START_CMP_LEN_SHIFT;
+ if (likely(TPA_START_HASH_VALID(tpa_start))) {
+ u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
+
+ tpa_info->hash_type = PKT_HASH_TYPE_L4;
+ tpa_info->gso_type = SKB_GSO_TCPV4;
+ /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+ if (hash_type == 3)
+ tpa_info->gso_type = SKB_GSO_TCPV6;
+ tpa_info->rss_hash =
+ le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
+ } else {
+ tpa_info->hash_type = PKT_HASH_TYPE_NONE;
+ tpa_info->gso_type = 0;
+ if (netif_msg_rx_err(bp))
+ netdev_warn(bp->dev, "TPA packet without valid hash\n");
+ }
+ tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
+ tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+
+ rxr->rx_prod = NEXT_RX(prod);
+ cons = NEXT_RX(cons);
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+ bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
+ rxr->rx_prod = NEXT_RX(rxr->rx_prod);
+ cons_rx_buf->data = NULL;
+}
+
+static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
+ u16 cp_cons, u32 agg_bufs)
+{
+ if (agg_bufs)
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+}
+
+#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
+#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
+
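+/* Fix up an aggregated (TPA) packet so the stack can complete it as
+ * GRO: position the network/transport headers using the payload
+ * offset from the TPA end completion, recompute the TCP pseudo-header
+ * checksum, and mark UDP-encapsulated tunnels in gso_type.
+ */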
+static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ struct sk_buff *skb)
+{
+ struct tcphdr *th;
+ int payload_off, tcp_opt_len = 0;
+ int len, nw_off;
+
+ NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end);
+ skb_shinfo(skb)->gso_size =
+ le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
+ skb_shinfo(skb)->gso_type = tpa_info->gso_type;
+ payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+ RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+ if (TPA_END_GRO_TS(tpa_end))
+ tcp_opt_len = 12;
+
+#ifdef CONFIG_INET
+ if (tpa_info->gso_type == SKB_GSO_TCPV4) {
+ struct iphdr *iph;
+
+ nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
+ ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ iph = ip_hdr(skb);
+ skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
+ len = skb->len - skb_transport_offset(skb);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
+ } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
+ struct ipv6hdr *iph;
+
+ nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
+ ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ iph = ipv6_hdr(skb);
+ skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
+ len = skb->len - skb_transport_offset(skb);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
+ } else {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+ tcp_gro_complete(skb);
+
+ if (nw_off) { /* tunnel */
+ struct udphdr *uh = NULL;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |=
+ SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+ }
+#endif
+ return skb;
+}
+
+static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ struct bnxt_napi *bnapi,
+ u32 *raw_cons,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ bool *agg_event)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ u8 agg_id = TPA_END_AGG_ID(tpa_end);
+ u8 *data, agg_bufs;
+ u16 cp_cons = RING_CMP(*raw_cons);
+ unsigned int len;
+ struct bnxt_tpa_info *tpa_info;
+ dma_addr_t mapping;
+ struct sk_buff *skb;
+
+ tpa_info = &rxr->rx_tpa[agg_id];
+ data = tpa_info->data;
+ prefetch(data);
+ len = tpa_info->len;
+ mapping = tpa_info->mapping;
+
+ agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+ RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
+ return ERR_PTR(-EBUSY);
+
+ *agg_event = true;
+ cp_cons = NEXT_CMP(cp_cons);
+ }
+
+ if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+ agg_bufs, (int)MAX_SKB_FRAGS);
+ return NULL;
+ }
+
+ if (len <= bp->rx_copy_thresh) {
+ skb = bnxt_copy_skb(bnapi, data, len, mapping);
+ if (!skb) {
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ return NULL;
+ }
+ } else {
+ u8 *new_data;
+ dma_addr_t new_mapping;
+
+ new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
+ if (!new_data) {
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ return NULL;
+ }
+
+ tpa_info->data = new_data;
+ tpa_info->mapping = new_mapping;
+
+ skb = build_skb(data, 0);
+ dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+
+ if (!skb) {
+ kfree(data);
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ return NULL;
+ }
+ skb_reserve(skb, BNXT_RX_OFFSET);
+ skb_put(skb, len);
+ }
+
+ if (agg_bufs) {
+ skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+ if (!skb) {
+ /* Page reuse already handled by bnxt_rx_pages(). */
+ return NULL;
+ }
+ }
+ skb->protocol = eth_type_trans(skb, bp->dev);
+
+ if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
+ skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
+
+ if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
+ netdev_features_t features = skb->dev->features;
+ u16 vlan_proto = tpa_info->metadata >>
+ RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+ if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ vlan_proto == ETH_P_8021Q) ||
+ ((features & NETIF_F_HW_VLAN_STAG_RX) &&
+ vlan_proto == ETH_P_8021AD)) {
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
+ tpa_info->metadata &
+ RX_CMP_FLAGS2_METADATA_VID_MASK);
+ }
+ }
+
+ skb_checksum_none_assert(skb);
+ if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level =
+ (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
+ }
+
+ if (TPA_END_GRO(tpa_end))
+ skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
+
+ return skb;
+}
+
+/* returns the following:
+ * 1 - 1 packet successfully received
+ * 0 - successful TPA_START, packet not completed yet
+ * -EBUSY - completion ring does not have all the agg buffers yet
+ * -ENOMEM - packet aborted due to out of memory
+ * -EIO - packet aborted due to hw error indicated in BD
+ */
+static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+ bool *agg_event)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct net_device *dev = bp->dev;
+ struct rx_cmp *rxcmp;
+ struct rx_cmp_ext *rxcmp1;
+ u32 tmp_raw_cons = *raw_cons;
+ u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ struct bnxt_sw_rx_bd *rx_buf;
+ unsigned int len;
+ u8 *data, agg_bufs, cmp_type;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ int rc = 0;
+
+ rxcmp = (struct rx_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+ cp_cons = RING_CMP(tmp_raw_cons);
+ rxcmp1 = (struct rx_cmp_ext *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+ return -EBUSY;
+
+ cmp_type = RX_CMP_TYPE(rxcmp);
+
+ prod = rxr->rx_prod;
+
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
+ bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
+ (struct rx_tpa_start_cmp_ext *)rxcmp1);
+
+ goto next_rx_no_prod;
+
+ } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+ skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
+ (struct rx_tpa_end_cmp *)rxcmp,
+ (struct rx_tpa_end_cmp_ext *)rxcmp1,
+ agg_event);
+
+ if (unlikely(IS_ERR(skb)))
+ return -EBUSY;
+
+ rc = -ENOMEM;
+ if (likely(skb)) {
+ skb_record_rx_queue(skb, bnapi->index);
+ skb_mark_napi_id(skb, &bnapi->napi);
+ if (bnxt_busy_polling(bnapi))
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&bnapi->napi, skb);
+ rc = 1;
+ }
+ goto next_rx_no_prod;
+ }
+
+ cons = rxcmp->rx_cmp_opaque;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ data = rx_buf->data;
+ prefetch(data);
+
+ agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
+ RX_CMP_AGG_BUFS_SHIFT;
+
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
+ return -EBUSY;
+
+ cp_cons = NEXT_CMP(cp_cons);
+ *agg_event = true;
+ }
+
+ rx_buf->data = NULL;
+ if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ if (agg_bufs)
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+
+ rc = -EIO;
+ goto next_rx;
+ }
+
+ len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+ dma_addr = dma_unmap_addr(rx_buf, mapping);
+
+ if (len <= bp->rx_copy_thresh) {
+ skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
+ bnxt_reuse_rx_data(rxr, cons, data);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ } else {
+ skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ }
+
+ if (agg_bufs) {
+ skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ }
+
+ if (RX_CMP_HASH_VALID(rxcmp)) {
+ u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+ enum pkt_hash_types type = PKT_HASH_TYPE_L4;
+
+ /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+ if (hash_type != 1 && hash_type != 3)
+ type = PKT_HASH_TYPE_L3;
+ skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (rxcmp1->rx_cmp_flags2 &
+ cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
+ netdev_features_t features = skb->dev->features;
+ u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+ if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ vlan_proto == ETH_P_8021Q) ||
+ ((features & NETIF_F_HW_VLAN_STAG_RX) &&
+ vlan_proto == ETH_P_8021AD))
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
+ meta_data &
+ RX_CMP_FLAGS2_METADATA_VID_MASK);
+ }
+
+ skb_checksum_none_assert(skb);
+ if (RX_CMP_L4_CS_OK(rxcmp1)) {
+ if (dev->features & NETIF_F_RXCSUM) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+ }
+ } else {
+ if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS)
+ cpr->rx_l4_csum_errors++;
+ }
+
+ skb_record_rx_queue(skb, bnapi->index);
+ skb_mark_napi_id(skb, &bnapi->napi);
+ if (bnxt_busy_polling(bnapi))
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&bnapi->napi, skb);
+ rc = 1;
+
+next_rx:
+ rxr->rx_prod = NEXT_RX(prod);
+
+next_rx_no_prod:
+ *raw_cons = tmp_raw_cons;
+
+ return rc;
+}
+
+static int bnxt_async_event_process(struct bnxt *bp,
+ struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+
+ /* TODO CHIMP_FW: Define event id's for link change, error etc */
+ switch (event_id) {
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+ set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ break;
+ default:
+ netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
+ event_id);
+ break;
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
+{
+ u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
+ struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
+ struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
+ (struct hwrm_fwd_req_cmpl *)txcmp;
+
+ switch (cmpl_type) {
+ case CMPL_BASE_TYPE_HWRM_DONE:
+ seq_id = le16_to_cpu(h_cmpl->sequence_id);
+ if (seq_id == bp->hwrm_intr_seq_id)
+ bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
+ else
+ netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
+ break;
+
+ case CMPL_BASE_TYPE_HWRM_FWD_REQ:
+ vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
+
+ if ((vf_id < bp->pf.first_vf_id) ||
+ (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
+ netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
+ vf_id);
+ return -EINVAL;
+ }
+
+ set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
+ set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ break;
+
+ case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+ bnxt_async_event_process(bp,
+				(struct hwrm_async_event_cmpl *)txcmp);
+		break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static irqreturn_t bnxt_msix(int irq, void *dev_instance)
+{
+ struct bnxt_napi *bnapi = dev_instance;
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 cons = RING_CMP(cpr->cp_raw_cons);
+
+ prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+ napi_schedule(&bnapi->napi);
+ return IRQ_HANDLED;
+}
+
+static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ u32 raw_cons = cpr->cp_raw_cons;
+ u16 cons = RING_CMP(raw_cons);
+ struct tx_cmp *txcmp;
+
+ txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ return TX_CMP_VALID(txcmp, raw_cons);
+}
+
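+/* Legacy INTx: the handler reads this aggregated interrupt status
+ * register to check that this ring's status bit is actually set,
+ * since the interrupt line may be shared.
+ */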
+#define CAG_LEGACY_INT_STATUS 0x2014
+
+static irqreturn_t bnxt_inta(int irq, void *dev_instance)
+{
+ struct bnxt_napi *bnapi = dev_instance;
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 cons = RING_CMP(cpr->cp_raw_cons);
+ u32 int_status;
+
+ prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+
+ if (!bnxt_has_work(bp, cpr)) {
+ int_status = readl(bp->bar0 + CAG_LEGACY_INT_STATUS);
+ /* return if erroneous interrupt */
+ if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
+ return IRQ_NONE;
+ }
+
+ /* disable ring IRQ */
+ BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
+
+ /* Return here if interrupt is shared and is disabled. */
+ if (unlikely(atomic_read(&bp->intr_sem) != 0))
+ return IRQ_HANDLED;
+
+ napi_schedule(&bnapi->napi);
+ return IRQ_HANDLED;
+}
+
+static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 raw_cons = cpr->cp_raw_cons;
+ u32 cons;
+ int tx_pkts = 0;
+ int rx_pkts = 0;
+ bool rx_event = false;
+ bool agg_event = false;
+ struct tx_cmp *txcmp;
+
+ while (1) {
+ int rc;
+
+ cons = RING_CMP(raw_cons);
+ txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ if (!TX_CMP_VALID(txcmp, raw_cons))
+ break;
+
+ if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
+ tx_pkts++;
+ /* return full budget so NAPI will complete. */
+ if (unlikely(tx_pkts > bp->tx_wake_thresh))
+ rx_pkts = budget;
+ } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+ rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+ if (likely(rc >= 0))
+ rx_pkts += rc;
+ else if (rc == -EBUSY) /* partial completion */
+ break;
+ rx_event = true;
+ } else if (unlikely((TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_DONE) ||
+ (TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
+ (TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
+ bnxt_hwrm_handler(bp, txcmp);
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+
+ if (rx_pkts == budget)
+ break;
+ }
+
+ cpr->cp_raw_cons = raw_cons;
+ /* ACK completion ring before freeing tx ring and producing new
+ * buffers in rx/agg rings to prevent overflowing the completion
+ * ring.
+ */
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+ if (tx_pkts)
+ bnxt_tx_int(bp, bnapi, tx_pkts);
+
+ if (rx_event) {
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ if (agg_event) {
+ writel(DB_KEY_RX | rxr->rx_agg_prod,
+ rxr->rx_agg_doorbell);
+ writel(DB_KEY_RX | rxr->rx_agg_prod,
+ rxr->rx_agg_doorbell);
+ }
+ }
+ return rx_pkts;
+}
+
+static int bnxt_poll(struct napi_struct *napi, int budget)
+{
+ struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ int work_done = 0;
+
+ if (!bnxt_lock_napi(bnapi))
+ return budget;
+
+ while (1) {
+ work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
+
+ if (work_done >= budget)
+ break;
+
+ if (!bnxt_has_work(bp, cpr)) {
+ napi_complete(napi);
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ break;
+ }
+ }
+ mmiowb();
+ bnxt_unlock_napi(bnapi);
+ return work_done;
+}
+
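+/* Socket busy polling: run a bounded poll (budget of 4) from process
+ * context, returning LL_FLUSH_FAILED/LL_FLUSH_BUSY when interrupts
+ * are masked or the NAPI/poll lock cannot be taken.
+ */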
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int bnxt_busy_poll(struct napi_struct *napi)
+{
+ struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ int rx_work, budget = 4;
+
+ if (atomic_read(&bp->intr_sem) != 0)
+ return LL_FLUSH_FAILED;
+
+ if (!bnxt_lock_poll(bnapi))
+ return LL_FLUSH_BUSY;
+
+ rx_work = bnxt_poll_work(bp, bnapi, budget);
+
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+ bnxt_unlock_poll(bnapi);
+ return rx_work;
+}
+#endif
+
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+ int i, max_idx;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ max_idx = bp->tx_nr_pages * TX_DESC_CNT;
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr;
+ int j;
+
+ if (!bnapi)
+ continue;
+
+ txr = &bnapi->tx_ring;
+ for (j = 0; j < max_idx;) {
+ struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+ struct sk_buff *skb = tx_buf->skb;
+ int k, last;
+
+ if (!skb) {
+ j++;
+ continue;
+ }
+
+ tx_buf->skb = NULL;
+
+ if (tx_buf->is_push) {
+ dev_kfree_skb(skb);
+ j += 2;
+ continue;
+ }
+
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+
+ last = tx_buf->nr_frags;
+ j += 2;
+ for (k = 0; k < last; k++, j = NEXT_TX(j)) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
+
+ tx_buf = &txr->tx_buf_ring[j];
+ dma_unmap_page(
+ &pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb(skb);
+ }
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+ }
+}
+
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+ int i, max_idx, max_agg_idx;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ max_idx = bp->rx_nr_pages * RX_DESC_CNT;
+ max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr;
+ int j;
+
+ if (!bnapi)
+ continue;
+
+ rxr = &bnapi->rx_ring;
+
+ if (rxr->rx_tpa) {
+ for (j = 0; j < MAX_TPA; j++) {
+ struct bnxt_tpa_info *tpa_info =
+ &rxr->rx_tpa[j];
+ u8 *data = tpa_info->data;
+
+ if (!data)
+ continue;
+
+ dma_unmap_single(
+ &pdev->dev,
+ dma_unmap_addr(tpa_info, mapping),
+ bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+
+ tpa_info->data = NULL;
+
+ kfree(data);
+ }
+ }
+
+ for (j = 0; j < max_idx; j++) {
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
+ u8 *data = rx_buf->data;
+
+ if (!data)
+ continue;
+
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+
+ rx_buf->data = NULL;
+
+ kfree(data);
+ }
+
+ for (j = 0; j < max_agg_idx; j++) {
+ struct bnxt_sw_rx_agg_bd *rx_agg_buf =
+ &rxr->rx_agg_ring[j];
+ struct page *page = rx_agg_buf->page;
+
+ if (!page)
+ continue;
+
+ dma_unmap_page(&pdev->dev,
+ dma_unmap_addr(rx_agg_buf, mapping),
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+ rx_agg_buf->page = NULL;
+ __clear_bit(j, rxr->rx_agg_bmap);
+
+ __free_page(page);
+ }
+ }
+}
+
+static void bnxt_free_skbs(struct bnxt *bp)
+{
+ bnxt_free_tx_skbs(bp);
+ bnxt_free_rx_skbs(bp);
+}
+
+static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int i;
+
+ for (i = 0; i < ring->nr_pages; i++) {
+ if (!ring->pg_arr[i])
+ continue;
+
+ dma_free_coherent(&pdev->dev, ring->page_size,
+ ring->pg_arr[i], ring->dma_arr[i]);
+
+ ring->pg_arr[i] = NULL;
+ }
+ if (ring->pg_tbl) {
+ dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
+ ring->pg_tbl, ring->pg_tbl_map);
+ ring->pg_tbl = NULL;
+ }
+ if (ring->vmem_size && *ring->vmem) {
+ vfree(*ring->vmem);
+ *ring->vmem = NULL;
+ }
+}
+
+static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+ int i;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (ring->nr_pages > 1) {
+ ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
+ ring->nr_pages * 8,
+ &ring->pg_tbl_map,
+ GFP_KERNEL);
+ if (!ring->pg_tbl)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ring->nr_pages; i++) {
+ ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ ring->page_size,
+ &ring->dma_arr[i],
+ GFP_KERNEL);
+ if (!ring->pg_arr[i])
+ return -ENOMEM;
+
+ if (ring->nr_pages > 1)
+ ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
+ }
+
+ if (ring->vmem_size) {
+ *ring->vmem = vzalloc(ring->vmem_size);
+ if (!(*ring->vmem))
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void bnxt_free_rx_rings(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ rxr = &bnapi->rx_ring;
+
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnxt_free_ring(bp, ring);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnxt_free_ring(bp, ring);
+ }
+}
+
+static int bnxt_alloc_rx_rings(struct bnxt *bp)
+{
+ int i, rc, agg_rings = 0, tpa_rings = 0;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ agg_rings = 1;
+
+ if (bp->flags & BNXT_FLAG_TPA)
+ tpa_rings = 1;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ rxr = &bnapi->rx_ring;
+ ring = &rxr->rx_ring_struct;
+
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+
+ if (agg_rings) {
+ u16 mem_size;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+
+ rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
+ if (tpa_rings) {
+ rxr->rx_tpa = kcalloc(MAX_TPA,
+ sizeof(struct bnxt_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ return -ENOMEM;
+ }
+ }
+ }
+ return 0;
+}
+
+static void bnxt_free_tx_rings(struct bnxt *bp)
+{
+ int i;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ txr = &bnapi->tx_ring;
+
+ if (txr->tx_push) {
+ dma_free_coherent(&pdev->dev, bp->tx_push_size,
+ txr->tx_push, txr->tx_push_mapping);
+ txr->tx_push = NULL;
+ }
+
+ ring = &txr->tx_ring_struct;
+
+ bnxt_free_ring(bp, ring);
+ }
+}
+
+static int bnxt_alloc_tx_rings(struct bnxt *bp)
+{
+ int i, j, rc;
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->tx_push_size = 0;
+ if (bp->tx_push_thresh) {
+ int push_size;
+
+ push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
+ bp->tx_push_thresh);
+
+ if (push_size > 128) {
+ push_size = 0;
+ bp->tx_push_thresh = 0;
+ }
+
+ bp->tx_push_size = push_size;
+ }
+
+ for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ txr = &bnapi->tx_ring;
+ ring = &txr->tx_ring_struct;
+
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+
+ if (bp->tx_push_size) {
+ struct tx_bd *txbd;
+ dma_addr_t mapping;
+
+			/* One pre-allocated DMA buffer to back up the
+			 * TX push operation
+			 */
+ txr->tx_push = dma_alloc_coherent(&pdev->dev,
+ bp->tx_push_size,
+ &txr->tx_push_mapping,
+ GFP_KERNEL);
+
+ if (!txr->tx_push)
+ return -ENOMEM;
+
+ txbd = &txr->tx_push->txbd1;
+
+ mapping = txr->tx_push_mapping +
+ sizeof(struct tx_push_bd);
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ memset(txbd + 1, 0, sizeof(struct tx_bd_ext));
+ }
+ ring->queue_id = bp->q_info[j].queue_id;
+ if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+ j++;
+ }
+ return 0;
+}
+
+static void bnxt_free_cp_rings(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ ring = &cpr->cp_ring_struct;
+
+ bnxt_free_ring(bp, ring);
+ }
+}
+
+static int bnxt_alloc_cp_rings(struct bnxt *bp)
+{
+ int i, rc;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ ring = &cpr->cp_ring_struct;
+
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static void bnxt_init_ring_struct(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ ring = &cpr->cp_ring_struct;
+ ring->nr_pages = bp->cp_nr_pages;
+ ring->page_size = HW_CMPD_RING_SIZE;
+ ring->pg_arr = (void **)cpr->cp_desc_ring;
+ ring->dma_arr = cpr->cp_desc_mapping;
+ ring->vmem_size = 0;
+
+ rxr = &bnapi->rx_ring;
+ ring = &rxr->rx_ring_struct;
+ ring->nr_pages = bp->rx_nr_pages;
+ ring->page_size = HW_RXBD_RING_SIZE;
+ ring->pg_arr = (void **)rxr->rx_desc_ring;
+ ring->dma_arr = rxr->rx_desc_mapping;
+ ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+ ring->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ ring->nr_pages = bp->rx_agg_nr_pages;
+ ring->page_size = HW_RXBD_RING_SIZE;
+ ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ ring->dma_arr = rxr->rx_agg_desc_mapping;
+ ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+ ring->vmem = (void **)&rxr->rx_agg_ring;
+
+ txr = &bnapi->tx_ring;
+ ring = &txr->tx_ring_struct;
+ ring->nr_pages = bp->tx_nr_pages;
+ ring->page_size = HW_RXBD_RING_SIZE;
+ ring->pg_arr = (void **)txr->tx_desc_ring;
+ ring->dma_arr = txr->tx_desc_mapping;
+ ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+ ring->vmem = (void **)&txr->tx_buf_ring;
+ }
+}
+
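+/* Stamp every RX BD in the ring pages with the given type/flags and
+ * record the producer index in the opaque field so completions can
+ * be matched back to software ring entries.
+ */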
+static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
+{
+ int i;
+ u32 prod;
+ struct rx_bd **rx_buf_ring;
+
+ rx_buf_ring = (struct rx_bd **)ring->pg_arr;
+ for (i = 0, prod = 0; i < ring->nr_pages; i++) {
+ int j;
+ struct rx_bd *rxbd;
+
+ rxbd = rx_buf_ring[i];
+ if (!rxbd)
+ continue;
+
+ for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
+ rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
+ rxbd->rx_bd_opaque = prod;
+ }
+ }
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_napi *bnapi = bp->bnapi[ring_nr];
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+ u32 prod, type;
+ int i;
+
+ if (!bnapi)
+ return -EINVAL;
+
+ type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+ if (NET_IP_ALIGN == 2)
+ type |= RX_BD_FLAGS_SOP;
+
+ rxr = &bnapi->rx_ring;
+ ring = &rxr->rx_ring_struct;
+ bnxt_init_rxbd_pages(ring, type);
+
+ prod = rxr->rx_prod;
+ for (i = 0; i < bp->rx_ring_size; i++) {
+ if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
+ netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+ ring_nr, i, bp->rx_ring_size);
+ break;
+ }
+ prod = NEXT_RX(prod);
+ }
+ rxr->rx_prod = prod;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return 0;
+
+ ring = &rxr->rx_agg_ring_struct;
+
+ type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+ bnxt_init_rxbd_pages(ring, type);
+
+ prod = rxr->rx_agg_prod;
+ for (i = 0; i < bp->rx_agg_ring_size; i++) {
+ if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
+ netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
+				    ring_nr, i, bp->rx_agg_ring_size);
+ break;
+ }
+ prod = NEXT_RX_AGG(prod);
+ }
+ rxr->rx_agg_prod = prod;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if (bp->flags & BNXT_FLAG_TPA) {
+ if (rxr->rx_tpa) {
+ u8 *data;
+ dma_addr_t mapping;
+
+ for (i = 0; i < MAX_TPA; i++) {
+ data = __bnxt_alloc_rx_data(bp, &mapping,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].mapping = mapping;
+ }
+ } else {
+ netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int bnxt_init_rx_rings(struct bnxt *bp)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ rc = bnxt_init_one_rx_ring(bp, i);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int bnxt_init_tx_rings(struct bnxt *bp)
+{
+ u16 i;
+
+ bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
+ MAX_SKB_FRAGS + 1);
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+
+ return 0;
+}
+
+static void bnxt_free_ring_grps(struct bnxt *bp)
+{
+ kfree(bp->grp_info);
+ bp->grp_info = NULL;
+}
+
+static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
+{
+ int i;
+
+ if (irq_re_init) {
+ bp->grp_info = kcalloc(bp->cp_nr_rings,
+ sizeof(struct bnxt_ring_grp_info),
+ GFP_KERNEL);
+ if (!bp->grp_info)
+ return -ENOMEM;
+ }
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ if (irq_re_init)
+ bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
+ bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+ }
+ return 0;
+}
+
+static void bnxt_free_vnics(struct bnxt *bp)
+{
+ kfree(bp->vnic_info);
+ bp->vnic_info = NULL;
+ bp->nr_vnics = 0;
+}
+
+static int bnxt_alloc_vnics(struct bnxt *bp)
+{
+ int num_vnics = 1;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (bp->flags & BNXT_FLAG_RFS)
+ num_vnics += bp->rx_nr_rings;
+#endif
+
+ bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
+ GFP_KERNEL);
+ if (!bp->vnic_info)
+ return -ENOMEM;
+
+ bp->nr_vnics = num_vnics;
+ return 0;
+}
+
+static void bnxt_init_vnics(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
+
+		if (vnic->rss_hash_key) {
+ if (i == 0)
+ prandom_bytes(vnic->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ else
+ memcpy(vnic->rss_hash_key,
+ bp->vnic_info[0].rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ }
+ }
+}
+
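+/* Return a power-of-2 page count such that pages * desc_per_pg is
+ * strictly greater than ring_size, so mask-based producer/consumer
+ * indexing always leaves at least one free descriptor.  e.g. a ring
+ * size of 1023 with 256 descriptors per page yields 4 pages.
+ */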
+static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
+{
+ int pages;
+
+ pages = ring_size / desc_per_pg;
+
+ if (!pages)
+ return 1;
+
+ pages++;
+
+ while (pages & (pages - 1))
+ pages++;
+
+ return pages;
+}
+
+static void bnxt_set_tpa_flags(struct bnxt *bp)
+{
+ bp->flags &= ~BNXT_FLAG_TPA;
+ if (bp->dev->features & NETIF_F_LRO)
+ bp->flags |= BNXT_FLAG_LRO;
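+	/* hardware GRO is only enabled past the initial (rev 0) silicon */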
+ if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+ bp->flags |= BNXT_FLAG_GRO;
+}
+
+/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
+ * be set on entry.
+ */
+void bnxt_set_ring_params(struct bnxt *bp)
+{
+ u32 ring_size, rx_size, rx_space;
+ u32 agg_factor = 0, agg_ring_size = 0;
+
+ /* 8 for CRC and VLAN */
+ rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
+
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
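+	/* e.g. mtu 1500 (assuming NET_IP_ALIGN == 2 and 64-byte cache
+	 * lines): 1500 + 14 + 2 + 8 = 1524 aligns up to an rx_size of
+	 * 1536; rx_space additionally reserves NET_SKB_PAD headroom and
+	 * the skb_shared_info tail.
+	 */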
+
+ bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
+ ring_size = bp->rx_ring_size;
+ bp->rx_agg_ring_size = 0;
+ bp->rx_agg_nr_pages = 0;
+
+ if (bp->flags & BNXT_FLAG_TPA)
+ agg_factor = 4;
+
+ bp->flags &= ~BNXT_FLAG_JUMBO;
+ if (rx_space > PAGE_SIZE) {
+ u32 jumbo_factor;
+
+ bp->flags |= BNXT_FLAG_JUMBO;
+ jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
+ if (jumbo_factor > agg_factor)
+ agg_factor = jumbo_factor;
+ }
+ agg_ring_size = ring_size * agg_factor;
+
+ if (agg_ring_size) {
+ bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
+ RX_DESC_CNT);
+ if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
+ u32 tmp = agg_ring_size;
+
+ bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
+ agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
+ netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
+ tmp, agg_ring_size);
+ }
+ bp->rx_agg_ring_size = agg_ring_size;
+ bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
+ rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+
+ bp->rx_buf_use_size = rx_size;
+ bp->rx_buf_size = rx_space;
+
+ bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
+ bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
+
+ ring_size = bp->tx_ring_size;
+ bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
+ bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
+
+ ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
+ bp->cp_ring_size = ring_size;
+
+ bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
+ if (bp->cp_nr_pages > MAX_CP_PAGES) {
+ bp->cp_nr_pages = MAX_CP_PAGES;
+ bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
+ netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
+ ring_size, bp->cp_ring_size);
+ }
+ bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
+ bp->cp_ring_mask = bp->cp_bit - 1;
+}
+
+static void bnxt_free_vnic_attributes(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_vnic_info *vnic;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->vnic_info)
+ return;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ kfree(vnic->fw_grp_ids);
+ vnic->fw_grp_ids = NULL;
+
+ kfree(vnic->uc_list);
+ vnic->uc_list = NULL;
+
+ if (vnic->mc_list) {
+ dma_free_coherent(&pdev->dev, vnic->mc_list_size,
+ vnic->mc_list, vnic->mc_list_mapping);
+ vnic->mc_list = NULL;
+ }
+
+ if (vnic->rss_table) {
+ dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ vnic->rss_table,
+ vnic->rss_table_dma_addr);
+ vnic->rss_table = NULL;
+ }
+
+ vnic->rss_hash_key = NULL;
+ vnic->flags = 0;
+ }
+}
+
+static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
+{
+ int i, rc = 0, size;
+ struct bnxt_vnic_info *vnic;
+ struct pci_dev *pdev = bp->pdev;
+ int max_rings;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
+ int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
+
+ if (mem_size > 0) {
+ vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
+ if (!vnic->uc_list) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
+ if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
+ vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
+ vnic->mc_list =
+ dma_alloc_coherent(&pdev->dev,
+ vnic->mc_list_size,
+ &vnic->mc_list_mapping,
+ GFP_KERNEL);
+ if (!vnic->mc_list) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+ max_rings = bp->rx_nr_rings;
+ else
+ max_rings = 1;
+
+ vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
+ if (!vnic->fw_grp_ids) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Allocate rss table and hash key */
+ vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &vnic->rss_table_dma_addr,
+ GFP_KERNEL);
+ if (!vnic->rss_table) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
+
+ vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+ }
+ return 0;
+
+out:
+ return rc;
+}
+
+static void bnxt_free_hwrm_resources(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+ bp->hwrm_cmd_resp_dma_addr);
+
+ bp->hwrm_cmd_resp_addr = NULL;
+ if (bp->hwrm_dbg_resp_addr) {
+ dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
+ bp->hwrm_dbg_resp_addr,
+ bp->hwrm_dbg_resp_dma_addr);
+
+ bp->hwrm_dbg_resp_addr = NULL;
+ }
+}
+
+static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &bp->hwrm_cmd_resp_dma_addr,
+ GFP_KERNEL);
+ if (!bp->hwrm_cmd_resp_addr)
+ return -ENOMEM;
+ bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
+ HWRM_DBG_REG_BUF_SIZE,
+ &bp->hwrm_dbg_resp_dma_addr,
+ GFP_KERNEL);
+ if (!bp->hwrm_dbg_resp_addr)
+		netdev_warn(bp->dev, "failed to allocate debug register DMA memory\n");
+
+ return 0;
+}
+
+static void bnxt_free_stats(struct bnxt *bp)
+{
+ u32 size, i;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ size = sizeof(struct ctx_hw_stats);
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ if (cpr->hw_stats) {
+ dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
+ cpr->hw_stats_map);
+ cpr->hw_stats = NULL;
+ }
+ }
+}
+
+static int bnxt_alloc_stats(struct bnxt *bp)
+{
+ u32 size, i;
+ struct pci_dev *pdev = bp->pdev;
+
+ size = sizeof(struct ctx_hw_stats);
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
+ &cpr->hw_stats_map,
+ GFP_KERNEL);
+ if (!cpr->hw_stats)
+ return -ENOMEM;
+
+ cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ return 0;
+}
+
+static void bnxt_clear_ring_indices(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_tx_ring_info *txr;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ cpr->cp_raw_cons = 0;
+
+ txr = &bnapi->tx_ring;
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+
+ rxr = &bnapi->rx_ring;
+ rxr->rx_prod = 0;
+ rxr->rx_agg_prod = 0;
+ rxr->rx_sw_agg_prod = 0;
+ }
+}
+
+static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i;
+
+	/* Called under rtnl_lock with all our NAPIs disabled, so it is
+	 * safe to delete the hash table.
+	 */
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ struct bnxt_ntuple_filter *fltr;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+ hlist_del(&fltr->hash);
+ kfree(fltr);
+ }
+ }
+ if (irq_reinit) {
+ kfree(bp->ntp_fltr_bmap);
+ bp->ntp_fltr_bmap = NULL;
+ }
+ bp->ntp_fltr_count = 0;
+#endif
+}
+
+static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i, rc = 0;
+
+ if (!(bp->flags & BNXT_FLAG_RFS))
+ return 0;
+
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
+
+ bp->ntp_fltr_count = 0;
+	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+				    sizeof(long), GFP_KERNEL);
+
+ if (!bp->ntp_fltr_bmap)
+ rc = -ENOMEM;
+
+ return rc;
+#else
+ return 0;
+#endif
+}
+
+static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
+{
+ bnxt_free_vnic_attributes(bp);
+ bnxt_free_tx_rings(bp);
+ bnxt_free_rx_rings(bp);
+ bnxt_free_cp_rings(bp);
+ bnxt_free_ntp_fltrs(bp, irq_re_init);
+ if (irq_re_init) {
+ bnxt_free_stats(bp);
+ bnxt_free_ring_grps(bp);
+ bnxt_free_vnics(bp);
+ kfree(bp->bnapi);
+ bp->bnapi = NULL;
+ } else {
+ bnxt_clear_ring_indices(bp);
+ }
+}
+
+static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
+{
+ int i, rc, size, arr_size;
+ void *bnapi;
+
+ if (irq_re_init) {
+		/* Allocate the bnapi pointer array and the bnxt_napi
+		 * structs for all queues in one cache-aligned block:
+		 * [bnxt_napi *ptrs][bnxt_napi 0][bnxt_napi 1]...
+		 */
+ arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
+ bp->cp_nr_rings);
+ size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
+ bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
+ if (!bnapi)
+ return -ENOMEM;
+
+ bp->bnapi = bnapi;
+ bnapi += arr_size;
+ for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
+ bp->bnapi[i] = bnapi;
+ bp->bnapi[i]->index = i;
+ bp->bnapi[i]->bp = bp;
+ }
+
+ rc = bnxt_alloc_stats(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_ntp_fltrs(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_vnics(bp);
+ if (rc)
+ goto alloc_mem_err;
+ }
+
+ bnxt_init_ring_struct(bp);
+
+ rc = bnxt_alloc_rx_rings(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_tx_rings(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_cp_rings(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
+ BNXT_VNIC_UCAST_FLAG;
+ rc = bnxt_alloc_vnic_attributes(bp);
+ if (rc)
+ goto alloc_mem_err;
+ return 0;
+
+alloc_mem_err:
+ bnxt_free_mem(bp, true);
+ return rc;
+}
+
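+/* Fill the common HWRM request header: request type, optional completion
+ * ring id, target function id, and the DMA address of the response
+ * buffer.  The sequence id portion is filled in later by
+ * _hwrm_send_message().
+ */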
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
+ u16 cmpl_ring, u16 target_id)
+{
+ struct hwrm_cmd_req_hdr *req = request;
+
+ req->cmpl_ring_req_type =
+ cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
+ req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
+ req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
+}
+
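+/* Send one HWRM request: the request is copied into the channel in BAR0
+ * and the channel doorbell is rung.  If the request names a completion
+ * ring, wait for the response completion interrupt; otherwise poll the
+ * DMA response buffer until firmware updates its length and valid word.
+ * Only one request may be outstanding, so callers must hold
+ * bp->hwrm_cmd_lock (or use hwrm_send_message()).
+ */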
+int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+ int i, intr_process, rc;
+ struct hwrm_cmd_req_hdr *req = msg;
+ u32 *data = msg;
+ __le32 *resp_len, *valid;
+ u16 cp_ring_id, len = 0;
+ struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+
+ req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
+ memset(resp, 0, PAGE_SIZE);
+ cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
+ HWRM_CMPL_RING_MASK) >>
+ HWRM_CMPL_RING_SFT;
+ intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
+
+ /* Write request msg to hwrm channel */
+ __iowrite32_copy(bp->bar0, data, msg_len / 4);
+
+ /* currently supports only one outstanding message */
+ if (intr_process)
+ bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
+ HWRM_SEQ_ID_MASK;
+
+ /* Ring channel doorbell */
+ writel(1, bp->bar0 + 0x100);
+
+ i = 0;
+ if (intr_process) {
+ /* Wait until hwrm response cmpl interrupt is processed */
+ while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
+ i++ < timeout) {
+ usleep_range(600, 800);
+ }
+
+ if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
+ netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
+ req->cmpl_ring_req_type);
+ return -1;
+ }
+ } else {
+ /* Check if response len is updated */
+ resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
+ for (i = 0; i < timeout; i++) {
+ len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
+ HWRM_RESP_LEN_SFT;
+ if (len)
+ break;
+ usleep_range(600, 800);
+ }
+
+ if (i >= timeout) {
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
+ timeout, req->cmpl_ring_req_type,
+ req->target_id_seq_id, *resp_len);
+ return -1;
+ }
+
+ /* Last word of resp contains valid bit */
+ valid = bp->hwrm_cmd_resp_addr + len - 4;
+ for (i = 0; i < timeout; i++) {
+ if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
+ break;
+ usleep_range(600, 800);
+ }
+
+ if (i >= timeout) {
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
+ timeout, req->cmpl_ring_req_type,
+ req->target_id_seq_id, len, *valid);
+ return -1;
+ }
+ }
+
+ rc = le16_to_cpu(resp->error_code);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+ le16_to_cpu(resp->req_type),
+ le16_to_cpu(resp->seq_id), rc);
+ return rc;
+ }
+ return 0;
+}
+
+int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+ int rc;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, msg, msg_len, timeout);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+{
+ struct hwrm_func_drv_rgtr_input req = {0};
+ int i;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+
+ req.enables =
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+ FUNC_DRV_RGTR_REQ_ENABLES_VER |
+ FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
+ /* TODO: current async event fwd bits are not defined and the firmware
+ * only checks if it is non-zero to enable async event forwarding
+ */
+ req.async_event_fwd[0] |= cpu_to_le32(1);
+ req.os_type = cpu_to_le16(1);
+ req.ver_maj = DRV_VER_MAJ;
+ req.ver_min = DRV_VER_MIN;
+ req.ver_upd = DRV_VER_UPD;
+
+ if (BNXT_PF(bp)) {
+		DECLARE_BITMAP(vf_req_snif_bmap, 256);
+ u32 *data = (u32 *)vf_req_snif_bmap;
+
+		memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
+ for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
+ __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
+
+ for (i = 0; i < 8; i++) {
+ req.vf_req_fwd[i] = cpu_to_le32(*data);
+ data++;
+ }
+ req.enables |=
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
+ }
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
+{
+	int rc = 0;
+ struct hwrm_tunnel_dst_port_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
+ req.tunnel_type = tunnel_type;
+
+ switch (tunnel_type) {
+ case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
+ req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
+ break;
+ case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
+ req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
+ break;
+ default:
+ break;
+ }
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
+ rc);
+ return rc;
+}
+
+static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
+ u8 tunnel_type)
+{
+	int rc = 0;
+ struct hwrm_tunnel_dst_port_alloc_input req = {0};
+ struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
+
+ req.tunnel_type = tunnel_type;
+ req.tunnel_dst_port_val = port;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
+ rc);
+ goto err_out;
+ }
+
+	if (tunnel_type == TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
+		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+	else if (tunnel_type == TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
+		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
+err_out:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
+{
+ struct hwrm_cfa_l2_set_rx_mask_input req = {0};
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
+ req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ req.mask = cpu_to_le32(vnic->rx_mask);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
+{
+ struct hwrm_cfa_ntuple_filter_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
+ req.ntuple_filter_id = fltr->filter_id;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#define BNXT_NTP_FLTR_FLAGS \
+ (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID)
+
+static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
+{
+ int rc = 0;
+ struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
+ struct hwrm_cfa_ntuple_filter_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct flow_keys *keys = &fltr->fkeys;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
+ req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
+
+ req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+
+ req.ethertype = htons(ETH_P_IP);
+ memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
+ req.ipaddr_type = 4;
+ req.ip_protocol = keys->basic.ip_proto;
+
+ req.src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+
+ req.src_port = keys->ports.src;
+ req.src_port_mask = cpu_to_be16(0xffff);
+ req.dst_port = keys->ports.dst;
+ req.dst_port_mask = cpu_to_be16(0xffff);
+
+ req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ fltr->filter_id = resp->ntuple_filter_id;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+#endif
+
+static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
+ u8 *mac_addr)
+{
+	int rc = 0;
+ struct hwrm_cfa_l2_filter_alloc_input req = {0};
+ struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
+ req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
+ CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
+ req.enables =
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+ memcpy(req.l2_addr, mac_addr, ETH_ALEN);
+ req.l2_addr_mask[0] = 0xff;
+ req.l2_addr_mask[1] = 0xff;
+ req.l2_addr_mask[2] = 0xff;
+ req.l2_addr_mask[3] = 0xff;
+ req.l2_addr_mask[4] = 0xff;
+ req.l2_addr_mask[5] = 0xff;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
+ resp->l2_filter_id;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
+{
+ u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
+ int rc = 0;
+
+ /* Any associated ntuple filters will also be cleared by firmware. */
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < num_of_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ for (j = 0; j < vnic->uc_filter_count; j++) {
+ struct hwrm_cfa_l2_filter_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req,
+ HWRM_CFA_L2_FILTER_FREE, -1, -1);
+
+ req.l2_filter_id = vnic->fw_l2_filter_id[j];
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ }
+ vnic->uc_filter_count = 0;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return rc;
+}
+
+static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_tpa_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
+
+ if (tpa_flags) {
+ u16 mss = bp->dev->mtu - 40;
+ u32 nsegs, n, segs = 0, flags;
+
+ flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
+ VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
+ VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
+ VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
+ VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
+ if (tpa_flags & BNXT_FLAG_GRO)
+ flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
+
+ req.flags = cpu_to_le32(flags);
+
+ req.enables =
+ cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
+ VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
+
+		/* The number of aggregation segments is in log2 units, and
+		 * the first packet is not counted as part of these units.
+		 */
+ if (mss <= PAGE_SIZE) {
+ n = PAGE_SIZE / mss;
+ nsegs = (MAX_SKB_FRAGS - 1) * n;
+ } else {
+ n = mss / PAGE_SIZE;
+ if (mss & (PAGE_SIZE - 1))
+ n++;
+ nsegs = (MAX_SKB_FRAGS - n) / n;
+ }
+
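+		/* Worked example (assuming 4K pages and MAX_SKB_FRAGS of
+		 * 17): mtu 1500 gives mss 1460 <= PAGE_SIZE, so
+		 * n = 4096 / 1460 = 2 and nsegs = (17 - 1) * 2 = 32,
+		 * giving max_agg_segs = ilog2(32) = 5.
+		 */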
+ segs = ilog2(nsegs);
+ req.max_agg_segs = cpu_to_le16(segs);
+ req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
+ }
+ req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+{
+ u32 i, j, max_rings;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_rss_cfg_input req = {0};
+
+ if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
+ if (set_rss) {
+ vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
+ BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
+ BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
+ BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
+
+ req.hash_type = cpu_to_le32(vnic->hash_type);
+
+ if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+ max_rings = bp->rx_nr_rings;
+ else
+ max_rings = 1;
+
+ /* Fill the RSS indirection table with ring group ids */
+ for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
+ if (j == max_rings)
+ j = 0;
+ vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
+ }
+
+ req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req.hash_key_tbl_addr =
+ cpu_to_le64(vnic->rss_hash_key_dma_addr);
+ }
+ req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
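+/* Enable jumbo placement and IPv4/IPv6 header-data split (HDS) buffer
+ * placement modes on the vnic; both thresholds are simply set to the
+ * copybreak size until firmware implements them.
+ */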
+static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_plcmodes_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
+ req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req.enables =
+ cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
+ VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ /* thresholds not implemented in firmware yet */
+ req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+ req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
+{
+ struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
+ req.rss_cos_lb_ctx_id =
+ cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
+
+ hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+}
+
+static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
+ bnxt_hwrm_vnic_ctx_free_one(bp, i);
+ }
+ bp->rsscos_nr_ctxs = 0;
+}
+
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
+{
+ int rc;
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
+ -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
+ le16_to_cpu(resp->rss_cos_lb_ctx_id);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return rc;
+}
+
+static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+{
+ int grp_idx = 0;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+	/* Only RSS is supported for now; TBD: COS & LB */
+ req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
+ VNIC_CFG_REQ_ENABLES_RSS_RULE);
+ req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+ req.cos_rule = cpu_to_le16(0xffff);
+ if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+ grp_idx = 0;
+ else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
+ grp_idx = vnic_id - 1;
+
+ req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
+
+ req.lb_rule = cpu_to_le16(0xffff);
+ req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN);
+
+ if (bp->flags & BNXT_FLAG_STRIP_VLAN)
+ req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+{
+	int rc = 0;
+
+ if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
+ struct hwrm_vnic_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
+ req.vnic_id =
+ cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+ bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
+ }
+ return rc;
+}
+
+static void bnxt_hwrm_vnic_free(struct bnxt *bp)
+{
+ u16 i;
+
+ for (i = 0; i < bp->nr_vnics; i++)
+ bnxt_hwrm_vnic_free_one(bp, i);
+}
+
+static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id,
+ u16 end_grp_id)
+{
+	u32 i, j;
+	int rc = 0;
+ struct hwrm_vnic_alloc_input req = {0};
+ struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ /* map ring groups to this vnic */
+ for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) {
+ if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) {
+ netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
+ j, (end_grp_id - start_grp_id));
+ break;
+ }
+ bp->vnic_info[vnic_id].fw_grp_ids[j] =
+ bp->grp_info[i].fw_grp_id;
+ }
+
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ if (vnic_id == 0)
+ req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
+{
+ u16 i;
+	int rc = 0;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct hwrm_ring_grp_alloc_input req = {0};
+ struct hwrm_ring_grp_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
+
+ req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+ req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id);
+ req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id);
+ req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
+{
+ u16 i;
+	int rc = 0;
+ struct hwrm_ring_grp_free_input req = {0};
+
+ if (!bp->grp_info)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
+ continue;
+ req.ring_group_id =
+ cpu_to_le32(bp->grp_info[i].fw_grp_id);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
+ struct bnxt_ring_struct *ring,
+ u32 ring_type, u32 map_index,
+ u32 stats_ctx_id)
+{
+ int rc = 0, err = 0;
+ struct hwrm_ring_alloc_input req = {0};
+ struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 ring_id;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
+
+ req.enables = 0;
+ if (ring->nr_pages > 1) {
+ req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
+ /* Page size is in log2 units */
+ req.page_size = BNXT_PAGE_SHIFT;
+ req.page_tbl_depth = 1;
+ } else {
+ req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
+ }
+ req.fbo = 0;
+ /* Association of ring index with doorbell index and MSIX number */
+ req.logical_id = cpu_to_le16(map_index);
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ /* Association of transmit ring with completion ring */
+ req.cmpl_ring_id =
+ cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
+ req.length = cpu_to_le32(bp->tx_ring_mask + 1);
+ req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
+ req.queue_id = cpu_to_le16(ring->queue_id);
+ break;
+ case HWRM_RING_ALLOC_RX:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req.length = cpu_to_le32(bp->rx_ring_mask + 1);
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
+ req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_USING_MSIX)
+ req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ break;
+ default:
+ netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
+ ring_type);
+ return -1;
+ }
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ err = le16_to_cpu(resp->error_code);
+ ring_id = le16_to_cpu(resp->ring_id);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc || err) {
+		switch (ring_type) {
+		case HWRM_RING_ALLOC_CMPL:
+			netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
+				   rc, err);
+			return -1;
+
+		case HWRM_RING_ALLOC_RX:
+		case HWRM_RING_ALLOC_AGG:
+			netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
+				   rc, err);
+			return -1;
+
+		case HWRM_RING_ALLOC_TX:
+			netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
+				   rc, err);
+			return -1;
+
+		default:
+			netdev_err(bp->dev, "Invalid ring\n");
+			return -1;
+		}
+ }
+ ring->fw_ring_id = ring_id;
+ return rc;
+}
+
+static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
+{
+ int i, rc = 0;
+
+ if (bp->cp_nr_rings) {
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_CMPL, i,
+ INVALID_STATS_CTX_ID);
+ if (rc)
+ goto err_out;
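+			/* Doorbell registers are laid out in BAR1 at a
+			 * 128-byte stride, indexed by the logical ring
+			 * (MSI-X) index passed as map_index above.
+			 */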
+ cpr->cp_doorbell = bp->bar1 + i * 0x80;
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+ }
+ }
+
+ if (bp->tx_nr_rings) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_TX, i,
+ fw_stats_ctx);
+ if (rc)
+ goto err_out;
+ txr->tx_doorbell = bp->bar1 + i * 0x80;
+ }
+ }
+
+ if (bp->rx_nr_rings) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_RX, i,
+ INVALID_STATS_CTX_ID);
+ if (rc)
+ goto err_out;
+ rxr->rx_doorbell = bp->bar1 + i * 0x80;
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
+ }
+ }
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring =
+ &rxr->rx_agg_ring_struct;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_AGG,
+ bp->rx_nr_rings + i,
+ INVALID_STATS_CTX_ID);
+ if (rc)
+ goto err_out;
+
+ rxr->rx_agg_doorbell =
+ bp->bar1 + (bp->rx_nr_rings + i) * 0x80;
+ writel(DB_KEY_RX | rxr->rx_agg_prod,
+ rxr->rx_agg_doorbell);
+ bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id;
+ }
+ }
+err_out:
+ return rc;
+}
+
+static int hwrm_ring_free_send_msg(struct bnxt *bp,
+ struct bnxt_ring_struct *ring,
+ u32 ring_type, int cmpl_ring_id)
+{
+ int rc;
+ struct hwrm_ring_free_input req = {0};
+ struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 error_code;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
+ req.ring_type = ring_type;
+ req.ring_id = cpu_to_le16(ring->fw_ring_id);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ error_code = le16_to_cpu(resp->error_code);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc || error_code) {
+ switch (ring_type) {
+ case RING_FREE_REQ_RING_TYPE_CMPL:
+ netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
+ rc);
+ return rc;
+ case RING_FREE_REQ_RING_TYPE_RX:
+ netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
+ rc);
+ return rc;
+ case RING_FREE_REQ_RING_TYPE_TX:
+ netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
+ rc);
+ return rc;
+ default:
+ netdev_err(bp->dev, "Invalid ring\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+{
+ int i, rc = 0;
+
+ if (!bp->bnapi)
+ return 0;
+
+ if (bp->tx_nr_rings) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_TX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ if (bp->rx_nr_rings) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].rx_fw_ring_id =
+ INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ if (bp->rx_agg_nr_pages) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring =
+ &rxr->rx_agg_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].agg_fw_ring_id =
+ INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ if (bp->cp_nr_rings) {
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].cp_fw_ring_id =
+ INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ return rc;
+}
+
+int bnxt_hwrm_set_coal(struct bnxt *bp)
+{
+ int i, rc = 0;
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ u16 max_buf, max_buf_irq;
+ u16 buf_tmr, buf_tmr_irq;
+ u32 flags;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
+ -1, -1);
+
+ /* Each rx completion (2 records) should be DMAed immediately */
+ max_buf = min_t(u16, bp->coal_bufs / 4, 2);
+ /* max_buf must not be zero */
+ max_buf = clamp_t(u16, max_buf, 1, 63);
+ max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
+ buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
+ buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
+
+ flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+
+ /* RING_IDLE generates more IRQs for lower latency. Enable it only
+ * if coal_ticks is less than 25 us.
+ */
+ if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
+ flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
+
+ req.flags = cpu_to_le16(flags);
+ req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
+ req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
+ req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
+ req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
+ req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
+ req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
+ req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
+{
+ int rc = 0, i;
+ struct hwrm_stat_ctx_free_input req = {0};
+
+ if (!bp->bnapi)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
+ req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
+{
+ int rc = 0, i;
+ struct hwrm_stat_ctx_alloc_input req = {0};
+ struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+
+ req.update_period_ms = cpu_to_le32(1000);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+
+ bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_func_qcaps_input req = {0};
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto hwrm_func_qcaps_exit;
+
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ pf->fw_fid = le16_to_cpu(resp->fid);
+ pf->port_id = le16_to_cpu(resp->port_id);
+ memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ pf->max_pf_tx_rings = pf->max_tx_rings;
+ pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ pf->max_pf_rx_rings = pf->max_rx_rings;
+ pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ pf->max_vnics = le16_to_cpu(resp->max_vnics);
+ pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
+ pf->max_vfs = le16_to_cpu(resp->max_vfs);
+ pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
+ pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
+ pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
+ pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+ pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+ pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+ } else {
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ vf->fw_fid = le16_to_cpu(resp->fid);
+ memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ if (!is_valid_ether_addr(vf->mac_addr))
+ random_ether_addr(vf->mac_addr);
+
+ vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ vf->max_vnics = le16_to_cpu(resp->max_vnics);
+ vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ }
+
+ bp->tx_push_thresh = 0;
+ if (resp->flags &
+ cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+ bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+
+hwrm_func_qcaps_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_func_reset(struct bnxt *bp)
+{
+ struct hwrm_func_reset_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
+ req.enables = 0;
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
+}
+
+static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_queue_qportcfg_input req = {0};
+ struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u8 i, *qptr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto qportcfg_exit;
+
+ if (!resp->max_configurable_queues) {
+ rc = -EINVAL;
+ goto qportcfg_exit;
+ }
+ bp->max_tc = resp->max_configurable_queues;
+ if (bp->max_tc > BNXT_MAX_QUEUE)
+ bp->max_tc = BNXT_MAX_QUEUE;
+
+ qptr = &resp->queue_id0;
+ for (i = 0; i < bp->max_tc; i++) {
+ bp->q_info[i].queue_id = *qptr++;
+ bp->q_info[i].queue_profile = *qptr++;
+ }
+
+qportcfg_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_ver_get(struct bnxt *bp)
+{
+ int rc;
+ struct hwrm_ver_get_input req = {0};
+ struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
+ req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req.hwrm_intf_min = HWRM_VERSION_MINOR;
+ req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto hwrm_ver_get_exit;
+
+ memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+
+ if (req.hwrm_intf_maj != resp->hwrm_intf_maj ||
+ req.hwrm_intf_min != resp->hwrm_intf_min ||
+ req.hwrm_intf_upd != resp->hwrm_intf_upd) {
+ netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n",
+ resp->hwrm_intf_maj, resp->hwrm_intf_min,
+ resp->hwrm_intf_upd, req.hwrm_intf_maj,
+ req.hwrm_intf_min, req.hwrm_intf_upd);
+ netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n");
+ }
+ snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
+ resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
+ resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
+
+hwrm_ver_get_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
+{
+ if (bp->vxlan_port_cnt) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ }
+ bp->vxlan_port_cnt = 0;
+ if (bp->nge_port_cnt) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+ }
+ bp->nge_port_cnt = 0;
+}
+
+static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
+{
+ int rc, i;
+ u32 tpa_flags = 0;
+
+ if (set_tpa)
+ tpa_flags = bp->flags & BNXT_FLAG_TPA;
+ for (i = 0; i < bp->nr_vnics; i++) {
+ rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+ if (rc) {
+			netdev_err(bp->dev, "hwrm vnic %d set tpa failure rc: %x\n",
+				   i, rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++)
+ bnxt_hwrm_vnic_set_rss(bp, i, false);
+}
+
+static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
+ bool irq_re_init)
+{
+ if (bp->vnic_info) {
+ bnxt_hwrm_clear_vnic_filter(bp);
+		/* clear all RSS settings before freeing the vnic contexts */
+ bnxt_hwrm_clear_vnic_rss(bp);
+ bnxt_hwrm_vnic_ctx_free(bp);
+		/* before freeing the vnics, undo the TPA settings */
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, false);
+ bnxt_hwrm_vnic_free(bp);
+ }
+ bnxt_hwrm_ring_free(bp, close_path);
+ bnxt_hwrm_ring_grp_free(bp);
+ if (irq_re_init) {
+ bnxt_hwrm_stat_ctx_free(bp);
+ bnxt_hwrm_free_tunnel_ports(bp);
+ }
+}
+
+static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+{
+ int rc;
+
+ /* allocate context for vnic */
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+ vnic_id, rc);
+ goto vnic_setup_err;
+ }
+ bp->rsscos_nr_ctxs++;
+
+ /* configure default vnic, ring grp */
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+ vnic_id, rc);
+ goto vnic_setup_err;
+ }
+
+ /* Enable RSS hashing on vnic */
+ rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
+ vnic_id, rc);
+ goto vnic_setup_err;
+ }
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
+ vnic_id, rc);
+ }
+ }
+
+vnic_setup_err:
+ return rc;
+}
+
+static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i, rc = 0;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ u16 vnic_id = i + 1;
+ u16 ring_id = i;
+
+ if (vnic_id >= bp->nr_vnics)
+ break;
+
+ bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+ vnic_id, rc);
+ break;
+ }
+ rc = bnxt_setup_vnic(bp, vnic_id);
+ if (rc)
+ break;
+ }
+ return rc;
+#else
+ return 0;
+#endif
+}
+
+static void bnxt_update_vf_mac(struct bnxt *bp)
+{
+ struct hwrm_func_qcaps_input req = {0};
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+ goto update_vf_mac_exit;
+
+ if (!is_valid_ether_addr(resp->perm_mac_address))
+ goto update_vf_mac_exit;
+
+ if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
+ goto update_vf_mac_exit;
+
+ memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+ memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+update_vf_mac_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
+{
+ int rc = 0;
+
+ if (irq_re_init) {
+ rc = bnxt_hwrm_stat_ctx_alloc(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
+ rc);
+ goto err_out;
+ }
+ }
+
+ rc = bnxt_hwrm_ring_alloc(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_hwrm_ring_grp_alloc(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
+ goto err_out;
+ }
+
+ /* default vnic 0 */
+ rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_setup_vnic(bp, 0);
+ if (rc)
+ goto err_out;
+
+ if (bp->flags & BNXT_FLAG_RFS) {
+ rc = bnxt_alloc_rfs_vnics(bp);
+ if (rc)
+ goto err_out;
+ }
+
+ if (bp->flags & BNXT_FLAG_TPA) {
+ rc = bnxt_set_tpa(bp, true);
+ if (rc)
+ goto err_out;
+ }
+
+ if (BNXT_VF(bp))
+ bnxt_update_vf_mac(bp);
+
+ /* Filter for default vnic 0 */
+ rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
+ if (rc) {
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+ goto err_out;
+ }
+ bp->vnic_info[0].uc_filter_count = 1;
+
+ bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST |
+ CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+
+ if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+ bp->vnic_info[0].rx_mask |=
+ CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ if (rc) {
+ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_hwrm_set_coal(bp);
+ if (rc)
+ netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
+ rc);
+
+ return 0;
+
+err_out:
+ bnxt_hwrm_resource_free(bp, 0, true);
+
+ return rc;
+}
+
+static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
+{
+ bnxt_hwrm_resource_free(bp, 1, irq_re_init);
+ return 0;
+}
+
+static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
+{
+ bnxt_init_rx_rings(bp);
+ bnxt_init_tx_rings(bp);
+ bnxt_init_ring_grps(bp, irq_re_init);
+ bnxt_init_vnics(bp);
+
+ return bnxt_init_chip(bp, irq_re_init);
+}
+
+static void bnxt_disable_int(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+}
+
+static void bnxt_enable_int(struct bnxt *bp)
+{
+ int i;
+
+ atomic_set(&bp->intr_sem, 0);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+}
+
+static int bnxt_set_real_num_queues(struct bnxt *bp)
+{
+ int rc;
+ struct net_device *dev = bp->dev;
+
+ rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
+ if (rc)
+ return rc;
+
+ rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+	if (bp->rx_nr_rings) {
+		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
+		if (!dev->rx_cpu_rmap)
+			rc = -ENOMEM;
+	}
+#endif
+
+ return rc;
+}
+
+static int bnxt_setup_msix(struct bnxt *bp)
+{
+ struct msix_entry *msix_ent;
+ struct net_device *dev = bp->dev;
+ int i, total_vecs, rc = 0;
+ const int len = sizeof(bp->irq_tbl[0].name);
+
+ bp->flags &= ~BNXT_FLAG_USING_MSIX;
+ total_vecs = bp->cp_nr_rings;
+
+ msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!msix_ent)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vecs; i++) {
+ msix_ent[i].entry = i;
+ msix_ent[i].vector = 0;
+ }
+
+ total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs);
+ if (total_vecs < 0) {
+ rc = -ENODEV;
+ goto msix_setup_exit;
+ }
+
+ bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
+ if (bp->irq_tbl) {
+ int tcs;
+
+ /* Trim rings based upon num of vectors allocated */
+ bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings);
+ bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings);
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ tcs = netdev_get_num_tc(dev);
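+		/* e.g. 8 tx rings with 2 TCs: 4 rings per TC, with TC0 on
+		 * queues 0-3 and TC1 on queues 4-7 via
+		 * netdev_set_tc_queue().
+		 */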
+ if (tcs > 1) {
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
+ if (bp->tx_nr_rings_per_tc == 0) {
+ netdev_reset_tc(dev);
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ } else {
+ int i, off, count;
+
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+ for (i = 0; i < tcs; i++) {
+ count = bp->tx_nr_rings_per_tc;
+ off = i * count;
+ netdev_set_tc_queue(dev, i, count, off);
+ }
+ }
+ }
+ bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bp->irq_tbl[i].vector = msix_ent[i].vector;
+ snprintf(bp->irq_tbl[i].name, len,
+ "%s-%s-%d", dev->name, "TxRx", i);
+ bp->irq_tbl[i].handler = bnxt_msix;
+ }
+ rc = bnxt_set_real_num_queues(bp);
+ if (rc)
+ goto msix_setup_exit;
+ } else {
+ rc = -ENOMEM;
+ goto msix_setup_exit;
+ }
+ bp->flags |= BNXT_FLAG_USING_MSIX;
+ kfree(msix_ent);
+ return 0;
+
+msix_setup_exit:
+ netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
+ pci_disable_msix(bp->pdev);
+ kfree(msix_ent);
+ return rc;
+}
+
+static int bnxt_setup_inta(struct bnxt *bp)
+{
+ int rc;
+ const int len = sizeof(bp->irq_tbl[0].name);
+
+ if (netdev_get_num_tc(bp->dev))
+ netdev_reset_tc(bp->dev);
+
+ bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
+ if (!bp->irq_tbl) {
+ rc = -ENOMEM;
+ return rc;
+ }
+ bp->rx_nr_rings = 1;
+ bp->tx_nr_rings = 1;
+ bp->cp_nr_rings = 1;
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->irq_tbl[0].vector = bp->pdev->irq;
+ snprintf(bp->irq_tbl[0].name, len,
+ "%s-%s-%d", bp->dev->name, "TxRx", 0);
+ bp->irq_tbl[0].handler = bnxt_inta;
+ rc = bnxt_set_real_num_queues(bp);
+ return rc;
+}
+
+static int bnxt_setup_int_mode(struct bnxt *bp)
+{
+ int rc = 0;
+
+ if (bp->flags & BNXT_FLAG_MSIX_CAP)
+ rc = bnxt_setup_msix(bp);
+
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+ /* fallback to INTA */
+ rc = bnxt_setup_inta(bp);
+ }
+ return rc;
+}
+
+static void bnxt_free_irq(struct bnxt *bp)
+{
+ struct bnxt_irq *irq;
+ int i;
+
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
+ bp->dev->rx_cpu_rmap = NULL;
+#endif
+ if (!bp->irq_tbl)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ irq = &bp->irq_tbl[i];
+ if (irq->requested)
+ free_irq(irq->vector, bp->bnapi[i]);
+ irq->requested = 0;
+ }
+ if (bp->flags & BNXT_FLAG_USING_MSIX)
+ pci_disable_msix(bp->pdev);
+ kfree(bp->irq_tbl);
+ bp->irq_tbl = NULL;
+}
+
+static int bnxt_request_irq(struct bnxt *bp)
+{
+ int i, rc = 0;
+ unsigned long flags = 0;
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
+#endif
+
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX))
+ flags = IRQF_SHARED;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_irq *irq = &bp->irq_tbl[i];
+#ifdef CONFIG_RFS_ACCEL
+ if (rmap && (i < bp->rx_nr_rings)) {
+ rc = irq_cpu_rmap_add(rmap, irq->vector);
+ if (rc)
+ netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
+ i);
+ }
+#endif
+ rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+ bp->bnapi[i]);
+ if (rc)
+ break;
+
+ irq->requested = 1;
+ }
+ return rc;
+}
+
+static void bnxt_del_napi(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+
+ napi_hash_del(&bnapi->napi);
+ netif_napi_del(&bnapi->napi);
+ }
+}
+
+static void bnxt_init_napi(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+
+ if (bp->flags & BNXT_FLAG_USING_MSIX) {
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ netif_napi_add(bp->dev, &bnapi->napi,
+ bnxt_poll, 64);
+ napi_hash_add(&bnapi->napi);
+ }
+ } else {
+ bnapi = bp->bnapi[0];
+ netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+ napi_hash_add(&bnapi->napi);
+ }
+}
+
+static void bnxt_disable_napi(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ napi_disable(&bp->bnapi[i]->napi);
+ bnxt_disable_poll(bp->bnapi[i]);
+ }
+}
+
+static void bnxt_enable_napi(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bnxt_enable_poll(bp->bnapi[i]);
+ napi_enable(&bp->bnapi[i]->napi);
+ }
+}
+
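+/* Mark each TX ring as closing under the TX queue lock so the xmit
+ * path sees the state change, then stop all TX queues and drop the
+ * carrier.
+ */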
+static void bnxt_tx_disable(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+
+ if (bp->bnapi) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(bp->dev, i);
+ __netif_tx_lock(txq, smp_processor_id());
+ txr->dev_state = BNXT_DEV_STATE_CLOSING;
+ __netif_tx_unlock(txq);
+ }
+ }
+ /* Stop all TX queues */
+ netif_tx_disable(bp->dev);
+ netif_carrier_off(bp->dev);
+}
+
+static void bnxt_tx_enable(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(bp->dev, i);
+ txr->dev_state = 0;
+ }
+ netif_tx_wake_all_queues(bp->dev);
+ if (bp->link_info.link_up)
+ netif_carrier_on(bp->dev);
+}
+
+static void bnxt_report_link(struct bnxt *bp)
+{
+ if (bp->link_info.link_up) {
+ const char *duplex;
+ const char *flow_ctrl;
+ u16 speed;
+
+ netif_carrier_on(bp->dev);
+ if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
+ duplex = "full";
+ else
+ duplex = "half";
+ if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
+ flow_ctrl = "ON - receive & transmit";
+ else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
+ flow_ctrl = "ON - transmit";
+ else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
+ flow_ctrl = "ON - receive";
+ else
+ flow_ctrl = "none";
+ speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+ netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+ speed, duplex, flow_ctrl);
+ } else {
+ netif_carrier_off(bp->dev);
+ netdev_err(bp->dev, "NIC Link is Down\n");
+ }
+}
+
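+/* Query the current PHY/link state from firmware (HWRM_PORT_PHY_QCFG)
+ * and cache the result in bp->link_info.  If @chng_link_state is set,
+ * report link up/down transitions to the stack.
+ */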
+static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
+{
+ int rc = 0;
+ struct bnxt_link_info *link_info = &bp->link_info;
+ struct hwrm_port_phy_qcfg_input req = {0};
+ struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u8 link_up = link_info->link_up;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+ }
+
+ memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
+ link_info->phy_link_status = resp->link;
+ link_info->duplex = resp->duplex;
+ link_info->pause = resp->pause;
+ link_info->auto_mode = resp->auto_mode;
+ link_info->auto_pause_setting = resp->auto_pause;
+ link_info->force_pause_setting = resp->force_pause;
+ link_info->duplex_setting = resp->duplex_setting;
+ if (link_info->phy_link_status == BNXT_LINK_LINK)
+ link_info->link_speed = le16_to_cpu(resp->link_speed);
+ else
+ link_info->link_speed = 0;
+ link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
+ link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
+ link_info->support_speeds = le16_to_cpu(resp->support_speeds);
+ link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
+ link_info->preemphasis = le32_to_cpu(resp->preemphasis);
+ link_info->phy_ver[0] = resp->phy_maj;
+ link_info->phy_ver[1] = resp->phy_min;
+ link_info->phy_ver[2] = resp->phy_bld;
+ link_info->media_type = resp->media_type;
+ link_info->transceiver = resp->transceiver_type;
+ link_info->phy_addr = resp->phy_addr;
+
+ /* TODO: need to add more logic to report VF link */
+ if (chng_link_state) {
+ if (link_info->phy_link_status == BNXT_LINK_LINK)
+ link_info->link_up = 1;
+ else
+ link_info->link_up = 0;
+ if (link_up != link_info->link_up)
+ bnxt_report_link(bp);
+ } else {
+		/* always set link down if not required to update link state */
+ link_info->link_up = 0;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return 0;
+}
+
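+/* Fill in the pause fields of a PORT_PHY_CFG request from the
+ * requested flow control settings, using the autoneg pause fields
+ * when flow control autoneg is on and the forced fields otherwise.
+ */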
+static void
+bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
+{
+ if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+ req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
+ } else {
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+ req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+ req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
+ }
+}
+
+static void bnxt_hwrm_set_link_common(struct bnxt *bp,
+ struct hwrm_port_phy_cfg_input *req)
+{
+ u8 autoneg = bp->link_info.autoneg;
+ u16 fw_link_speed = bp->link_info.req_link_speed;
+ u32 advertising = bp->link_info.advertising;
+
+ if (autoneg & BNXT_AUTONEG_SPEED) {
+ req->auto_mode |=
+ PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
+
+ req->enables |= cpu_to_le32(
+ PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
+ req->auto_link_speed_mask = cpu_to_le16(advertising);
+
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
+ req->flags |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
+ } else {
+ req->force_link_speed = cpu_to_le16(fw_link_speed);
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+ }
+
+ /* currently don't support half duplex */
+ req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
+ /* tell chimp that the setting takes effect immediately */
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+}
+
+int bnxt_hwrm_set_pause(struct bnxt *bp)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+ bnxt_hwrm_set_pause_common(bp, &req);
+
+ if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
+ bp->link_info.force_link_chng)
+ bnxt_hwrm_set_link_common(bp, &req);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
+		/* since changing the pause setting doesn't trigger any link
+		 * change event, the driver needs to update the current pause
+		 * result upon successful return of the phy_cfg command
+		 */
+ bp->link_info.pause =
+ bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
+ bp->link_info.auto_pause_setting = 0;
+ if (!bp->link_info.force_link_chng)
+ bnxt_report_link(bp);
+ }
+ bp->link_info.force_link_chng = false;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+ if (set_pause)
+ bnxt_hwrm_set_pause_common(bp, &req);
+
+ bnxt_hwrm_set_link_common(bp, &req);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
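+/* Compare the driver's requested link settings with what the firmware
+ * reports and reconfigure the PHY only if they differ.
+ */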
+static int bnxt_update_phy_setting(struct bnxt *bp)
+{
+ int rc;
+ bool update_link = false;
+ bool update_pause = false;
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ rc = bnxt_update_link(bp, true);
+ if (rc) {
+ netdev_err(bp->dev, "failed to update link (rc: %x)\n",
+ rc);
+ return rc;
+ }
+ if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+ link_info->auto_pause_setting != link_info->req_flow_ctrl)
+ update_pause = true;
+ if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+ link_info->force_pause_setting != link_info->req_flow_ctrl)
+ update_pause = true;
+ if (link_info->req_duplex != link_info->duplex_setting)
+ update_link = true;
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ if (BNXT_AUTO_MODE(link_info->auto_mode))
+ update_link = true;
+ if (link_info->req_link_speed != link_info->force_link_speed)
+ update_link = true;
+ } else {
+ if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
+ update_link = true;
+ if (link_info->advertising != link_info->auto_link_speeds)
+ update_link = true;
+ if (link_info->req_link_speed != link_info->auto_link_speed)
+ update_link = true;
+ }
+
+ if (update_link)
+ rc = bnxt_hwrm_set_link_setting(bp, update_pause);
+ else if (update_pause)
+ rc = bnxt_hwrm_set_pause(bp);
+ if (rc) {
+ netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
+ rc);
+ return rc;
+ }
+
+ return rc;
+}
+
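+/* Core open path: set up interrupts (optionally), allocate rings and
+ * other memory, register NAPI, initialize the NIC through firmware,
+ * update PHY settings, and finally enable interrupts and TX queues.
+ */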
+static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+ netif_carrier_off(bp->dev);
+ if (irq_re_init) {
+ rc = bnxt_setup_int_mode(bp);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
+ rc);
+ return rc;
+ }
+ }
+ if ((bp->flags & BNXT_FLAG_RFS) &&
+ !(bp->flags & BNXT_FLAG_USING_MSIX)) {
+ /* disable RFS if falling back to INTA */
+ bp->dev->hw_features &= ~NETIF_F_NTUPLE;
+ bp->flags &= ~BNXT_FLAG_RFS;
+ }
+
+ rc = bnxt_alloc_mem(bp, irq_re_init);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+ goto open_err_free_mem;
+ }
+
+ if (irq_re_init) {
+ bnxt_init_napi(bp);
+ rc = bnxt_request_irq(bp);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
+ goto open_err;
+ }
+ }
+
+ bnxt_enable_napi(bp);
+
+ rc = bnxt_init_nic(bp, irq_re_init);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+ goto open_err;
+ }
+
+ if (link_re_init) {
+ rc = bnxt_update_phy_setting(bp);
+ if (rc)
+ goto open_err;
+ }
+
+ if (irq_re_init) {
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+ vxlan_get_rx_port(bp->dev);
+#endif
+ if (!bnxt_hwrm_tunnel_dst_port_alloc(
+ bp, htons(0x17c1),
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
+ bp->nge_port_cnt = 1;
+ }
+
+ bp->state = BNXT_STATE_OPEN;
+ bnxt_enable_int(bp);
+ /* Enable TX queues */
+ bnxt_tx_enable(bp);
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+ return 0;
+
+open_err:
+ bnxt_disable_napi(bp);
+ bnxt_del_napi(bp);
+
+open_err_free_mem:
+ bnxt_free_skbs(bp);
+ bnxt_free_irq(bp);
+ bnxt_free_mem(bp, true);
+ return rc;
+}
+
+/* rtnl_lock held */
+int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+ rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
+ if (rc) {
+ netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
+ dev_close(bp->dev);
+ }
+ return rc;
+}
+
+static int bnxt_open(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
+ rc);
+ rc = -1;
+ return rc;
+ }
+ return __bnxt_open_nic(bp, true, true);
+}
+
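+/* Mask device interrupts and use synchronize_irq() to wait for any
+ * in-flight handlers on the vectors to complete before the rings are
+ * torn down.
+ */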
+static void bnxt_disable_int_sync(struct bnxt *bp)
+{
+ int i;
+
+ atomic_inc(&bp->intr_sem);
+ if (!netif_running(bp->dev))
+ return;
+
+ bnxt_disable_int(bp);
+ for (i = 0; i < bp->cp_nr_rings; i++)
+ synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+ if (bp->sriov_cfg) {
+ rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+ !bp->sriov_cfg,
+ BNXT_SRIOV_CFG_WAIT_TMO);
+ if (rc)
+ netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+ }
+#endif
+ /* Change device state to avoid TX queue wake up's */
+ bnxt_tx_disable(bp);
+	/* Change device state to avoid TX queue wake-ups */
+ bp->state = BNXT_STATE_CLOSED;
+ cancel_work_sync(&bp->sp_task);
+
+ /* Flush rings before disabling interrupts */
+ bnxt_shutdown_nic(bp, irq_re_init);
+
+ /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
+
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ del_timer_sync(&bp->timer);
+ bnxt_free_skbs(bp);
+
+ if (irq_re_init) {
+ bnxt_free_irq(bp);
+ bnxt_del_napi(bp);
+ }
+ bnxt_free_mem(bp, irq_re_init);
+ return rc;
+}
+
+static int bnxt_close(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ bnxt_close_nic(bp, true, true);
+ return 0;
+}
+
+/* rtnl_lock held */
+static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ /* fallthru */
+ case SIOCGMIIREG: {
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ return 0;
+ }
+
+ case SIOCSMIIREG:
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ return 0;
+
+ default:
+ /* do nothing */
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
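+/* Fill @stats by summing the per-completion-ring hardware counters
+ * into the netdev stats structure.
+ */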
+static struct rtnl_link_stats64 *
+bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ u32 i;
+ struct bnxt *bp = netdev_priv(dev);
+
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+
+ if (!bp->bnapi)
+ return stats;
+
+ /* TODO check if we need to synchronize with bnxt_close path */
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct ctx_hw_stats *hw_stats = cpr->hw_stats;
+
+ stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
+ stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
+ stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
+
+ stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
+ stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
+ stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
+
+ stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
+ stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
+ stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
+
+ stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
+ stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
+ stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
+
+ stats->rx_missed_errors +=
+ le64_to_cpu(hw_stats->rx_discard_pkts);
+
+ stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
+
+ stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
+
+ stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
+ }
+
+ return stats;
+}
+
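+/* Copy the device's multicast list into the default VNIC and return
+ * true if it changed.  If there are more addresses than the hardware
+ * filter table can hold, fall back to the all-multicast RX mask.
+ */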
+static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct netdev_hw_addr *ha;
+ u8 *haddr;
+ int mc_count = 0;
+ bool update = false;
+ int off = 0;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ if (mc_count >= BNXT_MAX_MC_ADDRS) {
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ return false;
+ }
+ haddr = ha->addr;
+ if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
+ memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
+ update = true;
+ }
+ off += ETH_ALEN;
+ mc_count++;
+ }
+ if (mc_count)
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+
+ if (mc_count != vnic->mc_list_count) {
+ vnic->mc_list_count = mc_count;
+ update = true;
+ }
+ return update;
+}
+
+static bool bnxt_uc_list_updated(struct bnxt *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct netdev_hw_addr *ha;
+ int off = 0;
+
+ if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
+ return true;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
+ return true;
+
+ off += ETH_ALEN;
+ }
+ return false;
+}
+
+static void bnxt_set_rx_mode(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ u32 mask = vnic->rx_mask;
+ bool mc_update = false;
+ bool uc_update;
+
+ if (!netif_running(dev))
+ return;
+
+ mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
+ CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
+ CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+
+ /* Only allow PF to be in promiscuous mode */
+ if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+ mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+ uc_update = bnxt_uc_list_updated(bp);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ } else {
+ mc_update = bnxt_mc_list_updated(bp, &mask);
+ }
+
+ if (mask != vnic->rx_mask || uc_update || mc_update) {
+ vnic->rx_mask = mask;
+
+ set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+}
+
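+/* Apply the RX mode computed in bnxt_set_rx_mode().  Runs from the
+ * sp_task workqueue because the HWRM filter calls below can sleep.
+ */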
+static void bnxt_cfg_rx_mode(struct bnxt *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct netdev_hw_addr *ha;
+ int i, off = 0, rc;
+ bool uc_update;
+
+ netif_addr_lock_bh(dev);
+ uc_update = bnxt_uc_list_updated(bp);
+ netif_addr_unlock_bh(dev);
+
+ if (!uc_update)
+ goto skip_uc;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 1; i < vnic->uc_filter_count; i++) {
+ struct hwrm_cfa_l2_filter_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
+ -1);
+
+ req.l2_filter_id = vnic->fw_l2_filter_id[i];
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ vnic->uc_filter_count = 1;
+
+ netif_addr_lock_bh(dev);
+ if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ } else {
+ netdev_for_each_uc_addr(ha, dev) {
+ memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
+ off += ETH_ALEN;
+ vnic->uc_filter_count++;
+ }
+ }
+ netif_addr_unlock_bh(dev);
+
+ for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
+ rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
+ if (rc) {
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
+ rc);
+ vnic->uc_filter_count = i;
+ }
+ }
+
+skip_uc:
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ if (rc)
+ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+ rc);
+}
+
+static netdev_features_t bnxt_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ return features;
+}
+
+static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 flags = bp->flags;
+ u32 changes;
+ int rc = 0;
+ bool re_init = false;
+ bool update_tpa = false;
+
+ flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
+ if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+ flags |= BNXT_FLAG_GRO;
+ if (features & NETIF_F_LRO)
+ flags |= BNXT_FLAG_LRO;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ flags |= BNXT_FLAG_STRIP_VLAN;
+
+ if (features & NETIF_F_NTUPLE)
+ flags |= BNXT_FLAG_RFS;
+
+ changes = flags ^ bp->flags;
+ if (changes & BNXT_FLAG_TPA) {
+ update_tpa = true;
+ if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
+ (flags & BNXT_FLAG_TPA) == 0)
+ re_init = true;
+ }
+
+ if (changes & ~BNXT_FLAG_TPA)
+ re_init = true;
+
+ if (flags != bp->flags) {
+ u32 old_flags = bp->flags;
+
+ bp->flags = flags;
+
+ if (!netif_running(dev)) {
+ if (update_tpa)
+ bnxt_set_ring_params(bp);
+ return rc;
+ }
+
+ if (re_init) {
+ bnxt_close_nic(bp, false, false);
+ if (update_tpa)
+ bnxt_set_ring_params(bp);
+
+ return bnxt_open_nic(bp, false, false);
+ }
+ if (update_tpa) {
+ rc = bnxt_set_tpa(bp,
+ (flags & BNXT_FLAG_TPA) ?
+ true : false);
+ if (rc)
+ bp->flags = old_flags;
+ }
+ }
+ return rc;
+}
+
+static void bnxt_dbg_dump_states(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ rxr = &bnapi->rx_ring;
+ cpr = &bnapi->cp_ring;
+ if (netif_msg_drv(bp)) {
+ netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
+ i, txr->tx_ring_struct.fw_ring_id,
+ txr->tx_prod, txr->tx_cons);
+ netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
+ i, rxr->rx_ring_struct.fw_ring_id,
+ rxr->rx_prod,
+ rxr->rx_agg_ring_struct.fw_ring_id,
+ rxr->rx_agg_prod, rxr->rx_sw_agg_prod);
+ netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
+ i, cpr->cp_ring_struct.fw_ring_id,
+ cpr->cp_raw_cons);
+ }
+ }
+}
+
+static void bnxt_reset_task(struct bnxt *bp)
+{
+ bnxt_dbg_dump_states(bp);
+ if (netif_running(bp->dev))
+		bnxt_tx_disable(bp); /* prevent tx timeout again */
+}
+
+static void bnxt_tx_timeout(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
+ set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnxt_poll_controller(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_irq *irq = &bp->irq_tbl[i];
+
+ disable_irq(irq->vector);
+ irq->handler(irq->vector, bp->bnapi[i]);
+ enable_irq(irq->vector);
+ }
+}
+#endif
+
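+/* Periodic timer.  It currently only re-arms itself; the
+ * bnxt_restart_timer label appears to be a placeholder for future
+ * periodic work.
+ */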
+static void bnxt_timer(unsigned long data)
+{
+ struct bnxt *bp = (struct bnxt *)data;
+ struct net_device *dev = bp->dev;
+
+ if (!netif_running(dev))
+ return;
+
+ if (atomic_read(&bp->intr_sem) != 0)
+ goto bnxt_restart_timer;
+
+bnxt_restart_timer:
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+static void bnxt_cfg_ntp_filters(struct bnxt *);
+
+static void bnxt_sp_task(struct work_struct *work)
+{
+ struct bnxt *bp = container_of(work, struct bnxt, sp_task);
+ int rc;
+
+ if (bp->state != BNXT_STATE_OPEN)
+ return;
+
+ if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
+ bnxt_cfg_rx_mode(bp);
+
+ if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
+ bnxt_cfg_ntp_filters(bp);
+ if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+ rc = bnxt_update_link(bp, true);
+ if (rc)
+ netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+ rc);
+ }
+ if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
+ bnxt_hwrm_exec_fwd_req(bp);
+ if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_tunnel_dst_port_alloc(
+ bp, bp->vxlan_port,
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ }
+ if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ }
+ if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+ bnxt_reset_task(bp);
+}
+
+static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
+{
+ int rc;
+ struct bnxt *bp = netdev_priv(dev);
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* enable device (incl. PCI PM wakeup), and bus-mastering */
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ goto init_err;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto init_err_disable;
+ }
+
+ rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto init_err_disable;
+ }
+
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+		rc = -EIO;
+		goto init_err_disable;
+	}
+
+ pci_set_master(pdev);
+
+ bp->dev = dev;
+ bp->pdev = pdev;
+
+ bp->bar0 = pci_ioremap_bar(pdev, 0);
+ if (!bp->bar0) {
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+
+ bp->bar1 = pci_ioremap_bar(pdev, 2);
+ if (!bp->bar1) {
+ dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+
+ bp->bar2 = pci_ioremap_bar(pdev, 4);
+ if (!bp->bar2) {
+ dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+
+ INIT_WORK(&bp->sp_task, bnxt_sp_task);
+
+ spin_lock_init(&bp->ntp_fltr_lock);
+
+ bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
+ bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
+
+ bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
+ bp->coal_bufs = 20;
+ bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
+ bp->coal_bufs_irq = 2;
+
+ init_timer(&bp->timer);
+ bp->timer.data = (unsigned long)bp;
+ bp->timer.function = bnxt_timer;
+ bp->current_interval = BNXT_TIMER_INTERVAL;
+
+ bp->state = BNXT_STATE_CLOSED;
+
+ return 0;
+
+init_err_release:
+ if (bp->bar2) {
+ pci_iounmap(pdev, bp->bar2);
+ bp->bar2 = NULL;
+ }
+
+ if (bp->bar1) {
+ pci_iounmap(pdev, bp->bar1);
+ bp->bar1 = NULL;
+ }
+
+ if (bp->bar0) {
+ pci_iounmap(pdev, bp->bar0);
+ bp->bar0 = NULL;
+ }
+
+ pci_release_regions(pdev);
+
+init_err_disable:
+ pci_disable_device(pdev);
+
+init_err:
+ return rc;
+}
+
+/* rtnl_lock held */
+static int bnxt_change_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ return 0;
+}
+
+/* rtnl_lock held */
+static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (new_mtu < 60 || new_mtu > 9000)
+ return -EINVAL;
+
+ if (netif_running(dev))
+ bnxt_close_nic(bp, false, false);
+
+ dev->mtu = new_mtu;
+ bnxt_set_ring_params(bp);
+
+ if (netif_running(dev))
+ return bnxt_open_nic(bp, false, false);
+
+ return 0;
+}
+
+static int bnxt_setup_tc(struct net_device *dev, u8 tc)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (tc > bp->max_tc) {
+ netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
+ tc, bp->max_tc);
+ return -EINVAL;
+ }
+
+ if (netdev_get_num_tc(dev) == tc)
+ return 0;
+
+ if (tc) {
+ int max_rx_rings, max_tx_rings;
+
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ if (bp->tx_nr_rings_per_tc * tc > max_tx_rings)
+ return -ENOMEM;
+ }
+
+	/* Need to close the device and do hw resource re-allocation */
+ if (netif_running(bp->dev))
+ bnxt_close_nic(bp, true, false);
+
+ if (tc) {
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
+ netdev_set_num_tc(dev, tc);
+ } else {
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ netdev_reset_tc(dev);
+ }
+ bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+
+ if (netif_running(bp->dev))
+ return bnxt_open_nic(bp, true, false);
+
+ return 0;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
+ struct bnxt_ntuple_filter *f2)
+{
+ struct flow_keys *keys1 = &f1->fkeys;
+ struct flow_keys *keys2 = &f2->fkeys;
+
+ if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
+ keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
+ keys1->ports.ports == keys2->ports.ports &&
+ keys1->basic.ip_proto == keys2->basic.ip_proto &&
+ keys1->basic.n_proto == keys2->basic.n_proto &&
+ ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
+ return true;
+
+ return false;
+}
+
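+/* aRFS callback: dissect the flow, drop it if an identical filter is
+ * already queued, reserve a software filter ID from the bitmap, and
+ * defer the actual hardware filter programming to the sp_task.
+ */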
+static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ntuple_filter *fltr, *new_fltr;
+ struct flow_keys *fkeys;
+ struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+	int rc = 0, idx, bit_id;
+ struct hlist_head *head;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
+ if (!new_fltr)
+ return -ENOMEM;
+
+ fkeys = &new_fltr->fkeys;
+ if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+
+ if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
+ ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
+ (fkeys->basic.ip_proto != IPPROTO_UDP))) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+
+ memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
+
+ idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
+ head = &bp->ntp_fltr_hash_tbl[idx];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (bnxt_fltr_match(fltr, new_fltr)) {
+ rcu_read_unlock();
+ rc = 0;
+ goto err_free;
+ }
+ }
+ rcu_read_unlock();
+
+	spin_lock_bh(&bp->ntp_fltr_lock);
+	/* use a signed local so a bitmap allocation failure is detectable */
+	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
+					 BNXT_NTP_FLTR_MAX_FLTR, 0);
+	if (bit_id < 0) {
+		spin_unlock_bh(&bp->ntp_fltr_lock);
+		rc = -ENOMEM;
+		goto err_free;
+	}
+
+	new_fltr->sw_id = bit_id;
+ new_fltr->flow_id = flow_id;
+ new_fltr->rxq = rxq_index;
+ hlist_add_head_rcu(&new_fltr->hash, head);
+ bp->ntp_fltr_count++;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+
+ set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+
+ return new_fltr->sw_id;
+
+err_free:
+ kfree(new_fltr);
+ return rc;
+}
+
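+/* Walk the ntuple filter hash table from the sp_task: program new
+ * filters into the hardware, and free filters that RPS says have
+ * expired or that failed to be programmed.
+ */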
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ struct bnxt_ntuple_filter *fltr;
+ int rc;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+ bool del = false;
+
+ if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
+ if (rps_may_expire_flow(bp->dev, fltr->rxq,
+ fltr->flow_id,
+ fltr->sw_id)) {
+ bnxt_hwrm_cfa_ntuple_filter_free(bp,
+ fltr);
+ del = true;
+ }
+ } else {
+ rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
+ fltr);
+ if (rc)
+ del = true;
+ else
+ set_bit(BNXT_FLTR_VALID, &fltr->state);
+ }
+
+ if (del) {
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ hlist_del_rcu(&fltr->hash);
+ bp->ntp_fltr_count--;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ synchronize_rcu();
+ clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+ kfree(fltr);
+ }
+ }
+ }
+}
+
+#else
+
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+ __be16 port)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return;
+
+ if (sa_family != AF_INET6 && sa_family != AF_INET)
+ return;
+
+ if (bp->vxlan_port_cnt && bp->vxlan_port != port)
+ return;
+
+ bp->vxlan_port_cnt++;
+ if (bp->vxlan_port_cnt == 1) {
+ bp->vxlan_port = port;
+ set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+}
+
+static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+ __be16 port)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return;
+
+ if (sa_family != AF_INET6 && sa_family != AF_INET)
+ return;
+
+ if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
+ bp->vxlan_port_cnt--;
+
+ if (bp->vxlan_port_cnt == 0) {
+ set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+ }
+}
+
+static const struct net_device_ops bnxt_netdev_ops = {
+ .ndo_open = bnxt_open,
+ .ndo_start_xmit = bnxt_start_xmit,
+ .ndo_stop = bnxt_close,
+ .ndo_get_stats64 = bnxt_get_stats64,
+ .ndo_set_rx_mode = bnxt_set_rx_mode,
+ .ndo_do_ioctl = bnxt_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = bnxt_change_mac_addr,
+ .ndo_change_mtu = bnxt_change_mtu,
+ .ndo_fix_features = bnxt_fix_features,
+ .ndo_set_features = bnxt_set_features,
+ .ndo_tx_timeout = bnxt_tx_timeout,
+#ifdef CONFIG_BNXT_SRIOV
+ .ndo_get_vf_config = bnxt_get_vf_config,
+ .ndo_set_vf_mac = bnxt_set_vf_mac,
+ .ndo_set_vf_vlan = bnxt_set_vf_vlan,
+ .ndo_set_vf_rate = bnxt_set_vf_bw,
+ .ndo_set_vf_link_state = bnxt_set_vf_link_state,
+ .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bnxt_poll_controller,
+#endif
+ .ndo_setup_tc = bnxt_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = bnxt_rx_flow_steer,
+#endif
+ .ndo_add_vxlan_port = bnxt_add_vxlan_port,
+ .ndo_del_vxlan_port = bnxt_del_vxlan_port,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = bnxt_busy_poll,
+#endif
+};
+
+static void bnxt_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (BNXT_PF(bp))
+ bnxt_sriov_disable(bp);
+
+ unregister_netdev(dev);
+ cancel_work_sync(&bp->sp_task);
+ bp->sp_event = 0;
+
+ bnxt_free_hwrm_resources(bp);
+ pci_iounmap(pdev, bp->bar2);
+ pci_iounmap(pdev, bp->bar1);
+ pci_iounmap(pdev, bp->bar0);
+ free_netdev(dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int bnxt_probe_phy(struct bnxt *bp)
+{
+ int rc = 0;
+ struct bnxt_link_info *link_info = &bp->link_info;
+ char phy_ver[PHY_VER_STR_LEN];
+
+ rc = bnxt_update_link(bp, false);
+ if (rc) {
+ netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
+ rc);
+ return rc;
+ }
+
+	/* Initialize the ethtool settings copy with NVM settings */
+ if (BNXT_AUTO_MODE(link_info->auto_mode))
+ link_info->autoneg |= BNXT_AUTONEG_SPEED;
+
+ if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ link_info->req_flow_ctrl = link_info->auto_pause_setting;
+ } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ link_info->req_flow_ctrl = link_info->force_pause_setting;
+ }
+ link_info->req_duplex = link_info->duplex_setting;
+ if (link_info->autoneg & BNXT_AUTONEG_SPEED)
+ link_info->req_link_speed = link_info->auto_link_speed;
+ else
+ link_info->req_link_speed = link_info->force_link_speed;
+ link_info->advertising = link_info->auto_link_speeds;
+ snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
+ link_info->phy_ver[0],
+ link_info->phy_ver[1],
+ link_info->phy_ver[2]);
+ strcat(bp->fw_ver_str, phy_ver);
+ return rc;
+}
+
+static int bnxt_get_max_irq(struct pci_dev *pdev)
+{
+ u16 ctrl;
+
+ if (!pdev->msix_cap)
+ return 1;
+
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
+
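+/* Compute the maximum usable RX and TX rings for this function,
+ * bounded by the available IRQs, completion rings, and stat contexts.
+ * With aggregation rings enabled, each RX ring consumes two.
+ */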
+void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
+{
+ int max_rings;
+
+ if (BNXT_PF(bp)) {
+ *max_tx = bp->pf.max_pf_tx_rings;
+ *max_rx = bp->pf.max_pf_rx_rings;
+ max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+ max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
+ } else {
+ *max_tx = bp->vf.max_tx_rings;
+ *max_rx = bp->vf.max_rx_rings;
+ max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
+ max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
+ }
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ *max_rx >>= 1;
+
+ *max_rx = min_t(int, *max_rx, max_rings);
+ *max_tx = min_t(int, *max_tx, max_rings);
+}
+
+static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int version_printed;
+ struct net_device *dev;
+ struct bnxt *bp;
+ int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings;
+
+ if (version_printed++ == 0)
+ pr_info("%s", version);
+
+ max_irqs = bnxt_get_max_irq(pdev);
+ dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
+ if (!dev)
+ return -ENOMEM;
+
+ bp = netdev_priv(dev);
+
+ if (bnxt_vf_pciid(ent->driver_data))
+ bp->flags |= BNXT_FLAG_VF;
+
+ if (pdev->msix_cap) {
+ bp->flags |= BNXT_FLAG_MSIX_CAP;
+ if (BNXT_PF(bp))
+ bp->flags |= BNXT_FLAG_RFS;
+ }
+
+ rc = bnxt_init_board(pdev, dev);
+ if (rc < 0)
+ goto init_err_free;
+
+ dev->netdev_ops = &bnxt_netdev_ops;
+ dev->watchdog_timeo = BNXT_TX_TIMEOUT;
+ dev->ethtool_ops = &bnxt_ethtool_ops;
+
+ pci_set_drvdata(pdev, dev);
+
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
+
+ if (bp->flags & BNXT_FLAG_RFS)
+ dev->hw_features |= NETIF_F_NTUPLE;
+
+ dev->hw_enc_features =
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+ dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+ dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+#ifdef CONFIG_BNXT_SRIOV
+ init_waitqueue_head(&bp->sriov_cfg_wait);
+#endif
+ rc = bnxt_alloc_hwrm_resources(bp);
+ if (rc)
+ goto init_err;
+
+ mutex_init(&bp->hwrm_cmd_lock);
+ bnxt_hwrm_ver_get(bp);
+
+ rc = bnxt_hwrm_func_drv_rgtr(bp);
+ if (rc)
+ goto init_err;
+
+ /* Get the MAX capabilities for this function */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
+ rc);
+ rc = -1;
+ goto init_err;
+ }
+
+ rc = bnxt_hwrm_queue_qportcfg(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
+ rc);
+ rc = -1;
+ goto init_err;
+ }
+
+ bnxt_set_tpa_flags(bp);
+ bnxt_set_ring_params(bp);
+ dflt_rings = netif_get_num_default_rss_queues();
+ if (BNXT_PF(bp)) {
+ memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+ bp->pf.max_irqs = max_irqs;
+ } else {
+ memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+ bp->vf.max_irqs = max_irqs;
+ }
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
+ bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+
+ if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
+ bp->flags |= BNXT_FLAG_STRIP_VLAN;
+
+ rc = bnxt_probe_phy(bp);
+ if (rc)
+ goto init_err;
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto init_err;
+
+ netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (long)pci_resource_start(pdev, 0), dev->dev_addr);
+
+ return 0;
+
+init_err:
+ pci_iounmap(pdev, bp->bar0);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+init_err_free:
+ free_netdev(dev);
+ return rc;
+}
+
+static struct pci_driver bnxt_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = bnxt_pci_tbl,
+ .probe = bnxt_init_one,
+ .remove = bnxt_remove_one,
+#if defined(CONFIG_BNXT_SRIOV)
+ .sriov_configure = bnxt_sriov_configure,
+#endif
+};
+
+module_pci_driver(bnxt_pci_driver);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
new file mode 100644
index 000000000000..7c89367634d0
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -0,0 +1,1081 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_H
+#define BNXT_H
+
+#define DRV_MODULE_NAME "bnxt_en"
+#define DRV_MODULE_VERSION "0.1.24"
+
+#define DRV_VER_MAJ 0
+#define DRV_VER_MIN 1
+#define DRV_VER_UPD 24
+
+struct tx_bd {
+ __le32 tx_bd_len_flags_type;
+ #define TX_BD_TYPE (0x3f << 0)
+ #define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0)
+ #define TX_BD_TYPE_LONG_TX_BD (0x10 << 0)
+ #define TX_BD_FLAGS_PACKET_END (1 << 6)
+ #define TX_BD_FLAGS_NO_CMPL (1 << 7)
+ #define TX_BD_FLAGS_BD_CNT (0x1f << 8)
+ #define TX_BD_FLAGS_BD_CNT_SHIFT 8
+ #define TX_BD_FLAGS_LHINT (3 << 13)
+ #define TX_BD_FLAGS_LHINT_SHIFT 13
+ #define TX_BD_FLAGS_LHINT_512_AND_SMALLER (0 << 13)
+ #define TX_BD_FLAGS_LHINT_512_TO_1023 (1 << 13)
+ #define TX_BD_FLAGS_LHINT_1024_TO_2047 (2 << 13)
+ #define TX_BD_FLAGS_LHINT_2048_AND_LARGER (3 << 13)
+ #define TX_BD_FLAGS_COAL_NOW (1 << 15)
+ #define TX_BD_LEN (0xffff << 16)
+ #define TX_BD_LEN_SHIFT 16
+
+ u32 tx_bd_opaque;
+ __le64 tx_bd_haddr;
+} __packed;
+
+struct tx_bd_ext {
+ __le32 tx_bd_hsize_lflags;
+ #define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
+ #define TX_BD_FLAGS_IP_CKSUM (1 << 1)
+ #define TX_BD_FLAGS_NO_CRC (1 << 2)
+ #define TX_BD_FLAGS_STAMP (1 << 3)
+ #define TX_BD_FLAGS_T_IP_CHKSUM (1 << 4)
+ #define TX_BD_FLAGS_LSO (1 << 5)
+ #define TX_BD_FLAGS_IPID_FMT (1 << 6)
+ #define TX_BD_FLAGS_T_IPID (1 << 7)
+ #define TX_BD_HSIZE (0xff << 16)
+ #define TX_BD_HSIZE_SHIFT 16
+
+ __le32 tx_bd_mss;
+ __le32 tx_bd_cfa_action;
+ #define TX_BD_CFA_ACTION (0xffff << 16)
+ #define TX_BD_CFA_ACTION_SHIFT 16
+
+ __le32 tx_bd_cfa_meta;
+ #define TX_BD_CFA_META_MASK 0xfffffff
+ #define TX_BD_CFA_META_VID_MASK 0xfff
+ #define TX_BD_CFA_META_PRI_MASK (0xf << 12)
+ #define TX_BD_CFA_META_PRI_SHIFT 12
+ #define TX_BD_CFA_META_TPID_MASK (3 << 16)
+ #define TX_BD_CFA_META_TPID_SHIFT 16
+ #define TX_BD_CFA_META_KEY (0xf << 28)
+ #define TX_BD_CFA_META_KEY_SHIFT 28
+ #define TX_BD_CFA_META_KEY_VLAN (1 << 28)
+};
+
+struct rx_bd {
+ __le32 rx_bd_len_flags_type;
+ #define RX_BD_TYPE (0x3f << 0)
+ #define RX_BD_TYPE_RX_PACKET_BD 0x4
+ #define RX_BD_TYPE_RX_BUFFER_BD 0x5
+ #define RX_BD_TYPE_RX_AGG_BD 0x6
+ #define RX_BD_TYPE_16B_BD_SIZE (0 << 4)
+ #define RX_BD_TYPE_32B_BD_SIZE (1 << 4)
+ #define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
+ #define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
+ #define RX_BD_FLAGS_SOP (1 << 6)
+ #define RX_BD_FLAGS_EOP (1 << 7)
+ #define RX_BD_FLAGS_BUFFERS (3 << 8)
+ #define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
+ #define RX_BD_FLAGS_2_BUFFER_PACKET (1 << 8)
+ #define RX_BD_FLAGS_3_BUFFER_PACKET (2 << 8)
+ #define RX_BD_FLAGS_4_BUFFER_PACKET (3 << 8)
+ #define RX_BD_LEN (0xffff << 16)
+ #define RX_BD_LEN_SHIFT 16
+
+ u32 rx_bd_opaque;
+ __le64 rx_bd_haddr;
+};
+
+struct tx_cmp {
+ __le32 tx_cmp_flags_type;
+ #define CMP_TYPE (0x3f << 0)
+ #define CMP_TYPE_TX_L2_CMP 0
+ #define CMP_TYPE_RX_L2_CMP 17
+ #define CMP_TYPE_RX_AGG_CMP 18
+ #define CMP_TYPE_RX_L2_TPA_START_CMP 19
+ #define CMP_TYPE_RX_L2_TPA_END_CMP 21
+ #define CMP_TYPE_STATUS_CMP 32
+ #define CMP_TYPE_REMOTE_DRIVER_REQ 34
+ #define CMP_TYPE_REMOTE_DRIVER_RESP 36
+ #define CMP_TYPE_ERROR_STATUS 48
+ #define CMPL_BASE_TYPE_STAT_EJECT (0x1aUL << 0)
+ #define CMPL_BASE_TYPE_HWRM_DONE (0x20UL << 0)
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ (0x22UL << 0)
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP (0x24UL << 0)
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+
+ #define TX_CMP_FLAGS_ERROR (1 << 6)
+ #define TX_CMP_FLAGS_PUSH (1 << 7)
+
+ u32 tx_cmp_opaque;
+ __le32 tx_cmp_errors_v;
+ #define TX_CMP_V (1 << 0)
+ #define TX_CMP_ERRORS_BUFFER_ERROR (7 << 1)
+ #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR 0
+ #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT 2
+ #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG 4
+ #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS 5
+ #define TX_CMP_ERRORS_ZERO_LENGTH_PKT (1 << 4)
+ #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN (1 << 5)
+ #define TX_CMP_ERRORS_DMA_ERROR (1 << 6)
+ #define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7)
+
+ __le32 tx_cmp_unsed_3;
+};
+
+struct rx_cmp {
+ __le32 rx_cmp_len_flags_type;
+ #define RX_CMP_CMP_TYPE (0x3f << 0)
+ #define RX_CMP_FLAGS_ERROR (1 << 6)
+ #define RX_CMP_FLAGS_PLACEMENT (7 << 7)
+ #define RX_CMP_FLAGS_RSS_VALID (1 << 10)
+ #define RX_CMP_FLAGS_UNUSED (1 << 11)
+ #define RX_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12)
+ #define RX_CMP_FLAGS_ITYPE_IP (1 << 12)
+ #define RX_CMP_FLAGS_ITYPE_TCP (2 << 12)
+ #define RX_CMP_FLAGS_ITYPE_UDP (3 << 12)
+ #define RX_CMP_FLAGS_ITYPE_FCOE (4 << 12)
+ #define RX_CMP_FLAGS_ITYPE_ROCE (5 << 12)
+ #define RX_CMP_FLAGS_ITYPE_PTP_WO_TS (8 << 12)
+ #define RX_CMP_FLAGS_ITYPE_PTP_W_TS (9 << 12)
+ #define RX_CMP_LEN (0xffff << 16)
+ #define RX_CMP_LEN_SHIFT 16
+
+ u32 rx_cmp_opaque;
+ __le32 rx_cmp_misc_v1;
+ #define RX_CMP_V1 (1 << 0)
+ #define RX_CMP_AGG_BUFS (0x1f << 1)
+ #define RX_CMP_AGG_BUFS_SHIFT 1
+ #define RX_CMP_RSS_HASH_TYPE (0x7f << 9)
+ #define RX_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_CMP_PAYLOAD_OFFSET (0xff << 16)
+ #define RX_CMP_PAYLOAD_OFFSET_SHIFT 16
+
+ __le32 rx_cmp_rss_hash;
+};
+
+#define RX_CMP_HASH_VALID(rxcmp) \
+ ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
+
+#define RX_CMP_HASH_TYPE(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
+ RX_CMP_RSS_HASH_TYPE_SHIFT)
+
+struct rx_cmp_ext {
+ __le32 rx_cmp_flags2;
+ #define RX_CMP_FLAGS2_IP_CS_CALC 0x1
+ #define RX_CMP_FLAGS2_L4_CS_CALC (0x1 << 1)
+ #define RX_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
+ #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
+ #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4)
+ __le32 rx_cmp_meta_data;
+ #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff
+ #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000
+ #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16
+ __le32 rx_cmp_cfa_code_errors_v2;
+ #define RX_CMP_V (1 << 0)
+ #define RX_CMPL_ERRORS_MASK (0x7fff << 1)
+ #define RX_CMPL_ERRORS_SFT 1
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_MASK (0x7 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+ #define RX_CMPL_ERRORS_IP_CS_ERROR (0x1 << 4)
+ #define RX_CMPL_ERRORS_L4_CS_ERROR (0x1 << 5)
+ #define RX_CMPL_ERRORS_T_IP_CS_ERROR (0x1 << 6)
+ #define RX_CMPL_ERRORS_T_L4_CS_ERROR (0x1 << 7)
+ #define RX_CMPL_ERRORS_CRC_ERROR (0x1 << 8)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_MASK (0x7 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (0x0 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6 << 9)
+ #define RX_CMPL_ERRORS_PKT_ERROR_MASK (0xf << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR (0x0 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8 << 12)
+
+ #define RX_CMPL_CFA_CODE_MASK (0xffff << 16)
+ #define RX_CMPL_CFA_CODE_SFT 16
+
+ __le32 rx_cmp_unused3;
+};
+
+#define RX_CMP_L2_ERRORS \
+ cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR)
+
+#define RX_CMP_L4_CS_BITS \
+ (cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC))
+
+#define RX_CMP_L4_CS_ERR_BITS \
+ (cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR))
+
+#define RX_CMP_L4_CS_OK(rxcmp1) \
+ (((rxcmp1)->rx_cmp_flags2 & RX_CMP_L4_CS_BITS) && \
+ !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS))
+
+#define RX_CMP_ENCAP(rxcmp1) \
+ ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \
+ RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
+
+struct rx_agg_cmp {
+ __le32 rx_agg_cmp_len_flags_type;
+ #define RX_AGG_CMP_TYPE (0x3f << 0)
+ #define RX_AGG_CMP_LEN (0xffff << 16)
+ #define RX_AGG_CMP_LEN_SHIFT 16
+ u32 rx_agg_cmp_opaque;
+ __le32 rx_agg_cmp_v;
+ #define RX_AGG_CMP_V (1 << 0)
+ __le32 rx_agg_cmp_unused;
+};
+
+struct rx_tpa_start_cmp {
+ __le32 rx_tpa_start_cmp_len_flags_type;
+ #define RX_TPA_START_CMP_TYPE (0x3f << 0)
+ #define RX_TPA_START_CMP_FLAGS (0x3ff << 6)
+ #define RX_TPA_START_CMP_FLAGS_SHIFT 6
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
+ #define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10)
+ #define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12)
+ #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
+ #define RX_TPA_START_CMP_LEN (0xffff << 16)
+ #define RX_TPA_START_CMP_LEN_SHIFT 16
+
+ u32 rx_tpa_start_cmp_opaque;
+ __le32 rx_tpa_start_cmp_misc_v1;
+ #define RX_TPA_START_CMP_V1 (0x1 << 0)
+ #define RX_TPA_START_CMP_RSS_HASH_TYPE (0x7f << 9)
+ #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
+ #define RX_TPA_START_CMP_AGG_ID_SHIFT 25
+
+ __le32 rx_tpa_start_cmp_rss_hash;
+};
+
+#define TPA_START_HASH_VALID(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
+
+#define TPA_START_HASH_TYPE(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_RSS_HASH_TYPE) >> \
+ RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT)
+
+#define TPA_START_AGG_ID(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+
+struct rx_tpa_start_cmp_ext {
+ __le32 rx_tpa_start_cmp_flags2;
+ #define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0)
+ #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1)
+ #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
+ #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
+
+ __le32 rx_tpa_start_cmp_metadata;
+ __le32 rx_tpa_start_cmp_cfa_code_v2;
+ #define RX_TPA_START_CMP_V2 (0x1 << 0)
+ #define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
+ #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
+ __le32 rx_tpa_start_cmp_unused5;
+};
+
+struct rx_tpa_end_cmp {
+ __le32 rx_tpa_end_cmp_len_flags_type;
+ #define RX_TPA_END_CMP_TYPE (0x3f << 0)
+ #define RX_TPA_END_CMP_FLAGS (0x3ff << 6)
+ #define RX_TPA_END_CMP_FLAGS_SHIFT 6
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT (0x7 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT 7
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
+ #define RX_TPA_END_CMP_FLAGS_RSS_VALID (0x1 << 10)
+ #define RX_TPA_END_CMP_FLAGS_ITYPES (0xf << 12)
+ #define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_TPA_END_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
+ #define RX_TPA_END_CMP_LEN (0xffff << 16)
+ #define RX_TPA_END_CMP_LEN_SHIFT 16
+
+ u32 rx_tpa_end_cmp_opaque;
+ __le32 rx_tpa_end_cmp_misc_v1;
+ #define RX_TPA_END_CMP_V1 (0x1 << 0)
+ #define RX_TPA_END_CMP_AGG_BUFS (0x3f << 1)
+ #define RX_TPA_END_CMP_AGG_BUFS_SHIFT 1
+ #define RX_TPA_END_CMP_TPA_SEGS (0xff << 8)
+ #define RX_TPA_END_CMP_TPA_SEGS_SHIFT 8
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET (0xff << 16)
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
+ #define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
+ #define RX_TPA_END_CMP_AGG_ID_SHIFT 25
+
+ __le32 rx_tpa_end_cmp_tsdelta;
+ #define RX_TPA_END_GRO_TS (0x1 << 31)
+};
+
+#define TPA_END_AGG_ID(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+
+#define TPA_END_TPA_SEGS(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
+
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO \
+ cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO & \
+ RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
+
+#define TPA_END_GRO(rx_tpa_end) \
+ ((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type & \
+ RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
+
+#define TPA_END_GRO_TS(rx_tpa_end) \
+ ((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS))
+
+struct rx_tpa_end_cmp_ext {
+ __le32 rx_tpa_end_cmp_dup_acks;
+ #define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0)
+
+ __le32 rx_tpa_end_cmp_seg_len;
+ #define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0)
+
+ __le32 rx_tpa_end_cmp_errors_v2;
+ #define RX_TPA_END_CMP_V2 (0x1 << 0)
+ #define RX_TPA_END_CMP_ERRORS (0x7fff << 1)
+ #define RX_TPA_END_CMPL_ERRORS_SHIFT 1
+
+ u32 rx_tpa_end_cmp_start_opaque;
+};
+
+#define DB_IDX_MASK 0xffffff
+#define DB_IDX_VALID (0x1 << 26)
+#define DB_IRQ_DIS (0x1 << 27)
+#define DB_KEY_TX (0x0 << 28)
+#define DB_KEY_RX (0x1 << 28)
+#define DB_KEY_CP (0x2 << 28)
+#define DB_KEY_ST (0x3 << 28)
+#define DB_KEY_TX_PUSH (0x4 << 28)
+#define DB_LONG_TX_PUSH (0x2 << 24)
+
+#define INVALID_HW_RING_ID ((u16)-1)
+
+#define BNXT_RSS_HASH_TYPE_FLAG_IPV4 0x01
+#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 0x02
+#define BNXT_RSS_HASH_TYPE_FLAG_IPV6 0x04
+#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6 0x08
+
+/* The hardware supports certain page sizes. Use the supported page sizes
+ * to allocate the rings.
+ */
+#if (PAGE_SHIFT < 12)
+#define BNXT_PAGE_SHIFT 12
+#elif (PAGE_SHIFT <= 13)
+#define BNXT_PAGE_SHIFT PAGE_SHIFT
+#elif (PAGE_SHIFT < 16)
+#define BNXT_PAGE_SHIFT 13
+#else
+#define BNXT_PAGE_SHIFT 16
+#endif
+
+#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
+
+#define BNXT_MIN_PKT_SIZE 45
+
+#define BNXT_NUM_TESTS(bp) 0
+
+#define BNXT_DEFAULT_RX_RING_SIZE 1023
+#define BNXT_DEFAULT_TX_RING_SIZE 512
+
+#define MAX_TPA 64
+
+#define MAX_RX_PAGES 8
+#define MAX_RX_AGG_PAGES 32
+#define MAX_TX_PAGES 8
+#define MAX_CP_PAGES 64
+
+#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
+#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
+#define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp))
+
+#define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT)
+#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+
+#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT)
+
+#define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT)
+#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+
+#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
+
+#define BNXT_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
+#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+
+#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(x) (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1))
+
+#define TX_CMP_VALID(txcmp, raw_cons) \
+ (!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) == \
+ !((raw_cons) & bp->cp_bit))
+
+#define RX_CMP_VALID(rxcmp1, raw_cons) \
+ (!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\
+ !((raw_cons) & bp->cp_bit))
+
+#define RX_AGG_CMP_VALID(agg, raw_cons) \
+ (!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \
+ !((raw_cons) & bp->cp_bit))
+
+#define TX_CMP_TYPE(txcmp) \
+ (le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
+
+#define RX_CMP_TYPE(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
+
+#define NEXT_RX(idx) (((idx) + 1) & bp->rx_ring_mask)
+
+#define NEXT_RX_AGG(idx) (((idx) + 1) & bp->rx_agg_ring_mask)
+
+#define NEXT_TX(idx) (((idx) + 1) & bp->tx_ring_mask)
+
+#define ADV_RAW_CMP(idx, n) ((idx) + (n))
+#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
+#define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
+#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
+
+#define HWRM_CMD_TIMEOUT 500
+#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
+#define HWRM_RESP_ERR_CODE_MASK 0xffff
+#define HWRM_RESP_LEN_MASK 0xffff0000
+#define HWRM_RESP_LEN_SFT 16
+#define HWRM_RESP_VALID_MASK 0xff000000
+#define BNXT_HWRM_REQ_MAX_SIZE 128
+#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
+ BNXT_HWRM_REQ_MAX_SIZE)
+
+struct bnxt_sw_tx_bd {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ u8 is_gso;
+ u8 is_push;
+ unsigned short nr_frags;
+};
+
+struct bnxt_sw_rx_bd {
+ u8 *data;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct bnxt_sw_rx_agg_bd {
+ struct page *page;
+ dma_addr_t mapping;
+};
+
+struct bnxt_ring_struct {
+ int nr_pages;
+ int page_size;
+ void **pg_arr;
+ dma_addr_t *dma_arr;
+
+ __le64 *pg_tbl;
+ dma_addr_t pg_tbl_map;
+
+ int vmem_size;
+ void **vmem;
+
+ u16 fw_ring_id; /* Ring id filled by Chimp FW */
+ u8 queue_id;
+};
+
+struct tx_push_bd {
+ __le32 doorbell;
+ struct tx_bd txbd1;
+ struct tx_bd_ext txbd2;
+};
+
+struct bnxt_tx_ring_info {
+ u16 tx_prod;
+ u16 tx_cons;
+ void __iomem *tx_doorbell;
+
+ struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
+ struct bnxt_sw_tx_bd *tx_buf_ring;
+
+ dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
+
+ struct tx_push_bd *tx_push;
+ dma_addr_t tx_push_mapping;
+
+#define BNXT_DEV_STATE_CLOSING 0x1
+ u32 dev_state;
+
+ struct bnxt_ring_struct tx_ring_struct;
+};
+
+struct bnxt_tpa_info {
+ u8 *data;
+ dma_addr_t mapping;
+ u16 len;
+ unsigned short gso_type;
+ u32 flags2;
+ u32 metadata;
+ enum pkt_hash_types hash_type;
+ u32 rss_hash;
+};
+
+struct bnxt_rx_ring_info {
+ u16 rx_prod;
+ u16 rx_agg_prod;
+ u16 rx_sw_agg_prod;
+ void __iomem *rx_doorbell;
+ void __iomem *rx_agg_doorbell;
+
+ struct rx_bd *rx_desc_ring[MAX_RX_PAGES];
+ struct bnxt_sw_rx_bd *rx_buf_ring;
+
+ struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES];
+ struct bnxt_sw_rx_agg_bd *rx_agg_ring;
+
+ unsigned long *rx_agg_bmap;
+ u16 rx_agg_bmap_size;
+
+ dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
+ dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+
+ struct bnxt_tpa_info *rx_tpa;
+
+ struct bnxt_ring_struct rx_ring_struct;
+ struct bnxt_ring_struct rx_agg_ring_struct;
+};
+
+struct bnxt_cp_ring_info {
+ u32 cp_raw_cons;
+ void __iomem *cp_doorbell;
+
+ struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
+
+ dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
+
+ struct ctx_hw_stats *hw_stats;
+ dma_addr_t hw_stats_map;
+ u32 hw_stats_ctx_id;
+ u64 rx_l4_csum_errors;
+
+ struct bnxt_ring_struct cp_ring_struct;
+};
+
+struct bnxt_napi {
+ struct napi_struct napi;
+ struct bnxt *bp;
+
+ int index;
+ struct bnxt_cp_ring_info cp_ring;
+ struct bnxt_rx_ring_info rx_ring;
+ struct bnxt_tx_ring_info tx_ring;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ atomic_t poll_state;
+#endif
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+enum bnxt_poll_state_t {
+ BNXT_STATE_IDLE = 0,
+ BNXT_STATE_NAPI,
+ BNXT_STATE_POLL,
+ BNXT_STATE_DISABLE,
+};
+#endif
+
+struct bnxt_irq {
+ irq_handler_t handler;
+ unsigned int vector;
+ u8 requested;
+ char name[IFNAMSIZ + 2];
+};
+
+#define HWRM_RING_ALLOC_TX 0x1
+#define HWRM_RING_ALLOC_RX 0x2
+#define HWRM_RING_ALLOC_AGG 0x4
+#define HWRM_RING_ALLOC_CMPL 0x8
+
+#define INVALID_STATS_CTX_ID -1
+
+struct hwrm_cmd_req_hdr {
+#define HWRM_CMPL_RING_MASK 0xffff0000
+#define HWRM_CMPL_RING_SFT 16
+ __le32 cmpl_ring_req_type;
+#define HWRM_SEQ_ID_MASK 0xffff
+#define HWRM_SEQ_ID_INVALID -1
+#define HWRM_RESP_LEN_OFFSET 4
+#define HWRM_TARGET_FID_MASK 0xffff0000
+#define HWRM_TARGET_FID_SFT 16
+ __le32 target_id_seq_id;
+ __le64 resp_addr;
+};
+
+struct bnxt_ring_grp_info {
+ u16 fw_stats_ctx;
+ u16 fw_grp_id;
+ u16 rx_fw_ring_id;
+ u16 agg_fw_ring_id;
+ u16 cp_fw_ring_id;
+};
+
+struct bnxt_vnic_info {
+ u16 fw_vnic_id; /* returned by Chimp during alloc */
+ u16 fw_rss_cos_lb_ctx;
+ u16 fw_l2_ctx_id;
+#define BNXT_MAX_UC_ADDRS 4
+ __le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
+ /* index 0 always dev_addr */
+ u16 uc_filter_count;
+ u8 *uc_list;
+
+ u16 *fw_grp_ids;
+ u16 hash_type;
+ dma_addr_t rss_table_dma_addr;
+ __le16 *rss_table;
+ dma_addr_t rss_hash_key_dma_addr;
+ u64 *rss_hash_key;
+ u32 rx_mask;
+
+ u8 *mc_list;
+ int mc_list_size;
+ int mc_list_count;
+ dma_addr_t mc_list_mapping;
+#define BNXT_MAX_MC_ADDRS 16
+
+ u32 flags;
+#define BNXT_VNIC_RSS_FLAG 1
+#define BNXT_VNIC_RFS_FLAG 2
+#define BNXT_VNIC_MCAST_FLAG 4
+#define BNXT_VNIC_UCAST_FLAG 8
+};
+
+#if defined(CONFIG_BNXT_SRIOV)
+struct bnxt_vf_info {
+ u16 fw_fid;
+ u8 mac_addr[ETH_ALEN];
+ u16 max_rsscos_ctxs;
+ u16 max_cp_rings;
+ u16 max_tx_rings;
+ u16 max_rx_rings;
+ u16 max_l2_ctxs;
+ u16 max_irqs;
+ u16 max_vnics;
+ u16 max_stat_ctxs;
+ u16 vlan;
+ u32 flags;
+#define BNXT_VF_QOS 0x1
+#define BNXT_VF_SPOOFCHK 0x2
+#define BNXT_VF_LINK_FORCED 0x4
+#define BNXT_VF_LINK_UP 0x8
+ u32 func_flags; /* func cfg flags */
+ u32 min_tx_rate;
+ u32 max_tx_rate;
+ void *hwrm_cmd_req_addr;
+ dma_addr_t hwrm_cmd_req_dma_addr;
+};
+
+struct bnxt_pf_info {
+#define BNXT_FIRST_PF_FID 1
+#define BNXT_FIRST_VF_FID 128
+ u32 fw_fid;
+ u8 port_id;
+ u8 mac_addr[ETH_ALEN];
+ u16 max_rsscos_ctxs;
+ u16 max_cp_rings;
+ u16 max_tx_rings; /* HW assigned max tx rings for this PF */
+ u16 max_pf_tx_rings; /* runtime max tx rings owned by PF */
+ u16 max_rx_rings; /* HW assigned max rx rings for this PF */
+ u16 max_pf_rx_rings; /* runtime max rx rings owned by PF */
+ u16 max_irqs;
+ u16 max_l2_ctxs;
+ u16 max_vnics;
+ u16 max_stat_ctxs;
+ u32 first_vf_id;
+ u16 active_vfs;
+ u16 max_vfs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
+ unsigned long *vf_event_bmap;
+ u16 hwrm_cmd_req_pages;
+ void *hwrm_cmd_req_addr[4];
+ dma_addr_t hwrm_cmd_req_dma_addr[4];
+ struct bnxt_vf_info *vf;
+};
+#endif
+
+struct bnxt_ntuple_filter {
+ struct hlist_node hash;
+ u8 src_mac_addr[ETH_ALEN];
+ struct flow_keys fkeys;
+ __le64 filter_id;
+ u16 sw_id;
+ u16 rxq;
+ u32 flow_id;
+ unsigned long state;
+#define BNXT_FLTR_VALID 0
+#define BNXT_FLTR_UPDATE 1
+};
+
+#define BNXT_ALL_COPPER_ETHTOOL_SPEED \
+ (ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | \
+ ADVERTISED_10000baseT_Full)
+
+struct bnxt_link_info {
+ u8 media_type;
+ u8 transceiver;
+ u8 phy_addr;
+ u8 phy_link_status;
+#define BNXT_LINK_NO_LINK PORT_PHY_QCFG_RESP_LINK_NO_LINK
+#define BNXT_LINK_SIGNAL PORT_PHY_QCFG_RESP_LINK_SIGNAL
+#define BNXT_LINK_LINK PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 wire_speed;
+ u8 loop_back;
+ u8 link_up;
+ u8 duplex;
+#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_HALF
+#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_FULL
+ u8 pause;
+#define BNXT_LINK_PAUSE_TX PORT_PHY_QCFG_RESP_PAUSE_TX
+#define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX
+#define BNXT_LINK_PAUSE_BOTH (PORT_PHY_QCFG_RESP_PAUSE_RX | \
+ PORT_PHY_QCFG_RESP_PAUSE_TX)
+ u8 auto_pause_setting;
+ u8 force_pause_setting;
+ u8 duplex_setting;
+ u8 auto_mode;
+#define BNXT_AUTO_MODE(mode) ((mode) > BNXT_LINK_AUTO_NONE && \
+ (mode) <= BNXT_LINK_AUTO_MSK)
+#define BNXT_LINK_AUTO_NONE PORT_PHY_QCFG_RESP_AUTO_MODE_NONE
+#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
+#define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
+#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
+#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_MASK
+#define PHY_VER_LEN 3
+ u8 phy_ver[PHY_VER_LEN];
+ u16 link_speed;
+#define BNXT_LINK_SPEED_100MB PORT_PHY_QCFG_RESP_LINK_SPEED_100MB
+#define BNXT_LINK_SPEED_1GB PORT_PHY_QCFG_RESP_LINK_SPEED_1GB
+#define BNXT_LINK_SPEED_2GB PORT_PHY_QCFG_RESP_LINK_SPEED_2GB
+#define BNXT_LINK_SPEED_2_5GB PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB
+#define BNXT_LINK_SPEED_10GB PORT_PHY_QCFG_RESP_LINK_SPEED_10GB
+#define BNXT_LINK_SPEED_20GB PORT_PHY_QCFG_RESP_LINK_SPEED_20GB
+#define BNXT_LINK_SPEED_25GB PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
+#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
+#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
+ u16 support_speeds;
+ u16 auto_link_speeds;
+#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
+#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
+#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
+#define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB
+#define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB
+#define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB
+#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
+#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
+#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+ u16 auto_link_speed;
+ u16 force_link_speed;
+ u32 preemphasis;
+
+ /* copy of requested setting from ethtool cmd */
+ u8 autoneg;
+#define BNXT_AUTONEG_SPEED 1
+#define BNXT_AUTONEG_FLOW_CTRL 2
+ u8 req_duplex;
+ u8 req_flow_ctrl;
+ u16 req_link_speed;
+ u32 advertising;
+ bool force_link_chng;
+ /* a copy of phy_qcfg output used to report link
+ * info to VF
+ */
+ struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+};
+
+#define BNXT_MAX_QUEUE 8
+
+struct bnxt_queue_info {
+ u8 queue_id;
+ u8 queue_profile;
+};
+
+struct bnxt {
+ void __iomem *bar0;
+ void __iomem *bar1;
+ void __iomem *bar2;
+
+ u32 reg_base;
+
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ atomic_t intr_sem;
+
+ u32 flags;
+ #define BNXT_FLAG_DCB_ENABLED 0x1
+ #define BNXT_FLAG_VF 0x2
+ #define BNXT_FLAG_LRO 0x4
+ #define BNXT_FLAG_GRO 0x8
+ #define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
+ #define BNXT_FLAG_JUMBO 0x10
+ #define BNXT_FLAG_STRIP_VLAN 0x20
+ #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+ BNXT_FLAG_LRO)
+ #define BNXT_FLAG_USING_MSIX 0x40
+ #define BNXT_FLAG_MSIX_CAP 0x80
+ #define BNXT_FLAG_RFS 0x100
+ #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
+ BNXT_FLAG_RFS | \
+ BNXT_FLAG_STRIP_VLAN)
+
+#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
+#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+
+ struct bnxt_napi **bnapi;
+
+ u32 rx_buf_size;
+	u32			rx_buf_use_size;	/* usable size */
+ u32 rx_ring_size;
+ u32 rx_agg_ring_size;
+ u32 rx_copy_thresh;
+ u32 rx_ring_mask;
+ u32 rx_agg_ring_mask;
+ int rx_nr_pages;
+ int rx_agg_nr_pages;
+ int rx_nr_rings;
+ int rsscos_nr_ctxs;
+
+ u32 tx_ring_size;
+ u32 tx_ring_mask;
+ int tx_nr_pages;
+ int tx_nr_rings;
+ int tx_nr_rings_per_tc;
+
+ int tx_wake_thresh;
+ int tx_push_thresh;
+ int tx_push_size;
+
+ u32 cp_ring_size;
+ u32 cp_ring_mask;
+ u32 cp_bit;
+ int cp_nr_pages;
+ int cp_nr_rings;
+
+ int num_stat_ctxs;
+ struct bnxt_ring_grp_info *grp_info;
+ struct bnxt_vnic_info *vnic_info;
+ int nr_vnics;
+
+ u8 max_tc;
+ struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
+
+ unsigned int current_interval;
+#define BNXT_TIMER_INTERVAL (HZ / 2)
+
+ struct timer_list timer;
+
+ int state;
+#define BNXT_STATE_CLOSED 0
+#define BNXT_STATE_OPEN 1
+
+ struct bnxt_irq *irq_tbl;
+ u8 mac_addr[ETH_ALEN];
+
+ u32 msg_enable;
+
+ u16 hwrm_cmd_seq;
+ u32 hwrm_intr_seq_id;
+ void *hwrm_cmd_resp_addr;
+ dma_addr_t hwrm_cmd_resp_dma_addr;
+ void *hwrm_dbg_resp_addr;
+ dma_addr_t hwrm_dbg_resp_dma_addr;
+#define HWRM_DBG_REG_BUF_SIZE 128
+ struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
+ struct hwrm_ver_get_output ver_resp;
+#define FW_VER_STR_LEN 32
+#define BC_HWRM_STR_LEN 21
+#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
+ char fw_ver_str[FW_VER_STR_LEN];
+ __be16 vxlan_port;
+ u8 vxlan_port_cnt;
+ __le16 vxlan_fw_dst_port_id;
+ u8 nge_port_cnt;
+ __le16 nge_fw_dst_port_id;
+ u16 coal_ticks;
+ u16 coal_ticks_irq;
+ u16 coal_bufs;
+ u16 coal_bufs_irq;
+
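+/* The 25/2 factor suggests the coalescing timer ticks at 12.5 MHz
+ * (80 ns per tick); e.g. 100 usec maps to 1250 ticks.
+ */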
+#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
+#define BNXT_COAL_TIMER_TO_USEC(x) ((x) * 2 / 25)
+
+ struct work_struct sp_task;
+ unsigned long sp_event;
+/* bit numbers for bp->sp_event, used with set_bit()/test_bit() */
+#define BNXT_RX_MASK_SP_EVENT		0
+#define BNXT_RX_NTP_FLTR_SP_EVENT	1
+#define BNXT_LINK_CHNG_SP_EVENT		2
+#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT	3
+#define BNXT_VXLAN_ADD_PORT_SP_EVENT	4
+#define BNXT_VXLAN_DEL_PORT_SP_EVENT	5
+#define BNXT_RESET_TASK_SP_EVENT	6
+#define BNXT_RST_RING_SP_EVENT		7
+
+#ifdef CONFIG_BNXT_SRIOV
+ int nr_vfs;
+ struct bnxt_pf_info pf;
+ struct bnxt_vf_info vf;
+ wait_queue_head_t sriov_cfg_wait;
+ bool sriov_cfg;
+#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000)
+#endif
+
+#define BNXT_NTP_FLTR_MAX_FLTR 4096
+#define BNXT_NTP_FLTR_HASH_SIZE 512
+#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1)
+ struct hlist_head ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
+ spinlock_t ntp_fltr_lock; /* for hash table add, del */
+
+ unsigned long *ntp_fltr_bmap;
+ int ntp_fltr_count;
+
+ struct bnxt_link_info link_info;
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+ atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* called from the NAPI poll routine to get ownership of a bnapi */
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+ int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+ BNXT_STATE_NAPI);
+
+ return rc == BNXT_STATE_IDLE;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+ atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* called from the busy poll routine to get ownership of a bnapi */
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+ int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+ BNXT_STATE_POLL);
+
+ return rc == BNXT_STATE_IDLE;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+ atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+ return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL;
+}
+
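+/* spin until we can move IDLE -> DISABLE; once disabled, neither NAPI
+ * nor busy poll can take ownership of the bnapi again
+ */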
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+ int old;
+
+ while (1) {
+ old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+ BNXT_STATE_DISABLE);
+ if (old == BNXT_STATE_IDLE)
+ break;
+ usleep_range(500, 5000);
+ }
+}
+
+#else
+
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+ return true;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+ return false;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+ return false;
+}
+
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+#endif
+
+void bnxt_set_ring_params(struct bnxt *);
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
+int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int hwrm_send_message(struct bnxt *, void *, u32, int);
+int bnxt_hwrm_set_coal(struct bnxt *);
+int bnxt_hwrm_set_pause(struct bnxt *);
+int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
+int bnxt_open_nic(struct bnxt *, bool, bool);
+int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_get_max_rings(struct bnxt *, int *, int *);
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
new file mode 100644
index 000000000000..45bd628eaf3a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -0,0 +1,1149 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/firmware.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ethtool.h"
+#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
+#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
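+/* NVM writes can take much longer than ordinary HWRM commands */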
+#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
+
+static u32 bnxt_get_msglevel(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ return bp->msg_enable;
+}
+
+static void bnxt_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ bp->msg_enable = value;
+}
+
+static int bnxt_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ memset(coal, 0, sizeof(*coal));
+
+ coal->rx_coalesce_usecs =
+ max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1);
+ coal->rx_max_coalesced_frames = bp->coal_bufs / 2;
+ coal->rx_coalesce_usecs_irq =
+ max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1);
+ coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2;
+
+ return 0;
+}
+
+static int bnxt_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs);
+ bp->coal_bufs = coal->rx_max_coalesced_frames * 2;
+ bp->coal_ticks_irq =
+ BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs_irq);
+ bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_coal(bp);
+
+ return rc;
+}
+
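+/* per completion ring: the 20 hardware counters of struct ctx_hw_stats
+ * plus the software rx_l4_csum_errors counter
+ */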
+#define BNXT_NUM_STATS 21
+
+static int bnxt_get_sset_count(struct net_device *dev, int sset)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return BNXT_NUM_STATS * bp->cp_nr_rings;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bnxt_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ u32 i, j = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings;
+ u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
+
+ memset(buf, 0, buf_size);
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ __le64 *hw_stats = (__le64 *)cpr->hw_stats;
+ int k;
+
+ for (k = 0; k < stat_fields; j++, k++)
+ buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j++] = cpr->rx_l4_csum_errors;
+ }
+}
+
+static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 i;
+
+ switch (stringset) {
+ /* The number of strings must match BNXT_NUM_STATS defined above. */
+ case ETH_SS_STATS:
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ sprintf(buf, "[%d]: rx_ucast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_mcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_bcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_discards", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_drops", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_ucast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_mcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_bcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_ucast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_mcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_bcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_discards", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_drops", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_ucast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_mcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_bcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_events", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_aborts", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_l4_csum_errors", i);
+ buf += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
+ stringset);
+ break;
+ }
+}
+
+static void bnxt_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
+ ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+ ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
+
+ ering->rx_pending = bp->rx_ring_size;
+ ering->rx_jumbo_pending = bp->rx_agg_ring_size;
+ ering->tx_pending = bp->tx_ring_size;
+}
+
+static int bnxt_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
+ (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
+ (ering->tx_pending <= MAX_SKB_FRAGS))
+ return -EINVAL;
+
+ if (netif_running(dev))
+ bnxt_close_nic(bp, false, false);
+
+ bp->rx_ring_size = ering->rx_pending;
+ bp->tx_ring_size = ering->tx_pending;
+ bnxt_set_ring_params(bp);
+
+ if (netif_running(dev))
+ return bnxt_open_nic(bp, false, false);
+
+ return 0;
+}
+
+static void bnxt_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int max_rx_rings, max_tx_rings, tcs;
+
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1)
+ max_tx_rings /= tcs;
+
+ channel->max_rx = max_rx_rings;
+ channel->max_tx = max_tx_rings;
+ channel->max_other = 0;
+ channel->max_combined = 0;
+ channel->rx_count = bp->rx_nr_rings;
+ channel->tx_count = bp->tx_nr_rings_per_tc;
+}
+
+static int bnxt_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int max_rx_rings, max_tx_rings, tcs;
+	int rc = 0;
+
+ if (channel->other_count || channel->combined_count ||
+ !channel->rx_count || !channel->tx_count)
+ return -EINVAL;
+
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1)
+ max_tx_rings /= tcs;
+
+ if (channel->rx_count > max_rx_rings ||
+ channel->tx_count > max_tx_rings)
+ return -EINVAL;
+
+ if (netif_running(dev)) {
+ if (BNXT_PF(bp)) {
+		if (BNXT_PF(bp)) {
+			/* TODO CHIMP_FW: Send message to all VFs
+			 * before PF unload
+			 */
+ }
+ rc = bnxt_close_nic(bp, true, false);
+ if (rc) {
+			netdev_err(bp->dev, "Set channel failure, rc: %d\n",
+				   rc);
+ return rc;
+ }
+ }
+
+ bp->rx_nr_rings = channel->rx_count;
+ bp->tx_nr_rings_per_tc = channel->tx_count;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ if (tcs > 1)
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+ bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+
+ if (netif_running(dev)) {
+ rc = bnxt_open_nic(bp, true, false);
+		if (!rc && BNXT_PF(bp)) {
+			/* TODO CHIMP_FW: Send message to all VFs
+			 * to re-enable
+			 */
+		}
+ }
+
+ return rc;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ int i, j = 0;
+
+ cmd->data = bp->ntp_fltr_count;
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct bnxt_ntuple_filter *fltr;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (j == cmd->rule_cnt)
+ break;
+ rule_locs[j++] = fltr->sw_id;
+ }
+ rcu_read_unlock();
+ if (j == cmd->rule_cnt)
+ break;
+ }
+ cmd->rule_cnt = j;
+ return 0;
+}
+
+static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct bnxt_ntuple_filter *fltr;
+ struct flow_keys *fkeys;
+ int i, rc = -EINVAL;
+
+	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+ return rc;
+
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (fltr->sw_id == fs->location)
+ goto fltr_found;
+ }
+ rcu_read_unlock();
+ }
+ return rc;
+
+fltr_found:
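+	/* still inside rcu_read_lock() from the search loop above;
+	 * dropped at fltr_err
+	 */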
+ fkeys = &fltr->fkeys;
+ if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ fs->flow_type = TCP_V4_FLOW;
+ else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ fs->flow_type = UDP_V4_FLOW;
+ else
+ goto fltr_err;
+
+ fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+ fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+
+ fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+ fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+ fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+ fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+
+ fs->ring_cookie = fltr->rxq;
+ rc = 0;
+
+fltr_err:
+ rcu_read_unlock();
+
+ return rc;
+}
+
+static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = bp->rx_nr_rings;
+ break;
+
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = bp->ntp_fltr_count;
+ cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
+ break;
+
+ case ETHTOOL_GRXCLSRLALL:
+		rc = bnxt_grxclsrlall(bp, cmd, rule_locs);
+ break;
+
+ case ETHTOOL_GRXCLSRULE:
+ rc = bnxt_grxclsrule(bp, cmd);
+ break;
+
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ return rc;
+}
+#endif
+
+static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
+{
+ return HW_HASH_INDEX_SIZE;
+}
+
+static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
+{
+ return HW_HASH_KEY_SIZE;
+}
+
+static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ int i = 0;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (indir)
+ for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
+ indir[i] = le16_to_cpu(vnic->rss_table[i]);
+
+ if (key)
+ memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+
+ return 0;
+}
+
+static void bnxt_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+ info->testinfo_len = BNXT_NUM_TESTS(bp);
+ /* TODO CHIMP_FW: eeprom dump details */
+ info->eedump_len = 0;
+ /* TODO CHIMP FW: reg dump details */
+ info->regdump_len = 0;
+}
+
+static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
+{
+ u16 fw_speeds = link_info->support_speeds;
+ u32 speed_mask = 0;
+
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+ speed_mask |= SUPPORTED_100baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+ speed_mask |= SUPPORTED_1000baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+ speed_mask |= SUPPORTED_2500baseX_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+ speed_mask |= SUPPORTED_10000baseT_Full;
+ /* TODO: support 25GB, 50GB with different cable type */
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
+ speed_mask |= SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_20000baseKR2_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+ speed_mask |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_40000baseCR4_Full |
+ SUPPORTED_40000baseSR4_Full |
+ SUPPORTED_40000baseLR4_Full;
+
+ return speed_mask;
+}
+
+static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
+{
+ u16 fw_speeds = link_info->auto_link_speeds;
+ u32 speed_mask = 0;
+
+ /* TODO: support 25GB, 40GB, 50GB with different cable type */
+ /* set the advertised speeds */
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+ speed_mask |= ADVERTISED_100baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+ speed_mask |= ADVERTISED_1000baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+ speed_mask |= ADVERTISED_2500baseX_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+ speed_mask |= ADVERTISED_10000baseT_Full;
+	/* TODO: how to advertise 20, 25, 40, 50GB with different cable type? */
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
+ speed_mask |= ADVERTISED_20000baseMLD2_Full |
+ ADVERTISED_20000baseKR2_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+ speed_mask |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_40000baseCR4_Full |
+ ADVERTISED_40000baseSR4_Full |
+ ADVERTISED_40000baseLR4_Full;
+ return speed_mask;
+}
+
+u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
+{
+ switch (fw_link_speed) {
+ case BNXT_LINK_SPEED_100MB:
+ return SPEED_100;
+ case BNXT_LINK_SPEED_1GB:
+ return SPEED_1000;
+ case BNXT_LINK_SPEED_2_5GB:
+ return SPEED_2500;
+ case BNXT_LINK_SPEED_10GB:
+ return SPEED_10000;
+ case BNXT_LINK_SPEED_20GB:
+ return SPEED_20000;
+ case BNXT_LINK_SPEED_25GB:
+ return SPEED_25000;
+ case BNXT_LINK_SPEED_40GB:
+ return SPEED_40000;
+ case BNXT_LINK_SPEED_50GB:
+ return SPEED_50000;
+ default:
+ return SPEED_UNKNOWN;
+ }
+}
+
+static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u16 ethtool_speed;
+
+ cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
+
+ if (link_info->auto_link_speeds)
+ cmd->supported |= SUPPORTED_Autoneg;
+
+ if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+ cmd->advertising =
+ bnxt_fw_to_ethtool_advertised_spds(link_info);
+ cmd->advertising |= ADVERTISED_Autoneg;
+ cmd->autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->advertising = 0;
+ }
+ if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
+ BNXT_LINK_PAUSE_BOTH) {
+ cmd->advertising |= ADVERTISED_Pause;
+ cmd->supported |= SUPPORTED_Pause;
+ } else {
+ cmd->advertising |= ADVERTISED_Asym_Pause;
+ cmd->supported |= SUPPORTED_Asym_Pause;
+ if (link_info->auto_pause_setting &
+ BNXT_LINK_PAUSE_RX)
+ cmd->advertising |= ADVERTISED_Pause;
+ }
+ } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
+ BNXT_LINK_PAUSE_BOTH) {
+ cmd->supported |= SUPPORTED_Pause;
+ } else {
+ cmd->supported |= SUPPORTED_Asym_Pause;
+ if (link_info->force_pause_setting &
+ BNXT_LINK_PAUSE_RX)
+ cmd->supported |= SUPPORTED_Pause;
+ }
+ }
+
+ cmd->port = PORT_NONE;
+ if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ cmd->port = PORT_TP;
+ cmd->supported |= SUPPORTED_TP;
+ cmd->advertising |= ADVERTISED_TP;
+ } else {
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+
+ if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
+ cmd->port = PORT_DA;
+ else if (link_info->media_type ==
+ PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
+ cmd->port = PORT_FIBRE;
+ }
+
+ if (link_info->phy_link_status == BNXT_LINK_LINK) {
+ if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ cmd->duplex = DUPLEX_FULL;
+ } else {
+ cmd->duplex = DUPLEX_UNKNOWN;
+ }
+ ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+ ethtool_cmd_speed_set(cmd, ethtool_speed);
+ if (link_info->transceiver ==
+ PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL)
+ cmd->transceiver = XCVR_INTERNAL;
+ else
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->phy_address = link_info->phy_addr;
+
+ return 0;
+}
+
+static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
+{
+ switch (ethtool_speed) {
+ case SPEED_100:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+ case SPEED_1000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+ case SPEED_2500:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+ case SPEED_10000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+ case SPEED_20000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+ case SPEED_25000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+ case SPEED_40000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+ case SPEED_50000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+ default:
+ netdev_err(dev, "unsupported speed!\n");
+ break;
+ }
+ return 0;
+}
+
+static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+{
+ u16 fw_speed_mask = 0;
+
+	/* autoneg is only supported at 100Mbps, 1Gbps and 10Gbps */
+ if (advertising & (ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half)) {
+ fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
+ }
+ if (advertising & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half)) {
+ fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
+ }
+ if (advertising & ADVERTISED_10000baseT_Full)
+ fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
+
+ return fw_speed_mask;
+}
+
+static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ int rc = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u32 speed, fw_advertising = 0;
+ bool set_pause = false;
+
+ if (BNXT_VF(bp))
+ return rc;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (link_info->media_type != PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ netdev_err(dev, "Media type doesn't support autoneg\n");
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ if (cmd->advertising & ~(BNXT_ALL_COPPER_ETHTOOL_SPEED |
+ ADVERTISED_Autoneg |
+ ADVERTISED_TP |
+ ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause)) {
+ netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
+ cmd->advertising);
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising);
+ if (fw_advertising & ~link_info->support_speeds) {
+ netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n",
+ cmd->advertising);
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ link_info->autoneg |= BNXT_AUTONEG_SPEED;
+ if (!fw_advertising)
+ link_info->advertising = link_info->support_speeds;
+ else
+ link_info->advertising = fw_advertising;
+		/* any change to autoneg will cause a link change, so the
+		 * driver should put back the original pause setting in
+		 * autoneg
+		 */
+ set_pause = true;
+ } else {
+ /* TODO: currently don't support half duplex */
+ if (cmd->duplex == DUPLEX_HALF) {
+ netdev_err(dev, "HALF DUPLEX is not supported!\n");
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+		/* If we receive a request for an unknown duplex, assume full */
+ if (cmd->duplex == DUPLEX_UNKNOWN)
+ cmd->duplex = DUPLEX_FULL;
+ speed = ethtool_cmd_speed(cmd);
+ link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
+ link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+ link_info->advertising = 0;
+ }
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_link_setting(bp, set_pause);
+
+set_setting_exit:
+ return rc;
+}
+
+static void bnxt_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (BNXT_VF(bp))
+ return;
+ epause->autoneg = !!(link_info->auto_pause_setting &
+ BNXT_LINK_PAUSE_BOTH);
+ epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0);
+ epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0);
+}
+
+static int bnxt_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ int rc = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (BNXT_VF(bp))
+ return rc;
+
+ if (epause->autoneg) {
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
+ } else {
+		/* when transitioning from auto pause to forced pause,
+		 * force a link change
+		 */
+ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+ link_info->force_link_chng = true;
+ link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
+ link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH;
+ }
+ if (epause->rx_pause)
+ link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
+ else
+ link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX;
+
+ if (epause->tx_pause)
+ link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
+ else
+ link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_pause(bp);
+ return rc;
+}
+
+static u32 bnxt_get_link(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ /* TODO: handle MF, VF, driver close case */
+ return bp->link_info.link_up;
+}
+
+static int bnxt_flash_nvram(struct net_device *dev,
+ u16 dir_type,
+ u16 dir_ordinal,
+ u16 dir_ext,
+ u16 dir_attr,
+ const u8 *data,
+ size_t data_len)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ struct hwrm_nvm_write_input req = {0};
+ dma_addr_t dma_handle;
+ u8 *kmem;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
+
+ req.dir_type = cpu_to_le16(dir_type);
+ req.dir_ordinal = cpu_to_le16(dir_ordinal);
+ req.dir_ext = cpu_to_le16(dir_ext);
+ req.dir_attr = cpu_to_le16(dir_attr);
+ req.dir_data_length = cpu_to_le32(data_len);
+
+ kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
+ GFP_KERNEL);
+ if (!kmem) {
+ netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+			   (unsigned int)data_len);
+ return -ENOMEM;
+ }
+ memcpy(kmem, data, data_len);
+ req.host_src_addr = cpu_to_le64(dma_handle);
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
+ dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
+
+ return rc;
+}
+
+static int bnxt_flash_firmware(struct net_device *dev,
+ u16 dir_type,
+ const u8 *fw_data,
+ size_t fw_size)
+{
+ int rc = 0;
+ u16 code_type;
+ u32 stored_crc;
+ u32 calculated_crc;
+ struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
+
+ switch (dir_type) {
+ case BNX_DIR_TYPE_BOOTCODE:
+ case BNX_DIR_TYPE_BOOTCODE_2:
+ code_type = CODE_BOOT;
+ break;
+ default:
+ netdev_err(dev, "Unsupported directory entry type: %u\n",
+ dir_type);
+ return -EINVAL;
+ }
+ if (fw_size < sizeof(struct bnxt_fw_header)) {
+ netdev_err(dev, "Invalid firmware file size: %u\n",
+ (unsigned int)fw_size);
+ return -EINVAL;
+ }
+ if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
+ netdev_err(dev, "Invalid firmware signature: %08X\n",
+ le32_to_cpu(header->signature));
+ return -EINVAL;
+ }
+ if (header->code_type != code_type) {
+ netdev_err(dev, "Expected firmware type: %d, read: %d\n",
+ code_type, header->code_type);
+ return -EINVAL;
+ }
+ if (header->device != DEVICE_CUMULUS_FAMILY) {
+ netdev_err(dev, "Expected firmware device family %d, read: %d\n",
+ DEVICE_CUMULUS_FAMILY, header->device);
+ return -EINVAL;
+ }
+ /* Confirm the CRC32 checksum of the file: */
+ stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
+ sizeof(stored_crc)));
+ calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
+ if (calculated_crc != stored_crc) {
+ netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
+ (unsigned long)stored_crc,
+ (unsigned long)calculated_crc);
+ return -EINVAL;
+ }
+ /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */
+ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+ 0, 0, fw_data, fw_size);
+ if (rc == 0) { /* Firmware update successful */
+ /* TODO: Notify processor it needs to reset itself
+ */
+ }
+ return rc;
+}
+
+static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
+{
+ switch (dir_type) {
+ case BNX_DIR_TYPE_CHIMP_PATCH:
+ case BNX_DIR_TYPE_BOOTCODE:
+ case BNX_DIR_TYPE_BOOTCODE_2:
+ case BNX_DIR_TYPE_APE_FW:
+ case BNX_DIR_TYPE_APE_PATCH:
+ case BNX_DIR_TYPE_KONG_FW:
+ case BNX_DIR_TYPE_KONG_PATCH:
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
+{
+ switch (dir_type) {
+ case BNX_DIR_TYPE_AVS:
+ case BNX_DIR_TYPE_EXP_ROM_MBA:
+ case BNX_DIR_TYPE_PCIE:
+ case BNX_DIR_TYPE_TSCF_UCODE:
+ case BNX_DIR_TYPE_EXT_PHY:
+ case BNX_DIR_TYPE_CCM:
+ case BNX_DIR_TYPE_ISCSI_BOOT:
+ case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
+ case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnxt_dir_type_is_executable(u16 dir_type)
+{
+ return bnxt_dir_type_is_ape_bin_format(dir_type) ||
+ bnxt_dir_type_is_unprotected_exec_format(dir_type);
+}
+
+static int bnxt_flash_firmware_from_file(struct net_device *dev,
+ u16 dir_type,
+ const char *filename)
+{
+ const struct firmware *fw;
+ int rc;
+
+	if (!bnxt_dir_type_is_executable(dir_type))
+ return -EINVAL;
+
+ rc = request_firmware(&fw, filename, &dev->dev);
+ if (rc != 0) {
+ netdev_err(dev, "Error %d requesting firmware file: %s\n",
+ rc, filename);
+ return rc;
+ }
+	if (bnxt_dir_type_is_ape_bin_format(dir_type))
+ rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
+ else
+ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+ 0, 0, fw->data, fw->size);
+ release_firmware(fw);
+ return rc;
+}
+
+static int bnxt_flash_package_from_file(struct net_device *dev,
+ char *filename)
+{
+ netdev_err(dev, "packages are not yet supported\n");
+ return -EINVAL;
+}
+
+static int bnxt_flash_device(struct net_device *dev,
+ struct ethtool_flash *flash)
+{
+ if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
+ netdev_err(dev, "flashdev not supported from a virtual function\n");
+ return -EINVAL;
+ }
+
+ if (flash->region == ETHTOOL_FLASH_ALL_REGIONS)
+ return bnxt_flash_package_from_file(dev, flash->data);
+
+ return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
+}
+
+static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ struct hwrm_nvm_get_dir_info_input req = {0};
+ struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ *entries = le32_to_cpu(output->entries);
+ *length = le32_to_cpu(output->entry_length);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_get_eeprom_len(struct net_device *dev)
+{
+ /* The -1 return value allows the entire 32-bit range of offsets to be
+ * passed via the ethtool command-line utility.
+ */
+ return -1;
+}
+
+static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ u32 dir_entries;
+ u32 entry_length;
+ u8 *buf;
+ size_t buflen;
+ dma_addr_t dma_handle;
+ struct hwrm_nvm_get_dir_entries_input req = {0};
+
+ rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
+ if (rc != 0)
+ return rc;
+
+ /* Insert 2 bytes of directory info (count and size of entries) */
+ if (len < 2)
+ return -EINVAL;
+
+ *data++ = dir_entries;
+ *data++ = entry_length;
+ len -= 2;
+ memset(data, 0xff, len);
+
+ buflen = dir_entries * entry_length;
+ buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
+ GFP_KERNEL);
+ if (!buf) {
+ netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+			   (unsigned int)buflen);
+ return -ENOMEM;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
+ req.host_dest_addr = cpu_to_le64(dma_handle);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == 0)
+ memcpy(data, buf, len > buflen ? buflen : len);
+ dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
+ return rc;
+}
+
+static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ u8 *buf;
+ dma_addr_t dma_handle;
+ struct hwrm_nvm_read_input req = {0};
+
+ buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
+ GFP_KERNEL);
+ if (!buf) {
+ netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+			   (unsigned int)length);
+ return -ENOMEM;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
+ req.host_dest_addr = cpu_to_le64(dma_handle);
+ req.dir_idx = cpu_to_le16(index);
+ req.offset = cpu_to_le32(offset);
+ req.len = cpu_to_le32(length);
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == 0)
+ memcpy(data, buf, length);
+ dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
+ return rc;
+}
+
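+/* eeprom->offset encodes what to read: bits 31-24 hold the NVM directory
+ * index plus one (zero means "return the directory itself"), bits 23-0
+ * the byte offset within that item.  Illustrative example: an offset of
+ * 0x0200000c reads the item at directory index 1, starting at byte 12.
+ */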
+static int bnxt_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ u32 index;
+ u32 offset;
+
+ if (eeprom->offset == 0) /* special offset value to get directory */
+ return bnxt_get_nvram_directory(dev, eeprom->len, data);
+
+ index = eeprom->offset >> 24;
+ offset = eeprom->offset & 0xffffff;
+
+ if (index == 0) {
+ netdev_err(dev, "unsupported index value: %d\n", index);
+ return -EINVAL;
+ }
+
+ return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
+}
+
+static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_nvm_erase_dir_entry_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
+ req.dir_idx = cpu_to_le16(index);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
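+/* eeprom->magic selects the operation: the high 16 bits carry the NVM
+ * directory entry type and the low 16 bits dir_ext, while eeprom->offset
+ * carries the ordinal (high 16) and attr (low 16).  The special type
+ * 0xffff requests directory maintenance instead: bits 15-8 are the op
+ * (0x0e = erase), bits 7-0 the directory index plus one, and offset must
+ * be the complement of magic as a safety check.  Illustrative example:
+ * magic 0xffff0e02 with offset ~0xffff0e02 erases directory index 1.
+ */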
+static int bnxt_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u8 index, dir_op;
+ u16 type, ext, ordinal, attr;
+
+ if (!BNXT_PF(bp)) {
+ netdev_err(dev, "NVM write not supported from a virtual function\n");
+ return -EINVAL;
+ }
+
+ type = eeprom->magic >> 16;
+
+ if (type == 0xffff) { /* special value for directory operations */
+ index = eeprom->magic & 0xff;
+ dir_op = eeprom->magic >> 8;
+ if (index == 0)
+ return -EINVAL;
+ switch (dir_op) {
+ case 0x0e: /* erase */
+ if (eeprom->offset != ~eeprom->magic)
+ return -EINVAL;
+ return bnxt_erase_nvram_directory(dev, index - 1);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Create or re-write an NVM item: */
+	if (bnxt_dir_type_is_executable(type))
+ return -EINVAL;
+ ext = eeprom->magic & 0xffff;
+ ordinal = eeprom->offset >> 16;
+ attr = eeprom->offset & 0xffff;
+
+ return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
+ eeprom->len);
+}
+
+const struct ethtool_ops bnxt_ethtool_ops = {
+ .get_settings = bnxt_get_settings,
+ .set_settings = bnxt_set_settings,
+ .get_pauseparam = bnxt_get_pauseparam,
+ .set_pauseparam = bnxt_set_pauseparam,
+ .get_drvinfo = bnxt_get_drvinfo,
+ .get_coalesce = bnxt_get_coalesce,
+ .set_coalesce = bnxt_set_coalesce,
+ .get_msglevel = bnxt_get_msglevel,
+ .set_msglevel = bnxt_set_msglevel,
+ .get_sset_count = bnxt_get_sset_count,
+ .get_strings = bnxt_get_strings,
+ .get_ethtool_stats = bnxt_get_ethtool_stats,
+ .set_ringparam = bnxt_set_ringparam,
+ .get_ringparam = bnxt_get_ringparam,
+ .get_channels = bnxt_get_channels,
+ .set_channels = bnxt_set_channels,
+#ifdef CONFIG_RFS_ACCEL
+ .get_rxnfc = bnxt_get_rxnfc,
+#endif
+ .get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
+ .get_rxfh_key_size = bnxt_get_rxfh_key_size,
+ .get_rxfh = bnxt_get_rxfh,
+ .flash_device = bnxt_flash_device,
+ .get_eeprom_len = bnxt_get_eeprom_len,
+ .get_eeprom = bnxt_get_eeprom,
+ .set_eeprom = bnxt_set_eeprom,
+ .get_link = bnxt_get_link,
+};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
new file mode 100644
index 000000000000..98fa81e08b58
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -0,0 +1,17 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_ETHTOOL_H
+#define BNXT_ETHTOOL_H
+
+extern const struct ethtool_ops bnxt_ethtool_ops;
+
+u32 bnxt_fw_to_ethtool_speed(u16);
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
new file mode 100644
index 000000000000..e0aac65c6d82
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -0,0 +1,104 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __BNXT_FW_HDR_H__
+#define __BNXT_FW_HDR_H__
+
+#define BNXT_FIRMWARE_BIN_SIGNATURE 0x1a4d4342 /* "BCM"+0x1a */
+
+enum SUPPORTED_FAMILY {
+ DEVICE_5702_3_4_FAMILY, /* 0 - Denali, Vinson, K2 */
+ DEVICE_5705_FAMILY, /* 1 - Bachelor */
+ DEVICE_SHASTA_FAMILY, /* 2 - 5751 */
+ DEVICE_5706_FAMILY, /* 3 - Teton */
+ DEVICE_5714_FAMILY, /* 4 - Hamilton */
+ DEVICE_STANFORD_FAMILY, /* 5 - 5755 */
+ DEVICE_STANFORD_ME_FAMILY, /* 6 - 5756 */
+ DEVICE_SOLEDAD_FAMILY, /* 7 - 5761[E] */
+ DEVICE_CILAI_FAMILY, /* 8 - 57780/60/90/91 */
+ DEVICE_ASPEN_FAMILY, /* 9 - 57781/85/61/65/91/95 */
+ DEVICE_ASPEN_PLUS_FAMILY, /* 10 - 57786 */
+ DEVICE_LOGAN_FAMILY, /* 11 - Any device in the Logan family
+ */
+ DEVICE_LOGAN_5762, /* 12 - Logan Enterprise (aka Columbia)
+ */
+ DEVICE_LOGAN_57767, /* 13 - Logan Client */
+ DEVICE_LOGAN_57787, /* 14 - Logan Consumer */
+ DEVICE_LOGAN_5725, /* 15 - Logan Server (TruManage-enabled)
+ */
+ DEVICE_SAWTOOTH_FAMILY, /* 16 - 5717/18 */
+ DEVICE_COTOPAXI_FAMILY, /* 17 - 5719 */
+ DEVICE_SNAGGLETOOTH_FAMILY, /* 18 - 5720 */
+ DEVICE_CUMULUS_FAMILY, /* 19 - Cumulus/Whitney */
+ MAX_DEVICE_FAMILY
+};
+
+enum SUPPORTED_CODE {
+ CODE_ASF1, /* 0 - ASF VERSION 1.03 <deprecated> */
+ CODE_ASF2, /* 1 - ASF VERSION 2.00 <deprecated> */
+ CODE_PASSTHRU, /* 2 - PassThru <deprecated> */
+ CODE_PT_SEC, /* 3 - PassThru with security <deprecated> */
+ CODE_UMP, /* 4 - UMP <deprecated> */
+ CODE_BOOT, /* 5 - Bootcode */
+ CODE_DASH, /* 6 - TruManage (DASH + ASF + PMCI)
+ * Management firmwares
+ */
+	CODE_MCTP_PASSTHRU, /* 7 - NCSI / MCTP Pass-through firmware */
+ CODE_PM_OFFLOAD, /* 8 - Power-Management Proxy Offload firmwares
+ */
+	CODE_MDNS_SD_OFFLOAD, /* 9 - Multicast DNS Service Discovery Proxy
+			       * Offload firmware
+			       */
+ CODE_DISC_OFFLOAD, /* 10 - Discovery Offload firmware */
+ CODE_MUSTANG, /* 11 - I2C Error reporting APE firmwares
+ * <deprecated>
+ */
+ CODE_ARP_BATCH, /* 12 - ARP Batch firmware */
+ CODE_SMASH, /* 13 - TruManage (SMASH + DCMI/IPMI + PMCI)
+ * Management firmware
+ */
+ CODE_APE_DIAG, /* 14 - APE Test Diag firmware */
+ CODE_APE_PATCH, /* 15 - APE Patch firmware */
+ CODE_TANG_PATCH, /* 16 - TANG Patch firmware */
+ CODE_KONG_FW, /* 17 - KONG firmware */
+ CODE_KONG_PATCH, /* 18 - KONG Patch firmware */
+ CODE_BONO_FW, /* 19 - BONO firmware */
+ CODE_BONO_PATCH, /* 20 - BONO Patch firmware */
+
+ MAX_CODE_TYPE,
+};
+
+enum SUPPORTED_MEDIA {
+ MEDIA_COPPER, /* 0 */
+ MEDIA_FIBER, /* 1 */
+ MEDIA_NONE, /* 2 */
+ MEDIA_COPPER_FIBER, /* 3 */
+ MAX_MEDIA_TYPE,
+};
+
+struct bnxt_fw_header {
+	__le32 signature;	/* contains the constant value
+				 * BNXT_FIRMWARE_BIN_SIGNATURE
+				 */
+ u8 flags; /* reserved for ChiMP use */
+ u8 code_type; /* enum SUPPORTED_CODE */
+ u8 device; /* enum SUPPORTED_FAMILY */
+ u8 media; /* enum SUPPORTED_MEDIA */
+	u8 version[16];	/* null-terminated version string,
+			 * copied from the binary file's
+			 * version string
+			 */
+ u8 build;
+ u8 revision;
+ u8 minor_ver;
+ u8 major_ver;
+};
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
new file mode 100644
index 000000000000..70fc8253c07f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -0,0 +1,4046 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_HSI_H
+#define BNXT_HSI_H
+
+/* per-context HW statistics -- chip view */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
+/* Statistics Ejection Buffer Completion Record (16 bytes) */
+struct eject_cmpl {
+ __le16 type;
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT (0x1aUL << 0)
+ __le16 len;
+ __le32 opaque;
+ __le32 v;
+ #define EJECT_CMPL_V 0x1UL
+ __le32 unused_2;
+};
+
+/* HWRM Completion Record (16 bytes) */
+struct hwrm_cmpl {
+ __le16 type;
+ #define HWRM_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_CMPL_TYPE_SFT 0
+ #define HWRM_CMPL_TYPE_HWRM_DONE (0x20UL << 0)
+ __le16 sequence_id;
+ __le32 unused_1;
+ __le32 v;
+ #define HWRM_CMPL_V 0x1UL
+ __le32 unused_3;
+};
+
+/* HWRM Forwarded Request (16 bytes) */
+struct hwrm_fwd_req_cmpl {
+ __le16 req_len_type;
+ #define HWRM_FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
+ #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ (0x22UL << 0)
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
+ __le16 source_id;
+ __le32 unused_0;
+ __le32 req_buf_addr_v[2];
+ #define HWRM_FWD_REQ_CMPL_V 0x1UL
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
+
+/* HWRM Forwarded Response (16 bytes) */
+struct hwrm_fwd_resp_cmpl {
+ __le16 type;
+ #define HWRM_FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_FWD_RESP_CMPL_TYPE_SFT 0
+ #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP (0x24UL << 0)
+ __le16 source_id;
+ __le16 resp_len;
+ __le16 unused_1;
+ __le32 resp_buf_addr_v[2];
+ #define HWRM_FWD_RESP_CMPL_V 0x1UL
+ #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* HWRM Asynchronous Event Completion Record (16 bytes) */
+struct hwrm_async_event_cmpl {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD       (0x21UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+};
+
+/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
+struct hwrm_async_event_cmpl_link_status_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_UP 0x1UL
+};
+
+/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
+struct hwrm_async_event_cmpl_link_mtu_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
+};
+
+/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
+struct hwrm_async_event_cmpl_dcb_config_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_unload {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_load {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_unload {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_load {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
+struct hwrm_async_event_cmpl_vf_flr {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR (0x30UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
+struct hwrm_async_event_cmpl_vf_mac_addr_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ __le32 event_data2;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
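+
+/* A minimal severity check sketch (illustrative only):
+ *
+ *	bool fatal = (le32_to_cpu(cmpl->event_data2) &
+ *		HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK) ==
+ *		HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL;
+ */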
+
+/* HW Resource Manager Specification 0.7.8 */
+#define HWRM_VERSION_MAJOR 0
+#define HWRM_VERSION_MINOR 7
+#define HWRM_VERSION_UPDATE 8
+
+#define HWRM_VERSION_STR "0.7.8"
+/* Following is the signature for an HWRM message field that indicates the
+ * field is not applicable (all F's). Cast it to the size of the field if
+ * needed.
+ */
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
+#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */
+#define HW_HASH_INDEX_SIZE 0x80 /* 7-bit indirection table index. */
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1 /* value of the 'valid' byte in a completed response */
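+
+/* A minimal sketch of marking a 16-bit request field not applicable
+ * (illustrative only; the signature is truncated to the field width):
+ *
+ *	req.cmpl_ring = cpu_to_le16((u16)HWRM_NA_SIGNATURE);
+ */
+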
+/* Input (16 bytes) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
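+
+/* Every command-specific *_input structure below begins with the five
+ * fields of struct input, and every *_output begins with the four fields
+ * of struct output, so common header setup can be shared. A minimal sketch
+ * (illustrative only, 'resp_dma' being an assumed caller-mapped address):
+ *
+ *	struct input *req = (struct input *)&cmd;
+ *	req->req_type = cpu_to_le16(HWRM_VER_GET);
+ *	req->resp_addr = cpu_to_le64(resp_dma);
+ */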
+
+/* Command numbering (8 bytes) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET (0x0UL)
+ #define HWRM_FUNC_DISABLE (0x10UL)
+ #define HWRM_FUNC_RESET (0x11UL)
+ #define HWRM_FUNC_GETFID (0x12UL)
+ #define HWRM_FUNC_VF_ALLOC (0x13UL)
+ #define HWRM_FUNC_VF_FREE (0x14UL)
+ #define HWRM_FUNC_QCAPS (0x15UL)
+ #define HWRM_FUNC_QCFG (0x16UL)
+ #define HWRM_FUNC_CFG (0x17UL)
+ #define HWRM_FUNC_QSTATS (0x18UL)
+ #define HWRM_FUNC_CLR_STATS (0x19UL)
+ #define HWRM_FUNC_DRV_UNRGTR (0x1aUL)
+ #define HWRM_FUNC_VF_RESC_FREE (0x1bUL)
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY (0x1cUL)
+ #define HWRM_FUNC_DRV_RGTR (0x1dUL)
+ #define HWRM_FUNC_DRV_QVER (0x1eUL)
+ #define HWRM_FUNC_BUF_RGTR (0x1fUL)
+ #define HWRM_FUNC_VF_CFG (0xfUL)
+ #define HWRM_PORT_PHY_CFG (0x20UL)
+ #define HWRM_PORT_MAC_CFG (0x21UL)
+ #define HWRM_PORT_ENABLE (0x22UL)
+ #define HWRM_PORT_QSTATS (0x23UL)
+ #define HWRM_PORT_LPBK_QSTATS (0x24UL)
+ #define HWRM_PORT_CLR_STATS (0x25UL)
+ #define HWRM_PORT_LPBK_CLR_STATS (0x26UL)
+ #define HWRM_PORT_PHY_QCFG (0x27UL)
+ #define HWRM_PORT_MAC_QCFG (0x28UL)
+ #define HWRM_PORT_BLINK_LED (0x29UL)
+ #define HWRM_QUEUE_QPORTCFG (0x30UL)
+ #define HWRM_QUEUE_QCFG (0x31UL)
+ #define HWRM_QUEUE_CFG (0x32UL)
+ #define HWRM_QUEUE_BUFFERS_QCFG (0x33UL)
+ #define HWRM_QUEUE_BUFFERS_CFG (0x34UL)
+ #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
+ #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
+ #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
+ #define HWRM_QUEUE_PRI2COS_CFG (0x38UL)
+ #define HWRM_QUEUE_COS2BW_QCFG (0x39UL)
+ #define HWRM_QUEUE_COS2BW_CFG (0x3aUL)
+ #define HWRM_VNIC_ALLOC (0x40UL)
+ #define HWRM_VNIC_FREE (0x41UL)
+ #define HWRM_VNIC_CFG (0x42UL)
+ #define HWRM_VNIC_QCFG (0x43UL)
+ #define HWRM_VNIC_TPA_CFG (0x44UL)
+ #define HWRM_VNIC_TPA_QCFG (0x45UL)
+ #define HWRM_VNIC_RSS_CFG (0x46UL)
+ #define HWRM_VNIC_RSS_QCFG (0x47UL)
+ #define HWRM_VNIC_PLCMODES_CFG (0x48UL)
+ #define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
+ #define HWRM_RING_ALLOC (0x50UL)
+ #define HWRM_RING_FREE (0x51UL)
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (0x53UL)
+ #define HWRM_RING_RESET (0x5eUL)
+ #define HWRM_RING_GRP_ALLOC (0x60UL)
+ #define HWRM_RING_GRP_FREE (0x61UL)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
+ #define HWRM_ARB_GRP_ALLOC (0x80UL)
+ #define HWRM_ARB_GRP_CFG (0x81UL)
+ #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
+ #define HWRM_CFA_L2_FILTER_FREE (0x91UL)
+ #define HWRM_CFA_L2_FILTER_CFG (0x92UL)
+ #define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
+ #define HWRM_CFA_L2_SET_BCASTMCAST_MIRRORING (0x94UL)
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
+ #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
+ #define HWRM_CFA_ENCAP_RECORD_FREE (0x98UL)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL)
+ #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL)
+ #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL)
+ #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL)
+ #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL)
+ #define HWRM_STAT_CTX_ALLOC (0xb0UL)
+ #define HWRM_STAT_CTX_FREE (0xb1UL)
+ #define HWRM_STAT_CTX_QUERY (0xb2UL)
+ #define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
+ #define HWRM_FW_RESET (0xc0UL)
+ #define HWRM_FW_QSTATUS (0xc1UL)
+ #define HWRM_EXEC_FWD_RESP (0xd0UL)
+ #define HWRM_REJECT_FWD_RESP (0xd1UL)
+ #define HWRM_FWD_RESP (0xd2UL)
+ #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
+ #define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
+ #define HWRM_MGMT_L2_FILTER_ALLOC (0x100UL)
+ #define HWRM_MGMT_L2_FILTER_FREE (0x101UL)
+ #define HWRM_DBG_READ_DIRECT (0xff10UL)
+ #define HWRM_DBG_READ_INDIRECT (0xff11UL)
+ #define HWRM_DBG_WRITE_DIRECT (0xff12UL)
+ #define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
+ #define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_MODIFY (0xfff4UL)
+ #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
+ #define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
+ #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL)
+ #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL)
+ #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL)
+ #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL)
+ #define HWRM_NVM_GET_DIR_INFO (0xfffbUL)
+ #define HWRM_NVM_RAW_DUMP (0xfffcUL)
+ #define HWRM_NVM_READ (0xfffdUL)
+ #define HWRM_NVM_WRITE (0xfffeUL)
+ #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL)
+ __le16 unused_0[3];
+};
+
+/* Return Codes (8 bytes) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS (0x0UL)
+ #define HWRM_ERR_CODE_FAIL (0x1UL)
+ #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL)
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL)
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL)
+ #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL)
+ #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL)
+ #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL)
+ #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL)
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL)
+ __le16 unused_0[3];
+};
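+
+/* A minimal sketch of collapsing these firmware codes into kernel errnos
+ * (illustrative only, not a mandated mapping):
+ *
+ *	if (rc == HWRM_ERR_CODE_SUCCESS)
+ *		return 0;
+ *	return rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED ? -EOPNOTSUPP : -EIO;
+ */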
+
+/* Output (16 bytes) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 opaque_2;
+ u8 valid;
+};
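+
+/* The trailing 'valid' byte is written by firmware after the rest of the
+ * response, so a poller can wait on it before reading any other field.
+ * A minimal sketch (illustrative only):
+ *
+ *	while (READ_ONCE(resp->valid) != HWRM_RESP_VALID_KEY)
+ *		udelay(1);
+ */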
+
+/* Port Tx Statistics Formats (408 bytes) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* Port Rx Statistics Formats (528 bytes) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* hwrm_ver_get */
+/* Input (24 bytes) */
+struct hwrm_ver_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 unused_0[5];
+};
+
+/* Output (128 bytes) */
+struct hwrm_ver_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 hwrm_intf_rsvd;
+ u8 hwrm_fw_maj;
+ u8 hwrm_fw_min;
+ u8 hwrm_fw_bld;
+ u8 hwrm_fw_rsvd;
+ u8 ape_fw_maj;
+ u8 ape_fw_min;
+ u8 ape_fw_bld;
+ u8 ape_fw_rsvd;
+ u8 kong_fw_maj;
+ u8 kong_fw_min;
+ u8 kong_fw_bld;
+ u8 kong_fw_rsvd;
+ u8 tang_fw_maj;
+ u8 tang_fw_min;
+ u8 tang_fw_bld;
+ u8 tang_fw_rsvd;
+ u8 bono_fw_maj;
+ u8 bono_fw_min;
+ u8 bono_fw_bld;
+ u8 bono_fw_rsvd;
+ char hwrm_fw_name[16];
+ char ape_fw_name[16];
+ char kong_fw_name[16];
+ char tang_fw_name[16];
+ char bono_fw_name[16];
+ __le16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u8 chip_bond_id;
+ u8 unused_0;
+ __le16 max_req_win_len;
+ __le16 max_resp_len;
+ __le16 def_req_timeout;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
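+
+/* A minimal VER_GET sketch (illustrative only): the request carries the
+ * driver's supported interface version, and the response reports the
+ * firmware versions:
+ *
+ *	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ *	req.hwrm_intf_min = HWRM_VERSION_MINOR;
+ *	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+ *	(send the command, then)
+ *	netdev_info(dev, "fw %u.%u.%u\n", resp->hwrm_fw_maj,
+ *		    resp->hwrm_fw_min, resp->hwrm_fw_bld);
+ */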
+
+/* hwrm_func_disable */
+/* Input (24 bytes) */
+struct hwrm_func_disable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_DISABLE_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_disable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_reset */
+/* Input (24 bytes) */
+struct hwrm_func_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_getfid */
+/* Input (24 bytes) */
+struct hwrm_func_getfid_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_getfid_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc */
+/* Input (24 bytes) */
+struct hwrm_func_vf_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_func_vf_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg */
+/* Input (24 bytes) */
+struct hwrm_func_vf_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ __le16 mtu;
+ __le16 guest_vlan;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_qcaps */
+/* Input (24 bytes) */
+struct hwrm_func_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (80 bytes) */
+struct hwrm_func_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ u8 perm_mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_func_cfg */
+/* Input (88 bytes) */
+struct hwrm_func_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_PROM_MODE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_IP_ADDR_CHECK 0x4UL
+ #define FUNC_CFG_REQ_FLAGS_VLAN_PRI_MATCH 0x8UL
+ #define FUNC_CFG_REQ_FLAGS_DFLT_PRI_NOMATCH 0x10UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_PAUSE 0x20UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_STP 0x40UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP 0x80UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2 0x100UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ __le16 mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ __le32 max_bw;
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK (0x0UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN (0x1UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+ u8 allowed_vlan_pris;
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_NOCHECK (0x0UL << 0)
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_VALIDATE_VLAN (0x1UL << 0)
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_IF_VLANDNE (0x2UL << 0)
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB (0x0UL << 0)
+ #define FUNC_CFG_REQ_EVB_MODE_VEB (0x1UL << 0)
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA (0x2UL << 0)
+ u8 unused_2;
+ __le16 num_mcast_filters;
+};
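+
+/* As throughout HWRM, a configuration field is only acted on when the
+ * matching bit is set in 'enables'. A minimal sketch of changing only the
+ * MTU (illustrative only, 'new_mtu' assumed):
+ *
+ *	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU);
+ *	req.mtu = cpu_to_le16(new_mtu);
+ */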
+
+/* Output (16 bytes) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_qstats */
+/* Input (24 bytes) */
+struct hwrm_func_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (176 bytes) */
+struct hwrm_func_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_err_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_err_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_func_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_vf_resc_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_resc_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_resc_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query */
+/* Input (32 bytes) */
+struct hwrm_func_vf_vnic_ids_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 max_vnic_id_cnt;
+ __le64 vnic_id_tbl_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_vnic_ids_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id_cnt;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_func_drv_rgtr */
+/* Input (80 bytes) */
+struct hwrm_func_drv_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 timestamp;
+ __le32 unused_2;
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+};
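+
+/* vf_req_fwd and async_event_fwd are 256-bit bitmaps indexed by command ID
+ * and event ID respectively. A minimal sketch of requesting forwarding of
+ * one async event, on a CPU-order copy of the bitmap (illustrative only):
+ *
+ *	bmap[event_id / 32] |= 1U << (event_id % 32);
+ */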
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr */
+/* Input (24 bytes) */
+struct hwrm_func_drv_unrgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_unrgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_buf_rgtr */
+/* Input (128 bytes) */
+struct hwrm_func_buf_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B (0x4UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x15UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x16UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0)
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
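+
+/* The req_buf_page_size encodings above are the log2 of the page size in
+ * bytes: 0x4 = 16 B, 0xc = 4 KB, 0xd = 8 KB, 0x10 = 64 KB, 0x15 = 2 MB,
+ * 0x16 = 4 MB, 0x1e = 1 GB.
+ */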
+
+/* Output (16 bytes) */
+struct hwrm_func_buf_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_drv_qver */
+/* Input (24 bytes) */
+struct hwrm_func_drv_qver_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_DRV_QVER_REQ_ENABLES_OS_TYPE_VALID 0x1UL
+ #define FUNC_DRV_QVER_REQ_ENABLES_VER_VALID 0x2UL
+ __le16 fid;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_qver_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg */
+/* Input (48 bytes) */
+struct hwrm_port_phy_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_MASK (0x4UL << 0)
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH (0x2UL << 0)
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ u8 unused_0;
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0)
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le32 unused_2;
+};
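+
+/* A minimal sketch of forcing a fixed link speed (illustrative only):
+ *
+ *	req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+ *	req.force_link_speed =
+ *		cpu_to_le16(PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB);
+ */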
+
+/* Output (16 bytes) */
+struct hwrm_port_phy_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_phy_qcfg */
+/* Input (24 bytes) */
+struct hwrm_port_phy_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (48 bytes) */
+struct hwrm_port_phy_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_LINK (0x2UL << 0)
+ u8 unused_0;
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0)
+ u8 duplex;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0)
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK (0x4UL << 0)
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ u8 wirespeed;
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0)
+ u8 lpbk;
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ u8 force_pause;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+ u8 duplex_setting;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_HALF (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_FULL (0x1UL << 0)
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4 (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4 (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4 (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4 (0x6UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0)
+ u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0)
+ u8 transceiver_type;
+ #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0)
+ u8 phy_addr;
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+ u8 unused_2;
+ __le16 link_partner_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ u8 link_partner_adv_auto_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0)
+ u8 link_partner_adv_pause;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ u8 unused_3;
+ u8 unused_4;
+ u8 unused_5;
+ u8 valid;
+};
+
+/* hwrm_port_mac_cfg */
+/* Input (32 bytes) */
+struct hwrm_port_mac_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ __le32 enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_IVLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ __le16 port_id;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE (0x0UL << 0)
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
+ u8 ivlan_pri2cos_map_pri;
+ u8 lcos_map_pri;
+ u8 tunnel_pri2cos_map_pri;
+ u8 dscp2pri_map_pri;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_mac_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ __le16 mtu;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE (0x0UL << 0)
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_port_enable */
+/* Input (24 bytes) */
+struct hwrm_port_enable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_ENABLE_REQ_FLAGS_FORWARD_TRAFFIC 0x1UL
+ __le16 port_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_enable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_qstats */
+/* Input (40 bytes) */
+struct hwrm_port_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2[3];
+ u8 unused_3;
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats */
+/* Input (16 bytes) */
+struct hwrm_port_lpbk_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (64 bytes) */
+struct hwrm_port_lpbk_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 lpbk_ucast_frames;
+ __le64 lpbk_mcast_frames;
+ __le64 lpbk_bcast_frames;
+ __le64 lpbk_ucast_bytes;
+ __le64 lpbk_mcast_bytes;
+ __le64 lpbk_bcast_bytes;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_port_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats */
+/* Input (16 bytes) */
+struct hwrm_port_lpbk_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_lpbk_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_blink_led */
+/* Input (24 bytes) */
+struct hwrm_port_blink_led_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 num_blinks;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_blink_led_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_qportcfg */
+/* Input (24 bytes) */
+struct hwrm_queue_qportcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le16 port_id;
+ __le16 unused_0;
+};
+
+/* Output (32 bytes) */
+struct hwrm_queue_qportcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 max_configurable_queues;
+ u8 max_configurable_lossless_queues;
+ u8 queue_cfg_allowed;
+ u8 queue_buffers_cfg_allowed;
+ u8 queue_pfcenable_cfg_allowed;
+ u8 queue_pri2cos_cfg_allowed;
+ u8 queue_cos2bw_cfg_allowed;
+ u8 queue_id0;
+ u8 queue_id0_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id1;
+ u8 queue_id1_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id2;
+ u8 queue_id2_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id3;
+ u8 queue_id3_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id4;
+ u8 queue_id4_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id5;
+ u8 queue_id5_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id6;
+ u8 queue_id6_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id7;
+ u8 queue_id7_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 valid;
+};
+
+/* hwrm_queue_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le32 enables;
+ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+ __le32 queue_id;
+ __le32 dflt_len;
+ u8 service_profile;
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_buffers_cfg */
+/* Input (56 bytes) */
+struct hwrm_queue_buffers_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le32 enables;
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_GROUP 0x4UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x8UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x10UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x20UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x40UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x80UL
+ __le32 queue_id;
+ __le32 reserved;
+ __le32 shared;
+ __le32 xoff;
+ __le32 xon;
+ __le32 full;
+ __le32 notfull;
+ __le32 max;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_buffers_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg */
+/* Input (24 bytes) */
+struct hwrm_queue_pfcenable_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI7_PFC_ENABLED 0x80UL
+ __le16 port_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pfcenable_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_pri2cos_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL
+ __le32 enables;
+ u8 port_id;
+ u8 pri0_cos;
+ u8 pri1_cos;
+ u8 pri2_cos;
+ u8 pri3_cos;
+ u8 pri4_cos;
+ u8 pri5_cos;
+ u8 pri6_cos;
+ u8 pri7_cos;
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pri2cos_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_cos2bw_cfg */
+/* Input (128 bytes) */
+struct hwrm_queue_cos2bw_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+ __le16 port_id;
+ u8 queue_id0;
+ u8 unused_0;
+ __le32 queue_id0_min_bw;
+ __le32 queue_id0_max_bw;
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ __le32 queue_id1_max_bw;
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ __le32 queue_id2_max_bw;
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ __le32 queue_id3_max_bw;
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ __le32 queue_id4_max_bw;
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ __le32 queue_id5_max_bw;
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ __le32 queue_id6_max_bw;
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ __le32 queue_id7_max_bw;
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_1[5];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_cos2bw_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_alloc */
+/* Input (24 bytes) */
+struct hwrm_vnic_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_vnic_free */
+/* Input (24 bytes) */
+struct hwrm_vnic_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ __le32 enables;
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ __le16 vnic_id;
+ __le16 dflt_ring_grp;
+ __le16 rss_rule;
+ __le16 cos_rule;
+ __le16 lb_rule;
+ __le16 mru;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_tpa_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le32 enables;
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ __le16 vnic_id;
+ __le16 max_agg_segs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 (0x0UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 (0x1UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 (0x2UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 (0x3UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX (0x1fUL << 0)
+ __le16 max_aggs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 (0x0UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 (0x1UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 (0x2UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 (0x3UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 (0x4UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX (0x7UL << 0)
+ u8 unused_0;
+ u8 unused_1;
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_tpa_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
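+
+/* Illustrative sketch, not part of the generated interface: a driver
+ * enabling TPA with GRO and bounded aggregation might fill the request
+ * roughly as follows (field names from the structures above):
+ *
+ *   req.flags = cpu_to_le32(VNIC_TPA_CFG_REQ_FLAGS_TPA |
+ *                           VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
+ *                           VNIC_TPA_CFG_REQ_FLAGS_GRO);
+ *   req.enables = cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
+ *                             VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
+ *   req.max_agg_segs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX);
+ *   req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
+ */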
+
+/* hwrm_vnic_rss_cfg */
+/* Input (48 bytes) */
+struct hwrm_vnic_rss_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 hash_type;
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ __le32 unused_0;
+ __le64 ring_grp_tbl_addr;
+ __le64 hash_key_tbl_addr;
+ __le16 rss_ctx_idx;
+ __le16 unused_1[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
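+
+/* Illustrative sketch, not part of the generated interface: enabling
+ * 4-tuple RSS for TCP over IPv4 and IPv6 amounts to setting the hash
+ * type bits and pointing the firmware at host tables:
+ *
+ *   req.hash_type = cpu_to_le32(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ *                               VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ *                               VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ *                               VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6);
+ *   req.ring_grp_tbl_addr = cpu_to_le64(tbl_dma);   hypothetical DMA handle
+ *   req.hash_key_tbl_addr = cpu_to_le64(key_dma);   hypothetical DMA handle
+ *   req.rss_ctx_idx = cpu_to_le16(ctx_id);          from rss_cos_lb_ctx_alloc
+ */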
+
+/* hwrm_vnic_plcmodes_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_plcmodes_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ __le32 enables;
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ __le32 vnic_id;
+ __le16 jumbo_thresh;
+ __le16 hds_offset;
+ __le16 hds_threshold;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_plcmodes_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc */
+/* Input (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free */
+/* Input (24 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_cos_lb_ctx_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_alloc */
+/* Input (80 bytes) */
+struct hwrm_ring_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define RING_ALLOC_REQ_ENABLES_ARB_GRP_ID_VALID 0x1UL
+ #define RING_ALLOC_REQ_ENABLES_INPUT_NUM_VALID 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_WEIGHT_VALID 0x4UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MIN_BW_VALID 0x10UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ u8 ring_type;
+ #define RING_ALLOC_REQ_RING_TYPE_CMPL (0x0UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_TX (0x1UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_STATUS (0x3UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_CMD (0x4UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ __le64 page_tbl_addr;
+ __le32 fbo;
+ u8 page_size;
+ u8 page_tbl_depth;
+ u8 unused_2;
+ u8 unused_3;
+ __le32 length;
+ __le16 logical_id;
+ __le16 cmpl_ring_id;
+ __le16 queue_id;
+ u8 unused_4;
+ u8 unused_5;
+ __le32 arb_grp_id;
+ __le16 input_number;
+ u8 unused_6;
+ u8 unused_7;
+ __le32 weight;
+ __le32 stat_ctx_id;
+ __le32 min_bw;
+ __le32 max_bw;
+ u8 int_mode;
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY (0x0UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_MSI (0x1UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_MSIX (0x2UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_POLL (0x3UL << 0)
+ u8 unused_8[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ring_id;
+ __le16 logical_ring_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
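+
+/* Illustrative sketch, not part of the generated interface: allocating a
+ * TX ring bound to an existing completion ring might look like:
+ *
+ *   req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ *   req.page_tbl_addr = cpu_to_le64(ring_dma);      hypothetical DMA handle
+ *   req.length = cpu_to_le32(num_entries);          hypothetical ring size
+ *   req.cmpl_ring_id = cpu_to_le16(cp_ring_id);     from a prior CMPL alloc
+ *   req.queue_id = cpu_to_le16(queue_id);
+ *
+ * On success the firmware returns the hardware ring_id for the new ring.
+ */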
+
+/* hwrm_ring_free */
+/* Input (24 bytes) */
+struct hwrm_ring_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_FREE_REQ_RING_TYPE_CMPL (0x0UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_TX (0x1UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_STATUS (0x3UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_CMD (0x4UL << 0)
+ u8 unused_0;
+ __le16 ring_id;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params */
+/* Input (24 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 unused_0[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params */
+/* Input (40 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_reset */
+/* Input (24 bytes) */
+struct hwrm_ring_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_CMPL (0x0UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_TX (0x1UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_STATUS (0x3UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_CMD (0x4UL << 0)
+ u8 unused_0;
+ __le16 ring_id;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_grp_alloc */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 cr;
+ __le16 rr;
+ __le16 ar;
+ __le16 sc;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 ring_group_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_ring_grp_free */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 ring_group_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_arb_grp_alloc */
+/* Input (24 bytes) */
+struct hwrm_arb_grp_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 input_number;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_arb_grp_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 arb_grp_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_arb_grp_cfg */
+/* Input (32 bytes) */
+struct hwrm_arb_grp_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 arb_grp_id;
+ __le16 input_number;
+ __le16 tx_ring;
+ __le32 weight;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_arb_grp_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_alloc */
+/* Input (96 bytes) */
+struct hwrm_cfa_l2_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ __le32 enables;
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ u8 l2_addr[6];
+ u8 unused_0;
+ u8 unused_1;
+ u8 l2_addr_mask[6];
+ __le16 l2_ovlan;
+ __le16 l2_ovlan_mask;
+ __le16 l2_ivlan;
+ __le16 l2_ivlan_mask;
+ u8 unused_2;
+ u8 unused_3;
+ u8 t_l2_addr[6];
+ u8 unused_4;
+ u8 unused_5;
+ u8 t_l2_addr_mask[6];
+ __le16 t_l2_ovlan;
+ __le16 t_l2_ovlan_mask;
+ __le16 t_l2_ivlan;
+ __le16 t_l2_ivlan_mask;
+ u8 src_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF (0x2UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC (0x3UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG (0x4UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE (0x5UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO (0x6UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG (0x7UL << 0)
+ u8 unused_6;
+ __le32 src_id;
+ u8 tunnel_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_7;
+ __le16 dst_vnic_id;
+ __le16 mirror_vnic_id;
+ u8 pri_hint;
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER (0x2UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX (0x3UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN (0x4UL << 0)
+ u8 unused_8;
+ __le32 unused_9;
+ __le64 l2_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_l2_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 l2_filter_id;
+ __le32 flow_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_l2_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 l2_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ __le32 enables;
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_VNIC_ID_VALID 0x1UL
+ __le64 l2_filter_id;
+ __le32 dst_vnic_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 dflt_vnic_id;
+ __le32 mask;
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST 0x1UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ __le64 mc_tbl_addr;
+ __le32 num_mc_entries;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
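+
+/* Illustrative sketch, not part of the generated interface: putting the
+ * default VNIC into promiscuous mode while still accepting broadcasts:
+ *
+ *   req.dflt_vnic_id = cpu_to_le32(vnic_id);        hypothetical VNIC id
+ *   req.mask = cpu_to_le32(CFA_L2_SET_RX_MASK_REQ_MASK_BCAST |
+ *                          CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS);
+ *
+ * Multicast filtering instead uses mc_tbl_addr/num_mc_entries with the
+ * MCAST bit, or the ALL_MCAST bit to accept all multicast traffic.
+ */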
+
+/* hwrm_cfa_l2_set_bcastmcast_mirroring */
+/* Input (32 bytes) */
+struct hwrm_cfa_l2_set_bcastmcast_mirroring_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 dflt_vnic_id;
+ __le32 mirroring_flags;
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_MIRRORING 0x1UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_MIRRORING 0x2UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_SRC_KNOCKOUT 0x4UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_SRC_KNOCKOUT 0x8UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_VLAN_ID_VALID 0x10UL
+ __le16 vlan_id;
+ u8 bcast_domain;
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_PFONLY (0x0UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFS (0x1UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
+ u8 mcast_domain;
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_PFONLY (0x0UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFS (0x1UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_bcastmcast_mirroring_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_alloc */
+/* Input (88 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ __le32 enables;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+ __le64 l2_filter_id;
+ u8 l2_addr[6];
+ __le16 l2_ivlan;
+ __le32 l3_addr[4];
+ __le32 t_l3_addr[4];
+ u8 l3_addr_type;
+ u8 t_l3_addr_type;
+ u8 tunnel_type;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0;
+ __le32 vni;
+ __le32 dst_vnic_id;
+ __le32 mirror_vnic_id;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tunnel_filter_id;
+ __le32 flow_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_tunnel_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 tunnel_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_tunnel_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_alloc */
+/* Input (32 bytes) */
+struct hwrm_cfa_encap_record_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ u8 encap_type;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP (0x4UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS (0x6UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN (0x7UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE (0x8UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 encap_data[16];
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_encap_record_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 encap_record_id;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_encap_record_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 encap_record_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_encap_record_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc */
+/* Input (128 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x10000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+ __le64 l2_filter_id;
+ u8 src_macaddr[6];
+ __be16 ethertype;
+ u8 ipaddr_type;
+ u8 ip_protocol;
+ __le16 dst_vnic_id;
+ __le16 mirror_vnic_id;
+ u8 tunnel_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 pri_hint;
+ __be32 src_ipaddr[4];
+ __be32 src_ipaddr_mask[4];
+ __be32 dst_ipaddr[4];
+ __be32 dst_ipaddr_mask[4];
+ __be16 src_port;
+ __be16 src_port_mask;
+ __be16 dst_port;
+ __be16 dst_port_mask;
+ __le64 ntuple_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ntuple_filter_id;
+ __le32 flow_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_ntuple_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 ntuple_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg */
+/* Input (40 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_VNIC_ID_VALID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID_VALID 0x2UL
+ __le32 unused_0;
+ __le64 ntuple_filter_id;
+ __le32 new_dst_vnic_id;
+ __le32 new_mirror_vnic_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ __be16 tunnel_dst_port_val;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0;
+ __be16 tunnel_dst_port_val;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_free */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0;
+ __le16 tunnel_dst_port_id;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_alloc */
+/* Input (32 bytes) */
+struct hwrm_stat_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
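+
+/* Note: stats_dma_addr should point at a host DMA buffer sized for the
+ * counter block laid out in hwrm_stat_ctx_query_output; the firmware then
+ * writes updated counters into it at the requested update_period_ms
+ * interval.
+ */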
+
+/* hwrm_stat_ctx_free */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_query */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ __le32 unused_0;
+};
+
+/* Output (176 bytes) */
+struct hwrm_stat_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_err_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_err_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_mgmt_l2_filter_alloc */
+/* Input (56 bytes) */
+struct hwrm_mgmt_l2_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le32 enables;
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDRESS 0x1UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_OVLAN 0x2UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_IVLAN 0x4UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_ACTION_ID 0x8UL
+ u8 l2_address[6];
+ u8 unused_0;
+ u8 unused_1;
+ u8 l2_address_mask[6];
+ __le16 ovlan;
+ __le16 ovlan_mask;
+ __le16 ivlan;
+ __le16 ivlan_mask;
+ u8 unused_2;
+ u8 unused_3;
+ __le32 action_id;
+ u8 action_bypass;
+ #define MGMT_L2_FILTER_ALLOC_REQ_ACTION_BYPASS 0x1UL
+ u8 unused_5[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_mgmt_l2_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mgmt_l2_filter_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_mgmt_l2_filter_free */
+/* Input (24 bytes) */
+struct hwrm_mgmt_l2_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 mgmt_l2_filter_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_mgmt_l2_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_raw_write_blk */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_write_blk_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le32 dest_addr;
+ __le32 len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_raw_write_blk_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_read */
+/* Input (40 bytes) */
+struct hwrm_nvm_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 offset;
+ __le32 len;
+ __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_raw_dump */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_dump_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 offset;
+ __le32 len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_raw_dump_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries */
+/* Input (24 bytes) */
+struct hwrm_nvm_get_dir_entries_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_get_dir_entries_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dir_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (24 bytes) */
+struct hwrm_nvm_get_dir_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 entries;
+ __le32 entry_length;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_write */
+/* Input (40 bytes) */
+struct hwrm_nvm_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 dir_data_length;
+ __le16 option;
+ __le16 flags;
+ #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_modify */
+/* Input (40 bytes) */
+struct hwrm_nvm_modify_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 offset;
+ __le32 len;
+ __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_modify_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry */
+/* Input (32 bytes) */
+struct hwrm_nvm_find_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
+ __le16 dir_idx;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 opt_ordinal;
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ (0x0UL << 0)
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE (0x1UL << 0)
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT (0x2UL << 0)
+ u8 unused_1[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_find_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le32 dir_data_length;
+ __le32 fw_ver;
+ __le16 dir_ordinal;
+ __le16 dir_idx;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry */
+/* Input (24 bytes) */
+struct hwrm_nvm_erase_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_idx;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_erase_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dev_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_get_dev_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 manufacturer_id;
+ __le16 device_id;
+ __le32 sector_size;
+ __le32 nvram_size;
+ __le32 reserved_size;
+ __le32 available_size;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry */
+/* Input (32 bytes) */
+struct hwrm_nvm_mod_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
+ __le16 dir_idx;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 checksum;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_mod_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_verify_update */
+/* Input (24 bytes) */
+struct hwrm_nvm_verify_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_verify_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_exec_fwd_resp */
+/* Input (120 bytes) */
+struct hwrm_exec_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[24];
+ __le16 encap_resp_target_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_exec_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp */
+/* Input (120 bytes) */
+struct hwrm_reject_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[24];
+ __le16 encap_resp_target_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_reject_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_fwd_resp */
+/* Input (40 bytes) */
+struct hwrm_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[24];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl */
+/* Input (32 bytes) */
+struct hwrm_fwd_async_event_cmpl_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_async_event_target_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2[3];
+ u8 unused_3;
+ __le32 encap_async_event_cmpl[4];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_async_event_cmpl_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_fw_reset */
+/* Input (24 bytes) */
+struct hwrm_fw_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0)
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_fw_qstatus */
+/* Input (24 bytes) */
+struct hwrm_fw_qstatus_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_qstatus_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_temp_monitor_query */
+/* Input (16 bytes) */
+struct hwrm_temp_monitor_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_temp_monitor_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
new file mode 100644
index 000000000000..3cf3e1b70b64
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -0,0 +1,59 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _BNXT_NVM_DEFS_H_
+#define _BNXT_NVM_DEFS_H_
+
+enum bnxt_nvm_directory_type {
+ BNX_DIR_TYPE_UNUSED = 0,
+ BNX_DIR_TYPE_PKG_LOG = 1,
+ BNX_DIR_TYPE_CHIMP_PATCH = 3,
+ BNX_DIR_TYPE_BOOTCODE = 4,
+ BNX_DIR_TYPE_VPD = 5,
+ BNX_DIR_TYPE_EXP_ROM_MBA = 6,
+ BNX_DIR_TYPE_AVS = 7,
+ BNX_DIR_TYPE_PCIE = 8,
+ BNX_DIR_TYPE_PORT_MACRO = 9,
+ BNX_DIR_TYPE_APE_FW = 10,
+ BNX_DIR_TYPE_APE_PATCH = 11,
+ BNX_DIR_TYPE_KONG_FW = 12,
+ BNX_DIR_TYPE_KONG_PATCH = 13,
+ BNX_DIR_TYPE_BONO_FW = 14,
+ BNX_DIR_TYPE_BONO_PATCH = 15,
+ BNX_DIR_TYPE_TANG_FW = 16,
+ BNX_DIR_TYPE_TANG_PATCH = 17,
+ BNX_DIR_TYPE_BOOTCODE_2 = 18,
+ BNX_DIR_TYPE_CCM = 19,
+ BNX_DIR_TYPE_PCI_CFG = 20,
+ BNX_DIR_TYPE_TSCF_UCODE = 21,
+ BNX_DIR_TYPE_ISCSI_BOOT = 22,
+ BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24,
+ BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25,
+ BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26,
+ BNX_DIR_TYPE_EXT_PHY = 27,
+ BNX_DIR_TYPE_SHARED_CFG = 40,
+ BNX_DIR_TYPE_PORT_CFG = 41,
+ BNX_DIR_TYPE_FUNC_CFG = 42,
+ BNX_DIR_TYPE_MGMT_CFG = 48,
+ BNX_DIR_TYPE_MGMT_DATA = 49,
+ BNX_DIR_TYPE_MGMT_WEB_DATA = 50,
+ BNX_DIR_TYPE_MGMT_WEB_META = 51,
+ BNX_DIR_TYPE_MGMT_EVENT_LOG = 52,
+ BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53
+};
+
+#define BNX_DIR_ORDINAL_FIRST 0
+
+#define BNX_DIR_EXT_INACTIVE (1 << 0)
+#define BNX_DIR_EXT_UPDATE (1 << 1)
+
+#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
+#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
+
+#endif /* Don't add anything after this line */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
new file mode 100644
index 000000000000..3422147a65f4
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -0,0 +1,787 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#ifdef CONFIG_BNXT_SRIOV
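+/* Common validation for the VF ndo entry points below: the PF must be up,
+ * SR-IOV must be enabled, and vf_id must be in range.
+ */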
+static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
+{
+ if (bp->state != BNXT_STATE_OPEN) {
+ netdev_err(bp->dev, "vf ndo called though PF is down\n");
+ return -EINVAL;
+ }
+ if (!bp->pf.active_vfs) {
+ netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
+ return -EINVAL;
+ }
+ if (vf_id >= bp->pf.max_vfs) {
+ netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ bool old_setting = false;
+ u32 func_flags;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ vf = &bp->pf.vf[vf_id];
+ if (vf->flags & BNXT_VF_SPOOFCHK)
+ old_setting = true;
+ if (old_setting == setting)
+ return 0;
+
+ func_flags = vf->func_flags;
+ if (setting)
+ func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+ else
+ func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+ /* TODO: if the driver supports VLAN filtering on guest VLANs,
+ * the spoof check should also include VLAN anti-spoofing
+ */
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(func_flags);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ vf->func_flags = func_flags;
+ if (setting)
+ vf->flags |= BNXT_VF_SPOOFCHK;
+ else
+ vf->flags &= ~BNXT_VF_SPOOFCHK;
+ }
+ return rc;
+}
+
+int bnxt_get_vf_config(struct net_device *dev, int vf_id,
+ struct ifla_vf_info *ivi)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ ivi->vf = vf_id;
+ vf = &bp->pf.vf[vf_id];
+
+ memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+ ivi->max_tx_rate = vf->max_tx_rate;
+ ivi->min_tx_rate = vf->min_tx_rate;
+ ivi->vlan = vf->vlan;
+ ivi->qos = vf->flags & BNXT_VF_QOS;
+ ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
+ if (!(vf->flags & BNXT_VF_LINK_FORCED))
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->flags & BNXT_VF_LINK_UP)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+
+ return 0;
+}
+
+int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+ /* Reject broadcast or multicast MAC addresses; a zero MAC address
+ * means the VF is allowed to use its own MAC address.
+ */
+ if (is_multicast_ether_addr(mac)) {
+ netdev_err(dev, "Invalid VF ethernet address\n");
+ return -EINVAL;
+ }
+ vf = &bp->pf.vf[vf_id];
+
+ memcpy(vf->mac_addr, mac, ETH_ALEN);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ u16 vlan_tag;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ /* TODO: implement proper handling of user priority; for now, fail
+ * the command if a nonzero priority is specified.
+ */
+ if (vlan_id > 4095 || qos)
+ return -EINVAL;
+
+ vf = &bp->pf.vf[vf_id];
+ vlan_tag = vlan_id;
+ if (vlan_tag == vf->vlan)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+ req.dflt_vlan = cpu_to_le16(vlan_tag);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ vf->vlan = vlan_tag;
+ return rc;
+}
+
+int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ u32 pf_link_speed;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ vf = &bp->pf.vf[vf_id];
+ pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+ if (max_tx_rate > pf_link_speed) {
+		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
+ max_tx_rate, vf_id);
+ return -EINVAL;
+ }
+
+ if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+ netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
+ min_tx_rate, vf_id);
+ return -EINVAL;
+ }
+ if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
+ return 0;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
+ req.max_bw = cpu_to_le32(max_tx_rate);
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
+ req.min_bw = cpu_to_le32(min_tx_rate);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ vf->min_tx_rate = min_tx_rate;
+ vf->max_tx_rate = max_tx_rate;
+ }
+ return rc;
+}
+
+int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ vf = &bp->pf.vf[vf_id];
+
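+	/* Flag encoding: BNXT_VF_LINK_UP alone means auto (follow the
+	 * physical link), BNXT_VF_LINK_FORCED alone means forced down,
+	 * and both together mean forced up.
+	 */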
+ vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
+ switch (link) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->flags |= BNXT_VF_LINK_UP;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->flags |= BNXT_VF_LINK_FORCED;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
+ break;
+ default:
+ netdev_err(bp->dev, "Invalid link option\n");
+ rc = -EINVAL;
+ break;
+ }
+	/* CHIMP TODO: send a message to the VF to notify it of the new link state */
+
+ return rc;
+}
+
+static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
+{
+ int i;
+ struct bnxt_vf_info *vf;
+
+ for (i = 0; i < num_vfs; i++) {
+ vf = &bp->pf.vf[i];
+ memset(vf, 0, sizeof(*vf));
+ vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp)
+{
+ int i, rc = 0;
+ struct bnxt_pf_info *pf = &bp->pf;
+ struct hwrm_func_vf_resc_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = pf->first_vf_id; i < pf->first_vf_id + pf->active_vfs; i++) {
+ req.vf_id = cpu_to_le16(i);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static void bnxt_free_vf_resources(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int i;
+
+ kfree(bp->pf.vf_event_bmap);
+ bp->pf.vf_event_bmap = NULL;
+
+ for (i = 0; i < 4; i++) {
+ if (bp->pf.hwrm_cmd_req_addr[i]) {
+ dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+ bp->pf.hwrm_cmd_req_addr[i],
+ bp->pf.hwrm_cmd_req_dma_addr[i]);
+ bp->pf.hwrm_cmd_req_addr[i] = NULL;
+ }
+ }
+
+ kfree(bp->pf.vf);
+ bp->pf.vf = NULL;
+}
+
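+/* Allocate DMA-coherent pages for forwarded VF HWRM commands and carve a
+ * BNXT_HWRM_REQ_MAX_SIZE slot out of them for each VF: VF k gets slot j of
+ * page i, where k = i * BNXT_HWRM_REQS_PER_PAGE + j.
+ */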
+static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
+{
+ struct pci_dev *pdev = bp->pdev;
+ u32 nr_pages, size, i, j, k = 0;
+
+ bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
+ if (!bp->pf.vf)
+ return -ENOMEM;
+
+ bnxt_set_vf_attr(bp, num_vfs);
+
+ size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
+ nr_pages = size / BNXT_PAGE_SIZE;
+ if (size & (BNXT_PAGE_SIZE - 1))
+ nr_pages++;
+
+ for (i = 0; i < nr_pages; i++) {
+ bp->pf.hwrm_cmd_req_addr[i] =
+ dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+ &bp->pf.hwrm_cmd_req_dma_addr[i],
+ GFP_KERNEL);
+
+ if (!bp->pf.hwrm_cmd_req_addr[i])
+ return -ENOMEM;
+
+ for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
+ struct bnxt_vf_info *vf = &bp->pf.vf[k];
+
+ vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
+ j * BNXT_HWRM_REQ_MAX_SIZE;
+ vf->hwrm_cmd_req_dma_addr =
+ bp->pf.hwrm_cmd_req_dma_addr[i] + j *
+ BNXT_HWRM_REQ_MAX_SIZE;
+ k++;
+ }
+ }
+
+	/* 16-byte bitmap = 128 bits, enough to flag events for a max of 128 VFs */
+ bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
+ if (!bp->pf.vf_event_bmap)
+ return -ENOMEM;
+
+ bp->pf.hwrm_cmd_req_pages = nr_pages;
+ return 0;
+}
+
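+/* Register the per-VF command pages with the firmware so that HWRM
+ * requests issued by the VFs can be deposited there for PF processing.
+ */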
+static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+{
+ struct hwrm_func_buf_rgtr_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
+
+ req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
+ req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
+ req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
+ req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
+ req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
+ req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
+ req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Called only by the PF to reserve firmware resources for the VFs */
+static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
+{
+ u32 rc = 0, mtu, i;
+ u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+
+	/* Remaining rings are distributed equally among the VFs for now */
+	/* TODO: the following workaround is needed to ensure that the total
+	 * number of vf_cp_rings does not exceed the number of HW ring
+	 * groups. This WA should be removed once the new HWRM provides
+	 * HW ring group capability in hwrm_func_qcap.
+	 */
+ vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs);
+ vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs;
+ /* TODO: restore this logic below once the WA above is removed */
+ /* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */
+ vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) /
+ *num_vfs;
+ else
+ vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) /
+ *num_vfs;
+ vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs;
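+	/* Example (illustrative numbers): with 64 max TX rings, 8 used by
+	 * the PF, and 8 VFs requested, each VF is offered (64 - 8) / 8 = 7
+	 * TX rings.
+	 */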
+
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
+ FUNC_CFG_REQ_ENABLES_MRU |
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_VNICS);
+
+ mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ req.mru = cpu_to_le16(mtu);
+ req.mtu = cpu_to_le16(mtu);
+
+ req.num_rsscos_ctxs = cpu_to_le16(1);
+ req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req.num_tx_rings = cpu_to_le16(vf_tx_rings);
+ req.num_rx_rings = cpu_to_le16(vf_rx_rings);
+ req.num_l2_ctxs = cpu_to_le16(4);
+ vf_vnics = 1;
+
+ req.num_vnics = cpu_to_le16(vf_vnics);
+ /* FIXME spec currently uses 1 bit for stats ctx */
+ req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < *num_vfs; i++) {
+ req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ bp->pf.active_vfs = i + 1;
+ bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ if (!rc) {
+ bp->pf.max_pf_tx_rings = bp->tx_nr_rings;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2;
+ else
+ bp->pf.max_pf_rx_rings = bp->rx_nr_rings;
+ }
+ return rc;
+}
+
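+/* Enable SR-IOV: scale the requested VF count down to what the spare rings
+ * and RSS contexts allow, allocate host buffers, reserve firmware resources,
+ * register the command buffers, and finally enable the VFs on the PCI bus.
+ */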
+static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
+{
+ int rc = 0, vfs_supported;
+ int min_rx_rings, min_tx_rings, min_rss_ctxs;
+ int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+
+	/* Check if we can enable the requested number of VFs. At a minimum
+	 * we require 1 RX ring and 1 TX ring for each VF. In this minimum
+	 * configuration, features like TPA will not be available.
+	 */
+ vfs_supported = *num_vfs;
+
+ while (vfs_supported) {
+ min_rx_rings = vfs_supported;
+ min_tx_rings = vfs_supported;
+ min_rss_ctxs = vfs_supported;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
+ min_rx_rings)
+ rx_ok = 1;
+ } else {
+ if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
+ min_rx_rings)
+ rx_ok = 1;
+ }
+
+ if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+ tx_ok = 1;
+
+ if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
+ rss_ok = 1;
+
+ if (tx_ok && rx_ok && rss_ok)
+ break;
+
+ vfs_supported--;
+ }
+
+ if (!vfs_supported) {
+		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by the PF\n");
+ return -EINVAL;
+ }
+
+ if (vfs_supported != *num_vfs) {
+ netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
+ *num_vfs, vfs_supported);
+ *num_vfs = vfs_supported;
+ }
+
+ rc = bnxt_alloc_vf_resources(bp, *num_vfs);
+ if (rc)
+ goto err_out1;
+
+ /* Reserve resources for VFs */
+ rc = bnxt_hwrm_func_cfg(bp, num_vfs);
+ if (rc)
+ goto err_out2;
+
+ /* Register buffers for VFs */
+ rc = bnxt_hwrm_func_buf_rgtr(bp);
+ if (rc)
+ goto err_out2;
+
+ rc = pci_enable_sriov(bp->pdev, *num_vfs);
+ if (rc)
+ goto err_out2;
+
+ return 0;
+
+err_out2:
+	/* Free the firmware resources reserved for the VFs */
+ bnxt_hwrm_func_vf_resource_free(bp);
+
+err_out1:
+ bnxt_free_vf_resources(bp);
+
+ return rc;
+}
+
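+/* Tear down SR-IOV: detach the VFs from the PCI bus first, then release the
+ * firmware and host resources that were reserved for them.
+ */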
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+ if (!bp->pf.active_vfs)
+ return;
+
+ pci_disable_sriov(bp->pdev);
+
+	/* Free the firmware resources reserved for the VFs */
+ bnxt_hwrm_func_vf_resource_free(bp);
+
+ bnxt_free_vf_resources(bp);
+
+ bp->pf.active_vfs = 0;
+ bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings;
+ bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings;
+}
+
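+/* .sriov_configure handler, invoked when the PCI sysfs sriov_numvfs
+ * attribute is written; num_vfs == 0 requests that SR-IOV be disabled.
+ */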
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+		netdev_warn(dev, "SR-IOV cannot be enabled when the IRQ mode is not MSI-X\n");
+ return 0;
+ }
+
+ rtnl_lock();
+ if (!netif_running(dev)) {
+		netdev_warn(dev, "Rejecting SR-IOV config request since the interface is down\n");
+ rtnl_unlock();
+ return 0;
+ }
+ bp->sriov_cfg = true;
+ rtnl_unlock();
+	if (!num_vfs) {
+		bnxt_sriov_disable(bp);
+		goto sriov_cfg_exit;
+	}
+
+	/* Nothing to do if the requested number of VFs is already enabled */
+	if (num_vfs == bp->pf.active_vfs)
+		goto sriov_cfg_exit;
+
+	bnxt_sriov_enable(bp, &num_vfs);
+
+	/* Always clear sriov_cfg and wake any waiters, even on early exit */
+sriov_cfg_exit:
+	bp->sriov_cfg = false;
+	wake_up(&bp->sriov_cfg_wait);
+
+	return num_vfs;
+}
+
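+/* Send a PF-crafted response to a VF's HWRM request.  Together with
+ * HWRM_REJECT_FWD_RESP (refuse the request) and HWRM_EXEC_FWD_RESP (have
+ * the firmware execute the original request), this lets the PF police
+ * every command a VF issues.
+ */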
+static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+ void *encap_resp, __le64 encap_resp_addr,
+ __le16 encap_resp_cpr, u32 msg_size)
+{
+ int rc = 0;
+ struct hwrm_fwd_resp_input req = {0};
+ struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
+
+ /* Set the new target id */
+ req.target_id = cpu_to_le16(vf->fw_fid);
+ req.encap_resp_len = cpu_to_le16(msg_size);
+ req.encap_resp_addr = encap_resp_addr;
+ req.encap_resp_cmpl_ring = encap_resp_cpr;
+ memcpy(req.encap_resp, encap_resp, msg_size);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
+ goto fwd_resp_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+fwd_resp_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+ u32 msg_size)
+{
+ int rc = 0;
+ struct hwrm_reject_fwd_resp_input req = {0};
+ struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
+ /* Set the new target id */
+ req.target_id = cpu_to_le16(vf->fw_fid);
+ memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
+ goto fwd_err_resp_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+fwd_err_resp_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+ u32 msg_size)
+{
+ int rc = 0;
+ struct hwrm_exec_fwd_resp_input req = {0};
+ struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
+ /* Set the new target id */
+ req.target_id = cpu_to_le16(vf->fw_fid);
+ memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
+ goto exec_fwd_resp_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+exec_fwd_resp_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
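+/* Anti-spoof check for a VF L2 filter request: execute it only if no MAC
+ * address has been administratively assigned to the VF or the requested
+ * address matches the assigned one; otherwise reject the request.
+ */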
+static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
+ struct hwrm_cfa_l2_filter_alloc_input *req =
+ (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+
+ if (!is_valid_ether_addr(vf->mac_addr) ||
+ ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+ else
+ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+}
+
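+/* Answer a VF's PORT_PHY_QCFG query.  If the VF link state is not forced,
+ * forward the query to the firmware; otherwise synthesize a response that
+ * reports the forced state regardless of the physical link.
+ */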
+static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ int rc = 0;
+
+ if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
+ /* real link */
+ rc = bnxt_hwrm_exec_fwd_resp(
+ bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
+ } else {
+ struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+ struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
+
+ phy_qcfg_req =
+ (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
+ mutex_lock(&bp->hwrm_cmd_lock);
+ memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
+ sizeof(phy_qcfg_resp));
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+
+ if (vf->flags & BNXT_VF_LINK_UP) {
+ /* if physical link is down, force link up on VF */
+ if (phy_qcfg_resp.link ==
+ PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
+ phy_qcfg_resp.link =
+ PORT_PHY_QCFG_RESP_LINK_LINK;
+ if (phy_qcfg_resp.auto_link_speed)
+ phy_qcfg_resp.link_speed =
+ phy_qcfg_resp.auto_link_speed;
+ else
+ phy_qcfg_resp.link_speed =
+ phy_qcfg_resp.force_link_speed;
+ phy_qcfg_resp.duplex =
+ PORT_PHY_QCFG_RESP_DUPLEX_FULL;
+ phy_qcfg_resp.pause =
+ (PORT_PHY_QCFG_RESP_PAUSE_TX |
+ PORT_PHY_QCFG_RESP_PAUSE_RX);
+ }
+ } else {
+ /* force link down */
+ phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
+ phy_qcfg_resp.link_speed = 0;
+ phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
+ phy_qcfg_resp.pause = 0;
+ }
+ rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
+ phy_qcfg_req->resp_addr,
+ phy_qcfg_req->cmpl_ring,
+ sizeof(phy_qcfg_resp));
+ }
+ return rc;
+}
+
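+/* Validate and dispatch a forwarded VF command to the appropriate handler;
+ * request types without a handler are ignored.
+ */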
+static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ int rc = 0;
+ struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr;
+ u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff;
+
+ switch (req_type) {
+ case HWRM_CFA_L2_FILTER_ALLOC:
+ rc = bnxt_vf_validate_set_mac(bp, vf);
+ break;
+ case HWRM_FUNC_CFG:
+ /* TODO Validate if VF is allowed to change mac address,
+ * mtu, num of rings etc
+ */
+ rc = bnxt_hwrm_exec_fwd_resp(
+ bp, vf, sizeof(struct hwrm_func_cfg_input));
+ break;
+ case HWRM_PORT_PHY_QCFG:
+ rc = bnxt_vf_set_link(bp, vf);
+ break;
+ default:
+ break;
+ }
+ return rc;
+}
+
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+ u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;
+
+	/* Scan the VF event bitmap and process each VF's pending forwarded command */
+ while (1) {
+ vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
+ if (vf_id >= active_vfs)
+ break;
+
+ clear_bit(vf_id, bp->pf.vf_event_bmap);
+ bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
+ i = vf_id + 1;
+ }
+}
+#else
+
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+}
+
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
+}
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
new file mode 100644
index 000000000000..0b430f157b9e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -0,0 +1,22 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_SRIOV_H
+#define BNXT_SRIOV_H
+
+int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
+int bnxt_set_vf_mac(struct net_device *, int, u8 *);
+int bnxt_set_vf_vlan(struct net_device *, int, u16, u8);
+int bnxt_set_vf_bw(struct net_device *, int, int, int);
+int bnxt_set_vf_link_state(struct net_device *, int, int);
+int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void bnxt_sriov_disable(struct bnxt *);
+void bnxt_hwrm_exec_fwd_req(struct bnxt *);
+#endif