author    NeilBrown <neilb@suse.de>  2012-08-01 20:40:02 +1000
committer NeilBrown <neilb@suse.de>  2012-08-01 20:40:02 +1000
commit    bb181e2e48f8c85db08c9cb015cbba9618dbf05c
tree      191bc24dd97bcb174535cc217af082f16da3b43d /drivers/net/ethernet/broadcom
parent    d57368afe63b3b7b45ce6c2b8c5276417935be2f
parent    c039c332f23e794deb6d6f37b9f07ff3b27fb2cf
Merge commit 'c039c332f23e794deb6d6f37b9f07ff3b27fb2cf' into md
Pull in pre-requisites for adding raid10 support to dm-raid.
Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 100
-rw-r--r--  drivers/net/ethernet/broadcom/b44.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 106
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.h | 45
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 197
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 252
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 63
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 30
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 585
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 184
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 1232
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 53
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 310
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h | 168
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 128
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 42
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 68
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 63
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_if.h | 13
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 288
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 47
24 files changed, 2949 insertions(+), 1042 deletions(-)
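
Note on the b44 portion of this merge: the driver's hardware counters are widened from u32 to u64 and wrapped in a u64_stats_sync sequence counter so that readers on 32-bit hosts never see torn 64-bit values. The sketch below illustrates that reader/writer pattern using the same kernel primitives the patch uses (u64_stats_update_begin/end and u64_stats_fetch_begin_bh/retry_bh); the struct and function names here are simplified placeholders, not the driver's real ones.

#include <linux/u64_stats_sync.h>

struct example_stats {
        u64 rx_bytes;                   /* 64-bit counter, updated in softirq context */
        struct u64_stats_sync syncp;    /* seqcount protecting the counters */
};

/* Writer side (e.g. the periodic stats refresh) brackets its updates */
static void example_update(struct example_stats *s, u32 hw_val)
{
        u64_stats_update_begin(&s->syncp);
        s->rx_bytes += hw_val;          /* accumulate the 32-bit HW register */
        u64_stats_update_end(&s->syncp);
}

/* Reader side (e.g. ndo_get_stats64) retries until no writer ran concurrently */
static u64 example_read(struct example_stats *s)
{
        unsigned int start;
        u64 val;

        do {
                start = u64_stats_fetch_begin_bh(&s->syncp);
                val = s->rx_bytes;
        } while (u64_stats_fetch_retry_bh(&s->syncp, start));

        return val;
}
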
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 46b8b7d81633..9786c0e9890e 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -483,9 +483,11 @@ out:
static void b44_stats_update(struct b44 *bp)
{
unsigned long reg;
- u32 *val;
+ u64 *val;
val = &bp->hw_stats.tx_good_octets;
+ u64_stats_update_begin(&bp->hw_stats.syncp);
+
for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
*val++ += br32(bp, reg);
}
@@ -496,6 +498,8 @@ static void b44_stats_update(struct b44 *bp)
for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
*val++ += br32(bp, reg);
}
+
+ u64_stats_update_end(&bp->hw_stats.syncp);
}
static void b44_link_report(struct b44 *bp)
@@ -656,7 +660,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
dma_unmap_single(bp->sdev->dma_dev, mapping,
RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
- skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+ skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
if (skb == NULL)
return -ENOMEM;
mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +971,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_unmap_single(bp->sdev->dma_dev, mapping, len,
DMA_TO_DEVICE);
- bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+ bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb)
goto err_out;
@@ -1635,44 +1639,49 @@ static int b44_close(struct net_device *dev)
return 0;
}
-static struct net_device_stats *b44_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *nstat)
{
struct b44 *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &dev->stats;
struct b44_hw_stats *hwstat = &bp->hw_stats;
-
- /* Convert HW stats into netdevice stats. */
- nstat->rx_packets = hwstat->rx_pkts;
- nstat->tx_packets = hwstat->tx_pkts;
- nstat->rx_bytes = hwstat->rx_octets;
- nstat->tx_bytes = hwstat->tx_octets;
- nstat->tx_errors = (hwstat->tx_jabber_pkts +
- hwstat->tx_oversize_pkts +
- hwstat->tx_underruns +
- hwstat->tx_excessive_cols +
- hwstat->tx_late_cols);
- nstat->multicast = hwstat->tx_multicast_pkts;
- nstat->collisions = hwstat->tx_total_cols;
-
- nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
- hwstat->rx_undersize);
- nstat->rx_over_errors = hwstat->rx_missed_pkts;
- nstat->rx_frame_errors = hwstat->rx_align_errs;
- nstat->rx_crc_errors = hwstat->rx_crc_errs;
- nstat->rx_errors = (hwstat->rx_jabber_pkts +
- hwstat->rx_oversize_pkts +
- hwstat->rx_missed_pkts +
- hwstat->rx_crc_align_errs +
- hwstat->rx_undersize +
- hwstat->rx_crc_errs +
- hwstat->rx_align_errs +
- hwstat->rx_symbol_errs);
-
- nstat->tx_aborted_errors = hwstat->tx_underruns;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&hwstat->syncp);
+
+ /* Convert HW stats into rtnl_link_stats64 stats. */
+ nstat->rx_packets = hwstat->rx_pkts;
+ nstat->tx_packets = hwstat->tx_pkts;
+ nstat->rx_bytes = hwstat->rx_octets;
+ nstat->tx_bytes = hwstat->tx_octets;
+ nstat->tx_errors = (hwstat->tx_jabber_pkts +
+ hwstat->tx_oversize_pkts +
+ hwstat->tx_underruns +
+ hwstat->tx_excessive_cols +
+ hwstat->tx_late_cols);
+ nstat->multicast = hwstat->tx_multicast_pkts;
+ nstat->collisions = hwstat->tx_total_cols;
+
+ nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
+ hwstat->rx_undersize);
+ nstat->rx_over_errors = hwstat->rx_missed_pkts;
+ nstat->rx_frame_errors = hwstat->rx_align_errs;
+ nstat->rx_crc_errors = hwstat->rx_crc_errs;
+ nstat->rx_errors = (hwstat->rx_jabber_pkts +
+ hwstat->rx_oversize_pkts +
+ hwstat->rx_missed_pkts +
+ hwstat->rx_crc_align_errs +
+ hwstat->rx_undersize +
+ hwstat->rx_crc_errs +
+ hwstat->rx_align_errs +
+ hwstat->rx_symbol_errs);
+
+ nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
- /* Carrier lost counter seems to be broken for some devices */
- nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
+ /* Carrier lost counter seems to be broken for some devices */
+ nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
+ } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
return nstat;
}
@@ -1993,17 +2002,24 @@ static void b44_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct b44 *bp = netdev_priv(dev);
- u32 *val = &bp->hw_stats.tx_good_octets;
+ struct b44_hw_stats *hwstat = &bp->hw_stats;
+ u64 *data_src, *data_dst;
+ unsigned int start;
u32 i;
spin_lock_irq(&bp->lock);
-
b44_stats_update(bp);
+ spin_unlock_irq(&bp->lock);
- for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
- *data++ = *val++;
+ do {
+ data_src = &hwstat->tx_good_octets;
+ data_dst = data;
+ start = u64_stats_fetch_begin_bh(&hwstat->syncp);
- spin_unlock_irq(&bp->lock);
+ for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
+ *data_dst++ = *data_src++;
+
+ } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
}
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2113,7 +2129,7 @@ static const struct net_device_ops b44_netdev_ops = {
.ndo_open = b44_open,
.ndo_stop = b44_close,
.ndo_start_xmit = b44_start_xmit,
- .ndo_get_stats = b44_get_stats,
+ .ndo_get_stats64 = b44_get_stats64,
.ndo_set_rx_mode = b44_set_rx_mode,
.ndo_set_mac_address = b44_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index e1905a49279f..8993d72f0420 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -338,9 +338,10 @@ struct ring_info {
* the layout
*/
struct b44_hw_stats {
-#define _B44(x) u32 x;
+#define _B44(x) u64 x;
B44_STAT_REG_DECLARE
#undef _B44
+ struct u64_stats_sync syncp;
};
struct ssb_device;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index ac7b74488531..79cebd8525ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
@@ -57,8 +58,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.1"
-#define DRV_MODULE_RELDATE "Dec 18, 2011"
+#define DRV_MODULE_VERSION "2.2.3"
+#define DRV_MODULE_RELDATE "June 27, 2012"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -872,8 +873,7 @@ bnx2_alloc_mem(struct bnx2 *bp)
bnapi = &bp->bnx2_napi[i];
- sblk = (void *) (status_blk +
- BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+ sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
bnapi->status_blk.msix = sblk;
bnapi->hw_tx_cons_ptr =
&sblk->status_tx_quick_consumer_index;
@@ -1972,22 +1972,26 @@ bnx2_remote_phy_event(struct bnx2 *bp)
switch (speed) {
case BNX2_LINK_STATUS_10HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_10FULL:
bp->line_speed = SPEED_10;
break;
case BNX2_LINK_STATUS_100HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_100BASE_T4:
case BNX2_LINK_STATUS_100FULL:
bp->line_speed = SPEED_100;
break;
case BNX2_LINK_STATUS_1000HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_1000FULL:
bp->line_speed = SPEED_1000;
break;
case BNX2_LINK_STATUS_2500HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_2500FULL:
bp->line_speed = SPEED_2500;
break;
@@ -2473,6 +2477,7 @@ bnx2_dump_mcp_state(struct bnx2 *bp)
bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
pr_cont(" condition[%08x]\n",
bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
+ DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
DP_SHMEM_LINE(bp, 0x3cc);
DP_SHMEM_LINE(bp, 0x3dc);
DP_SHMEM_LINE(bp, 0x3ec);
@@ -5372,7 +5377,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
int k, last;
if (skb == NULL) {
- j++;
+ j = NEXT_TX_BD(j);
continue;
}
@@ -5384,8 +5389,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
tx_buf->skb = NULL;
last = tx_buf->nr_frags;
- j++;
- for (k = 0; k < last; k++, j++) {
+ j = NEXT_TX_BD(j);
+ for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(tx_buf, mapping),
@@ -6245,7 +6250,7 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
- int cpus = num_online_cpus();
+ int cpus = netif_get_num_default_rss_queues();
int msix_vecs;
if (!bp->num_req_rx_rings)
@@ -6383,6 +6388,7 @@ bnx2_reset_task(struct work_struct *work)
{
struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
int rc;
+ u16 pcicmd;
rtnl_lock();
if (!netif_running(bp->dev)) {
@@ -6392,6 +6398,12 @@ bnx2_reset_task(struct work_struct *work)
bnx2_netif_stop(bp, true);
+ pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
+ if (!(pcicmd & PCI_COMMAND_MEMORY)) {
+ /* in case PCI block has reset */
+ pci_restore_state(bp->pdev);
+ pci_save_state(bp->pdev);
+ }
rc = bnx2_init_nic(bp, 1);
if (rc) {
netdev_err(bp->dev, "failed to reset NIC, closing\n");
@@ -6406,6 +6418,75 @@ bnx2_reset_task(struct work_struct *work)
rtnl_unlock();
}
+#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
+
+static void
+bnx2_dump_ftq(struct bnx2 *bp)
+{
+ int i;
+ u32 reg, bdidx, cid, valid;
+ struct net_device *dev = bp->dev;
+ static const struct ftq_reg {
+ char *name;
+ u32 off;
+ } ftq_arr[] = {
+ BNX2_FTQ_ENTRY(RV2P_P),
+ BNX2_FTQ_ENTRY(RV2P_T),
+ BNX2_FTQ_ENTRY(RV2P_M),
+ BNX2_FTQ_ENTRY(TBDR_),
+ BNX2_FTQ_ENTRY(TDMA_),
+ BNX2_FTQ_ENTRY(TXP_),
+ BNX2_FTQ_ENTRY(TXP_),
+ BNX2_FTQ_ENTRY(TPAT_),
+ BNX2_FTQ_ENTRY(RXP_C),
+ BNX2_FTQ_ENTRY(RXP_),
+ BNX2_FTQ_ENTRY(COM_COMXQ_),
+ BNX2_FTQ_ENTRY(COM_COMTQ_),
+ BNX2_FTQ_ENTRY(COM_COMQ_),
+ BNX2_FTQ_ENTRY(CP_CPQ_),
+ };
+
+ netdev_err(dev, "<--- start FTQ dump --->\n");
+ for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
+ netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
+ bnx2_reg_rd_ind(bp, ftq_arr[i].off));
+
+ netdev_err(dev, "CPU states:\n");
+ for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
+ netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
+ reg, bnx2_reg_rd_ind(bp, reg),
+ bnx2_reg_rd_ind(bp, reg + 4),
+ bnx2_reg_rd_ind(bp, reg + 8),
+ bnx2_reg_rd_ind(bp, reg + 0x1c),
+ bnx2_reg_rd_ind(bp, reg + 0x1c),
+ bnx2_reg_rd_ind(bp, reg + 0x20));
+
+ netdev_err(dev, "<--- end FTQ dump --->\n");
+ netdev_err(dev, "<--- start TBDC dump --->\n");
+ netdev_err(dev, "TBDC free cnt: %ld\n",
+ REG_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
+ netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
+ for (i = 0; i < 0x20; i++) {
+ int j = 0;
+
+ REG_WR(bp, BNX2_TBDC_BD_ADDR, i);
+ REG_WR(bp, BNX2_TBDC_CAM_OPCODE,
+ BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
+ REG_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
+ while ((REG_RD(bp, BNX2_TBDC_COMMAND) &
+ BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
+ j++;
+
+ cid = REG_RD(bp, BNX2_TBDC_CID);
+ bdidx = REG_RD(bp, BNX2_TBDC_BIDX);
+ valid = REG_RD(bp, BNX2_TBDC_CAM_OPCODE);
+ netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
+ i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
+ bdidx >> 24, (valid >> 8) & 0x0ff);
+ }
+ netdev_err(dev, "<--- end TBDC dump --->\n");
+}
+
static void
bnx2_dump_state(struct bnx2 *bp)
{
@@ -6435,6 +6516,7 @@ bnx2_tx_timeout(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
+ bnx2_dump_ftq(bp);
bnx2_dump_state(bp);
bnx2_dump_mcp_state(bp);
@@ -6628,6 +6710,7 @@ bnx2_close(struct net_device *dev)
bnx2_disable_int_sync(bp);
bnx2_napi_disable(bp);
+ netif_tx_disable(dev);
del_timer_sync(&bp->timer);
bnx2_shutdown_chip(bp);
bnx2_free_irq(bp);
@@ -7832,7 +7915,7 @@ bnx2_get_5709_media(struct bnx2 *bp)
else
strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
- if (PCI_FUNC(bp->pdev->devfn) == 0) {
+ if (bp->func == 0) {
switch (strap) {
case 0x4:
case 0x5:
@@ -8131,9 +8214,12 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
+ if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
+ bp->func = 1;
+
if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
BNX2_SHM_HDR_SIGNATURE_SIG) {
- u32 off = PCI_FUNC(pdev->devfn) << 2;
+ u32 off = bp->func << 2;
bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
} else
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index dc06bda73be7..af6451dec295 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -4642,6 +4642,47 @@ struct l2_fhdr {
#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+/*
+ * tbdc definition
+ * offset: 0x5400
+ */
+#define BNX2_TBDC_COMMAND 0x5400
+#define BNX2_TBDC_COMMAND_CMD_ENABLED (1UL<<0)
+#define BNX2_TBDC_COMMAND_CMD_FLUSH (1UL<<1)
+#define BNX2_TBDC_COMMAND_CMD_SOFT_RST (1UL<<2)
+#define BNX2_TBDC_COMMAND_CMD_REG_ARB (1UL<<3)
+#define BNX2_TBDC_COMMAND_WRCHK_RANGE_ERROR (1UL<<4)
+#define BNX2_TBDC_COMMAND_WRCHK_ALL_ONES_ERROR (1UL<<5)
+#define BNX2_TBDC_COMMAND_WRCHK_ALL_ZEROS_ERROR (1UL<<6)
+#define BNX2_TBDC_COMMAND_WRCHK_ANY_ONES_ERROR (1UL<<7)
+#define BNX2_TBDC_COMMAND_WRCHK_ANY_ZEROS_ERROR (1UL<<8)
+
+#define BNX2_TBDC_STATUS 0x5404
+#define BNX2_TBDC_STATUS_FREE_CNT (0x3fUL<<0)
+
+#define BNX2_TBDC_BD_ADDR 0x5424
+
+#define BNX2_TBDC_BIDX 0x542c
+#define BNX2_TBDC_BDIDX_BDIDX (0xffffUL<<0)
+#define BNX2_TBDC_BDIDX_CMD (0xffUL<<24)
+
+#define BNX2_TBDC_CID 0x5430
+
+#define BNX2_TBDC_CAM_OPCODE 0x5434
+#define BNX2_TBDC_CAM_OPCODE_OPCODE (0x7UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_SEARCH (0UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CACHE_WRITE (1UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_INVALIDATE (2UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_WRITE (4UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ (5UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_WRITE (6UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_READ (7UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_BDIDX (1UL<<4)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_CID (1UL<<5)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_CMD (1UL<<6)
+#define BNX2_TBDC_CAM_OPCODE_WMT_FAILED (1UL<<7)
+#define BNX2_TBDC_CAM_OPCODE_CAM_VALIDS (0xffUL<<8)
+
/*
* tdma_reg definition
@@ -6930,6 +6971,8 @@ struct bnx2 {
struct bnx2_irq irq_tbl[BNX2_MAX_MSIX_VEC];
int irq_nvecs;
+ u8 func;
+
u8 num_tx_rings;
u8 num_rx_rings;
@@ -7314,6 +7357,8 @@ struct bnx2_rv2p_fw_file {
#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \
(msg))
+#define BNX2_BC_RESET_TYPE 0x000001c0
+
#define BNX2_BC_STATE 0x000001c4
#define BNX2_BC_STATE_ERR_MASK 0x0000ff00
#define BNX2_BC_STATE_SIGN 0x42530000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 7de824184979..77bcd4cb4ffb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.72.50-0"
-#define DRV_MODULE_RELDATE "2012/04/23"
+#define DRV_MODULE_VERSION "1.72.51-0"
+#define DRV_MODULE_RELDATE "2012/06/18"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -51,6 +51,7 @@
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
+#include "bnx2x_mfw_req.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_sp.h"
@@ -248,13 +249,12 @@ enum {
BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
};
-#define BNX2X_CNIC_START_ETH_CID 48
-enum {
+#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
+ (bp)->max_cos)
/* iSCSI L2 */
- BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID,
+#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
/* FCoE L2 */
- BNX2X_FCOE_ETH_CID,
-};
+#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1)
/** Additional rings budgeting */
#ifdef BCM_CNIC
@@ -276,29 +276,30 @@ enum {
#define FIRST_TX_ONLY_COS_INDEX 1
#define FIRST_TX_COS_INDEX 0
-/* defines for decodeing the fastpath index and the cos index out of the
- * transmission queue index
- */
-#define MAX_TXQS_PER_COS FP_SB_MAX_E1x
-
-#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS)
-#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS)
-
/* rules for calculating the cids of tx-only connections */
-#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS)
-#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS)
+#define CID_TO_FP(cid, bp) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp))
+#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \
+ (cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
/* fp index inside class of service range */
-#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS)
-
-/*
- * 0..15 eth cos0
- * 16..31 eth cos1 if applicable
- * 32..47 eth cos2 If applicable
- * fcoe queue follows eth queues (16, 32, 48 depending on cos)
+#define FP_COS_TO_TXQ(fp, cos, bp) \
+ ((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
+
+/* Indexes for transmission queues array:
+ * txdata for RSS i CoS j is at location i + (j * num of RSS)
+ * txdata for FCoE (if exist) is at location max cos * num of RSS
+ * txdata for FWD (if exist) is one location after FCoE
+ * txdata for OOO (if exist) is one location after FWD
*/
-#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos)
-#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp))
+enum {
+ FCOE_TXQ_IDX_OFFSET,
+ FWD_TXQ_IDX_OFFSET,
+ OOO_TXQ_IDX_OFFSET,
+};
+#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
+#ifdef BCM_CNIC
+#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
+#endif
/* fast path */
/*
@@ -453,6 +454,7 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
+ bool l4_rxhash;
u16 gro_size;
u16 full_page;
};
@@ -481,6 +483,8 @@ struct bnx2x_fp_txdata {
__le16 *tx_cons_sb;
int txq_index;
+ struct bnx2x_fastpath *parent_fp;
+ int tx_ring_size;
};
enum bnx2x_tpa_mode_t {
@@ -507,7 +511,7 @@ struct bnx2x_fastpath {
enum bnx2x_tpa_mode_t mode;
u8 max_cos; /* actual number of active tx coses */
- struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS];
+ struct bnx2x_fp_txdata *txdata_ptr[BNX2X_MULTI_TX_COS];
struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
@@ -547,51 +551,45 @@ struct bnx2x_fastpath {
rx_calls;
/* TPA related */
- struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
+ struct bnx2x_agg_info *tpa_info;
u8 disable_tpa;
#ifdef BNX2X_STOP_ON_ERROR
u64 tpa_queue_used;
#endif
-
- struct tstorm_per_queue_stats old_tclient;
- struct ustorm_per_queue_stats old_uclient;
- struct xstorm_per_queue_stats old_xclient;
- struct bnx2x_eth_q_stats eth_q_stats;
- struct bnx2x_eth_q_stats_old eth_q_stats_old;
-
/* The size is calculated using the following:
sizeof name field from netdev structure +
4 ('-Xx-' string) +
4 (for the digits and to make it DWORD aligned) */
#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
char name[FP_NAME_SIZE];
-
- /* MACs object */
- struct bnx2x_vlan_mac_obj mac_obj;
-
- /* Queue State object */
- struct bnx2x_queue_sp_obj q_obj;
-
};
-#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var)
+#define bnx2x_sp_obj(bp, fp) ((bp)->sp_objs[(fp)->index])
+#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
+#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
-/* FCoE L2 `fastpath' entry is right after the eth entries */
-#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
-#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
-#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
-#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
- txdata[FIRST_TX_COS_INDEX].var)
+#define FCOE_IDX_OFFSET 0
+
+#define FCOE_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) + \
+ FCOE_IDX_OFFSET)
+#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX(bp)])
+#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
+#define bnx2x_fcoe_inner_sp_obj(bp) (&bp->sp_objs[FCOE_IDX(bp)])
+#define bnx2x_fcoe_sp_obj(bp, var) (bnx2x_fcoe_inner_sp_obj(bp)->var)
+#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
+ txdata_ptr[FIRST_TX_COS_INDEX] \
+ ->var)
#define IS_ETH_FP(fp) (fp->index < \
BNX2X_NUM_ETH_QUEUES(fp->bp))
#ifdef BCM_CNIC
-#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
-#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
+#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX(fp->bp))
+#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
#else
#define IS_FCOE_FP(fp) false
#define IS_FCOE_IDX(idx) false
@@ -616,6 +614,22 @@ struct bnx2x_fastpath {
#define TX_BD(x) ((x) & MAX_TX_BD)
#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
+/* number of NEXT_PAGE descriptors may be required during placement */
+#define NEXT_CNT_PER_TX_PKT(bds) \
+ (((bds) + MAX_TX_DESC_CNT - 1) / \
+ MAX_TX_DESC_CNT * NEXT_PAGE_TX_DESC_CNT)
+/* max BDs per tx packet w/o next_pages:
+ * START_BD - describes packed
+ * START_BD(splitted) - includes unpaged data segment for GSO
+ * PARSING_BD - for TSO and CSUM data
+ * Frag BDs - decribes pages for frags
+ */
+#define BDS_PER_TX_PKT 3
+#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
+/* max BDs per tx packet including next pages */
+#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \
+ NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))
+
/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
#define NUM_RX_RINGS 8
#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
@@ -805,8 +819,11 @@ struct bnx2x_common {
#define CHIP_NUM_57810_MF 0x16ae
#define CHIP_NUM_57811 0x163d
#define CHIP_NUM_57811_MF 0x163e
-#define CHIP_NUM_57840 0x168d
-#define CHIP_NUM_57840_MF 0x16ab
+#define CHIP_NUM_57840_OBSOLETE 0x168d
+#define CHIP_NUM_57840_MF_OBSOLETE 0x16ab
+#define CHIP_NUM_57840_4_10 0x16a1
+#define CHIP_NUM_57840_2_20 0x16a2
+#define CHIP_NUM_57840_MF 0x16a4
#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
@@ -818,8 +835,12 @@ struct bnx2x_common {
#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811)
#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF)
-#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840)
-#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF)
+#define CHIP_IS_57840(bp) \
+ ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE))
+#define CHIP_IS_57840_MF(bp) ((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE))
#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
CHIP_IS_57711E(bp))
#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
@@ -978,8 +999,8 @@ union cdu_context {
};
/* CDU host DB constants */
-#define CDU_ILT_PAGE_SZ_HW 3
-#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
+#define CDU_ILT_PAGE_SZ_HW 2
+#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
#ifdef BCM_CNIC
@@ -1182,11 +1203,31 @@ struct bnx2x_prev_path_list {
struct list_head list;
};
+struct bnx2x_sp_objs {
+ /* MACs object */
+ struct bnx2x_vlan_mac_obj mac_obj;
+
+ /* Queue State object */
+ struct bnx2x_queue_sp_obj q_obj;
+};
+
+struct bnx2x_fp_stats {
+ struct tstorm_per_queue_stats old_tclient;
+ struct ustorm_per_queue_stats old_uclient;
+ struct xstorm_per_queue_stats old_xclient;
+ struct bnx2x_eth_q_stats eth_q_stats;
+ struct bnx2x_eth_q_stats_old eth_q_stats_old;
+};
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
*/
struct bnx2x_fastpath *fp;
+ struct bnx2x_sp_objs *sp_objs;
+ struct bnx2x_fp_stats *fp_stats;
+ struct bnx2x_fp_txdata *bnx2x_txq;
+ int bnx2x_txq_size;
void __iomem *regview;
void __iomem *doorbells;
u16 db_size;
@@ -1301,7 +1342,9 @@ struct bnx2x {
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
#define BC_SUPPORTS_PFC_STATS (1 << 17)
+#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
#define USING_SINGLE_MSIX_FLAG (1 << 20)
+#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1377,6 +1420,7 @@ struct bnx2x {
#define BNX2X_MAX_COS 3
#define BNX2X_MAX_TX_COS 2
int num_queues;
+ int num_napi_queues;
int disable_tpa;
u32 rx_mode;
@@ -1389,6 +1433,7 @@ struct bnx2x {
u8 igu_dsb_id;
u8 igu_base_sb;
u8 igu_sb_cnt;
+
dma_addr_t def_status_blk_mapping;
struct bnx2x_slowpath *slowpath;
@@ -1420,7 +1465,11 @@ struct bnx2x {
dma_addr_t fw_stats_data_mapping;
int fw_stats_data_sz;
- struct hw_context context;
+ /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+ * context size we need 8 ILT entries.
+ */
+#define ILT_MAX_L2_LINES 8
+ struct hw_context context[ILT_MAX_L2_LINES];
struct bnx2x_ilt *ilt;
#define BP_ILT(bp) ((bp)->ilt)
@@ -1433,13 +1482,14 @@ struct bnx2x {
/*
* Maximum CID count that might be required by the bnx2x:
- * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
+ * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
*/
-#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
- NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
+ + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
+ + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
ILT_PAGE_CIDS))
-#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
int qm_cid_count;
@@ -1598,6 +1648,8 @@ struct bnx2x {
extern int num_queues;
#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \
+ NON_ETH_CONTEXT_USE)
#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
@@ -1656,6 +1708,9 @@ struct bnx2x_func_init_params {
continue; \
else
+#define for_each_napi_rx_queue(bp, var) \
+ for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
+
/* Skip OOO FP */
#define for_each_tx_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
@@ -1709,15 +1764,6 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags);
/**
- * Deletes all MACs configured for the specific MAC object.
- *
- * @param bp Function driver instance
- * @param mac_obj MAC object to cleanup
- *
- * @return zero if all MACs were cleaned
- */
-
-/**
* bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
*
* @bp: driver handle
@@ -1817,6 +1863,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define LOAD_NORMAL 0
#define LOAD_OPEN 1
#define LOAD_DIAG 2
+#define LOAD_LOOPBACK_EXT 3
#define UNLOAD_NORMAL 0
#define UNLOAD_CLOSE 1
#define UNLOAD_RECOVERY 2
@@ -1899,13 +1946,17 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PCICFG_LINK_SPEED 0xf0000
#define PCICFG_LINK_SPEED_SHIFT 16
-
-#define BNX2X_NUM_TESTS 7
+#define BNX2X_NUM_TESTS_SF 7
+#define BNX2X_NUM_TESTS_MF 3
+#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
+ BNX2X_NUM_TESTS_SF)
#define BNX2X_PHY_LOOPBACK 0
#define BNX2X_MAC_LOOPBACK 1
+#define BNX2X_EXT_LOOPBACK 2
#define BNX2X_PHY_LOOPBACK_FAILED 1
#define BNX2X_MAC_LOOPBACK_FAILED 2
+#define BNX2X_EXT_LOOPBACK_FAILED 3
#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
BNX2X_PHY_LOOPBACK_FAILED)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8098eea9704d..e879e19eb0d6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -40,12 +40,19 @@
* Makes sure the contents of the bp->fp[to].napi is kept
* intact. This is done by first copying the napi struct from
* the target to the source, and then mem copying the entire
- * source onto the target
+ * source onto the target. Update txdata pointers and related
+ * content.
*/
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
struct bnx2x_fastpath *from_fp = &bp->fp[from];
struct bnx2x_fastpath *to_fp = &bp->fp[to];
+ struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
+ struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
+ struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
+ struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
+ int old_max_eth_txqs, new_max_eth_txqs;
+ int old_txdata_index = 0, new_txdata_index = 0;
/* Copy the NAPI object as it has been already initialized */
from_fp->napi = to_fp->napi;
@@ -53,6 +60,30 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
/* Move bnx2x_fastpath contents */
memcpy(to_fp, from_fp, sizeof(*to_fp));
to_fp->index = to;
+
+ /* move sp_objs contents as well, as their indices match fp ones */
+ memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
+
+ /* move fp_stats contents as well, as their indices match fp ones */
+ memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
+
+ /* Update txdata pointers in fp and move txdata content accordingly:
+ * Each fp consumes 'max_cos' txdata structures, so the index should be
+ * decremented by max_cos x delta.
+ */
+
+ old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
+ new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
+ (bp)->max_cos;
+ if (from == FCOE_IDX(bp)) {
+ old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+ new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+ }
+
+ memcpy(&bp->bnx2x_txq[old_txdata_index],
+ &bp->bnx2x_txq[new_txdata_index],
+ sizeof(struct bnx2x_fp_txdata));
+ to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -190,7 +221,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
if ((netif_tx_queue_stopped(txq)) &&
(bp->state == BNX2X_STATE_OPEN) &&
- (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
+ (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
netif_tx_wake_queue(txq);
__netif_tx_unlock(txq);
@@ -264,12 +295,20 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
* CQE (calculated by HW).
*/
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
- const struct eth_fast_path_rx_cqe *cqe)
+ const struct eth_fast_path_rx_cqe *cqe,
+ bool *l4_rxhash)
{
/* Set Toeplitz hash from CQE */
if ((bp->dev->features & NETIF_F_RXHASH) &&
- (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
+ enum eth_rss_hash_type htype;
+
+ htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
+ *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
+ (htype == TCP_IPV6_HASH_TYPE);
return le32_to_cpu(cqe->rss_hash_result);
+ }
+ *l4_rxhash = false;
return 0;
}
@@ -323,7 +362,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
- tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
tpa_info->full_page =
@@ -479,7 +518,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
where we are and drop the whole packet */
err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
if (unlikely(err)) {
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
return err;
}
@@ -558,6 +597,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
skb->rxhash = tpa_info->rxhash;
+ skb->l4_rxhash = tpa_info->l4_rxhash;
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -584,7 +624,7 @@ drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
"Failed to allocate or map a new skb - dropping packet!\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
static int bnx2x_alloc_rx_data(struct bnx2x *bp,
@@ -617,8 +657,10 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
return 0;
}
-static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
- struct bnx2x_fastpath *fp)
+static
+void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+ struct bnx2x_fastpath *fp,
+ struct bnx2x_eth_q_stats *qstats)
{
/* Do nothing if no IP/L4 csum validation was done */
@@ -632,7 +674,7 @@ static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
if (cqe->fast_path_cqe.type_error_flags &
(ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
- fp->eth_q_stats.hw_csum_err++;
+ qstats->hw_csum_err++;
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -679,6 +721,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad, queue;
u8 *data;
+ bool l4_rxhash;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -776,7 +819,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR flags %x rx packet %u\n",
cqe_fp_flags, sw_comp_cons);
- fp->eth_q_stats.rx_err_discard_pkt++;
+ bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
goto reuse_rx;
}
@@ -789,7 +832,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (skb == NULL) {
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
goto reuse_rx;
}
memcpy(skb->data, data + pad, len);
@@ -803,14 +846,15 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
skb = build_skb(data, 0);
if (unlikely(!skb)) {
kfree(data);
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->
+ rx_skb_alloc_failed++;
goto next_rx;
}
skb_reserve(skb, pad);
} else {
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
goto next_rx;
@@ -821,13 +865,14 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
/* Set Toeplitz hash for a none-LRO skb */
- skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
+ skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
+ skb->l4_rxhash = l4_rxhash;
skb_checksum_none_assert(skb);
if (bp->dev->features & NETIF_F_RXCSUM)
- bnx2x_csum_validate(skb, cqe, fp);
-
+ bnx2x_csum_validate(skb, cqe, fp,
+ bnx2x_fp_qstats(bp, fp));
skb_record_rx_queue(skb, fp->rx_queue);
@@ -888,7 +933,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
prefetch(fp->rx_cons_sb);
for_each_cos_in_tx_queue(fp, cos)
- prefetch(fp->txdata[cos].tx_cons_sb);
+ prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
@@ -1205,7 +1250,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
unsigned pkts_compl = 0, bytes_compl = 0;
u16 sw_prod = txdata->tx_pkt_prod;
@@ -1217,7 +1262,8 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
sw_cons++;
}
netdev_tx_reset_queue(
- netdev_get_tx_queue(bp->dev, txdata->txq_index));
+ netdev_get_tx_queue(bp->dev,
+ txdata->txq_index));
}
}
}
@@ -1325,7 +1371,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
free_irq(bp->dev->irq, bp->dev);
}
-int __devinit bnx2x_enable_msix(struct bnx2x *bp)
+int bnx2x_enable_msix(struct bnx2x *bp)
{
int msix_vec = 0, i, rc, req_cnt;
@@ -1579,6 +1625,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
#endif
/* Add special queues */
bp->num_queues += NON_ETH_CONTEXT_USE;
+
+ BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}
/**
@@ -1607,8 +1655,8 @@ static int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
int rc, tx, rx;
- tx = MAX_TXQS_PER_COS * bp->max_cos;
- rx = BNX2X_NUM_ETH_QUEUES(bp);
+ tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
+ rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
/* account for fcoe queue */
#ifdef BCM_CNIC
@@ -1666,14 +1714,13 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
static int bnx2x_init_rss_pf(struct bnx2x *bp)
{
int i;
- u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
/* Prepare the initial contents fo the indirection table if RSS is
* enabled
*/
- for (i = 0; i < sizeof(ind_table); i++)
- ind_table[i] =
+ for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
+ bp->rss_conf_obj.ind_table[i] =
bp->fp->cl_id +
ethtool_rxfh_indir_default(i, num_eth_queues);
@@ -1685,12 +1732,11 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
* For 57712 and newer on the other hand it's a per-function
* configuration.
*/
- return bnx2x_config_rss_eth(bp, ind_table,
- bp->port.pmf || !CHIP_IS_E1x(bp));
+ return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- u8 *ind_table, bool config_hash)
+ bool config_hash)
{
struct bnx2x_config_rss_params params = {NULL};
int i;
@@ -1713,11 +1759,15 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+ if (rss_obj->udp_rss_v4)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
+ if (rss_obj->udp_rss_v6)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
/* Hash bits */
params.rss_result_mask = MULTI_MASK;
- memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+ memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
if (config_hash) {
/* RSS keys */
@@ -1754,7 +1804,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
int rc;
unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
struct bnx2x_mcast_ramrod_params rparam = {NULL};
- struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
/***************** Cleanup MACs' object first *************************/
@@ -1765,7 +1815,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
/* Clean ETH primary MAC */
__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
- rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
+ rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
&ramrod_flags);
if (rc != 0)
BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
@@ -1851,11 +1901,16 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
+
+ int cos;
struct napi_struct orig_napi = fp->napi;
+ struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
/* bzero bnx2x_fastpath contents */
- if (bp->stats_init)
+ if (bp->stats_init) {
+ memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
memset(fp, 0, sizeof(*fp));
- else {
+ } else {
/* Keep Queue statistics */
struct bnx2x_eth_q_stats *tmp_eth_q_stats;
struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
@@ -1863,26 +1918,27 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
GFP_KERNEL);
if (tmp_eth_q_stats)
- memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
+ memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
sizeof(struct bnx2x_eth_q_stats));
tmp_eth_q_stats_old =
kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
GFP_KERNEL);
if (tmp_eth_q_stats_old)
- memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
+ memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
sizeof(struct bnx2x_eth_q_stats_old));
+ memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
memset(fp, 0, sizeof(*fp));
if (tmp_eth_q_stats) {
- memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
- sizeof(struct bnx2x_eth_q_stats));
+ memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
+ sizeof(struct bnx2x_eth_q_stats));
kfree(tmp_eth_q_stats);
}
if (tmp_eth_q_stats_old) {
- memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
+ memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
sizeof(struct bnx2x_eth_q_stats_old));
kfree(tmp_eth_q_stats_old);
}
@@ -1891,7 +1947,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
/* Restore the NAPI object as it has been already initialized */
fp->napi = orig_napi;
-
+ fp->tpa_info = orig_tpa_info;
fp->bp = bp;
fp->index = index;
if (IS_ETH_FP(fp))
@@ -1900,6 +1956,16 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
/* Special queues support only one CoS */
fp->max_cos = 1;
+ /* Init txdata pointers */
+#ifdef BCM_CNIC
+ if (IS_FCOE_FP(fp))
+ fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
+#endif
+ if (IS_ETH_FP(fp))
+ for_each_cos_in_tx_queue(fp, cos)
+ fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
+ BNX2X_NUM_ETH_QUEUES(bp) + index];
+
/*
* set the tpa flag for each queue. The tpa flag determines the queue
* minimal size so it must be set prior to queue memory allocation
@@ -1949,11 +2015,13 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/*
* Zero fastpath structures preserving invariants like napi, which are
* allocated only once, fp index, max_cos, bp pointer.
- * Also set fp->disable_tpa.
+ * Also set fp->disable_tpa and txdata_ptr.
*/
DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
for_each_queue(bp, i)
bnx2x_bz_fp(bp, i);
+ memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
+ sizeof(struct bnx2x_fp_txdata));
/* Set the receive queues buffer size */
@@ -2176,6 +2244,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
break;
case LOAD_DIAG:
+ case LOAD_LOOPBACK_EXT:
bp->state = BNX2X_STATE_DIAG;
break;
@@ -2195,6 +2264,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* re-read iscsi info */
bnx2x_get_iscsi_info(bp);
bnx2x_setup_cnic_irq_info(bp);
+ bnx2x_setup_cnic_info(bp);
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
@@ -2215,7 +2285,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
return -EBUSY;
}
- bnx2x_dcbx_init(bp);
+ /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
+ if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
+ bnx2x_dcbx_init(bp, false);
+
return 0;
#ifndef BNX2X_STOP_ON_ERROR
@@ -2298,6 +2371,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Stop Tx */
bnx2x_tx_disable(bp);
+ netdev_reset_tc(bp->dev);
#ifdef BCM_CNIC
bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
@@ -2456,8 +2530,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
#endif
for_each_cos_in_tx_queue(fp, cos)
- if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
- bnx2x_tx_int(bp, &fp->txdata[cos]);
+ if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
+ bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
if (bnx2x_has_rx_work(fp)) {
@@ -2834,7 +2908,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- struct bnx2x_fastpath *fp;
struct netdev_queue *txq;
struct bnx2x_fp_txdata *txdata;
struct sw_tx_bd *tx_buf;
@@ -2844,7 +2917,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
- int nbd, txq_index, fp_index, txdata_index;
+ int nbd, txq_index;
dma_addr_t mapping;
u32 xmit_type = bnx2x_xmit_type(bp, skb);
int i;
@@ -2863,39 +2936,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
- /* decode the fastpath index and the cos index from the txq */
- fp_index = TXQ_TO_FP(txq_index);
- txdata_index = TXQ_TO_COS(txq_index);
-
-#ifdef BCM_CNIC
- /*
- * Override the above for the FCoE queue:
- * - FCoE fp entry is right after the ETH entries.
- * - FCoE L2 queue uses bp->txdata[0] only.
- */
- if (unlikely(!NO_FCOE(bp) && (txq_index ==
- bnx2x_fcoe_tx(bp, txq_index)))) {
- fp_index = FCOE_IDX;
- txdata_index = 0;
- }
-#endif
+ txdata = &bp->bnx2x_txq[txq_index];
/* enable this debug print to view the transmission queue being used
DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
txq_index, fp_index, txdata_index); */
- /* locate the fastpath and the txdata */
- fp = &bp->fp[fp_index];
- txdata = &fp->txdata[txdata_index];
-
/* enable this debug print to view the tranmission details
DP(NETIF_MSG_TX_QUEUED,
"transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
txdata->cid, fp_index, txdata_index, txdata, fp); */
if (unlikely(bnx2x_tx_avail(bp, txdata) <
- (skb_shinfo(skb)->nr_frags + 3))) {
- fp->eth_q_stats.driver_xoff++;
+ skb_shinfo(skb)->nr_frags +
+ BDS_PER_TX_PKT +
+ NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
netif_tx_stop_queue(txq);
BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
@@ -3169,7 +3225,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
txdata->tx_bd_prod += nbd;
- if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
+ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
netif_tx_stop_queue(txq);
/* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -3177,8 +3233,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
* fp->bd_tx_cons */
smp_mb();
- fp->eth_q_stats.driver_xoff++;
- if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+ if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
netif_tx_wake_queue(txq);
}
txdata->tx_pkt++;
@@ -3243,7 +3299,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
/* configure traffic class to transmission queue mapping */
for (cos = 0; cos < bp->max_cos; cos++) {
count = BNX2X_NUM_ETH_QUEUES(bp);
- offset = cos * MAX_TXQS_PER_COS;
+ offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
netdev_set_tc_queue(dev, cos, count, offset);
DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
"mapping tc %d to offset %d count %d\n",
@@ -3342,7 +3398,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
if (!skip_tx_queue(bp, fp_index)) {
/* fastpath tx rings: tx_buf tx_desc */
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
DP(NETIF_MSG_IFDOWN,
"freeing tx memory of fp %d cos %d cid %d\n",
@@ -3414,7 +3470,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
cqe_ring_prod);
fp->rx_pkt = fp->rx_calls = 0;
- fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+ bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
return i - failure_cnt;
}
@@ -3499,7 +3555,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
if (!skip_tx_queue(bp, index)) {
/* fastpath tx rings: tx_buf tx_desc */
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
DP(NETIF_MSG_IFUP,
"allocating tx memory of fp %d cos %d\n",
@@ -3582,7 +3638,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
#ifdef BCM_CNIC
if (!NO_FCOE(bp))
/* FCoE */
- if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+ if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
/* we will fail load process instead of mark
* NO_FCOE_FLAG
*/
@@ -3607,7 +3663,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
*/
/* move FCoE fp even NO_FCOE_FLAG is on */
- bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
+ bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
#endif
bp->num_queues -= delta;
BNX2X_ERR("Adjusted num of queues from %d to %d\n",
@@ -3619,7 +3675,11 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
+ kfree(bp->fp->tpa_info);
kfree(bp->fp);
+ kfree(bp->sp_objs);
+ kfree(bp->fp_stats);
+ kfree(bp->bnx2x_txq);
kfree(bp->msix_table);
kfree(bp->ilt);
}
@@ -3630,6 +3690,8 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
struct msix_entry *tbl;
struct bnx2x_ilt *ilt;
int msix_table_size = 0;
+ int fp_array_size;
+ int i;
/*
* The biggest MSI-X table we might need is as a maximum number of fast
@@ -3638,12 +3700,44 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
msix_table_size = bp->igu_sb_cnt + 1;
/* fp array: RSS plus CNIC related L2 queues */
- fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
- sizeof(*fp), GFP_KERNEL);
+ fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
+ BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
+
+ fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
+ for (i = 0; i < fp_array_size; i++) {
+ fp[i].tpa_info =
+ kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
+ sizeof(struct bnx2x_agg_info), GFP_KERNEL);
+ if (!(fp[i].tpa_info))
+ goto alloc_err;
+ }
+
bp->fp = fp;
+ /* allocate sp objs */
+ bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
+ GFP_KERNEL);
+ if (!bp->sp_objs)
+ goto alloc_err;
+
+ /* allocate fp_stats */
+ bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
+ GFP_KERNEL);
+ if (!bp->fp_stats)
+ goto alloc_err;
+
+ /* Allocate memory for the transmission queues array */
+ bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
+#ifdef BCM_CNIC
+ bp->bnx2x_txq_size++;
+#endif
+ bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
+ sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
+ if (!bp->bnx2x_txq)
+ goto alloc_err;
+
/* msix table */
tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
if (!tbl)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 7cd99b75347a..dfa757e74296 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -29,6 +29,7 @@
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
extern int num_queues;
+extern int int_mode;
/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
@@ -89,12 +90,12 @@ void bnx2x_send_unload_done(struct bnx2x *bp);
* bnx2x_config_rss_pf - configure RSS parameters in a PF.
*
* @bp: driver handle
- * @rss_obj RSS object to use
+ * @rss_obj: RSS object to use
* @ind_table: indirection table to configure
* @config_hash: re-configure RSS hash keys configuration
*/
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- u8 *ind_table, bool config_hash);
+ bool config_hash);
/**
* bnx2x__init_func_obj - init function object
@@ -244,6 +245,14 @@ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
* @bp: driver handle
*/
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_cnic_info - provides cnic with updated info
+ *
+ * @bp: driver handle
+ */
+void bnx2x_setup_cnic_info(struct bnx2x *bp);
+
#endif
/**
@@ -409,7 +418,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp);
*
* @bp: driver handle
*/
-void bnx2x_dcbx_init(struct bnx2x *bp);
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);
/**
* bnx2x_set_power_state - set power state to the requested value.
@@ -487,7 +496,7 @@ void bnx2x_netif_start(struct bnx2x *bp);
* fills msix_table, requests vectors, updates num_queues
* according to number of available vectors.
*/
-int __devinit bnx2x_enable_msix(struct bnx2x *bp);
+int bnx2x_enable_msix(struct bnx2x *bp);
/**
* bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
@@ -728,7 +737,7 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
u8 cos;
for_each_cos_in_tx_queue(fp, cos)
- if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
+ if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
return true;
return false;
}
@@ -780,8 +789,10 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
int i;
+ bp->num_napi_queues = bp->num_queues;
+
/* Add NAPI objects */
- for_each_rx_queue(bp, i)
+ for_each_napi_rx_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
@@ -790,10 +801,12 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
- for_each_rx_queue(bp, i)
+ for_each_napi_rx_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
+void bnx2x_set_int_mode(struct bnx2x *bp);
+
static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
if (bp->flags & USING_MSIX_FLAG) {
@@ -809,7 +822,8 @@ static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
return num_queues ?
min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
- min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
+ min_t(int, netif_get_num_default_rss_queues(),
+ BNX2X_MAX_QUEUES(bp));
}
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
@@ -865,11 +879,9 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
return 2 * vn + BP_PORT(bp);
}
-static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table,
- bool config_hash)
+static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table,
- config_hash);
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
}
/**
@@ -975,8 +987,8 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
struct bnx2x *bp = fp->bp;
/* Configure classification DBs */
- bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
- BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+ bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
+ fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
bnx2x_sp_mapping(bp, mac_rdata),
BNX2X_FILTER_MAC_PENDING,
&bp->sp_state, obj_type,
@@ -1068,12 +1080,14 @@ static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
}
static inline void bnx2x_init_txdata(struct bnx2x *bp,
- struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
- __le16 *tx_cons_sb)
+ struct bnx2x_fp_txdata *txdata, u32 cid,
+ int txq_index, __le16 *tx_cons_sb,
+ struct bnx2x_fastpath *fp)
{
txdata->cid = cid;
txdata->txq_index = txq_index;
txdata->tx_cons_sb = tx_cons_sb;
+ txdata->parent_fp = fp;
DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
txdata->cid, txdata->txq_index);
@@ -1107,18 +1121,13 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
BNX2X_FCOE_ETH_CL_ID_IDX);
- /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than
- * 16 ETH clients per function when CNIC is enabled!
- *
- * Fix it ASAP!!!
- */
- bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
+ bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
-
- bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
- fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
+ bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
+ fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
+ fp);
DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
@@ -1135,8 +1144,8 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
/* No multi-CoS for FCoE L2 client */
BUG_ON(fp->max_cos != 1);
- bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
- BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+ &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
DP(NETIF_MSG_IFUP,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 4f9244bd7530..8a73374e52a7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -972,23 +972,26 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp)
bp->dcbx_config_params.admin_default_priority = 0;
}
-void bnx2x_dcbx_init(struct bnx2x *bp)
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem)
{
u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
+ /* only PMF can send ADMIN msg to MFW in old MFW versions */
+ if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF)))
+ return;
+
if (bp->dcbx_enabled <= 0)
return;
/* validate:
 	 * chip is good for dcbx version,
* dcb is wanted
- * the function is pmf
* shmem2 contains DCBX support fields
*/
DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n",
bp->dcb_state, bp->port.pmf);
- if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
+ if (bp->dcb_state == BNX2X_DCB_STATE_ON &&
SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
dcbx_lldp_params_offset =
SHMEM2_RD(bp, dcbx_lldp_params_offset);
@@ -999,12 +1002,23 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
- bnx2x_dcbx_admin_mib_updated_params(bp,
- dcbx_lldp_params_offset);
+ /* need HW lock to avoid scenario of two drivers
+ * writing in parallel to shmem
+ */
+ bnx2x_acquire_hw_lock(bp,
+ HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
+ if (update_shmem)
+ bnx2x_dcbx_admin_mib_updated_params(bp,
+ dcbx_lldp_params_offset);
/* Let HW start negotiation */
bnx2x_fw_command(bp,
DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
+ /* release HW lock only after MFW acks that it finished
+ * reading values from shmem
+ */
+ bnx2x_release_hw_lock(bp,
+ HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
}
}
}
@@ -2063,10 +2077,8 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
"Handling parity error recovery. Try again later\n");
return 1;
}
- if (netif_running(bp->dev)) {
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- rc = bnx2x_nic_load(bp, LOAD_NORMAL);
- }
+ if (netif_running(bp->dev))
+ bnx2x_dcbx_init(bp, true);
DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
if (rc)
return 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ddc18ee5c5ae..fc4e0e3885b0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -177,6 +177,8 @@ static const struct {
4, STATS_FLAGS_FUNC, "recoverable_errors" },
{ STATS_OFFSET32(unrecoverable_error),
4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
+ { STATS_OFFSET32(eee_tx_lpi),
+ 4, STATS_FLAGS_PORT, "Tx LPI entry count"}
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
@@ -185,7 +187,8 @@ static int bnx2x_get_port_type(struct bnx2x *bp)
int port_type;
u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
switch (bp->link_params.phy[phy_idx].media_type) {
- case ETH_PHY_SFP_FIBER:
+ case ETH_PHY_SFPP_10G_FIBER:
+ case ETH_PHY_SFP_1G_FIBER:
case ETH_PHY_XFP_FIBER:
case ETH_PHY_KR:
case ETH_PHY_CX4:
@@ -218,6 +221,11 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
(bp->port.supported[cfg_idx ^ 1] &
(SUPPORTED_TP | SUPPORTED_FIBRE));
cmd->advertising = bp->port.advertising[cfg_idx];
+ if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type ==
+ ETH_PHY_SFP_1G_FIBER) {
+ cmd->supported &= ~(SUPPORTED_10000baseT_Full);
+ cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
+ }
if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) {
if (!(bp->flags & MF_FUNC_DIS)) {
@@ -293,7 +301,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
- u32 speed;
+ u32 speed, phy_idx;
if (IS_MF_SD(bp))
return 0;
@@ -548,9 +556,11 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
"10G half not supported\n");
return -EINVAL;
}
-
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
if (!(bp->port.supported[cfg_idx]
- & SUPPORTED_10000baseT_Full)) {
+ & SUPPORTED_10000baseT_Full) ||
+ (bp->link_params.phy[phy_idx].media_type ==
+ ETH_PHY_SFP_1G_FIBER)) {
DP(BNX2X_MSG_ETHTOOL,
"10G full not supported\n");
return -EINVAL;
@@ -824,7 +834,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNX2X_NUM_STATS;
- info->testinfo_len = BNX2X_NUM_TESTS;
+ info->testinfo_len = BNX2X_NUM_TESTS(bp);
info->eedump_len = bp->common.flash_size;
info->regdump_len = bnx2x_get_regs_len(dev);
}
@@ -1150,6 +1160,65 @@ static int bnx2x_get_eeprom(struct net_device *dev,
return rc;
}
+static int bnx2x_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0, phy_idx;
+ u8 *user_data = data;
+ int remaining_len = ee->len, xfer_size;
+ unsigned int page_off = ee->offset;
+
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
+ bnx2x_acquire_phy_lock(bp);
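+	/* The transceiver EEPROM is read in SFP_EEPROM_PAGE_SIZE chunks,
+	 * advancing the offset and user buffer after each transfer.
+	 */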
+ while (!rc && remaining_len > 0) {
+ xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ?
+ SFP_EEPROM_PAGE_SIZE : remaining_len;
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ page_off,
+ xfer_size,
+ user_data);
+ remaining_len -= xfer_size;
+ user_data += xfer_size;
+ page_off += xfer_size;
+ }
+
+ bnx2x_release_phy_lock(bp);
+ return rc;
+}
+
+static int bnx2x_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int phy_idx;
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
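+	/* The supported SFP/DA media types are all reported as SFF-8079 modules */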
+ switch (bp->link_params.phy[phy_idx].media_type) {
+ case ETH_PHY_SFPP_10G_FIBER:
+ case ETH_PHY_SFP_1G_FIBER:
+ case ETH_PHY_DA_TWINAX:
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
u32 cmd_flags)
{
@@ -1531,18 +1600,146 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
return 0;
}
-static const struct {
- char string[ETH_GSTRING_LEN];
-} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
- { "register_test (offline)" },
- { "memory_test (offline)" },
- { "loopback_test (offline)" },
- { "nvram_test (online)" },
- { "interrupt_test (online)" },
- { "link_test (online)" },
- { "idle check (online)" }
+static char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = {
+ "register_test (offline) ",
+ "memory_test (offline) ",
+ "int_loopback_test (offline)",
+ "ext_loopback_test (offline)",
+ "nvram_test (online) ",
+ "interrupt_test (online) ",
+ "link_test (online) "
};
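+
+/* Map SHMEM_EEE_*_ADV bits onto the ethtool ADVERTISED_* speed flags */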
+static u32 bnx2x_eee_to_adv(u32 eee_adv)
+{
+ u32 modes = 0;
+
+ if (eee_adv & SHMEM_EEE_100M_ADV)
+ modes |= ADVERTISED_100baseT_Full;
+ if (eee_adv & SHMEM_EEE_1G_ADV)
+ modes |= ADVERTISED_1000baseT_Full;
+ if (eee_adv & SHMEM_EEE_10G_ADV)
+ modes |= ADVERTISED_10000baseT_Full;
+
+ return modes;
+}
+
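+/* Inverse of bnx2x_eee_to_adv(): build SHMEM EEE advertisement bits from
+ * ethtool flags, shifted into the requested status field.
+ */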
+static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
+{
+ u32 eee_adv = 0;
+ if (modes & ADVERTISED_100baseT_Full)
+ eee_adv |= SHMEM_EEE_100M_ADV;
+ if (modes & ADVERTISED_1000baseT_Full)
+ eee_adv |= SHMEM_EEE_1G_ADV;
+ if (modes & ADVERTISED_10000baseT_Full)
+ eee_adv |= SHMEM_EEE_10G_ADV;
+
+ return eee_adv << shift;
+}
+
+static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 eee_cfg;
+
+ if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
+ DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
+ return -EOPNOTSUPP;
+ }
+
+ eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+
+ edata->supported =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
+ SHMEM_EEE_SUPPORTED_SHIFT);
+
+ edata->advertised =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
+ SHMEM_EEE_ADV_STATUS_SHIFT);
+ edata->lp_advertised =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+
+ /* SHMEM value is in 16u units --> Convert to 1u units. */
+ edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
+
+ edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0;
+ edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0;
+ edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0;
+
+ return 0;
+}
+
+static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 eee_cfg;
+ u32 advertised;
+
+ if (IS_MF(bp))
+ return 0;
+
+ if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
+ DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
+ return -EOPNOTSUPP;
+ }
+
+ eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+
+ if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
+ DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
+ return -EOPNOTSUPP;
+ }
+
+ advertised = bnx2x_adv_to_eee(edata->advertised,
+ SHMEM_EEE_ADV_STATUS_SHIFT);
+ if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
+ DP(BNX2X_MSG_ETHTOOL,
+		   "Direct manipulation of EEE advertisement is not supported\n");
+ return -EINVAL;
+ }
+
+ if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Maximal Tx Lpi timer supported is %x(u)\n",
+ EEE_MODE_TIMER_MASK);
+ return -EINVAL;
+ }
+ if (edata->tx_lpi_enabled &&
+ (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Minimal Tx Lpi timer supported is %d(u)\n",
+ EEE_MODE_NVRAM_AGGRESSIVE_TIME);
+ return -EINVAL;
+ }
+
+ /* All is well; Apply changes*/
+ if (edata->eee_enabled)
+ bp->link_params.eee_mode |= EEE_MODE_ADV_LPI;
+ else
+ bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI;
+
+ if (edata->tx_lpi_enabled)
+ bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI;
+ else
+ bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI;
+
+ bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK;
+ bp->link_params.eee_mode |= (edata->tx_lpi_timer &
+ EEE_MODE_TIMER_MASK) |
+ EEE_MODE_OVERRIDE_NVRAM |
+ EEE_MODE_OUTPUT_TIME;
+
+	/* Restart link to propagate changes */
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
+
enum {
BNX2X_CHIP_E1_OFST = 0,
BNX2X_CHIP_E1H_OFST,
@@ -1811,6 +2008,14 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
+
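+		/* Also wait for the driver's own link_vars to report link-up
+		 * (1400 iterations of 20ms, roughly 28 seconds)
+		 */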
+ cnt = 1400;
+ while (!bp->link_vars.link_up && cnt--)
+ msleep(20);
+
+ if (cnt <= 0 && !bp->link_vars.link_up)
+ DP(BNX2X_MSG_ETHTOOL,
+ "Timeout waiting for link init\n");
}
}
@@ -1821,7 +2026,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
unsigned char *packet;
struct bnx2x_fastpath *fp_rx = &bp->fp[0];
struct bnx2x_fastpath *fp_tx = &bp->fp[0];
- struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
+ struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
u16 tx_start_idx, tx_idx;
u16 rx_start_idx, rx_idx;
u16 pkt_prod, bd_prod;
@@ -1836,13 +2041,16 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
u16 len;
int rc = -ENODEV;
u8 *data;
- struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
+ txdata->txq_index);
/* check the loopback mode */
switch (loopback_mode) {
case BNX2X_PHY_LOOPBACK:
- if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
+ if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
+ DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
return -EINVAL;
+ }
break;
case BNX2X_MAC_LOOPBACK:
if (CHIP_IS_E3(bp)) {
@@ -1859,6 +2067,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
break;
+ case BNX2X_EXT_LOOPBACK:
+ if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't configure external loopback\n");
+ return -EINVAL;
+ }
+ break;
default:
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EINVAL;
@@ -2030,6 +2245,38 @@ static int bnx2x_test_loopback(struct bnx2x *bp)
return rc;
}
+static int bnx2x_test_ext_loopback(struct bnx2x *bp)
+{
+ int rc;
+ u8 is_serdes =
+ (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
+
+ if (BP_NOMCP(bp))
+ return -ENODEV;
+
+ if (!netif_running(bp->dev))
+ return BNX2X_EXT_LOOPBACK_FAILED;
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for external lb) failed\n");
+ return -ENODEV;
+ }
+ bnx2x_wait_for_link(bp, 1, is_serdes);
+
+ bnx2x_netif_stop(bp, 1);
+
+ rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
+ if (rc)
+ DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc);
+
+ bnx2x_netif_start(bp);
+
+ return rc;
+}
+
#define CRC32_RESIDUAL 0xdebb20e3
static int bnx2x_test_nvram(struct bnx2x *bp)
@@ -2112,7 +2359,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
return -ENODEV;
}
- params.q_obj = &bp->fp->q_obj;
+ params.q_obj = &bp->sp_objs->q_obj;
params.cmd = BNX2X_Q_CMD_EMPTY;
__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
@@ -2125,24 +2372,31 @@ static void bnx2x_self_test(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
u8 is_serdes;
+ int rc;
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
netdev_err(bp->dev,
"Handling parity error recovery. Try again later\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+ DP(BNX2X_MSG_ETHTOOL,
+ "Self-test command parameters: offline = %d, external_lb = %d\n",
+ (etest->flags & ETH_TEST_FL_OFFLINE),
+ (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2);
- memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
+ memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
- if (!netif_running(dev))
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test when interface is down\n");
return;
+ }
- /* offline tests are not supported in MF mode */
- if (IS_MF(bp))
- etest->flags &= ~ETH_TEST_FL_OFFLINE;
is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
- if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ /* offline tests are not supported in MF mode */
+ if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
int port = BP_PORT(bp);
u32 val;
u8 link_up;
@@ -2155,7 +2409,14 @@ static void bnx2x_self_test(struct net_device *dev,
link_up = bp->link_vars.link_up;
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- bnx2x_nic_load(bp, LOAD_DIAG);
+ rc = bnx2x_nic_load(bp, LOAD_DIAG);
+ if (rc) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for offline) failed\n");
+ return;
+ }
+
/* wait until link state is restored */
bnx2x_wait_for_link(bp, 1, is_serdes);
@@ -2168,30 +2429,51 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
}
- buf[2] = bnx2x_test_loopback(bp);
+ buf[2] = bnx2x_test_loopback(bp); /* internal LB */
if (buf[2] != 0)
etest->flags |= ETH_TEST_FL_FAILED;
+ if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
+ if (buf[3] != 0)
+ etest->flags |= ETH_TEST_FL_FAILED;
+ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ }
+
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
/* restore input for TX port IF */
REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
-
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (rc) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for online) failed\n");
+ return;
+ }
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up, is_serdes);
}
if (bnx2x_test_nvram(bp) != 0) {
- buf[3] = 1;
+ if (!IS_MF(bp))
+ buf[4] = 1;
+ else
+ buf[0] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
if (bnx2x_test_intr(bp) != 0) {
- buf[4] = 1;
+ if (!IS_MF(bp))
+ buf[5] = 1;
+ else
+ buf[1] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
if (bnx2x_link_test(bp, is_serdes) != 0) {
- buf[5] = 1;
+ if (!IS_MF(bp))
+ buf[6] = 1;
+ else
+ buf[2] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
@@ -2236,7 +2518,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
return num_stats;
case ETH_SS_TEST:
- return BNX2X_NUM_TESTS;
+ return BNX2X_NUM_TESTS(bp);
default:
return -EINVAL;
@@ -2246,7 +2528,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- int i, j, k;
+ int i, j, k, offset, start;
char queue_name[MAX_QUEUE_NAME_LEN+1];
switch (stringset) {
@@ -2277,7 +2559,17 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
break;
case ETH_SS_TEST:
- memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
+ /* First 4 tests cannot be done in MF mode */
+ if (!IS_MF(bp))
+ start = 0;
+ else
+ start = 4;
+ for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp));
+ i++, j++) {
+ offset = sprintf(buf+32*i, "%s",
+ bnx2x_tests_str_arr[j]);
+			*(buf+32*i+offset) = '\0';
+ }
break;
}
}
@@ -2291,7 +2583,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
if (is_multi(bp)) {
for_each_eth_queue(bp, i) {
- hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
+ hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
if (bnx2x_q_stats_arr[j].size == 0) {
/* skip this counter */
@@ -2375,6 +2667,41 @@ static int bnx2x_set_phys_id(struct net_device *dev,
return 0;
}
+static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (bp->rss_conf_obj.udp_rss_v4)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if (bp->rss_conf_obj.udp_rss_v6)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules __always_unused)
{
@@ -2384,7 +2711,102 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = BNX2X_NUM_ETH_QUEUES(bp);
return 0;
+ case ETHTOOL_GRXFH:
+ return bnx2x_get_rss_flags(bp, info);
+ default:
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+ int udp_rss_requested;
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ info->flow_type, info->data);
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+		/* For TCP only 4-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ udp_rss_requested = 1;
+ else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
+ udp_rss_requested = 0;
+ else
+ return -EINVAL;
+ if ((info->flow_type == UDP_V4_FLOW) &&
+ (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
+ bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
+ DP(BNX2X_MSG_ETHTOOL,
+			   "rss re-configured, UDP 4-tuple %s\n",
+ udp_rss_requested ? "enabled" : "disabled");
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ } else if ((info->flow_type == UDP_V6_FLOW) &&
+ (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
+ bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
+			DP(BNX2X_MSG_ETHTOOL,
+			   "rss re-configured, UDP 4-tuple %s\n",
+			   udp_rss_requested ? "enabled" : "disabled");
+			return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ } else {
+ return 0;
+ }
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+		/* For IP only 2-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IP_USER_FLOW:
+ case ETHER_FLOW:
+ /* RSS is not supported for these protocols */
+ if (info->data) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return bnx2x_set_rss_flags(bp, info);
default:
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EOPNOTSUPP;
@@ -2424,7 +2846,6 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
- u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
/*
@@ -2436,10 +2857,88 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
* align the received table to the Client ID of the leading RSS
* queue
*/
- ind_table[i] = indir[i] + bp->fp->cl_id;
+ bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
}
- return bnx2x_config_rss_eth(bp, ind_table, false);
+ return bnx2x_config_rss_eth(bp, false);
+}
+
+/**
+ * bnx2x_get_channels - gets the number of RSS queues.
+ *
+ * @dev: net device
+ * @channels: returns the number of max / current queues
+ */
+static void bnx2x_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ channels->max_combined = BNX2X_MAX_RSS_COUNT(bp);
+ channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp);
+}
+
+/**
+ * bnx2x_change_num_queues - change the number of RSS queues.
+ *
+ * @bp: bnx2x private structure
+ *
+ * Re-configure interrupt mode to get the new number of MSI-X
+ * vectors and re-add NAPI objects.
+ */
+static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
+{
+ bnx2x_del_all_napi(bp);
+ bnx2x_disable_msi(bp);
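+	/* Non-ethernet contexts are counted on top of the requested RSS queues */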
+ BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
+ bnx2x_set_int_mode(bp);
+ bnx2x_add_all_napi(bp);
+}
+
+/**
+ * bnx2x_set_channels - sets the number of RSS queues.
+ *
+ * @dev: net device
+ * @channels: includes the number of queues requested
+ */
+static int bnx2x_set_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
+ channels->rx_count, channels->tx_count, channels->other_count,
+ channels->combined_count);
+
+ /* We don't support separate rx / tx channels.
+ * We don't allow setting 'other' channels.
+ */
+ if (channels->rx_count || channels->tx_count || channels->other_count
+ || (channels->combined_count == 0) ||
+ (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) {
+ DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n");
+ return -EINVAL;
+ }
+
+ /* Check if there was a change in the active parameters */
+ if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) {
+ DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n");
+ return 0;
+ }
+
+ /* Set the requested number of queues in bp context.
+ * Note that the actual number of queues created during load may be
+ * less than requested if memory is low.
+ */
+ if (unlikely(!netif_running(dev))) {
+ bnx2x_change_num_queues(bp, channels->combined_count);
+ return 0;
+ }
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_change_num_queues(bp, channels->combined_count);
+ return bnx2x_nic_load(bp, LOAD_NORMAL);
}
static const struct ethtool_ops bnx2x_ethtool_ops = {
@@ -2469,9 +2968,17 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
+ .set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh_indir = bnx2x_get_rxfh_indir,
.set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_channels = bnx2x_get_channels,
+ .set_channels = bnx2x_set_channels,
+ .get_module_info = bnx2x_get_module_info,
+ .get_module_eeprom = bnx2x_get_module_eeprom,
+ .get_eee = bnx2x_get_eee,
+ .set_eee = bnx2x_set_eee,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void bnx2x_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 426f77aa721a..bbc66ced9c25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -321,9 +321,7 @@
#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
-/**
- * This file defines HSI constants common to all microcode flows
- */
+/* This file defines HSI constants common to all microcode flows */
#define PROTOCOL_STATE_BIT_OFFSET 6
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index a440a8ba85f2..76b6e65790f8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -10,6 +10,7 @@
#define BNX2X_HSI_H
#include "bnx2x_fw_defs.h"
+#include "bnx2x_mfw_req.h"
#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
@@ -33,12 +34,6 @@ struct license_key {
u32 reserved_b[4];
};
-
-#define PORT_0 0
-#define PORT_1 1
-#define PORT_MAX 2
-#define NVM_PATH_MAX 2
-
/****************************************************************************
* Shared HW configuration *
****************************************************************************/
@@ -1067,8 +1062,18 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
uses the same defines as link_config */
u32 mfw_wol_link_cfg2; /* 0x480 */
- u32 Reserved2[17]; /* 0x484 */
+ /* EEE power saving mode */
+ u32 eee_power_mode; /* 0x484 */
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_MASK 0x000000FF
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT 0
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED 0x00000001
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE 0x00000002
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY 0x00000003
+
+
+ u32 Reserved2[16]; /* 0x488 */
};
@@ -1140,6 +1145,7 @@ struct drv_port_mb {
u32 link_status;
/* Driver should update this field on any link change event */
+ #define LINK_STATUS_NONE (0<<0)
#define LINK_STATUS_LINK_FLAG_MASK 0x00000001
#define LINK_STATUS_LINK_UP 0x00000001
#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
@@ -1197,6 +1203,7 @@ struct drv_port_mb {
#define LINK_STATUS_PFC_ENABLED 0x20000000
#define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
+ #define LINK_STATUS_SFP_TX_FAULT 0x80000000
u32 port_stx;
@@ -1240,9 +1247,11 @@ struct drv_func_mb {
#define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
#define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
+ #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209
#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
+ #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
@@ -1255,6 +1264,8 @@ struct drv_func_mb {
#define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
#define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
+ #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
+
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
@@ -1320,6 +1331,8 @@ struct drv_func_mb {
#define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
#define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
+ #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
+
#define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
#define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
@@ -1383,6 +1396,8 @@ struct drv_func_mb {
#define DRV_STATUS_DRV_INFO_REQ 0x04000000
+ #define DRV_STATUS_EEE_NEGOTIATION_RESULTS 0x08000000
+
u32 virt_mac_upper;
#define VIRT_MAC_SIGN_MASK 0xffff0000
#define VIRT_MAC_SIGNATURE 0x564d0000
@@ -1613,6 +1628,11 @@ struct fw_flr_mb {
struct fw_flr_ack ack;
};
+struct eee_remote_vals {
+ u32 tx_tw;
+ u32 rx_tw;
+};
+
 /**** SUPPORT FOR SHMEM ARRAYS ***
 * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
 * define arrays with storage types smaller than unsigned dwords.
@@ -2053,6 +2073,41 @@ struct shmem2_region {
#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
u32 ibft_host_addr; /* initialized by option ROM */
+ struct eee_remote_vals eee_remote_vals[PORT_MAX];
+ u32 reserved[E2_FUNC_MAX];
+
+
+ /* the status of EEE auto-negotiation
+ * bits 15:0 the configured tx-lpi entry timer value. Depends on bit 31.
+ * bits 19:16 the supported modes for EEE.
+ * bits 23:20 the speeds advertised for EEE.
+ * bits 27:24 the speeds the Link partner advertised for EEE.
+	 * The supported/adv. modes in bits 27:16 originate from the
+ * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
+ * bit 28 when 1'b1 EEE was requested.
+ * bit 29 when 1'b1 tx lpi was requested.
+ * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff
+ * 30:29 are 2'b11.
+ * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as
+	 * value. When 1'b1 those bits contain a value times 16 microseconds.
+ */
+ u32 eee_status[PORT_MAX];
+ #define SHMEM_EEE_TIMER_MASK 0x0000ffff
+ #define SHMEM_EEE_SUPPORTED_MASK 0x000f0000
+ #define SHMEM_EEE_SUPPORTED_SHIFT 16
+ #define SHMEM_EEE_ADV_STATUS_MASK 0x00f00000
+ #define SHMEM_EEE_100M_ADV (1<<0)
+ #define SHMEM_EEE_1G_ADV (1<<1)
+ #define SHMEM_EEE_10G_ADV (1<<2)
+ #define SHMEM_EEE_ADV_STATUS_SHIFT 20
+ #define SHMEM_EEE_LP_ADV_STATUS_MASK 0x0f000000
+ #define SHMEM_EEE_LP_ADV_STATUS_SHIFT 24
+ #define SHMEM_EEE_REQUESTED_BIT 0x10000000
+ #define SHMEM_EEE_LPI_REQUESTED_BIT 0x20000000
+ #define SHMEM_EEE_ACTIVE_BIT 0x40000000
+ #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000
+
+ u32 sizeof_port_stats;
};
@@ -2599,6 +2654,9 @@ struct host_port_stats {
u32 pfc_frames_tx_lo;
u32 pfc_frames_rx_hi;
u32 pfc_frames_rx_lo;
+
+ u32 eee_lpi_count_hi;
+ u32 eee_lpi_count_lo;
};
@@ -2638,118 +2696,6 @@ struct host_func_stats {
/* VIC definitions */
#define VICSTATST_UIF_INDEX 2
-/* current drv_info version */
-#define DRV_INFO_CUR_VER 1
-
-/* drv_info op codes supported */
-enum drv_info_opcode {
- ETH_STATS_OPCODE,
- FCOE_STATS_OPCODE,
- ISCSI_STATS_OPCODE
-};
-
-#define ETH_STAT_INFO_VERSION_LEN 12
-/* Per PCI Function Ethernet Statistics required from the driver */
-struct eth_stats_info {
- /* Function's Driver Version. padded to 12 */
- u8 version[ETH_STAT_INFO_VERSION_LEN];
- /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
- u8 mac_local[8];
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
- u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
- u32 feature_flags; /* Feature_Flags. */
-#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
-#define FEATURE_ETH_LSO_MASK 0x02
-#define FEATURE_ETH_BOOTMODE_MASK 0x1C
-#define FEATURE_ETH_BOOTMODE_SHIFT 2
-#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
-#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
-#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
-#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
-#define FEATURE_ETH_TOE_MASK 0x20
- u32 lso_max_size; /* LSO MaxOffloadSize. */
- u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
- /* Num Offloaded Connections TCP_IPv4. */
- u32 ipv4_ofld_cnt;
- /* Num Offloaded Connections TCP_IPv6. */
- u32 ipv6_ofld_cnt;
- u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
- u32 txq_size; /* TX Descriptors Queue Size */
- u32 rxq_size; /* RX Descriptors Queue Size */
- /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
- u32 txq_avg_depth;
- /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
- u32 rxq_avg_depth;
- /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
- u32 iov_offload;
- /* Number of NetQueue/VMQ Config'd. */
- u32 netq_cnt;
- u32 vf_cnt; /* Num VF assigned to this PF. */
-};
-
-/* Per PCI Function FCOE Statistics required from the driver */
-struct fcoe_stats_info {
- u8 version[12]; /* Function's Driver Version. */
- u8 mac_local[8]; /* Locally Admin Addr. */
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
- /* QoS Priority (per 802.1p). 0-7255 */
- u32 qos_priority;
- u32 txq_size; /* FCoE TX Descriptors Queue Size. */
- u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
- /* FCoE TX Descriptor Queue Avg Depth. */
- u32 txq_avg_depth;
- /* FCoE RX Descriptors Queue Avg Depth. */
- u32 rxq_avg_depth;
- u32 rx_frames_lo; /* FCoE RX Frames received. */
- u32 rx_frames_hi; /* FCoE RX Frames received. */
- u32 rx_bytes_lo; /* FCoE RX Bytes received. */
- u32 rx_bytes_hi; /* FCoE RX Bytes received. */
- u32 tx_frames_lo; /* FCoE TX Frames sent. */
- u32 tx_frames_hi; /* FCoE TX Frames sent. */
- u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
- u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
-};
-
-/* Per PCI Function iSCSI Statistics required from the driver*/
-struct iscsi_stats_info {
- u8 version[12]; /* Function's Driver Version. */
- u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- /* QoS Priority (per 802.1p). 0-7255 */
- u32 qos_priority;
- u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
- u8 ww_port_name[64]; /* iSCSI World wide port name */
- u8 boot_target_name[64];/* iSCSI Boot Target Name. */
- u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
- u32 boot_target_portal; /* iSCSI Boot Target Portal. */
- u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
- u32 max_frame_size; /* Max Frame Size. bytes */
- u32 txq_size; /* PDU TX Descriptors Queue Size. */
- u32 rxq_size; /* PDU RX Descriptors Queue Size. */
- u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
- u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
- u32 rx_pdus_lo; /* iSCSI PDUs received. */
- u32 rx_pdus_hi; /* iSCSI PDUs received. */
- u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
- u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
- u32 tx_pdus_lo; /* iSCSI PDUs sent. */
- u32 tx_pdus_hi; /* iSCSI PDUs sent. */
- u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
- u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
- u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
- * 9 nibbles, the position of each nibble
- * represents the C-PCP value, the value
- * of the nibble = S-PCP value.
- */
-};
-
-union drv_info_to_mcp {
- struct eth_stats_info ether_stat;
- struct fcoe_stats_info fcoe_stat;
- struct iscsi_stats_info iscsi_stat;
-};
/* stats collected for afex.
* NOTE: structure is exactly as expected to be received by the switch.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 6e7d5c0843b4..f4beb46c4709 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -285,7 +285,6 @@
#define ETS_E3B0_PBF_MIN_W_VAL (10000)
#define MAX_PACKET_SIZE (9700)
-#define WC_UC_TIMEOUT 100
#define MAX_KR_LINK_RETRY 4
/**********************************************************/
@@ -1306,6 +1305,94 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
return 0;
}
+
+/******************************************************************/
+/* EEE section */
+/******************************************************************/
+static u8 bnx2x_eee_has_cap(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+
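+	/* The value at shmem2_base is the shmem2 region size; eee_status[]
+	 * is present only when the region extends past its offset.
+	 */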
+ if (REG_RD(bp, params->shmem2_base) <=
+ offsetof(struct shmem2_region, eee_status[params->port]))
+ return 0;
+
+ return 1;
+}
+
+static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
+{
+ switch (nvram_mode) {
+ case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
+ *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
+ *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
+ *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
+ break;
+ default:
+ *idle_timer = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
+{
+ switch (idle_timer) {
+ case EEE_MODE_NVRAM_BALANCED_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
+ break;
+ case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
+ break;
+ case EEE_MODE_NVRAM_LATENCY_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
+ break;
+ default:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
+ break;
+ }
+
+ return 0;
+}
+
+static u32 bnx2x_eee_calc_timer(struct link_params *params)
+{
+ u32 eee_mode, eee_idle;
+ struct bnx2x *bp = params->bp;
+
+ if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* time value in eee_mode --> used directly*/
+ eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
+ } else {
+ /* hsi value in eee_mode --> time */
+ if (bnx2x_eee_nvram_to_time(params->eee_mode &
+ EEE_MODE_NVRAM_MASK,
+ &eee_idle))
+ return 0;
+ }
+ } else {
+ /* hsi values in nvram --> time*/
+ eee_mode = ((REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+
+ if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
+ return 0;
+ }
+
+ return eee_idle;
+}
+
+
/******************************************************************/
/* PFC section */
/******************************************************************/
@@ -1540,7 +1627,7 @@ static void bnx2x_umac_enable(struct link_params *params,
/* Reset UMAC */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
@@ -1631,7 +1718,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
* ports of the path
*/
- if ((CHIP_NUM(bp) == CHIP_NUM_57840) &&
+ if ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) &&
(REG_RD(bp, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC)) {
DP(NETIF_MSG_LINK,
@@ -1642,7 +1729,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
/* Hard reset */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
MISC_REGISTERS_RESET_REG_2_XMAC);
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
MISC_REGISTERS_RESET_REG_2_XMAC);
@@ -1672,7 +1759,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
/* Soft reset */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
@@ -1730,6 +1817,14 @@ static int bnx2x_xmac_enable(struct link_params *params,
/* update PFC */
bnx2x_update_pfc_xmac(params, vars, 0);
+ if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+ DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n");
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
+ } else {
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0);
+ }
+
/* Enable TX and RX */
val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
@@ -1785,11 +1880,6 @@ static int bnx2x_emac_enable(struct link_params *params,
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_RESET);
- if (CHIP_REV_IS_SLOW(bp)) {
- /* config GMII mode */
- val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
- EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
- } else { /* ASIC */
/* pause enable/disable */
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
EMAC_RX_MODE_FLOW_EN);
@@ -1812,7 +1902,6 @@ static int bnx2x_emac_enable(struct link_params *params,
} else
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_FLOW_EN);
- }
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
@@ -1851,23 +1940,23 @@ static int bnx2x_emac_enable(struct link_params *params,
val &= ~0x810;
EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
- /* enable emac */
+ /* Enable emac */
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
- /* enable emac for jumbo packets */
+ /* Enable emac for jumbo packets */
EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
(EMAC_RX_MTU_SIZE_JUMBO_ENA |
(ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
- /* strip CRC */
+ /* Strip CRC */
REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
- /* disable the NIG in/out to the bmac */
+ /* Disable the NIG in/out to the bmac */
REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
- /* enable the NIG in/out to the emac */
+ /* Enable the NIG in/out to the emac */
REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
val = 0;
if ((params->feature_config_flags &
@@ -1902,7 +1991,7 @@ static void bnx2x_update_pfc_bmac1(struct link_params *params,
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
- /* tx control */
+ /* TX control */
val = 0xc0;
if (!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) &&
@@ -1962,7 +2051,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
wb_data[0] &= ~(1<<2);
} else {
DP(NETIF_MSG_LINK, "PFC is disabled\n");
- /* disable PFC RX & TX & STATS and set 8 COS */
+ /* Disable PFC RX & TX & STATS and set 8 COS */
wb_data[0] = 0x8;
wb_data[1] = 0;
}
@@ -2056,7 +2145,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2084,7 +2173,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2114,7 +2203,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2132,7 +2221,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2189,7 +2278,7 @@ static void bnx2x_pfc_brb_get_e3b0_config_params(
if (pfc_params->cos0_pauseable !=
pfc_params->cos1_pauseable) {
- /* nonpauseable= Lossy + pauseable = Lossless*/
+ /* Nonpauseable= Lossy + pauseable = Lossless*/
e3b0_val->lb_guarantied =
PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
e3b0_val->mac_0_class_t_guarantied =
@@ -2388,9 +2477,9 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
* This function is needed because NIG ARB_CREDIT_WEIGHT_X are
* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable.
******************************************************************************/
-int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
- u8 cos_entry,
- u32 priority_mask, u8 port)
+static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
+ u8 cos_entry,
+ u32 priority_mask, u8 port)
{
u32 nig_reg_rx_priority_mask_add = 0;
@@ -2440,6 +2529,16 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
port_mb[params->port].link_status), link_status);
}
+static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (bnx2x_eee_has_cap(params))
+ REG_WR(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]), eee_status);
+}
+
static void bnx2x_update_pfc_nig(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2507,7 +2606,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
- /* output enable for RX_XCM # IF */
+ /* Output enable for RX_XCM # IF */
REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
NIG_REG_XCM0_OUT_EN, xcm_out_en);
@@ -2556,10 +2655,10 @@ int bnx2x_update_pfc(struct link_params *params,
bnx2x_update_mng(params, vars->link_status);
- /* update NIG params */
+ /* Update NIG params */
bnx2x_update_pfc_nig(params, vars, pfc_params);
- /* update BRB params */
+ /* Update BRB params */
bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
if (bnx2x_status)
return bnx2x_status;
@@ -2614,7 +2713,7 @@ static int bnx2x_bmac1_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
wb_data, 2);
- /* tx MAC SA */
+ /* TX MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
@@ -2623,7 +2722,7 @@ static int bnx2x_bmac1_enable(struct link_params *params,
params->mac_addr[1]);
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
- /* mac control */
+ /* MAC control */
val = 0x3;
if (is_lb) {
val |= 0x4;
@@ -2633,24 +2732,24 @@ static int bnx2x_bmac1_enable(struct link_params *params,
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
- /* set rx mtu */
+ /* Set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
bnx2x_update_pfc_bmac1(params, vars);
- /* set tx mtu */
+ /* Set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
- /* set cnt max size */
+ /* Set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
- /* configure safc */
+ /* Configure SAFC */
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
@@ -2684,7 +2783,7 @@ static int bnx2x_bmac2_enable(struct link_params *params,
udelay(30);
- /* tx MAC SA */
+ /* TX MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
@@ -2703,18 +2802,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
wb_data, 2);
udelay(30);
- /* set rx mtu */
+ /* Set RX MTU */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
udelay(30);
- /* set tx mtu */
+ /* Set TX MTU */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
udelay(30);
- /* set cnt max size */
+ /* Set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
@@ -2732,15 +2831,15 @@ static int bnx2x_bmac_enable(struct link_params *params,
u8 port = params->port;
struct bnx2x *bp = params->bp;
u32 val;
- /* reset and unreset the BigMac */
+ /* Reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- msleep(1);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- /* enable access for bmac registers */
+ /* Enable access for bmac registers */
REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
/* Enable BMAC according to BMAC type*/
@@ -2798,7 +2897,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
BIGMAC_REGISTER_BMAC_CONTROL,
wb_data, 2);
}
- msleep(1);
+ usleep_range(1000, 2000);
}
}
@@ -2810,17 +2909,16 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
u32 init_crd, crd;
u32 count = 1000;
- /* disable port */
+ /* Disable port */
REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
- /* wait for init credit */
+ /* Wait for init credit */
init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
while ((init_crd != crd) && count) {
- msleep(5);
-
+ usleep_range(5000, 10000);
crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
count--;
}
@@ -2837,18 +2935,18 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
line_speed == SPEED_1000 ||
line_speed == SPEED_2500) {
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
- /* update threshold */
+ /* Update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
- /* update init credit */
+ /* Update init credit */
init_crd = 778; /* (800-18-4) */
} else {
u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
ETH_OVREHEAD)/16;
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
- /* update threshold */
+ /* Update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
- /* update init credit */
+ /* Update init credit */
switch (line_speed) {
case SPEED_10000:
init_crd = thresh + 553 - 22;
@@ -2863,12 +2961,12 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
line_speed, init_crd);
- /* probe the credit changes */
+ /* Probe the credit changes */
REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
- msleep(5);
+ usleep_range(5000, 10000);
REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
- /* enable port */
+ /* Enable port */
REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
return 0;
}
@@ -2935,7 +3033,7 @@ static int bnx2x_cl22_write(struct bnx2x *bp,
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
mode & ~EMAC_MDIO_MODE_CLAUSE_45);
- /* address */
+ /* Address */
tmp = ((phy->addr << 21) | (reg << 16) | val |
EMAC_MDIO_COMM_COMMAND_WRITE_22 |
EMAC_MDIO_COMM_START_BUSY);
@@ -2971,7 +3069,7 @@ static int bnx2x_cl22_read(struct bnx2x *bp,
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
mode & ~EMAC_MDIO_MODE_CLAUSE_45);
- /* address */
+ /* Address */
val = ((phy->addr << 21) | (reg << 16) |
EMAC_MDIO_COMM_COMMAND_READ_22 |
EMAC_MDIO_COMM_START_BUSY);
@@ -3009,7 +3107,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
- /* address */
+ /* Address */
val = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
@@ -3030,7 +3128,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
*ret_val = 0;
rc = -EFAULT;
} else {
- /* data */
+ /* Data */
val = ((phy->addr << 21) | (devad << 16) |
EMAC_MDIO_COMM_COMMAND_READ_45 |
EMAC_MDIO_COMM_START_BUSY);
@@ -3078,7 +3176,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
- /* address */
+ /* Address */
tmp = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
@@ -3098,7 +3196,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
} else {
- /* data */
+ /* Data */
tmp = ((phy->addr << 21) | (devad << 16) | val |
EMAC_MDIO_COMM_COMMAND_WRITE_45 |
EMAC_MDIO_COMM_START_BUSY);
@@ -3188,23 +3286,23 @@ static int bnx2x_bsc_read(struct link_params *params,
xfer_cnt = 16 - lc_addr;
- /* enable the engine */
+ /* Enable the engine */
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
val |= MCPR_IMC_COMMAND_ENABLE;
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* program slave device ID */
+ /* Program slave device ID */
val = (sl_devid << 16) | sl_addr;
REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
- /* start xfer with 0 byte to update the address pointer ???*/
+ /* Start xfer with 0 byte to update the address pointer ???*/
val = (MCPR_IMC_COMMAND_ENABLE) |
(MCPR_IMC_COMMAND_WRITE_OP <<
MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
(lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* poll for completion */
+ /* Poll for completion */
i = 0;
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
@@ -3220,7 +3318,7 @@ static int bnx2x_bsc_read(struct link_params *params,
if (rc == -EFAULT)
return rc;
- /* start xfer with read op */
+ /* Start xfer with read op */
val = (MCPR_IMC_COMMAND_ENABLE) |
(MCPR_IMC_COMMAND_READ_OP <<
MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
@@ -3228,7 +3326,7 @@ static int bnx2x_bsc_read(struct link_params *params,
(xfer_cnt);
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* poll for completion */
+ /* Poll for completion */
i = 0;
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
@@ -3331,7 +3429,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
port = port ^ 1;
lane = (port<<1) + path;
- } else { /* two port mode - no port swap */
+ } else { /* Two port mode - no port swap */
/* Figure out path swap value */
path_swap_ovr =
@@ -3409,7 +3507,7 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
val = SERDES_RESET_BITS << (port*16);
- /* reset and unreset the SerDes/XGXS */
+ /* Reset and unreset the SerDes/XGXS */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
@@ -3430,7 +3528,7 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
val = XGXS_RESET_BITS << (port*16);
- /* reset and unreset the SerDes/XGXS */
+ /* Reset and unreset the SerDes/XGXS */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
@@ -3522,7 +3620,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
{
u16 val;
struct bnx2x *bp = params->bp;
- /* read modify write pause advertizing */
+ /* Read modify write pause advertising */
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
@@ -3657,44 +3755,35 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
- u16 val16 = 0, lane, bam37 = 0;
- struct bnx2x *bp = params->bp;
+ u16 val16 = 0, lane, i;
+ struct bnx2x *bp = params->bp;
+ static struct bnx2x_reg_set reg_set[] = {
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555},
+ {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
+ /* Disable Autoneg: re-enable it after adv is done. */
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}
+ };
DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
/* Set to default registers that may be overriden by 10G force */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
- MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, 0x7415);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190);
- /* Disable Autoneg: re-enable it after adv is done. */
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0);
+ for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
/* Check adding advertisement for 1G KX */
if (((vars->line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
(vars->line_speed == SPEED_1000)) {
- u16 sd_digital;
+ u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
val16 |= (1<<5);
/* Enable CL37 1G Parallel Detect */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
- (sd_digital | 0x1));
-
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
DP(NETIF_MSG_LINK, "Advertize 1G\n");
}
if (((vars->line_speed == SPEED_AUTO_NEG) &&
@@ -3704,7 +3793,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
val16 |= (1<<7);
/* Enable 10G Parallel Detect */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
+ MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
DP(NETIF_MSG_LINK, "Advertize 10G\n");
}
@@ -3738,10 +3827,9 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
offsetof(struct shmem_region, dev_info.
port_hw_config[params->port].default_cfg)) &
PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL,
+ 1);
DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
}
@@ -3755,11 +3843,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
}
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, &val16);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
/* Over 1G - AN local device user page 1 */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
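
The rewrite above replaces long runs of individual bnx2x_cl45_write() calls with a static table of {devad, reg, val} triplets walked by a loop, and collapses read-then-write-with-OR pairs into bnx2x_cl45_read_or_write(), which from the replaced sequences appears to OR the given mask into the current register value. A compilable standalone sketch of the shape of that pattern follows; the CL45 accessors are stubs and the triplet values are placeholders, not the driver's real init sequence.

#include <stdint.h>
#include <stdio.h>

struct reg_set {			/* mirrors bnx2x_reg_set: devad/reg/val */
	uint8_t  devad;
	uint16_t reg;
	uint16_t val;
};

static void cl45_write(uint8_t devad, uint16_t reg, uint16_t val)
{
	printf("write %x.%04x = 0x%04x\n", devad, reg, val);	/* stub */
}

static uint16_t cl45_read(uint8_t devad, uint16_t reg)
{
	return 0;						/* stub */
}

/* Read-modify-write helper: OR 'mask' into the current register value. */
static void cl45_read_or_write(uint8_t devad, uint16_t reg, uint16_t mask)
{
	cl45_write(devad, reg, cl45_read(devad, reg) | mask);
}

int main(void)
{
	static const struct reg_set init_seq[] = {
		{ 0x3, 0x8015, 0x0007 },	/* placeholder triplets */
		{ 0x7, 0x8131, 0x0000 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(init_seq) / sizeof(init_seq[0]); i++)
		cl45_write(init_seq[i].devad, init_seq[i].reg, init_seq[i].val);

	cl45_read_or_write(0x3, 0x8308, 0x0100);	/* was a read + OR + write */
	return 0;
}
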
@@ -3776,50 +3861,35 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 val;
-
- /* Disable Autoneg */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL3_UP1, 0x1);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, 0xa);
-
- /* Disable CL36 PCS Tx */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0);
-
- /* Double Wide Single Data Rate @ pll rate */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF);
-
- /* Leave cl72 training enable, needed for KR */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ u16 i;
+ static struct bnx2x_reg_set reg_set[] = {
+ /* Disable Autoneg */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x3f00},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
+ /* Disable CL36 PCS Tx */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0},
+ /* Double Wide Single Data Rate @ pll rate */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF},
+ /* Leave cl72 training enable, needed for KR */
+ {MDIO_PMA_DEVAD,
MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
- 0x2);
+ 0x2}
+ };
+
+ for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
/* Leave CL72 enabled */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
- &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
- val | 0x3800);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x3800);
/* Set speed via PMA/PMD register */
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
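
The loop bounds in these tables are written as sizeof(reg_set)/sizeof(struct bnx2x_reg_set), i.e. the element count of the array; the kernel normally spells that ARRAY_SIZE() (include/linux/kernel.h). A sketch of the plain-C equivalent of that bound, assuming the simple form without the kernel's extra array-type checking:

/* Element count of a true array, as used for the reg_set loops above. */
#define ARRAY_SIZE_SKETCH(a)	(sizeof(a) / sizeof((a)[0]))
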
@@ -3840,7 +3910,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, 0xF9);
- /* set and clear loopback to cause a reset to 64/66 decoder */
+ /* Set and clear loopback to cause a reset to 64/66 decoder */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -3855,16 +3925,12 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 misc1_val, tap_val, tx_driver_val, lane, val;
/* Hold rxSeqStart */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
/* Hold tx_fifo_reset */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1);
/* Disable CL73 AN */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
@@ -3876,10 +3942,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
/* Disable 100FX Idle detect */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, (val | 0x0080));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, 0x0080);
/* Set Block address to Remote PHY & Clear forced_speed[5] */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3940,16 +4004,20 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
tx_driver_val);
/* Enable fiber mode, enable and invert sig_det */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd);
/* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, &val);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
+
+ /* Enable LPI pass through */
+ DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080);
+ MDIO_WC_REG_EEE_COMBO_CONTROL0,
+ 0x7c);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
/* 10G XFI Full Duplex */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -4139,40 +4207,35 @@ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
u16 lane)
{
struct bnx2x *bp = params->bp;
- u16 val16;
-
+ u16 i;
+ static struct bnx2x_reg_set wc_regs[] = {
+ {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ 0x0195},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ 0x0007},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
+ 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140}
+ };
/* Set XFI clock comp as default. */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, (3<<13));
+
+ for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
+ wc_regs[i].val);
- bnx2x_warpcore_reset_lane(bp, phy, 1);
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL1, 0x014a);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, 0x0800);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, 0x8008);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000);
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX_FIR_TAP, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140);
- bnx2x_warpcore_reset_lane(bp, phy, 0);
+
}
static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
@@ -4260,7 +4323,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
if (!vars->turn_to_run_wc_rt)
return;
- /* return if there is no link partner */
+ /* Return if there is no link partner */
if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
return;
@@ -4294,7 +4357,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
bnx2x_warpcore_reset_lane(bp, phy, 1);
bnx2x_warpcore_reset_lane(bp, phy, 0);
- /* restart Autoneg */
+ /* Restart Autoneg */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
@@ -4311,6 +4374,23 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
} /*params->rx_tx_asic_rst*/
}
+static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
+ struct bnx2x *bp = params->bp;
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] ==
+ SPEED_10000) &&
+ (phy->media_type != ETH_PHY_SFP_1G_FIBER)) {
+ DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
+ bnx2x_warpcore_set_10G_XFI(phy, params, 0);
+ } else {
+ DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0);
+ }
+}
+
static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -4371,19 +4451,11 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
break;
case PORT_HW_CFG_NET_SERDES_IF_SFI:
-
- bnx2x_warpcore_clear_regs(phy, params, lane);
- if (vars->line_speed == SPEED_10000) {
- DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
- bnx2x_warpcore_set_10G_XFI(phy, params, 0);
- } else if (vars->line_speed == SPEED_1000) {
- DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
- bnx2x_warpcore_set_sgmii_speed(
- phy, params, 1, 0);
- }
/* Issue Module detection */
if (bnx2x_is_sfp_module_plugged(phy, params))
bnx2x_sfp_module_detection(phy, params);
+
+ bnx2x_warpcore_config_sfi(phy, params);
break;
case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
@@ -4500,12 +4572,9 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0);
/* Enable 1G MDIO (1-copy) */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
- &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
- val16 | 0x10);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ 0x10);
/* Set 1G loopback based on lane (1-copy) */
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4518,22 +4587,19 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
bnx2x_set_aer_mmd(params, phy);
} else {
/* 10G & 20G */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
- 0x4000);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+ 0x4000);
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
}
}
-void bnx2x_sync_link(struct link_params *params,
- struct link_vars *vars)
+
+static void bnx2x_sync_link(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 link_10g_plus;
@@ -4606,7 +4672,7 @@ void bnx2x_sync_link(struct link_params *params,
USES_WARPCORE(bp) &&
(vars->line_speed == SPEED_1000))
vars->phy_flags |= PHY_SGMII_FLAG;
- /* anything 10 and over uses the bmac */
+ /* Anything 10 and over uses the bmac */
link_10g_plus = (vars->line_speed >= SPEED_10000);
if (link_10g_plus) {
@@ -4620,7 +4686,7 @@ void bnx2x_sync_link(struct link_params *params,
else
vars->mac_type = MAC_TYPE_EMAC;
}
- } else { /* link down */
+ } else { /* Link down */
DP(NETIF_MSG_LINK, "phy link down\n");
vars->phy_link_up = 0;
@@ -4629,10 +4695,12 @@ void bnx2x_sync_link(struct link_params *params,
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- /* indicate no mac active */
+ /* Indicate no mac active */
vars->mac_type = MAC_TYPE_NONE;
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ if (vars->link_status & LINK_STATUS_SFP_TX_FAULT)
+ vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG;
}
}
@@ -4698,7 +4766,7 @@ static void bnx2x_set_master_ln(struct link_params *params,
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
- /* set the master_ln for AN */
+ /* Set the master_ln for AN */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
@@ -4721,7 +4789,7 @@ static int bnx2x_reset_unicore(struct link_params *params,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
- /* reset the unicore */
+ /* Reset the unicore */
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4730,11 +4798,11 @@ static int bnx2x_reset_unicore(struct link_params *params,
if (set_serdes)
bnx2x_set_serdes_access(bp, params->port);
- /* wait for the reset to self clear */
+ /* Wait for the reset to self clear */
for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
udelay(5);
- /* the reset erased the previous bank value */
+ /* The reset erased the previous bank value */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4952,7 +5020,7 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
-/* program SerDes, forced speed */
+/* Program SerDes, forced speed */
static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -4960,7 +5028,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 reg_val;
- /* program duplex, disable autoneg and sgmii*/
+ /* Program duplex, disable autoneg and sgmii*/
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
@@ -4979,7 +5047,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, &reg_val);
- /* clearing the speed value before setting the right speed */
+ /* Clearing the speed value before setting the right speed */
DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
@@ -5008,7 +5076,7 @@ static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val = 0;
- /* set extended capabilities */
+ /* Set extended capabilities */
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
val |= MDIO_OVER_1G_UP1_2_5G;
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
@@ -5028,7 +5096,7 @@ static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 val;
- /* for AN, we are always publishing full duplex */
+ /* For AN, we are always publishing full duplex */
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
@@ -5090,14 +5158,14 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 control1;
- /* in SGMII mode, the unicore is always slave */
+ /* In SGMII mode, the unicore is always slave */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
&control1);
control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
- /* set sgmii mode (and not fiber) */
+ /* Set sgmii mode (and not fiber) */
control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
@@ -5106,9 +5174,9 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
control1);
- /* if forced speed */
+ /* If forced speed */
if (!(vars->line_speed == SPEED_AUTO_NEG)) {
- /* set speed, disable autoneg */
+ /* Set speed, disable autoneg */
u16 mii_control;
CL22_RD_OVER_CL45(bp, phy,
@@ -5129,16 +5197,16 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
break;
case SPEED_10:
- /* there is nothing to set for 10M */
+ /* There is nothing to set for 10M */
break;
default:
- /* invalid speed for SGMII */
+ /* Invalid speed for SGMII */
DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
vars->line_speed);
break;
}
- /* setting the full duplex */
+ /* Setting the full duplex */
if (phy->req_duplex == DUPLEX_FULL)
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
@@ -5148,7 +5216,7 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
mii_control);
} else { /* AN mode */
- /* enable and restart AN */
+ /* Enable and restart AN */
bnx2x_restart_autoneg(phy, params, 0);
}
}
@@ -5244,7 +5312,7 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- /* resolve from gp_status in case of AN complete and not sgmii */
+ /* Resolve from gp_status in case of AN complete and not sgmii */
if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
/* Update the advertised flow-controled of LD/LP in AN */
if (phy->req_line_speed == SPEED_AUTO_NEG)
@@ -5468,7 +5536,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
bnx2x_xgxs_an_resolve(phy, params, vars,
gp_status);
}
- } else { /* link_down */
+ } else { /* Link_down */
if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
SINGLE_MEDIA_DIRECT(params)) {
/* Check signal is detected */
@@ -5617,12 +5685,12 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
u16 tx_driver;
u16 bank;
- /* read precomp */
+ /* Read precomp */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_LP_UP2, &lp_up2);
- /* bits [10:7] at lp_up2, positioned at [15:12] */
+ /* Bits [10:7] at lp_up2, positioned at [15:12] */
lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
@@ -5636,7 +5704,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
bank,
MDIO_TX0_TX_DRIVER, &tx_driver);
- /* replace tx_driver bits [15:12] */
+ /* Replace tx_driver bits [15:12] */
if (lp_up2 !=
(tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
@@ -5732,16 +5800,16 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
bnx2x_set_preemphasis(phy, params);
- /* forced speed requested? */
+ /* Forced speed requested? */
if (vars->line_speed != SPEED_AUTO_NEG ||
(SINGLE_MEDIA_DIRECT(params) &&
params->loopback_mode == LOOPBACK_EXT)) {
DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
- /* disable autoneg */
+ /* Disable autoneg */
bnx2x_set_autoneg(phy, params, vars, 0);
- /* program speed and duplex */
+ /* Program speed and duplex */
bnx2x_program_serdes(phy, params, vars);
} else { /* AN_mode */
@@ -5750,14 +5818,14 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
/* AN enabled */
bnx2x_set_brcm_cl37_advertisement(phy, params);
- /* program duplex & pause advertisement (for aneg) */
+ /* Program duplex & pause advertisement (for aneg) */
bnx2x_set_ieee_aneg_advertisement(phy, params,
vars->ieee_fc);
- /* enable autoneg */
+ /* Enable autoneg */
bnx2x_set_autoneg(phy, params, vars, enable_cl73);
- /* enable and restart AN */
+ /* Enable and restart AN */
bnx2x_restart_autoneg(phy, params, enable_cl73);
}
@@ -5793,12 +5861,12 @@ static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
bnx2x_set_master_ln(params, phy);
rc = bnx2x_reset_unicore(params, phy, 0);
- /* reset the SerDes and wait for reset bit return low */
- if (rc != 0)
+ /* Reset the SerDes and wait for reset bit return low */
+ if (rc)
return rc;
bnx2x_set_aer_mmd(params, phy);
- /* setting the masterLn_def again after the reset */
+ /* Setting the masterLn_def again after the reset */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
bnx2x_set_master_ln(params, phy);
bnx2x_set_swap_lanes(params, phy);
@@ -5823,7 +5891,7 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
MDIO_PMA_REG_CTRL, &ctrl);
if (!(ctrl & (1<<15)))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (cnt == 1000)
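
Throughout this patch, millisecond-scale msleep() calls become usleep_range() pairs, as in the hunk above. The likely motivation (see Documentation/timers/timers-howto) is that msleep() of a few milliseconds can oversleep badly because it is jiffies-based, while usleep_range() uses hrtimers and gives the scheduler a coalescing window. The helper below is an illustrative restatement of that rule of thumb in kernel code and is not part of the driver.

#include <linux/delay.h>

/* Illustrative only: pick a delay primitive by duration, per timers-howto. */
static void phy_delay_us(unsigned int us)
{
	if (us < 10)
		udelay(us);			/* too short to sleep */
	else if (us < 20000)
		usleep_range(us, us * 2);	/* hrtimer-based, bounded */
	else
		msleep((us + 999) / 1000);	/* long sleeps: jiffies are fine */
}
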
@@ -6054,7 +6122,7 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
if (!CHIP_IS_E3(bp)) {
- /* change the uni_phy_addr in the nig */
+ /* Change the uni_phy_addr in the nig */
md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
port*0x18));
@@ -6074,11 +6142,11 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
(MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
0x6041);
msleep(200);
- /* set aer mmd back */
+ /* Set aer mmd back */
bnx2x_set_aer_mmd(params, phy);
if (!CHIP_IS_E3(bp)) {
- /* and md_devad */
+ /* And md_devad */
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
md_devad);
}
@@ -6275,7 +6343,7 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
MDIO_REG_BANK_GP_STATUS,
MDIO_GP_STATUS_TOP_AN_STATUS1,
&gp_status);
- /* link is up only if both local phy and external phy are up */
+ /* Link is up only if both local phy and external phy are up */
if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
return -ESRCH;
}
@@ -6296,7 +6364,9 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
serdes_phy_type = ((params->phy[phy_index].media_type ==
- ETH_PHY_SFP_FIBER) ||
+ ETH_PHY_SFPP_10G_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_SFP_1G_FIBER) ||
(params->phy[phy_index].media_type ==
ETH_PHY_XFP_FIBER) ||
(params->phy[phy_index].media_type ==
@@ -6397,7 +6467,7 @@ static int bnx2x_link_initialize(struct link_params *params,
static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
- /* reset the SerDes/XGXS */
+ /* Reset the SerDes/XGXS */
REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
(0x1ff << (params->port*16)));
}
@@ -6430,10 +6500,10 @@ static int bnx2x_update_link_down(struct link_params *params,
DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
- /* indicate no mac active */
+ /* Indicate no mac active */
vars->mac_type = MAC_TYPE_NONE;
- /* update shared memory */
+ /* Update shared memory */
vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
LINK_STATUS_LINK_UP |
LINK_STATUS_PHYSICAL_LINK_FLAG |
@@ -6446,15 +6516,15 @@ static int bnx2x_update_link_down(struct link_params *params,
vars->line_speed = 0;
bnx2x_update_mng(params, vars->link_status);
- /* activate nig drain */
+ /* Activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- msleep(10);
- /* reset BigMac/Xmac */
+ usleep_range(10000, 20000);
+ /* Reset BigMac/Xmac */
if (CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp)) {
bnx2x_bmac_rx_disable(bp, params->port);
@@ -6463,6 +6533,16 @@ static int bnx2x_update_link_down(struct link_params *params,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
}
if (CHIP_IS_E3(bp)) {
+ /* Prevent LPI Generation by chip */
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
+ 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
+ 0);
+ vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+ SHMEM_EEE_ACTIVE_BIT);
+
+ bnx2x_update_mng_eee(params, vars->eee_status);
bnx2x_xmac_disable(params);
bnx2x_umac_disable(params);
}
@@ -6502,6 +6582,16 @@ static int bnx2x_update_link_up(struct link_params *params,
bnx2x_umac_enable(params, vars, 0);
bnx2x_set_led(params, vars,
LED_MODE_OPER, vars->line_speed);
+
+ if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) &&
+ (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) {
+ DP(NETIF_MSG_LINK, "Enabling LPI assertion\n");
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 +
+ (params->port << 2), 1);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1);
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 +
+ (params->port << 2), 0xfc20);
+ }
}
if ((CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp))) {
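
The two hunks above are symmetric: on link-down the MISC_REG_CPMU_LP_FW_ENABLE_P0, MISC_REG_CPMU_LP_DR_ENABLE and MISC_REG_CPMU_LP_MASK_ENT_P0 registers are cleared so the chip cannot generate LPI, and on link-up they are re-armed only when EEE is both active and requested. A sketch of how that pairing could be written as one helper inside this file; the register names and values come from the hunks, the helper itself is illustrative.

/* Illustrative consolidation of the LPI enable/disable sequences above. */
static void sketch_set_lpi_assertion(struct bnx2x *bp, u8 port, u8 enable)
{
	REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (port << 2), enable ? 1 : 0);
	REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, enable ? 1 : 0);
	REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (port << 2),
	       enable ? 0xfc20 : 0);
}
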
@@ -6534,12 +6624,12 @@ static int bnx2x_update_link_up(struct link_params *params,
rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
vars->line_speed);
- /* disable drain */
+ /* Disable drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
- /* update shared memory */
+ /* Update shared memory */
bnx2x_update_mng(params, vars->link_status);
-
+ bnx2x_update_mng_eee(params, vars->eee_status);
/* Check remote fault */
for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
@@ -6583,6 +6673,8 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
phy_vars[phy_index].phy_link_up = 0;
phy_vars[phy_index].link_up = 0;
phy_vars[phy_index].fault_detected = 0;
+ /* different consideration, since vars holds inner state */
+ phy_vars[phy_index].eee_status = vars->eee_status;
}
if (USES_WARPCORE(bp))
@@ -6603,7 +6695,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
@@ -6712,6 +6804,9 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
vars->link_status |= LINK_STATUS_SERDES_LINK;
else
vars->link_status &= ~LINK_STATUS_SERDES_LINK;
+
+ vars->eee_status = phy_vars[active_external_phy].eee_status;
+
DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
active_external_phy);
}
@@ -6745,11 +6840,11 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
} else if (prev_line_speed != vars->line_speed) {
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
0);
- msleep(1);
+ usleep_range(1000, 2000);
}
}
- /* anything 10 and over uses the bmac */
+ /* Anything 10 and over uses the bmac */
link_10g_plus = (vars->line_speed >= SPEED_10000);
bnx2x_link_int_ack(params, vars, link_10g_plus);
@@ -6815,7 +6910,7 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
{
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
- msleep(1);
+ usleep_range(1000, 2000);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
@@ -6912,7 +7007,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
MDIO_PMA_REG_GEN_CTRL,
0x0001);
- /* ucode reboot and rst */
+ /* Ucode reboot and rst */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_GEN_CTRL,
@@ -6956,7 +7051,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
- msleep(1);
+ usleep_range(1000, 2000);
} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
((fw_msgout & 0xff) != 0x03 && (phy->type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
@@ -7050,11 +7145,11 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
"XAUI workaround has completed\n");
return 0;
}
- msleep(3);
+ usleep_range(3000, 6000);
}
break;
}
- msleep(3);
+ usleep_range(3000, 6000);
}
DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
return -EINVAL;
@@ -7128,7 +7223,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
- /* enable LASI */
+ /* Enable LASI */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
bnx2x_cl45_write(bp, phy,
@@ -7276,7 +7371,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
- /* clear the interrupt LASI status register */
+ /* Clear the interrupt LASI status register */
bnx2x_cl45_read(bp, phy,
MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
bnx2x_cl45_read(bp, phy,
@@ -7601,7 +7696,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val = 0;
u16 i;
- if (byte_cnt > 16) {
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 0xf\n");
return -EINVAL;
@@ -7655,7 +7750,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
- msleep(1);
+ usleep_range(1000, 2000);
}
return -EINVAL;
}
@@ -7692,7 +7787,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
u32 data_array[4];
u16 addr32;
struct bnx2x *bp = params->bp;
- if (byte_cnt > 16) {
+
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 16 bytes\n");
return -EINVAL;
@@ -7728,7 +7824,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val, i;
- if (byte_cnt > 16) {
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 0xf\n");
return -EINVAL;
@@ -7765,7 +7861,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Wait appropriate time for two-wire command to finish before
* polling the status register
*/
- msleep(1);
+ usleep_range(1000, 2000);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
@@ -7801,7 +7897,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
- msleep(1);
+ usleep_range(1000, 2000);
}
return -EINVAL;
@@ -7811,7 +7907,7 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf)
{
- int rc = -EINVAL;
+ int rc = -EOPNOTSUPP;
switch (phy->type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -7836,7 +7932,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u32 sync_offset = 0, phy_idx, media_types;
- u8 val, check_limiting_mode = 0;
+ u8 val[2], check_limiting_mode = 0;
*edc_mode = EDC_MODE_LIMITING;
phy->media_type = ETH_PHY_UNSPECIFIED;
@@ -7844,13 +7940,13 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
if (bnx2x_read_sfp_module_eeprom(phy,
params,
SFP_EEPROM_CON_TYPE_ADDR,
- 1,
- &val) != 0) {
+ 2,
+ (u8 *)val) != 0) {
DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
return -EINVAL;
}
- switch (val) {
+ switch (val[0]) {
case SFP_EEPROM_CON_TYPE_VAL_COPPER:
{
u8 copper_module_type;
@@ -7888,13 +7984,29 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
break;
}
case SFP_EEPROM_CON_TYPE_VAL_LC:
- phy->media_type = ETH_PHY_SFP_FIBER;
- DP(NETIF_MSG_LINK, "Optic module detected\n");
check_limiting_mode = 1;
+ if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
+ SFP_EEPROM_COMP_CODE_LR_MASK |
+ SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
+ DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+ phy->media_type = ETH_PHY_SFP_1G_FIBER;
+ phy->req_line_speed = SPEED_1000;
+ } else {
+ int idx, cfg_idx = 0;
+ DP(NETIF_MSG_LINK, "10G Optic module detected\n");
+ for (idx = INT_PHY; idx < MAX_PHYS; idx++) {
+ if (params->phy[idx].type == phy->type) {
+ cfg_idx = LINK_CONFIG_IDX(idx);
+ break;
+ }
+ }
+ phy->media_type = ETH_PHY_SFPP_10G_FIBER;
+ phy->req_line_speed = params->req_line_speed[cfg_idx];
+ }
break;
default:
DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
- val);
+ val[0]);
return -EINVAL;
}
sync_offset = params->shmem_base +
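
bnx2x_get_edc_mode() now reads two EEPROM bytes instead of one: the connector type plus the 10G Ethernet compliance codes, so an LC module with none of the SR/LR/LRM bits set is treated as a 1G optic and forced to SPEED_1000. The standalone model below mirrors only the decision shape of the hunk; the constant values follow the usual SFF-8472 layout but are placeholders here.

#include <stdint.h>

enum sfp_kind { SFP_COPPER, SFP_FIBER_1G, SFP_FIBER_10G, SFP_UNKNOWN };

#define CON_TYPE_LC		0x07		/* placeholder SFF-8472 values */
#define CON_TYPE_COPPER		0x21
#define COMP_CODE_SR		(1 << 4)
#define COMP_CODE_LR		(1 << 5)
#define COMP_CODE_LRM		(1 << 6)

static enum sfp_kind classify_sfp(const uint8_t eeprom[2])
{
	switch (eeprom[0]) {			/* byte 0: connector type */
	case CON_TYPE_COPPER:
		return SFP_COPPER;
	case CON_TYPE_LC:			/* byte 1: compliance codes */
		if (eeprom[1] & (COMP_CODE_SR | COMP_CODE_LR | COMP_CODE_LRM))
			return SFP_FIBER_10G;
		return SFP_FIBER_1G;		/* no 10G bit => 1G-only module */
	default:
		return SFP_UNKNOWN;
	}
}
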
@@ -7980,7 +8092,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
return 0;
}
- /* format the warning message */
+ /* Format the warning message */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
SFP_EEPROM_VENDOR_NAME_ADDR,
@@ -8026,7 +8138,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
timeout * 5);
return 0;
}
- msleep(5);
+ usleep_range(5000, 10000);
}
return -EINVAL;
}
@@ -8338,7 +8450,7 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
return -EINVAL;
} else if (bnx2x_verify_sfp_module(phy, params) != 0) {
- /* check SFP+ module compatibility */
+ /* Check SFP+ module compatibility */
DP(NETIF_MSG_LINK, "Module verification failed!!\n");
rc = -EINVAL;
/* Turn on fault module-detected led */
@@ -8401,14 +8513,34 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
/* Call the handling function in case module is detected */
if (gpio_val == 0) {
+ bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+ bnx2x_set_aer_mmd(params, phy);
+
bnx2x_power_sfp_module(params, phy, 1);
bnx2x_set_gpio_int(bp, gpio_num,
MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
gpio_port);
- if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) {
bnx2x_sfp_module_detection(phy, params);
- else
+ if (CHIP_IS_E3(bp)) {
+ u16 rx_tx_in_reset;
+ /* In case WC is out of reset, reconfigure the
+ * link speed while taking into account 1G
+ * module limitation.
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6,
+ &rx_tx_in_reset);
+ if (!rx_tx_in_reset) {
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_warpcore_config_sfi(phy, params);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+ }
+ }
+ } else {
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+ }
} else {
u32 val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
@@ -8469,7 +8601,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
MDIO_PMA_LASI_TXCTRL);
- /* clear LASI indication*/
+ /* Clear LASI indication*/
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
bnx2x_cl45_read(bp, phy,
@@ -8537,7 +8669,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
if (val)
break;
- msleep(10);
+ usleep_range(10000, 20000);
}
DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
if ((params->feature_config_flags &
@@ -8666,7 +8798,7 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
MDIO_PMA_REG_GEN_CTRL,
MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
- /* wait for 150ms for microcode load */
+ /* Wait for 150ms for microcode load */
msleep(150);
/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
@@ -8860,6 +8992,63 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
+static void bnx2x_8727_config_speed(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 tmp1, val;
+ /* Set option 1G speed */
+ if ((phy->req_line_speed == SPEED_1000) ||
+ (phy->media_type == ETH_PHY_SFP_1G_FIBER)) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
+ DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
+ /* Power down the XAUI until link is up in case of dual-media
+ * and 1G
+ */
+ if (DUAL_MEDIA(params)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val);
+ val |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val);
+ }
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
+ } else {
+ /* Since the 8727 has only single reset pin, need to set the 10G
+ * registers although it is default
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
+ 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
+ 0x0008);
+ }
+}
+
static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
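
The speed-selection block that used to live inline in bnx2x_8727_config_init() is extracted above into bnx2x_8727_config_speed(), so the same logic can be re-run from the module-absent handler further down once a newly plugged SFP has been identified. The small wrapper below only illustrates that intended re-detection flow; the two callees are from this patch, the wrapper itself is not.

/* Illustrative only: re-detect the module, then re-apply the speed setup. */
static void sketch_8727_redetect(struct bnx2x_phy *phy,
				 struct link_params *params)
{
	bnx2x_sfp_module_detection(phy, params);	/* may change media_type */
	bnx2x_8727_config_speed(phy, params);		/* honours 1G SFP limits */
}
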
@@ -8877,7 +9066,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
lasi_ctrl_val = 0x0006;
DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
- /* enable LASI */
+ /* Enable LASI */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
rx_alarm_ctrl_val);
@@ -8929,56 +9118,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
- /* Set option 1G speed */
- if (phy->req_line_speed == SPEED_1000) {
- DP(NETIF_MSG_LINK, "Setting 1G force\n");
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
- DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
- /* Power down the XAUI until link is up in case of dual-media
- * and 1G
- */
- if (DUAL_MEDIA(params)) {
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_GP, &val);
- val |= (3<<10);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_GP, val);
- }
- } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
- ((phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
- ((phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
-
- DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
- } else {
- /* Since the 8727 has only single reset pin, need to set the 10G
- * registers although it is default
- */
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
- 0x0020);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
- 0x0008);
- }
-
+ bnx2x_8727_config_speed(phy, params);
/* Set 2-wire transfer rate of SFP+ module EEPROM
* to 100Khz since some DACs(direct attached cables) do
* not work at 400Khz.
@@ -9105,6 +9245,9 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
bnx2x_sfp_module_detection(phy, params);
else
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+
+ /* Reconfigure link speed based on module type limitations */
+ bnx2x_8727_config_speed(phy, params);
}
DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
@@ -9585,9 +9728,9 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct link_params *params,
u16 fw_cmd,
- u16 cmd_args[])
+ u16 cmd_args[], int argc)
{
- u32 idx;
+ int idx;
u16 val;
struct bnx2x *bp = params->bp;
/* Write CMD_OPEN_OVERRIDE to STATUS reg */
@@ -9599,7 +9742,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
MDIO_84833_CMD_HDLR_STATUS, &val);
if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (idx >= PHY84833_CMDHDLR_WAIT) {
DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
@@ -9607,7 +9750,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
}
/* Prepare argument(s) and issue command */
- for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
MDIO_84833_CMD_HDLR_DATA1 + idx,
cmd_args[idx]);
@@ -9620,7 +9763,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
(val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if ((idx >= PHY84833_CMDHDLR_WAIT) ||
(val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
@@ -9628,7 +9771,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
return -EINVAL;
}
/* Gather returning data */
- for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
MDIO_84833_CMD_HDLR_DATA1 + idx,
&cmd_args[idx]);
@@ -9662,7 +9805,7 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
data[1] = (u16)pair_swap;
status = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_PAIR_SWAP, data);
+ PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS);
if (status == 0)
DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
@@ -9740,6 +9883,95 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
return 0;
}
+static int bnx2x_8483x_eee_timers(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 eee_idle = 0, eee_mode;
+ struct bnx2x *bp = params->bp;
+
+ eee_idle = bnx2x_eee_calc_timer(params);
+
+ if (eee_idle) {
+ REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
+ eee_idle);
+ } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
+ (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
+ (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
+ DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
+ return -EINVAL;
+ }
+
+ vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* eee_idle in 1us units --> eee_status in 16us units */
+ eee_idle >>= 4;
+ vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
+ SHMEM_EEE_TIME_OUTPUT_BIT;
+ } else {
+ if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
+ return -EINVAL;
+ vars->eee_status |= eee_mode;
+ }
+
+ return 0;
+}
+
+static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ struct bnx2x *bp = params->bp;
+ u16 cmd_args = 0;
+
+ DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
+
+ /* Make Certain LPI is disabled */
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
+
+ /* Prevent Phy from working in EEE and advertising it */
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "EEE disable failed.\n");
+ return rc;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0);
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+
+ return 0;
+}
+
+static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ struct bnx2x *bp = params->bp;
+ u16 cmd_args = 1;
+
+ DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
+
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "EEE enable failed.\n");
+ return rc;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8);
+
+ /* Mask events preventing LPI generation */
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
+
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+ vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT);
+
+ return 0;
+}
+
#define PHY84833_CONSTANT_LATENCY 1193
static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct link_params *params,
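
The three helpers added in the hunk above wire 10GBase-T EEE into the 848x3 init path: bnx2x_8483x_eee_timers() packs the computed idle timer into the shmem status word, and the enable/disable helpers issue PHY84833_CMD_SET_EEE_MODE with a single argument (1 or 0) and adjust the local advertisement register. The only non-obvious arithmetic is the timer packing, where a value computed in microseconds is stored in 16-microsecond units. A tiny worked example of that conversion; the mask value is a placeholder for SHMEM_EEE_TIMER_MASK.

#include <assert.h>
#include <stdint.h>

#define EEE_TIMER_MASK	0x00ffffffu	/* placeholder for SHMEM_EEE_TIMER_MASK */

static uint32_t pack_eee_timer(uint32_t idle_us)
{
	return (idle_us >> 4) & EEE_TIMER_MASK;	/* 1us units -> 16us units */
}

int main(void)
{
	assert(pack_eee_timer(800) == 50);	/* 800us == fifty 16us ticks */
	assert(pack_eee_timer(0) == 0);
	return 0;
}
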
@@ -9752,7 +9984,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
int rc = 0;
- msleep(1);
+ usleep_range(1000, 2000);
if (!(CHIP_IS_E1x(bp)))
port = BP_PATH(bp);
@@ -9839,8 +10071,9 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
cmd_args[3] = PHY84833_CONSTANT_LATENCY;
rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, cmd_args);
- if (rc != 0)
+ PHY84833_CMD_SET_EEE_MODE, cmd_args,
+ PHY84833_CMDHDLR_MAX_ARGS);
+ if (rc)
DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
}
if (initialize)
@@ -9864,6 +10097,48 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
MDIO_CTL_REG_84823_USER_CTRL_REG, val);
}
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_FW_REV, &val);
+
+ /* Configure EEE support */
+ if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) {
+ phy->flags |= FLAGS_EEE_10GBT;
+ vars->eee_status |= SHMEM_EEE_10G_ADV <<
+ SHMEM_EEE_SUPPORTED_SHIFT;
+ /* Propagate params' bits --> vars (for migration exposure) */
+ if (params->eee_mode & EEE_MODE_ENABLE_LPI)
+ vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
+
+ if (params->eee_mode & EEE_MODE_ADV_LPI)
+ vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
+
+ rc = bnx2x_8483x_eee_timers(params, vars);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
+ bnx2x_8483x_disable_eee(phy, params, vars);
+ return rc;
+ }
+
+ if ((params->req_duplex[actual_phy_selection] == DUPLEX_FULL) &&
+ (params->eee_mode & EEE_MODE_ADV_LPI) &&
+ (bnx2x_eee_calc_timer(params) ||
+ !(params->eee_mode & EEE_MODE_ENABLE_LPI)))
+ rc = bnx2x_8483x_enable_eee(phy, params, vars);
+ else
+ rc = bnx2x_8483x_disable_eee(phy, params, vars);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n");
+ return rc;
+ }
+ } else {
+ phy->flags &= ~FLAGS_EEE_10GBT;
+ vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
+ }
+
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
/* Bring PHY out of super isolate mode as the final step. */
bnx2x_cl45_read(bp, phy,
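
The EEE configuration block added above only advertises 10GBase-T EEE when the port is full duplex, advertising was requested via EEE_MODE_ADV_LPI, and a zero idle timer is only tolerated if local LPI generation is off; otherwise the disable path runs. The same condition is restated below as a standalone predicate, with mnemonic parameter names standing in for the EEE_MODE_* bits.

#include <stdint.h>

/* Non-zero if EEE should be advertised under the rules of the hunk above. */
static int should_advertise_eee(int full_duplex, int adv_requested,
				int lpi_enabled, uint32_t idle_timer_us)
{
	return full_duplex && adv_requested &&
	       (idle_timer_us != 0 || !lpi_enabled);
}
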
@@ -9918,17 +10193,19 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
legacy_status);
link_up = ((legacy_status & (1<<11)) == (1<<11));
- if (link_up) {
- legacy_speed = (legacy_status & (3<<9));
- if (legacy_speed == (0<<9))
- vars->line_speed = SPEED_10;
- else if (legacy_speed == (1<<9))
- vars->line_speed = SPEED_100;
- else if (legacy_speed == (2<<9))
- vars->line_speed = SPEED_1000;
- else /* Should not happen */
- vars->line_speed = 0;
+ legacy_speed = (legacy_status & (3<<9));
+ if (legacy_speed == (0<<9))
+ vars->line_speed = SPEED_10;
+ else if (legacy_speed == (1<<9))
+ vars->line_speed = SPEED_100;
+ else if (legacy_speed == (2<<9))
+ vars->line_speed = SPEED_1000;
+ else { /* Should not happen: Treat as link down */
+ vars->line_speed = 0;
+ link_up = 0;
+ }
+ if (link_up) {
if (legacy_status & (1<<8))
vars->duplex = DUPLEX_FULL;
else
@@ -9956,7 +10233,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
}
}
if (link_up) {
- DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
+ DP(NETIF_MSG_LINK, "BCM848x3: link speed is %d\n",
vars->line_speed);
bnx2x_ext_phy_resolve_fc(phy, params, vars);
@@ -9995,6 +10272,31 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
if (val & (1<<11))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ /* Determine if EEE was negotiated */
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ u32 eee_shmem = 0;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_EEE_ADV, &val1);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_EEE_ADV, &val2);
+ if ((val1 & val2) & 0x8) {
+ DP(NETIF_MSG_LINK, "EEE negotiated\n");
+ vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
+ }
+
+ if (val2 & 0x12)
+ eee_shmem |= SHMEM_EEE_100M_ADV;
+ if (val2 & 0x4)
+ eee_shmem |= SHMEM_EEE_1G_ADV;
+ if (val2 & 0x68)
+ eee_shmem |= SHMEM_EEE_10G_ADV;
+
+ vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
+ vars->eee_status |= (eee_shmem <<
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+ }
}
return link_up;
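
The new read_status block resolves EEE by reading the local advertisement and the link partner's advertisement (MDIO_AN_REG_EEE_ADV / MDIO_AN_REG_LP_EEE_ADV), declaring EEE active only when the 10GBase-T bit is set in both, and then mapping the partner's bits into the shmem LP-advertisement field. A standalone sketch of the resolution step; the bit mask follows the EEE advertisement register layout as used in the hunk, and the shmem encoding is omitted.

#include <stdint.h>

#define EEE_ADV_10GBT	0x0008		/* 10GBase-T bit in the EEE adv register */

/* Non-zero if both ends advertised 10GBase-T EEE, i.e. EEE is active. */
static int eee_negotiated_10gbt(uint16_t local_adv, uint16_t lp_adv)
{
	return ((local_adv & lp_adv) & EEE_ADV_10GBT) != 0;
}
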
@@ -10273,7 +10575,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
u32 cfg_pin;
DP(NETIF_MSG_LINK, "54618SE cfg init\n");
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* This works with E3 only, no need to check the chip
* before determining the port.
@@ -10342,7 +10644,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
- /* read all advertisement */
+ /* Read all advertisement */
bnx2x_cl22_read(bp, phy,
0x09,
&an_1000_val);
@@ -10379,7 +10681,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
0x09,
&an_1000_val);
- /* set 100 speed advertisement */
+ /* Set 100 speed advertisement */
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
@@ -10393,7 +10695,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Advertising 100M\n");
}
- /* set 10 speed advertisement */
+ /* Set 10 speed advertisement */
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
@@ -10532,7 +10834,7 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
/* Get speed operation status */
bnx2x_cl22_read(bp, phy,
- 0x19,
+ MDIO_REG_GPHY_AUX_STATUS,
&legacy_status);
DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
@@ -10759,7 +11061,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
val2, val1);
link_up = ((val1 & 4) == 4);
- /* if link is up print the AN outcome of the SFX7101 PHY */
+ /* If link is up print the AN outcome of the SFX7101 PHY */
if (link_up) {
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
@@ -10771,7 +11073,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
bnx2x_ext_phy_resolve_fc(phy, params, vars);
- /* read LP advertised speeds */
+ /* Read LP advertised speeds */
if (val2 & (1<<11))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
@@ -11090,7 +11392,7 @@ static struct bnx2x_phy phy_8706 = {
SUPPORTED_FIBRE |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
- .media_type = ETH_PHY_SFP_FIBER,
+ .media_type = ETH_PHY_SFPP_10G_FIBER,
.ver_addr = 0,
.req_flow_ctrl = 0,
.req_line_speed = 0,
@@ -11249,7 +11551,8 @@ static struct bnx2x_phy phy_84833 = {
.def_md_devad = 0,
.flags = (FLAGS_FAN_FAILURE_DET_REQ |
FLAGS_REARM_LATCH_SIGNAL |
- FLAGS_TX_ERROR_CHECK),
+ FLAGS_TX_ERROR_CHECK |
+ FLAGS_EEE_10GBT),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -11428,7 +11731,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
SUPPORTED_FIBRE |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause);
- phy->media_type = ETH_PHY_SFP_FIBER;
+ phy->media_type = ETH_PHY_SFPP_10G_FIBER;
break;
case PORT_HW_CFG_NET_SERDES_IF_KR:
phy->media_type = ETH_PHY_KR;
@@ -11968,7 +12271,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->mac_type = MAC_TYPE_NONE;
vars->phy_flags = 0;
- /* disable attentions */
+ /* Disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
@@ -12017,6 +12320,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
break;
}
bnx2x_update_mng(params, vars->link_status);
+
+ bnx2x_update_mng_eee(params, vars->eee_status);
return 0;
}
@@ -12026,19 +12331,22 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
struct bnx2x *bp = params->bp;
u8 phy_index, port = params->port, clear_latch_ind = 0;
DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
- /* disable attentions */
+ /* Disable attentions */
vars->link_status = 0;
bnx2x_update_mng(params, vars->link_status);
+ vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+ SHMEM_EEE_ACTIVE_BIT);
+ bnx2x_update_mng_eee(params, vars->eee_status);
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
NIG_MASK_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
- /* activate nig drain */
+ /* Activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
- /* disable nig egress interface */
+ /* Disable nig egress interface */
if (!CHIP_IS_E3(bp)) {
REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
@@ -12051,15 +12359,15 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
bnx2x_xmac_disable(params);
bnx2x_umac_disable(params);
}
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- msleep(10);
+ usleep_range(10000, 20000);
/* The PHY reset is controlled by GPIO 1
* Hold it as vars low
*/
- /* clear link led */
+ /* Clear link led */
bnx2x_set_mdio_clk(bp, params->chip_id, port);
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
@@ -12089,9 +12397,9 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
params->phy[INT_PHY].link_reset(
&params->phy[INT_PHY], params);
- /* disable nig ingress interface */
+ /* Disable nig ingress interface */
if (!CHIP_IS_E3(bp)) {
- /* reset BigMac */
+ /* Reset BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
@@ -12148,7 +12456,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
DP(NETIF_MSG_LINK, "populate_phy failed\n");
return -EINVAL;
}
- /* disable attentions */
+ /* Disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
port_of_path*4,
(NIG_MASK_XGXS0_LINK_STATUS |
@@ -12222,7 +12530,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
bnx2x_cl45_write(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
- msleep(15);
+ usleep_range(15000, 30000);
/* Read modify write the SPI-ROM version select register */
bnx2x_cl45_read(bp, phy_blk[port],
@@ -12254,7 +12562,7 @@ static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
bnx2x_ext_phy_hw_reset(bp, 0);
- msleep(5);
+ usleep_range(5000, 10000);
for (port = 0; port < PORT_MAX; port++) {
u32 shmem_base, shmem2_base;
@@ -12361,11 +12669,11 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
/* Initiate PHY reset*/
bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
port);
- msleep(1);
+ usleep_range(1000, 2000);
bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- msleep(5);
+ usleep_range(5000, 10000);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
@@ -12459,7 +12767,7 @@ static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
MDIO_PMA_REG_CTRL, &val);
if (!(val & (1<<15)))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (cnt >= 1500) {
DP(NETIF_MSG_LINK, "84833 reset timeout\n");
@@ -12549,7 +12857,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
}
- if (rc != 0)
+ if (rc)
netdev_err(bp->dev, "Warning: PHY was not initialized,"
" Port %d\n",
0);
@@ -12630,30 +12938,41 @@ static void bnx2x_check_over_curr(struct link_params *params,
vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
}
-static void bnx2x_analyze_link_error(struct link_params *params,
- struct link_vars *vars, u32 lss_status,
- u8 notify)
+/* Returns 0 if no change occurred since last check; 1 otherwise. */
+static u8 bnx2x_analyze_link_error(struct link_params *params,
+ struct link_vars *vars, u32 status,
+ u32 phy_flag, u32 link_flag, u8 notify)
{
struct bnx2x *bp = params->bp;
/* Compare new value with previous value */
u8 led_mode;
- u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
+ u32 old_status = (vars->phy_flags & phy_flag) ? 1 : 0;
- if ((lss_status ^ half_open_conn) == 0)
- return;
+ if ((status ^ old_status) == 0)
+ return 0;
/* If values differ */
- DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
- half_open_conn, lss_status);
+ switch (phy_flag) {
+ case PHY_HALF_OPEN_CONN_FLAG:
+ DP(NETIF_MSG_LINK, "Analyze Remote Fault\n");
+ break;
+ case PHY_SFP_TX_FAULT_FLAG:
+ DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Analyze UNKOWN\n");
+ }
+ DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
+ old_status, status);
/* a. Update shmem->link_status accordingly
* b. Update link_vars->link_up
*/
- if (lss_status) {
- DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
+ if (status) {
vars->link_status &= ~LINK_STATUS_LINK_UP;
+ vars->link_status |= link_flag;
vars->link_up = 0;
- vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->phy_flags |= phy_flag;
/* activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
@@ -12662,10 +12981,10 @@ static void bnx2x_analyze_link_error(struct link_params *params,
*/
led_mode = LED_MODE_OFF;
} else {
- DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
vars->link_status |= LINK_STATUS_LINK_UP;
+ vars->link_status &= ~link_flag;
vars->link_up = 1;
- vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+ vars->phy_flags &= ~phy_flag;
led_mode = LED_MODE_OPER;
/* Clear nig drain */
@@ -12682,6 +13001,8 @@ static void bnx2x_analyze_link_error(struct link_params *params,
vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
if (notify)
bnx2x_notify_link_changed(bp);
+
+ return 1;
}
/******************************************************************************
@@ -12723,7 +13044,9 @@ int bnx2x_check_half_open_conn(struct link_params *params,
if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
lss_status = 1;
- bnx2x_analyze_link_error(params, vars, lss_status, notify);
+ bnx2x_analyze_link_error(params, vars, lss_status,
+ PHY_HALF_OPEN_CONN_FLAG,
+ LINK_STATUS_NONE, notify);
} else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
/* Check E1X / E2 BMAC */
@@ -12740,11 +13063,55 @@ int bnx2x_check_half_open_conn(struct link_params *params,
REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
lss_status = (wb_data[0] > 0);
- bnx2x_analyze_link_error(params, vars, lss_status, notify);
+ bnx2x_analyze_link_error(params, vars, lss_status,
+ PHY_HALF_OPEN_CONN_FLAG,
+ LINK_STATUS_NONE, notify);
}
return 0;
}
+static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin, value = 0;
+ u8 led_change, port = params->port;
+ /* Get the SFP+ TX_Fault controlling pin ([eg]pio) */
+ cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_TX_FAULT_MASK) >>
+ PORT_HW_CFG_E3_TX_FAULT_SHIFT;
+
+ if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) {
+ DP(NETIF_MSG_LINK, "Failed to read pin 0x%02x\n", cfg_pin);
+ return;
+ }
+
+ led_change = bnx2x_analyze_link_error(params, vars, value,
+ PHY_SFP_TX_FAULT_FLAG,
+ LINK_STATUS_SFP_TX_FAULT, 1);
+
+ if (led_change) {
+ /* Change TX_Fault led, set link status for further syncs */
+ u8 led_mode;
+
+ if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) {
+ led_mode = MISC_REGISTERS_GPIO_HIGH;
+ vars->link_status |= LINK_STATUS_SFP_TX_FAULT;
+ } else {
+ led_mode = MISC_REGISTERS_GPIO_LOW;
+ vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+ }
+
+ /* If module is unapproved, led should be on regardless */
+ if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
+ DP(NETIF_MSG_LINK, "Change TX_Fault LED: ->%x\n",
+ led_mode);
+ bnx2x_set_e3_module_fault_led(params, led_mode);
+ }
+ }
+}
void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
{
u16 phy_idx;
@@ -12763,7 +13130,26 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
struct bnx2x_phy *phy = &params->phy[INT_PHY];
bnx2x_set_aer_mmd(params, phy);
bnx2x_check_over_curr(params, vars);
- bnx2x_warpcore_config_runtime(phy, params, vars);
+ if (vars->rx_tx_asic_rst)
+ bnx2x_warpcore_config_runtime(phy, params, vars);
+
+ if ((REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg))
+ & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+ PORT_HW_CFG_NET_SERDES_IF_SFI) {
+ if (bnx2x_is_sfp_module_plugged(phy, params)) {
+ bnx2x_sfp_tx_fault_detection(phy, params, vars);
+ } else if (vars->link_status &
+ LINK_STATUS_SFP_TX_FAULT) {
+ /* Clean trail, interrupt corrects the leds */
+ vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+ vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG;
+ /* Update link status in the shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+ }
+ }
+
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index ea4371f4335f..51cac8130051 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -41,6 +41,7 @@
#define SPEED_AUTO_NEG 0
#define SPEED_20000 20000
+#define SFP_EEPROM_PAGE_SIZE 16
#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
@@ -125,6 +126,11 @@ typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
struct link_params *params, u8 mode);
typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
struct link_params *params, u32 action);
+struct bnx2x_reg_set {
+ u8 devad;
+ u16 reg;
+ u16 val;
+};
struct bnx2x_phy {
u32 type;
@@ -149,6 +155,7 @@ struct bnx2x_phy {
#define FLAGS_DUMMY_READ (1<<9)
#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
#define FLAGS_TX_ERROR_CHECK (1<<12)
+#define FLAGS_EEE_10GBT (1<<13)
/* preemphasis values for the rx side */
u16 rx_preemphasis[4];
@@ -162,14 +169,15 @@ struct bnx2x_phy {
u32 supported;
u32 media_type;
-#define ETH_PHY_UNSPECIFIED 0x0
-#define ETH_PHY_SFP_FIBER 0x1
-#define ETH_PHY_XFP_FIBER 0x2
-#define ETH_PHY_DA_TWINAX 0x3
-#define ETH_PHY_BASE_T 0x4
-#define ETH_PHY_KR 0xf0
-#define ETH_PHY_CX4 0xf1
-#define ETH_PHY_NOT_PRESENT 0xff
+#define ETH_PHY_UNSPECIFIED 0x0
+#define ETH_PHY_SFPP_10G_FIBER 0x1
+#define ETH_PHY_XFP_FIBER 0x2
+#define ETH_PHY_DA_TWINAX 0x3
+#define ETH_PHY_BASE_T 0x4
+#define ETH_PHY_SFP_1G_FIBER 0x5
+#define ETH_PHY_KR 0xf0
+#define ETH_PHY_CX4 0xf1
+#define ETH_PHY_NOT_PRESENT 0xff
/* The address in which version is located*/
u32 ver_addr;
@@ -265,6 +273,30 @@ struct link_params {
u8 num_phys;
u8 rsrv;
+
+ /* Used to configure the EEE Tx LPI timer, has several modes of
+ * operation, according to bits 29:28 -
+ * 2'b00: Timer will be configured by nvram, output will be the value
+ * from nvram.
+ * 2'b01: Timer will be configured by nvram, output will be in
+ * microseconds.
+ * 2'b10: bits 1:0 contain an nvram value which will be used instead
+ * of the one located in the nvram. Output will be that value.
+ * 2'b11: bits 19:0 contain the idle timer in microseconds; output
+ * will be in microseconds.
+ * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
+ */
+ u32 eee_mode;
+#define EEE_MODE_NVRAM_BALANCED_TIME (0xa00)
+#define EEE_MODE_NVRAM_AGGRESSIVE_TIME (0x100)
+#define EEE_MODE_NVRAM_LATENCY_TIME (0x6000)
+#define EEE_MODE_NVRAM_MASK (0x3)
+#define EEE_MODE_TIMER_MASK (0xfffff)
+#define EEE_MODE_OUTPUT_TIME (1<<28)
+#define EEE_MODE_OVERRIDE_NVRAM (1<<29)
+#define EEE_MODE_ENABLE_LPI (1<<30)
+#define EEE_MODE_ADV_LPI (1<<31)
+
u16 hw_led_mode; /* part of the hw_config read from the shmem */
u32 multi_phy_config;
@@ -282,6 +314,7 @@ struct link_vars {
#define PHY_PHYSICAL_LINK_FLAG (1<<2)
#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
#define PHY_OVER_CURRENT_FLAG (1<<4)
+#define PHY_SFP_TX_FAULT_FLAG (1<<5)
u8 mac_type;
#define MAC_TYPE_NONE 0
@@ -301,6 +334,7 @@ struct link_vars {
/* The same definitions as the shmem parameter */
u32 link_status;
+ u32 eee_status;
u8 fault_detected;
u8 rsrv1;
u16 periodic_flags;
@@ -459,8 +493,7 @@ struct bnx2x_ets_params {
struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
};
-/**
- * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
+/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
* when link is already up
*/
int bnx2x_update_pfc(struct link_params *params,
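
For orientation, a minimal sketch of how the eee_mode word documented above could be composed from the new EEE_MODE_* macros; the helper name and the timer value are illustrative only and do not appear in this patch.

/* Hypothetical helper, for illustration only: request EEE with an explicit
 * idle timer.  Bits 31:30 enable/advertise LPI, bits 29:28 = 2'b11 select
 * an explicit microsecond timer, and bits 19:0 carry the timer itself.
 */
static u32 example_build_eee_mode(u32 idle_timer_us)
{
	return EEE_MODE_ADV_LPI | EEE_MODE_ENABLE_LPI |
	       EEE_MODE_OVERRIDE_NVRAM | EEE_MODE_OUTPUT_TIME |
	       (idle_timer_us & EEE_MODE_TIMER_MASK);
}
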
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f755a665dab3..9aaf863b4237 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -74,6 +74,8 @@
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -104,7 +106,7 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
#define INT_MODE_INTx 1
#define INT_MODE_MSI 2
-static int int_mode;
+int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
@@ -135,7 +137,10 @@ enum bnx2x_board_type {
BCM57800_MF,
BCM57810,
BCM57810_MF,
- BCM57840,
+ BCM57840_O,
+ BCM57840_4_10,
+ BCM57840_2_20,
+ BCM57840_MFO,
BCM57840_MF,
BCM57811,
BCM57811_MF
@@ -155,6 +160,9 @@ static struct {
{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
@@ -187,8 +195,17 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
#endif
-#ifndef PCI_DEVICE_ID_NX2_57840
-#define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840
+#ifndef PCI_DEVICE_ID_NX2_57840_O
+#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_4_10
+#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_2_20
+#define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MFO
+#define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
@@ -209,7 +226,10 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
- { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
@@ -758,7 +778,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* Tx */
for_each_cos_in_tx_queue(fp, cos)
{
- txdata = fp->txdata[cos];
+ txdata = *fp->txdata_ptr[cos];
BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
i, txdata.tx_pkt_prod,
txdata.tx_pkt_cons, txdata.tx_bd_prod,
@@ -876,7 +896,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
@@ -1583,7 +1603,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
- struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
+ struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
DP(BNX2X_MSG_SP,
"fp %d cid %d got ramrod #%d state is %x type is %d\n",
@@ -1710,7 +1730,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
/* Handle Rx or Tx according to SB id */
prefetch(fp->rx_cons_sb);
for_each_cos_in_tx_queue(fp, cos)
- prefetch(fp->txdata[cos].tx_cons_sb);
+ prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
status &= ~mask;
@@ -2124,6 +2144,11 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
}
}
+ if (load_mode == LOAD_LOOPBACK_EXT) {
+ struct link_params *lp = &bp->link_params;
+ lp->loopback_mode = LOOPBACK_EXT;
+ }
+
rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
@@ -2916,7 +2941,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
u8 cos)
{
- txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
+ txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
txq_init->fw_sb_id = fp->fw_sb_id;
@@ -3030,9 +3055,9 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
memcpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN - 1);
- bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
- DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
- ether_stat->mac_local);
+ bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local);
ether_stat->mtu_size = bp->dev->mtu;
@@ -3055,7 +3080,8 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
struct fcoe_stats_info *fcoe_stat =
&bp->slowpath->drv_info_to_mcp.fcoe_stat;
- memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);
+ memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
+ bp->fip_mac, ETH_ALEN);
fcoe_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3063,11 +3089,11 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
/* insert FCoE stats from ramrod response */
if (!NO_FCOE(bp)) {
struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
tstorm_queue_statistics;
struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
xstorm_queue_statistics;
struct fcoe_statistics_params *fw_fcoe_stat =
@@ -3146,7 +3172,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
struct iscsi_stats_info *iscsi_stat =
&bp->slowpath->drv_info_to_mcp.iscsi_stat;
- memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+ memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
+ bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
iscsi_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -3176,6 +3203,12 @@ static void bnx2x_set_mf_bw(struct bnx2x *bp)
bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
+static void bnx2x_handle_eee_event(struct bnx2x *bp)
+{
+ DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
+}
+
static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
{
enum drv_info_opcode op_code;
@@ -3742,6 +3775,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_AFEX_EVENT_MASK)
bnx2x_handle_afex_cmd(bp,
val & DRV_STATUS_AFEX_EVENT_MASK);
+ if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
+ bnx2x_handle_eee_event(bp);
if (bp->link_vars.periodic_flags &
PERIODIC_FLAGS_LINK_EVENT) {
/* sync with link */
@@ -4615,11 +4650,11 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
#ifdef BCM_CNIC
- if (cid == BNX2X_ISCSI_ETH_CID)
+ if (cid == BNX2X_ISCSI_ETH_CID(bp))
vlan_mac_obj = &bp->iscsi_l2_mac_obj;
else
#endif
- vlan_mac_obj = &bp->fp[cid].mac_obj;
+ vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
break;
case BNX2X_FILTER_MCAST_PENDING:
@@ -4717,7 +4752,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
for_each_eth_queue(bp, q) {
/* Set the appropriate Queue object */
fp = &bp->fp[q];
- queue_params.q_obj = &fp->q_obj;
+ queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* send the ramrod */
rc = bnx2x_queue_state_change(bp, &queue_params);
@@ -4728,8 +4763,8 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
#ifdef BCM_CNIC
if (!NO_FCOE(bp)) {
- fp = &bp->fp[FCOE_IDX];
- queue_params.q_obj = &fp->q_obj;
+ fp = &bp->fp[FCOE_IDX(bp)];
+ queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* clear pending completion bit */
__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
@@ -4761,11 +4796,11 @@ static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
{
DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
#ifdef BCM_CNIC
- if (cid == BNX2X_FCOE_ETH_CID)
- return &bnx2x_fcoe(bp, q_obj);
+ if (cid == BNX2X_FCOE_ETH_CID(bp))
+ return &bnx2x_fcoe_sp_obj(bp, q_obj);
else
#endif
- return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
+ return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
}
static void bnx2x_eq_int(struct bnx2x *bp)
@@ -5647,15 +5682,15 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
/* init tx data */
for_each_cos_in_tx_queue(fp, cos) {
- bnx2x_init_txdata(bp, &fp->txdata[cos],
- CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
- FP_COS_TO_TXQ(fp, cos),
- BNX2X_TX_SB_INDEX_BASE + cos);
- cids[cos] = fp->txdata[cos].cid;
+ bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
+ CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
+ FP_COS_TO_TXQ(fp, cos, bp),
+ BNX2X_TX_SB_INDEX_BASE + cos, fp);
+ cids[cos] = fp->txdata_ptr[cos]->cid;
}
- bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
- BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
+ fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
/**
@@ -5706,7 +5741,7 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)
for_each_tx_queue(bp, i)
for_each_cos_in_tx_queue(&bp->fp[i], cos)
- bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
+ bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
@@ -7055,12 +7090,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
for (i = 0; i < L2_ILT_LINES(bp); i++) {
- ilt->lines[cdu_ilt_start + i].page =
- bp->context.vcxt + (ILT_PAGE_CIDS * i);
+ ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
- bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
- /* cdu ilt pages are allocated manually so there's no need to
- set the size */
+ bp->context[i].cxt_mapping;
+ ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
}
bnx2x_ilt_init_op(bp, INITOP_SET);
@@ -7327,6 +7360,8 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
void bnx2x_free_mem(struct bnx2x *bp)
{
+ int i;
+
/* fastpath */
bnx2x_free_fp_mem(bp);
/* end of fastpath */
@@ -7340,9 +7375,9 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
- BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
- bp->context.size);
-
+ for (i = 0; i < L2_ILT_LINES(bp); i++)
+ BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
+ bp->context[i].size);
bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
BNX2X_FREE(bp->ilt->lines);
@@ -7428,6 +7463,8 @@ alloc_mem_err:
int bnx2x_alloc_mem(struct bnx2x *bp)
{
+ int i, allocated, context_size;
+
#ifdef BCM_CNIC
if (!CHIP_IS_E1x(bp))
/* size = the status block + ramrod buffers */
@@ -7457,11 +7494,29 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
if (bnx2x_alloc_fw_stats_mem(bp))
goto alloc_mem_err;
- bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
-
- BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
- bp->context.size);
+ /* Allocate memory for CDU context:
+ * This memory is allocated separately and not in the generic ILT
+ * functions because CDU differs in a few aspects:
+ * 1. There are multiple entities allocating memory for context -
+ * 'regular' driver, CNIC and SRIOV driver. Each separately controls
+ * its own ILT lines.
+ * 2. Since CDU page-size is not a single 4KB page (which is the case
+ * for the other ILT clients), to be efficient we want to support
+ * allocation of sub-page-size in the last entry.
+ * 3. Context pointers are used by the driver to pass to FW / update
+ * the context (for the other ILT clients the pointers are used just to
+ * free the memory during unload).
+ */
+ context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
+ for (i = 0, allocated = 0; allocated < context_size; i++) {
+ bp->context[i].size = min(CDU_ILT_PAGE_SZ,
+ (context_size - allocated));
+ BNX2X_PCI_ALLOC(bp->context[i].vcxt,
+ &bp->context[i].cxt_mapping,
+ bp->context[i].size);
+ allocated += bp->context[i].size;
+ }
BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
@@ -7563,8 +7618,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
/* Eth MAC is set on RSS leading client (fp[0]) */
- return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
- BNX2X_ETH_MAC, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
+ set, BNX2X_ETH_MAC, &ramrod_flags);
}
int bnx2x_setup_leading(struct bnx2x *bp)
@@ -7579,7 +7634,7 @@ int bnx2x_setup_leading(struct bnx2x *bp)
*
* In case of MSI-X it will also try to enable MSI-X.
*/
-static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
+void bnx2x_set_int_mode(struct bnx2x *bp)
{
switch (int_mode) {
case INT_MODE_MSI:
@@ -7590,11 +7645,6 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
BNX2X_DEV_INFO("set number of queues to 1\n");
break;
default:
- /* Set number of queues for MSI-X mode */
- bnx2x_set_num_queues(bp);
-
- BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
-
/* if we can't use MSI-X we only need one fp,
* so try to enable MSI-X with the requested number of fp's
* and fallback to MSI or legacy INTx with one fp
@@ -7735,6 +7785,8 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
{
u8 cos;
+ int cxt_index, cxt_offset;
+
/* FCoE Queue uses Default SB, thus has no HC capabilities */
if (!IS_FCOE_FP(fp)) {
__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
@@ -7771,9 +7823,13 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
fp->index, init_params->max_cos);
/* set the context pointers queue object */
- for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
+ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
+ cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
+ cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
+ ILT_PAGE_CIDS);
init_params->cxts[cos] =
- &bp->context.vcxt[fp->txdata[cos].cid].eth;
+ &bp->context[cxt_index].vcxt[cxt_offset].eth;
+ }
}
int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
@@ -7838,7 +7894,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
IGU_INT_ENABLE, 0);
- q_params.q_obj = &fp->q_obj;
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* We want to wait for completion in this context */
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
@@ -7911,7 +7967,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
- q_params.q_obj = &fp->q_obj;
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* We want to wait for completion in this context */
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
@@ -7922,7 +7978,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
tx_index++){
/* ascertain this is a normal queue*/
- txdata = &fp->txdata[tx_index];
+ txdata = fp->txdata_ptr[tx_index];
DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
txdata->txq_index);
@@ -8289,7 +8345,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos)
- rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
+ rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
#ifdef BNX2X_STOP_ON_ERROR
if (rc)
return;
@@ -8300,12 +8356,13 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
usleep_range(1000, 1000);
/* Clean all ETH MACs */
- rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+ rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
+ false);
if (rc < 0)
BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
/* Clean up UC list */
- rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+ rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
true);
if (rc < 0)
BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
@@ -9697,6 +9754,11 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
BC_SUPPORTS_PFC_STATS : 0;
+ bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
+ BC_SUPPORTS_FCOE_FEATURES : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
+ BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
boot_mode = SHMEM_RD(bp,
dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -10082,7 +10144,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
int port = BP_PORT(bp);
u32 config;
- u32 ext_phy_type, ext_phy_config;
+ u32 ext_phy_type, ext_phy_config, eee_mode;
bp->link_params.bp = bp;
bp->link_params.port = port;
@@ -10149,6 +10211,19 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
bp->common.shmem_base,
bp->common.shmem2_base);
+
+ /* Configure link feature according to nvram value */
+ eee_mode = (((SHMEM_RD(bp, dev_info.
+ port_feature_config[port].eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+ if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
+ bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
+ EEE_MODE_ENABLE_LPI |
+ EEE_MODE_OUTPUT_TIME;
+ } else {
+ bp->link_params.eee_mode = 0;
+ }
}
void bnx2x_get_iscsi_info(struct bnx2x *bp)
@@ -10997,7 +11072,7 @@ static int bnx2x_set_uc_list(struct bnx2x *bp)
int rc;
struct net_device *dev = bp->dev;
struct netdev_hw_addr *ha;
- struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
unsigned long ramrod_flags = 0;
/* First schedule a cleanup up of old configuration */
@@ -11503,8 +11578,7 @@ static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
}
}
-/**
- * IRO array is stored in the following format:
+/* IRO array is stored in the following format:
* {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
*/
static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
@@ -11672,7 +11746,7 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
/* must be called after sriov-enable */
static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
- int cid_count = BNX2X_L2_CID_COUNT(bp);
+ int cid_count = BNX2X_L2_MAX_CID(bp);
#ifdef BCM_CNIC
cid_count += CNIC_CID_MAX;
@@ -11717,7 +11791,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
struct bnx2x *bp;
int pcie_width, pcie_speed;
int rc, max_non_def_sbs;
- int rx_count, tx_count, rss_count;
+ int rx_count, tx_count, rss_count, doorbell_size;
/*
* An estimated maximum supported CoS number according to the chip
* version.
@@ -11745,7 +11819,10 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
case BCM57800_MF:
case BCM57810:
case BCM57810_MF:
- case BCM57840:
+ case BCM57840_O:
+ case BCM57840_4_10:
+ case BCM57840_2_20:
+ case BCM57840_MFO:
case BCM57840_MF:
case BCM57811:
case BCM57811_MF:
@@ -11760,13 +11837,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
- /* !!! FIXME !!!
- * Do not allow the maximum SB count to grow above 16
- * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48.
- * We will use the FP_SB_MAX_E1x macro for this matter.
- */
- max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
-
WARN_ON(!max_non_def_sbs);
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
@@ -11777,9 +11847,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/*
* Maximum number of netdev Tx queues:
- * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
+ * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
*/
- tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
+ tx_count = rss_count * max_cos_est + FCOE_PRESENT;
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
@@ -11788,9 +11858,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
bp = netdev_priv(dev);
- BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
- tx_count, rx_count);
-
bp->igu_sb_cnt = max_non_def_sbs;
bp->msg_enable = debug;
pci_set_drvdata(pdev, dev);
@@ -11803,6 +11870,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
+ BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
+ tx_count, rx_count);
+
rc = bnx2x_init_bp(bp);
if (rc)
goto init_one_exit;
@@ -11811,9 +11881,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
* Map doorbels here as we need the real value of bp->max_cos which
* is initialized in bnx2x_init_bp().
*/
+ doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
+ if (doorbell_size > pci_resource_len(pdev, 2)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbells, bar size too small, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_exit;
+ }
bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
- min_t(u64, BNX2X_DB_SIZE(bp),
- pci_resource_len(pdev, 2)));
+ doorbell_size);
if (!bp->doorbells) {
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
@@ -11831,8 +11907,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
#endif
+
+ /* Set bp->num_queues for MSI-X mode*/
+ bnx2x_set_num_queues(bp);
+
/* Configure interrupt mode: try to enable MSI-X/MSI if
- * needed, set bp->num_queues appropriately.
+ * needed.
*/
bnx2x_set_int_mode(bp);
@@ -12176,6 +12256,7 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
struct eth_spe *spe;
+ int cxt_index, cxt_offset;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -12198,10 +12279,16 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
* ramrod
*/
if (type == ETH_CONNECTION_TYPE) {
- if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
- bnx2x_set_ctx_validation(bp, &bp->context.
- vcxt[BNX2X_ISCSI_ETH_CID].eth,
- BNX2X_ISCSI_ETH_CID);
+ if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
+ cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
+ ILT_PAGE_CIDS;
+ cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
+ (cxt_index * ILT_PAGE_CIDS);
+ bnx2x_set_ctx_validation(bp,
+ &bp->context[cxt_index].
+ vcxt[cxt_offset].eth,
+ BNX2X_ISCSI_ETH_CID(bp));
+ }
}
/*
@@ -12488,21 +12575,45 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
break;
}
case DRV_CTL_ULP_REGISTER_CMD: {
- int ulp_type = ctl->data.ulp_type;
+ int ulp_type = ctl->data.register_data.ulp_type;
if (CHIP_IS_E3(bp)) {
int idx = BP_FW_MB_IDX(bp);
- u32 cap;
+ u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ int path = BP_PATH(bp);
+ int port = BP_PORT(bp);
+ int i;
+ u32 scratch_offset;
+ u32 *host_addr;
- cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ /* first write capability to shmem2 */
if (ulp_type == CNIC_ULP_ISCSI)
cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
else if (ulp_type == CNIC_ULP_FCOE)
cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+
+ if ((ulp_type != CNIC_ULP_FCOE) ||
+ (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
+ (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
+ break;
+
+ /* if reached here - should write fcoe capabilities */
+ scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
+ if (!scratch_offset)
+ break;
+ scratch_offset += offsetof(struct glob_ncsi_oem_data,
+ fcoe_features[path][port]);
+ host_addr = (u32 *) &(ctl->data.register_data.
+ fcoe_features);
+ for (i = 0; i < sizeof(struct fcoe_capabilities);
+ i += 4)
+ REG_WR(bp, scratch_offset + i,
+ *(host_addr + i/4));
}
break;
}
+
case DRV_CTL_ULP_UNREGISTER_CMD: {
int ulp_type = ctl->data.ulp_type;
@@ -12554,6 +12665,21 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
cp->num_irq = 2;
}
+void bnx2x_setup_cnic_info(struct bnx2x *bp)
+{
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+ bnx2x_cid_ilt_lines(bp);
+ cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+
+ if (NO_ISCSI_OOO(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+}
+
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
void *data)
{
@@ -12632,10 +12758,10 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
cp->drv_ctl = bnx2x_drv_ctl;
cp->drv_register_cnic = bnx2x_register_cnic;
cp->drv_unregister_cnic = bnx2x_unregister_cnic;
- cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
cp->iscsi_l2_client_id =
bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
- cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
if (NO_ISCSI_OOO(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
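
The same CID-to-context arithmetic appears twice in the bnx2x_main.c changes above; as a rough sketch (the helper name is made up, the fields and ILT_PAGE_CIDS come from the patch), it boils down to:

/* Illustration only: locate the CDU context for a CID now that bp->context
 * is split into ILT-page-sized chunks.  ILT_PAGE_CIDS is the number of
 * contexts that fit in one chunk.
 */
static union cdu_context *example_cid_to_cxt(struct bnx2x *bp, u32 cid)
{
	u32 cxt_index = cid / ILT_PAGE_CIDS;
	u32 cxt_offset = cid % ILT_PAGE_CIDS;

	return &bp->context[cxt_index].vcxt[cxt_offset];
}
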
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
new file mode 100644
index 000000000000..ddd5106ad2f9
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -0,0 +1,168 @@
+/* bnx2x_mfw_req.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNX2X_MFW_REQ_H
+#define BNX2X_MFW_REQ_H
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_MAX 2
+#define NVM_PATH_MAX 2
+
+/* FCoE capabilities required from the driver */
+struct fcoe_capabilities {
+ u32 capability1;
+ /* Maximum number of I/Os per connection */
+ #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff
+ #define FCOE_IOS_PER_CONNECTION_SHIFT 0
+ /* Maximum number of Logins per port */
+ #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000
+ #define FCOE_LOGINS_PER_PORT_SHIFT 16
+
+ u32 capability2;
+ /* Maximum number of exchanges */
+ #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff
+ #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0
+ /* Maximum NPIV WWN per port */
+ #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000
+ #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16
+
+ u32 capability3;
+ /* Maximum number of targets supported */
+ #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff
+ #define FCOE_TARGETS_SUPPORTED_SHIFT 0
+ /* Maximum number of outstanding commands across all connections */
+ #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000
+ #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16
+
+ u32 capability4;
+ #define FCOE_CAPABILITY4_STATEFUL 0x00000001
+ #define FCOE_CAPABILITY4_STATELESS 0x00000002
+ #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004
+};
+
+struct glob_ncsi_oem_data {
+ u32 driver_version;
+ u32 unused[3];
+ struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX];
+};
+
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 2
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+ ETH_STATS_OPCODE,
+ FCOE_STATS_OPCODE,
+ ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN 12
+/* Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+ /* Function's Driver Version. padded to 12 */
+ u8 version[ETH_STAT_INFO_VERSION_LEN];
+ /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
+ u8 mac_local[8];
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
+ u32 feature_flags; /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
+#define FEATURE_ETH_LSO_MASK 0x02
+#define FEATURE_ETH_BOOTMODE_MASK 0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT 2
+#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK 0x20
+ u32 lso_max_size; /* LSO MaxOffloadSize. */
+ u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
+ /* Num Offloaded Connections TCP_IPv4. */
+ u32 ipv4_ofld_cnt;
+ /* Num Offloaded Connections TCP_IPv6. */
+ u32 ipv6_ofld_cnt;
+ u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
+ u32 txq_size; /* TX Descriptors Queue Size */
+ u32 rxq_size; /* RX Descriptors Queue Size */
+ /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 txq_avg_depth;
+ /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 rxq_avg_depth;
+ /* IOV_Offload. 0=none; 1=MultiQueue; 2=VEB; 3=VEPA */
+ u32 iov_offload;
+ /* Number of NetQueue/VMQ Config'd. */
+ u32 netq_cnt;
+ u32 vf_cnt; /* Num VF assigned to this PF. */
+};
+
+/* Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u32 txq_size; /* FCoE TX Descriptors Queue Size. */
+ u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
+ /* FCoE TX Descriptor Queue Avg Depth. */
+ u32 txq_avg_depth;
+ /* FCoE RX Descriptors Queue Avg Depth. */
+ u32 rxq_avg_depth;
+ u32 rx_frames_lo; /* FCoE RX Frames received. */
+ u32 rx_frames_hi; /* FCoE RX Frames received. */
+ u32 rx_bytes_lo; /* FCoE RX Bytes received. */
+ u32 rx_bytes_hi; /* FCoE RX Bytes received. */
+ u32 tx_frames_lo; /* FCoE TX Frames sent. */
+ u32 tx_frames_hi; /* FCoE TX Frames sent. */
+ u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
+ u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
+};
+
+/* Per PCI Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
+ u8 ww_port_name[64]; /* iSCSI World wide port name */
+ u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+ u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
+ u32 boot_target_portal; /* iSCSI Boot Target Portal. */
+ u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
+ u32 max_frame_size; /* Max Frame Size. bytes */
+ u32 txq_size; /* PDU TX Descriptors Queue Size. */
+ u32 rxq_size; /* PDU RX Descriptors Queue Size. */
+ u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
+ u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
+ u32 rx_pdus_lo; /* iSCSI PDUs received. */
+ u32 rx_pdus_hi; /* iSCSI PDUs received. */
+ u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
+ u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
+ u32 tx_pdus_lo; /* iSCSI PDUs sent. */
+ u32 tx_pdus_hi; /* iSCSI PDUs sent. */
+ u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
+ u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
+ u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
+ * 9 nibbles, the position of each nibble
+ * represents the C-PCP value, the value
+ * of the nibble = S-PCP value.
+ */
+};
+
+union drv_info_to_mcp {
+ struct eth_stats_info ether_stat;
+ struct fcoe_stats_info fcoe_stat;
+ struct iscsi_stats_info iscsi_stat;
+};
+#endif /* BNX2X_MFW_REQ_H */
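
The capability words above are plain mask/shift pairs; a hedged example of filling one of them follows (the numbers are arbitrary, not values mandated by the interface).

/* Illustration only: report 2048 I/Os per connection and 64 logins per
 * port in capability1, using the masks and shifts defined above.
 */
static void example_fill_fcoe_cap1(struct fcoe_capabilities *caps)
{
	caps->capability1 =
		((2048 << FCOE_IOS_PER_CONNECTION_SHIFT) &
		 FCOE_IOS_PER_CONNECTION_MASK) |
		((64 << FCOE_LOGINS_PER_PORT_SHIFT) &
		 FCOE_LOGINS_PER_PORT_MASK);
}
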
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index bbd387492a80..ec62a5c8bd37 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1488,6 +1488,121 @@
* 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
#define MISC_REG_CHIP_TYPE 0xac60
#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1)
+#define MISC_REG_CPMU_LP_DR_ENABLE 0xa858
+/* [RW 1] FW EEE LPI Enable. When 1 indicates that EEE LPI mode is enabled
+ * by FW. When 0 indicates that the EEE LPI mode is disabled by FW. Clk
+ * 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_FW_ENABLE_P0 0xa84c
+/* [RW 32] EEE LPI Idle Threshold. The threshold value for the idle EEE LPI
+ * counter. Timer tick is 1 us. Clock 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_IDLE_THR_P0 0xa8a0
+/* [RW 18] LPI entry events mask. [0] - Vmain SM Mask. When 1 indicates that
+ * the Vmain SM end state is disabled. When 0 indicates that the Vmain SM
+ * end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates that
+ * the FW command that all Queues are empty is disabled. When 0 indicates
+ * that the FW command that all Queues are empty is enabled. [2] - FW Early
+ * Exit Mask / Reserved (Entry mask). When 1 indicates that the FW Early
+ * Exit command is disabled. When 0 indicates that the FW Early Exit command
+ * is enabled. This bit is applicable only in the EXIT Events Mask registers.
+ * [3] - PBF Request Mask. When 1 indicates that the PBF Request indication
+ * is disabled. When 0 indicates that the PBF Request indication is enabled.
+ * [4] - Tx Request Mask. When =1 indicates that the Tx other Than PBF
+ * Request indication is disabled. When 0 indicates that the Tx Other Than
+ * PBF Request indication is enabled. [5] - Rx EEE LPI Status Mask. When 1
+ * indicates that the RX EEE LPI Status indication is disabled. When 0
+ * indicates that the RX EEE LPI Status indication is enabled. In the EXIT
+ * Events Masks registers; this bit masks the falling edge detect of the LPI
+ * Status (Rx LPI is on - off). [6] - Tx Pause Mask. When 1 indicates that
+ * the Tx Pause indication is disabled. When 0 indicates that the Tx Pause
+ * indication is enabled. [7] - BRB1 Empty Mask. When 1 indicates that the
+ * BRB1 EMPTY indication is disabled. When 0 indicates that the BRB1 EMPTY
+ * indication is enabled. [8] - QM Idle Mask. When 1 indicates that the QM
+ * IDLE indication is disabled. When 0 indicates that the QM IDLE indication
+ * is enabled. (One bit for both VOQ0 and VOQ1). [9] - QM LB Idle Mask. When
+ * 1 indicates that the QM IDLE indication for LOOPBACK is disabled. When 0
+ * indicates that the QM IDLE indication for LOOPBACK is enabled. [10] - L1
+ * Status Mask. When 1 indicates that the L1 Status indication from the PCIE
+ * CORE is disabled. When 0 indicates that the RX EEE LPI Status indication
+ * from the PCIE CORE is enabled. In the EXIT Events Masks registers; this
+ * bit masks the falling edge detect of the L1 status (L1 is on - off). [11]
+ * - P0 E0 EEE EEE LPI REQ Mask. When =1 indicates that the P0 E0 EEE EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 E0 EEE LPI
+ * REQ indication is enabled. [12] - P1 E0 EEE LPI REQ Mask. When =1
+ * indicates that the P0 EEE LPI REQ indication is disabled. When =0
+ * indicates that the P0 EEE LPI REQ indication is enabled. [13] - P0 E1 EEE
+ * LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication is
+ * disabled. When =0 indicates that the P0 EEE LPI REQ indication is
+ * enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ
+ * indication is enabled. [15] - L1 REQ Mask. When =1 indicates that the L1
+ * REQ indication is disabled. When =0 indicates that the L1 indication is
+ * enabled. [16] - Rx EEE LPI Status Edge Detect Mask. When =1 indicates
+ * that the RX EEE LPI Status Falling Edge Detect indication is disabled (Rx
+ * EEE LPI is on - off). When =0 indicates that the RX EEE LPI Status
+ * Falling Edge Detect indication is enabled (Rx EEE LPI is on - off). This
+ * bit is applicable only in the EXIT Events Masks registers. [17] - L1
+ * Status Edge Detect Mask. When =1 indicates that the L1 Status Falling
+ * Edge Detect indication from the PCIE CORE is disabled (L1 is on - off).
+ * When =0 indicates that the L1 Status Falling Edge Detect indication from
+ * the PCIE CORE is enabled (L1 is on - off). This bit is applicable only in
+ * the EXIT Events Masks registers. Clock 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_MASK_ENT_P0 0xa880
+/* [RW 18] EEE LPI exit events mask. [0] - Vmain SM Mask. When 1 indicates
+ * that the Vmain SM end state is disabled. When 0 indicates that the Vmain
+ * SM end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates
+ * that the FW command that all Queues are empty is disabled. When 0
+ * indicates that the FW command that all Queues are empty is enabled. [2] -
+ * FW Early Exit Mask / Reserved (Entry mask). When 1 indicates that the FW
+ * Early Exit command is disabled. When 0 indicates that the FW Early Exit
+ * command is enabled. This bit is applicable only in the EXIT Events Mask
+ * registers. [3] - PBF Request Mask. When 1 indicates that the PBF Request
+ * indication is disabled. When 0 indicates that the PBF Request indication
+ * is enabled. [4] - Tx Request Mask. When =1 indicates that the Tx other
+ * Than PBF Request indication is disabled. When 0 indicates that the Tx
+ * Other Than PBF Request indication is enabled. [5] - Rx EEE LPI Status
+ * Mask. When 1 indicates that the RX EEE LPI Status indication is disabled.
+ * When 0 indicates that the RX LPI Status indication is enabled. In the
+ * EXIT Events Masks registers; this bit masks the falling edge detect of
+ * the EEE LPI Status (Rx EEE LPI is on - off). [6] - Tx Pause Mask. When 1
+ * indicates that the Tx Pause indication is disabled. When 0 indicates that
+ * the Tx Pause indication is enabled. [7] - BRB1 Empty Mask. When 1
+ * indicates that the BRB1 EMPTY indication is disabled. When 0 indicates
+ * that the BRB1 EMPTY indication is enabled. [8] - QM Idle Mask. When 1
+ * indicates that the QM IDLE indication is disabled. When 0 indicates that
+ * the QM IDLE indication is enabled. (One bit for both VOQ0 and VOQ1). [9]
+ * - QM LB Idle Mask. When 1 indicates that the QM IDLE indication for
+ * LOOPBACK is disabled. When 0 indicates that the QM IDLE indication for
+ * LOOPBACK is enabled. [10] - L1 Status Mask. When 1 indicates that the L1
+ * Status indication from the PCIE CORE is disabled. When 0 indicates that
+ * the RX EEE LPI Status indication from the PCIE CORE is enabled. In the
+ * EXIT Events Masks registers; this bit masks the falling edge detect of
+ * the L1 status (L1 is on - off). [11] - P0 E0 EEE EEE LPI REQ Mask. When
+ * =1 indicates that the P0 E0 EEE EEE LPI REQ indication is disabled. When
+ * =0 indicates that the P0 E0 EEE LPI REQ indication is enabled. [12] - P1
+ * E0 EEE LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication
+ * is disabled. When =0 indicates that the P0 EEE LPI REQ indication is
+ * enabled. [13] - P0 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ
+ * indication is enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates
+ * that the P0 EEE LPI REQ indication is disabled. When =0 indicates that
+ * the P0 EEE LPI REQ indication is enabled. [15] - L1 REQ Mask. When =1
+ * indicates that the L1 REQ indication is disabled. When =0 indicates that
+ * the L1 indication is enabled. [16] - Rx EEE LPI Status Edge Detect Mask.
+ * When =1 indicates that the RX EEE LPI Status Falling Edge Detect
+ * indication is disabled (Rx EEE LPI is on - off). When =0 indicates that
+ * the RX EEE LPI Status Falling Edge Detect indication is enabled (Rx EEE
+ * LPI is on - off). This bit is applicable only in the EXIT Events Masks
+ * registers. [17] - L1 Status Edge Detect Mask. When =1 indicates that the
+ * L1 Status Falling Edge Detect indication from the PCIE CORE is disabled
+ * (L1 is on - off). When =0 indicates that the L1 Status Falling Edge
+ * Detect indication from the PCIE CORE is enabled (L1 is on - off). This
+ * bit is applicable only in the EXIT Events Masks registers. Clock 25MHz.
+ * Reset on hard reset. */
+#define MISC_REG_CPMU_LP_MASK_EXT_P0 0xa888
+/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number
+ * of times that the SM entered the EEE LPI state. Clock 25MHz. Read only
+ * register. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 0xa8b8
/* [RW 32] The following driver registers(1...16) represent 16 drivers and
32 clients. Each client can be controlled by one driver only. One in each
bit represent that this driver control the appropriate client (Ex: bit 5
@@ -5372,6 +5487,8 @@
/* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC
* packets transmitted by the MAC */
#define XMAC_REG_CTRL_SA_LO 0x28
+#define XMAC_REG_EEE_CTRL 0xd8
+#define XMAC_REG_EEE_TIMERS_HI 0xe4
#define XMAC_REG_PAUSE_CTRL 0x68
#define XMAC_REG_PFC_CTRL 0x70
#define XMAC_REG_PFC_CTRL_HI 0x74
@@ -5796,6 +5913,7 @@
#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
#define MISC_REGISTERS_SPIO_SET_POS 8
#define HW_LOCK_MAX_RESOURCE_VALUE 31
+#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13
#define HW_LOCK_RESOURCE_DRV_FLAGS 10
#define HW_LOCK_RESOURCE_GPIO 1
#define HW_LOCK_RESOURCE_MDIO 0
@@ -6813,6 +6931,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014
#define MDIO_AN_REG_MASTER_STATUS 0x0021
+#define MDIO_AN_REG_EEE_ADV 0x003c
+#define MDIO_AN_REG_LP_EEE_ADV 0x003d
/*bcm*/
#define MDIO_AN_REG_LINK_STATUS 0x8304
#define MDIO_AN_REG_CL37_CL73 0x8370
@@ -6866,6 +6986,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
/* BCM84833 only */
+#define MDIO_84833_TOP_CFG_FW_REV 0x400f
+#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
#define MDIO_84833_SUPER_ISOLATE 0x8000
/* These are mailbox register set used by 84833. */
@@ -6993,11 +7115,13 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c
#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
+#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e
#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
+#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390
#define MDIO_WC_REG_TX66_CONTROL 0x83b0
#define MDIO_WC_REG_RX66_CONTROL 0x83c0
#define MDIO_WC_REG_RX66_SCW0 0x83c2
@@ -7036,6 +7160,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_GPHY_AUX_STATUS 0x19
#define MDIO_REG_INTR_STATUS 0x1a
#define MDIO_REG_INTR_MASK 0x1b
#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
@@ -7150,8 +7275,7 @@ Theotherbitsarereservedandshouldbezero*/
#define CDU_REGION_NUMBER_UCM_AG 4
-/**
- * String-to-compress [31:8] = CID (all 24 bits)
+/* String-to-compress [31:8] = CID (all 24 bits)
* String-to-compress [7:4] = Region
* String-to-compress [3:0] = Type
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 6c14b4a4e82c..734fd87cd990 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4107,6 +4107,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+ if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
+
if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
@@ -4115,6 +4119,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+ if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
+
/* Hashing mask */
data->rss_result_mask = p->rss_result_mask;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index efd80bdd0dfe..f83e033da6da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -167,9 +167,8 @@ typedef int (*exe_q_remove)(struct bnx2x *bp,
union bnx2x_qable_obj *o,
struct bnx2x_exeq_elem *elem);
-/**
- * @return positive is entry was optimized, 0 - if not, negative
- * in case of an error.
+/* Return positive if entry was optimized, 0 - if not, negative
+ * in case of an error.
*/
typedef int (*exe_q_optimize)(struct bnx2x *bp,
union bnx2x_qable_obj *o,
@@ -694,8 +693,10 @@ enum {
BNX2X_RSS_IPV4,
BNX2X_RSS_IPV4_TCP,
+ BNX2X_RSS_IPV4_UDP,
BNX2X_RSS_IPV6,
BNX2X_RSS_IPV6_TCP,
+ BNX2X_RSS_IPV6_UDP,
};
struct bnx2x_config_rss_params {
@@ -729,6 +730,10 @@ struct bnx2x_rss_config_obj {
/* Last configured indirection table */
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ /* flags for enabling 4-tuple hash on UDP */
+ u8 udp_rss_v4;
+ u8 udp_rss_v6;
+
int (*config_rss)(struct bnx2x *bp,
struct bnx2x_config_rss_params *p);
};
@@ -1280,12 +1285,11 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
struct bnx2x_rx_mode_obj *o);
/**
- * Send and RX_MODE ramrod according to the provided parameters.
+ * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
*
- * @param bp
- * @param p Command parameters
+ * @p: Command parameters
*
- * @return 0 - if operation was successfull and there is no pending completions,
+ * Return: 0 - if operation was successful and there are no pending completions,
* positive number - if there are pending completions,
* negative - if there were errors
*/
@@ -1302,7 +1306,11 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
bnx2x_obj_type type);
/**
- * Configure multicast MACs list. May configure a new list
+ * bnx2x_config_mcast - Configure multicast MACs list.
+ *
+ * @cmd: command to execute: BNX2X_MCAST_CMD_X
+ *
+ * May configure a new list
* provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
* (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
* configuration, continue to execute the pending commands
@@ -1313,11 +1321,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
* the current command will be enqueued to the tail of the
* pending commands list.
*
- * @param bp
- * @param p
- * @param command to execute: BNX2X_MCAST_CMD_X
- *
- * @return 0 is operation was sucessfull and there are no pending completions,
+ * Return: 0 if operation was successful and there are no pending completions,
* negative if there were errors, positive if there are pending
* completions.
*/
@@ -1342,21 +1346,17 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
bnx2x_obj_type type);
/**
- * Updates RSS configuration according to provided parameters.
- *
- * @param bp
- * @param p
+ * bnx2x_config_rss - Updates RSS configuration according to provided parameters
*
- * @return 0 in case of success
+ * Return: 0 in case of success
*/
int bnx2x_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *p);
/**
- * Return the current ind_table configuration.
+ * bnx2x_get_rss_ind_table - Return the current ind_table configuration.
*
- * @param bp
- * @param ind_table buffer to fill with the current indirection
+ * @ind_table: buffer to fill with the current indirection
* table content. Should be at least
* T_ETH_INDIRECTION_TABLE_SIZE bytes long.
*/
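/*
 * For illustration: the rewritten kernel-doc above documents a tri-state
 * return convention (0 = done, positive = completions still pending,
 * negative = error).  A caller of such an API typically branches on the
 * sign of the result, as sketched below; config_rx_mode_stub() is a
 * stand-in, not the real bnx2x_config_rx_mode().
 */
#include <stdio.h>

static int config_rx_mode_stub(int pending)
{
	return pending;		/* pretend this many completions remain */
}

static int apply_rx_mode(int pending)
{
	int rc = config_rx_mode_stub(pending);

	if (rc < 0) {
		fprintf(stderr, "rx mode config failed: %d\n", rc);
		return rc;
	}
	if (rc > 0)
		printf("ramrod sent, %d completion(s) still pending\n", rc);
	else
		printf("rx mode configured, nothing pending\n");
	return 0;
}

int main(void)
{
	apply_rx_mode(0);
	apply_rx_mode(2);
	return 0;
}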
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 1e2785cd11d0..667d89042d35 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -785,6 +785,10 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
pstats->host_port_stats_counter++;
+ if (CHIP_IS_E3(bp))
+ estats->eee_tx_lpi += REG_RD(bp,
+ MISC_REG_CPMU_LP_SM_ENT_CNT_P0);
+
if (!BP_NOMCP(bp)) {
u32 nig_timer_max =
SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
@@ -855,17 +859,22 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
struct tstorm_per_queue_stats *tclient =
&bp->fw_stats_data->queue_stats[i].
tstorm_queue_statistics;
- struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
+ struct tstorm_per_queue_stats *old_tclient =
+ &bnx2x_fp_stats(bp, fp)->old_tclient;
struct ustorm_per_queue_stats *uclient =
&bp->fw_stats_data->queue_stats[i].
ustorm_queue_statistics;
- struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
+ struct ustorm_per_queue_stats *old_uclient =
+ &bnx2x_fp_stats(bp, fp)->old_uclient;
struct xstorm_per_queue_stats *xclient =
&bp->fw_stats_data->queue_stats[i].
xstorm_queue_statistics;
- struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
- struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+ struct xstorm_per_queue_stats *old_xclient =
+ &bnx2x_fp_stats(bp, fp)->old_xclient;
+ struct bnx2x_eth_q_stats *qstats =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
u32 diff;
@@ -1048,8 +1057,11 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
tmp = estats->mac_discard;
- for_each_rx_queue(bp, i)
- tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+ for_each_rx_queue(bp, i) {
+ struct tstorm_per_queue_stats *old_tclient =
+ &bp->fp_stats[i].old_tclient;
+ tmp += le32_to_cpu(old_tclient->checksum_discard);
+ }
nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
nstats->tx_dropped = 0;
@@ -1099,9 +1111,9 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
int i;
for_each_queue(bp, i) {
- struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+ struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
struct bnx2x_eth_q_stats_old *qstats_old =
- &bp->fp[i].eth_q_stats_old;
+ &bp->fp_stats[i].eth_q_stats_old;
UPDATE_ESTAT_QSTAT(driver_xoff);
UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
@@ -1309,12 +1321,9 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
bnx2x_stats_comp(bp);
}
-/**
- * This function will prepare the statistics ramrod data the way
+/* This function will prepare the statistics ramrod data the way
* we will only have to increment the statistics counter and
* send the ramrod each time we have to.
- *
- * @param bp
*/
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
@@ -1428,7 +1437,7 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
query[first_queue_query_index + i];
cur_query_entry->kind = STATS_TYPE_QUEUE;
- cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
+ cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
cur_query_entry->address.hi =
cpu_to_le32(U64_HI(cur_data_offset));
@@ -1479,15 +1488,19 @@ void bnx2x_stats_init(struct bnx2x *bp)
/* function stats */
for_each_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
-
- memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
- memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
- memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
+
+ memset(&fp_stats->old_tclient, 0,
+ sizeof(fp_stats->old_tclient));
+ memset(&fp_stats->old_uclient, 0,
+ sizeof(fp_stats->old_uclient));
+ memset(&fp_stats->old_xclient, 0,
+ sizeof(fp_stats->old_xclient));
if (bp->stats_init) {
- memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
- memset(&fp->eth_q_stats_old, 0,
- sizeof(fp->eth_q_stats_old));
+ memset(&fp_stats->eth_q_stats, 0,
+ sizeof(fp_stats->eth_q_stats));
+ memset(&fp_stats->eth_q_stats_old, 0,
+ sizeof(fp_stats->eth_q_stats_old));
}
}
@@ -1529,8 +1542,10 @@ void bnx2x_save_statistics(struct bnx2x *bp)
/* save queue statistics */
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
- struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+ struct bnx2x_eth_q_stats *qstats =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
@@ -1569,7 +1584,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
struct bnx2x_eth_stats *estats = &bp->eth_stats;
struct per_queue_stats *fcoe_q_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX];
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
&fcoe_q_stats->tstorm_queue_statistics;
@@ -1586,8 +1601,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
memset(afex_stats, 0, sizeof(struct afex_stats));
for_each_eth_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
ADD_64(afex_stats->rx_unicast_bytes_hi,
qstats->total_unicast_bytes_received_hi,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 93e689fdfeda..24b8e505b60c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -203,6 +203,8 @@ struct bnx2x_eth_stats {
/* Recovery */
u32 recoverable_error;
u32 unrecoverable_error;
+ /* src: Clear-on-Read register; Will not survive PMF Migration */
+ u32 eee_tx_lpi;
};
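/*
 * For illustration: eee_tx_lpi is fed from a clear-on-read hardware counter
 * (see the bnx2x_hw_stats_update() hunk above), so every poll must add the
 * register value into a running software total or the count is lost across
 * reads.  The standalone model below uses a fake register; the names are
 * illustrative, not the driver's.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t hw_lpi_reg;	/* stands in for the LPI entry counter */

static uint32_t read_clear_on_read(void)
{
	uint32_t val = hw_lpi_reg;

	hw_lpi_reg = 0;		/* hardware clears the counter on read */
	return val;
}

int main(void)
{
	uint32_t eee_tx_lpi = 0;
	int poll;

	for (poll = 0; poll < 3; poll++) {
		hw_lpi_reg += 5;	/* pretend 5 LPI entries since last poll */
		eee_tx_lpi += read_clear_on_read();
	}
	printf("accumulated eee_tx_lpi = %u\n", (unsigned)eee_tx_lpi);
	return 0;
}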
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index c95e7b5e2b85..3b4fc61f24cf 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -256,11 +256,16 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
+ struct fcoe_capabilities *fcoe_cap =
+ &info.data.register_data.fcoe_features;
- if (reg)
+ if (reg) {
info.cmd = DRV_CTL_ULP_REGISTER_CMD;
- else
+ if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
+ memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
+ } else {
info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
+ }
info.data.ulp_type = ulp_type;
ethdev->drv_ctl(dev->netdev, &info);
@@ -286,6 +291,9 @@ static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
u32 i;
+ if (!cp->ctx_tbl)
+ return -EINVAL;
+
for (i = 0; i < cp->max_cid_space; i++) {
if (cp->ctx_tbl[i].cid == cid) {
*l5_cid = i;
@@ -534,7 +542,8 @@ int cnic_unregister_driver(int ulp_type)
}
if (atomic_read(&ulp_ops->ref_count) != 0)
- netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+ pr_warn("%s: Failed waiting for ref count to go to zero\n",
+ __func__);
return 0;
out_unlock:
@@ -611,6 +620,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
if (ulp_type == CNIC_ULP_ISCSI)
cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+ else if (ulp_type == CNIC_ULP_FCOE)
+ dev->fcoe_cap = NULL;
synchronize_rcu();
@@ -1053,12 +1064,13 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo = &udev->cnic_uinfo;
- uinfo->mem[0].addr = dev->netdev->base_addr;
+ uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
uinfo->mem[0].internal_addr = dev->regview;
- uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
uinfo->mem[0].memtype = UIO_MEM_PHYS;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+ uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
+ TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
@@ -1068,6 +1080,8 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->name = "bnx2_cnic";
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
+
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
PAGE_MASK;
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
@@ -2585,7 +2599,7 @@ static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
return;
}
- cqes[0] = (struct kcqe *) &kcqe;
+ cqes[0] = &kcqe;
cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
}
@@ -3213,6 +3227,9 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
u32 l5_cid;
struct cnic_local *cp = dev->cnic_priv;
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ break;
+
if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
@@ -3943,6 +3960,15 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
cnic_cm_upcall(cp, csk, opcode);
break;
+ case L5CM_RAMROD_CMD_ID_CLOSE:
+ if (l4kcqe->status != 0) {
+ netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
+ "status 0x%x\n", l4kcqe->status);
+ opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+ /* Fall through */
+ } else {
+ break;
+ }
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
@@ -4246,8 +4272,6 @@ static int cnic_cm_shutdown(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
int i;
- cp->stop_cm(dev);
-
if (!cp->csk_tbl)
return 0;
@@ -4665,9 +4689,9 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
cp->kcq1.sw_prod_idx = 0;
cp->kcq1.hw_prod_idx_ptr =
- (u16 *) &sblk->status_completion_producer_index;
+ &sblk->status_completion_producer_index;
- cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
+ cp->kcq1.status_idx_ptr = &sblk->status_idx;
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
@@ -4693,9 +4717,9 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
cp->kcq1.hw_prod_idx_ptr =
- (u16 *) &msblk->status_completion_producer_index;
- cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
- cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
+ &msblk->status_completion_producer_index;
+ cp->kcq1.status_idx_ptr = &msblk->status_idx;
+ cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
@@ -4977,8 +5001,14 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
cp->port_mode = CHIP_PORT_MODE_NONE;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
- u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
+ u32 val;
+ pci_read_config_dword(dev->pcidev, PCICFG_ME_REGISTER, &val);
+ cp->func = (u8) ((val & ME_REG_ABS_PF_NUM) >>
+ ME_REG_ABS_PF_NUM_SHIFT);
+ func = CNIC_FUNC(cp);
+
+ val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
if (!(val & 1))
val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
else
@@ -5283,6 +5313,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
i++;
}
cnic_shutdown_rings(dev);
+ cp->stop_cm(dev);
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
@@ -5512,9 +5543,7 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
rcu_read_unlock();
}
-/**
- * netdev event handler
- */
+/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
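/*
 * For illustration: the cnic.c hunks above add defensive guards -- the
 * cid-to-l5_cid lookup bails out when the context table is not allocated,
 * and cnic_ctl() skips work when the device is not up.  The standalone
 * model below shows the same guard-then-scan pattern; types, sizes and
 * errno choices are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>
#include <errno.h>

struct ctx_entry { uint32_t cid; };

static int get_l5_cid(const struct ctx_entry *tbl, size_t n, uint32_t cid,
		      uint32_t *l5_cid)
{
	size_t i;

	if (!tbl)			/* table not allocated yet */
		return -EINVAL;

	for (i = 0; i < n; i++) {
		if (tbl[i].cid == cid) {
			*l5_cid = (uint32_t)i;
			return 0;
		}
	}
	return -ENOENT;			/* no matching context id */
}

int main(void)
{
	struct ctx_entry tbl[] = { { 5 }, { 17 }, { 42 } };
	uint32_t l5_cid;

	printf("no table: %d\n", get_l5_cid(NULL, 0, 17, &l5_cid));
	if (get_l5_cid(tbl, 3, 17, &l5_cid) == 0)
		printf("cid 17 -> l5_cid %u\n", (unsigned)l5_cid);
	return 0;
}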
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 289274e546be..5cb88881bba1 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -12,8 +12,10 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.5.10"
-#define CNIC_MODULE_RELDATE "March 21, 2012"
+#include "bnx2x/bnx2x_mfw_req.h"
+
+#define CNIC_MODULE_VERSION "2.5.12"
+#define CNIC_MODULE_RELDATE "June 29, 2012"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -131,6 +133,11 @@ struct drv_ctl_l2_ring {
u32 cid;
};
+struct drv_ctl_register_data {
+ int ulp_type;
+ struct fcoe_capabilities fcoe_features;
+};
+
struct drv_ctl_info {
int cmd;
union {
@@ -138,6 +145,7 @@ struct drv_ctl_info {
struct drv_ctl_io io;
struct drv_ctl_l2_ring ring;
int ulp_type;
+ struct drv_ctl_register_data register_data;
char bytes[MAX_DRV_CTL_DATA];
} data;
};
@@ -305,6 +313,7 @@ struct cnic_dev {
int max_rdma_conn;
union drv_info_to_mcp *stats_addr;
+ struct fcoe_capabilities *fcoe_cap;
void *cnic_priv;
};
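/*
 * For illustration: cnic_if.h now carries FCoE capabilities inside the
 * ULP_REGISTER control message, and cnic_ulp_ctl() copies dev->fcoe_cap
 * into it only for an FCoE registration.  The standalone model below
 * reproduces that copy-if-present pattern with stand-in types; none of
 * the structures match the real ABI.
 */
#include <stdio.h>
#include <string.h>

struct fcoe_caps { unsigned int max_conns; };

struct ctl_info {
	int cmd;			/* 0 = unregister, 1 = register */
	int ulp_type;
	struct fcoe_caps fcoe_features;
};

enum { ULP_ISCSI = 1, ULP_FCOE = 2 };

static void fill_ulp_ctl(struct ctl_info *info, int ulp_type, int reg,
			 const struct fcoe_caps *fcoe_cap)
{
	memset(info, 0, sizeof(*info));
	info->cmd = reg;
	info->ulp_type = ulp_type;

	/* Only an FCoE registration carries capability data, and only if known. */
	if (reg && ulp_type == ULP_FCOE && fcoe_cap)
		memcpy(&info->fcoe_features, fcoe_cap,
		       sizeof(info->fcoe_features));
}

int main(void)
{
	struct fcoe_caps caps = { .max_conns = 64 };
	struct ctl_info info;

	fill_ulp_ctl(&info, ULP_FCOE, 1, &caps);
	printf("register FCoE, max_conns=%u\n", info.fcoe_features.max_conns);
	return 0;
}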
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e47ff8be1d7b..9a009fd6ea1b 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -44,6 +44,10 @@
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
+#if IS_ENABLED(CONFIG_HWMON)
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#endif
#include <net/checksum.h>
#include <net/ip.h>
@@ -298,6 +302,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -730,44 +735,131 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
-static void tg3_ape_send_event(struct tg3 *tp, u32 event)
+static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
- int i;
u32 apedata;
- /* NCSI does not support APE events */
- if (tg3_flag(tp, APE_HAS_NCSI))
- return;
+ while (timeout_us) {
+ if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
+ return -EBUSY;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ break;
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+
+ udelay(10);
+ timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
+ }
+
+ return timeout_us ? 0 : -EBUSY;
+}
+
+static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
+{
+ u32 i, apedata;
+
+ for (i = 0; i < timeout_us / 10; i++) {
+ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ break;
+
+ udelay(10);
+ }
+
+ return i == timeout_us / 10;
+}
+
+int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len)
+{
+ int err;
+ u32 i, bufoff, msgoff, maxlen, apedata;
+
+ if (!tg3_flag(tp, APE_HAS_NCSI))
+ return 0;
apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
if (apedata != APE_SEG_SIG_MAGIC)
- return;
+ return -ENODEV;
apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
if (!(apedata & APE_FW_STATUS_READY))
- return;
+ return -EAGAIN;
- /* Wait for up to 1 millisecond for APE to service previous event. */
- for (i = 0; i < 10; i++) {
- if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
- return;
+ bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
+ TG3_APE_SHMEM_BASE;
+ msgoff = bufoff + 2 * sizeof(u32);
+ maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
- apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+ while (len) {
+ u32 length;
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
- event | APE_EVENT_STATUS_EVENT_PENDING);
+ /* Cap xfer sizes to scratchpad limits. */
+ length = (len > maxlen) ? maxlen : len;
+ len -= length;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+ if (!(apedata & APE_FW_STATUS_READY))
+ return -EAGAIN;
+
+ /* Wait for up to 1 msec for APE to service previous event. */
+ err = tg3_ape_event_lock(tp, 1000);
+ if (err)
+ return err;
+
+ apedata = APE_EVENT_STATUS_DRIVER_EVNT |
+ APE_EVENT_STATUS_SCRTCHPD_READ |
+ APE_EVENT_STATUS_EVENT_PENDING;
+ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
+
+ tg3_ape_write32(tp, bufoff, base_off);
+ tg3_ape_write32(tp, bufoff + sizeof(u32), length);
tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- break;
+ base_off += length;
- udelay(100);
+ if (tg3_ape_wait_for_event(tp, 30000))
+ return -EAGAIN;
+
+ for (i = 0; length; i += 4, length -= 4) {
+ u32 val = tg3_ape_read32(tp, msgoff + i);
+ memcpy(data, &val, sizeof(u32));
+ data++;
+ }
}
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+ return 0;
+}
+
+static int tg3_ape_send_event(struct tg3 *tp, u32 event)
+{
+ int err;
+ u32 apedata;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+ if (apedata != APE_SEG_SIG_MAGIC)
+ return -EAGAIN;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+ if (!(apedata & APE_FW_STATUS_READY))
+ return -EAGAIN;
+
+ /* Wait for up to 1 millisecond for APE to service previous event. */
+ err = tg3_ape_event_lock(tp, 1000);
+ if (err)
+ return err;
+
+ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
+ event | APE_EVENT_STATUS_EVENT_PENDING);
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+
+ return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
@@ -9393,6 +9485,110 @@ static int tg3_init_hw(struct tg3 *tp, int reset_phy)
return tg3_reset_hw(tp, reset_phy);
}
+#if IS_ENABLED(CONFIG_HWMON)
+static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
+{
+ int i;
+
+ for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
+ u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
+
+ tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
+ off += len;
+
+ if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
+ !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
+ memset(ocir, 0, TG3_OCIR_LEN);
+ }
+}
+
+/* sysfs attributes for hwmon */
+static ssize_t tg3_show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ u32 temperature;
+
+ spin_lock_bh(&tp->lock);
+ tg3_ape_scratchpad_read(tp, &temperature, attr->index,
+ sizeof(temperature));
+ spin_unlock_bh(&tp->lock);
+ return sprintf(buf, "%u\n", temperature);
+}
+
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
+ TG3_TEMP_SENSOR_OFFSET);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
+ TG3_TEMP_CAUTION_OFFSET);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
+ TG3_TEMP_MAX_OFFSET);
+
+static struct attribute *tg3_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tg3_group = {
+ .attrs = tg3_attributes,
+};
+
+#endif
+
+static void tg3_hwmon_close(struct tg3 *tp)
+{
+#if IS_ENABLED(CONFIG_HWMON)
+ if (tp->hwmon_dev) {
+ hwmon_device_unregister(tp->hwmon_dev);
+ tp->hwmon_dev = NULL;
+ sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
+ }
+#endif
+}
+
+static void tg3_hwmon_open(struct tg3 *tp)
+{
+#if IS_ENABLED(CONFIG_HWMON)
+ int i, err;
+ u32 size = 0;
+ struct pci_dev *pdev = tp->pdev;
+ struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
+
+ tg3_sd_scan_scratchpad(tp, ocirs);
+
+ for (i = 0; i < TG3_SD_NUM_RECS; i++) {
+ if (!ocirs[i].src_data_length)
+ continue;
+
+ size += ocirs[i].src_hdr_length;
+ size += ocirs[i].src_data_length;
+ }
+
+ if (!size)
+ return;
+
+ /* Register hwmon sysfs hooks */
+ err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
+ return;
+ }
+
+ tp->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(tp->hwmon_dev)) {
+ tp->hwmon_dev = NULL;
+ dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
+ sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
+ }
+#endif
+}
+
+
#define TG3_STAT_ADD32(PSTAT, REG) \
do { u32 __val = tr32(REG); \
(PSTAT)->low += __val; \
@@ -9908,7 +10104,7 @@ static bool tg3_enable_msix(struct tg3 *tp)
int i, rc;
struct msix_entry msix_ent[tp->irq_max];
- tp->irq_cnt = num_online_cpus();
+ tp->irq_cnt = netif_get_num_default_rss_queues();
if (tp->irq_cnt > 1) {
/* We want as many rx rings enabled as there are cpus.
* In multiqueue MSI-X mode, the first MSI-X vector
@@ -10101,6 +10297,8 @@ static int tg3_open(struct net_device *dev)
tg3_phy_start(tp);
+ tg3_hwmon_open(tp);
+
tg3_full_lock(tp, 0);
tg3_timer_start(tp);
@@ -10150,6 +10348,8 @@ static int tg3_close(struct net_device *dev)
tg3_timer_stop(tp);
+ tg3_hwmon_close(tp);
+
tg3_phy_stop(tp);
tg3_full_lock(tp, 1);
@@ -13857,14 +14057,9 @@ static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
}
}
-static void __devinit tg3_read_dash_ver(struct tg3 *tp)
+static void __devinit tg3_probe_ncsi(struct tg3 *tp)
{
- int vlen;
u32 apedata;
- char *fwtype;
-
- if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
- return;
apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
if (apedata != APE_SEG_SIG_MAGIC)
@@ -13874,14 +14069,22 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
if (!(apedata & APE_FW_STATUS_READY))
return;
+ if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
+ tg3_flag_set(tp, APE_HAS_NCSI);
+}
+
+static void __devinit tg3_read_dash_ver(struct tg3 *tp)
+{
+ int vlen;
+ u32 apedata;
+ char *fwtype;
+
apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
- if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
- tg3_flag_set(tp, APE_HAS_NCSI);
+ if (tg3_flag(tp, APE_HAS_NCSI))
fwtype = "NCSI";
- } else {
+ else
fwtype = "DASH";
- }
vlen = strlen(tp->fw_ver);
@@ -13915,20 +14118,17 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
tg3_read_sb_ver(tp, val);
else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
tg3_read_hwsb_ver(tp);
- else
- return;
-
- if (vpd_vers)
- goto done;
- if (tg3_flag(tp, ENABLE_APE)) {
- if (tg3_flag(tp, ENABLE_ASF))
- tg3_read_dash_ver(tp);
- } else if (tg3_flag(tp, ENABLE_ASF)) {
- tg3_read_mgmtfw_ver(tp);
+ if (tg3_flag(tp, ENABLE_ASF)) {
+ if (tg3_flag(tp, ENABLE_APE)) {
+ tg3_probe_ncsi(tp);
+ if (!vpd_vers)
+ tg3_read_dash_ver(tp);
+ } else if (!vpd_vers) {
+ tg3_read_mgmtfw_ver(tp);
+ }
}
-done:
tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
@@ -14168,7 +14368,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (bridge->subordinate &&
(bridge->subordinate->number <=
tp->pdev->bus->number) &&
- (bridge->subordinate->subordinate >=
+ (bridge->subordinate->busn_res.end >=
tp->pdev->bus->number)) {
tg3_flag_set(tp, 5701_DMA_BUG);
pci_dev_put(bridge);
@@ -14196,7 +14396,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (bridge && bridge->subordinate &&
(bridge->subordinate->number <=
tp->pdev->bus->number) &&
- (bridge->subordinate->subordinate >=
+ (bridge->subordinate->busn_res.end >=
tp->pdev->bus->number)) {
tg3_flag_set(tp, 40BIT_DMA_BUG);
pci_dev_put(bridge);
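/*
 * For illustration: tg3_ape_scratchpad_read() above pulls data out of APE
 * shared memory in chunks no larger than the advertised message-buffer
 * length, re-arming an APE event for each chunk.  The standalone model
 * below keeps only the chunking arithmetic; the "device" is a plain array
 * and there is no locking or event handshake.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SCRATCHPAD_MAXLEN 16u		/* bytes per transfer, illustrative */

static const uint8_t device_mem[64] = { [0] = 0xaa, [63] = 0x55 };

static int scratchpad_read(uint8_t *data, uint32_t base_off, uint32_t len)
{
	while (len) {
		/* Cap transfer sizes to the scratchpad limit. */
		uint32_t length = (len > SCRATCHPAD_MAXLEN) ?
				  SCRATCHPAD_MAXLEN : len;

		if (base_off + length > sizeof(device_mem))
			return -1;	/* would run past the end */

		/* one "APE event" per chunk in the real driver */
		memcpy(data, device_mem + base_off, length);

		data += length;
		base_off += length;
		len -= length;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[64];

	if (scratchpad_read(buf, 0, sizeof(buf)) == 0)
		printf("first byte 0x%02x, last byte 0x%02x\n",
		       (unsigned)buf[0], (unsigned)buf[63]);
	return 0;
}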
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 93865f899a4f..a1b75cd67b9d 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2311,10 +2311,11 @@
#define APE_LOCK_REQ_DRIVER 0x00001000
#define TG3_APE_LOCK_GRANT 0x004c
#define APE_LOCK_GRANT_DRIVER 0x00001000
-#define TG3_APE_SEG_SIG 0x4000
-#define APE_SEG_SIG_MAGIC 0x41504521
/* APE shared memory. Accessible through BAR1 */
+#define TG3_APE_SHMEM_BASE 0x4000
+#define TG3_APE_SEG_SIG 0x4000
+#define APE_SEG_SIG_MAGIC 0x41504521
#define TG3_APE_FW_STATUS 0x400c
#define APE_FW_STATUS_READY 0x00000100
#define TG3_APE_FW_FEATURES 0x4010
@@ -2327,6 +2328,8 @@
#define APE_FW_VERSION_REVMSK 0x0000ff00
#define APE_FW_VERSION_REVSFT 8
#define APE_FW_VERSION_BLDMSK 0x000000ff
+#define TG3_APE_SEG_MSG_BUF_OFF 0x401c
+#define TG3_APE_SEG_MSG_BUF_LEN 0x4020
#define TG3_APE_HOST_SEG_SIG 0x4200
#define APE_HOST_SEG_SIG_MAGIC 0x484f5354
#define TG3_APE_HOST_SEG_LEN 0x4204
@@ -2353,6 +2356,8 @@
#define APE_EVENT_STATUS_DRIVER_EVNT 0x00000010
#define APE_EVENT_STATUS_STATE_CHNGE 0x00000500
+#define APE_EVENT_STATUS_SCRTCHPD_READ 0x00001600
+#define APE_EVENT_STATUS_SCRTCHPD_WRITE 0x00001700
#define APE_EVENT_STATUS_STATE_START 0x00010000
#define APE_EVENT_STATUS_STATE_UNLOAD 0x00020000
#define APE_EVENT_STATUS_STATE_WOL 0x00030000
@@ -2671,6 +2676,40 @@ struct tg3_hw_stats {
u8 __reserved4[0xb00-0x9c8];
};
+#define TG3_SD_NUM_RECS 3
+#define TG3_OCIR_LEN (sizeof(struct tg3_ocir))
+#define TG3_OCIR_SIG_MAGIC 0x5253434f
+#define TG3_OCIR_FLAG_ACTIVE 0x00000001
+
+#define TG3_TEMP_CAUTION_OFFSET 0xc8
+#define TG3_TEMP_MAX_OFFSET 0xcc
+#define TG3_TEMP_SENSOR_OFFSET 0xd4
+
+
+struct tg3_ocir {
+ u32 signature;
+ u16 version_flags;
+ u16 refresh_int;
+ u32 refresh_tmr;
+ u32 update_tmr;
+ u32 dst_base_addr;
+ u16 src_hdr_offset;
+ u16 src_hdr_length;
+ u16 src_data_offset;
+ u16 src_data_length;
+ u16 dst_hdr_offset;
+ u16 dst_data_offset;
+ u16 dst_reg_upd_offset;
+ u16 dst_sem_offset;
+ u32 reserved1[2];
+ u32 port0_flags;
+ u32 port1_flags;
+ u32 port2_flags;
+ u32 port3_flags;
+ u32 reserved2[1];
+};
+
+
/* 'mapping' is superfluous as the chip does not write into
* the tx/rx post rings so we could just fetch it from there.
* But the cache behavior is better how we are doing it now.
@@ -3206,6 +3245,10 @@ struct tg3 {
const char *fw_needed;
const struct firmware *fw;
u32 fw_len; /* includes BSS */
+
+#if IS_ENABLED(CONFIG_HWMON)
+ struct device *hwmon_dev;
+#endif
};
#endif /* !(_T3_H) */
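/*
 * For illustration: with the hwmon hooks added above, tg3 exposes
 * temp1_input, temp1_crit and temp1_max attributes through sysfs.  The
 * small userspace reader below is generic; the default path is an
 * assumption (the hwmonN index, and whether the attribute sits in the
 * hwmon directory or its parent device directory, varies by kernel).
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *path = (argc > 1) ? argv[1]
				      : "/sys/class/hwmon/hwmon0/temp1_input";
	FILE *f = fopen(path, "r");
	unsigned int temp;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u", &temp) == 1)
		printf("%s: %u\n", path, temp);
	fclose(f);
	return 0;
}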