Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/3com/3c509.c | 6
-rw-r--r-- drivers/net/ethernet/3com/3c515.c | 25
-rw-r--r-- drivers/net/ethernet/3com/3c59x.c | 27
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 6
-rw-r--r-- drivers/net/ethernet/amd/nmclan_cs.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 11
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dcb.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 6
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-ptp.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h | 2
-rw-r--r-- drivers/net/ethernet/arc/Kconfig | 18
-rw-r--r-- drivers/net/ethernet/arc/Makefile | 4
-rw-r--r-- drivers/net/ethernet/arc/emac.h | 8
-rw-r--r-- drivers/net/ethernet/arc/emac_arc.c | 95
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 129
-rw-r--r-- drivers/net/ethernet/arc/emac_mdio.c | 7
-rw-r--r-- drivers/net/ethernet/arc/emac_rockchip.c | 229
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 18
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 93
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 140
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 19
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h | 14
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 44
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 222
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 257
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 1048
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 178
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 169
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 85
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 48
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 9
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 18
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna_enet.c | 9
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 4
-rw-r--r-- drivers/net/ethernet/cadence/macb.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 27
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 3
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic.h | 1
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_ethtool.c | 39
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 50
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_dev.c | 3
-rw-r--r-- drivers/net/ethernet/dec/tulip/dmfe.c | 152
-rw-r--r-- drivers/net/ethernet/ec_bhf.c | 101
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 30
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 182
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.h | 48
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 173
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_hw.h | 12
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 368
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_roce.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 201
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 1008
-rw-r--r-- drivers/net/ethernet/hp/hp100.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000.h | 19
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 187
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_hw.c | 78
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_hw.h | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 498
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 9
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 8
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 10
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 70
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 259
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 90
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 50
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 9
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 8
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 8
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.c | 31
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.h | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 24
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 204
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 116
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 160
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 310
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 14
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 7
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/vf.c | 15
-rw-r--r-- drivers/net/ethernet/marvell/skge.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 61
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 6
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-main.c | 2
-rw-r--r-- drivers/net/ethernet/nvidia/forcedeth.c | 2
-rw-r--r-- drivers/net/ethernet/packetengines/yellowfin.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 218
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 156
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 10
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 4
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 472
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 62
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 18
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 4
-rw-r--r-- drivers/net/ethernet/sun/cassini.c | 2
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 4
-rw-r--r-- drivers/net/ethernet/sun/sungem.c | 34
-rw-r--r-- drivers/net/ethernet/sun/sunvnet.c | 75
-rw-r--r-- drivers/net/ethernet/sun/sunvnet.h | 4
-rw-r--r-- drivers/net/ethernet/ti/cpmac.c | 1
-rw-r--r-- drivers/net/ethernet/ti/cpsw-phy-sel.c | 1
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 60
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 1
-rw-r--r-- drivers/net/ethernet/ti/davinci_mdio.c | 1
-rw-r--r-- drivers/net/ethernet/tile/tilepro.c | 8
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 1
147 files changed, 6321 insertions, 2667 deletions
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index a968654b631d..4547a1b8b958 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -695,9 +695,9 @@ el3_tx_timeout (struct net_device *dev)
int ioaddr = dev->base_addr;
/* Transmitter timeout, serious problems. */
- pr_warning("%s: transmit timed out, Tx_status %2.2x status %4.4x Tx FIFO room %d.\n",
- dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
- inw(ioaddr + TX_FREE));
+ pr_warn("%s: transmit timed out, Tx_status %2.2x status %4.4x Tx FIFO room %d\n",
+ dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
+ inw(ioaddr + TX_FREE));
dev->stats.tx_errors++;
dev->trans_start = jiffies; /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 94c656f5a05d..942fb0d5aace 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -515,7 +515,7 @@ static struct net_device *corkscrew_scan(int unit)
if (pnp_device_attach(idev) < 0)
continue;
if (pnp_activate_dev(idev) < 0) {
- pr_warning("pnp activate failed (out of resources?)\n");
+ pr_warn("pnp activate failed (out of resources?)\n");
pnp_device_detach(idev);
continue;
}
@@ -659,7 +659,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
pr_cont(", IRQ %d\n", dev->irq);
/* Tell them about an invalid IRQ. */
if (corkscrew_debug && (dev->irq <= 0 || dev->irq > 15))
- pr_warning(" *** Warning: this IRQ is unlikely to work! ***\n");
+ pr_warn(" *** Warning: this IRQ is unlikely to work! ***\n");
{
static const char * const ram_split[] = {
@@ -967,13 +967,13 @@ static void corkscrew_timeout(struct net_device *dev)
struct corkscrew_private *vp = netdev_priv(dev);
int ioaddr = dev->base_addr;
- pr_warning("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
- dev->name, inb(ioaddr + TxStatus),
- inw(ioaddr + EL3_STATUS));
+ pr_warn("%s: transmit timed out, tx_status %2.2x status %4.4x\n",
+ dev->name, inb(ioaddr + TxStatus),
+ inw(ioaddr + EL3_STATUS));
/* Slight code bloat to be user friendly. */
if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
- pr_warning("%s: Transmitter encountered 16 collisions --"
- " network cable problem?\n", dev->name);
+ pr_warn("%s: Transmitter encountered 16 collisions -- network cable problem?\n",
+ dev->name);
#ifndef final_version
pr_debug(" Flags; bus-master %d, full %d; dirty %d current %d.\n",
vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx,
@@ -1382,13 +1382,10 @@ static int boomerang_rx(struct net_device *dev)
temp = skb_put(skb, pkt_len);
/* Remove this checking code for final release. */
if (isa_bus_to_virt(vp->rx_ring[entry].addr) != temp)
- pr_warning("%s: Warning -- the skbuff addresses do not match"
- " in boomerang_rx: %p vs. %p / %p.\n",
- dev->name,
- isa_bus_to_virt(vp->
- rx_ring[entry].
- addr), skb->head,
- temp);
+ pr_warn("%s: Warning -- the skbuff addresses do not match in boomerang_rx: %p vs. %p / %p\n",
+ dev->name,
+ isa_bus_to_virt(vp->rx_ring[entry].addr),
+ skb->head, temp);
rx_nocopy++;
}
skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 8ca49f04acec..86e621142d5b 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1310,8 +1310,8 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
pr_cont(", IRQ %d\n", dev->irq);
/* Tell them about an invalid IRQ. */
if (dev->irq <= 0 || dev->irq >= nr_irqs)
- pr_warning(" *** Warning: IRQ %d is unlikely to work! ***\n",
- dev->irq);
+ pr_warn(" *** Warning: IRQ %d is unlikely to work! ***\n",
+ dev->irq);
step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1;
if (print_info) {
@@ -1425,7 +1425,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
}
mii_preamble_required--;
if (phy_idx == 0) {
- pr_warning(" ***WARNING*** No MII transceivers found!\n");
+ pr_warn(" ***WARNING*** No MII transceivers found!\n");
vp->phys[0] = 24;
} else {
vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE);
@@ -1566,8 +1566,7 @@ vortex_up(struct net_device *dev)
pci_restore_state(VORTEX_PCI(vp));
err = pci_enable_device(VORTEX_PCI(vp));
if (err) {
- pr_warning("%s: Could not enable device\n",
- dev->name);
+ pr_warn("%s: Could not enable device\n", dev->name);
goto err_out;
}
}
@@ -2007,8 +2006,8 @@ vortex_error(struct net_device *dev, int status)
/* This occurs when we have the wrong media type! */
if (DoneDidThat == 0 &&
ioread16(ioaddr + EL3_STATUS) & StatsFull) {
- pr_warning("%s: Updating statistics failed, disabling "
- "stats as an interrupt source.\n", dev->name);
+ pr_warn("%s: Updating statistics failed, disabling stats as an interrupt source\n",
+ dev->name);
iowrite16(SetIntrEnb |
(window_read16(vp, 5, 10) & ~StatsFull),
ioaddr + EL3_CMD);
@@ -2148,8 +2147,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
if (vortex_debug > 0)
- pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
- dev->name);
+ pr_warn("%s: BUG! Tx Ring full, refusing to send buffer\n",
+ dev->name);
netif_stop_queue(dev);
return NETDEV_TX_BUSY;
}
@@ -2343,7 +2342,7 @@ vortex_interrupt(int irq, void *dev_id)
}
if (--work_done < 0) {
- pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
+ pr_warn("%s: Too much work in interrupt, status %4.4x\n",
dev->name, status);
/* Disable all pending interrupts. */
do {
@@ -2476,7 +2475,7 @@ boomerang_interrupt(int irq, void *dev_id)
vortex_error(dev, status);
if (--work_done < 0) {
- pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
+ pr_warn("%s: Too much work in interrupt, status %4.4x\n",
dev->name, status);
/* Disable all pending interrupts. */
do {
@@ -2652,7 +2651,8 @@ boomerang_rx(struct net_device *dev)
if (skb == NULL) {
static unsigned long last_jif;
if (time_after(jiffies, last_jif + 10 * HZ)) {
- pr_warning("%s: memory shortage\n", dev->name);
+ pr_warn("%s: memory shortage\n",
+ dev->name);
last_jif = jiffies;
}
if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
@@ -2751,7 +2751,8 @@ vortex_close(struct net_device *dev)
if (vp->rx_csumhits &&
(vp->drv_flags & HAS_HWCKSM) == 0 &&
(vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
- pr_warning("%s supports hardware checksums, and we're not using them!\n", dev->name);
+ pr_warn("%s supports hardware checksums, and we're not using them!\n",
+ dev->name);
}
#endif
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 31c48a7ac2b6..6c323f4f457b 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1140,7 +1140,6 @@ static const struct net_device_ops au1000_netdev_ops = {
static int au1000_probe(struct platform_device *pdev)
{
- static unsigned version_printed;
struct au1000_private *aup = NULL;
struct au1000_eth_platform_data *pd;
struct net_device *dev = NULL;
@@ -1371,9 +1370,8 @@ static int au1000_probe(struct platform_device *pdev)
netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
(unsigned long)base->start, irq);
- if (version_printed++ == 0)
- pr_info("%s version %s %s\n",
- DRV_NAME, DRV_VERSION, DRV_AUTHOR);
+
+ pr_info_once("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
return 0;
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index abf3b1581c82..5b22764ba88d 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -621,7 +621,7 @@ static int nmclan_config(struct pcmcia_device *link)
ret = pcmcia_request_io(link);
if (ret)
goto failed;
- ret = pcmcia_request_exclusive_irq(link, mace_interrupt);
+ ret = pcmcia_request_irq(link, mace_interrupt);
if (ret)
goto failed;
ret = pcmcia_enable_device(link);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index cc25a3a9e7cf..caade30820d5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -271,7 +271,6 @@
#define DMA_PBL_X8_DISABLE 0x00
#define DMA_PBL_X8_ENABLE 0x01
-
/* MAC register offsets */
#define MAC_TCR 0x0000
#define MAC_RCR 0x0004
@@ -792,7 +791,6 @@
#define MTL_Q_DISABLED 0x00
#define MTL_Q_ENABLED 0x02
-
/* MTL traffic class register offsets
* Multiple traffic classes can be active. The first class has registers
* that begin at 0x1100. Each subsequent queue has registers that
@@ -815,7 +813,6 @@
#define MTL_TSA_SP 0x00
#define MTL_TSA_ETS 0x02
-
/* PCS MMD select register offset
* The MMD select register is used for accessing PCS registers
* when the underlying APB3 interface is using indirect addressing.
@@ -825,7 +822,6 @@
*/
#define PCS_MMD_SELECT 0xff
-
/* Descriptor/Packet entry bit positions and sizes */
#define RX_PACKET_ERRORS_CRC_INDEX 2
#define RX_PACKET_ERRORS_CRC_WIDTH 1
@@ -929,7 +925,6 @@
#define MDIO_AN_COMP_STAT 0x0030
#endif
-
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
@@ -957,7 +952,6 @@ do { \
((0x1 << (_width)) - 1)) << (_index))); \
} while (0)
-
/* Bit setting and getting macros based on register fields
* The get macro uses the bit field definitions formed using the input
* names to extract the current bit field value from within the
@@ -986,7 +980,6 @@ do { \
_prefix##_##_field##_INDEX, \
_prefix##_##_field##_WIDTH, (_val))
-
/* Macros for reading or writing registers
* The ioread macros will get bit fields or full values using the
* register definitions formed using the input names
@@ -1014,7 +1007,6 @@ do { \
XGMAC_IOWRITE((_pdata), _reg, reg_val); \
} while (0)
-
/* Macros for reading or writing MTL queue or traffic class registers
* Similar to the standard read and write macros except that the
* base register value is calculated by the queue or traffic class number
@@ -1041,7 +1033,6 @@ do { \
XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
} while (0)
-
/* Macros for reading or writing DMA channel registers
* Similar to the standard read and write macros except that the
* base register value is obtained from the ring
@@ -1066,7 +1057,6 @@ do { \
XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
} while (0)
-
/* Macros for building, reading or writing register values or bits
* within the register values of XPCS registers.
*/
@@ -1076,7 +1066,6 @@ do { \
#define XPCS_IOREAD(_pdata, _off) \
ioread32((_pdata)->xpcs_regs + (_off))
-
/* Macros for building, reading or writing register values or bits
* using MDIO. Different from above because of the use of standardized
* Linux include values. No shifting is performed with the bit
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index 7d6a49b24321..8a50b01c2686 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -120,7 +120,6 @@
#include "xgbe.h"
#include "xgbe-common.h"
-
static int xgbe_dcb_ieee_getets(struct net_device *netdev,
struct ieee_ets *ets)
{
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index a3c11355a34d..76479d04b903 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -121,7 +121,6 @@
#include "xgbe.h"
#include "xgbe-common.h"
-
static ssize_t xgbe_common_read(char __user *buffer, size_t count,
loff_t *ppos, unsigned int value)
{
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 1c5d62e8dab6..6fc5da01437d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -117,7 +117,6 @@
#include "xgbe.h"
#include "xgbe-common.h"
-
static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
@@ -524,11 +523,8 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
/* Allocate skb & assign to each rdesc */
skb = dev_alloc_skb(pdata->rx_buf_size);
- if (skb == NULL) {
- netdev_alert(pdata->netdev,
- "failed to allocate skb\n");
+ if (skb == NULL)
break;
- }
skb_dma = dma_map_single(pdata->dev, skb->data,
pdata->rx_buf_size, DMA_FROM_DEVICE);
if (dma_mapping_error(pdata->dev, skb_dma)) {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index ea273836d999..9da3a03e8c07 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -122,7 +122,6 @@
#include "xgbe.h"
#include "xgbe-common.h"
-
static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
unsigned int usec)
{
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b26d75856553..29554992215a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -126,7 +126,6 @@
#include "xgbe.h"
#include "xgbe-common.h"
-
static int xgbe_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 46f613028e9c..49508ec98b72 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -121,7 +121,6 @@
#include "xgbe.h"
#include "xgbe-common.h"
-
struct xgbe_stats {
char stat_string[ETH_GSTRING_LEN];
int stat_size;
@@ -173,6 +172,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};
+
#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index bdf9cfa70e88..f5a8fa03921a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -128,7 +128,6 @@
#include "xgbe.h"
#include "xgbe-common.h"
-
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 6d2221e023f4..363b210560f3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -123,7 +123,6 @@
#include "xgbe.h"
#include "xgbe-common.h"
-
static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
{
struct xgbe_prv_data *pdata = mii->priv;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index 37e64cfa5718..a1bf9d1cdae1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -122,7 +122,6 @@
#include "xgbe.h"
#include "xgbe-common.h"
-
static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
{
struct xgbe_prv_data *pdata = container_of(cc,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index e9fe6e6ddcc3..789957d43a13 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -128,7 +128,6 @@
#include <linux/net_tstamp.h>
#include <net/dcbnl.h>
-
#define XGBE_DRV_NAME "amd-xgbe"
#define XGBE_DRV_VERSION "1.0.0-a"
#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
@@ -199,7 +198,6 @@
((_ring)->rdata + \
((_idx) & ((_ring)->rdesc_count - 1)))
-
/* Default coalescing parameters */
#define XGMAC_INIT_DMA_TX_USECS 50
#define XGMAC_INIT_DMA_TX_FRAMES 25
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 514c57fd26f1..8e262e2b39b6 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -17,10 +17,14 @@ config NET_VENDOR_ARC
if NET_VENDOR_ARC
-config ARC_EMAC
- tristate "ARC EMAC support"
+config ARC_EMAC_CORE
+ tristate
select MII
select PHYLIB
+
+config ARC_EMAC
+ tristate "ARC EMAC support"
+ select ARC_EMAC_CORE
depends on OF_IRQ
depends on OF_NET
---help---
@@ -28,4 +32,14 @@ config ARC_EMAC
non-standard on-chip ethernet device ARC EMAC 10/100 is used.
Say Y here if you have such a board. If unsure, say N.
+config EMAC_ROCKCHIP
+ tristate "Rockchip EMAC support"
+ select ARC_EMAC_CORE
+ depends on OF_IRQ && OF_NET && REGULATOR
+ ---help---
+ Support for Rockchip RK3066/RK3188 EMAC ethernet controllers.
+ This selects Rockchip SoC glue layer support for the
+ emac device driver. This driver is used for RK3066/RK3188
+ EMAC ethernet controller.
+
endif # NET_VENDOR_ARC
diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile
index 00c8657637d5..79108af553fb 100644
--- a/drivers/net/ethernet/arc/Makefile
+++ b/drivers/net/ethernet/arc/Makefile
@@ -3,4 +3,6 @@
#
arc_emac-objs := emac_main.o emac_mdio.o
-obj-$(CONFIG_ARC_EMAC) += arc_emac.o
+obj-$(CONFIG_ARC_EMAC_CORE) += arc_emac.o
+obj-$(CONFIG_ARC_EMAC) += emac_arc.o
+obj-$(CONFIG_EMAC_ROCKCHIP) += emac_rockchip.o
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index 36cc9bd07c47..dae1ac300a49 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -123,6 +123,10 @@ struct buffer_state {
* @speed: PHY's last set speed.
*/
struct arc_emac_priv {
+ const char *drv_name;
+ const char *drv_version;
+ void (*set_mac_speed)(void *priv, unsigned int speed);
+
/* Devices */
struct device *dev;
struct phy_device *phy_dev;
@@ -204,7 +208,9 @@ static inline void arc_reg_clr(struct arc_emac_priv *priv, int reg, int mask)
arc_reg_set(priv, reg, value & ~mask);
}
-int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv);
+int arc_mdio_probe(struct arc_emac_priv *priv);
int arc_mdio_remove(struct arc_emac_priv *priv);
+int arc_emac_probe(struct net_device *ndev, int interface);
+int arc_emac_remove(struct net_device *ndev);
#endif /* ARC_EMAC_H */
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
new file mode 100644
index 000000000000..f9cb99bfb511
--- /dev/null
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -0,0 +1,95 @@
+/**
+ * emac_arc.c - ARC EMAC specific glue layer
+ *
+ * Copyright (C) 2014 Romain Perier
+ *
+ * Romain Perier <romain.perier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+
+#include "emac.h"
+
+#define DRV_NAME "emac_arc"
+#define DRV_VERSION "1.0"
+
+static int emac_arc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct arc_emac_priv *priv;
+ int interface, err;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ ndev = alloc_etherdev(sizeof(struct arc_emac_priv));
+ if (!ndev)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+
+ priv = netdev_priv(ndev);
+ priv->drv_name = DRV_NAME;
+ priv->drv_version = DRV_VERSION;
+
+ interface = of_get_phy_mode(dev->of_node);
+ if (interface < 0)
+ interface = PHY_INTERFACE_MODE_MII;
+
+ priv->clk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to retrieve host clock from device tree\n");
+ err = -EINVAL;
+ goto out_netdev;
+ }
+
+ err = arc_emac_probe(ndev, interface);
+out_netdev:
+ if (err)
+ free_netdev(ndev);
+ return err;
+}
+
+static int emac_arc_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ int err;
+
+ err = arc_emac_remove(ndev);
+ free_netdev(ndev);
+ return err;
+}
+
+static const struct of_device_id emac_arc_dt_ids[] = {
+ { .compatible = "snps,arc-emac" },
+ { /* Sentinel */ }
+};
+
+static struct platform_driver emac_arc_driver = {
+ .probe = emac_arc_probe,
+ .remove = emac_arc_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = emac_arc_dt_ids,
+ },
+};
+
+module_platform_driver(emac_arc_driver);
+
+MODULE_AUTHOR("Romain Perier <romain.perier@gmail.com>");
+MODULE_DESCRIPTION("ARC EMAC platform driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 5919394d9f58..abe1eabc0171 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -26,8 +26,6 @@
#include "emac.h"
-#define DRV_NAME "arc_emac"
-#define DRV_VERSION "1.0"
/**
* arc_emac_tx_avail - Return the number of available slots in the tx ring.
@@ -61,6 +59,8 @@ static void arc_emac_adjust_link(struct net_device *ndev)
if (priv->speed != phy_dev->speed) {
priv->speed = phy_dev->speed;
state_changed = 1;
+ if (priv->set_mac_speed)
+ priv->set_mac_speed(priv, priv->speed);
}
if (priv->duplex != phy_dev->duplex) {
@@ -131,8 +131,10 @@ static int arc_emac_set_settings(struct net_device *ndev,
static void arc_emac_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ struct arc_emac_priv *priv = netdev_priv(ndev);
+
+ strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
+ strlcpy(info->version, priv->drv_version, sizeof(info->version));
}
static const struct ethtool_ops arc_emac_ethtool_ops = {
@@ -692,46 +694,38 @@ static const struct net_device_ops arc_emac_netdev_ops = {
#endif
};
-static int arc_emac_probe(struct platform_device *pdev)
+int arc_emac_probe(struct net_device *ndev, int interface)
{
+ struct device *dev = ndev->dev.parent;
struct resource res_regs;
struct device_node *phy_node;
struct arc_emac_priv *priv;
- struct net_device *ndev;
const char *mac_addr;
unsigned int id, clock_frequency, irq;
int err;
- if (!pdev->dev.of_node)
- return -ENODEV;
/* Get PHY from device tree */
- phy_node = of_parse_phandle(pdev->dev.of_node, "phy", 0);
+ phy_node = of_parse_phandle(dev->of_node, "phy", 0);
if (!phy_node) {
- dev_err(&pdev->dev, "failed to retrieve phy description from device tree\n");
+ dev_err(dev, "failed to retrieve phy description from device tree\n");
return -ENODEV;
}
/* Get EMAC registers base address from device tree */
- err = of_address_to_resource(pdev->dev.of_node, 0, &res_regs);
+ err = of_address_to_resource(dev->of_node, 0, &res_regs);
if (err) {
- dev_err(&pdev->dev, "failed to retrieve registers base from device tree\n");
+ dev_err(dev, "failed to retrieve registers base from device tree\n");
return -ENODEV;
}
/* Get IRQ from device tree */
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq) {
- dev_err(&pdev->dev, "failed to retrieve <irq> value from device tree\n");
+ dev_err(dev, "failed to retrieve <irq> value from device tree\n");
return -ENODEV;
}
- ndev = alloc_etherdev(sizeof(struct arc_emac_priv));
- if (!ndev)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &arc_emac_netdev_ops;
ndev->ethtool_ops = &arc_emac_ethtool_ops;
@@ -740,60 +734,57 @@ static int arc_emac_probe(struct platform_device *pdev)
ndev->flags &= ~IFF_MULTICAST;
priv = netdev_priv(ndev);
- priv->dev = &pdev->dev;
+ priv->dev = dev;
- priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
+ priv->regs = devm_ioremap_resource(dev, &res_regs);
if (IS_ERR(priv->regs)) {
- err = PTR_ERR(priv->regs);
- goto out_netdev;
+ return PTR_ERR(priv->regs);
}
- dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs);
+ dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);
- priv->clk = of_clk_get(pdev->dev.of_node, 0);
- if (IS_ERR(priv->clk)) {
- /* Get CPU clock frequency from device tree */
- if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &clock_frequency)) {
- dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
- err = -EINVAL;
- goto out_netdev;
- }
- } else {
+ if (priv->clk) {
err = clk_prepare_enable(priv->clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable clock\n");
- goto out_clkget;
+ dev_err(dev, "failed to enable clock\n");
+ return err;
}
clock_frequency = clk_get_rate(priv->clk);
+ } else {
+ /* Get CPU clock frequency from device tree */
+ if (of_property_read_u32(dev->of_node, "clock-frequency",
+ &clock_frequency)) {
+ dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n");
+ return -EINVAL;
+ }
}
id = arc_reg_get(priv, R_ID);
/* Check for EMAC revision 5 or 7, magic number */
if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
- dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id);
+ dev_err(dev, "ARC EMAC not detected, id=0x%x\n", id);
err = -ENODEV;
goto out_clken;
}
- dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id);
+ dev_info(dev, "ARC EMAC detected with id: 0x%x\n", id);
/* Set poll rate so that it polls every 1 ms */
arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
ndev->irq = irq;
- dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);
+ dev_info(dev, "IRQ is %d\n", ndev->irq);
/* Register interrupt handler for device */
- err = devm_request_irq(&pdev->dev, ndev->irq, arc_emac_intr, 0,
+ err = devm_request_irq(dev, ndev->irq, arc_emac_intr, 0,
ndev->name, ndev);
if (err) {
- dev_err(&pdev->dev, "could not allocate IRQ\n");
+ dev_err(dev, "could not allocate IRQ\n");
goto out_clken;
}
/* Get MAC address from device tree */
- mac_addr = of_get_mac_address(pdev->dev.of_node);
+ mac_addr = of_get_mac_address(dev->of_node);
if (mac_addr)
memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
@@ -801,14 +792,14 @@ static int arc_emac_probe(struct platform_device *pdev)
eth_hw_addr_random(ndev);
arc_emac_set_address_internal(ndev);
- dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
+ dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
/* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
- priv->rxbd = dmam_alloc_coherent(&pdev->dev, RX_RING_SZ + TX_RING_SZ,
+ priv->rxbd = dmam_alloc_coherent(dev, RX_RING_SZ + TX_RING_SZ,
&priv->rxbd_dma, GFP_KERNEL);
if (!priv->rxbd) {
- dev_err(&pdev->dev, "failed to allocate data buffers\n");
+ dev_err(dev, "failed to allocate data buffers\n");
err = -ENOMEM;
goto out_clken;
}
@@ -816,31 +807,31 @@ static int arc_emac_probe(struct platform_device *pdev)
priv->txbd = priv->rxbd + RX_BD_NUM;
priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ;
- dev_dbg(&pdev->dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n",
+ dev_dbg(dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n",
(unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma);
- err = arc_mdio_probe(pdev, priv);
+ err = arc_mdio_probe(priv);
if (err) {
- dev_err(&pdev->dev, "failed to probe MII bus\n");
+ dev_err(dev, "failed to probe MII bus\n");
goto out_clken;
}
priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
- PHY_INTERFACE_MODE_MII);
+ interface);
if (!priv->phy_dev) {
- dev_err(&pdev->dev, "of_phy_connect() failed\n");
+ dev_err(dev, "of_phy_connect() failed\n");
err = -ENODEV;
goto out_mdio;
}
- dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n",
+ dev_info(dev, "connected to %s phy with id 0x%x\n",
priv->phy_dev->drv->name, priv->phy_dev->phy_id);
netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);
err = register_netdev(ndev);
if (err) {
- dev_err(&pdev->dev, "failed to register network device\n");
+ dev_err(dev, "failed to register network device\n");
goto out_netif_api;
}
@@ -853,19 +844,14 @@ out_netif_api:
out_mdio:
arc_mdio_remove(priv);
out_clken:
- if (!IS_ERR(priv->clk))
+ if (priv->clk)
clk_disable_unprepare(priv->clk);
-out_clkget:
- if (!IS_ERR(priv->clk))
- clk_put(priv->clk);
-out_netdev:
- free_netdev(ndev);
return err;
}
+EXPORT_SYMBOL_GPL(arc_emac_probe);
-static int arc_emac_remove(struct platform_device *pdev)
+int arc_emac_remove(struct net_device *ndev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
struct arc_emac_priv *priv = netdev_priv(ndev);
phy_disconnect(priv->phy_dev);
@@ -876,31 +862,12 @@ static int arc_emac_remove(struct platform_device *pdev)
if (!IS_ERR(priv->clk)) {
clk_disable_unprepare(priv->clk);
- clk_put(priv->clk);
}
- free_netdev(ndev);
return 0;
}
-
-static const struct of_device_id arc_emac_dt_ids[] = {
- { .compatible = "snps,arc-emac" },
- { /* Sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, arc_emac_dt_ids);
-
-static struct platform_driver arc_emac_driver = {
- .probe = arc_emac_probe,
- .remove = arc_emac_remove,
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- .of_match_table = arc_emac_dt_ids,
- },
-};
-
-module_platform_driver(arc_emac_driver);
+EXPORT_SYMBOL_GPL(arc_emac_remove);
MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>");
MODULE_DESCRIPTION("ARC EMAC driver");
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 26ba2423f33a..d5ee986936da 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -100,7 +100,6 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr,
/**
* arc_mdio_probe - MDIO probe function.
- * @pdev: Pointer to platform device.
* @priv: Pointer to ARC EMAC private data structure.
*
* returns: 0 on success, -ENOMEM when mdiobus_alloc
@@ -108,7 +107,7 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr,
*
* Sets up and registers the MDIO interface.
*/
-int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv)
+int arc_mdio_probe(struct arc_emac_priv *priv)
{
struct mii_bus *bus;
int error;
@@ -124,9 +123,9 @@ int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv)
bus->read = &arc_mdio_read;
bus->write = &arc_mdio_write;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name);
- error = of_mdiobus_register(bus, pdev->dev.of_node);
+ error = of_mdiobus_register(bus, priv->dev->of_node);
if (error) {
dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name);
mdiobus_free(bus);
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
new file mode 100644
index 000000000000..c31c7407b753
--- /dev/null
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -0,0 +1,229 @@
+/**
+ * emac-rockchip.c - Rockchip EMAC specific glue layer
+ *
+ * Copyright (C) 2014 Romain Perier <romain.perier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include "emac.h"
+
+#define DRV_NAME "rockchip_emac"
+#define DRV_VERSION "1.0"
+
+#define GRF_MODE_MII (1UL << 0)
+#define GRF_MODE_RMII (0UL << 0)
+#define GRF_SPEED_10M (0UL << 1)
+#define GRF_SPEED_100M (1UL << 1)
+#define GRF_SPEED_ENABLE_BIT (1UL << 17)
+#define GRF_MODE_ENABLE_BIT (1UL << 16)
+
+struct emac_rockchip_soc_data {
+ int grf_offset;
+};
+
+struct rockchip_priv_data {
+ struct arc_emac_priv emac;
+ struct regmap *grf;
+ const struct emac_rockchip_soc_data *soc_data;
+ struct regulator *regulator;
+ struct clk *refclk;
+};
+
+static void emac_rockchip_set_mac_speed(void *priv, unsigned int speed)
+{
+ struct rockchip_priv_data *emac = priv;
+ u32 data;
+ int err = 0;
+
+ /* write-enable bits */
+ data = GRF_SPEED_ENABLE_BIT;
+
+ switch(speed) {
+ case 10:
+ data |= GRF_SPEED_10M;
+ break;
+ case 100:
+ data |= GRF_SPEED_100M;
+ break;
+ default:
+ pr_err("speed %u not supported\n", speed);
+ return;
+ }
+
+ err = regmap_write(emac->grf, emac->soc_data->grf_offset, data);
+ if (err)
+ pr_err("unable to apply speed %u to grf (%d)\n", speed, err);
+}
+
+static const struct emac_rockchip_soc_data emac_rockchip_dt_data[] = {
+ { .grf_offset = 0x154 }, /* rk3066 */
+ { .grf_offset = 0x0a4 }, /* rk3188 */
+};
+
+static const struct of_device_id emac_rockchip_dt_ids[] = {
+ { .compatible = "rockchip,rk3066-emac", .data = &emac_rockchip_dt_data[0] },
+ { .compatible = "rockchip,rk3188-emac", .data = &emac_rockchip_dt_data[1] },
+ { /* Sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, emac_rockchip_dt_ids);
+
+static int emac_rockchip_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct rockchip_priv_data *priv;
+ const struct of_device_id *match;
+ u32 data;
+ int err, interface;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ ndev = alloc_etherdev(sizeof(struct rockchip_priv_data));
+ if (!ndev)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+
+ priv = netdev_priv(ndev);
+ priv->emac.drv_name = DRV_NAME;
+ priv->emac.drv_version = DRV_VERSION;
+ priv->emac.set_mac_speed = emac_rockchip_set_mac_speed;
+
+ interface = of_get_phy_mode(dev->of_node);
+
+ /* RK3066 and RK3188 SoCs only support RMII */
+ if (interface != PHY_INTERFACE_MODE_RMII) {
+ dev_err(dev, "unsupported phy interface mode %d\n", interface);
+ err = -ENOTSUPP;
+ goto out_netdev;
+ }
+
+ priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (IS_ERR(priv->grf)) {
+ dev_err(dev, "failed to retrieve global register file (%ld)\n", PTR_ERR(priv->grf));
+ err = PTR_ERR(priv->grf);
+ goto out_netdev;
+ }
+
+ match = of_match_node(emac_rockchip_dt_ids, dev->of_node);
+ priv->soc_data = match->data;
+
+ priv->emac.clk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(priv->emac.clk)) {
+ dev_err(dev, "failed to retrieve host clock (%ld)\n", PTR_ERR(priv->emac.clk));
+ err = PTR_ERR(priv->emac.clk);
+ goto out_netdev;
+ }
+
+ priv->refclk = devm_clk_get(dev, "macref");
+ if (IS_ERR(priv->refclk)) {
+ dev_err(dev, "failed to retrieve reference clock (%ld)\n", PTR_ERR(priv->refclk));
+ err = PTR_ERR(priv->refclk);
+ goto out_netdev;
+ }
+
+ err = clk_prepare_enable(priv->refclk);
+ if (err) {
+ dev_err(dev, "failed to enable reference clock (%d)\n", err);
+ goto out_netdev;
+ }
+
+ /* Optional regulator for PHY */
+ priv->regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(priv->regulator)) {
+ if (PTR_ERR(priv->regulator) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_err(dev, "no regulator found\n");
+ priv->regulator = NULL;
+ }
+
+ if (priv->regulator) {
+ err = regulator_enable(priv->regulator);
+ if (err) {
+ dev_err(dev, "failed to enable phy-supply (%d)\n", err);
+ goto out_clk_disable;
+ }
+ }
+
+ err = arc_emac_probe(ndev, interface);
+ if (err)
+ goto out_regulator_disable;
+
+ /* write-enable bits */
+ data = GRF_MODE_ENABLE_BIT | GRF_SPEED_ENABLE_BIT;
+
+ data |= GRF_SPEED_100M;
+ data |= GRF_MODE_RMII;
+
+ err = regmap_write(priv->grf, priv->soc_data->grf_offset, data);
+ if (err) {
+ dev_err(dev, "unable to apply initial settings to grf (%d)\n", err);
+ goto out_regulator_disable;
+ }
+
+ /* RMII interface needs always a rate of 50MHz */
+ err = clk_set_rate(priv->refclk, 50000000);
+ if (err)
+ dev_err(dev, "failed to change reference clock rate (%d)\n", err);
+ return 0;
+
+out_regulator_disable:
+ if (priv->regulator)
+ regulator_disable(priv->regulator);
+out_clk_disable:
+ clk_disable_unprepare(priv->refclk);
+out_netdev:
+ free_netdev(ndev);
+ return err;
+}
+
+static int emac_rockchip_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct rockchip_priv_data *priv = netdev_priv(ndev);
+ int err;
+
+ err = arc_emac_remove(ndev);
+
+ clk_disable_unprepare(priv->refclk);
+
+ if (priv->regulator)
+ regulator_disable(priv->regulator);
+
+ free_netdev(ndev);
+ return err;
+}
+
+static struct platform_driver emac_rockchip_driver = {
+ .probe = emac_rockchip_probe,
+ .remove = emac_rockchip_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = emac_rockchip_dt_ids,
+ },
+};
+
+module_platform_driver(emac_rockchip_driver);
+
+MODULE_AUTHOR("Romain Perier <romain.perier@gmail.com>");
+MODULE_DESCRIPTION("Rockchip EMAC platform driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index d8d07a818b89..c3e260c21734 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -122,6 +122,7 @@ config TIGON3
config BNX2X
tristate "Broadcom NetXtremeII 10Gb support"
depends on PCI
+ select PTP_1588_CLOCK
select FW_LOADER
select ZLIB_INFLATE
select LIBCRC32C
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index d588136b23b9..416620fa8fac 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -427,7 +427,7 @@ static void b44_wap54g10_workaround(struct b44 *bp)
}
return;
error:
- pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
+ pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index d9b9170ed2fc..77f1ff7396ac 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -139,6 +139,15 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,
else
reg &= ~RXCHK_SKIP_FCS;
+ /* If Broadcom tags are enabled (e.g: using a switch), make
+ * sure we tell the RXCHK hardware to expect a 4-bytes Broadcom
+ * tag after the Ethernet MAC Source Address.
+ */
+ if (netdev_uses_dsa(dev))
+ reg |= RXCHK_BRCM_TAG_EN;
+ else
+ reg &= ~RXCHK_BRCM_TAG_EN;
+
rxchk_writel(priv, reg, RXCHK_CONTROL);
return 0;
@@ -1069,16 +1078,19 @@ static void bcm_sysport_adj_link(struct net_device *dev)
if (!phydev->pause)
cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
- if (changed) {
+ if (!changed)
+ return;
+
+ if (phydev->link) {
reg = umac_readl(priv, UMAC_CMD);
reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
CMD_TX_PAUSE_IGNORE);
reg |= cmd_bits;
umac_writel(priv, reg, UMAC_CMD);
-
- phy_print_status(priv->phydev);
}
+
+ phy_print_status(priv->phydev);
}
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d777fae86988..c3a6072134f5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -20,13 +20,17 @@
#include <linux/types.h>
#include <linux/pci_regs.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/clocksource.h>
+
/* compilation time flags */
/* define this to make the driver freeze on error to allow getting debug info
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.78.19-0"
+#define DRV_MODULE_VERSION "1.710.51-0"
#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
@@ -70,6 +74,7 @@ enum bnx2x_int_mode {
#define BNX2X_MSG_SP 0x0100000 /* was: NETIF_MSG_INTR */
#define BNX2X_MSG_FP 0x0200000 /* was: NETIF_MSG_INTR */
#define BNX2X_MSG_IOV 0x0800000
+#define BNX2X_MSG_PTP 0x1000000
#define BNX2X_MSG_IDLE 0x2000000 /* used for idle check*/
#define BNX2X_MSG_ETHTOOL 0x4000000
#define BNX2X_MSG_DCB 0x8000000
@@ -1443,6 +1448,12 @@ struct bnx2x_fp_stats {
struct bnx2x_eth_q_stats_old eth_q_stats_old;
};
+enum {
+ SUB_MF_MODE_UNKNOWN = 0,
+ SUB_MF_MODE_UFP,
+ SUB_MF_MODE_NPAR1_DOT_5,
+};
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
@@ -1587,10 +1598,11 @@ struct bnx2x {
#define USING_SINGLE_MSIX_FLAG (1 << 20)
#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
#define IS_VF_FLAG (1 << 22)
-#define INTERRUPTS_ENABLED_FLAG (1 << 23)
-#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
-#define HAS_PHYS_PORT_ID (1 << 25)
-#define AER_ENABLED (1 << 26)
+#define BC_SUPPORTS_RMMOD_CMD (1 << 23)
+#define HAS_PHYS_PORT_ID (1 << 24)
+#define AER_ENABLED (1 << 25)
+#define PTP_SUPPORTED (1 << 26)
+#define TX_TIMESTAMPING_EN (1 << 27)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
@@ -1653,6 +1665,9 @@ struct bnx2x {
#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
#define IS_MF_AFEX(bp) (bp->mf_mode == MULTI_FUNCTION_AFEX)
+ u8 mf_sub_mode;
+#define IS_MF_UFP(bp) (IS_MF_SD(bp) && \
+ bp->mf_sub_mode == SUB_MF_MODE_UFP)
u8 wol;
@@ -1684,13 +1699,9 @@ struct bnx2x {
#define BNX2X_STATE_ERROR 0xf000
#define BNX2X_MAX_PRIORITY 8
-#define BNX2X_MAX_ENTRIES_PER_PRI 16
-#define BNX2X_MAX_COS 3
-#define BNX2X_MAX_TX_COS 2
int num_queues;
uint num_ethernet_queues;
uint num_cnic_queues;
- int num_napi_queues;
int disable_tpa;
u32 rx_mode;
@@ -1933,6 +1944,19 @@ struct bnx2x {
u8 phys_port_id[ETH_ALEN];
+ /* PTP related context */
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct work_struct ptp_task;
+ struct cyclecounter cyclecounter;
+ struct timecounter timecounter;
+ bool timecounter_init_done;
+ struct sk_buff *ptp_tx_skb;
+ unsigned long ptp_tx_start;
+ bool hwtstamp_ioctl_called;
+ u16 tx_type;
+ u16 rx_filter;
+
struct bnx2x_link_report_data vf_link_vars;
};
@@ -2346,7 +2370,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define ATTN_HARD_WIRED_MASK 0xff00
#define ATTENTION_ID 4
-#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_SD(bp) || \
+#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_PERSONALITY_ONLY(bp) || \
IS_MF_FCOE_AFEX(bp))
/* stuff added to make the code fit 80Col */
@@ -2522,14 +2546,44 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp))
#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))
+#define IS_MF_ISCSI_SI(bp) (IS_MF_SI(bp) && BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp))
+
+#define IS_MF_ISCSI_ONLY(bp) (IS_MF_ISCSI_SD(bp) || IS_MF_ISCSI_SI(bp))
+
+#define BNX2X_MF_EXT_PROTOCOL_MASK \
+ (MACP_FUNC_CFG_FLAGS_ETHERNET | \
+ MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD | \
+ MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+
+#define BNX2X_MF_EXT_PROT(bp) ((bp)->mf_ext_config & \
+ BNX2X_MF_EXT_PROTOCOL_MASK)
-#define BNX2X_MF_EXT_PROTOCOL_FCOE(bp) ((bp)->mf_ext_config & \
- MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+#define BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp) \
+ (BNX2X_MF_EXT_PROT(bp) & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+
+#define BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp) \
+ (BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+
+#define BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) \
+ (BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD)
+
+#define IS_MF_FCOE_AFEX(bp) \
+ (IS_MF_AFEX(bp) && BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp))
+
+#define IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) \
+ (IS_MF_SD(bp) && \
+ (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
+ BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
+
+#define IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp) \
+ (IS_MF_SI(bp) && \
+ (BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) || \
+ BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp)))
+
+#define IS_MF_STORAGE_PERSONALITY_ONLY(bp) \
+ (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) || \
+ IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp))
-#define IS_MF_FCOE_AFEX(bp) (IS_MF_AFEX(bp) && BNX2X_MF_EXT_PROTOCOL_FCOE(bp))
-#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
- (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
- BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
#define SET_FLAG(value, mask, flag) \
do {\
@@ -2559,4 +2613,11 @@ void bnx2x_update_mng_version(struct bnx2x *bp);
#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+void bnx2x_init_ptp(struct bnx2x *bp);
+int bnx2x_configure_ptp_filters(struct bnx2x *bp);
+void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
+
+#define BNX2X_MAX_PHC_DRIFT 31000000
+#define BNX2X_PTP_TX_TIMEOUT
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4ccc806b1150..40beef5bca88 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -21,6 +21,7 @@
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
+#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
@@ -64,7 +65,7 @@ static int bnx2x_calc_num_queues(struct bnx2x *bp)
int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
/* Reduce memory usage in kdump environment by using only one queue */
- if (reset_devices)
+ if (is_kdump_kernel())
nq = 1;
nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
@@ -1063,6 +1064,11 @@ reuse_rx:
skb_record_rx_queue(skb, fp->rx_queue);
+ /* Check if this packet was timestamped */
+ if (unlikely(cqe->fast_path_cqe.type_error_flags &
+ (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
+ bnx2x_set_rx_ts(bp, skb);
+
if (le16_to_cpu(cqe_fp->pars_flags.flags) &
PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
@@ -1932,7 +1938,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
/* override in STORAGE SD modes */
- if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
+ if (IS_MF_STORAGE_ONLY(bp))
bp->num_ethernet_queues = 1;
/* Add special queues */
@@ -2078,6 +2084,10 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
if (rss_obj->udp_rss_v6)
__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+
+ if (!CHIP_IS_E1x(bp))
+ /* valid only for TUNN_MODE_GRE tunnel mode */
+ __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
} else {
__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
}
@@ -2800,7 +2810,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Initialize Rx filter. */
bnx2x_set_rx_mode_inner(bp);
- /* Start the Tx */
+ if (bp->flags & PTP_SUPPORTED) {
+ bnx2x_init_ptp(bp);
+ bnx2x_configure_ptp_filters(bp);
+ }
+ /* Start Tx */
switch (load_mode) {
case LOAD_NORMAL:
/* Tx queue should be only re-enabled */
@@ -3437,26 +3451,6 @@ exit_lbl:
}
#endif
-static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
- u32 xmit_type)
-{
- struct ipv6hdr *ipv6;
-
- *parsing_data |= (skb_shinfo(skb)->gso_size <<
- ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
- ETH_TX_PARSE_BD_E2_LSO_MSS;
-
- if (xmit_type & XMIT_GSO_ENC_V6)
- ipv6 = inner_ipv6_hdr(skb);
- else if (xmit_type & XMIT_GSO_V6)
- ipv6 = ipv6_hdr(skb);
- else
- ipv6 = NULL;
-
- if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
- *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
-}
-
/**
* bnx2x_set_pbd_gso - update PBD in GSO case.
*
@@ -3466,7 +3460,6 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
*/
static void bnx2x_set_pbd_gso(struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd,
- struct eth_tx_start_bd *tx_start_bd,
u32 xmit_type)
{
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
@@ -3479,9 +3472,6 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb,
bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
0, IPPROTO_TCP, 0));
-
- /* GSO on 57710/57711 needs FW to calculate IP checksum */
- tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
} else {
pbd->tcp_pseudo_csum =
bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -3653,18 +3643,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
(__force u32)iph->tot_len -
(__force u32)iph->frag_off;
+ outerip_len = iph->ihl << 1;
+
pbd2->fw_ip_csum_wo_len_flags_frag =
bswab16(csum_fold((__force __wsum)csum));
} else {
pbd2->fw_ip_hdr_to_payload_w =
hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
+ pbd_e2->data.tunnel_data.flags |=
+ ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
}
pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
- if (xmit_type & XMIT_GSO_V4) {
+ /* inner IP header info */
+ if (xmit_type & XMIT_CSUM_ENC_V4) {
pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
pbd_e2->data.tunnel_data.pseudo_csum =
@@ -3672,8 +3667,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
inner_ip_hdr(skb)->saddr,
inner_ip_hdr(skb)->daddr,
0, IPPROTO_TCP, 0));
-
- outerip_len = ip_hdr(skb)->ihl << 1;
} else {
pbd_e2->data.tunnel_data.pseudo_csum =
bswab16(~csum_ipv6_magic(
@@ -3686,8 +3679,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
*global_data |=
outerip_off |
- (!!(xmit_type & XMIT_CSUM_V6) <<
- ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
(outerip_len <<
ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
@@ -3699,6 +3690,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
}
}
+static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
+{
+ struct ipv6hdr *ipv6;
+
+ if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
+ return;
+
+ if (xmit_type & XMIT_GSO_ENC_V6)
+ ipv6 = inner_ipv6_hdr(skb);
+ else /* XMIT_GSO_V6 */
+ ipv6 = ipv6_hdr(skb);
+
+ if (ipv6->nexthdr == NEXTHDR_IPV6)
+ *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+}
+
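
bnx2x_set_ipv6_ext_e2() replaces the GSO-only helper removed above and marks the extension-header flag whenever the relevant (inner or outer) IPv6 next header is NEXTHDR_IPV6. A userspace sketch of the same decision, with the header reduced to the single field consulted and the flag bit chosen arbitrarily for illustration:

#include <stdint.h>
#include <stdio.h>

#define NEXTHDR_IPV6 41         /* IPv6-in-IPv6 encapsulation */
#define EXT_HDR_FLAG (1u << 0)  /* illustrative stand-in for the E2 BD bit */

struct mini_ipv6hdr { uint8_t nexthdr; };

static void set_ipv6_ext(const struct mini_ipv6hdr *ip6, uint32_t *parsing)
{
	if (ip6->nexthdr == NEXTHDR_IPV6)
		*parsing |= EXT_HDR_FLAG;
}

int main(void)
{
	struct mini_ipv6hdr h = { .nexthdr = NEXTHDR_IPV6 };
	uint32_t parsing = 0;

	set_ipv6_ext(&h, &parsing);
	printf("0x%x\n", parsing); /* 0x1 */
	return 0;
}
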
/* called with netif_tx_lock
* bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
* netif_wake_queue()
@@ -3831,6 +3839,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ if (!(bp->flags & TX_TIMESTAMPING_EN)) {
+ BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
+ } else if (bp->ptp_tx_skb) {
+ BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+ } else {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* schedule check for Tx timestamp */
+ bp->ptp_tx_skb = skb_get(skb);
+ bp->ptp_tx_start = jiffies;
+ schedule_work(&bp->ptp_task);
+ }
+ }
+
/* header nbd: indirectly zero other flags! */
tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
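
The timestamping block above implements a one-slot queue: the hardware can track only one outstanding timestamped packet, so bp->ptp_tx_skb doubles as the busy flag while a worker polls for the captured timestamp. A sketch of that one-slot pattern with the kernel skb and workqueue machinery replaced by plain C:

#include <stdio.h>

struct pkt { int id; };

static struct pkt *ptp_tx_pkt;          /* NULL means the slot is free */

static int claim_tstamp_slot(struct pkt *p)
{
	if (ptp_tx_pkt)
		return -1;              /* only one packet may be in flight */
	ptp_tx_pkt = p;                 /* driver also grabs a ref and queues work */
	return 0;
}

int main(void)
{
	struct pkt a = { 1 }, b = { 2 };

	printf("%d %d\n", claim_tstamp_slot(&a), claim_tstamp_slot(&b)); /* 0 -1 */
	return 0;
}
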
@@ -3852,12 +3874,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* when transmitting in a vf, start bd must hold the ethertype
* for fw to enforce it
*/
+#ifndef BNX2X_STOP_ON_ERROR
if (IS_VF(bp))
+#endif
tx_start_bd->vlan_or_ethertype =
cpu_to_le16(ntohs(eth->h_proto));
+#ifndef BNX2X_STOP_ON_ERROR
else
/* used by FW for packet accounting */
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+#endif
}
nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
@@ -3915,6 +3941,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
xmit_type);
}
+ bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
/* Add the macs to the parsing BD if this is a vf or if
* Tx Switching is enabled.
*/
@@ -3929,11 +3956,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
&pbd_e2->data.mac_addr.dst_mid,
&pbd_e2->data.mac_addr.dst_lo,
eth->h_dest);
- } else if (bp->flags & TX_SWITCHING) {
- bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
- &pbd_e2->data.mac_addr.dst_mid,
- &pbd_e2->data.mac_addr.dst_lo,
- eth->h_dest);
+ } else {
+ if (bp->flags & TX_SWITCHING)
+ bnx2x_set_fw_mac_addr(
+ &pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
+ eth->h_dest);
+#ifdef BNX2X_STOP_ON_ERROR
+ /* Enforce security is always set in Stop on Error mode -
+ * the source MAC should be present in the parsing BD
+ */
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
+ &pbd_e2->data.mac_addr.src_mid,
+ &pbd_e2->data.mac_addr.src_lo,
+ eth->h_source);
+#endif
}
SET_FLAG(pbd_e2_parsing_data,
@@ -3980,10 +4018,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
bd_prod);
}
if (!CHIP_IS_E1x(bp))
- bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
- xmit_type);
+ pbd_e2_parsing_data |=
+ (skb_shinfo(skb)->gso_size <<
+ ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+ ETH_TX_PARSE_BD_E2_LSO_MSS;
else
- bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
+ bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
}
/* Set the PBD's parsing_data field if not zero
@@ -4191,14 +4231,13 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
- if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
+ if (!is_valid_ether_addr(addr->sa_data)) {
BNX2X_ERR("Requested MAC address is not valid\n");
return -EINVAL;
}
- if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
- !is_zero_ether_addr(addr->sa_data)) {
- BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
+ if (IS_MF_STORAGE_ONLY(bp)) {
+ BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
return -EINVAL;
}
@@ -4377,8 +4416,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
u8 cos;
int rx_ring_size = 0;
- if (!bp->rx_ring_size &&
- (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
+ if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
rx_ring_size = MIN_RX_SIZE_NONTPA;
bp->rx_ring_size = rx_ring_size;
} else if (!bp->rx_ring_size) {
@@ -4771,11 +4809,15 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
/* TPA requires Rx CSUM offloading */
- if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
+ if (!(features & NETIF_F_RXCSUM)) {
features &= ~NETIF_F_LRO;
features &= ~NETIF_F_GRO;
}
+ /* Note: do not disable SW GRO in kernel when HW GRO is off */
+ if (bp->disable_tpa)
+ features &= ~NETIF_F_LRO;
+
return features;
}
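
The reworked bnx2x_fix_features() decouples the two offloads: a missing Rx checksum still clears both LRO and GRO, but disable_tpa now strips only LRO, so the kernel's software GRO keeps working. A sketch of the resulting truth table with the feature bits as plain flags:

#include <stdio.h>

#define F_RXCSUM (1u << 0)
#define F_LRO    (1u << 1)
#define F_GRO    (1u << 2)

static unsigned int fix_features(unsigned int f, int disable_tpa)
{
	if (!(f & F_RXCSUM))        /* TPA requires Rx checksum offload */
		f &= ~(F_LRO | F_GRO);
	if (disable_tpa)            /* keep SW GRO even when HW TPA is off */
		f &= ~F_LRO;
	return f;
}

int main(void)
{
	unsigned int f = F_RXCSUM | F_LRO | F_GRO;

	printf("0x%x\n", fix_features(f, 1)); /* 0x5: GRO kept, LRO dropped */
	return 0;
}
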
@@ -4814,6 +4856,10 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
changes &= ~GRO_ENABLE_FLAG;
+ /* if GRO is changed while HW TPA is off, don't force a reload */
+ if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa)
+ changes &= ~GRO_ENABLE_FLAG;
+
if (changes)
bnx2x_reload = true;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 571427c7226b..adcacda7af7b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -932,8 +932,15 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
- start_params->gre_tunnel_mode = L2GRE_TUNNEL;
- start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
+ start_params->tunnel_mode = TUNN_MODE_GRE;
+ start_params->gre_tunnel_type = IPGRE_TUNNEL;
+ start_params->inner_gre_rss_en = 1;
+
+ if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
+ start_params->class_fail_ethtype = ETH_P_FIP;
+ start_params->class_fail = 1;
+ start_params->no_added_tags = 1;
+ }
return bnx2x_func_state_change(bp, &func_params);
}
@@ -1297,15 +1304,7 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
}
}
-static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
-{
- if (is_valid_ether_addr(addr) ||
- (is_zero_ether_addr(addr) &&
- (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))))
- return true;
- return false;
-}
/**
* bnx2x_fill_fw_str - Fill buffer with FW version string
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index fb26bc4c42a1..6e4294ed1fc9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -2092,7 +2092,6 @@ static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
{
struct bnx2x *bp = netdev_priv(netdev);
- int rc = 0;
DP(BNX2X_MSG_DCB, "SET-ALL\n");
@@ -2110,9 +2109,7 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
1);
bnx2x_dcbx_init(bp, true);
}
- DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
- if (rc)
- return 1;
+ DP(BNX2X_MSG_DCB, "set_dcbx_params done\n");
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index 12eb4baee9f6..741aa130c19f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -40,7 +40,7 @@ struct dump_header {
u32 dump_meta_data; /* OR of CHIP and PATH. */
};
-#define BNX2X_DUMP_VERSION 0x50acff01
+#define BNX2X_DUMP_VERSION 0x61111111
struct reg_addr {
u32 addr;
u32 size;
@@ -1464,7 +1464,6 @@ static const struct reg_addr reg_addrs[] = {
{ 0x180398, 1, 0x1c, 0x924},
{ 0x1803a0, 5, 0x1c, 0x924},
{ 0x1803b4, 2, 0x18, 0x924},
- { 0x180400, 256, 0x3, 0xfff},
{ 0x181000, 4, 0x1f, 0x93c},
{ 0x181010, 1020, 0x1f, 0x38},
{ 0x182000, 4, 0x18, 0x924},
@@ -1576,7 +1575,6 @@ static const struct reg_addr reg_addrs[] = {
{ 0x200398, 1, 0x1c, 0x924},
{ 0x2003a0, 1, 0x1c, 0x924},
{ 0x2003a8, 2, 0x1c, 0x924},
- { 0x200400, 256, 0x3, 0xfff},
{ 0x202000, 4, 0x1f, 0x1927},
{ 0x202010, 2044, 0x1f, 0x1007},
{ 0x204000, 4, 0x18, 0x924},
@@ -1688,7 +1686,6 @@ static const struct reg_addr reg_addrs[] = {
{ 0x280398, 1, 0x1c, 0x924},
{ 0x2803a0, 1, 0x1c, 0x924},
{ 0x2803a8, 2, 0x1c, 0x924},
- { 0x280400, 256, 0x3, 0xfff},
{ 0x282000, 4, 0x1f, 0x9e4},
{ 0x282010, 2044, 0x1f, 0x1c0},
{ 0x284000, 4, 0x18, 0x924},
@@ -1800,7 +1797,6 @@ static const struct reg_addr reg_addrs[] = {
{ 0x300398, 1, 0x1c, 0x924},
{ 0x3003a0, 1, 0x1c, 0x924},
{ 0x3003a8, 2, 0x1c, 0x924},
- { 0x300400, 256, 0x3, 0xfff},
{ 0x302000, 4, 0x1f, 0xf24},
{ 0x302010, 2044, 0x1f, 0xe00},
{ 0x304000, 4, 0x18, 0x924},
@@ -2206,10 +2202,10 @@ static const struct wreg_addr wreg_addr_e3b0 = {
0x1b0c00, 128, 2, read_reg_e3b0, 0x1f, 0x1fff};
static const unsigned int dump_num_registers[NUM_CHIPS][NUM_PRESETS] = {
- {20782, 18567, 27975, 19729, 18311, 27719, 20836, 32391, 41799, 20812,
- 26247, 35655, 19074},
- {32774, 19297, 33277, 31721, 19041, 33021, 32828, 33121, 47101, 32804,
- 26977, 40957, 35895},
+ {19758, 17543, 26951, 18705, 17287, 26695, 19812, 31367, 40775, 19788,
+ 25223, 34631, 19074},
+ {31750, 18273, 32253, 30697, 18017, 31997, 31804, 32097, 46077, 31780,
+ 25953, 39933, 35895},
{36527, 17928, 33697, 35474, 18700, 34466, 36581, 31752, 47521, 36557,
25608, 41377, 43903},
{45239, 17936, 34387, 44186, 18708, 35156, 45293, 31760, 48211, 45269,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 92fee842f954..1edc931b1458 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1852,7 +1852,7 @@ static int bnx2x_set_ringparam(struct net_device *dev,
if ((ering->rx_pending > MAX_RX_AVAIL) ||
(ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
MIN_RX_SIZE_TPA)) ||
- (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) ||
+ (ering->tx_pending > (IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL)) ||
(ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EINVAL;
@@ -3481,6 +3481,46 @@ static int bnx2x_set_channels(struct net_device *dev,
return bnx2x_nic_load(bp, LOAD_NORMAL);
}
+static int bnx2x_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->flags & PTP_SUPPORTED) {
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (bp->ptp_clock)
+ info->phc_index = ptp_clock_index(bp->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
+
+ return 0;
+ }
+
+ return ethtool_op_get_ts_info(dev, info);
+}
+
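
bnx2x_get_ts_info() reports the supported PTP filters as a bitmap of HWTSTAMP_FILTER_* values, one bit per filter. A compact sketch of building and querying such a mask; the enum values here are illustrative stand-ins, not the kernel UAPI constants:

#include <stdio.h>

enum { FILTER_NONE, FILTER_ALL, FILTER_V1_L4_EVENT, FILTER_V2_EVENT };

int main(void)
{
	unsigned int rx_filters = (1u << FILTER_NONE) |
				  (1u << FILTER_V1_L4_EVENT) |
				  (1u << FILTER_V2_EVENT);

	/* a consumer of the ioctl result tests support the same way */
	printf("v2 event: %s\n",
	       (rx_filters & (1u << FILTER_V2_EVENT)) ? "yes" : "no");
	return 0;
}
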
static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_settings = bnx2x_get_settings,
.set_settings = bnx2x_set_settings,
@@ -3522,7 +3562,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_module_eeprom = bnx2x_get_module_eeprom,
.get_eee = bnx2x_get_eee,
.set_eee = bnx2x_set_eee,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = bnx2x_get_ts_info,
};
static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 95dc36543548..7636e3c18771 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -10,170 +10,170 @@
#ifndef BNX2X_FW_DEFS_H
#define BNX2X_FW_DEFS_H
-#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base)
+#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[152].base)
#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
- (IRO[147].base + ((assertListEntry) * IRO[147].m1))
+ (IRO[151].base + ((assertListEntry) * IRO[151].m1))
#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
- (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
- IRO[153].m2))
+ (IRO[157].base + (((pfId)>>1) * IRO[157].m1) + (((pfId)&1) * \
+ IRO[157].m2))
#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
- (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
- IRO[154].m2))
+ (IRO[158].base + (((pfId)>>1) * IRO[158].m1) + (((pfId)&1) * \
+ IRO[158].m2))
#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
- (IRO[159].base + ((funcId) * IRO[159].m1))
+ (IRO[163].base + ((funcId) * IRO[163].m1))
#define CSTORM_FUNC_EN_OFFSET(funcId) \
- (IRO[149].base + ((funcId) * IRO[149].m1))
+ (IRO[153].base + ((funcId) * IRO[153].m1))
#define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \
- (IRO[139].base + ((hcIndex) * IRO[139].m1) + ((sbId) * IRO[139].m2))
+ (IRO[143].base + ((hcIndex) * IRO[143].m1) + ((sbId) * IRO[143].m2))
#define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \
- (IRO[138].base + (((hcIndex)>>2) * IRO[138].m1) + (((hcIndex)&3) \
- * IRO[138].m2) + ((sbId) * IRO[138].m3))
-#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
+ (IRO[142].base + (((hcIndex)>>2) * IRO[142].m1) + (((hcIndex)&3) \
+ * IRO[142].m2) + ((sbId) * IRO[142].m3))
+#define CSTORM_IGU_MODE_OFFSET (IRO[161].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[317].base + ((pfId) * IRO[317].m1))
+ (IRO[323].base + ((pfId) * IRO[323].m1))
#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
- (IRO[318].base + ((pfId) * IRO[318].m1))
+ (IRO[324].base + ((pfId) * IRO[324].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
- (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+ (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+ (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+ (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
- (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+ (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
- (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
-#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
(IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+ (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
- (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
+ (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[316].base + ((pfId) * IRO[316].m1))
+ (IRO[322].base + ((pfId) * IRO[322].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[308].base + ((pfId) * IRO[308].m1))
+ (IRO[314].base + ((pfId) * IRO[314].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[307].base + ((pfId) * IRO[307].m1))
+ (IRO[313].base + ((pfId) * IRO[313].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[306].base + ((pfId) * IRO[306].m1))
+ (IRO[312].base + ((pfId) * IRO[312].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
- (IRO[151].base + ((funcId) * IRO[151].m1))
+ (IRO[155].base + ((funcId) * IRO[155].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
- (IRO[142].base + ((pfId) * IRO[142].m1))
+ (IRO[146].base + ((pfId) * IRO[146].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
- (IRO[143].base + ((pfId) * IRO[143].m1))
+ (IRO[147].base + ((pfId) * IRO[147].m1))
#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
- (IRO[141].base + ((pfId) * IRO[141].m1))
-#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
+ (IRO[145].base + ((pfId) * IRO[145].m1))
+#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[145].size)
#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
- (IRO[144].base + ((pfId) * IRO[144].m1))
-#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
+ (IRO[148].base + ((pfId) * IRO[148].m1))
+#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[148].size)
#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
- (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
+ (IRO[140].base + ((sbId) * IRO[140].m1) + ((hcIndex) * IRO[140].m2))
#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
- (IRO[133].base + ((sbId) * IRO[133].m1))
+ (IRO[137].base + ((sbId) * IRO[137].m1))
#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
- (IRO[134].base + ((sbId) * IRO[134].m1))
+ (IRO[138].base + ((sbId) * IRO[138].m1))
#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
- (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
+ (IRO[139].base + ((sbId) * IRO[139].m1) + ((hcIndex) * IRO[139].m2))
#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
- (IRO[132].base + ((sbId) * IRO[132].m1))
-#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
+ (IRO[136].base + ((sbId) * IRO[136].m1))
+#define CSTORM_STATUS_BLOCK_SIZE (IRO[136].size)
#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
- (IRO[137].base + ((sbId) * IRO[137].m1))
-#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
+ (IRO[141].base + ((sbId) * IRO[141].m1))
+#define CSTORM_SYNC_BLOCK_SIZE (IRO[141].size)
#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
- (IRO[155].base + ((vfId) * IRO[155].m1))
+ (IRO[159].base + ((vfId) * IRO[159].m1))
#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
- (IRO[156].base + ((vfId) * IRO[156].m1))
+ (IRO[160].base + ((vfId) * IRO[160].m1))
#define CSTORM_VF_TO_PF_OFFSET(funcId) \
- (IRO[150].base + ((funcId) * IRO[150].m1))
+ (IRO[154].base + ((funcId) * IRO[154].m1))
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
- (IRO[203].base + ((pfId) * IRO[203].m1))
+ (IRO[207].base + ((pfId) * IRO[207].m1))
#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[101].base + ((assertListEntry) * IRO[101].m1))
#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
- (IRO[201].base + ((pfId) * IRO[201].m1))
+ (IRO[205].base + ((pfId) * IRO[205].m1))
#define TSTORM_FUNC_EN_OFFSET(funcId) \
- (IRO[103].base + ((funcId) * IRO[103].m1))
+ (IRO[107].base + ((funcId) * IRO[107].m1))
#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[272].base + ((pfId) * IRO[272].m1))
+ (IRO[278].base + ((pfId) * IRO[278].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
- (IRO[273].base + ((pfId) * IRO[273].m1))
+ (IRO[279].base + ((pfId) * IRO[279].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
- (IRO[274].base + ((pfId) * IRO[274].m1))
+ (IRO[280].base + ((pfId) * IRO[280].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
- (IRO[275].base + ((pfId) * IRO[275].m1))
+ (IRO[281].base + ((pfId) * IRO[281].m1))
#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[271].base + ((pfId) * IRO[271].m1))
+ (IRO[277].base + ((pfId) * IRO[277].m1))
#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[270].base + ((pfId) * IRO[270].m1))
+ (IRO[276].base + ((pfId) * IRO[276].m1))
#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[269].base + ((pfId) * IRO[269].m1))
+ (IRO[275].base + ((pfId) * IRO[275].m1))
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
- (IRO[268].base + ((pfId) * IRO[268].m1))
+ (IRO[274].base + ((pfId) * IRO[274].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
- (IRO[278].base + ((pfId) * IRO[278].m1))
+ (IRO[284].base + ((pfId) * IRO[284].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[264].base + ((pfId) * IRO[264].m1))
+ (IRO[270].base + ((pfId) * IRO[270].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
- (IRO[265].base + ((pfId) * IRO[265].m1))
+ (IRO[271].base + ((pfId) * IRO[271].m1))
#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
- (IRO[266].base + ((pfId) * IRO[266].m1))
+ (IRO[272].base + ((pfId) * IRO[272].m1))
#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
- (IRO[267].base + ((pfId) * IRO[267].m1))
+ (IRO[273].base + ((pfId) * IRO[273].m1))
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
- (IRO[202].base + ((pfId) * IRO[202].m1))
+ (IRO[206].base + ((pfId) * IRO[206].m1))
#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
- (IRO[105].base + ((funcId) * IRO[105].m1))
+ (IRO[109].base + ((funcId) * IRO[109].m1))
#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
- (IRO[217].base + ((pfId) * IRO[217].m1))
+ (IRO[223].base + ((pfId) * IRO[223].m1))
#define TSTORM_VF_TO_PF_OFFSET(funcId) \
- (IRO[104].base + ((funcId) * IRO[104].m1))
-#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
-#define USTORM_AGG_DATA_SIZE (IRO[206].size)
-#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
+ (IRO[108].base + ((funcId) * IRO[108].m1))
+#define USTORM_AGG_DATA_OFFSET (IRO[212].base)
+#define USTORM_AGG_DATA_SIZE (IRO[212].size)
+#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
- (IRO[176].base + ((assertListEntry) * IRO[176].m1))
+ (IRO[180].base + ((assertListEntry) * IRO[180].m1))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
- (IRO[183].base + ((portId) * IRO[183].m1))
+ (IRO[187].base + ((portId) * IRO[187].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
- (IRO[319].base + ((pfId) * IRO[319].m1))
+ (IRO[325].base + ((pfId) * IRO[325].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
- (IRO[178].base + ((funcId) * IRO[178].m1))
+ (IRO[182].base + ((funcId) * IRO[182].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[283].base + ((pfId) * IRO[283].m1))
+ (IRO[289].base + ((pfId) * IRO[289].m1))
#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
- (IRO[284].base + ((pfId) * IRO[284].m1))
+ (IRO[290].base + ((pfId) * IRO[290].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[288].base + ((pfId) * IRO[288].m1))
+ (IRO[294].base + ((pfId) * IRO[294].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
- (IRO[285].base + ((pfId) * IRO[285].m1))
+ (IRO[291].base + ((pfId) * IRO[291].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[281].base + ((pfId) * IRO[281].m1))
+ (IRO[287].base + ((pfId) * IRO[287].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[280].base + ((pfId) * IRO[280].m1))
+ (IRO[286].base + ((pfId) * IRO[286].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[279].base + ((pfId) * IRO[279].m1))
+ (IRO[285].base + ((pfId) * IRO[285].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[282].base + ((pfId) * IRO[282].m1))
+ (IRO[288].base + ((pfId) * IRO[288].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
- (IRO[286].base + ((pfId) * IRO[286].m1))
+ (IRO[292].base + ((pfId) * IRO[292].m1))
#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
- (IRO[287].base + ((pfId) * IRO[287].m1))
+ (IRO[293].base + ((pfId) * IRO[293].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
- (IRO[182].base + ((pfId) * IRO[182].m1))
+ (IRO[186].base + ((pfId) * IRO[186].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
- (IRO[180].base + ((funcId) * IRO[180].m1))
+ (IRO[184].base + ((funcId) * IRO[184].m1))
#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
- (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \
- IRO[209].m2))
+ (IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \
+ IRO[215].m2))
#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
- (IRO[210].base + ((qzoneId) * IRO[210].m1))
-#define USTORM_TPA_BTR_OFFSET (IRO[207].base)
-#define USTORM_TPA_BTR_SIZE (IRO[207].size)
+ (IRO[216].base + ((qzoneId) * IRO[216].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[213].base)
+#define USTORM_TPA_BTR_SIZE (IRO[213].size)
#define USTORM_VF_TO_PF_OFFSET(funcId) \
- (IRO[179].base + ((funcId) * IRO[179].m1))
+ (IRO[183].base + ((funcId) * IRO[183].m1))
#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base)
@@ -186,39 +186,39 @@
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[296].base + ((pfId) * IRO[296].m1))
+ (IRO[302].base + ((pfId) * IRO[302].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
- (IRO[299].base + ((pfId) * IRO[299].m1))
+ (IRO[305].base + ((pfId) * IRO[305].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
- (IRO[300].base + ((pfId) * IRO[300].m1))
+ (IRO[306].base + ((pfId) * IRO[306].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
- (IRO[301].base + ((pfId) * IRO[301].m1))
+ (IRO[307].base + ((pfId) * IRO[307].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
- (IRO[302].base + ((pfId) * IRO[302].m1))
+ (IRO[308].base + ((pfId) * IRO[308].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
- (IRO[303].base + ((pfId) * IRO[303].m1))
+ (IRO[309].base + ((pfId) * IRO[309].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
- (IRO[304].base + ((pfId) * IRO[304].m1))
+ (IRO[310].base + ((pfId) * IRO[310].m1))
#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
- (IRO[305].base + ((pfId) * IRO[305].m1))
+ (IRO[311].base + ((pfId) * IRO[311].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[295].base + ((pfId) * IRO[295].m1))
+ (IRO[301].base + ((pfId) * IRO[301].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[294].base + ((pfId) * IRO[294].m1))
+ (IRO[300].base + ((pfId) * IRO[300].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[293].base + ((pfId) * IRO[293].m1))
+ (IRO[299].base + ((pfId) * IRO[299].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[298].base + ((pfId) * IRO[298].m1))
+ (IRO[304].base + ((pfId) * IRO[304].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
- (IRO[297].base + ((pfId) * IRO[297].m1))
+ (IRO[303].base + ((pfId) * IRO[303].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
- (IRO[292].base + ((pfId) * IRO[292].m1))
+ (IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[291].base + ((pfId) * IRO[291].m1))
+ (IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
- (IRO[290].base + ((pfId) * IRO[290].m1))
+ (IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
- (IRO[289].base + ((pfId) * IRO[289].m1))
+ (IRO[295].base + ((pfId) * IRO[295].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -231,16 +231,19 @@
#define XSTORM_SPQ_PROD_OFFSET(funcId) \
(IRO[31].base + ((funcId) * IRO[31].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
- (IRO[211].base + ((portId) * IRO[211].m1))
+ (IRO[217].base + ((portId) * IRO[217].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
- (IRO[212].base + ((portId) * IRO[212].m1))
+ (IRO[218].base + ((portId) * IRO[218].m1))
#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
- (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \
- IRO[214].m2))
+ (IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \
+ IRO[220].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[48].base + ((funcId) * IRO[48].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
+/* eth hsi version */
+#define ETH_FP_HSI_VERSION (ETH_FP_HSI_VER_2)
+
/* Ethernet Ring parameters */
#define X_ETH_LOCAL_RING_SIZE 13
#define FIRST_BD_IN_PKT 0
@@ -356,6 +359,7 @@
#define XSEMI_CLK1_RESUL_CHIP (1e-3)
#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
+#define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6))
/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index c4daa068f1db..583591d52497 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -280,17 +280,11 @@ struct shared_hw_cfg { /* NVRAM Offset */
#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000
#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000
-
- u32 power_dissipated; /* 0x11c */
- #define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000
- #define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16
- #define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000
- #define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000
- #define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000
- #define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000
-
- #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000
- #define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24
+ u32 config_3; /* 0x11C */
+ #define SHARED_HW_CFG_EXTENDED_MF_MODE_MASK 0x00000F00
+ #define SHARED_HW_CFG_EXTENDED_MF_MODE_SHIFT 8
+ #define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5 0x00000000
+ #define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR2_DOT_0 0x00000100
u32 ump_nc_si_config; /* 0x120 */
#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003
@@ -859,6 +853,8 @@ struct shared_feat_cfg { /* NVRAM Offset */
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
#define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE 0x00000600
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE 0x00000700
/* The interval in seconds between sending LLDP packets. Set to zero
to disable the feature */
@@ -1268,6 +1264,10 @@ struct drv_func_mb {
#define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
#define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
#define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
+ #define DRV_MSG_CODE_OEM_OK 0x00010000
+ #define DRV_MSG_CODE_OEM_FAILURE 0x00020000
+ #define DRV_MSG_CODE_OEM_UPDATE_SVID_OK 0x00030000
+ #define DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE 0x00040000
/*
* The optic module verification command requires bootcode
* v5.0.6 or later, the specific optic module verification command
@@ -1423,6 +1423,12 @@ struct drv_func_mb {
#define DRV_STATUS_SET_MF_BW 0x00000004
#define DRV_STATUS_LINK_EVENT 0x00000008
+ #define DRV_STATUS_OEM_EVENT_MASK 0x00000070
+ #define DRV_STATUS_OEM_DISABLE_ENABLE_PF 0x00000010
+ #define DRV_STATUS_OEM_BANDWIDTH_ALLOCATION 0x00000020
+
+ #define DRV_STATUS_OEM_UPDATE_SVID 0x00000080
+
#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
#define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200
@@ -2881,8 +2887,8 @@ struct afex_stats {
};
#define BCM_5710_FW_MAJOR_VERSION 7
-#define BCM_5710_FW_MINOR_VERSION 8
-#define BCM_5710_FW_REVISION_VERSION 19
+#define BCM_5710_FW_MINOR_VERSION 10
+#define BCM_5710_FW_REVISION_VERSION 51
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3451,6 +3457,7 @@ enum classify_rule {
CLASSIFY_RULE_OPCODE_MAC,
CLASSIFY_RULE_OPCODE_VLAN,
CLASSIFY_RULE_OPCODE_PAIR,
+ CLASSIFY_RULE_OPCODE_VXLAN,
MAX_CLASSIFY_RULE
};
@@ -3480,7 +3487,8 @@ struct client_init_general_data {
u8 func_id;
u8 cos;
u8 traffic_type;
- u32 reserved0;
+ u8 fp_hsi_ver;
+ u8 reserved0[3];
};
@@ -3550,7 +3558,9 @@ struct client_init_rx_data {
__le16 rx_cos_mask;
__le16 silent_vlan_value;
__le16 silent_vlan_mask;
- __le32 reserved6[2];
+ u8 handle_ptp_pkts_flg;
+ u8 reserved6[3];
+ __le32 reserved7;
};
/*
@@ -3581,7 +3591,7 @@ struct client_init_tx_data {
u8 tunnel_lso_inc_ip_id;
u8 refuse_outband_vlan_flg;
u8 tunnel_non_lso_pcsum_location;
- u8 reserved1;
+ u8 tunnel_non_lso_outer_ip_csum_location;
};
/*
@@ -3619,7 +3629,9 @@ struct client_update_ramrod_data {
u8 refuse_outband_vlan_change_flg;
u8 tx_switching_flg;
u8 tx_switching_change_flg;
- __le32 reserved1;
+ u8 handle_ptp_pkts_flg;
+ u8 handle_ptp_pkts_change_flg;
+ __le16 reserved1;
__le32 echo;
};
@@ -3639,6 +3651,11 @@ struct double_regpair {
u32 regpair1_hi;
};
+/* 2nd parse bd type used in ethernet tx BDs */
+enum eth_2nd_parse_bd_type {
+ ETH_2ND_PARSE_BD_TYPE_LSO_TUNNEL,
+ MAX_ETH_2ND_PARSE_BD_TYPE
+};
/*
* Ethernet address types used in ethernet tx BDs
@@ -3724,12 +3741,25 @@ struct eth_classify_vlan_cmd {
};
/*
+ * Command for adding/removing a VXLAN classification rule
+ */
+struct eth_classify_vxlan_cmd {
+ struct eth_classify_cmd_header header;
+ __le32 vni;
+ __le16 inner_mac_lsb;
+ __le16 inner_mac_mid;
+ __le16 inner_mac_msb;
+ __le16 reserved1;
+};
+
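
eth_classify_vxlan_cmd carries the inner MAC as three little-endian 16-bit words (lsb/mid/msb), the same split the driver performs elsewhere with bnx2x_set_fw_mac_addr(). A sketch of decomposing a 6-byte MAC that way; the big-endian byte order inside each word is an assumption of the sketch:

#include <stdint.h>
#include <stdio.h>

static void mac_to_words(const uint8_t mac[6],
			 uint16_t *msb, uint16_t *mid, uint16_t *lsb)
{
	*msb = (uint16_t)(mac[0] << 8 | mac[1]);
	*mid = (uint16_t)(mac[2] << 8 | mac[3]);
	*lsb = (uint16_t)(mac[4] << 8 | mac[5]);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t msb, mid, lsb;

	mac_to_words(mac, &msb, &mid, &lsb);
	printf("%04x %04x %04x\n", msb, mid, lsb); /* 0011 2233 4455 */
	return 0;
}
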
+/*
* union for eth classification rule
*/
union eth_classify_rule_cmd {
struct eth_classify_mac_cmd mac;
struct eth_classify_vlan_cmd vlan;
struct eth_classify_pair_cmd pair;
+ struct eth_classify_vxlan_cmd vxlan;
};
/*
@@ -3835,8 +3865,10 @@ struct eth_fast_path_rx_cqe {
#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4
#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5)
#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5
-#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6)
-#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_PTP_PKT (0x1<<6)
+#define ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x1<<7)
+#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 7
u8 status_flags;
#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
@@ -3907,6 +3939,13 @@ struct eth_filter_rules_ramrod_data {
struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT];
};
+/* Hsi version */
+enum eth_fp_hsi_ver {
+ ETH_FP_HSI_VER_0,
+ ETH_FP_HSI_VER_1,
+ ETH_FP_HSI_VER_2,
+ MAX_ETH_FP_HSI_VER
+};
/*
* parameters for eth classification configuration ramrod
@@ -3955,29 +3994,17 @@ struct eth_mac_addresses {
/* tunneling related data */
struct eth_tunnel_data {
-#if defined(__BIG_ENDIAN)
- __le16 dst_mid;
- __le16 dst_lo;
-#elif defined(__LITTLE_ENDIAN)
__le16 dst_lo;
__le16 dst_mid;
-#endif
-#if defined(__BIG_ENDIAN)
- __le16 reserved0;
- __le16 dst_hi;
-#elif defined(__LITTLE_ENDIAN)
__le16 dst_hi;
- __le16 reserved0;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 reserved1;
- u8 ip_hdr_start_inner_w;
- __le16 pseudo_csum;
-#elif defined(__LITTLE_ENDIAN)
+ __le16 fw_ip_hdr_csum;
__le16 pseudo_csum;
u8 ip_hdr_start_inner_w;
- u8 reserved1;
-#endif
+ u8 flags;
+#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0)
+#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1)
+#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
};
/* union for mac addresses and for tunneling data.
@@ -4064,31 +4091,41 @@ enum eth_rss_mode {
*/
struct eth_rss_update_ramrod_data {
u8 rss_engine_id;
- u8 capabilities;
+ u8 rss_mode;
+ __le16 capabilities;
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2
-#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3)
-#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3
-#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4)
-#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
-#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
-#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6)
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY (0x1<<3)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY_SHIFT 3
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<4)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 4
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<5)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8)
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8
+#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9)
+#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9
+#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10)
+#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12)
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12
u8 rss_result_mask;
- u8 rss_mode;
- __le16 udp_4tuple_dst_port_mask;
- __le16 udp_4tuple_dst_port_value;
+ u8 reserved3;
+ __le16 reserved4;
u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
__le32 rss_key[T_ETH_RSS_KEY];
__le32 echo;
- __le32 reserved3;
+ __le32 reserved5;
};
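
The capabilities field grows from u8 to __le16 to make room for the new VXLAN, NVGRE-entropy and GRE-inner-header bits, each defined by a mask/shift pair above. A sketch of composing and testing such a field, using two of the shifts from the hunk:

#include <stdint.h>
#include <stdio.h>

#define RSS_IPV4_TCP  (0x1u << 1)
#define RSS_GRE_INNER (0x1u << 10)  /* only fits in the widened field */

int main(void)
{
	uint16_t caps = RSS_IPV4_TCP | RSS_GRE_INNER;

	/* in the driver this value would be stored with cpu_to_le16() */
	printf("gre inner: %s\n", (caps & RSS_GRE_INNER) ? "on" : "off");
	return 0;
}
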
@@ -4260,10 +4297,10 @@ enum eth_tunnel_lso_inc_ip_id {
/* In case a tunnel exists and L4 checksum is offloaded,
* the pseudo checksum location: on packet or on BD.
*/
-enum eth_tunnel_non_lso_pcsum_location {
- PCSUM_ON_PKT,
- PCSUM_ON_BD,
- MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION
+enum eth_tunnel_non_lso_csum_location {
+ CSUM_ON_PKT,
+ CSUM_ON_BD,
+ MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION
};
/*
@@ -4310,8 +4347,10 @@ struct eth_tx_start_bd {
__le16 vlan_or_ethertype;
struct eth_tx_bd_flags bd_flags;
u8 general_data;
-#define ETH_TX_START_BD_HDR_NBDS (0xF<<0)
+#define ETH_TX_START_BD_HDR_NBDS (0x7<<0)
#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
+#define ETH_TX_START_BD_NO_ADDED_TAGS (0x1<<3)
+#define ETH_TX_START_BD_NO_ADDED_TAGS_SHIFT 3
#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
@@ -4387,8 +4426,8 @@ struct eth_tx_parse_2nd_bd {
__le16 global_data;
#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0)
#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
-#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4)
-#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 4
#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5)
#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6)
@@ -4397,9 +4436,14 @@ struct eth_tx_parse_2nd_bd {
#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8)
#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
-#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13)
-#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13
- __le16 reserved1;
+#define ETH_TX_PARSE_2ND_BD_RESERVED1 (0x7<<13)
+#define ETH_TX_PARSE_2ND_BD_RESERVED1_SHIFT 13
+ u8 bd_type;
+#define ETH_TX_PARSE_2ND_BD_TYPE (0xF<<0)
+#define ETH_TX_PARSE_2ND_BD_TYPE_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_RESERVED2 (0xF<<4)
+#define ETH_TX_PARSE_2ND_BD_RESERVED2_SHIFT 4
+ u8 reserved3;
u8 tcp_flags;
#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0)
#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
@@ -4417,7 +4461,7 @@ struct eth_tx_parse_2nd_bd {
#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7)
#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
- u8 reserved2;
+ u8 reserved4;
u8 tunnel_udp_hdr_start_w;
u8 fw_ip_hdr_to_payload_w;
__le16 fw_ip_csum_wo_len_flags_frag;
@@ -5205,10 +5249,18 @@ struct function_start_data {
u8 path_id;
u8 network_cos_mode;
u8 dmae_cmd_id;
- u8 gre_tunnel_mode;
- u8 gre_tunnel_rss;
- u8 nvgre_clss_en;
- __le16 reserved1[2];
+ u8 tunnel_mode;
+ u8 gre_tunnel_type;
+ u8 tunn_clss_en;
+ u8 inner_gre_rss_en;
+ u8 sd_accept_mf_clss_fail;
+ __le16 vxlan_dst_port;
+ __le16 sd_accept_mf_clss_fail_ethtype;
+ __le16 sd_vlan_eth_type;
+ u8 sd_vlan_force_pri_flg;
+ u8 sd_vlan_force_pri_val;
+ u8 sd_accept_mf_clss_fail_match_ethtype;
+ u8 no_added_tags;
};
struct function_update_data {
@@ -5225,12 +5277,20 @@ struct function_update_data {
u8 tx_switch_suspend_change_flg;
u8 tx_switch_suspend;
u8 echo;
+ u8 update_tunn_cfg_flg;
+ u8 tunnel_mode;
+ u8 gre_tunnel_type;
+ u8 tunn_clss_en;
+ u8 inner_gre_rss_en;
+ __le16 vxlan_dst_port;
+ u8 sd_vlan_force_pri_change_flg;
+ u8 sd_vlan_force_pri_flg;
+ u8 sd_vlan_force_pri_val;
+ u8 sd_vlan_tag_change_flg;
+ u8 sd_vlan_eth_type_change_flg;
u8 reserved1;
- u8 update_gre_cfg_flg;
- u8 gre_tunnel_mode;
- u8 gre_tunnel_rss;
- u8 nvgre_clss_en;
- u32 reserved3;
+ __le16 sd_vlan_tag;
+ __le16 sd_vlan_eth_type;
};
/*
@@ -5259,17 +5319,9 @@ struct fw_version {
#define __FW_VERSION_RESERVED_SHIFT 4
};
-/* GRE RSS Mode */
-enum gre_rss_mode {
- GRE_OUTER_HEADERS_RSS,
- GRE_INNER_HEADERS_RSS,
- NVGRE_KEY_ENTROPY_RSS,
- MAX_GRE_RSS_MODE
-};
/* GRE Tunnel Mode */
enum gre_tunnel_type {
- NO_GRE_TUNNEL,
NVGRE_TUNNEL,
L2GRE_TUNNEL,
IPGRE_TUNNEL,
@@ -5442,6 +5494,7 @@ enum ip_ver {
* Malicious VF error ID
*/
enum malicious_vf_error_id {
+ MALICIOUS_VF_NO_ERROR,
VF_PF_CHANNEL_NOT_READY,
ETH_ILLEGAL_BD_LENGTHS,
ETH_PACKET_TOO_SHORT,
@@ -5602,6 +5655,16 @@ struct protocol_common_spe {
union protocol_common_specific_data data;
};
+/* The data for the Set Timesync Ramrod */
+struct set_timesync_ramrod_data {
+ u8 drift_adjust_cmd;
+ u8 offset_cmd;
+ u8 add_sub_drift_adjust_value;
+ u8 drift_adjust_value;
+ u32 drift_adjust_period;
+ struct regpair offset_delta;
+};
+
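
set_timesync_ramrod_data pairs each command byte with one of the enums added further down in this diff (ts_drift_adjust_cmd, ts_offset_cmd, ts_add_sub_value). A sketch of filling the structure for a "set drift, keep offset" request, with regpair reduced to a u64 and the example values invented:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* mirrors struct set_timesync_ramrod_data above */
struct timesync_data {
	uint8_t  drift_adjust_cmd;           /* ts_drift_adjust_cmd */
	uint8_t  offset_cmd;                 /* ts_offset_cmd */
	uint8_t  add_sub_drift_adjust_value; /* ts_add_sub_value */
	uint8_t  drift_adjust_value;
	uint32_t drift_adjust_period;
	uint64_t offset_delta;
};

enum { TS_DRIFT_ADJUST_KEEP, TS_DRIFT_ADJUST_SET, TS_DRIFT_ADJUST_RESET };
enum { TS_OFFSET_KEEP, TS_OFFSET_INC, TS_OFFSET_DEC };
enum { TS_SUB_VALUE, TS_ADD_VALUE };

int main(void)
{
	struct timesync_data d;

	memset(&d, 0, sizeof(d));
	d.drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
	d.offset_cmd = TS_OFFSET_KEEP;
	d.add_sub_drift_adjust_value = TS_ADD_VALUE;
	d.drift_adjust_value = 10;   /* example magnitude */
	d.drift_adjust_period = 1;   /* example period */

	printf("cmd=%d period=%u\n", d.drift_adjust_cmd, d.drift_adjust_period);
	return 0;
}
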
/*
* The send queue element
*/
@@ -5724,10 +5787,38 @@ struct tstorm_vf_zone_data {
struct regpair reserved;
};
+/* Add or Subtract Value for Set Timesync Ramrod */
+enum ts_add_sub_value {
+ TS_SUB_VALUE,
+ TS_ADD_VALUE,
+ MAX_TS_ADD_SUB_VALUE
+};
-/*
- * zone A per-queue data
- */
+/* Drift-Adjust Commands for Set Timesync Ramrod */
+enum ts_drift_adjust_cmd {
+ TS_DRIFT_ADJUST_KEEP,
+ TS_DRIFT_ADJUST_SET,
+ TS_DRIFT_ADJUST_RESET,
+ MAX_TS_DRIFT_ADJUST_CMD
+};
+
+/* Offset Commands for Set Timesync Ramrod */
+enum ts_offset_cmd {
+ TS_OFFSET_KEEP,
+ TS_OFFSET_INC,
+ TS_OFFSET_DEC,
+ MAX_TS_OFFSET_CMD
+};
+
+/* Tunnel Mode */
+enum tunnel_mode {
+ TUNN_MODE_NONE,
+ TUNN_MODE_VXLAN,
+ TUNN_MODE_GRE,
+ MAX_TUNNEL_MODE
+};
+
+/* zone A per-queue data */
struct ustorm_queue_zone_data {
struct ustorm_eth_rx_producers eth_rx_producers;
struct regpair reserved[3];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d1c093dcb054..74fbf9ea7bd8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -41,6 +41,7 @@
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
@@ -63,7 +64,6 @@
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
-
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
@@ -290,6 +290,8 @@ static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
* General service functions
****************************************************************************/
+static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
+
static void __storm_memset_dma_mapping(struct bnx2x *bp,
u32 addr, dma_addr_t mapping)
{
@@ -523,6 +525,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
* as long as this code is called both from syscall context and
* from ndo_set_rx_mode() flow that may be called from BH.
*/
+
spin_lock_bh(&bp->dmae_lock);
/* reset completion */
@@ -551,7 +554,9 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
}
unlock:
+
spin_unlock_bh(&bp->dmae_lock);
+
return rc;
}
@@ -646,119 +651,98 @@ static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
+enum storms {
+ XSTORM,
+ TSTORM,
+ CSTORM,
+ USTORM,
+ MAX_STORMS
+};
+
+#define STORMS_NUM 4
+#define REGS_IN_ENTRY 4
+
+static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
+ enum storms storm,
+ int entry)
+{
+ switch (storm) {
+ case XSTORM:
+ return XSTORM_ASSERT_LIST_OFFSET(entry);
+ case TSTORM:
+ return TSTORM_ASSERT_LIST_OFFSET(entry);
+ case CSTORM:
+ return CSTORM_ASSERT_LIST_OFFSET(entry);
+ case USTORM:
+ return USTORM_ASSERT_LIST_OFFSET(entry);
+ case MAX_STORMS:
+ default:
+ BNX2X_ERR("unknown storm\n");
+ }
+ return -EINVAL;
+}
+
static int bnx2x_mc_assert(struct bnx2x *bp)
{
char last_idx;
- int i, rc = 0;
- u32 row0, row1, row2, row3;
-
- /* XSTORM */
- last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ASSERT_LIST_INDEX_OFFSET);
- if (last_idx)
- BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
- /* print the asserts */
- for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
- row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ASSERT_LIST_OFFSET(i));
- row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ASSERT_LIST_OFFSET(i) + 4);
- row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ASSERT_LIST_OFFSET(i) + 8);
- row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ASSERT_LIST_OFFSET(i) + 12);
-
- if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, row3, row2, row1, row0);
- rc++;
- } else {
- break;
- }
- }
-
- /* TSTORM */
- last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ASSERT_LIST_INDEX_OFFSET);
- if (last_idx)
- BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
- /* print the asserts */
- for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
- row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ASSERT_LIST_OFFSET(i));
- row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ASSERT_LIST_OFFSET(i) + 4);
- row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ASSERT_LIST_OFFSET(i) + 8);
- row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ASSERT_LIST_OFFSET(i) + 12);
-
- if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, row3, row2, row1, row0);
- rc++;
- } else {
- break;
- }
- }
+ int i, j, rc = 0;
+ enum storms storm;
+ u32 regs[REGS_IN_ENTRY];
+ u32 bar_storm_intmem[STORMS_NUM] = {
+ BAR_XSTRORM_INTMEM,
+ BAR_TSTRORM_INTMEM,
+ BAR_CSTRORM_INTMEM,
+ BAR_USTRORM_INTMEM
+ };
+ u32 storm_assert_list_index[STORMS_NUM] = {
+ XSTORM_ASSERT_LIST_INDEX_OFFSET,
+ TSTORM_ASSERT_LIST_INDEX_OFFSET,
+ CSTORM_ASSERT_LIST_INDEX_OFFSET,
+ USTORM_ASSERT_LIST_INDEX_OFFSET
+ };
+ char *storms_string[STORMS_NUM] = {
+ "XSTORM",
+ "TSTORM",
+ "CSTORM",
+ "USTORM"
+ };
- /* CSTORM */
- last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
- CSTORM_ASSERT_LIST_INDEX_OFFSET);
- if (last_idx)
- BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
- /* print the asserts */
- for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
- row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
- CSTORM_ASSERT_LIST_OFFSET(i));
- row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
- CSTORM_ASSERT_LIST_OFFSET(i) + 4);
- row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
- CSTORM_ASSERT_LIST_OFFSET(i) + 8);
- row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
- CSTORM_ASSERT_LIST_OFFSET(i) + 12);
-
- if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, row3, row2, row1, row0);
- rc++;
- } else {
- break;
+ for (storm = XSTORM; storm < MAX_STORMS; storm++) {
+ last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
+ storm_assert_list_index[storm]);
+ if (last_idx)
+ BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
+ storms_string[storm], last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+ /* read a single assert entry */
+ for (j = 0; j < REGS_IN_ENTRY; j++)
+ regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
+ bnx2x_get_assert_list_entry(bp,
+ storm,
+ i) +
+ sizeof(u32) * j);
+
+ /* log entry if it contains a valid assert */
+ if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ storms_string[storm], i, regs[3],
+ regs[2], regs[1], regs[0]);
+ rc++;
+ } else {
+ break;
+ }
}
}
- /* USTORM */
- last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
- USTORM_ASSERT_LIST_INDEX_OFFSET);
- if (last_idx)
- BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
- /* print the asserts */
- for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
- row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
- USTORM_ASSERT_LIST_OFFSET(i));
- row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
- USTORM_ASSERT_LIST_OFFSET(i) + 4);
- row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
- USTORM_ASSERT_LIST_OFFSET(i) + 8);
- row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
- USTORM_ASSERT_LIST_OFFSET(i) + 12);
-
- if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, row3, row2, row1, row0);
- rc++;
- } else {
- break;
- }
- }
+ BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
+ CHIP_IS_E1(bp) ? "everest1" :
+ CHIP_IS_E1H(bp) ? "everest1h" :
+ CHIP_IS_E2(bp) ? "everest2" : "everest3",
+ BCM_5710_FW_MAJOR_VERSION,
+ BCM_5710_FW_MINOR_VERSION,
+ BCM_5710_FW_REVISION_VERSION);
return rc;
}
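
The rewritten bnx2x_mc_assert() collapses four copy-pasted per-storm loops into one loop driven by parallel lookup tables (BAR base, assert-list-index offset, printable name). A miniature sketch of the same table-driven shape, with made-up base addresses:

#include <stdio.h>

#define NUM_STORMS 4

static const unsigned int base[NUM_STORMS] = { 0x1000, 0x2000, 0x3000, 0x4000 };
static const char *name[NUM_STORMS] = { "XSTORM", "TSTORM", "CSTORM", "USTORM" };

int main(void)
{
	int s;

	/* one loop replaces four near-identical per-storm blocks */
	for (s = 0; s < NUM_STORMS; s++)
		printf("%s intmem at 0x%x\n", name[s], base[s]);
	return 0;
}
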
@@ -983,6 +967,12 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
u32 *sb_data_p;
struct bnx2x_fp_txdata txdata;
+ if (!bp->fp)
+ break;
+
+ if (!fp->rx_cons_sb)
+ continue;
+
/* Rx */
BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
i, fp->rx_bd_prod, fp->rx_bd_cons,
@@ -995,7 +985,14 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
/* Tx */
for_each_cos_in_tx_queue(fp, cos)
{
+ if (!fp->txdata_ptr[cos])
+ break;
+
txdata = *fp->txdata_ptr[cos];
+
+ if (!txdata.tx_cons_sb)
+ continue;
+
BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
i, txdata.tx_pkt_prod,
txdata.tx_pkt_cons, txdata.tx_bd_prod,
@@ -1097,6 +1094,12 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
for_each_valid_rx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ if (!bp->fp)
+ break;
+
+ if (!fp->rx_cons_sb)
+ continue;
+
start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
for (j = start; j != end; j = RX_BD(j + 1)) {
@@ -1130,9 +1133,19 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
/* Tx */
for_each_valid_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ if (!bp->fp)
+ break;
+
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
+ if (!fp->txdata_ptr[cos])
+ break;
+
+ if (!txdata->tx_cons_sb)
+ continue;
+
start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
for (j = start; j != end; j = TX_BD(j + 1)) {
@@ -2071,8 +2084,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
else
value = 0;
- DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
-
return value;
}
@@ -2894,6 +2905,57 @@ static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
}
}
+static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
+{
+ struct bnx2x_func_switch_update_params *switch_update_params;
+ struct bnx2x_func_state_params func_params;
+
+ memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
+ switch_update_params = &func_params.params.switch_update;
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+ if (IS_MF_UFP(bp)) {
+ int func = BP_ABS_FUNC(bp);
+ u32 val;
+
+ /* Re-learn the S-tag from shmem */
+ val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+ FUNC_MF_CFG_E1HOV_TAG_MASK;
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+ bp->mf_ov = val;
+ } else {
+ BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
+ goto fail;
+ }
+
+ /* Configure new S-tag in LLH */
+ REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
+ bp->mf_ov);
+
+ /* Send Ramrod to update FW of change */
+ __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
+ &switch_update_params->changes);
+ switch_update_params->vlan = bp->mf_ov;
+
+ if (bnx2x_func_state_change(bp, &func_params) < 0) {
+ BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
+ bp->mf_ov);
+ goto fail;
+ }
+
+ DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov);
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
+
+ return;
+ }
+
+ /* not supported by SW yet */
+fail:
+ bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
+}
+
static void bnx2x_pmf_update(struct bnx2x *bp)
{
int port = BP_PORT(bp);
@@ -3286,7 +3348,8 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
{
int port = BP_PORT(bp);
- REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+ if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
/* Tx queues should only be re-enabled */
netif_tx_wake_all_queues(bp->dev);
@@ -3641,14 +3704,30 @@ out:
ethver, iscsiver, fcoever);
}
-static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
+static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
{
- DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
+ u32 cmd_ok, cmd_fail;
- if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
+ /* sanity */
+ if (event & DRV_STATUS_DCC_EVENT_MASK &&
+ event & DRV_STATUS_OEM_EVENT_MASK) {
+ BNX2X_ERR("Received simultaneous events %08x\n", event);
+ return;
+ }
- /*
- * This is the only place besides the function initialization
+ if (event & DRV_STATUS_DCC_EVENT_MASK) {
+ cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
+ cmd_ok = DRV_MSG_CODE_DCC_OK;
+ } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ {
+ cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
+ cmd_ok = DRV_MSG_CODE_OEM_OK;
+ }
+
+ DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
+
+ if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
+ DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
+ /* This is the only place besides the function initialization
* where the bp->flags can change so it is done without any
* locks
*/
@@ -3663,18 +3742,22 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
bnx2x_e1h_enable(bp);
}
- dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
+ event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
+ DRV_STATUS_OEM_DISABLE_ENABLE_PF);
}
- if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
+
+ if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
+ DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
bnx2x_config_mf_bw(bp);
- dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
+ event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
+ DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
}
/* Report results to MCP */
- if (dcc_event)
- bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
+ if (event)
+ bnx2x_fw_command(bp, cmd_fail, 0);
else
- bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
+ bnx2x_fw_command(bp, cmd_ok, 0);
}
/* must be called under the spq lock */
@@ -4156,9 +4239,12 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
func_mf_config[BP_ABS_FUNC(bp)].config);
val = SHMEM_RD(bp,
func_mb[BP_FW_MB_IDX(bp)].drv_status);
- if (val & DRV_STATUS_DCC_EVENT_MASK)
- bnx2x_dcc_event(bp,
- (val & DRV_STATUS_DCC_EVENT_MASK));
+
+ if (val & (DRV_STATUS_DCC_EVENT_MASK |
+ DRV_STATUS_OEM_EVENT_MASK))
+ bnx2x_oem_event(bp,
+ (val & (DRV_STATUS_DCC_EVENT_MASK |
+ DRV_STATUS_OEM_EVENT_MASK)));
if (val & DRV_STATUS_SET_MF_BW)
bnx2x_set_mf_bw(bp);
@@ -4184,6 +4270,10 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
val & DRV_STATUS_AFEX_EVENT_MASK);
if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
bnx2x_handle_eee_event(bp);
+
+ if (val & DRV_STATUS_OEM_UPDATE_SVID)
+ bnx2x_handle_update_svid_cmd(bp);
+
if (bp->link_vars.periodic_flags &
PERIODIC_FLAGS_LINK_EVENT) {
/* sync with link */
@@ -4678,7 +4768,7 @@ static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
for (i = 0; sig; i++) {
cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- res |= true; /* Each bit is real error! */
+ res = true; /* Each bit is real error! */
if (print) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
@@ -4757,21 +4847,21 @@ static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
_print_next_block((*par_num)++,
"MCP ROM");
*global = true;
- res |= true;
+ res = true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
if (print)
_print_next_block((*par_num)++,
"MCP UMP RX");
*global = true;
- res |= true;
+ res = true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
if (print)
_print_next_block((*par_num)++,
"MCP UMP TX");
*global = true;
- res |= true;
+ res = true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
if (print)
@@ -4803,7 +4893,7 @@ static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
for (i = 0; sig; i++) {
cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- res |= true; /* Each bit is real error! */
+ res = true; /* Each bit is real error! */
if (print) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
@@ -5452,6 +5542,14 @@ static void bnx2x_eq_int(struct bnx2x *bp)
break;
goto next_spqe;
+
+ case EVENT_RING_OPCODE_SET_TIMESYNC:
+ DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
+ "got set_timesync ramrod completion\n");
+ if (f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_SET_TIMESYNC))
+ break;
+ goto next_spqe;
}
switch (opcode | bp->state) {
@@ -6102,7 +6200,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
}
/* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
- if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
+ if (rx_mode != BNX2X_RX_MODE_NONE) {
__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
}
@@ -7662,7 +7760,11 @@ static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
/* Function parameters */
- switch_update_params->suspend = suspend;
+ __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+ &switch_update_params->changes);
+ if (suspend)
+ __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+ &switch_update_params->changes);
rc = bnx2x_func_state_change(bp, &func_params);
@@ -7907,8 +8009,11 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
if (IS_MF(bp)) {
- REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
- REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
+ if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
+ REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
+ bp->mf_ov);
+ }
}
bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
@@ -8300,13 +8405,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
- if (is_zero_ether_addr(bp->dev->dev_addr) &&
- (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
- DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
- "Ignoring Zero MAC for STORAGE SD mode\n");
- return 0;
- }
-
if (IS_PF(bp)) {
unsigned long ramrod_flags = 0;
@@ -9025,7 +9123,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
struct bnx2x_func_state_params func_params = {NULL};
DP(NETIF_MSG_IFDOWN,
- "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
+ "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
func_params.f_obj = &bp->func_obj;
__set_bit(RAMROD_DRV_CLR_ONLY,
@@ -9044,6 +9142,48 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
return 0;
}
+static void bnx2x_disable_ptp(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+
+ /* Disable sending PTP packets to host */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+ NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
+
+ /* Reset PTP event detection rules */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+ NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+ NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
+
+ /* Disable the PTP feature */
+ REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
+ NIG_REG_P0_PTP_EN, 0x0);
+}
+
+/* Called during unload, to stop PTP-related stuff */
+void bnx2x_stop_ptp(struct bnx2x *bp)
+{
+ /* Cancel PTP work queue. Should be done after the Tx queues are
+ * drained to prevent additional scheduling.
+ */
+ cancel_work_sync(&bp->ptp_task);
+
+ if (bp->ptp_tx_skb) {
+ dev_kfree_skb_any(bp->ptp_tx_skb);
+ bp->ptp_tx_skb = NULL;
+ }
+
+ /* Disable PTP in HW */
+ bnx2x_disable_ptp(bp);
+
+ DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
+}
+
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
{
int port = BP_PORT(bp);
@@ -9162,6 +9302,13 @@ unload_error:
#endif
}
+	/* stop_ptp should be called after the Tx queues are drained to
+	 * prevent scheduling work on the cancelled PTP work queue. It should
+	 * also be called after the function stop ramrod is sent, since the
+	 * FW accesses PTP registers as part of that ramrod.
+	 */
+ bnx2x_stop_ptp(bp);
+
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1);
/* Delete all NAPI objects */
@@ -11283,15 +11430,14 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
dev_info.port_hw_config[port].
fcoe_wwn_node_name_lower);
} else if (!IS_MF_SD(bp)) {
- /*
- * Read the WWN info only if the FCoE feature is enabled for
+ /* Read the WWN info only if the FCoE feature is enabled for
* this function.
*/
- if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
+ if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
+ bnx2x_get_ext_wwn_info(bp, func);
+ } else {
+ if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
bnx2x_get_ext_wwn_info(bp, func);
-
- } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
- bnx2x_get_ext_wwn_info(bp, func);
}
BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
@@ -11329,7 +11475,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
* In non SD mode features configuration comes from struct
* func_ext_config.
*/
- if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
+ if (!IS_MF_SD(bp)) {
u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
val2 = MF_CFG_RD(bp, func_ext_config[func].
@@ -11448,7 +11594,7 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
- if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
+ if (!is_valid_ether_addr(bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
"bad Ethernet MAC address configuration: %pM\n"
"change it manually before bringing up the appropriate network interface\n",
@@ -11478,11 +11624,27 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
return cfg;
}
+static void validate_set_si_mode(struct bnx2x *bp)
+{
+ u8 func = BP_ABS_FUNC(bp);
+ u32 val;
+
+ val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+
+ /* check for legal mac (upper bytes) */
+ if (val != 0xffff) {
+ bp->mf_mode = MULTI_FUNCTION_SI;
+ bp->mf_config[BP_VN(bp)] =
+ MF_CFG_RD(bp, func_mf_config[func].config);
+ } else
+ BNX2X_DEV_INFO("illegal MAC address for SI\n");
+}
+
static int bnx2x_get_hwinfo(struct bnx2x *bp)
{
int /*abs*/func = BP_ABS_FUNC(bp);
int vn;
- u32 val = 0;
+ u32 val = 0, val2 = 0;
int rc = 0;
bnx2x_get_common_hwinfo(bp);
@@ -11562,6 +11724,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_ov = 0;
bp->mf_mode = 0;
+ bp->mf_sub_mode = 0;
vn = BP_VN(bp);
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
@@ -11591,15 +11754,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
switch (val) {
case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
- val = MF_CFG_RD(bp, func_mf_config[func].
- mac_upper);
- /* check for legal mac (upper bytes)*/
- if (val != 0xffff) {
- bp->mf_mode = MULTI_FUNCTION_SI;
- bp->mf_config[vn] = MF_CFG_RD(bp,
- func_mf_config[func].config);
- } else
- BNX2X_DEV_INFO("illegal MAC address for SI\n");
+ validate_set_si_mode(bp);
break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
if ((!CHIP_IS_E1x(bp)) &&
@@ -11627,9 +11782,33 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
} else
BNX2X_DEV_INFO("illegal OV for SD\n");
break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
+ bp->mf_mode = MULTI_FUNCTION_SD;
+ bp->mf_sub_mode = SUB_MF_MODE_UFP;
+ bp->mf_config[vn] =
+ MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
bp->mf_config[vn] = 0;
break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
+ val2 = SHMEM_RD(bp,
+ dev_info.shared_hw_config.config_3);
+ val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
+ switch (val2) {
+ case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
+ validate_set_si_mode(bp);
+ bp->mf_sub_mode =
+ SUB_MF_MODE_NPAR1_DOT_5;
+ break;
+ default:
+ /* Unknown configuration */
+ bp->mf_config[vn] = 0;
+ BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
+					       val2);
+ }
+ break;
default:
/* Unknown configuration: reset mf_config */
bp->mf_config[vn] = 0;
@@ -11650,6 +11829,11 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
func, bp->mf_ov, bp->mf_ov);
+ } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) {
+ dev_err(&bp->pdev->dev,
+ "Unexpected - no valid MF OV for func %d in UFP mode\n",
+ func);
+ bp->path_has_ovlan = true;
} else {
dev_err(&bp->pdev->dev,
"No valid MF OV for func %d, aborting\n",
@@ -11898,9 +12082,9 @@ static int bnx2x_init_bp(struct bnx2x *bp)
dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
bp->disable_tpa = disable_tpa;
- bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
+ bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
/* Reduce memory usage in kdump environment by disabling TPA */
- bp->disable_tpa |= reset_devices;
+ bp->disable_tpa |= is_kdump_kernel();
/* Set TPA flags */
if (bp->disable_tpa) {
@@ -11918,7 +12102,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->mrrs = mrrs;
- bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
+ bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
if (IS_VF(bp))
bp->rx_ring_size = MAX_RX_AVAIL;
@@ -11976,6 +12160,9 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->dump_preset_idx = 1;
+ if (CHIP_IS_E3B0(bp))
+ bp->flags |= PTP_SUPPORTED;
+
return rc;
}
@@ -12235,7 +12422,7 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
bp->rx_mode = rx_mode;
/* handle ISCSI SD mode */
- if (IS_MF_ISCSI_SD(bp))
+ if (IS_MF_ISCSI_ONLY(bp))
bp->rx_mode = BNX2X_RX_MODE_NONE;
/* Schedule the rx_mode command */
@@ -12308,13 +12495,17 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct bnx2x *bp = netdev_priv(dev);
struct mii_ioctl_data *mdio = if_mii(ifr);
- DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
- mdio->phy_id, mdio->reg_num, mdio->val_in);
-
if (!netif_running(dev))
return -EAGAIN;
- return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return bnx2x_hwtstamp_ioctl(bp, ifr);
+ default:
+ DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+ mdio->phy_id, mdio->reg_num, mdio->val_in);
+ return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
+ }
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -12338,7 +12529,7 @@ static int bnx2x_validate_addr(struct net_device *dev)
if (IS_VF(bp))
bnx2x_sample_bulletin(bp);
- if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
+ if (!is_valid_ether_addr(dev->dev_addr)) {
BNX2X_ERR("Non-valid Ethernet address\n");
return -EADDRNOTAVAIL;
}
@@ -12958,6 +13149,191 @@ static int set_is_vf(int chip_id)
}
}
+/* nig_tsgen registers - relative addresses */
+#define tsgen_ctrl 0x0
+#define tsgen_freecount 0x10
+#define tsgen_synctime_t0 0x20
+#define tsgen_offset_t0 0x28
+#define tsgen_drift_t0 0x30
+#define tsgen_synctime_t1 0x58
+#define tsgen_offset_t1 0x60
+#define tsgen_drift_t1 0x68
+
+/* FW workaround for setting drift */
+static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
+ int best_val, int best_period)
+{
+ struct bnx2x_func_state_params func_params = {NULL};
+ struct bnx2x_func_set_timesync_params *set_timesync_params =
+ &func_params.params.set_timesync;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
+
+ /* Function parameters */
+ set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
+ set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
+ set_timesync_params->add_sub_drift_adjust_value =
+ drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
+ set_timesync_params->drift_adjust_value = best_val;
+ set_timesync_params->drift_adjust_period = best_period;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ int rc;
+ int drift_dir = 1;
+ int val, period, period1, period2, dif, dif1, dif2;
+ int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
+
+ DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
+
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP adjfreq called while the interface is down\n");
+ return -EFAULT;
+ }
+
+ if (ppb < 0) {
+ ppb = -ppb;
+ drift_dir = 0;
+ }
+
+ if (ppb == 0) {
+ best_val = 1;
+ best_period = 0x1FFFFFF;
+ } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
+ best_val = 31;
+ best_period = 1;
+ } else {
+		/* Do not allow val = 8, 16 or 24, as these values are not
+		 * supported by the FW workaround.
+		 */
+ for (val = 0; val <= 31; val++) {
+ if ((val & 0x7) == 0)
+ continue;
+ period1 = val * 1000000 / ppb;
+ period2 = period1 + 1;
+ if (period1 != 0)
+ dif1 = ppb - (val * 1000000 / period1);
+ else
+ dif1 = BNX2X_MAX_PHC_DRIFT;
+ if (dif1 < 0)
+ dif1 = -dif1;
+ dif2 = ppb - (val * 1000000 / period2);
+ if (dif2 < 0)
+ dif2 = -dif2;
+ dif = (dif1 < dif2) ? dif1 : dif2;
+ period = (dif1 < dif2) ? period1 : period2;
+ if (dif < best_dif) {
+ best_dif = dif;
+ best_val = val;
+ best_period = period;
+ }
+ }
+ }
+
+ rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
+ best_period);
+ if (rc) {
+ BNX2X_ERR("Failed to set drift\n");
+ return -EFAULT;
+ }
+
+ DP(BNX2X_MSG_PTP, "Configrued val = %d, period = %d\n", best_val,
+ best_period);
+
+ return 0;
+}
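The loop above picks the (val, period) pair whose modelled correction, val * 1000000 / period ppb, comes closest to the requested offset, trying both period and period + 1 for each allowed val. Below is a standalone sketch of the same search, simplified to the floor period only; MAX_PHC_DRIFT is an assumed stand-in for BNX2X_MAX_PHC_DRIFT.

/* Hedged sketch of the drift search above - standalone, not driver code.
 * Simplification: only the floor period is tried, while the driver also
 * checks period + 1.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_PHC_DRIFT 31000000	/* assumption: mirrors BNX2X_MAX_PHC_DRIFT */

static void find_drift_params(int ppb, int *best_val, int *best_period)
{
	int val, period, dif, best_dif = MAX_PHC_DRIFT;

	if (ppb <= 0)
		return;
	for (val = 1; val <= 31; val++) {
		if ((val & 0x7) == 0)	/* 8, 16, 24 unsupported by the WA */
			continue;
		period = val * 1000000 / ppb;
		if (period == 0)
			continue;
		dif = abs(ppb - val * 1000000 / period);
		if (dif < best_dif) {
			best_dif = dif;
			*best_val = val;
			*best_period = period;
		}
	}
}

int main(void)
{
	int val = 0, period = 0;

	find_drift_params(12345, &val, &period);
	printf("val=%d period=%d -> ~%d ppb\n",
	       val, period, val * 1000000 / period);
	return 0;
}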
+
+static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ u64 now;
+
+ DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
+
+ now = timecounter_read(&bp->timecounter);
+ now += delta;
+ /* Re-init the timecounter */
+ timecounter_init(&bp->timecounter, &bp->cyclecounter, now);
+
+ return 0;
+}
+
+static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ u64 ns;
+ u32 remainder;
+
+ ns = timecounter_read(&bp->timecounter);
+
+ DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ u64 ns;
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
+
+ /* Re-init the timecounter */
+ timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
+
+ return 0;
+}
+
+/* Enable (or disable) ancillary features of the phc subsystem */
+static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+
+ BNX2X_ERR("PHC ancillary features are not supported\n");
+ return -ENOTSUPP;
+}
+
+void bnx2x_register_phc(struct bnx2x *bp)
+{
+	/* Fill the ptp_clock_info struct and register the PTP clock */
+ bp->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
+ bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */
+ bp->ptp_clock_info.n_alarm = 0;
+ bp->ptp_clock_info.n_ext_ts = 0;
+ bp->ptp_clock_info.n_per_out = 0;
+ bp->ptp_clock_info.pps = 0;
+ bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
+ bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
+ bp->ptp_clock_info.gettime = bnx2x_ptp_gettime;
+ bp->ptp_clock_info.settime = bnx2x_ptp_settime;
+ bp->ptp_clock_info.enable = bnx2x_ptp_enable;
+
+ bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
+ if (IS_ERR(bp->ptp_clock)) {
+ bp->ptp_clock = NULL;
+ BNX2X_ERR("PTP clock registeration failed\n");
+ }
+}
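Once ptp_clock_register() succeeds, the clock is visible to user space as /dev/ptpN. Below is a hedged sketch of reading it with the standard fd-to-clockid mapping from the kernel's PTP documentation; the device index 0 is an assumption.

/* Standalone illustration, not driver code */
#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#include <unistd.h>

#define FD_TO_CLOCKID(fd) (((~(clockid_t)(fd)) << 3) | 3)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("PHC time: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}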
+
static int bnx2x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -13129,6 +13505,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
"Unknown",
dev->base_addr, bp->pdev->irq, dev->dev_addr);
+ bnx2x_register_phc(bp);
+
return 0;
init_one_exit:
@@ -13155,6 +13533,11 @@ static void __bnx2x_remove(struct pci_dev *pdev,
struct bnx2x *bp,
bool remove_netdev)
{
+ if (bp->ptp_clock) {
+ ptp_clock_unregister(bp->ptp_clock);
+ bp->ptp_clock = NULL;
+ }
+
/* Delete storage MAC address */
if (!NO_FCOE(bp)) {
rtnl_lock();
@@ -14136,3 +14519,332 @@ int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
REG_RD(bp, pretend_reg);
return 0;
}
+
+static void bnx2x_ptp_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
+ int port = BP_PORT(bp);
+ u32 val_seq;
+ u64 timestamp, ns;
+ struct skb_shared_hwtstamps shhwtstamps;
+
+ /* Read Tx timestamp registers */
+ val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+ NIG_REG_P0_TLLH_PTP_BUF_SEQID);
+ if (val_seq & 0x10000) {
+ /* There is a valid timestamp value */
+ timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
+ NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
+ timestamp <<= 32;
+ timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
+ NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
+ /* Reset timestamp register to allow new timestamp */
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+ NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
+ ns = timecounter_cyc2time(&bp->timecounter, timestamp);
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
+ dev_kfree_skb_any(bp->ptp_tx_skb);
+ bp->ptp_tx_skb = NULL;
+
+ DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
+ timestamp, ns);
+ } else {
+ DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
+ /* Reschedule to keep checking for a valid timestamp value */
+ schedule_work(&bp->ptp_task);
+ }
+}
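skb_tstamp_tx() queues the timestamp on the sending socket's error queue; a hedged sketch of the user-space receive side follows (socket creation and the SO_TIMESTAMPING / SOF_TIMESTAMPING_TX_HARDWARE setup are assumed to have been done elsewhere).

/* Standalone illustration, not driver code */
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

static void read_tx_timestamp(int fd)
{
	char data[256], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMPING) {
			struct scm_timestamping *tss =
				(struct scm_timestamping *)CMSG_DATA(cmsg);

			/* ts[2] carries the raw hardware timestamp */
			printf("hw ts: %lld.%09ld\n",
			       (long long)tss->ts[2].tv_sec,
			       tss->ts[2].tv_nsec);
		}
	}
}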
+
+void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
+{
+ int port = BP_PORT(bp);
+ u64 timestamp, ns;
+
+ timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
+ NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
+ timestamp <<= 32;
+ timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
+ NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
+
+ /* Reset timestamp register to allow new timestamp */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
+ NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
+
+ ns = timecounter_cyc2time(&bp->timecounter, timestamp);
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+
+ DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
+ timestamp, ns);
+}
+
+/* Read the PHC */
+static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc)
+{
+ struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
+ int port = BP_PORT(bp);
+ u32 wb_data[2];
+ u64 phc_cycles;
+
+ REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
+ NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
+ phc_cycles = wb_data[1];
+ phc_cycles = (phc_cycles << 32) + wb_data[0];
+
+ DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
+
+ return phc_cycles;
+}
+
+static void bnx2x_init_cyclecounter(struct bnx2x *bp)
+{
+ memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
+ bp->cyclecounter.read = bnx2x_cyclecounter_read;
+ bp->cyclecounter.mask = CLOCKSOURCE_MASK(64);
+ bp->cyclecounter.shift = 1;
+ bp->cyclecounter.mult = 1;
+}
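With mult = 1 and shift = 1, the timecounter core converts raw PHC cycles to nanoseconds as ns = (cycles * mult) >> shift, i.e. two cycles per nanosecond - which implies a 2 GHz free-running counter (an inference from these constants, not a documented rate):

/* Standalone illustration of the cyc2ns math, not driver code */
#include <stdio.h>
#include <stdint.h>

static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	/* 2,000,000,000 cycles -> 1,000,000,000 ns (one second) */
	printf("%llu ns\n",
	       (unsigned long long)cyc2ns(2000000000ULL, 1, 1));
	return 0;
}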
+
+static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {NULL};
+ struct bnx2x_func_set_timesync_params *set_timesync_params =
+ &func_params.params.set_timesync;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
+
+ /* Function parameters */
+ set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
+ set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+int bnx2x_enable_ptp_packets(struct bnx2x *bp)
+{
+ struct bnx2x_queue_state_params q_params;
+ int rc, i;
+
+ /* send queue update ramrod to enable PTP packets */
+ memset(&q_params, 0, sizeof(q_params));
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
+ &q_params.params.update.update_flags);
+ __set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
+ &q_params.params.update.update_flags);
+
+ /* send the ramrod on all the queues of the PF */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ /* Set the appropriate Queue object */
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to enable PTP packets\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int bnx2x_configure_ptp_filters(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int rc;
+
+ if (!bp->hwtstamp_ioctl_called)
+ return 0;
+
+ switch (bp->tx_type) {
+ case HWTSTAMP_TX_ON:
+ bp->flags |= TX_TIMESTAMPING_EN;
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+ NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+ NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ BNX2X_ERR("One-step timestamping is not supported\n");
+ return -ERANGE;
+ }
+
+ switch (bp->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ bp->rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ /* Initialize PTP detection for UDP/IPv4 events */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ /* Initialize PTP detection L2 events */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
+
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
+ break;
+ }
+
+ /* Indicate to FW that this PF expects recorded PTP packets */
+ rc = bnx2x_enable_ptp_packets(bp);
+ if (rc)
+ return rc;
+
+ /* Enable sending PTP packets to host */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+ NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
+
+ return 0;
+}
+
+static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int rc;
+
+ DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
+ config.tx_type, config.rx_filter);
+
+ if (config.flags) {
+ BNX2X_ERR("config.flags is reserved for future use\n");
+ return -EINVAL;
+ }
+
+ bp->hwtstamp_ioctl_called = 1;
+ bp->tx_type = config.tx_type;
+ bp->rx_filter = config.rx_filter;
+
+ rc = bnx2x_configure_ptp_filters(bp);
+ if (rc)
+ return rc;
+
+ config.rx_filter = bp->rx_filter;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
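User space reaches this handler through the standard SIOCSHWTSTAMP request; a minimal sketch follows (the interface name eth0 is an assumption):

/* Standalone illustration, not driver code */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) == 0)
		printf("granted rx_filter: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}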
+
+/* Configures HW for PTP */
+static int bnx2x_configure_ptp(struct bnx2x *bp)
+{
+ int rc, port = BP_PORT(bp);
+ u32 wb_data[2];
+
+ /* Reset PTP event detection rules - will be configured in the IOCTL */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+ NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+ NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
+
+	/* Disable PTP packets to host - will be configured in the IOCTL */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+ NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
+
+ /* Enable the PTP feature */
+ REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
+ NIG_REG_P0_PTP_EN, 0x3F);
+
+ /* Enable the free-running counter */
+ wb_data[0] = 0;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
+
+ /* Reset drift register (offset register is not reset) */
+ rc = bnx2x_send_reset_timesync_ramrod(bp);
+ if (rc) {
+ BNX2X_ERR("Failed to reset PHC drift register\n");
+ return -EFAULT;
+ }
+
+ /* Reset possibly old timestamps */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
+ NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+ NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
+
+ return 0;
+}
+
+/* Called during load, to initialize PTP-related stuff */
+void bnx2x_init_ptp(struct bnx2x *bp)
+{
+ int rc;
+
+ /* Configure PTP in HW */
+ rc = bnx2x_configure_ptp(bp);
+ if (rc) {
+ BNX2X_ERR("Stopping PTP initialization\n");
+ return;
+ }
+
+ /* Init work queue for Tx timestamping */
+ INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
+
+	/* Init cyclecounter and timecounter. This is done only on the first
+	 * load. If done on every load, a running PTP application would fail
+	 * across an unload / load sequence (e.g. an MTU change).
+	 */
+ if (!bp->timecounter_init_done) {
+ bnx2x_init_cyclecounter(bp);
+ timecounter_init(&bp->timecounter, &bp->cyclecounter,
+ ktime_to_ns(ktime_get_real()));
+ bp->timecounter_init_done = 1;
+ }
+
+ DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 2beb5430b876..b0779d773343 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2182,6 +2182,45 @@
#define NIG_REG_P0_HWPFC_ENABLE 0x18078
#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480
#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440
+/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. Bits [15:0] return the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Writing a 1 to bit 16
+ * will clear the buffer.
+ */
+#define NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID 0x1875c
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB 0x18754
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB 0x18758
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules. Note that rules 0-3 are for IPv4
+ * packets only and require that the packet is IPv4 for the rules to match.
+ * Note that rules 4-7 are for IPv6 packets only and require that the packet
+ * is IPv6 for the rules to match.
+ */
+#define NIG_REG_P0_LLH_PTP_RULE_MASK 0x187a4
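Because the reset default masks everything out, a match is enabled by clearing its bit; e.g. the 0x7EE the driver writes for PTPv1-over-UDP filtering is 0x7FF with bits 0 (IPv4 DA 224.0.1.129) and 4 (UDP port 319) cleared. A small illustration:

/* Standalone illustration, not driver code */
#include <stdio.h>

#define PTP_PARAM_ALL_MASKED	0x7FF		/* reset default */
#define PTP_PARAM_IPV4_DA0	(1 << 0)	/* 224.0.1.129 */
#define PTP_PARAM_UDP_DPORT0	(1 << 4)	/* port 319 */

int main(void)
{
	unsigned int mask = PTP_PARAM_ALL_MASKED &
			    ~(PTP_PARAM_IPV4_DA0 | PTP_PARAM_UDP_DPORT0);

	printf("V1 UDP/IPv4 param mask: 0x%03X\n", mask);	/* 0x7EE */
	return 0;
}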
+/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */
+#define NIG_REG_P0_LLH_PTP_TO_HOST 0x187ac
/* [RW 1] Input enable for RX MAC interface. */
#define NIG_REG_P0_MAC_IN_EN 0x185ac
/* [RW 1] Output enable for TX MAC interface */
@@ -2194,6 +2233,17 @@
* priority field is extracted from the outer-most VLAN in receive packet.
* Only COS 0 and COS 1 are supported in E2. */
#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054
+/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits
+ * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables
+ * V1 frame format in timesync event detection on RX side. Bit 2 enables V2
+ * frame format in timesync event detection on RX side. Bit 3 enables
+ * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event
+ * detection on TX side. Bit 5 enables V2 frame format in timesync event
+ * detection on TX side. Note that for HW to detect PTP packet and extract
+ * data from the packet, at least one of the version bits of that traffic
+ * direction has to be enabled.
+ */
+#define NIG_REG_P0_PTP_EN 0x18788
/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
* priority is mapped to COS 0 when the corresponding mask bit is 1. More
* than one bit may be set; allowing multiple priorities to be mapped to one
@@ -2300,7 +2350,46 @@
* Ethernet header. */
#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c
#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0
#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460
+/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. Bits [15:0] return the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Writing a 1 to bit 16
+ * will clear the buffer.
+ */
+#define NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID 0x18774
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB 0x1876c
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * the host. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB 0x18770
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules. Note that rules 0-3 are for IPv4
+ * packets only and require that the packet is IPv4 for the rules to match.
+ * Note that rules 4-7 are for IPv6 packets only and require that the packet
+ * is IPv6 for the rules to match.
+ */
+#define NIG_REG_P1_LLH_PTP_RULE_MASK 0x187cc
+/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */
+#define NIG_REG_P1_LLH_PTP_TO_HOST 0x187d4
/* [RW 32] Specify the client number to be assigned to each priority of the
* strict priority arbiter. This register specifies bits 31:0 of the 36-bit
* value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
@@ -2342,6 +2431,17 @@
* priority field is extracted from the outer-most VLAN in receive packet.
* Only COS 0 and COS 1 are supported in E2. */
#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8
+/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits
+ * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables
+ * V1 frame format in timesync event detection on RX side. Bit 2 enables V2
+ * frame format in timesync event detection on RX side. Bit 3 enables
+ * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event
+ * detection on TX side. Bit 5 enables V2 frame format in timesync event
+ * detection on TX side. Note that for HW to detect PTP packet and extract
+ * data from the packet, at least one of the version bits of that traffic
+ * direction has to be enabled.
+ */
+#define NIG_REG_P1_PTP_EN 0x187b0
/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
* priority is mapped to COS 0 when the corresponding mask bit is 1. More
* than one bit may be set; allowing multiple priorities to be mapped to one
@@ -2361,6 +2461,78 @@
#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c
/* [R 1] TLLH FIFO is empty. */
#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338
+/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Bit 17 indicates that
+ * the sequence ID is valid and it is waiting for the TX timestamp value.
+ * Bit 18 indicates whether the timestamp is from a SW request (value of 1)
+ * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer.
+ */
+#define NIG_REG_P0_TLLH_PTP_BUF_SEQID 0x187e0
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_TLLH_PTP_BUF_TS_LSB 0x187d8
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P0_TLLH_PTP_BUF_TS_MSB 0x187dc
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules.
+ */
+#define NIG_REG_P0_TLLH_PTP_RULE_MASK 0x187f4
+/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16
+ * indicates the validity of the data in the buffer. Bit 17 indicates that
+ * the sequence ID is valid and it is waiting for the TX timestamp value.
+ * Bit 18 indicates whether the timestamp is from a SW request (value of 1)
+ * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer.
+ */
+#define NIG_REG_P1_TLLH_PTP_BUF_SEQID 0x187ec
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the lower 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_TLLH_PTP_BUF_TS_LSB 0x187e4
+/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
+ * MCP. This location returns the upper 32 bits of timestamp value.
+ */
+#define NIG_REG_P1_TLLH_PTP_BUF_TS_MSB 0x187e8
+/* [RW 11] Mask register for the various parameters used in determining PTP
+ * packet presence. Set each bit to 1 to mask out the particular parameter.
+ * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
+ * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
+ * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
+ * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
+ * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
+ * MAC DA 2. The reset default is set to mask out all parameters.
+ */
+#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
+ * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
+ * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
+ * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
+ * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
+ * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
+ * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
+ * default is to mask out all of the rules.
+ */
+#define NIG_REG_P1_TLLH_PTP_RULE_MASK 0x187fc
/* [RW 32] Specify which of the credit registers the client is to be mapped
* to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
* for client 0; bits [35:32] are for client 8. For clients that are not
@@ -2513,6 +2685,10 @@
swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then
port swap is equal to ~nig_registers_port_swap.port_swap */
#define NIG_REG_STRAP_OVERRIDE 0x10398
+/* [WB 64] Addresses for TimeSync related registers in the timesync
+ * generator sub-module.
+ */
+#define NIG_REG_TIMESYNC_GEN_REG 0x18800
/* [RW 1] output enable for RX_XCM0 IF */
#define NIG_REG_XCM0_OUT_EN 0x100f0
/* [RW 1] output enable for RX_XCM1 IF */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index b1936044767a..7bc2924a7e24 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4019,6 +4019,7 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
struct bnx2x_raw_obj *r = &o->raw;
struct eth_rss_update_ramrod_data *data =
(struct eth_rss_update_ramrod_data *)(r->rdata);
+ u16 caps = 0;
u8 rss_mode = 0;
int rc;
@@ -4042,28 +4043,34 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
/* RSS capabilities */
if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
- data->capabilities |=
- ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
- data->capabilities |=
- ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
- data->capabilities |=
- ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
- data->capabilities |=
- ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
- data->capabilities |=
- ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
- data->capabilities |=
- ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
+
+ /* RSS keys */
+ if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
+ memcpy(&data->rss_key[0], &p->rss_key[0],
+ sizeof(data->rss_key));
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
+ }
+
+ data->capabilities = cpu_to_le16(caps);
/* Hashing mask */
data->rss_result_mask = p->rss_result_mask;
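Accumulating the bits in a host-order u16 and converting once avoids what the removed lines did - OR-ing host-order constants directly into the little-endian ramrod field, which yields a byte-swapped value on big-endian hosts. The same pattern in isolation, with user-space htole16() standing in for the kernel's cpu_to_le16():

/* Standalone illustration, not driver code */
#include <stdio.h>
#include <stdint.h>
#include <endian.h>

#define CAP_IPV4	(1 << 0)	/* stand-ins for the ramrod bits */
#define CAP_IPV4_TCP	(1 << 1)

int main(void)
{
	uint16_t caps = 0;	/* host order while accumulating */
	uint16_t wire;

	caps |= CAP_IPV4;
	caps |= CAP_IPV4_TCP;
	wire = htole16(caps);	/* single conversion at the end */

	printf("host 0x%04x -> le 0x%04x\n", caps, wire);
	return 0;
}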
@@ -4084,13 +4091,6 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
if (netif_msg_ifup(bp))
bnx2x_debug_print_ind_table(bp, p);
- /* RSS keys */
- if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
- memcpy(&data->rss_key[0], &p->rss_key[0],
- sizeof(data->rss_key));
- data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
- }
-
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
@@ -4336,6 +4336,8 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
test_bit(BNX2X_Q_FLG_FCOE, flags) ?
LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
+ gen_data->fp_hsi_ver = ETH_FP_HSI_VERSION;
+
DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
}
@@ -4357,12 +4359,13 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
tx_data->force_default_pri_flg =
test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
-
+ tx_data->refuse_outband_vlan_flg =
+ test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
tx_data->tunnel_lso_inc_ip_id =
test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
tx_data->tunnel_non_lso_pcsum_location =
- test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
- PCSUM_ON_BD;
+ test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
+ CSUM_ON_BD;
tx_data->tx_status_block_id = params->fw_sb_id;
tx_data->tx_sb_index_number = params->sb_cq_index;
@@ -4722,6 +4725,12 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp,
data->tx_switching_change_flg =
test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
&params->update_flags);
+
+ /* PTP */
+ data->handle_ptp_pkts_flg =
+ test_bit(BNX2X_Q_UPDATE_PTP_PKTS, &params->update_flags);
+ data->handle_ptp_pkts_change_flg =
+ test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, &params->update_flags);
}
static inline int bnx2x_q_send_update(struct bnx2x *bp,
@@ -5376,6 +5385,10 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
(!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
next_state = BNX2X_F_STATE_STARTED;
+ else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
+ (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+ next_state = BNX2X_F_STATE_STARTED;
+
else if (cmd == BNX2X_F_CMD_TX_STOP)
next_state = BNX2X_F_STATE_TX_STOPPED;
@@ -5385,6 +5398,10 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
(!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
next_state = BNX2X_F_STATE_TX_STOPPED;
+ else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
+ (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+ next_state = BNX2X_F_STATE_TX_STOPPED;
+
else if (cmd == BNX2X_F_CMD_TX_START)
next_state = BNX2X_F_STATE_STARTED;
@@ -5652,9 +5669,27 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
rdata->path_id = BP_PATH(bp);
rdata->network_cos_mode = start_params->network_cos_mode;
- rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
- rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
+ rdata->tunnel_mode = start_params->tunnel_mode;
+ rdata->gre_tunnel_type = start_params->gre_tunnel_type;
+ rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
+ rdata->vxlan_dst_port = cpu_to_le16(4789);
+ rdata->sd_accept_mf_clss_fail = start_params->class_fail;
+ if (start_params->class_fail_ethtype) {
+ rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
+ rdata->sd_accept_mf_clss_fail_ethtype =
+ cpu_to_le16(start_params->class_fail_ethtype);
+ }
+ rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri;
+ rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val;
+ if (start_params->sd_vlan_eth_type)
+ rdata->sd_vlan_eth_type =
+ cpu_to_le16(start_params->sd_vlan_eth_type);
+ else
+ rdata->sd_vlan_eth_type =
+ cpu_to_le16(0x8100);
+
+ rdata->no_added_tags = start_params->no_added_tags;
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
@@ -5680,8 +5715,52 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->tx_switch_suspend_change_flg = 1;
- rdata->tx_switch_suspend = switch_update_params->suspend;
+ if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+ &switch_update_params->changes)) {
+ rdata->tx_switch_suspend_change_flg = 1;
+ rdata->tx_switch_suspend =
+ test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+ &switch_update_params->changes);
+ }
+
+ if (test_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
+ &switch_update_params->changes)) {
+ rdata->sd_vlan_tag_change_flg = 1;
+ rdata->sd_vlan_tag =
+ cpu_to_le16(switch_update_params->vlan);
+ }
+
+ if (test_bit(BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
+ &switch_update_params->changes)) {
+ rdata->sd_vlan_eth_type_change_flg = 1;
+ rdata->sd_vlan_eth_type =
+ cpu_to_le16(switch_update_params->vlan_eth_type);
+ }
+
+ if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
+ &switch_update_params->changes)) {
+ rdata->sd_vlan_force_pri_change_flg = 1;
+ if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
+ &switch_update_params->changes))
+ rdata->sd_vlan_force_pri_flg = 1;
+		rdata->sd_vlan_force_pri_val =
+			switch_update_params->vlan_force_prio;
+ }
+
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+ &switch_update_params->changes)) {
+ rdata->update_tunn_cfg_flg = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
+ &switch_update_params->changes))
+ rdata->tunn_clss_en = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+ &switch_update_params->changes))
+ rdata->inner_gre_rss_en = 1;
+ rdata->tunnel_mode = switch_update_params->tunnel_mode;
+ rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
+ rdata->vxlan_dst_port = cpu_to_le16(4789);
+ }
+
rdata->echo = SWITCH_UPDATE;
/* No need for an explicit memory barrier here as long as we
@@ -5817,6 +5896,42 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
+static inline
+int bnx2x_func_send_set_timesync(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct set_timesync_ramrod_data *rdata =
+ (struct set_timesync_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_func_set_timesync_params *set_timesync_params =
+ &params->params.set_timesync;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
+ rdata->offset_cmd = set_timesync_params->offset_cmd;
+ rdata->add_sub_drift_adjust_value =
+ set_timesync_params->add_sub_drift_adjust_value;
+ rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
+ rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
+ rdata->offset_delta.lo =
+ cpu_to_le32(U64_LO(set_timesync_params->offset_delta));
+ rdata->offset_delta.hi =
+ cpu_to_le32(U64_HI(set_timesync_params->offset_delta));
+
+ DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
+ rdata->drift_adjust_cmd, rdata->offset_cmd,
+ rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
+ rdata->drift_adjust_period, rdata->offset_delta.lo,
+ rdata->offset_delta.hi);
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
static int bnx2x_func_send_cmd(struct bnx2x *bp,
struct bnx2x_func_state_params *params)
{
@@ -5839,6 +5954,8 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
return bnx2x_func_send_tx_start(bp, params);
case BNX2X_F_CMD_SWITCH_UPDATE:
return bnx2x_func_send_switch_update(bp, params);
+ case BNX2X_F_CMD_SET_TIMESYNC:
+ return bnx2x_func_send_set_timesync(bp, params);
default:
BNX2X_ERR("Unknown command: %d\n", params->cmd);
return -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 718ecd294661..e97275f456c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -711,6 +711,7 @@ enum {
BNX2X_RSS_IPV6,
BNX2X_RSS_IPV6_TCP,
BNX2X_RSS_IPV6_UDP,
+ BNX2X_RSS_GRE_INNER_HDRS,
};
struct bnx2x_config_rss_params {
@@ -769,7 +770,9 @@ enum {
BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
BNX2X_Q_UPDATE_SILENT_VLAN_REM,
BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
- BNX2X_Q_UPDATE_TX_SWITCHING
+ BNX2X_Q_UPDATE_TX_SWITCHING,
+ BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
+ BNX2X_Q_UPDATE_PTP_PKTS,
};
/* Allowed Queue states */
@@ -831,6 +834,7 @@ enum {
BNX2X_Q_FLG_ANTI_SPOOF,
BNX2X_Q_FLG_SILENT_VLAN_REM,
BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
+ BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN,
BNX2X_Q_FLG_PCSUM_ON_PKT,
BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
};
@@ -851,6 +855,10 @@ enum bnx2x_q_type {
#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */
#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+/* DMAE channel to be used by FW for timesync workaround. A driver that sends
+ * timesync-related ramrods must not use this DMAE command ID.
+ */
+#define FW_DMAE_CMD_ID 6
struct bnx2x_queue_init_params {
struct {
@@ -1085,6 +1093,20 @@ struct bnx2x_queue_sp_obj {
};
/********************** Function state update *********************************/
+
+/* UPDATE command options */
+enum {
+ BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+ BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+ BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
+ BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
+ BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
+ BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
+ BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+ BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
+ BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+};
+
/* Allowed Function states */
enum bnx2x_func_state {
BNX2X_F_STATE_RESET,
@@ -1105,6 +1127,7 @@ enum bnx2x_func_cmd {
BNX2X_F_CMD_TX_STOP,
BNX2X_F_CMD_TX_START,
BNX2X_F_CMD_SWITCH_UPDATE,
+ BNX2X_F_CMD_SET_TIMESYNC,
BNX2X_F_CMD_MAX,
};
@@ -1146,18 +1169,44 @@ struct bnx2x_func_start_params {
/* Function cos mode */
u8 network_cos_mode;
- /* NVGRE classification enablement */
- u8 nvgre_clss_en;
+ /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */
+ u8 tunnel_mode;
- /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
- u8 gre_tunnel_mode;
+ /* tunneling classification enablement */
+ u8 tunn_clss_en;
+
+ /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
+ u8 gre_tunnel_type;
+
+ /* Enables Inner GRE RSS on the function, depends on the client RSS
+	 * capabilities
+ */
+ u8 inner_gre_rss_en;
+
+ /* Allows accepting of packets failing MF classification, possibly
+ * only matching a given ethertype
+ */
+ u8 class_fail;
+ u16 class_fail_ethtype;
- /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
- u8 gre_tunnel_rss;
+ /* Override priority of output packets */
+ u8 sd_vlan_force_pri;
+ u8 sd_vlan_force_pri_val;
+
+ /* Replace vlan's ethertype */
+ u16 sd_vlan_eth_type;
+
+ /* Prevent inner vlans from being added by FW */
+ u8 no_added_tags;
};
struct bnx2x_func_switch_update_params {
- u8 suspend;
+ unsigned long changes; /* BNX2X_F_UPDATE_XX bits */
+ u16 vlan;
+ u16 vlan_eth_type;
+ u8 vlan_force_prio;
+ u8 tunnel_mode;
+ u8 gre_tunnel_type;
};
struct bnx2x_func_afex_update_params {
@@ -1172,6 +1221,7 @@ struct bnx2x_func_afex_viflists_params {
u8 afex_vif_list_command;
u8 func_to_clear;
};
+
struct bnx2x_func_tx_start_params {
struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
u8 dcb_enabled;
@@ -1179,6 +1229,24 @@ struct bnx2x_func_tx_start_params {
u8 dont_add_pri_0_en;
};
+struct bnx2x_func_set_timesync_params {
+ /* Reset, set or keep the current drift value */
+ u8 drift_adjust_cmd;
+
+ /* Dec, inc or keep the current offset */
+ u8 offset_cmd;
+
+ /* Drift value direction */
+ u8 add_sub_drift_adjust_value;
+
+ /* Drift, period and offset values to be used according to the commands
+ * above.
+ */
+ u8 drift_adjust_value;
+ u32 drift_adjust_period;
+ u64 offset_delta;
+};
+
struct bnx2x_func_state_params {
struct bnx2x_func_sp_obj *f_obj;
@@ -1197,6 +1265,7 @@ struct bnx2x_func_state_params {
struct bnx2x_func_afex_update_params afex_update;
struct bnx2x_func_afex_viflists_params afex_viflists;
struct bnx2x_func_tx_start_params tx_start;
+ struct bnx2x_func_set_timesync_params set_timesync;
} params;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 662310c5f4e9..c88b20af87df 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1125,7 +1125,7 @@ static int bnx2x_ari_enabled(struct pci_dev *dev)
return dev->bus->self && dev->bus->self->ari_enabled;
}
-static void
+static int
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
int sb_id;
@@ -1150,6 +1150,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
}
DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
+ return BP_VFDB(bp)->vf_sbs_pool;
}
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
@@ -1314,15 +1315,17 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
}
/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
- bnx2x_get_vf_igu_cam_info(bp);
+ if (!bnx2x_get_vf_igu_cam_info(bp)) {
+ BNX2X_ERR("No entries in IGU CAM for vfs\n");
+ err = -EINVAL;
+ goto failed;
+ }
/* allocate the queue arrays for all VFs */
bp->vfdb->vfqs = kzalloc(
BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
GFP_KERNEL);
- DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
-
if (!bp->vfdb->vfqs) {
BNX2X_ERR("failed to allocate vf queue array\n");
err = -ENOMEM;
@@ -1349,9 +1352,7 @@ void bnx2x_iov_remove_one(struct bnx2x *bp)
if (!IS_SRIOV(bp))
return;
- DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
- pci_disable_sriov(bp->pdev);
- DP(BNX2X_MSG_IOV, "sriov disabled\n");
+ bnx2x_disable_sriov(bp);
/* disable access to all VFs */
for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
@@ -1985,21 +1986,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}
-static inline
-struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
-{
- int i;
- struct bnx2x_virtf *vf = NULL;
-
- for_each_vf(bp, i) {
- vf = BP_VF(bp, i);
- if (stat_id >= vf->igu_base_id &&
- stat_id < vf->igu_base_id + vf_sb_count(vf))
- break;
- }
- return vf;
-}
-
/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
u8 enable)
@@ -2362,12 +2348,6 @@ int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
return rc;
}
-static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
- struct bnx2x_virtf *vf, u32 *sbdf)
-{
- *sbdf = vf->devfn | (vf->bus << 8);
-}
-
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv)
{
@@ -2416,7 +2396,7 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* log the unlock */
DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
- vf->abs_vfid, vf->op_current);
+ vf->abs_vfid, current_tlv);
}
static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
@@ -2501,7 +2481,7 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
bp->requested_nr_virtfn = num_vfs_param;
if (num_vfs_param == 0) {
bnx2x_set_pf_tx_switching(bp, false);
- pci_disable_sriov(dev);
+ bnx2x_disable_sriov(bp);
return 0;
} else {
return bnx2x_enable_sriov(bp);
@@ -2614,6 +2594,12 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
void bnx2x_disable_sriov(struct bnx2x *bp)
{
+ if (pci_vfs_assigned(bp->pdev)) {
+ DP(BNX2X_MSG_IOV,
+ "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+ return;
+ }
+
pci_disable_sriov(bp->pdev);
}
@@ -2628,7 +2614,7 @@ static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
}
if (!IS_SRIOV(bp)) {
- BNX2X_ERR("sriov is disabled - can't utilize iov-realted functionality\n");
+ BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index ca1055f3d8af..01bafa4ac045 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -299,7 +299,8 @@ struct bnx2x_vfdb {
#define BP_VFDB(bp) ((bp)->vfdb)
/* vf array */
struct bnx2x_virtf *vfs;
-#define BP_VF(bp, idx) (&((bp)->vfdb->vfs[idx]))
+#define BP_VF(bp, idx) ((BP_VFDB(bp) && (bp)->vfdb->vfs) ? \
+ &((bp)->vfdb->vfs[idx]) : NULL)
#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[idx].var)
/* queue array - for all vfs */
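With BP_VF() now returning NULL before the vfdb or the vfs array is allocated, the implied caller-side pattern is a sketch like this (illustrative, not a specific call site):

	struct bnx2x_virtf *vf = BP_VF(bp, vfidx);

	if (!vf)	/* vfdb or the vfs array not (yet) allocated */
		return -EINVAL;

	/* safe to dereference vf from here on */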
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index ca47665f94bf..d1608297c773 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -137,7 +137,7 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
cpu_to_le16(bp->stats_counter++);
DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
- bp->fw_stats_req->hdr.drv_stats_counter);
+ le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
/* adjust the ramrod to include VF queues statistics */
bnx2x_iov_adjust_stats_req(bp);
@@ -200,7 +200,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
}
}
-static int bnx2x_stats_comp(struct bnx2x *bp)
+static void bnx2x_stats_comp(struct bnx2x *bp)
{
u32 *stats_comp = bnx2x_sp(bp, stats_comp);
int cnt = 10;
@@ -214,7 +214,6 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
cnt--;
usleep_range(1000, 2000);
}
- return 1;
}
/*
@@ -1630,6 +1629,11 @@ void bnx2x_stats_init(struct bnx2x *bp)
int /*abs*/port = BP_PORT(bp);
int mb_idx = BP_FW_MB_IDX(bp);
+ if (IS_VF(bp)) {
+ bnx2x_memset_stats(bp);
+ return;
+ }
+
bp->stats_pending = 0;
bp->executer_idx = 0;
bp->stats_counter = 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 54e0427a9ee6..b1d9c44aa56c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -583,7 +583,6 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
flags |= VFPF_QUEUE_FLG_STATS;
flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
flags |= VFPF_QUEUE_FLG_VLAN;
- DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
/* Common */
req->vf_qid = fp_idx;
@@ -952,14 +951,6 @@ static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
REG_WR8(bp, addr, 1);
}
-static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
-{
- int i;
-
- for_each_vf(bp, i)
- storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
-}
-
/* enable vf_pf mailbox (aka vf-pf-channel) */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index a6a9f284c8dd..23f23c97c2ad 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -383,7 +383,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
break;
rcu_read_lock();
- if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
+ if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
rc = -ENODEV;
rcu_read_unlock();
break;
@@ -527,7 +527,7 @@ int cnic_unregister_driver(int ulp_type)
list_for_each_entry(dev, &cnic_dev_list, list) {
struct cnic_local *cp = dev->cnic_priv;
- if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+ if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
pr_err("%s: Type %d still has devices registered\n",
__func__, ulp_type);
read_unlock(&cnic_dev_lock);
@@ -575,7 +575,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
mutex_unlock(&cnic_lock);
return -EAGAIN;
}
- if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+ if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
pr_err("%s: Type %d has already been registered to this device\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
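The three conversions above share one rationale: the pointer is only tested against NULL, never dereferenced, so rcu_access_pointer() is the right primitive — unlike rcu_dereference() it carries no lockdep/sparse dereference checking and does not imply a read-side critical section. In essence:

	/* Existence check: the pointer value is compared, never dereferenced */
	if (!rcu_access_pointer(cp->ulp_ops[ulp_type]))
		rc = -ENODEV;

Any actual use of the pointed-to object still requires rcu_read_lock() plus rcu_dereference() as before.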
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 5cc9cae21ed5..e8c601d26c64 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2439,6 +2439,13 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
major, (reg >> 16) & 0x0f, reg & 0xffff);
+ /* Store the integrated PHY revision for the MDIO probing function
+ * to pass this information to the PHY driver. The PHY driver expects
+ * to find the PHY major revision in bits 15:8 while the GENET register
+ * stores that information in bits 7:0, account for that.
+ */
+ priv->gphy_rev = (reg & 0xffff) << 8;
+
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!(params->flags & GENET_HAS_40BITS))
pr_warn("GENET does not support 40-bits PA\n");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index c862d0666771..ad95fe57ebcd 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -545,6 +545,7 @@ struct bcmgenet_priv {
struct phy_device *phydev;
struct device_node *phy_dn;
struct mii_bus *mii_bus;
+ u16 gphy_rev;
/* PHY device variables */
int old_duplex;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index c88f7ae99636..75b26cbaa7c1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -296,7 +296,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device_node *dn = priv->pdev->dev.of_node;
struct phy_device *phydev;
- unsigned int phy_flags;
+ u32 phy_flags;
int ret;
if (priv->phydev) {
@@ -315,8 +315,11 @@ static int bcmgenet_mii_probe(struct net_device *dev)
priv->phy_dn = of_node_get(dn);
}
- phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0,
- priv->phy_interface);
+ /* Communicate the integrated PHY revision */
+ phy_flags = priv->gphy_rev;
+
+ phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
+ phy_flags, priv->phy_interface);
if (!phydev) {
pr_err("could not attach to PHY\n");
return -ENODEV;
@@ -338,15 +341,6 @@ static int bcmgenet_mii_probe(struct net_device *dev)
return ret;
}
- phy_flags = PHY_BRCM_100MBPS_WAR;
-
- /* workarounds are only needed for 100Mpbs PHYs, and
- * never on GENET V1 hardware
- */
- if ((phydev->supported & PHY_GBIT_FEATURES) || GENET_IS_V1(priv))
- phy_flags = 0;
-
- phydev->dev_flags |= phy_flags;
phydev->advertising = phydev->supported;
/* The internal PHY has its link interrupts routed to the
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 13f9636cdba7..903466ef41c0 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -107,7 +107,8 @@ bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
{
struct bfi_enet_enable_req *admin_req =
&ethport->bfi_enet_cmd.admin_req;
- struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
+ struct bfi_enet_rsp *rsp =
+ container_of(msghdr, struct bfi_enet_rsp, mh);
switch (admin_req->enable) {
case BNA_STATUS_T_ENABLED:
@@ -133,7 +134,8 @@ bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
{
struct bfi_enet_diag_lb_req *diag_lb_req =
&ethport->bfi_enet_cmd.lpbk_req;
- struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
+ struct bfi_enet_rsp *rsp =
+ container_of(msghdr, struct bfi_enet_rsp, mh);
switch (diag_lb_req->enable) {
case BNA_STATUS_T_ENABLED:
@@ -161,7 +163,8 @@ static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
struct bfi_msgq_mhdr *msghdr)
{
- struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;
+ struct bfi_enet_attr_rsp *rsp =
+ container_of(msghdr, struct bfi_enet_attr_rsp, mh);
/**
* Store only if not set earlier, since BNAD can override the HW
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 85e63546abe3..8ee3fdcc17cd 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -715,7 +715,7 @@ bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
struct bfi_msgq_mhdr *msghdr)
{
struct bfi_enet_rsp *rsp =
- (struct bfi_enet_rsp *)msghdr;
+ container_of(msghdr, struct bfi_enet_rsp, mh);
if (rsp->error) {
/* Clear ucast from cache */
@@ -732,7 +732,7 @@ bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
struct bfi_enet_mcast_add_req *req =
&rxf->bfi_enet_cmd.mcast_add_req;
struct bfi_enet_mcast_add_rsp *rsp =
- (struct bfi_enet_mcast_add_rsp *)msghdr;
+ container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);
bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
ntohs(rsp->handle));
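The casts these hunks remove were only correct because the message header happens to be the first member of the response structures; container_of() encodes that relationship explicitly. A sketch of the difference, with the field layout abbreviated:

	struct bfi_enet_rsp {
		struct bfi_msgq_mhdr mh;	/* embedded message header */
		u8 error;
		/* ... remaining fields elided ... */
	};

	/* (struct bfi_enet_rsp *)msghdr silently assumes mh is first;
	 * container_of() subtracts offsetof(struct bfi_enet_rsp, mh),
	 * so it stays correct even if mh is ever moved.
	 */
	rsp = container_of(msghdr, struct bfi_enet_rsp, mh);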
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ca5d7798b265..d9b8e94b805f 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2241,9 +2241,9 @@ static int __init macb_probe(struct platform_device *pdev)
netif_carrier_off(dev);
- netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
- macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
- dev->irq, dev->dev_addr);
+ netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
+ macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
+ dev->base_addr, dev->irq, dev->dev_addr);
phydev = bp->phy_dev;
netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e5be511a3c38..eeec49540233 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -283,6 +283,7 @@ static const struct pci_device_id cxgb4_pci_tbl[] = {
CH_DEVICE(0x5083, 4),
CH_DEVICE(0x5084, 4),
CH_DEVICE(0x5085, 4),
+ CH_DEVICE(0x5086, 4),
CH_DEVICE(0x5401, 4),
CH_DEVICE(0x5402, 4),
CH_DEVICE(0x5403, 4),
@@ -310,6 +311,7 @@ static const struct pci_device_id cxgb4_pci_tbl[] = {
CH_DEVICE(0x5483, 4),
CH_DEVICE(0x5484, 4),
CH_DEVICE(0x5485, 4),
+ CH_DEVICE(0x5486, 4),
{ 0, }
};
@@ -4390,7 +4392,6 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
* bond. We need to find such different adapters and add clip
* in all of them only once.
*/
- read_lock(&bond->lock);
bond_for_each_slave(bond, slave, iter) {
if (!first_pdev) {
ret = clip_add(slave->dev, ifa, event);
@@ -4404,7 +4405,6 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
to_pci_dev(slave->dev->dev.parent))
ret = clip_add(slave->dev, ifa, event);
}
- read_unlock(&bond->lock);
} else
ret = clip_add(ifa->idev->dev, ifa, event);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 41d04462b72e..22d7581341a9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -1099,6 +1099,9 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
int ret = 0;
+ if (end >= adapter->params.sf_nsec)
+ return -EINVAL;
+
while (start <= end) {
if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
(ret = sf1_write(adapter, 4, 0, 1,
@@ -3850,8 +3853,20 @@ int t4_wait_dev_ready(struct adapter *adap)
return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
}
+struct flash_desc {
+ u32 vendor_and_model_id;
+ u32 size_mb;
+};
+
static int get_flash_params(struct adapter *adap)
{
+ /* Table for non-Numonix supported flash parts. Numonix parts are left
+ * to the preexisting code. All flash parts have 64KB sectors.
+ */
+ static struct flash_desc supported_flash[] = {
+ { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
+ };
+
int ret;
u32 info;
@@ -3862,6 +3877,14 @@ static int get_flash_params(struct adapter *adap)
if (ret)
return ret;
+ for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
+ if (supported_flash[ret].vendor_and_model_id == info) {
+ adap->params.sf_size = supported_flash[ret].size_mb;
+ adap->params.sf_nsec =
+ adap->params.sf_size / SF_SEC_SIZE;
+ return 0;
+ }
+
if ((info & 0xff) != 0x20) /* not a Numonix flash */
return -EINVAL;
info >>= 16; /* log2 of size */
@@ -3874,6 +3897,10 @@ static int get_flash_params(struct adapter *adap)
adap->params.sf_size = 1 << info;
adap->params.sf_fw_start =
t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
+
+ if (adap->params.sf_size < FLASH_MIN_SIZE)
+ dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
+ adap->params.sf_size, FLASH_MIN_SIZE);
return 0;
}
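The table-driven lookup means support for another non-Numonix part becomes a one-line addition, provided the part also uses 64KB sectors; note that despite its name, size_mb carries a byte count (4 << 20 = 4MB). A hypothetical extra entry for illustration only — the ID and size below are invented:

	static struct flash_desc supported_flash[] = {
		{ 0x150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
		{ 0x1620c2, 8 << 20 },	/* hypothetical 8MB part, 64KB sectors */
	};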
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 35e3d8e32881..6833a7b02137 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -175,7 +175,7 @@ enum {
* Location of firmware image in FLASH.
*/
FLASH_FW_START_SEC = 8,
- FLASH_FW_NSECS = 8,
+ FLASH_FW_NSECS = 16,
FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
@@ -206,6 +206,12 @@ enum {
FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
+ /* We don't support FLASH devices that can't hold the full
+ * standard set of sections needed for normal operation.
+ */
+ FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE,
+
FLASH_FPGA_CFG_START_SEC = 15,
FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC),
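FLASH_MIN_SIZE pins the floor at the first byte past the on-flash config region. With the driver's 64KB sectors, and assuming the config region starts at sector 31 and spans one sector (those defines sit above this hunk and are not shown here), the arithmetic works out to:

	FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE
	               = (31 * 64 KB)   + (1 * 64 KB)
	               = 2 MB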
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 2102a4c91737..82534031cada 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2912,7 +2912,6 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
static const struct pci_device_id cxgb4vf_pci_tbl[] = {
CH_DEVICE(0xb000, 0), /* PE10K FPGA */
- CH_DEVICE(0x4800, 0), /* T440-dbg */
CH_DEVICE(0x4801, 0), /* T420-cr */
CH_DEVICE(0x4802, 0), /* T422-cr */
CH_DEVICE(0x4803, 0), /* T440-cr */
@@ -2934,7 +2933,6 @@ static const struct pci_device_id cxgb4vf_pci_tbl[] = {
CH_DEVICE(0x4880, 6),
CH_DEVICE(0x4880, 7),
CH_DEVICE(0x4880, 8),
- CH_DEVICE(0x5800, 0), /* T580-dbg */
CH_DEVICE(0x5801, 0), /* T520-cr */
CH_DEVICE(0x5802, 0), /* T522-cr */
CH_DEVICE(0x5803, 0), /* T540-cr */
@@ -2962,6 +2960,7 @@ static const struct pci_device_id cxgb4vf_pci_tbl[] = {
CH_DEVICE(0x5883, 0),
CH_DEVICE(0x5884, 0),
CH_DEVICE(0x5885, 0),
+ CH_DEVICE(0x5886, 0),
{ 0, }
};
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 962510f391df..5ba5ad071bb6 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -186,6 +186,7 @@ struct enic {
____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
unsigned int cq_count;
struct enic_rfs_flw_tbl rfs_h;
+ u32 rx_copybreak;
};
static inline struct device *enic_get_dev(struct enic *enic)
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 523c9ceb04c0..85173d620758 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -379,6 +379,43 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
+static int enic_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct enic *enic = netdev_priv(dev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = enic->rx_copybreak;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int enic_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct enic *enic = netdev_priv(dev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ enic->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static const struct ethtool_ops enic_ethtool_ops = {
.get_settings = enic_get_settings,
.get_drvinfo = enic_get_drvinfo,
@@ -391,6 +428,8 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_coalesce = enic_get_coalesce,
.set_coalesce = enic_set_coalesce,
.get_rxnfc = enic_get_rxnfc,
+ .get_tunable = enic_get_tunable,
+ .set_tunable = enic_set_tunable,
};
void enic_set_ethtool_ops(struct net_device *netdev)
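Wiring get_tunable/set_tunable into enic_ethtool_ops exposes rx_copybreak through the generic ETHTOOL_GTUNABLE/STUNABLE interface; with an ethtool binary new enough to know about tunables, usage looks roughly like the following (syntax per the ethtool manpage; treat as illustrative):

	ethtool --get-tunable eth0 rx-copybreak
	ethtool --set-tunable eth0 rx-copybreak 128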
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index c8832bc1c5f7..929bfe70080a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -66,6 +66,8 @@
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
+#define RX_COPYBREAK_DEFAULT 256
+
/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
@@ -924,6 +926,7 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
pci_unmap_single(enic->pdev, buf->dma_addr,
buf->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(buf->os_buf);
+ buf->os_buf = NULL;
}
static int enic_rq_alloc_buf(struct vnic_rq *rq)
@@ -934,7 +937,24 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
unsigned int os_buf_index = 0;
dma_addr_t dma_addr;
+ struct vnic_rq_buf *buf = rq->to_use;
+
+ if (buf->os_buf) {
+ buf = buf->next;
+ rq->to_use = buf;
+ rq->ring.desc_avail--;
+ if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
+ /* Adding write memory barrier prevents compiler and/or
+ * CPU reordering, thus avoiding descriptor posting
+ * before descriptor is initialized. Otherwise, hardware
+ * can read stale descriptor fields.
+ */
+ wmb();
+ iowrite32(buf->index, &rq->ctrl->posted_index);
+ }
+ return 0;
+ }
skb = netdev_alloc_skb_ip_align(netdev, len);
if (!skb)
return -ENOMEM;
@@ -957,6 +977,25 @@ static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
pkt_size->small_pkt_bytes_cnt += pkt_len;
}
+static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
+ struct vnic_rq_buf *buf, u16 len)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct sk_buff *new_skb;
+
+ if (len > enic->rx_copybreak)
+ return false;
+ new_skb = netdev_alloc_skb_ip_align(netdev, len);
+ if (!new_skb)
+ return false;
+ pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
+ DMA_FROM_DEVICE);
+ memcpy(new_skb->data, (*skb)->data, len);
+ *skb = new_skb;
+
+ return true;
+}
+
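Taken together with the buffer-reuse path added to enic_rq_alloc_buf() above, the receive-side decision is, in essence:

	/* len <= rx_copybreak: copy the frame into a fresh small skb and
	 *   leave buf->os_buf set, so enic_rq_alloc_buf() can repost the
	 *   still-mapped buffer with no new allocation or DMA mapping.
	 * len >  rx_copybreak: hand the original skb up the stack and
	 *   unmap it; a replacement buffer is allocated as before.
	 */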
static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
int skipped, void *opaque)
@@ -978,9 +1017,6 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
return;
skb = buf->os_buf;
- prefetch(skb->data - NET_IP_ALIGN);
- pci_unmap_single(enic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_FROMDEVICE);
cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
&type, &color, &q_number, &completed_index,
@@ -1011,6 +1047,13 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
/* Good receive
*/
+ if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
+ buf->os_buf = NULL;
+ pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+ PCI_DMA_FROMDEVICE);
+ }
+ prefetch(skb->data - NET_IP_ALIGN);
+
skb_put(skb, bytes_written);
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, q_number);
@@ -2531,6 +2574,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot register net device, aborting\n");
goto err_out_dev_deinit;
}
+ enic->rx_copybreak = RX_COPYBREAK_DEFAULT;
return 0;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 37472ce4fac3..62f7b7baf93c 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -847,8 +847,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
*/
if ((err == ERR_ECMDUNKNOWN) ||
(!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
- pr_warning("Using default conversion factor for "
- "interrupt coalesce timer\n");
+ pr_warn("Using default conversion factor for interrupt coalesce timer\n");
vnic_dev_intr_coal_timer_info_default(vdev);
return 0;
}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 322213d901d5..c8205606c775 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -328,10 +328,10 @@ static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, void __iomem *);
static void send_filter_frame(struct DEVICE *);
static void dm9132_id_table(struct DEVICE *);
-static u16 phy_read(void __iomem *, u8, u8, u32);
-static void phy_write(void __iomem *, u8, u8, u16, u32);
-static void phy_write_1bit(void __iomem *, u32);
-static u16 phy_read_1bit(void __iomem *);
+static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
+static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
+static void dmfe_phy_write_1bit(void __iomem *, u32);
+static u16 dmfe_phy_read_1bit(void __iomem *);
static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(unsigned long);
@@ -770,7 +770,7 @@ static int dmfe_stop(struct DEVICE *dev)
/* Reset & stop DM910X board */
dw32(DCR0, DM910X_RESET);
udelay(100);
- phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
+ dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
/* free interrupt */
free_irq(db->pdev->irq, dev);
@@ -1154,7 +1154,7 @@ static void dmfe_timer(unsigned long data)
if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
db->cr6_data &= ~0x40000;
update_cr6(db->cr6_data, ioaddr);
- phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+ dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
db->cr6_data |= 0x40000;
update_cr6(db->cr6_data, ioaddr);
db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
@@ -1230,9 +1230,9 @@ static void dmfe_timer(unsigned long data)
*/
/* need a dummy read because of PHY's register latch*/
- phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
- link_ok_phy = (phy_read (db->ioaddr,
- db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
+ dmfe_phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
+ link_ok_phy = (dmfe_phy_read (db->ioaddr,
+ db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
if (link_ok_phy != link_ok) {
DMFE_DBUG (0, "PHY and chip report different link status", 0);
@@ -1247,8 +1247,8 @@ static void dmfe_timer(unsigned long data)
/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
/* AUTO or force 1M Homerun/Longrun don't need */
if ( !(db->media_mode & 0x38) )
- phy_write(db->ioaddr, db->phy_addr,
- 0, 0x1000, db->chip_id);
+ dmfe_phy_write(db->ioaddr, db->phy_addr,
+ 0, 0x1000, db->chip_id);
/* AUTO mode, if INT phyxcer link failed, select EXT device */
if (db->media_mode & DMFE_AUTO) {
@@ -1649,16 +1649,16 @@ static u8 dmfe_sense_speed(struct dmfe_board_info *db)
/* CR6 bit18=0, select 10/100M */
update_cr6(db->cr6_data & ~0x40000, ioaddr);
- phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
- phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+ phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+ phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
if ( (phy_mode & 0x24) == 0x24 ) {
if (db->chip_id == PCI_DM9132_ID) /* DM9132 */
- phy_mode = phy_read(db->ioaddr,
- db->phy_addr, 7, db->chip_id) & 0xf000;
+ phy_mode = dmfe_phy_read(db->ioaddr,
+ db->phy_addr, 7, db->chip_id) & 0xf000;
else /* DM9102/DM9102A */
- phy_mode = phy_read(db->ioaddr,
- db->phy_addr, 17, db->chip_id) & 0xf000;
+ phy_mode = dmfe_phy_read(db->ioaddr,
+ db->phy_addr, 17, db->chip_id) & 0xf000;
switch (phy_mode) {
case 0x1000: db->op_mode = DMFE_10MHF; break;
case 0x2000: db->op_mode = DMFE_10MFD; break;
@@ -1695,15 +1695,15 @@ static void dmfe_set_phyxcer(struct dmfe_board_info *db)
/* DM9009 Chip: Phyxcer reg18 bit12=0 */
if (db->chip_id == PCI_DM9009_ID) {
- phy_reg = phy_read(db->ioaddr,
- db->phy_addr, 18, db->chip_id) & ~0x1000;
+ phy_reg = dmfe_phy_read(db->ioaddr,
+ db->phy_addr, 18, db->chip_id) & ~0x1000;
- phy_write(db->ioaddr,
- db->phy_addr, 18, phy_reg, db->chip_id);
+ dmfe_phy_write(db->ioaddr,
+ db->phy_addr, 18, phy_reg, db->chip_id);
}
/* Phyxcer capability setting */
- phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
+ phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
if (db->media_mode & DMFE_AUTO) {
/* AUTO Mode */
@@ -1724,13 +1724,13 @@ static void dmfe_set_phyxcer(struct dmfe_board_info *db)
phy_reg|=db->PHY_reg4;
db->media_mode|=DMFE_AUTO;
}
- phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
+ dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
/* Restart Auto-Negotiation */
if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
- phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
+ dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
if ( !db->chip_type )
- phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
+ dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
}
@@ -1762,7 +1762,7 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
/* 10/100M phyxcer force mode need */
if ( !(db->media_mode & 0x18)) {
/* Force Mode */
- phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
+ phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
if ( !(phy_reg & 0x1) ) {
/* partner without N-Way capability */
phy_reg = 0x0;
@@ -1772,12 +1772,12 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
case DMFE_100MHF: phy_reg = 0x2000; break;
case DMFE_100MFD: phy_reg = 0x2100; break;
}
- phy_write(db->ioaddr,
- db->phy_addr, 0, phy_reg, db->chip_id);
+ dmfe_phy_write(db->ioaddr,
+ db->phy_addr, 0, phy_reg, db->chip_id);
if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
mdelay(20);
- phy_write(db->ioaddr,
- db->phy_addr, 0, phy_reg, db->chip_id);
+ dmfe_phy_write(db->ioaddr,
+ db->phy_addr, 0, phy_reg, db->chip_id);
}
}
}
@@ -1787,8 +1787,8 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
* Write a word to Phy register
*/
-static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
- u16 phy_data, u32 chip_id)
+static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
+ u16 phy_data, u32 chip_id)
{
u16 i;
@@ -1799,34 +1799,34 @@ static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
/* Send 33 synchronization clock to Phy controller */
for (i = 0; i < 35; i++)
- phy_write_1bit(ioaddr, PHY_DATA_1);
+ dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
/* Send start command(01) to Phy */
- phy_write_1bit(ioaddr, PHY_DATA_0);
- phy_write_1bit(ioaddr, PHY_DATA_1);
+ dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
+ dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
/* Send write command(01) to Phy */
- phy_write_1bit(ioaddr, PHY_DATA_0);
- phy_write_1bit(ioaddr, PHY_DATA_1);
+ dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
+ dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
/* Send Phy address */
for (i = 0x10; i > 0; i = i >> 1)
- phy_write_1bit(ioaddr,
- phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
+ dmfe_phy_write_1bit(ioaddr,
+ phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
/* Send register address */
for (i = 0x10; i > 0; i = i >> 1)
- phy_write_1bit(ioaddr,
- offset & i ? PHY_DATA_1 : PHY_DATA_0);
+ dmfe_phy_write_1bit(ioaddr,
+ offset & i ? PHY_DATA_1 : PHY_DATA_0);
/* write transition */
- phy_write_1bit(ioaddr, PHY_DATA_1);
- phy_write_1bit(ioaddr, PHY_DATA_0);
+ dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
+ dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
/* Write a word data to PHY controller */
for ( i = 0x8000; i > 0; i >>= 1)
- phy_write_1bit(ioaddr,
- phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
+ dmfe_phy_write_1bit(ioaddr,
+ phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
}
}
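For reference, the management frame that dmfe_phy_write() bit-bangs onto the MII lines, in order, as implemented above:

	/* <35 preamble 1-bits> <start 01> <op 01 = write>
	 * <5-bit PHY address, MSB first> <5-bit register address, MSB first>
	 * <turnaround 10> <16 data bits, MSB first>
	 *
	 * dmfe_phy_write_1bit() drives one MDC clock cycle with MDIO held at
	 * the given level; dmfe_phy_read() below uses the same framing with
	 * op 10 and samples MDIO via dmfe_phy_read_1bit() during the data
	 * phase instead.
	 */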
@@ -1835,7 +1835,7 @@ static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
* Read a word data from phy register
*/
-static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
+static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
{
int i;
u16 phy_data;
@@ -1848,33 +1848,33 @@ static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
/* Send 33 synchronization clock to Phy controller */
for (i = 0; i < 35; i++)
- phy_write_1bit(ioaddr, PHY_DATA_1);
+ dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
/* Send start command(01) to Phy */
- phy_write_1bit(ioaddr, PHY_DATA_0);
- phy_write_1bit(ioaddr, PHY_DATA_1);
+ dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
+ dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
/* Send read command(10) to Phy */
- phy_write_1bit(ioaddr, PHY_DATA_1);
- phy_write_1bit(ioaddr, PHY_DATA_0);
+ dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
+ dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
/* Send Phy address */
for (i = 0x10; i > 0; i = i >> 1)
- phy_write_1bit(ioaddr,
- phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
+ dmfe_phy_write_1bit(ioaddr,
+ phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
/* Send register address */
for (i = 0x10; i > 0; i = i >> 1)
- phy_write_1bit(ioaddr,
- offset & i ? PHY_DATA_1 : PHY_DATA_0);
+ dmfe_phy_write_1bit(ioaddr,
+ offset & i ? PHY_DATA_1 : PHY_DATA_0);
/* Skip transition state */
- phy_read_1bit(ioaddr);
+ dmfe_phy_read_1bit(ioaddr);
/* read 16bit data */
for (phy_data = 0, i = 0; i < 16; i++) {
phy_data <<= 1;
- phy_data |= phy_read_1bit(ioaddr);
+ phy_data |= dmfe_phy_read_1bit(ioaddr);
}
}
@@ -1886,7 +1886,7 @@ static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
* Write one bit data to Phy Controller
*/
-static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
+static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
{
dw32(DCR9, phy_data); /* MII Clock Low */
udelay(1);
@@ -1901,7 +1901,7 @@ static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
* Read one bit phy data from PHY controller
*/
-static u16 phy_read_1bit(void __iomem *ioaddr)
+static u16 dmfe_phy_read_1bit(void __iomem *ioaddr)
{
u16 phy_data;
@@ -1995,11 +1995,11 @@ static void dmfe_parse_srom(struct dmfe_board_info * db)
/* Check DM9801 or DM9802 present or not */
db->HPNA_present = 0;
update_cr6(db->cr6_data | 0x40000, db->ioaddr);
- tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
+ tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
/* DM9801 or DM9802 present */
db->HPNA_timer = 8;
- if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
+ if ( dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
/* DM9801 HomeRun */
db->HPNA_present = 1;
dmfe_program_DM9801(db, tmp_reg);
@@ -2025,29 +2025,29 @@ static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
switch(HPNA_rev) {
case 0xb900: /* DM9801 E3 */
db->HPNA_command |= 0x1000;
- reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
+ reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
- reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
+ reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
break;
case 0xb901: /* DM9801 E4 */
- reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
+ reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
- reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
+ reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
break;
case 0xb902: /* DM9801 E5 */
case 0xb903: /* DM9801 E6 */
default:
db->HPNA_command |= 0x1000;
- reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
+ reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
- reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
+ reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
break;
}
- phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
- phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
- phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
+ dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
+ dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
+ dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
}
@@ -2060,10 +2060,10 @@ static void dmfe_program_DM9802(struct dmfe_board_info * db)
uint phy_reg;
if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
- phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
- phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
+ dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
+ phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
- phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
+ dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
}
@@ -2077,7 +2077,7 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
uint phy_reg;
/* Got remote device status */
- phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
+ phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
switch(phy_reg) {
case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
case 0x20: phy_reg = 0x0900;break; /* LP/HS */
@@ -2087,8 +2087,8 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
/* Check whether remote device status matches our setting or not */
if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
- phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
- db->chip_id);
+ dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
+ db->chip_id);
db->HPNA_timer=8;
} else
db->HPNA_timer=600; /* Match, every 10 minutes, check */
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 056b44b93477..d1017509b08a 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -1,5 +1,5 @@
/*
- * drivers/net/ethernet/beckhoff/ec_bhf.c
+ * drivers/net/ethernet/ec_bhf.c
*
* Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
*
@@ -18,9 +18,6 @@
* Those can be found on Beckhoff CX50xx industrial PCs.
*/
-#if 0
-#define DEBUG
-#endif
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -74,6 +71,8 @@
#define DMA_WINDOW_SIZE_MASK 0xfffffffc
+#define ETHERCAT_MASTER_ID 0x14
+
static struct pci_device_id ids[] = {
{ PCI_DEVICE(0x15ec, 0x5000), },
{ 0, }
@@ -131,7 +130,6 @@ struct bhf_dma {
struct ec_bhf_priv {
struct net_device *net_dev;
-
struct pci_dev *dev;
void __iomem *io;
@@ -162,32 +160,6 @@ struct ec_bhf_priv {
#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
-#define ETHERCAT_MASTER_ID 0x14
-
-static void ec_bhf_print_status(struct ec_bhf_priv *priv)
-{
- struct device *dev = PRIV_TO_DEV(priv);
-
- dev_dbg(dev, "Frame error counter: %d\n",
- ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
- dev_dbg(dev, "RX error counter: %d\n",
- ioread8(priv->mac_io + MAC_RX_ERR_CNT));
- dev_dbg(dev, "CRC error counter: %d\n",
- ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
- dev_dbg(dev, "TX frame counter: %d\n",
- ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
- dev_dbg(dev, "RX frame counter: %d\n",
- ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
- dev_dbg(dev, "TX fifo level: %d\n",
- ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
- dev_dbg(dev, "Dropped frames: %d\n",
- ioread8(priv->mac_io + MAC_DROPPED_FRMS));
- dev_dbg(dev, "Connected with CCAT slot: %d\n",
- ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
- dev_dbg(dev, "Link status: %d\n",
- ioread8(priv->mii_io + MII_LINK_STATUS));
-}
-
static void ec_bhf_reset(struct ec_bhf_priv *priv)
{
iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
@@ -210,8 +182,6 @@ static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
u32 addr = (u8 *)desc - priv->tx_buf.buf;
iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
-
- dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
}
static int ec_bhf_desc_sent(struct tx_desc *desc)
@@ -244,7 +214,6 @@ static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
{
struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
- struct device *dev = PRIV_TO_DEV(priv);
while (ec_bhf_pkt_received(desc)) {
int pkt_size = (le16_to_cpu(desc->header.len) &
@@ -253,20 +222,16 @@ static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
- dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
-
if (skb) {
memcpy(skb_put(skb, pkt_size), data, pkt_size);
skb->protocol = eth_type_trans(skb, priv->net_dev);
- dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
-
priv->stat_rx_bytes += pkt_size;
netif_rx(skb);
} else {
- dev_err_ratelimited(dev,
- "Couldn't allocate a skb_buff for a packet of size %u\n",
- pkt_size);
+ dev_err_ratelimited(PRIV_TO_DEV(priv),
+ "Couldn't allocate a skb_buff for a packet of size %u\n",
+ pkt_size);
}
desc->header.recv = 0;
@@ -276,7 +241,6 @@ static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
desc = &priv->rx_descs[priv->rx_dnext];
}
-
}
static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
@@ -299,14 +263,7 @@ static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
unsigned block_count, i;
void __iomem *ec_info;
- dev_dbg(dev, "Info block:\n");
- dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
- dev_dbg(dev, "Revision of function: %x\n",
- (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
-
block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
- dev_dbg(dev, "Number of function blocks: %x\n", block_count);
-
for (i = 0; i < block_count; i++) {
u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
INFO_BLOCK_TYPE);
@@ -317,29 +274,17 @@ static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
dev_err(dev, "EtherCAT master with DMA block not found\n");
return -ENODEV;
}
- dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
ec_info = priv->io + i * INFO_BLOCK_SIZE;
- dev_dbg(dev, "EtherCAT master revision: %d\n",
- ioread16(ec_info + INFO_BLOCK_REV));
priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
- dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
- priv->tx_dma_chan);
-
priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
- dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
- priv->rx_dma_chan);
priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
- dev_dbg(dev,
- "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
- priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
-
return 0;
}
@@ -350,8 +295,6 @@ static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
struct tx_desc *desc;
unsigned len;
- dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
-
desc = &priv->tx_descs[priv->tx_dnext];
skb_copy_and_csum_dev(skb, desc->data);
@@ -366,15 +309,12 @@ static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
- /* Make sure that update updates to tx_dnext are perceived
+ /* Make sure that updates to tx_dnext are perceived
* by timer routine.
*/
smp_wmb();
netif_stop_queue(net_dev);
-
- dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
- ec_bhf_print_status(priv);
}
priv->stat_tx_bytes += len;
@@ -397,7 +337,6 @@ static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
mask = ioread32(priv->dma_io + offset);
mask &= DMA_WINDOW_SIZE_MASK;
- dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
/* We want to allocate a chunk of memory that is:
* - aligned to the mask we just read
@@ -408,12 +347,10 @@ static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
buf->len = min_t(int, ~mask + 1, size);
buf->alloc_len = 2 * buf->len;
- dev_dbg(dev, "Allocating %d bytes for channel %d",
- (int)buf->alloc_len, channel);
buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
GFP_KERNEL);
if (buf->alloc == NULL) {
- dev_info(dev, "Failed to allocate buffer\n");
+ dev_err(dev, "Failed to allocate buffer\n");
return -ENOMEM;
}
@@ -422,8 +359,6 @@ static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
iowrite32(0, priv->dma_io + offset + 4);
iowrite32(buf->buf_phys, priv->dma_io + offset);
- dev_dbg(dev, "Buffer: %x and read from dev: %x",
- (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
return 0;
}
@@ -433,7 +368,7 @@ static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
int i = 0;
priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
- priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
+ priv->tx_descs = (struct tx_desc *)priv->tx_buf.buf;
priv->tx_dnext = 0;
for (i = 0; i < priv->tx_dcount; i++)
@@ -445,7 +380,7 @@ static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
int i;
priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
- priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
+ priv->rx_descs = (struct rx_desc *)priv->rx_buf.buf;
priv->rx_dnext = 0;
for (i = 0; i < priv->rx_dcount; i++) {
@@ -469,8 +404,6 @@ static int ec_bhf_open(struct net_device *net_dev)
struct device *dev = PRIV_TO_DEV(priv);
int err = 0;
- dev_info(dev, "Opening device\n");
-
ec_bhf_reset(priv);
err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
@@ -481,20 +414,13 @@ static int ec_bhf_open(struct net_device *net_dev)
}
ec_bhf_setup_rx_descs(priv);
- dev_info(dev, "RX buffer allocated, address: %x\n",
- (unsigned)priv->rx_buf.buf_phys);
-
err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
FIFO_SIZE * sizeof(struct tx_desc));
if (err) {
dev_err(dev, "Failed to allocate tx buffer\n");
goto error_rx_free;
}
- dev_dbg(dev, "TX buffer allocated, addres: %x\n",
- (unsigned)priv->tx_buf.buf_phys);
-
iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
-
ec_bhf_setup_tx_descs(priv);
netif_start_queue(net_dev);
@@ -504,10 +430,6 @@ static int ec_bhf_open(struct net_device *net_dev)
hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
HRTIMER_MODE_REL);
- dev_info(PRIV_TO_DEV(priv), "Device open\n");
-
- ec_bhf_print_status(priv);
-
return 0;
error_rx_free:
@@ -640,9 +562,6 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
- dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
- net_dev->dev_addr);
-
err = register_netdev(net_dev);
if (err < 0)
goto err_free_net_dev;
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 43e08d0bc3d3..9a2d75235e89 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -86,6 +86,8 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE_MAX_JUMBO_FRAME_SIZE 9018
#define BE_MIN_MTU 256
+#define BE_MAX_MTU (BE_MAX_JUMBO_FRAME_SIZE - \
+ (ETH_HLEN + ETH_FCS_LEN))
#define BE_NUM_VLANS_SUPPORTED 64
#define BE_MAX_EQD 128u
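The new BE_MAX_MTU simply subtracts the L2 overhead from the jumbo-frame ceiling:

	BE_MAX_MTU = 9018 - (ETH_HLEN + ETH_FCS_LEN)
	           = 9018 - (14 + 4)
	           = 9000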
@@ -112,7 +114,6 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MAX_ROCE_EQS 5
#define MAX_MSIX_VECTORS 32
#define MIN_MSIX_VECTORS 1
-#define BE_TX_BUDGET 256
#define BE_NAPI_WEIGHT 64
#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
@@ -198,7 +199,6 @@ struct be_eq_obj {
u8 idx; /* array index */
u8 msix_idx;
- u16 tx_budget;
u16 spurious_intr;
struct napi_struct napi;
struct be_adapter *adapter;
@@ -248,6 +248,13 @@ struct be_tx_stats {
ulong tx_jiffies;
u32 tx_stops;
u32 tx_drv_drops; /* pkts dropped by driver */
+ /* the error counters are described in be_ethtool.c */
+ u32 tx_hdr_parse_err;
+ u32 tx_dma_err;
+ u32 tx_tso_err;
+ u32 tx_spoof_check_err;
+ u32 tx_qinq_err;
+ u32 tx_internal_parity_err;
struct u64_stats_sync sync;
struct u64_stats_sync sync_compl;
};
@@ -316,6 +323,7 @@ struct be_rx_obj {
struct be_drv_stats {
u32 be_on_die_temperature;
u32 eth_red_drops;
+ u32 dma_map_errors;
u32 rx_drops_no_pbuf;
u32 rx_drops_no_txpb;
u32 rx_drops_no_erx_descr;
@@ -399,9 +407,9 @@ struct phy_info {
u16 auto_speeds_supported;
u16 fixed_speeds_supported;
int link_speed;
- u32 dac_cable_len;
u32 advertising;
u32 supported;
+ u8 cable_type;
};
struct be_resources {
@@ -613,6 +621,10 @@ extern const struct ethtool_ops be_ethtool_ops;
for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\
i += adapter->num_evt_qs, rxo += adapter->num_evt_qs)
+#define for_all_tx_queues_on_eq(adapter, eqo, txo, i) \
+ for (i = eqo->idx, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;\
+ i += adapter->num_evt_qs, txo += adapter->num_evt_qs)
+
#define is_mcc_eqo(eqo) (eqo->idx == 0)
#define mcc_eqo(adapter) (&adapter->eq_obj[0])
@@ -661,6 +673,18 @@ static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
amap_mask(sizeof(((_struct *)0)->field)), \
AMAP_BIT_OFFSET(_struct, field))
+#define GET_RX_COMPL_V0_BITS(field, ptr) \
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, field, ptr)
+
+#define GET_RX_COMPL_V1_BITS(field, ptr) \
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, field, ptr)
+
+#define GET_TX_COMPL_BITS(field, ptr) \
+ AMAP_GET_BITS(struct amap_eth_tx_compl, field, ptr)
+
+#define SET_TX_WRB_HDR_BITS(field, ptr, val) \
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, field, ptr, val)
+
#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 4370ec1952ac..fead5c65a4f0 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -209,7 +209,6 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
if (base_status != MCC_STATUS_SUCCESS &&
!be_skip_err_log(opcode, base_status, addl_status)) {
-
if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
dev_warn(&adapter->pdev->dev,
"VF is not privileged to issue opcode %d-%d\n",
@@ -309,8 +308,6 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
be_async_grp5_pvid_state_process(adapter, compl);
break;
default:
- dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
- event_type);
break;
}
}
@@ -319,7 +316,7 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
struct be_mcc_compl *cmp)
{
u8 event_type = 0;
- struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
+ struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;
event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
ASYNC_EVENT_TYPE_MASK;
@@ -595,6 +592,7 @@ static int lancer_wait_ready(struct be_adapter *adapter)
static bool lancer_provisioning_error(struct be_adapter *adapter)
{
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
+
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
@@ -677,7 +675,6 @@ int be_fw_wait_ready(struct be_adapter *adapter)
return -1;
}
-
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
return &wrb->payload.sgl[0];
@@ -924,6 +921,7 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
status = be_mbox_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
+
eqo->q.id = le16_to_cpu(resp->eq_id);
eqo->msix_idx =
(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
@@ -958,7 +956,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
if (permanent) {
req->permanent = 1;
} else {
- req->if_id = cpu_to_le16((u16) if_handle);
+ req->if_id = cpu_to_le16((u16)if_handle);
req->pmac_id = cpu_to_le32(pmac_id);
req->permanent = 0;
}
@@ -966,6 +964,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
+
memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
}
@@ -1002,6 +1001,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
+
*pmac_id = le32_to_cpu(resp->pmac_id);
}
@@ -1034,7 +1034,8 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
+ wrb, NULL);
req->hdr.domain = dom;
req->if_id = cpu_to_le32(if_id);
@@ -1106,6 +1107,7 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
status = be_mbox_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
+
cq->id = le16_to_cpu(resp->cq_id);
cq->created = true;
}
@@ -1118,6 +1120,7 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
static u32 be_encoded_q_len(int q_len)
{
u32 len_encoded = fls(q_len); /* log2(len) + 1 */
+
if (len_encoded == 16)
len_encoded = 0;
return len_encoded;
@@ -1173,6 +1176,7 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
status = be_mbox_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+
mccq->id = le16_to_cpu(resp->id);
mccq->created = true;
}
@@ -1216,6 +1220,7 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
status = be_mbox_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+
mccq->id = le16_to_cpu(resp->id);
mccq->created = true;
}
@@ -1274,6 +1279,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
status = be_cmd_notify_wait(adapter, &wrb);
if (!status) {
struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
+
txq->id = le16_to_cpu(resp->cid);
if (ver == 2)
txo->db_offset = le32_to_cpu(resp->db_offset);
@@ -1318,6 +1324,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
+
rxq->id = le16_to_cpu(resp->id);
rxq->created = true;
*rss_id = resp->rss_id;
@@ -1431,6 +1438,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
status = be_cmd_notify_wait(adapter, &wrb);
if (!status) {
struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
+
*if_handle = le32_to_cpu(resp->interface_id);
/* Hack to retrieve VF's pmac-id on BE3 */
@@ -1514,7 +1522,6 @@ err:
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
struct be_dma_mem *nonemb_cmd)
{
-
struct be_mcc_wrb *wrb;
struct lancer_cmd_req_pport_stats *req;
int status = 0;
@@ -1605,6 +1612,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
+
if (link_speed) {
*link_speed = resp->link_speed ?
le16_to_cpu(resp->link_speed) * 10 :
@@ -1672,6 +1680,7 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
+
if (log_size && resp->log_size)
*log_size = le32_to_cpu(resp->log_size) -
sizeof(u32);
@@ -1681,17 +1690,17 @@ err:
return status;
}
-void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
+int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
struct be_dma_mem get_fat_cmd;
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_fat *req;
u32 offset = 0, total_size, buf_size,
log_offset = sizeof(u32), payload_len;
- int status;
+ int status = 0;
if (buf_len == 0)
- return;
+ return -EIO;
total_size = buf_len;
@@ -1700,10 +1709,9 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
get_fat_cmd.size,
&get_fat_cmd.dma);
if (!get_fat_cmd.va) {
- status = -ENOMEM;
dev_err(&adapter->pdev->dev,
- "Memory allocation failure while retrieving FAT data\n");
- return;
+ "Memory allocation failure while reading FAT data\n");
+ return -ENOMEM;
}
spin_lock_bh(&adapter->mcc_lock);
@@ -1732,6 +1740,7 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
+
memcpy(buf + offset,
resp->data_buffer,
le32_to_cpu(resp->read_log_length));
@@ -1746,6 +1755,7 @@ err:
pci_free_consistent(adapter->pdev, get_fat_cmd.size,
get_fat_cmd.va, get_fat_cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
+ return status;
}
/* Uses synchronous mcc */
@@ -1771,8 +1781,11 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
- strcpy(adapter->fw_ver, resp->firmware_version_string);
- strcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string);
+
+ strlcpy(adapter->fw_ver, resp->firmware_version_string,
+ sizeof(adapter->fw_ver));
+ strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
+ sizeof(adapter->fw_on_flash));
}
err:
spin_unlock_bh(&adapter->mcc_lock);
@@ -1782,8 +1795,8 @@ err:
/* set the EQ delay interval of an EQ to specified value
* Uses async mcc
*/
-int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
- int num)
+static int __be_cmd_modify_eqd(struct be_adapter *adapter,
+ struct be_set_eqd *set_eqd, int num)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_modify_eq_delay *req;
@@ -1816,6 +1829,25 @@ err:
return status;
}
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
+ int num)
+{
+ int num_eqs, i = 0;
+
+ if (lancer_chip(adapter) && num > 8) {
+ while (num) {
+ num_eqs = min(num, 8);
+ __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
+ i += num_eqs;
+ num -= num_eqs;
+ }
+ } else {
+ __be_cmd_modify_eqd(adapter, set_eqd, num);
+ }
+
+ return 0;
+}
+
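The wrapper exists because Lancer firmware accepts at most eight EQ-delay entries per MODIFY_EQ_DELAY command; larger requests are simply split. A worked example of the loop above:

	/* num = 20 on a Lancer adapter issues three commands, covering
	 * set_eqd[0..7], set_eqd[8..15] and set_eqd[16..19]
	 */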
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
u32 num)
@@ -1879,8 +1911,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
BE_IF_FLAGS_VLAN_PROMISCUOUS |
BE_IF_FLAGS_MCAST_PROMISCUOUS);
} else if (flags & IFF_ALLMULTI) {
- req->if_flags_mask = req->if_flags =
- cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ req->if_flags = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
} else if (flags & BE_FLAGS_VLAN_PROMISC) {
req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
@@ -1891,8 +1923,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
struct netdev_hw_addr *ha;
int i = 0;
- req->if_flags_mask = req->if_flags =
- cpu_to_le32(BE_IF_FLAGS_MULTICAST);
+ req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
+ req->if_flags = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
/* Reset mcast promisc mode if already set by setting mask
* and not setting flags field
@@ -1947,6 +1979,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
wrb, NULL);
+ req->hdr.version = 1;
req->tx_flow_control = cpu_to_le16((u16)tx_fc);
req->rx_flow_control = cpu_to_le16((u16)rx_fc);
@@ -1954,6 +1987,10 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
err:
spin_unlock_bh(&adapter->mcc_lock);
+
+ if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
+
return status;
}
@@ -1985,6 +2022,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
if (!status) {
struct be_cmd_resp_get_flow_control *resp =
embedded_payload(wrb);
+
*tx_fc = le16_to_cpu(resp->tx_flow_control);
*rx_fc = le16_to_cpu(resp->rx_flow_control);
}
@@ -2014,10 +2052,14 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter)
status = be_mbox_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
+
adapter->port_num = le32_to_cpu(resp->phys_port);
adapter->function_mode = le32_to_cpu(resp->function_mode);
adapter->function_caps = le32_to_cpu(resp->function_caps);
adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
+ dev_info(&adapter->pdev->dev,
+ "FW config: function_mode=0x%x, function_caps=0x%x\n",
+ adapter->function_mode, adapter->function_caps);
}
mutex_unlock(&adapter->mbox_lock);
@@ -2159,6 +2201,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
if (!status) {
struct be_cmd_resp_get_beacon_state *resp =
embedded_payload(wrb);
+
*state = resp->beacon_state;
}
@@ -2167,6 +2210,53 @@ err:
return status;
}
+/* Uses sync mcc */
+int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
+ u8 page_num, u8 *data)
+{
+ struct be_dma_mem cmd;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_port_type *req;
+ int status;
+
+ if (page_num > TR_PAGE_A2)
+ return -EINVAL;
+
+ cmd.size = sizeof(struct be_cmd_resp_port_type);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+ memset(cmd.va, 0, cmd.size);
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = cmd.va;
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_READ_TRANSRECV_DATA,
+ cmd.size, wrb, &cmd);
+
+ req->port = cpu_to_le32(adapter->hba_port_num);
+ req->page_num = cpu_to_le32(page_num);
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_port_type *resp = cmd.va;
+
+ memcpy(data, resp->page_data, PAGE_DATA_LEN);
+ }
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ return status;
+}
+
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 data_size, u32 data_offset,
const char *obj_name, u32 *data_written,
@@ -2207,7 +2297,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
be_dws_cpu_to_le(ctxt, sizeof(req->context));
req->write_offset = cpu_to_le32(data_offset);
- strcpy(req->object_name, obj_name);
+ strlcpy(req->object_name, obj_name, sizeof(req->object_name));
req->descriptor_count = cpu_to_le32(1);
req->buf_len = cpu_to_le32(data_size);
req->addr_low = cpu_to_le32((cmd->dma +
@@ -2240,6 +2330,31 @@ err_unlock:
return status;
}
+int be_cmd_query_cable_type(struct be_adapter *adapter)
+{
+ u8 page_data[PAGE_DATA_LEN];
+ int status;
+
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
+ page_data);
+ if (!status) {
+ switch (adapter->phy.interface_type) {
+ case PHY_TYPE_QSFP:
+ adapter->phy.cable_type =
+ page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
+ break;
+ case PHY_TYPE_SFP_PLUS_10GB:
+ adapter->phy.cable_type =
+ page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
+ break;
+ default:
+ adapter->phy.cable_type = 0;
+ break;
+ }
+ }
+ return status;
+}
+
int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
{
struct lancer_cmd_req_delete_object *req;
@@ -2260,7 +2375,7 @@ int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
OPCODE_COMMON_DELETE_OBJECT,
sizeof(*req), wrb, NULL);
- strcpy(req->object_name, obj_name);
+ strlcpy(req->object_name, obj_name, sizeof(req->object_name));
status = be_mcc_notify_wait(adapter);
err:
@@ -2357,7 +2472,7 @@ err_unlock:
}
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- u16 optype, int offset)
+ u16 optype, int offset)
{
struct be_mcc_wrb *wrb;
struct be_cmd_read_flash_crc *req;
@@ -2528,9 +2643,10 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
if (!status) {
struct be_cmd_resp_ddrdma_test *resp;
+
resp = cmd->va;
if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
- resp->snd_err) {
+ resp->snd_err) {
status = -1;
}
}
@@ -2603,6 +2719,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
if (!status) {
struct be_phy_info *resp_phy_info =
cmd.va + sizeof(struct be_cmd_req_hdr);
+
adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
adapter->phy.interface_type =
le16_to_cpu(resp_phy_info->interface_type);
@@ -2732,6 +2849,7 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
status = be_mbox_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
+
adapter->be3_native = le32_to_cpu(resp->cap_flags) &
CAPABILITY_BE3_NATIVE_ERX_API;
if (!adapter->be3_native)
@@ -2771,6 +2889,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
if (!status) {
struct be_cmd_resp_get_fn_privileges *resp =
embedded_payload(wrb);
+
*privilege = le32_to_cpu(resp->privilege_mask);
/* In UMC mode FW does not return right privileges.
@@ -2918,7 +3037,6 @@ out:
int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
u8 *mac, u32 if_handle, bool active, u32 domain)
{
-
if (!active)
be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
if_handle, domain);
@@ -3102,6 +3220,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
if (!status) {
struct be_cmd_resp_get_hsw_config *resp =
embedded_payload(wrb);
+
be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
pvid, &resp->context);
@@ -3161,7 +3280,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
status = be_mbox_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
- resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
+
+ resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
adapter->wol_cap = resp->wol_settings;
if (adapter->wol_cap & BE_WOL_CAP)
@@ -3197,6 +3317,7 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
+
for (j = 0; j < num_modes; j++) {
if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
cfgs->module[i].trace_lvl[j].dbg_lvl =
@@ -3233,6 +3354,7 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
if (!status) {
cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
sizeof(struct be_cmd_resp_hdr));
+
for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
level = cfgs->module[0].trace_lvl[j].dbg_lvl;
@@ -3329,6 +3451,7 @@ int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
+
*port_name = resp->port_name[adapter->hba_port_num];
} else {
*port_name = adapter->hba_port_num + '0';
@@ -3952,6 +4075,7 @@ int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
if (!status) {
struct be_cmd_resp_get_active_profile *resp =
embedded_payload(wrb);
+
*profile_id = le16_to_cpu(resp->active_profile_id);
}
@@ -4004,7 +4128,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
{
struct be_adapter *adapter = netdev_priv(netdev_handle);
struct be_mcc_wrb *wrb;
- struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
+ struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
struct be_cmd_req_hdr *req;
struct be_cmd_resp_hdr *resp;
int status;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 5284b825bba2..eb5085d6794f 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -57,7 +57,8 @@ enum mcc_base_status {
MCC_STATUS_ILLEGAL_FIELD = 3,
MCC_STATUS_INSUFFICIENT_BUFFER = 4,
MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
- MCC_STATUS_NOT_SUPPORTED = 66
+ MCC_STATUS_NOT_SUPPORTED = 66,
+ MCC_STATUS_FEATURE_NOT_SUPPORTED = 68
};
/* Additional status */
@@ -1004,8 +1005,8 @@ struct be_cmd_resp_link_status {
/* Identifies the type of port attached to NIC */
struct be_cmd_req_port_type {
struct be_cmd_req_hdr hdr;
- u32 page_num;
- u32 port;
+ __le32 page_num;
+ __le32 port;
};
enum {
@@ -1013,28 +1014,23 @@ enum {
TR_PAGE_A2 = 0xa2
};
+/* From SFF-8436 QSFP+ spec */
+#define QSFP_PLUS_CABLE_TYPE_OFFSET 0x83
+#define QSFP_PLUS_CR4_CABLE 0x8
+#define QSFP_PLUS_SR4_CABLE 0x4
+#define QSFP_PLUS_LR4_CABLE 0x2
+
+/* From SFF-8472 spec */
+#define SFP_PLUS_SFF_8472_COMP 0x5E
+#define SFP_PLUS_CABLE_TYPE_OFFSET 0x8
+#define SFP_PLUS_COPPER_CABLE 0x4
+
+#define PAGE_DATA_LEN 256
struct be_cmd_resp_port_type {
struct be_cmd_resp_hdr hdr;
u32 page_num;
u32 port;
- struct data {
- u8 identifier;
- u8 identifier_ext;
- u8 connector;
- u8 transceiver[8];
- u8 rsvd0[3];
- u8 length_km;
- u8 length_hm;
- u8 length_om1;
- u8 length_om2;
- u8 length_cu;
- u8 length_cu_m;
- u8 vendor_name[16];
- u8 rsvd;
- u8 vendor_oui[3];
- u8 vendor_pn[16];
- u8 vendor_rev[4];
- } data;
+ u8 page_data[PAGE_DATA_LEN];
};
/******************** Get FW Version *******************/
@@ -1367,6 +1363,9 @@ enum {
PHY_TYPE_BASET_1GB,
PHY_TYPE_BASEX_1GB,
PHY_TYPE_SGMII,
+ PHY_TYPE_QSFP,
+ PHY_TYPE_KR4_40GB,
+ PHY_TYPE_KR2_20GB,
PHY_TYPE_DISABLED = 255
};
@@ -1375,6 +1374,8 @@ enum {
#define BE_SUPPORTED_SPEED_100MBPS 2
#define BE_SUPPORTED_SPEED_1GBPS 4
#define BE_SUPPORTED_SPEED_10GBPS 8
+#define BE_SUPPORTED_SPEED_20GBPS 0x10
+#define BE_SUPPORTED_SPEED_40GBPS 0x20
#define BE_AN_EN 0x2
#define BE_PAUSE_SYM_EN 0x80
@@ -2066,6 +2067,9 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
u8 status, u8 state);
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
u32 *state);
+int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
+ u8 page_num, u8 *data);
+int be_cmd_query_cable_type(struct be_adapter *adapter);
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_oper, u32 flash_opcode, u32 buf_size);
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
@@ -2101,7 +2105,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter);
int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
int be_cmd_req_native_mode(struct be_adapter *adapter);
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
-void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
u32 domain);
int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 0cd3311409a8..e42a791c1835 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -78,6 +78,11 @@ static const struct be_ethtool_stat et_stats[] = {
* fifo must never overflow.
*/
{DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
+ /* Received packets dropped when the RX block runs out of space in
+ * one of its input FIFOs. This could happen due to a long burst of
+ * minimum-sized (64-byte) frames in the receive path.
+ * On rare occasions this counter may also be incremented erroneously.
+ */
{DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
{DRVSTAT_INFO(rx_ip_checksum_errs)},
{DRVSTAT_INFO(rx_tcp_checksum_errs)},
@@ -114,6 +119,8 @@ static const struct be_ethtool_stat et_stats[] = {
* is more than 9018 bytes
*/
{DRVSTAT_INFO(rx_drops_mtu)},
+ /* Number of dma mapping errors */
+ {DRVSTAT_INFO(dma_map_errors)},
/* Number of packets dropped due to random early drop function */
{DRVSTAT_INFO(eth_red_drops)},
{DRVSTAT_INFO(be_on_die_temperature)},
@@ -123,6 +130,7 @@ static const struct be_ethtool_stat et_stats[] = {
{DRVSTAT_INFO(roce_drops_payload_len)},
{DRVSTAT_INFO(roce_drops_crc)}
};
+
#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
/* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
@@ -145,6 +153,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {
*/
{DRVSTAT_RX_INFO(rx_drops_no_frags)}
};
+
#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
/* Stats related to multi TX queues: get_stats routine assumes compl is the
@@ -152,6 +161,34 @@ static const struct be_ethtool_stat et_rx_stats[] = {
*/
static const struct be_ethtool_stat et_tx_stats[] = {
{DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
+ /* This counter is incremented when the HW encounters an error while
+ * parsing the packet header of an outgoing TX request. This counter is
+ * applicable only for BE2, BE3 and Skyhawk-based adapters.
+ */
+ {DRVSTAT_TX_INFO(tx_hdr_parse_err)},
+ /* This counter is incremented when an error occurs in the DMA
+ * operation associated with the TX request from the host to the device.
+ */
+ {DRVSTAT_TX_INFO(tx_dma_err)},
+ /* This counter is incremented when MAC or VLAN spoof checking is
+ * enabled on the interface and the TX request fails the spoof check
+ * in HW.
+ */
+ {DRVSTAT_TX_INFO(tx_spoof_check_err)},
+ /* This counter is incremented when the HW encounters an error while
+ * performing TSO offload. This counter is applicable only for Lancer
+ * adapters.
+ */
+ {DRVSTAT_TX_INFO(tx_tso_err)},
+ /* This counter is incremented when the HW detects Q-in-Q style VLAN
+ * tagging in a packet and such tagging is not expected on the outgoing
+ * interface. This counter is applicable only for Lancer adapters.
+ */
+ {DRVSTAT_TX_INFO(tx_qinq_err)},
+ /* This counter is incremented when the HW detects parity errors in the
+ * packet data. This counter is applicable only for Lancer adapters.
+ */
+ {DRVSTAT_TX_INFO(tx_internal_parity_err)},
{DRVSTAT_TX_INFO(tx_bytes)},
{DRVSTAT_TX_INFO(tx_pkts)},
/* Number of skbs queued for transmission by the driver */
@@ -165,6 +202,7 @@ static const struct be_ethtool_stat et_tx_stats[] = {
/* Pkts dropped in the driver's transmit path */
{DRVSTAT_TX_INFO(tx_drv_drops)}
};
+
#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
static const char et_self_tests[][ETH_GSTRING_LEN] = {
@@ -239,7 +277,7 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
while ((total_read_len < buf_len) && !eof) {
chunk_size = min_t(u32, (buf_len - total_read_len),
- LANCER_READ_FILE_CHUNK);
+ LANCER_READ_FILE_CHUNK);
chunk_size = ALIGN(chunk_size, 4);
status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
total_read_len, file_name,
@@ -298,7 +336,6 @@ static int be_get_coalesce(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
struct be_aic_obj *aic = &adapter->aic_obj[0];
-
et->rx_coalesce_usecs = aic->prev_eqd;
et->rx_coalesce_usecs_high = aic->max_eqd;
et->rx_coalesce_usecs_low = aic->min_eqd;
@@ -440,18 +477,27 @@ static int be_get_sset_count(struct net_device *netdev, int stringset)
}
}
-static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len)
+static u32 be_get_port_type(struct be_adapter *adapter)
{
u32 port;
- switch (phy_type) {
+ switch (adapter->phy.interface_type) {
case PHY_TYPE_BASET_1GB:
case PHY_TYPE_BASEX_1GB:
case PHY_TYPE_SGMII:
port = PORT_TP;
break;
case PHY_TYPE_SFP_PLUS_10GB:
- port = dac_cable_len ? PORT_DA : PORT_FIBRE;
+ if (adapter->phy.cable_type & SFP_PLUS_COPPER_CABLE)
+ port = PORT_DA;
+ else
+ port = PORT_FIBRE;
+ break;
+ case PHY_TYPE_QSFP:
+ if (adapter->phy.cable_type & QSFP_PLUS_CR4_CABLE)
+ port = PORT_DA;
+ else
+ port = PORT_FIBRE;
break;
case PHY_TYPE_XFP_10GB:
case PHY_TYPE_SFP_1GB:
@@ -467,11 +513,11 @@ static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len)
return port;
}
-static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
+static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds)
{
u32 val = 0;
- switch (if_type) {
+ switch (adapter->phy.interface_type) {
case PHY_TYPE_BASET_1GB:
case PHY_TYPE_BASEX_1GB:
case PHY_TYPE_SGMII:
@@ -490,10 +536,38 @@ static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
val |= SUPPORTED_10000baseKX4_Full;
break;
+ case PHY_TYPE_KR2_20GB:
+ val |= SUPPORTED_Backplane;
+ if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+ val |= SUPPORTED_10000baseKR_Full;
+ if (if_speeds & BE_SUPPORTED_SPEED_20GBPS)
+ val |= SUPPORTED_20000baseKR2_Full;
+ break;
case PHY_TYPE_KR_10GB:
val |= SUPPORTED_Backplane |
SUPPORTED_10000baseKR_Full;
break;
+ case PHY_TYPE_KR4_40GB:
+ val |= SUPPORTED_Backplane;
+ if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+ val |= SUPPORTED_10000baseKR_Full;
+ if (if_speeds & BE_SUPPORTED_SPEED_40GBPS)
+ val |= SUPPORTED_40000baseKR4_Full;
+ break;
+ case PHY_TYPE_QSFP:
+ if (if_speeds & BE_SUPPORTED_SPEED_40GBPS) {
+ switch (adapter->phy.cable_type) {
+ case QSFP_PLUS_CR4_CABLE:
+ val |= SUPPORTED_40000baseCR4_Full;
+ break;
+ case QSFP_PLUS_LR4_CABLE:
+ val |= SUPPORTED_40000baseLR4_Full;
+ break;
+ default:
+ val |= SUPPORTED_40000baseSR4_Full;
+ break;
+ }
+ }
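+ /* fall through - QSFP also picks up the SUPPORTED_FIBRE and 10G/1G
+ * modes below; the missing break appears intentional
+ */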
case PHY_TYPE_SFP_PLUS_10GB:
case PHY_TYPE_XFP_10GB:
case PHY_TYPE_SFP_1GB:
@@ -534,8 +608,6 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
int status;
u32 auto_speeds;
u32 fixed_speeds;
- u32 dac_cable_len;
- u16 interface_type;
if (adapter->phy.link_speed < 0) {
status = be_cmd_link_status_query(adapter, &link_speed,
@@ -546,21 +618,19 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
status = be_cmd_get_phy_info(adapter);
if (!status) {
- interface_type = adapter->phy.interface_type;
auto_speeds = adapter->phy.auto_speeds_supported;
fixed_speeds = adapter->phy.fixed_speeds_supported;
- dac_cable_len = adapter->phy.dac_cable_len;
+
+ be_cmd_query_cable_type(adapter);
ecmd->supported =
- convert_to_et_setting(interface_type,
+ convert_to_et_setting(adapter,
auto_speeds |
fixed_speeds);
ecmd->advertising =
- convert_to_et_setting(interface_type,
- auto_speeds);
+ convert_to_et_setting(adapter, auto_speeds);
- ecmd->port = be_get_port_type(interface_type,
- dac_cable_len);
+ ecmd->port = be_get_port_type(adapter);
if (adapter->phy.auto_speeds_supported) {
ecmd->supported |= SUPPORTED_Autoneg;
@@ -614,8 +684,10 @@ static void be_get_ringparam(struct net_device *netdev,
{
struct be_adapter *adapter = netdev_priv(netdev);
- ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
- ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
+ ring->rx_max_pending = adapter->rx_obj[0].q.len;
+ ring->rx_pending = adapter->rx_obj[0].q.len;
+ ring->tx_max_pending = adapter->tx_obj[0].q.len;
+ ring->tx_pending = adapter->tx_obj[0].q.len;
}
static void
@@ -641,7 +713,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
status = be_cmd_set_flow_control(adapter,
adapter->tx_fc, adapter->rx_fc);
if (status)
- dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
+ dev_warn(&adapter->pdev->dev, "Pause param set failed\n");
return be_cmd_status(status);
}
@@ -907,8 +979,6 @@ static void be_set_msg_level(struct net_device *netdev, u32 level)
FW_LOG_LEVEL_DEFAULT :
FW_LOG_LEVEL_FATAL);
adapter->msg_enable = level;
-
- return;
}
static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
@@ -1127,6 +1197,7 @@ static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
if (indir) {
struct be_rx_obj *rxo;
+
for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
j = indir[i];
rxo = &adapter->rx_obj[j];
@@ -1142,8 +1213,8 @@ static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
hkey = adapter->rss_info.rss_hkey;
rc = be_cmd_rss_config(adapter, rsstable,
- adapter->rss_info.rss_flags,
- RSS_INDIR_TABLE_LEN, hkey);
+ adapter->rss_info.rss_flags,
+ RSS_INDIR_TABLE_LEN, hkey);
if (rc) {
adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
return -EIO;
@@ -1154,6 +1225,58 @@ static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
return 0;
}
+static int be_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u8 page_data[PAGE_DATA_LEN];
+ int status;
+
+ if (!check_privilege(adapter, MAX_PRIVILEGES))
+ return -EOPNOTSUPP;
+
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
+ page_data);
+ if (!status) {
+ if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = PAGE_DATA_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = 2 * PAGE_DATA_LEN;
+ }
+ }
+ return be_cmd_status(status);
+}
+
+static int be_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (!check_privilege(adapter, MAX_PRIVILEGES))
+ return -EOPNOTSUPP;
+
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
+ data);
+ if (status)
+ goto err;
+
+ if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter,
+ TR_PAGE_A2,
+ data +
+ PAGE_DATA_LEN);
+ if (status)
+ goto err;
+ }
+ /* source and destination overlap: use memmove, not memcpy */
+ if (eeprom->offset)
+ memmove(data, data + eeprom->offset, eeprom->len);
+err:
+ return be_cmd_status(status);
+}
+
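+/* Sketch of the offset handling above with a hypothetical request:
+ * for eeprom->offset = 300 and eeprom->len = 100 on an SFF-8472
+ * module, offset + len exceeds PAGE_DATA_LEN (256), so pages A0 and
+ * A2 are read into data[0..511] and the final copy slides bytes
+ * [300..399] down to data[0..99] for the caller. From user space this
+ * path is exercised via "ethtool -m <iface>".
+ */
+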
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
@@ -1185,5 +1308,7 @@ const struct ethtool_ops be_ethtool_ops = {
.get_rxfh = be_get_rxfh,
.set_rxfh = be_set_rxfh,
.get_channels = be_get_channels,
- .set_channels = be_set_channels
+ .set_channels = be_set_channels,
+ .get_module_info = be_get_module_info,
+ .get_module_eeprom = be_get_module_eeprom
};
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 8840c64aaeca..295ee0835ba0 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -315,6 +315,18 @@ struct be_eth_hdr_wrb {
u32 dw[4];
};
+/********* Tx Compl Status Encoding *********/
+#define BE_TX_COMP_HDR_PARSE_ERR 0x2
+#define BE_TX_COMP_NDMA_ERR 0x3
+#define BE_TX_COMP_ACL_ERR 0x5
+
+#define LANCER_TX_COMP_LSO_ERR 0x1
+#define LANCER_TX_COMP_HSW_DROP_MAC_ERR 0x3
+#define LANCER_TX_COMP_HSW_DROP_VLAN_ERR 0x5
+#define LANCER_TX_COMP_QINQ_ERR 0x7
+#define LANCER_TX_COMP_PARITY_ERR 0xb
+#define LANCER_TX_COMP_DMA_ERR 0xd
+
/* TX Compl Queue Descriptor */
/* Pseudo amap definition for eth_tx_compl in which each bit of the
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 93ff8ef39352..9a18e7930b31 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -86,6 +86,7 @@ static const char * const ue_status_low_desc[] = {
"JTAG ",
"MPU_INTPEND "
};
+
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
"LPCMEMHOST",
@@ -122,10 +123,10 @@ static const char * const ue_status_hi_desc[] = {
"Unknown"
};
-
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
+
if (mem->va) {
dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
mem->dma);
@@ -187,6 +188,7 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
u32 val = 0;
+
val |= qid & DB_RQ_RING_ID_MASK;
val |= posted << DB_RQ_NUM_POSTED_SHIFT;
@@ -198,6 +200,7 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
u16 posted)
{
u32 val = 0;
+
val |= txo->q.id & DB_TXULP_RING_ID_MASK;
val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
@@ -209,6 +212,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
bool arm, bool clear_int, u16 num_popped)
{
u32 val = 0;
+
val |= qid & DB_EQ_RING_ID_MASK;
val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
@@ -227,6 +231,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
u32 val = 0;
+
val |= qid & DB_CQ_RING_ID_MASK;
val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
DB_CQ_RING_ID_EXT_MASK_SHIFT);
@@ -488,7 +493,6 @@ static void populate_be_v2_stats(struct be_adapter *adapter)
static void populate_lancer_stats(struct be_adapter *adapter)
{
-
struct be_drv_stats *drvs = &adapter->drv_stats;
struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
@@ -588,6 +592,7 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
for_all_rx_queues(adapter, rxo, i) {
const struct be_rx_stats *rx_stats = rx_stats(rxo);
+
do {
start = u64_stats_fetch_begin_irq(&rx_stats->sync);
pkts = rx_stats(rxo)->rx_pkts;
@@ -602,6 +607,7 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
for_all_tx_queues(adapter, txo, i) {
const struct be_tx_stats *tx_stats = tx_stats(txo);
+
do {
start = u64_stats_fetch_begin_irq(&tx_stats->sync);
pkts = tx_stats(txo)->tx_pkts;
@@ -738,38 +744,37 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
memset(hdr, 0, sizeof(*hdr));
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
+ SET_TX_WRB_HDR_BITS(crc, hdr, 1);
if (skb_is_gso(skb)) {
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
- hdr, skb_shinfo(skb)->gso_size);
+ SET_TX_WRB_HDR_BITS(lso, hdr, 1);
+ SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
+ SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (skb->encapsulation) {
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
+ SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
proto = skb_inner_ip_proto(skb);
} else {
proto = skb_ip_proto(skb);
}
if (proto == IPPROTO_TCP)
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
+ SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
else if (proto == IPPROTO_UDP)
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
+ SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
}
if (vlan_tx_tag_present(skb)) {
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
+ SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
+ SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
}
/* To skip HW VLAN tagging: evt = 1, compl = 0 */
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
+ SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
+ SET_TX_WRB_HDR_BITS(event, hdr, 1);
+ SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
+ SET_TX_WRB_HDR_BITS(len, hdr, len);
}
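
/* The SET_TX_WRB_HDR_BITS()/GET_RX_COMPL_Vx_BITS()/GET_TX_COMPL_BITS()
 * helpers this patch switches to are presumably introduced in be.h as
 * thin wrappers over the old AMAP macros, e.g.:
 *
 *	#define SET_TX_WRB_HDR_BITS(name, hdr, val) \
 *		AMAP_SET_BITS(struct amap_eth_hdr_wrb, name, hdr, val)
 */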
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
@@ -808,6 +813,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
if (skb->len > skb->data_len) {
int len = skb_headlen(skb);
+
busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr))
goto dma_err;
@@ -821,6 +827,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
busaddr = skb_frag_dma_map(dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr))
@@ -850,6 +857,7 @@ dma_err:
unmap_tx_frag(dev, wrb, map_single);
map_single = false;
copied -= wrb->frag_len;
+ adapter->drv_stats.dma_map_errors++;
queue_head_inc(txq);
}
return 0;
@@ -910,7 +918,7 @@ static bool be_ipv6_exthdr_check(struct sk_buff *skb)
if (ip6h->nexthdr != NEXTHDR_TCP &&
ip6h->nexthdr != NEXTHDR_UDP) {
struct ipv6_opt_hdr *ehdr =
- (struct ipv6_opt_hdr *) (skb->data + offset);
+ (struct ipv6_opt_hdr *)(skb->data + offset);
/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
if (ehdr->hdrlen == 0xff)
@@ -974,8 +982,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
* skip HW tagging is not enabled by FW.
*/
if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
- (adapter->pvid || adapter->qnq_vid) &&
- !qnq_async_evt_rcvd(adapter)))
+ (adapter->pvid || adapter->qnq_vid) &&
+ !qnq_async_evt_rcvd(adapter)))
goto tx_drop;
/* Manual VLAN tag insertion to prevent:
@@ -1073,15 +1081,15 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (new_mtu < BE_MIN_MTU ||
- new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
- dev_info(&adapter->pdev->dev,
- "MTU must be between %d and %d bytes\n",
- BE_MIN_MTU,
- (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
+ struct device *dev = &adapter->pdev->dev;
+
+ if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
+ dev_info(dev, "MTU must be between %d and %d bytes\n",
+ BE_MIN_MTU, BE_MAX_MTU);
return -EINVAL;
}
- dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
+
+ dev_info(dev, "MTU changed from %d to %d bytes\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
return 0;
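
/* BE_MAX_MTU is presumably defined in be.h by this same patch as
 *
 *	#define BE_MAX_MTU (BE_MAX_JUMBO_FRAME_SIZE - \
 *				(ETH_HLEN + ETH_FCS_LEN))
 *
 * i.e. the bound that was previously computed inline here.
 */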
@@ -1093,6 +1101,7 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
*/
static int be_vid_config(struct be_adapter *adapter)
{
+ struct device *dev = &adapter->pdev->dev;
u16 vids[BE_NUM_VLANS_SUPPORTED];
u16 num = 0, i = 0;
int status = 0;
@@ -1114,16 +1123,15 @@ static int be_vid_config(struct be_adapter *adapter)
if (addl_status(status) ==
MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
goto set_vlan_promisc;
- dev_err(&adapter->pdev->dev,
- "Setting HW VLAN filtering failed.\n");
+ dev_err(dev, "Setting HW VLAN filtering failed\n");
} else {
if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
/* hw VLAN filtering re-enabled. */
status = be_cmd_rx_filter(adapter,
BE_FLAGS_VLAN_PROMISC, OFF);
if (!status) {
- dev_info(&adapter->pdev->dev,
- "Disabling VLAN Promiscuous mode.\n");
+ dev_info(dev,
+ "Disabling VLAN Promiscuous mode\n");
adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
}
}
@@ -1137,11 +1145,10 @@ set_vlan_promisc:
status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
if (!status) {
- dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
+ dev_info(dev, "Enable VLAN Promiscuous mode\n");
adapter->flags |= BE_FLAGS_VLAN_PROMISC;
} else
- dev_err(&adapter->pdev->dev,
- "Failed to enable VLAN Promiscuous mode.\n");
+ dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
return status;
}
@@ -1417,6 +1424,7 @@ err:
max_tx_rate, vf);
return be_cmd_status(status);
}
+
static int be_set_vf_link_state(struct net_device *netdev, int vf,
int link_state)
{
@@ -1482,7 +1490,6 @@ static void be_eqd_update(struct be_adapter *adapter)
tx_pkts = txo->stats.tx_reqs;
} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
-
/* Skip, if wrapped around or first calculation */
now = jiffies;
if (!aic->jiffies || time_before(now, aic->jiffies) ||
@@ -1683,7 +1690,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
if (netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
- skb->encapsulation = rxcp->tunneled;
+ skb->csum_level = rxcp->tunneled;
skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
@@ -1741,7 +1748,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
if (adapter->netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
- skb->encapsulation = rxcp->tunneled;
+ skb->csum_level = rxcp->tunneled;
skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
@@ -1753,65 +1760,46 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
struct be_rx_compl_info *rxcp)
{
- rxcp->pkt_size =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
- rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
- rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
- rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
- rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
- rxcp->ip_csum =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
- rxcp->l4_csum =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
- rxcp->ipv6 =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
- rxcp->num_rcvd =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
- rxcp->pkt_type =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
- rxcp->rss_hash =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
+ rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
+ rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
+ rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
+ rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
+ rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
+ rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
+ rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
+ rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
+ rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
+ rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
+ rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
if (rxcp->vlanf) {
- rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
- compl);
- rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
- vlan_tag, compl);
+ rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
+ rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
}
- rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
+ rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
rxcp->tunneled =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
+ GET_RX_COMPL_V1_BITS(tunneled, compl);
}
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
struct be_rx_compl_info *rxcp)
{
- rxcp->pkt_size =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
- rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
- rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
- rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
- rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
- rxcp->ip_csum =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
- rxcp->l4_csum =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
- rxcp->ipv6 =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
- rxcp->num_rcvd =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
- rxcp->pkt_type =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
- rxcp->rss_hash =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
+ rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
+ rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
+ rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
+ rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
+ rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
+ rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
+ rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
+ rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
+ rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
+ rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
+ rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
if (rxcp->vlanf) {
- rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
- compl);
- rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
- vlan_tag, compl);
+ rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
+ rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
}
- rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
- rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
- ip_frag, compl);
+ rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
+ rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
@@ -1872,7 +1860,7 @@ static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
* Allocate a page, split it to fragments of size rx_frag_size and post as
* receive buffers to BE
*/
-static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
+static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
@@ -1881,10 +1869,10 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
struct device *dev = &adapter->pdev->dev;
struct be_eth_rx_d *rxd;
u64 page_dmaaddr = 0, frag_dmaaddr;
- u32 posted, page_offset = 0;
+ u32 posted, page_offset = 0, notify = 0;
page_info = &rxo->page_info_tbl[rxq->head];
- for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
+ for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
if (!pagep) {
pagep = be_alloc_pages(adapter->big_page_size, gfp);
if (unlikely(!pagep)) {
@@ -1897,7 +1885,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
if (dma_mapping_error(dev, page_dmaaddr)) {
put_page(pagep);
pagep = NULL;
- rx_stats(rxo)->rx_post_fail++;
+ adapter->drv_stats.dma_map_errors++;
break;
}
page_offset = 0;
@@ -1940,7 +1928,11 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
atomic_add(posted, &rxq->used);
if (rxo->rx_post_starved)
rxo->rx_post_starved = false;
- be_rxq_notify(adapter, rxq->id, posted);
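+ /* The RQ doorbell's num-posted field can apparently encode only a
+ * limited count, so now that more than MAX_RX_POST frags may be
+ * posted at once, ring the doorbell in chunks of at most 256.
+ */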
+ do {
+ notify = min(256u, posted);
+ be_rxq_notify(adapter, rxq->id, notify);
+ posted -= notify;
+ } while (posted);
} else if (atomic_read(&rxq->used) == 0) {
/* Let be_worker replenish when memory is available */
rxo->rx_post_starved = true;
@@ -1991,7 +1983,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
queue_tail_inc(txq);
} while (cur_index != last_index);
- dev_kfree_skb_any(sent_skb);
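+ /* Account the skb as consumed (successfully transmitted) rather than
+ * dropped; unlike dev_kfree_skb_any(), dev_consume_skb_any() keeps
+ * drop-monitoring tools from flagging normal TX completions as drops.
+ */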
+ dev_consume_skb_any(sent_skb);
return num_wrbs;
}
@@ -2069,7 +2061,8 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
memset(page_info, 0, sizeof(*page_info));
}
BUG_ON(atomic_read(&rxq->used));
- rxq->tail = rxq->head = 0;
+ rxq->tail = 0;
+ rxq->head = 0;
}
static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -2091,9 +2084,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
num_wrbs = 0;
txq = &txo->q;
while ((txcp = be_tx_compl_get(&txo->cq))) {
- end_idx =
- AMAP_GET_BITS(struct amap_eth_tx_compl,
- wrb_index, txcp);
+ end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
num_wrbs += be_tx_compl_process(adapter, txo,
end_idx);
cmpl++;
@@ -2164,7 +2155,6 @@ static int be_evt_queues_create(struct be_adapter *adapter)
napi_hash_add(&eqo->napi);
aic = &adapter->aic_obj[i];
eqo->adapter = adapter;
- eqo->tx_budget = BE_TX_BUDGET;
eqo->idx = i;
aic->max_eqd = BE_MAX_EQD;
aic->enable = true;
@@ -2394,6 +2384,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp;
u32 work_done;
+ u32 frags_consumed = 0;
for (work_done = 0; work_done < budget; work_done++) {
rxcp = be_rx_compl_get(rxo);
@@ -2426,6 +2417,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
be_rx_compl_process(rxo, napi, rxcp);
loop_continue:
+ frags_consumed += rxcp->num_rcvd;
be_rx_stats_update(rxo, rxcp);
}
@@ -2437,26 +2429,71 @@ loop_continue:
*/
if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
!rxo->rx_post_starved)
- be_post_rx_frags(rxo, GFP_ATOMIC);
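+ /* Replenish by at least as many frags as this poll consumed, e.g.
+ * (sketch) a NAPI run that consumed 90 frags posts 90 buffers even
+ * if MAX_RX_POST is smaller, topping the RXQ back up to its
+ * pre-poll fill level.
+ */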
+ be_post_rx_frags(rxo, GFP_ATOMIC,
+ max_t(u32, MAX_RX_POST,
+ frags_consumed));
}
return work_done;
}
-static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
- int budget, int idx)
+static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
+{
+ switch (status) {
+ case BE_TX_COMP_HDR_PARSE_ERR:
+ tx_stats(txo)->tx_hdr_parse_err++;
+ break;
+ case BE_TX_COMP_NDMA_ERR:
+ tx_stats(txo)->tx_dma_err++;
+ break;
+ case BE_TX_COMP_ACL_ERR:
+ tx_stats(txo)->tx_spoof_check_err++;
+ break;
+ }
+}
+
+static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
+{
+ switch (status) {
+ case LANCER_TX_COMP_LSO_ERR:
+ tx_stats(txo)->tx_tso_err++;
+ break;
+ case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
+ case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
+ tx_stats(txo)->tx_spoof_check_err++;
+ break;
+ case LANCER_TX_COMP_QINQ_ERR:
+ tx_stats(txo)->tx_qinq_err++;
+ break;
+ case LANCER_TX_COMP_PARITY_ERR:
+ tx_stats(txo)->tx_internal_parity_err++;
+ break;
+ case LANCER_TX_COMP_DMA_ERR:
+ tx_stats(txo)->tx_dma_err++;
+ break;
+ }
+}
+
+static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
+ int idx)
{
struct be_eth_tx_compl *txcp;
- int num_wrbs = 0, work_done;
+ int num_wrbs = 0, work_done = 0;
+ u32 compl_status;
+ u16 last_idx;
- for (work_done = 0; work_done < budget; work_done++) {
- txcp = be_tx_compl_get(&txo->cq);
- if (!txcp)
- break;
- num_wrbs += be_tx_compl_process(adapter, txo,
- AMAP_GET_BITS(struct
- amap_eth_tx_compl,
- wrb_index, txcp));
+ while ((txcp = be_tx_compl_get(&txo->cq))) {
+ last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
+ num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
+ work_done++;
+
+ compl_status = GET_TX_COMPL_BITS(status, txcp);
+ if (compl_status) {
+ if (lancer_chip(adapter))
+ lancer_update_tx_err(txo, compl_status);
+ else
+ be_update_tx_err(txo, compl_status);
+ }
}
if (work_done) {
@@ -2474,7 +2511,6 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
tx_stats(txo)->tx_compl += work_done;
u64_stats_update_end(&tx_stats(txo)->sync_compl);
}
- return (work_done < budget); /* Done */
}
int be_poll(struct napi_struct *napi, int budget)
@@ -2483,17 +2519,12 @@ int be_poll(struct napi_struct *napi, int budget)
struct be_adapter *adapter = eqo->adapter;
int max_work = 0, work, i, num_evts;
struct be_rx_obj *rxo;
- bool tx_done;
+ struct be_tx_obj *txo;
num_evts = events_get(eqo);
- /* Process all TXQs serviced by this EQ */
- for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
- tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
- eqo->tx_budget, i);
- if (!tx_done)
- max_work = budget;
- }
+ for_all_tx_queues_on_eq(adapter, eqo, txo, i)
+ be_process_tx(adapter, txo, i);
if (be_lock_napi(eqo)) {
/* This loop will iterate twice for EQ0 in which
@@ -2882,7 +2913,7 @@ static int be_rx_qs_create(struct be_adapter *adapter)
/* First time posting */
for_all_rx_queues(adapter, rxo, i)
- be_post_rx_frags(rxo, GFP_KERNEL);
+ be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
return 0;
}
@@ -3309,10 +3340,20 @@ static void BEx_get_resources(struct be_adapter *adapter,
*/
if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
!be_physfn(adapter) || (be_is_mc(adapter) &&
- !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
+ !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
res->max_tx_qs = 1;
- else
+ } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
+ struct be_resources super_nic_res = {0};
+
+ /* On a SuperNIC profile, the driver needs to use the
+ * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
+ */
+ be_cmd_get_profile_config(adapter, &super_nic_res, 0);
+ /* Some old versions of BE3 FW don't report max_tx_qs value */
+ res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
+ } else {
res->max_tx_qs = BE3_MAX_TX_QS;
+ }
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
!use_sriov && be_physfn(adapter))
@@ -3362,7 +3403,7 @@ static int be_get_sriov_config(struct be_adapter *adapter)
if (!be_max_vfs(adapter)) {
if (num_vfs)
- dev_warn(dev, "device doesn't support SRIOV\n");
+ dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
adapter->num_vfs = 0;
return 0;
}
@@ -3413,16 +3454,16 @@ static int be_get_resources(struct be_adapter *adapter)
if (be_roce_supported(adapter))
res.max_evt_qs /= 2;
adapter->res = res;
-
- dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
- be_max_txqs(adapter), be_max_rxqs(adapter),
- be_max_rss(adapter), be_max_eqs(adapter),
- be_max_vfs(adapter));
- dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
- be_max_uc(adapter), be_max_mc(adapter),
- be_max_vlans(adapter));
}
+ dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
+ be_max_txqs(adapter), be_max_rxqs(adapter),
+ be_max_rss(adapter), be_max_eqs(adapter),
+ be_max_vfs(adapter));
+ dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
+ be_max_uc(adapter), be_max_mc(adapter),
+ be_max_vlans(adapter));
+
return 0;
}
@@ -3633,9 +3674,10 @@ static int be_setup(struct be_adapter *adapter)
goto err;
be_cmd_get_fw_ver(adapter);
+ dev_info(dev, "FW version is %s\n", adapter->fw_ver);
if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
- dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
+ dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
adapter->fw_ver);
dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
}
@@ -3683,8 +3725,6 @@ static void be_netpoll(struct net_device *netdev)
be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
napi_schedule(&eqo->napi);
}
-
- return;
}
#endif
@@ -4052,6 +4092,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
+ struct device *dev = &adapter->pdev->dev;
struct be_dma_mem flash_cmd;
const u8 *data_ptr = NULL;
u8 *dest_image_ptr = NULL;
@@ -4064,21 +4105,16 @@ static int lancer_fw_download(struct be_adapter *adapter,
u8 change_status;
if (!IS_ALIGNED(fw->size, sizeof(u32))) {
- dev_err(&adapter->pdev->dev,
- "FW Image not properly aligned. "
- "Length must be 4 byte aligned.\n");
- status = -EINVAL;
- goto lancer_fw_exit;
+ dev_err(dev, "FW image size should be multiple of 4\n");
+ return -EINVAL;
}
flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ LANCER_FW_DOWNLOAD_CHUNK;
- flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+ flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
&flash_cmd.dma, GFP_KERNEL);
- if (!flash_cmd.va) {
- status = -ENOMEM;
- goto lancer_fw_exit;
- }
+ if (!flash_cmd.va)
+ return -ENOMEM;
dest_image_ptr = flash_cmd.va +
sizeof(struct lancer_cmd_req_write_object);
@@ -4113,35 +4149,27 @@ static int lancer_fw_download(struct be_adapter *adapter,
&add_status);
}
- dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
- flash_cmd.dma);
+ dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
if (status) {
- dev_err(&adapter->pdev->dev,
- "Firmware load error. "
- "Status code: 0x%x Additional Status: 0x%x\n",
- status, add_status);
- goto lancer_fw_exit;
+ dev_err(dev, "Firmware load error\n");
+ return be_cmd_status(status);
}
+ dev_info(dev, "Firmware flashed successfully\n");
+
if (change_status == LANCER_FW_RESET_NEEDED) {
- dev_info(&adapter->pdev->dev,
- "Resetting adapter to activate new FW\n");
+ dev_info(dev, "Resetting adapter to activate new FW\n");
status = lancer_physdev_ctrl(adapter,
PHYSDEV_CONTROL_FW_RESET_MASK);
if (status) {
- dev_err(&adapter->pdev->dev,
- "Adapter busy for FW reset.\n"
- "New FW will not be active.\n");
- goto lancer_fw_exit;
+ dev_err(dev, "Adapter busy, could not reset FW\n");
+ dev_err(dev, "Reboot server to activate new FW\n");
}
} else if (change_status != LANCER_NO_RESET_NEEDED) {
- dev_err(&adapter->pdev->dev,
- "System reboot required for new FW to be active\n");
+ dev_info(dev, "Reboot server to activate new FW\n");
}
- dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
-lancer_fw_exit:
- return status;
+ return 0;
}
#define UFI_TYPE2 2
@@ -4374,7 +4402,6 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
return;
err:
be_disable_vxlan_offloads(adapter);
- return;
}
static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
@@ -4506,6 +4533,7 @@ static int be_map_pci_bars(struct be_adapter *adapter)
return 0;
pci_map_err:
+ dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
be_unmap_pci_bars(adapter);
return -ENOMEM;
}
@@ -4713,7 +4741,6 @@ static void be_func_recovery_task(struct work_struct *work)
be_detect_error(adapter);
if (adapter->hw_error && lancer_chip(adapter)) {
-
rtnl_lock();
netif_device_detach(adapter->netdev);
rtnl_unlock();
@@ -4750,7 +4777,7 @@ static void be_worker(struct work_struct *work)
if (!adapter->stats_cmd_sent) {
if (lancer_chip(adapter))
lancer_cmd_get_pport_stats(adapter,
- &adapter->stats_cmd);
+ &adapter->stats_cmd);
else
be_cmd_get_stats(adapter, &adapter->stats_cmd);
}
@@ -4764,7 +4791,7 @@ static void be_worker(struct work_struct *work)
* allocation failures.
*/
if (rxo->rx_post_starved)
- be_post_rx_frags(rxo, GFP_KERNEL);
+ be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
}
be_eqd_update(adapter);
@@ -4822,6 +4849,8 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
struct net_device *netdev;
char port_name;
+ dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
+
status = pci_enable_device(pdev);
if (status)
goto do_none;
@@ -4853,11 +4882,9 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
}
}
- if (be_physfn(adapter)) {
- status = pci_enable_pcie_error_reporting(pdev);
- if (!status)
- dev_info(&pdev->dev, "PCIe error reporting enabled\n");
- }
+ status = pci_enable_pcie_error_reporting(pdev);
+ if (!status)
+ dev_info(&pdev->dev, "PCIe error reporting enabled\n");
status = be_ctrl_init(adapter);
if (status)
@@ -4897,7 +4924,8 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
INIT_DELAYED_WORK(&adapter->work, be_worker);
INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
- adapter->rx_fc = adapter->tx_fc = true;
+ adapter->rx_fc = true;
+ adapter->tx_fc = true;
status = be_setup(adapter);
if (status)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index ef4672dc7357..132866433a25 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -174,6 +174,7 @@ int be_roce_register_driver(struct ocrdma_driver *drv)
ocrdma_drv = drv;
list_for_each_entry(dev, &be_adapter_list, entry) {
struct net_device *netdev;
+
_be_roce_dev_add(dev);
netdev = dev->netdev;
if (netif_running(netdev) && netif_oper_up(netdev))
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ee41d98b44b6..26fb1de98826 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -27,8 +27,8 @@
*/
#define FEC_IEVENT 0x004 /* Interrupt event reg */
#define FEC_IMASK 0x008 /* Interrupt mask reg */
-#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */
-#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */
+#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */
+#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */
#define FEC_ECNTRL 0x024 /* Ethernet control reg */
#define FEC_MII_DATA 0x040 /* MII manage frame reg */
#define FEC_MII_SPEED 0x044 /* MII speed control reg */
@@ -38,6 +38,12 @@
#define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */
#define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */
#define FEC_OPD 0x0ec /* Opcode + Pause duration */
+#define FEC_TXIC0 0xF0 /* Tx Interrupt Coalescing for ring 0 */
+#define FEC_TXIC1 0xF4 /* Tx Interrupt Coalescing for ring 1 */
+#define FEC_TXIC2 0xF8 /* Tx Interrupt Coalescing for ring 2 */
+#define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */
+#define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */
+#define FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */
#define FEC_HASH_TABLE_HIGH 0x118 /* High 32bits hash table */
#define FEC_HASH_TABLE_LOW 0x11c /* Low 32bits hash table */
#define FEC_GRP_HASH_TABLE_HIGH 0x120 /* High 32bits hash table */
@@ -45,14 +51,27 @@
#define FEC_X_WMRK 0x144 /* FIFO transmit water mark */
#define FEC_R_BOUND 0x14c /* FIFO receive bound reg */
#define FEC_R_FSTART 0x150 /* FIFO receive start reg */
-#define FEC_R_DES_START 0x180 /* Receive descriptor ring */
-#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */
+#define FEC_R_DES_START_1 0x160 /* Receive descriptor ring 1 */
+#define FEC_X_DES_START_1 0x164 /* Transmit descriptor ring 1 */
+#define FEC_R_DES_START_2 0x16c /* Receive descriptor ring 2 */
+#define FEC_X_DES_START_2 0x170 /* Transmit descriptor ring 2 */
+#define FEC_R_DES_START_0 0x180 /* Receive descriptor ring */
+#define FEC_X_DES_START_0 0x184 /* Transmit descriptor ring */
#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */
#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
#define FEC_RACC 0x1C4 /* Receive Accelerator function */
+#define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */
+#define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */
+#define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */
+#define FEC_DMA_CFG_2 0x1dc /* DMA class Configuration for ring 2 */
+#define FEC_R_DES_ACTIVE_1 0x1e0 /* Rx descriptor active for ring 1 */
+#define FEC_X_DES_ACTIVE_1 0x1e4 /* Tx descriptor active for ring 1 */
+#define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */
+#define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */
+#define FEC_QOS_SCHEME 0x1f0 /* Set multi queues Qos scheme */
#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
@@ -121,8 +140,12 @@
#define FEC_IEVENT 0x004 /* Interrupt event reg */
#define FEC_IMASK 0x008 /* Interrupt mask reg */
#define FEC_IVEC 0x00c /* Interrupt vec status reg */
-#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */
-#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */
+#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */
+#define FEC_R_DES_ACTIVE_1 FEC_R_DES_ACTIVE_0
+#define FEC_R_DES_ACTIVE_2 FEC_R_DES_ACTIVE_0
+#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */
+#define FEC_X_DES_ACTIVE_1 FEC_X_DES_ACTIVE_0
+#define FEC_X_DES_ACTIVE_2 FEC_X_DES_ACTIVE_0
#define FEC_MII_DATA 0x040 /* MII manage frame reg */
#define FEC_MII_SPEED 0x044 /* MII speed control reg */
#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */
@@ -136,11 +159,27 @@
#define FEC_ADDR_HIGH 0x3c4 /* High 16bits MAC address */
#define FEC_GRP_HASH_TABLE_HIGH 0x3c8 /* High 32bits hash table */
#define FEC_GRP_HASH_TABLE_LOW 0x3cc /* Low 32bits hash table */
-#define FEC_R_DES_START 0x3d0 /* Receive descriptor ring */
-#define FEC_X_DES_START 0x3d4 /* Transmit descriptor ring */
+#define FEC_R_DES_START_0 0x3d0 /* Receive descriptor ring */
+#define FEC_R_DES_START_1 FEC_R_DES_START_0
+#define FEC_R_DES_START_2 FEC_R_DES_START_0
+#define FEC_X_DES_START_0 0x3d4 /* Transmit descriptor ring */
+#define FEC_X_DES_START_1 FEC_X_DES_START_0
+#define FEC_X_DES_START_2 FEC_X_DES_START_0
#define FEC_R_BUFF_SIZE 0x3d8 /* Maximum receive buff size */
#define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */
-
+/* These registers do not exist on the real chip; they are defined
+ * only so that the multiqueue code builds.
+ */
+#define FEC_RCMR_1 0xFFF
+#define FEC_RCMR_2 0xFFF
+#define FEC_DMA_CFG_1 0xFFF
+#define FEC_DMA_CFG_2 0xFFF
+#define FEC_TXIC0 0xFFF
+#define FEC_TXIC1 0xFFF
+#define FEC_TXIC2 0xFFF
+#define FEC_RXIC0 0xFFF
+#define FEC_RXIC1 0xFFF
+#define FEC_RXIC2 0xFFF
#endif /* CONFIG_M5272 */
@@ -233,6 +272,43 @@ struct bufdesc_ex {
/* This device has up to three irqs on some platforms */
#define FEC_IRQ_NUM 3
+/* Maximum number of queues supported.
+ * The ENET IP with AVB can provide up to 3 independent Tx and Rx queues.
+ * Users may select any queue count less than or equal to 3.
+ */
+#define FEC_ENET_MAX_TX_QS 3
+#define FEC_ENET_MAX_RX_QS 3
+
+#define FEC_R_DES_START(X) ((X == 1) ? FEC_R_DES_START_1 : \
+ ((X == 2) ? \
+ FEC_R_DES_START_2 : FEC_R_DES_START_0))
+#define FEC_X_DES_START(X) ((X == 1) ? FEC_X_DES_START_1 : \
+ ((X == 2) ? \
+ FEC_X_DES_START_2 : FEC_X_DES_START_0))
+#define FEC_R_DES_ACTIVE(X) ((X == 1) ? FEC_R_DES_ACTIVE_1 : \
+ ((X == 2) ? \
+ FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
+#define FEC_X_DES_ACTIVE(X) ((X == 1) ? FEC_X_DES_ACTIVE_1 : \
+ ((X == 2) ? \
+ FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
+
+#define FEC_DMA_CFG(X) ((X == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
+
+#define DMA_CLASS_EN (1 << 16)
+#define FEC_RCMR(X) ((X == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
+#define IDLE_SLOPE_MASK 0xFFFF
+#define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */
+#define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */
+#define IDLE_SLOPE(X) ((X == 1) ? (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \
+ (IDLE_SLOPE_2 & IDLE_SLOPE_MASK))
+#define RCMR_MATCHEN (0x1 << 16)
+#define RCMR_CMP_CFG(v, n) (((v) & 0x7) << ((n) << 2))
+#define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \
+ RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3))
+#define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \
+ RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3))
+#define RCMR_CMP(X) ((X == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
+
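As a quick orientation, and only as a sketch (the helper name and the ioremap()ed register base "hwp" are placeholders, not part of this patch), the classification and shaper macros above combine like this when bringing up ring 1:

/* Sketch: steer VLAN priorities 0-3 to ring 1 and give the ring a
 * credit-based shaper with a 0.5 bandwidth fraction (IDLE_SLOPE_1),
 * mirroring what fec_enet_enable_ring() does further down for i > 0.
 */
static void fec_ring1_setup_sketch(void __iomem *hwp)
{
	writel(RCMR_MATCHEN | RCMR_CMP(1), hwp + FEC_RCMR(1));
	writel(DMA_CLASS_EN | IDLE_SLOPE(1), hwp + FEC_DMA_CFG(1));
}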
/* The number of Tx and Rx buffers. These are allocated from the page
* pool. The code may assume these are a power of two, so it is best
* to keep them that size.
@@ -240,7 +316,7 @@ struct bufdesc_ex {
* the skbuffer directly.
*/
-#define FEC_ENET_RX_PAGES 8
+#define FEC_ENET_RX_PAGES 256
#define FEC_ENET_RX_FRSIZE 2048
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
@@ -256,6 +332,69 @@ struct bufdesc_ex {
#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
#define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+/* Interrupt events/masks. */
+#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
+#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
+#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
+#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
+#define FEC_ENET_TXF_0 ((uint)0x08000000) /* Full frame transmitted */
+#define FEC_ENET_TXF_1 ((uint)0x00000008) /* Full frame transmitted */
+#define FEC_ENET_TXF_2 ((uint)0x00000080) /* Full frame transmitted */
+#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
+#define FEC_ENET_RXF_0 ((uint)0x02000000) /* Full frame received */
+#define FEC_ENET_RXF_1 ((uint)0x00000002) /* Full frame received */
+#define FEC_ENET_RXF_2 ((uint)0x00000020) /* Full frame received */
+#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
+#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
+#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
+#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
+#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
+#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
+#define FEC_ENET_TS_TIMER ((uint)0x00008000)
+
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
+#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
+
+/* ENET interrupt coalescing macro define */
+#define FEC_ITR_CLK_SEL (0x1 << 30)
+#define FEC_ITR_EN (0x1 << 31)
+#define FEC_ITR_ICFT(X) (((X) & 0xFF) << 20)
+#define FEC_ITR_ICTT(X) ((X) & 0xFFFF)
+#define FEC_ITR_ICFT_DEFAULT 200 /* Set 200 frame count threshold */
+#define FEC_ITR_ICTT_DEFAULT 1000 /* Set 1000us timer threshold */
+
+#define FEC_VLAN_TAG_LEN 0x04
+#define FEC_ETHTYPE_LEN 0x02
+
+struct fec_enet_priv_tx_q {
+ int index;
+ unsigned char *tx_bounce[TX_RING_SIZE];
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+
+ dma_addr_t bd_dma;
+ struct bufdesc *tx_bd_base;
+ uint tx_ring_size;
+
+ unsigned short tx_stop_threshold;
+ unsigned short tx_wake_threshold;
+
+ struct bufdesc *cur_tx;
+ struct bufdesc *dirty_tx;
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_dma;
+};
+
+struct fec_enet_priv_rx_q {
+ int index;
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+
+ dma_addr_t bd_dma;
+ struct bufdesc *rx_bd_base;
+ uint rx_ring_size;
+
+ struct bufdesc *cur_rx;
+};
+
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
* tx_bd_base always point to the base of the buffer descriptors. The
* cur_rx and cur_tx point to the currently available buffer.
@@ -272,36 +411,28 @@ struct fec_enet_private {
struct clk *clk_ipg;
struct clk *clk_ahb;
+ struct clk *clk_ref;
struct clk *clk_enet_out;
struct clk *clk_ptp;
bool ptp_clk_on;
struct mutex ptp_clk_mutex;
+ unsigned int num_tx_queues;
+ unsigned int num_rx_queues;
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
- unsigned char *tx_bounce[TX_RING_SIZE];
- struct sk_buff *tx_skbuff[TX_RING_SIZE];
- struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
+ struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];
- /* CPM dual port RAM relative addresses */
- dma_addr_t bd_dma;
- /* Address of Rx and Tx buffers */
- struct bufdesc *rx_bd_base;
- struct bufdesc *tx_bd_base;
- /* The next free ring entry */
- struct bufdesc *cur_rx, *cur_tx;
- /* The ring entries to be free()ed */
- struct bufdesc *dirty_tx;
+ unsigned int total_tx_ring_size;
+ unsigned int total_rx_ring_size;
- unsigned short bufdesc_size;
- unsigned short tx_ring_size;
- unsigned short rx_ring_size;
- unsigned short tx_stop_threshold;
- unsigned short tx_wake_threshold;
+ unsigned long work_tx;
+ unsigned long work_rx;
+ unsigned long work_ts;
+ unsigned long work_mdio;
- /* Software TSO */
- char *tso_hdrs;
- dma_addr_t tso_hdrs_dma;
+ unsigned short bufdesc_size;
struct platform_device *pdev;
@@ -340,6 +471,16 @@ struct fec_enet_private {
int hwts_tx_en;
struct delayed_work time_keep;
struct regulator *reg_phy;
+
+ unsigned int tx_align;
+ unsigned int rx_align;
+
+ /* hw interrupt coalesce */
+ unsigned int rx_pkts_itr;
+ unsigned int rx_time_itr;
+ unsigned int tx_pkts_itr;
+ unsigned int tx_time_itr;
+ unsigned int itr_clk_rate;
};
void fec_ptp_init(struct platform_device *pdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 89355a719625..3a4ec0ff1a8c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -63,15 +63,12 @@
#include "fec.h"
static void set_multicast_list(struct net_device *ndev);
-
-#if defined(CONFIG_ARM)
-#define FEC_ALIGNMENT 0xf
-#else
-#define FEC_ALIGNMENT 0x3
-#endif
+static void fec_enet_itr_coal_init(struct net_device *ndev);
#define DRIVER_NAME "fec"
+#define FEC_ENET_GET_QUEUE(_x) (((_x) == 0) ? 1 : (((_x) == 1) ? 2 : 0))
+
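The remapping macro above walks the rings in AVB priority order; a minimal illustrative sketch (the array is not in the driver):

/* work-bit index -> hardware ring: bit 0 -> ring 1 (class A),
 * bit 1 -> ring 2 (class B), bit 2 -> ring 0 (best effort).
 */
static const int fec_ring_order_sketch[FEC_ENET_MAX_TX_QS] = {
	FEC_ENET_GET_QUEUE(0),	/* = 1, class A, drained first */
	FEC_ENET_GET_QUEUE(1),	/* = 2, class B */
	FEC_ENET_GET_QUEUE(2),	/* = 0, best effort, drained last */
};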
/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE (1 << 5)
#define FEC_ENET_RSEM_V 0x84
@@ -104,6 +101,22 @@ static void set_multicast_list(struct net_device *ndev);
* ENET_TDAR[TDAR].
*/
#define FEC_QUIRK_ERR006358 (1 << 7)
+/* ENET IP hw AVB
+ *
+ * The i.MX6SX ENET IP adds Audio Video Bridging (AVB) support:
+ * - Two class indicators on receive with configurable priority
+ * - Two class indicators and a line speed timer on transmit, allowing
+ *   class credit-based shapers to be implemented externally
+ * - Additional DMA registers provisioned to allow managing up to 3
+ * independent rings
+ */
+#define FEC_QUIRK_HAS_AVB (1 << 8)
+/* There is a TDAR race condition for multiqueue when the software sets TDAR
+ * and the uDMA clears TDAR simultaneously or in a small window (2-4 cycles).
+ * This will cause the udma_tx and udma_tx_arbiter state machines to hang.
+ * The issue exists in the i.MX6SX ENET IP.
+ */
+#define FEC_QUIRK_ERR007885 (1 << 9)
static struct platform_device_id fec_devtype[] = {
{
@@ -128,6 +141,12 @@ static struct platform_device_id fec_devtype[] = {
.name = "mvf600-fec",
.driver_data = FEC_QUIRK_ENET_MAC,
}, {
+ .name = "imx6sx-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
+ FEC_QUIRK_HAS_AVB | FEC_QUIRK_ERR007885,
+ }, {
/* sentinel */
}
};
@@ -139,6 +158,7 @@ enum imx_fec_type {
IMX28_FEC,
IMX6Q_FEC,
MVF600_FEC,
+ IMX6SX_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
@@ -147,6 +167,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
+ { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -175,21 +196,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#endif
#endif /* CONFIG_M5272 */
-/* Interrupt events/masks. */
-#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
-#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
-#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
-#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
-#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */
-#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
-#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */
-#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
-#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
-#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
-
-#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
-#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
-
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
*/
#define PKT_MAXBUF_SIZE 1522
@@ -242,22 +248,26 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
static int mii_cnt;
static inline
-struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
+ struct fec_enet_private *fep,
+ int queue_id)
{
struct bufdesc *new_bd = bdp + 1;
struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
struct bufdesc_ex *ex_base;
struct bufdesc *base;
int ring_size;
- if (bdp >= fep->tx_bd_base) {
- base = fep->tx_bd_base;
- ring_size = fep->tx_ring_size;
- ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+ if (bdp >= txq->tx_bd_base) {
+ base = txq->tx_bd_base;
+ ring_size = txq->tx_ring_size;
+ ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
} else {
- base = fep->rx_bd_base;
- ring_size = fep->rx_ring_size;
- ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+ base = rxq->rx_bd_base;
+ ring_size = rxq->rx_ring_size;
+ ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
}
if (fep->bufdesc_ex)
@@ -269,22 +279,26 @@ struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_priva
}
static inline
-struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
+ struct fec_enet_private *fep,
+ int queue_id)
{
struct bufdesc *new_bd = bdp - 1;
struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
struct bufdesc_ex *ex_base;
struct bufdesc *base;
int ring_size;
- if (bdp >= fep->tx_bd_base) {
- base = fep->tx_bd_base;
- ring_size = fep->tx_ring_size;
- ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+ if (bdp >= txq->tx_bd_base) {
+ base = txq->tx_bd_base;
+ ring_size = txq->tx_ring_size;
+ ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
} else {
- base = fep->rx_bd_base;
- ring_size = fep->rx_ring_size;
- ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+ base = rxq->rx_bd_base;
+ ring_size = rxq->rx_ring_size;
+ ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
}
if (fep->bufdesc_ex)
@@ -300,14 +314,15 @@ static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
}
-static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
+static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
+ struct fec_enet_priv_tx_q *txq)
{
int entries;
- entries = ((const char *)fep->dirty_tx -
- (const char *)fep->cur_tx) / fep->bufdesc_size - 1;
+ entries = ((const char *)txq->dirty_tx -
+ (const char *)txq->cur_tx) / fep->bufdesc_size - 1;
- return entries > 0 ? entries : entries + fep->tx_ring_size;
+ return entries > 0 ? entries : entries + txq->tx_ring_size;
}
static void *swap_buffer(void *bufaddr, int len)
@@ -324,22 +339,26 @@ static void *swap_buffer(void *bufaddr, int len)
static void fec_dump(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct bufdesc *bdp = fep->tx_bd_base;
- unsigned int index = 0;
+ struct bufdesc *bdp;
+ struct fec_enet_priv_tx_q *txq;
+ int index = 0;
netdev_info(ndev, "TX ring dump\n");
pr_info("Nr SC addr len SKB\n");
+ txq = fep->tx_queue[0];
+ bdp = txq->tx_bd_base;
+
do {
pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
index,
- bdp == fep->cur_tx ? 'S' : ' ',
- bdp == fep->dirty_tx ? 'H' : ' ',
+ bdp == txq->cur_tx ? 'S' : ' ',
+ bdp == txq->dirty_tx ? 'H' : ' ',
bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
- fep->tx_skbuff[index]);
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ txq->tx_skbuff[index]);
+ bdp = fec_enet_get_nextdesc(bdp, fep, 0);
index++;
- } while (bdp != fep->tx_bd_base);
+ } while (bdp != txq->tx_bd_base);
}
static inline bool is_ipv4_pkt(struct sk_buff *skb)
@@ -365,14 +384,17 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
}
static int
-fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
+fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb,
+ struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
- struct bufdesc *bdp = fep->cur_tx;
+ struct bufdesc *bdp = txq->cur_tx;
struct bufdesc_ex *ebdp;
int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned short queue = skb_get_queue_mapping(skb);
int frag, frag_len;
unsigned short status;
unsigned int estatus = 0;
@@ -384,7 +406,7 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
for (frag = 0; frag < nr_frags; frag++) {
this_frag = &skb_shinfo(skb)->frags[frag];
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
ebdp = (struct bufdesc_ex *)bdp;
status = bdp->cbd_sc;
@@ -412,11 +434,11 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
- index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
- if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+ if (((unsigned long) bufaddr) & fep->tx_align ||
id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
- memcpy(fep->tx_bounce[index], bufaddr, frag_len);
- bufaddr = fep->tx_bounce[index];
+ memcpy(txq->tx_bounce[index], bufaddr, frag_len);
+ bufaddr = txq->tx_bounce[index];
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, frag_len);
@@ -436,21 +458,22 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
bdp->cbd_sc = status;
}
- fep->cur_tx = bdp;
+ txq->cur_tx = bdp;
return 0;
dma_mapping_error:
- bdp = fep->cur_tx;
+ bdp = txq->cur_tx;
for (i = 0; i < frag; i++) {
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
bdp->cbd_datlen, DMA_TO_DEVICE);
}
return NETDEV_TX_OK;
}
-static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
+static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb, struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
@@ -461,12 +484,13 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
dma_addr_t addr;
unsigned short status;
unsigned short buflen;
+ unsigned short queue;
unsigned int estatus = 0;
unsigned int index;
int entries_free;
int ret;
- entries_free = fec_enet_get_free_txdesc_num(fep);
+ entries_free = fec_enet_get_free_txdesc_num(fep, txq);
if (entries_free < MAX_SKB_FRAGS + 1) {
dev_kfree_skb_any(skb);
if (net_ratelimit())
@@ -481,7 +505,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
}
/* Fill in a Tx ring entry */
- bdp = fep->cur_tx;
+ bdp = txq->cur_tx;
status = bdp->cbd_sc;
status &= ~BD_ENET_TX_STATS;
@@ -489,11 +513,12 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
bufaddr = skb->data;
buflen = skb_headlen(skb);
- index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
- if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ queue = skb_get_queue_mapping(skb);
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+ if (((unsigned long) bufaddr) & fep->tx_align ||
id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
- memcpy(fep->tx_bounce[index], skb->data, buflen);
- bufaddr = fep->tx_bounce[index];
+ memcpy(txq->tx_bounce[index], skb->data, buflen);
+ bufaddr = txq->tx_bounce[index];
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, buflen);
@@ -509,7 +534,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
}
if (nr_frags) {
- ret = fec_enet_txq_submit_frag_skb(skb, ndev);
+ ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
if (ret)
return ret;
} else {
@@ -537,10 +562,10 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
ebdp->cbd_esc = estatus;
}
- last_bdp = fep->cur_tx;
- index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
+ last_bdp = txq->cur_tx;
+ index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
/* Save skb pointer */
- fep->tx_skbuff[index] = skb;
+ txq->tx_skbuff[index] = skb;
bdp->cbd_datlen = buflen;
bdp->cbd_bufaddr = addr;
@@ -552,27 +577,28 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
bdp->cbd_sc = status;
/* If this was the last BD in the ring, start at the beginning again. */
- bdp = fec_enet_get_nextdesc(last_bdp, fep);
+ bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
skb_tx_timestamp(skb);
- fep->cur_tx = bdp;
+ txq->cur_tx = bdp;
/* Trigger transmission start */
- writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
return 0;
}
static int
-fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
- struct bufdesc *bdp, int index, char *data,
- int size, bool last_tcp, bool is_last)
+fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
+ struct net_device *ndev,
+ struct bufdesc *bdp, int index, char *data,
+ int size, bool last_tcp, bool is_last)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
unsigned short status;
unsigned int estatus = 0;
dma_addr_t addr;
@@ -582,10 +608,10 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
- if (((unsigned long) data) & FEC_ALIGNMENT ||
+ if (((unsigned long) data) & fep->tx_align ||
id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
- memcpy(fep->tx_bounce[index], data, size);
- data = fep->tx_bounce[index];
+ memcpy(txq->tx_bounce[index], data, size);
+ data = txq->tx_bounce[index];
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(data, size);
@@ -624,14 +650,15 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
}
static int
-fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
- struct bufdesc *bdp, int index)
+fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb, struct net_device *ndev,
+ struct bufdesc *bdp, int index)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
void *bufaddr;
unsigned long dmabuf;
unsigned short status;
@@ -641,12 +668,12 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
- bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
- dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
- if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
+ dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
+ if (((unsigned long)bufaddr) & fep->tx_align ||
id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
- memcpy(fep->tx_bounce[index], skb->data, hdr_len);
- bufaddr = fep->tx_bounce[index];
+ memcpy(txq->tx_bounce[index], skb->data, hdr_len);
+ bufaddr = txq->tx_bounce[index];
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, hdr_len);
@@ -676,17 +703,22 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
return 0;
}
-static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
+static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb,
+ struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
int total_len, data_left;
- struct bufdesc *bdp = fep->cur_tx;
+ struct bufdesc *bdp = txq->cur_tx;
+ unsigned short queue = skb_get_queue_mapping(skb);
struct tso_t tso;
unsigned int index = 0;
int ret;
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
- if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
+ if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "NOT enough BD for TSO!\n");
@@ -706,14 +738,14 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
while (total_len > 0) {
char *hdr;
- index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
total_len -= data_left;
/* prepare packet headers: MAC + IP + TCP */
- hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
+ hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
- ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
+ ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
if (ret)
goto err_release;
@@ -721,10 +753,13 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
int size;
size = min_t(int, tso.size, data_left);
- bdp = fec_enet_get_nextdesc(bdp, fep);
- index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
- ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
- size, size == data_left,
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+ index = fec_enet_get_bd_index(txq->tx_bd_base,
+ bdp, fep);
+ ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
+ bdp, index,
+ tso.data, size,
+ size == data_left,
total_len == 0);
if (ret)
goto err_release;
@@ -733,17 +768,22 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
tso_build_data(skb, &tso, size);
}
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
}
/* Save skb pointer */
- fep->tx_skbuff[index] = skb;
+ txq->tx_skbuff[index] = skb;
skb_tx_timestamp(skb);
- fep->cur_tx = bdp;
+ txq->cur_tx = bdp;
/* Trigger transmission start */
- writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+ if (!(id_entry->driver_data & FEC_QUIRK_ERR007885) ||
+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
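The unrolled readl() chain gives TDAR up to four chances to read back as zero before the kick; an equivalent loop form, shown purely for clarity and not part of the patch:

/* ERR007885 workaround, loop form: only write TDAR once it is observed
 * clear (or when the quirk is absent), avoiding the uDMA set/clear race.
 */
bool kick = !(id_entry->driver_data & FEC_QUIRK_ERR007885);
int try;

for (try = 0; !kick && try < 4; try++)
	if (!readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
		kick = true;
if (kick)
	writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));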
return 0;
@@ -757,18 +797,25 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
int entries_free;
+ unsigned short queue;
+ struct fec_enet_priv_tx_q *txq;
+ struct netdev_queue *nq;
int ret;
+ queue = skb_get_queue_mapping(skb);
+ txq = fep->tx_queue[queue];
+ nq = netdev_get_tx_queue(ndev, queue);
+
if (skb_is_gso(skb))
- ret = fec_enet_txq_submit_tso(skb, ndev);
+ ret = fec_enet_txq_submit_tso(txq, skb, ndev);
else
- ret = fec_enet_txq_submit_skb(skb, ndev);
+ ret = fec_enet_txq_submit_skb(txq, skb, ndev);
if (ret)
return ret;
- entries_free = fec_enet_get_free_txdesc_num(fep);
- if (entries_free <= fep->tx_stop_threshold)
- netif_stop_queue(ndev);
+ entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+ if (entries_free <= txq->tx_stop_threshold)
+ netif_tx_stop_queue(nq);
return NETDEV_TX_OK;
}
@@ -778,46 +825,111 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
static void fec_enet_bd_init(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp;
unsigned int i;
+ unsigned int q;
- /* Initialize the receive buffer descriptors. */
- bdp = fep->rx_bd_base;
- for (i = 0; i < fep->rx_ring_size; i++) {
+ for (q = 0; q < fep->num_rx_queues; q++) {
+ /* Initialize the receive buffer descriptors. */
+ rxq = fep->rx_queue[q];
+ bdp = rxq->rx_bd_base;
- /* Initialize the BD for every fragment in the page. */
- if (bdp->cbd_bufaddr)
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
- else
+ for (i = 0; i < rxq->rx_ring_size; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ if (bdp->cbd_bufaddr)
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ else
+ bdp->cbd_sc = 0;
+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, fep, q);
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ rxq->cur_rx = rxq->rx_bd_base;
+ }
+
+ for (q = 0; q < fep->num_tx_queues; q++) {
+ /* ...and the same for transmit */
+ txq = fep->tx_queue[q];
+ bdp = txq->tx_bd_base;
+ txq->cur_tx = bdp;
+
+ for (i = 0; i < txq->tx_ring_size; i++) {
+ /* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ if (txq->tx_skbuff[i]) {
+ dev_kfree_skb_any(txq->tx_skbuff[i]);
+ txq->tx_skbuff[i] = NULL;
+ }
+ bdp->cbd_bufaddr = 0;
+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, fep, q);
+ bdp->cbd_sc |= BD_SC_WRAP;
+ txq->dirty_tx = bdp;
}
+}
- /* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep);
- bdp->cbd_sc |= BD_SC_WRAP;
+static void fec_enet_active_rxring(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
- fep->cur_rx = fep->rx_bd_base;
+ for (i = 0; i < fep->num_rx_queues; i++)
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
+}
- /* ...and the same for transmit */
- bdp = fep->tx_bd_base;
- fep->cur_tx = bdp;
- for (i = 0; i < fep->tx_ring_size; i++) {
+static void fec_enet_enable_ring(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
+ int i;
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = 0;
- if (fep->tx_skbuff[i]) {
- dev_kfree_skb_any(fep->tx_skbuff[i]);
- fep->tx_skbuff[i] = NULL;
- }
- bdp->cbd_bufaddr = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ rxq = fep->rx_queue[i];
+ writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
+
+ /* enable DMA1/2 */
+ if (i)
+ writel(RCMR_MATCHEN | RCMR_CMP(i),
+ fep->hwp + FEC_RCMR(i));
}
- /* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep);
- bdp->cbd_sc |= BD_SC_WRAP;
- fep->dirty_tx = bdp;
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = fep->tx_queue[i];
+ writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));
+
+ /* enable DMA1/2 */
+ if (i)
+ writel(DMA_CLASS_EN | IDLE_SLOPE(i),
+ fep->hwp + FEC_DMA_CFG(i));
+ }
+}
+
+static void fec_enet_reset_skb(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_tx_q *txq;
+ int i, j;
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = fep->tx_queue[i];
+
+ for (j = 0; j < txq->tx_ring_size; j++) {
+ if (txq->tx_skbuff[j]) {
+ dev_kfree_skb_any(txq->tx_skbuff[j]);
+ txq->tx_skbuff[j] = NULL;
+ }
+ }
+ }
}
/*
@@ -831,15 +943,21 @@ fec_restart(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
- int i;
u32 val;
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */
- /* Whack a reset. We should wait for this. */
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
+ /* Whack a reset. We should wait for this.
+ * On the i.MX6SX SoC the ENET block is on the AXI bus, so we disable
+ * the MAC instead of resetting it.
+ */
+ if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
/*
* enet-mac reset will reset mac address registers too,
@@ -859,22 +977,10 @@ fec_restart(struct net_device *ndev)
fec_enet_bd_init(ndev);
- /* Set receive and transmit descriptor base. */
- writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
- if (fep->bufdesc_ex)
- writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
- * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
- else
- writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
- * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
-
+ fec_enet_enable_ring(ndev);
- for (i = 0; i <= TX_RING_MOD_MASK; i++) {
- if (fep->tx_skbuff[i]) {
- dev_kfree_skb_any(fep->tx_skbuff[i]);
- fep->tx_skbuff[i] = NULL;
- }
- }
+ /* Reset tx SKB buffers. */
+ fec_enet_reset_skb(ndev);
/* Enable MII mode */
if (fep->full_duplex == DUPLEX_FULL) {
@@ -996,13 +1102,17 @@ fec_restart(struct net_device *ndev)
/* And last, enable the transmit and receive processing */
writel(ecntl, fep->hwp + FEC_ECNTRL);
- writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+ fec_enet_active_rxring(ndev);
if (fep->bufdesc_ex)
fec_ptp_start_cyclecounter(ndev);
/* Enable interrupts we wish to service */
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+
+ /* Init the interrupt coalescing */
+ fec_enet_itr_coal_init(ndev);
+
}
static void
@@ -1021,9 +1131,16 @@ fec_stop(struct net_device *ndev)
netdev_err(ndev, "Graceful transmit stop did not complete!\n");
}
- /* Whack a reset. We should wait for this. */
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
+ /* Whack a reset. We should wait for this.
+ * On the i.MX6SX SoC the ENET block is on the AXI bus, so we disable
+ * the MAC instead of resetting it.
+ */
+ if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1081,37 +1198,45 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
}
static void
-fec_enet_tx(struct net_device *ndev)
+fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
struct fec_enet_private *fep;
struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
+ struct fec_enet_priv_tx_q *txq;
+ struct netdev_queue *nq;
int index = 0;
int entries_free;
fep = netdev_priv(ndev);
- bdp = fep->dirty_tx;
+ queue_id = FEC_ENET_GET_QUEUE(queue_id);
+
+ txq = fep->tx_queue[queue_id];
/* get next bdp of dirty_tx */
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ nq = netdev_get_tx_queue(ndev, queue_id);
+ bdp = txq->dirty_tx;
+
+ /* get next bdp of dirty_tx */
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
/* current queue is empty */
- if (bdp == fep->cur_tx)
+ if (bdp == txq->cur_tx)
break;
- index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
- skb = fep->tx_skbuff[index];
- fep->tx_skbuff[index] = NULL;
- if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
+ skb = txq->tx_skbuff[index];
+ txq->tx_skbuff[index] = NULL;
+ if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
bdp->cbd_datlen, DMA_TO_DEVICE);
bdp->cbd_bufaddr = 0;
if (!skb) {
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
continue;
}
@@ -1153,23 +1278,37 @@ fec_enet_tx(struct net_device *ndev)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
- fep->dirty_tx = bdp;
+ txq->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
/* Since we have freed up a buffer, the ring is no longer full
*/
if (netif_queue_stopped(ndev)) {
- entries_free = fec_enet_get_free_txdesc_num(fep);
- if (entries_free >= fep->tx_wake_threshold)
- netif_wake_queue(ndev);
+ entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+ if (entries_free >= txq->tx_wake_threshold)
+ netif_tx_wake_queue(nq);
}
}
/* ERR006538: Keep the transmitter going */
- if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
- writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+ if (bdp != txq->cur_tx &&
+ readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
+}
+
+static void
+fec_enet_tx(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u16 queue_id;
+ /* Process the class A queue first, then class B, then best effort */
+ for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
+ clear_bit(queue_id, &fep->work_tx);
+ fec_enet_tx_queue(ndev, queue_id);
+ }
}
/* During a receive, the cur_rx points to the current incoming buffer.
@@ -1178,11 +1317,12 @@ fec_enet_tx(struct net_device *ndev)
* effectively tossing the packet.
*/
static int
-fec_enet_rx(struct net_device *ndev, int budget)
+fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
+ struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
@@ -1197,11 +1337,13 @@ fec_enet_rx(struct net_device *ndev, int budget)
#ifdef CONFIG_M532x
flush_cache_all();
#endif
+ queue_id = FEC_ENET_GET_QUEUE(queue_id);
+ rxq = fep->rx_queue[queue_id];
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
- bdp = fep->cur_rx;
+ bdp = rxq->cur_rx;
while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
@@ -1215,7 +1357,6 @@ fec_enet_rx(struct net_device *ndev, int budget)
if ((status & BD_ENET_RX_LAST) == 0)
netdev_err(ndev, "rcv is not +last\n");
- writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
/* Check for errors. */
if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
@@ -1248,8 +1389,8 @@ fec_enet_rx(struct net_device *ndev, int budget)
pkt_len = bdp->cbd_datlen;
ndev->stats.rx_bytes += pkt_len;
- index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
- data = fep->rx_skbuff[index]->data;
+ index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
+ data = rxq->rx_skbuff[index]->data;
dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
@@ -1264,7 +1405,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
/* If this is a VLAN packet remove the VLAN Tag */
vlan_packet_rcvd = false;
if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
+ fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
/* Push and remove the vlan tag */
struct vlan_hdr *vlan_header =
(struct vlan_hdr *) (data + ETH_HLEN);
@@ -1341,19 +1482,56 @@ rx_processing_done:
}
/* Update BD pointer to next entry */
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
/* Doing this here will keep the FEC running while we process
* incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
*/
- writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
}
- fep->cur_rx = bdp;
+ rxq->cur_rx = bdp;
+ return pkt_received;
+}
+
+static int
+fec_enet_rx(struct net_device *ndev, int budget)
+{
+ int pkt_received = 0;
+ u16 queue_id;
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
+ clear_bit(queue_id, &fep->work_rx);
+ pkt_received += fec_enet_rx_queue(ndev,
+ budget - pkt_received, queue_id);
+ }
return pkt_received;
}
+static bool
+fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
+{
+ if (int_events == 0)
+ return false;
+
+ if (int_events & FEC_ENET_RXF_0)
+ fep->work_rx |= (1 << 2);
+ if (int_events & FEC_ENET_RXF_1)
+ fep->work_rx |= (1 << 0);
+ if (int_events & FEC_ENET_RXF_2)
+ fep->work_rx |= (1 << 1);
+
+ if (int_events & FEC_ENET_TXF_0)
+ fep->work_tx |= (1 << 2);
+ if (int_events & FEC_ENET_TXF_1)
+ fep->work_tx |= (1 << 0);
+ if (int_events & FEC_ENET_TXF_2)
+ fep->work_tx |= (1 << 1);
+
+ return true;
+}
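For example (an illustration, not driver code), an IEVENT snapshot with frames pending on rings 0 and 1 produces work_rx == 0x5, so NAPI drains the class A ring before the best-effort ring:

uint int_events = FEC_ENET_RXF_0 | FEC_ENET_RXF_1;

fec_enet_collect_events(fep, int_events);
/* fep->work_rx == 0x5: RXF_1 set bit 0, RXF_0 set bit 2; the
 * for_each_set_bit() walk in fec_enet_rx() visits bit 0 first.
 */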
+
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
@@ -1365,6 +1543,7 @@ fec_enet_interrupt(int irq, void *dev_id)
int_events = readl(fep->hwp + FEC_IEVENT);
writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
+ fec_enet_collect_events(fep, int_events);
if (int_events & napi_mask) {
ret = IRQ_HANDLED;
@@ -1621,6 +1800,11 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
}
mutex_unlock(&fep->ptp_clk_mutex);
}
+ if (fep->clk_ref) {
+ ret = clk_prepare_enable(fep->clk_ref);
+ if (ret)
+ goto failed_clk_ref;
+ }
} else {
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
@@ -1632,9 +1816,15 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
fep->ptp_clk_on = false;
mutex_unlock(&fep->ptp_clk_mutex);
}
+ if (fep->clk_ref)
+ clk_disable_unprepare(fep->clk_ref);
}
return 0;
+
+failed_clk_ref:
+ if (fep->clk_ref)
+ clk_disable_unprepare(fep->clk_ref);
failed_clk_ptp:
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
@@ -1674,13 +1864,13 @@ static int fec_enet_mii_probe(struct net_device *ndev)
continue;
if (dev_id--)
continue;
- strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+ strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
break;
}
if (phy_id >= PHY_MAX_ADDR) {
netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
- strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
+ strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
phy_id = 0;
}
@@ -2062,12 +2252,141 @@ static int fec_enet_nway_reset(struct net_device *dev)
return genphy_restart_aneg(phydev);
}
+/* ITR clock source is the enet system clock (clk_ahb).
+ * One TCTT unit is 64 clock cycles, i.e. cycle_ns * 64 ns.
+ * So the ICTT value = X us / (cycle_ns * 64).
+ */
+static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ return us * (fep->itr_clk_rate / 64000) / 1000;
+}
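A worked example, assuming a 66 MHz clk_ahb rate (an assumption for illustration; the real rate comes from clk_get_rate() at probe time):

/* One ICTT tick is 64 AHB cycles, so 66000000 / 64000 = 1031 ticks per
 * millisecond; the 1000 us default maps to 1031 ticks, well below the
 * 16-bit 0xFFFF ceiling enforced in fec_enet_set_coalesce().
 */
unsigned int itr_clk_rate = 66000000;	/* assumed clk_ahb rate */
unsigned int ticks = 1000 * (itr_clk_rate / 64000) / 1000;	/* = 1031 */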
+
+/* Set threshold for interrupt coalescing */
+static void fec_enet_itr_coal_set(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int rx_itr, tx_itr;
+
+ if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+ return;
+
+ /* Must be greater than zero to avoid unpredictable behavior */
+ if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
+ !fep->tx_time_itr || !fep->tx_pkts_itr)
+ return;
+
+ /* Select enet system clock as Interrupt Coalescing
+ * timer Clock Source
+ */
+ rx_itr = FEC_ITR_CLK_SEL;
+ tx_itr = FEC_ITR_CLK_SEL;
+
+ /* set ICFT and ICTT */
+ rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
+ rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
+ tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
+ tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
+
+ rx_itr |= FEC_ITR_EN;
+ tx_itr |= FEC_ITR_EN;
+
+ writel(tx_itr, fep->hwp + FEC_TXIC0);
+ writel(rx_itr, fep->hwp + FEC_RXIC0);
+ writel(tx_itr, fep->hwp + FEC_TXIC1);
+ writel(rx_itr, fep->hwp + FEC_RXIC1);
+ writel(tx_itr, fep->hwp + FEC_TXIC2);
+ writel(rx_itr, fep->hwp + FEC_RXIC2);
+}
+
+static int
+fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+
+ if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+ return -EOPNOTSUPP;
+
+ ec->rx_coalesce_usecs = fep->rx_time_itr;
+ ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
+
+ ec->tx_coalesce_usecs = fep->tx_time_itr;
+ ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
+
+ return 0;
+}
+
+static int
+fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+
+ unsigned int cycle;
+
+ if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+ return -EOPNOTSUPP;
+
+ if (ec->rx_max_coalesced_frames > 255) {
+ pr_err("Rx coalesced frames exceed hardware limiation");
+ return -EINVAL;
+ }
+
+ if (ec->tx_max_coalesced_frames > 255) {
+ pr_err("Tx coalesced frame exceed hardware limiation");
+ return -EINVAL;
+ }
+
+ cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
+ if (cycle > 0xFFFF) {
+ pr_err("Rx coalesced usec exceeds hardware limitation\n");
+ return -EINVAL;
+ }
+
+ cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
+ if (cycle > 0xFFFF) {
+ pr_err("Tx coalesced usec exceeds hardware limitation\n");
+ return -EINVAL;
+ }
+
+ fep->rx_time_itr = ec->rx_coalesce_usecs;
+ fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
+
+ fep->tx_time_itr = ec->tx_coalesce_usecs;
+ fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
+
+ fec_enet_itr_coal_set(ndev);
+
+ return 0;
+}
+
+static void fec_enet_itr_coal_init(struct net_device *ndev)
+{
+ struct ethtool_coalesce ec;
+
+ ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
+ ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
+
+ ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
+ ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
+
+ fec_enet_set_coalesce(ndev, &ec);
+}
+
static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_settings = fec_enet_get_settings,
.set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
.nway_reset = fec_enet_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_coalesce = fec_enet_get_coalesce,
+ .set_coalesce = fec_enet_set_coalesce,
#ifndef CONFIG_M5272
.get_pauseparam = fec_enet_get_pauseparam,
.set_pauseparam = fec_enet_set_pauseparam,
@@ -2105,46 +2424,140 @@ static void fec_enet_free_buffers(struct net_device *ndev)
unsigned int i;
struct sk_buff *skb;
struct bufdesc *bdp;
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
+ unsigned int q;
+
+ for (q = 0; q < fep->num_rx_queues; q++) {
+ rxq = fep->rx_queue[q];
+ bdp = rxq->rx_bd_base;
+ for (i = 0; i < rxq->rx_ring_size; i++) {
+ skb = rxq->rx_skbuff[i];
+ rxq->rx_skbuff[i] = NULL;
+ if (skb) {
+ dma_unmap_single(&fep->pdev->dev,
+ bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ }
+ }
- bdp = fep->rx_bd_base;
- for (i = 0; i < fep->rx_ring_size; i++) {
- skb = fep->rx_skbuff[i];
- fep->rx_skbuff[i] = NULL;
- if (skb) {
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
- FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+ for (q = 0; q < fep->num_tx_queues; q++) {
+ txq = fep->tx_queue[q];
+ bdp = txq->tx_bd_base;
+ for (i = 0; i < txq->tx_ring_size; i++) {
+ kfree(txq->tx_bounce[i]);
+ txq->tx_bounce[i] = NULL;
+ skb = txq->tx_skbuff[i];
+ txq->tx_skbuff[i] = NULL;
dev_kfree_skb(skb);
}
- bdp = fec_enet_get_nextdesc(bdp, fep);
}
+}
+
+static void fec_enet_free_queue(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+ struct fec_enet_priv_tx_q *txq;
+
+ for (i = 0; i < fep->num_tx_queues; i++)
+ if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
+ txq = fep->tx_queue[i];
+ dma_free_coherent(NULL,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ txq->tso_hdrs,
+ txq->tso_hdrs_dma);
+ }
+
+ for (i = 0; i < fep->num_rx_queues; i++)
+ kfree(fep->rx_queue[i]); /* kfree(NULL) is a no-op */
+
+ for (i = 0; i < fep->num_tx_queues; i++)
+ kfree(fep->tx_queue[i]);
+}
+
+static int fec_enet_alloc_queue(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+ int ret = 0;
+ struct fec_enet_priv_tx_q *txq;
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+ if (!txq) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
- bdp = fep->tx_bd_base;
- for (i = 0; i < fep->tx_ring_size; i++) {
- kfree(fep->tx_bounce[i]);
- fep->tx_bounce[i] = NULL;
- skb = fep->tx_skbuff[i];
- fep->tx_skbuff[i] = NULL;
- dev_kfree_skb(skb);
+ fep->tx_queue[i] = txq;
+ txq->tx_ring_size = TX_RING_SIZE;
+ fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size;
+
+ txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
+ txq->tx_wake_threshold =
+ (txq->tx_ring_size - txq->tx_stop_threshold) / 2;
+
+ txq->tso_hdrs = dma_alloc_coherent(NULL,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ &txq->tso_hdrs_dma,
+ GFP_KERNEL);
+ if (!txq->tso_hdrs) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+ }
+
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
+ GFP_KERNEL);
+ if (!fep->rx_queue[i]) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+
+ fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE;
+ fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size;
}
+ return ret;
+
+alloc_failed:
+ fec_enet_free_queue(ndev);
+ return ret;
}
-static int fec_enet_alloc_buffers(struct net_device *ndev)
+static int
+fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int i;
struct sk_buff *skb;
struct bufdesc *bdp;
+ struct fec_enet_priv_rx_q *rxq;
+ unsigned int off;
- bdp = fep->rx_bd_base;
- for (i = 0; i < fep->rx_ring_size; i++) {
+ rxq = fep->rx_queue[queue];
+ bdp = rxq->rx_bd_base;
+ for (i = 0; i < rxq->rx_ring_size; i++) {
dma_addr_t addr;
skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
if (!skb)
goto err_alloc;
+ off = ((unsigned long)skb->data) & fep->rx_align;
+ if (off)
+ skb_reserve(skb, fep->rx_align + 1 - off);
+
addr = dma_map_single(&fep->pdev->dev, skb->data,
- FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+ FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE);
+
if (dma_mapping_error(&fep->pdev->dev, addr)) {
dev_kfree_skb(skb);
if (net_ratelimit())
@@ -2152,7 +2565,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
goto err_alloc;
}
- fep->rx_skbuff[i] = skb;
+ rxq->rx_skbuff[i] = skb;
bdp->cbd_bufaddr = addr;
bdp->cbd_sc = BD_ENET_RX_EMPTY;
@@ -2161,17 +2574,32 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_RX_INT;
}
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
}
/* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, fep);
+ bdp = fec_enet_get_prevdesc(bdp, fep, queue);
bdp->cbd_sc |= BD_SC_WRAP;
+ return 0;
- bdp = fep->tx_bd_base;
- for (i = 0; i < fep->tx_ring_size; i++) {
- fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
- if (!fep->tx_bounce[i])
+ err_alloc:
+ fec_enet_free_buffers(ndev);
+ return -ENOMEM;
+}
+
+static int
+fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int i;
+ struct bufdesc *bdp;
+ struct fec_enet_priv_tx_q *txq;
+
+ txq = fep->tx_queue[queue];
+ bdp = txq->tx_bd_base;
+ for (i = 0; i < txq->tx_ring_size; i++) {
+ txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+ if (!txq->tx_bounce[i])
goto err_alloc;
bdp->cbd_sc = 0;
@@ -2182,11 +2610,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_TX_INT;
}
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
}
/* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, fep);
+ bdp = fec_enet_get_prevdesc(bdp, fep, queue);
bdp->cbd_sc |= BD_SC_WRAP;
return 0;
@@ -2196,6 +2624,21 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
return -ENOMEM;
}
+static int fec_enet_alloc_buffers(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int i;
+
+ for (i = 0; i < fep->num_rx_queues; i++)
+ if (fec_enet_alloc_rxq_buffers(ndev, i))
+ return -ENOMEM;
+
+ for (i = 0; i < fep->num_tx_queues; i++)
+ if (fec_enet_alloc_txq_buffers(ndev, i))
+ return -ENOMEM;
+ return 0;
+}
+
static int
fec_enet_open(struct net_device *ndev)
{
@@ -2225,7 +2668,8 @@ fec_enet_open(struct net_device *ndev)
fec_restart(ndev);
napi_enable(&fep->napi);
phy_start(fep->phy_dev);
- netif_start_queue(ndev);
+ netif_tx_start_all_queues(ndev);
+
return 0;
}
@@ -2399,7 +2843,7 @@ static int fec_set_features(struct net_device *netdev,
/* Resume the device after updates */
if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
fec_restart(netdev);
- netif_wake_queue(netdev);
+ netif_tx_wake_all_queues(netdev);
netif_tx_unlock_bh(netdev);
napi_enable(&fep->napi);
}
@@ -2407,10 +2851,17 @@ static int fec_set_features(struct net_device *netdev,
return 0;
}
+static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ return skb_tx_hash(ndev, skb);
+}
+
static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close,
.ndo_start_xmit = fec_enet_start_xmit,
+ .ndo_select_queue = fec_enet_select_queue,
.ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -2432,39 +2883,38 @@ static int fec_enet_init(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
struct bufdesc *cbd_base;
+ dma_addr_t bd_dma;
int bd_size;
+ unsigned int i;
- /* init the tx & rx ring size */
- fep->tx_ring_size = TX_RING_SIZE;
- fep->rx_ring_size = RX_RING_SIZE;
+#if defined(CONFIG_ARM)
+ fep->rx_align = 0xf;
+ fep->tx_align = 0xf;
+#else
+ fep->rx_align = 0x3;
+ fep->tx_align = 0x3;
+#endif
- fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
- fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
+ fec_enet_alloc_queue(ndev);
if (fep->bufdesc_ex)
fep->bufdesc_size = sizeof(struct bufdesc_ex);
else
fep->bufdesc_size = sizeof(struct bufdesc);
- bd_size = (fep->tx_ring_size + fep->rx_ring_size) *
+ bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) *
fep->bufdesc_size;
/* Allocate memory for buffer descriptors. */
- cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma,
+ cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
GFP_KERNEL);
- if (!cbd_base)
- return -ENOMEM;
-
- fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE,
- &fep->tso_hdrs_dma, GFP_KERNEL);
- if (!fep->tso_hdrs) {
- dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma);
+ if (!cbd_base) {
return -ENOMEM;
}
- memset(cbd_base, 0, PAGE_SIZE);
-
- fep->netdev = ndev;
+ memset(cbd_base, 0, bd_size);
/* Get the Ethernet address */
fec_get_mac(ndev);
@@ -2472,12 +2922,36 @@ static int fec_enet_init(struct net_device *ndev)
fec_set_mac_address(ndev, NULL);
/* Set receive and transmit descriptor base. */
- fep->rx_bd_base = cbd_base;
- if (fep->bufdesc_ex)
- fep->tx_bd_base = (struct bufdesc *)
- (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
- else
- fep->tx_bd_base = cbd_base + fep->rx_ring_size;
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ rxq = fep->rx_queue[i];
+ rxq->index = i;
+ rxq->rx_bd_base = (struct bufdesc *)cbd_base;
+ rxq->bd_dma = bd_dma;
+ if (fep->bufdesc_ex) {
+ bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size;
+ cbd_base = (struct bufdesc *)
+ (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
+ } else {
+ bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size;
+ cbd_base += rxq->rx_ring_size;
+ }
+ }
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = fep->tx_queue[i];
+ txq->index = i;
+ txq->tx_bd_base = (struct bufdesc *)cbd_base;
+ txq->bd_dma = bd_dma;
+ if (fep->bufdesc_ex) {
+ bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size;
+ cbd_base = (struct bufdesc *)
+ (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size);
+ } else {
+ bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size;
+ cbd_base += txq->tx_ring_size;
+ }
+ }
+
/* The FEC Ethernet specific entries in the device structure */
ndev->watchdog_timeo = TX_TIMEOUT;
@@ -2500,6 +2974,11 @@ static int fec_enet_init(struct net_device *ndev)
fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}
+ if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+ fep->tx_align = 0;
+ fep->rx_align = 0x3f;
+ }
+
ndev->hw_features = ndev->features;
fec_restart(ndev);
@@ -2545,6 +3024,42 @@ static void fec_reset_phy(struct platform_device *pdev)
}
#endif /* CONFIG_OF */
+static void
+fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int err;
+
+ *num_tx = *num_rx = 1;
+
+ if (!np || !of_device_is_available(np))
+ return;
+
+ /* parse the num of tx and rx queues */
+ err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
+ if (err)
+ *num_tx = 1;
+
+ err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
+ if (err)
+ *num_rx = 1;
+
+ if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
+ dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
+ *num_tx);
+ *num_tx = 1;
+ return;
+ }
+
+ if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
+ dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
+ *num_rx);
+ *num_rx = 1;
+ return;
+ }
+}
+
static int
fec_probe(struct platform_device *pdev)
{
@@ -2556,13 +3071,18 @@ fec_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
static int dev_id;
struct device_node *np = pdev->dev.of_node, *phy_node;
+ int num_tx_qs;
+ int num_rx_qs;
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
pdev->id_entry = of_id->data;
+ fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
+
/* Init network device */
- ndev = alloc_etherdev(sizeof(struct fec_enet_private));
+ ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
+ num_tx_qs, num_rx_qs);
if (!ndev)
return -ENOMEM;
@@ -2571,6 +3091,9 @@ fec_probe(struct platform_device *pdev)
/* setup board info structure */
fep = netdev_priv(ndev);
+ fep->num_rx_queues = num_rx_qs;
+ fep->num_tx_queues = num_tx_qs;
+
#if !defined(CONFIG_M5272)
/* default enable pause frame auto negotiation */
if (pdev->id_entry &&
@@ -2630,6 +3153,8 @@ fec_probe(struct platform_device *pdev)
goto failed_clk;
}
+ fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
+
/* enet_out is optional, depends on board */
fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
if (IS_ERR(fep->clk_enet_out))
@@ -2637,6 +3162,12 @@ fec_probe(struct platform_device *pdev)
fep->ptp_clk_on = false;
mutex_init(&fep->ptp_clk_mutex);
+
+ /* clk_ref is optional, depends on board */
+ fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
+ if (IS_ERR(fep->clk_ref))
+ fep->clk_ref = NULL;
+
fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
fep->bufdesc_ex =
pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
@@ -2684,6 +3215,7 @@ fec_probe(struct platform_device *pdev)
goto failed_irq;
}
+ init_completion(&fep->mdio_done);
ret = fec_enet_mii_init(pdev);
if (ret)
goto failed_mii_init;
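Editor's note: the descriptor-ring setup added above walks a single DMA-coherent allocation and hands each RX queue, then each TX queue, its own slice, advancing the CPU pointer (cbd_base) and the bus address (bd_dma) in lockstep. A minimal userspace sketch of that bookkeeping follows; the struct layouts, queue counts, and addresses are illustrative, not the driver's real values.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative descriptor layouts; the real fec bufdesc fields differ. */
struct bufdesc    { uint16_t sc, len; uint32_t addr; };
struct bufdesc_ex { struct bufdesc bd; uint32_t esc, bdu, ts; };

int main(void)
{
	const int num_rx = 2, num_tx = 2, ring_size = 4;
	const int extended = 1; /* bufdesc_ex rings in use */
	size_t desc_sz = extended ? sizeof(struct bufdesc_ex)
				  : sizeof(struct bufdesc);
	size_t total = desc_sz * ring_size * (num_rx + num_tx);

	char *mem = malloc(total);      /* stands in for dma_alloc_coherent() */
	char *cbd_base = mem;           /* CPU view of the block */
	uintptr_t bd_dma = 0x80000000u; /* pretend bus address of the same block */

	if (!mem)
		return 1;
	for (int q = 0; q < num_rx + num_tx; q++) {
		printf("%s[%d]: cpu=%p dma=0x%lx\n",
		       q < num_rx ? "rxq" : "txq",
		       q < num_rx ? q : q - num_rx,
		       (void *)cbd_base, (unsigned long)bd_dma);
		/* advance CPU pointer and DMA address in lockstep, one ring each */
		cbd_base += desc_sz * ring_size;
		bd_dma += desc_sz * ring_size;
	}
	free(mem);
	return 0;
}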
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index ed7916f6fbcf..76a6e0c77d69 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1627,7 +1627,7 @@ static void hp100_clean_txring(struct net_device *dev)
#endif
/* Conversion to new PCI API : NOP */
pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE);
- dev_kfree_skb_any(lp->txrhead->skb);
+ dev_consume_skb_any(lp->txrhead->skb);
lp->txrhead->skb = NULL;
lp->txrhead = lp->txrhead->next;
lp->txrcommit--;
@@ -1745,7 +1745,7 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
hp100_ints_on();
spin_unlock_irqrestore(&lp->lock, flags);
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
#ifdef HP100_DEBUG_TX
printk("hp100: %s: start_xmit: end\n", dev->name);
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 10a0f221b183..69707108d23c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -148,16 +148,23 @@ struct e1000_adapter;
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
-struct e1000_buffer {
+struct e1000_tx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
- struct page *page;
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
- unsigned int segs;
+ bool mapped_as_page;
+ unsigned short segs;
unsigned int bytecount;
- u16 mapped_as_page;
+};
+
+struct e1000_rx_buffer {
+ union {
+ struct page *page; /* jumbo: alloc_page */
+ u8 *data; /* else, netdev_alloc_frag */
+ } rxbuf;
+ dma_addr_t dma;
};
struct e1000_tx_ring {
@@ -174,7 +181,7 @@ struct e1000_tx_ring {
/* next descriptor to check for DD status bit */
unsigned int next_to_clean;
/* array of buffer information structs */
- struct e1000_buffer *buffer_info;
+ struct e1000_tx_buffer *buffer_info;
u16 tdh;
u16 tdt;
@@ -195,7 +202,7 @@ struct e1000_rx_ring {
/* next descriptor to check for DD status bit */
unsigned int next_to_clean;
/* array of buffer information structs */
- struct e1000_buffer *buffer_info;
+ struct e1000_rx_buffer *buffer_info;
struct sk_buff *rx_skb_top;
/* cpu for rx queue */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index cca5bca44e73..b691eb4f6376 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1,35 +1,30 @@
/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2006 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+ * Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2006 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
/* ethtool support for e1000 */
#include "e1000.h"
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
enum {NETDEV_STATS, E1000_STATS};
@@ -42,7 +37,7 @@ struct e1000_stats {
#define E1000_STAT(m) E1000_STATS, \
sizeof(((struct e1000_adapter *)0)->m), \
- offsetof(struct e1000_adapter, m)
+ offsetof(struct e1000_adapter, m)
#define E1000_NETDEV_STAT(m) NETDEV_STATS, \
sizeof(((struct net_device *)0)->m), \
offsetof(struct net_device, m)
@@ -104,6 +99,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
"Interrupt test (offline)", "Loopback test (offline)",
"Link test (on/offline)"
};
+
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
static int e1000_get_settings(struct net_device *netdev,
@@ -113,7 +109,6 @@ static int e1000_get_settings(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
if (hw->media_type == e1000_media_type_copper) {
-
ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
@@ -155,9 +150,8 @@ static int e1000_get_settings(struct net_device *netdev,
}
if (er32(STATUS) & E1000_STATUS_LU) {
-
e1000_get_speed_and_duplex(hw, &adapter->link_speed,
- &adapter->link_duplex);
+ &adapter->link_duplex);
ethtool_cmd_speed_set(ecmd, adapter->link_speed);
/* unfortunately FULL_DUPLEX != DUPLEX_FULL
@@ -247,9 +241,9 @@ static int e1000_set_settings(struct net_device *netdev,
if (netif_running(adapter->netdev)) {
e1000_down(adapter);
e1000_up(adapter);
- } else
+ } else {
e1000_reset(adapter);
-
+ }
clear_bit(__E1000_RESETTING, &adapter->flags);
return 0;
}
@@ -279,11 +273,11 @@ static void e1000_get_pauseparam(struct net_device *netdev,
pause->autoneg =
(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
- if (hw->fc == E1000_FC_RX_PAUSE)
+ if (hw->fc == E1000_FC_RX_PAUSE) {
pause->rx_pause = 1;
- else if (hw->fc == E1000_FC_TX_PAUSE)
+ } else if (hw->fc == E1000_FC_TX_PAUSE) {
pause->tx_pause = 1;
- else if (hw->fc == E1000_FC_FULL) {
+ } else if (hw->fc == E1000_FC_FULL) {
pause->rx_pause = 1;
pause->tx_pause = 1;
}
@@ -316,8 +310,9 @@ static int e1000_set_pauseparam(struct net_device *netdev,
if (netif_running(adapter->netdev)) {
e1000_down(adapter);
e1000_up(adapter);
- } else
+ } else {
e1000_reset(adapter);
+ }
} else
retval = ((hw->media_type == e1000_media_type_fiber) ?
e1000_setup_link(hw) : e1000_force_mac_fc(hw));
@@ -329,12 +324,14 @@ static int e1000_set_pauseparam(struct net_device *netdev,
static u32 e1000_get_msglevel(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+
return adapter->msg_enable;
}
static void e1000_set_msglevel(struct net_device *netdev, u32 data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+
adapter->msg_enable = data;
}
@@ -526,7 +523,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
* only the first byte of the word is being modified
*/
ret_val = e1000_read_eeprom(hw, last_word, 1,
- &eeprom_buff[last_word - first_word]);
+ &eeprom_buff[last_word - first_word]);
}
/* Device's eeprom is always little-endian, word addressable */
@@ -618,13 +615,12 @@ static int e1000_set_ringparam(struct net_device *netdev,
adapter->tx_ring = txdr;
adapter->rx_ring = rxdr;
- rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
- rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
+ rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
+ rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ?
E1000_MAX_RXD : E1000_MAX_82544_RXD));
rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
-
- txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
- txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
+ txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
+ txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ?
E1000_MAX_TXD : E1000_MAX_82544_TXD));
txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
@@ -680,8 +676,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
u32 mask, u32 write)
{
struct e1000_hw *hw = &adapter->hw;
- static const u32 test[] =
- {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ static const u32 test[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+ };
u8 __iomem *address = hw->hw_addr + reg;
u32 read;
int i;
@@ -793,8 +790,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
value = E1000_RAR_ENTRIES;
for (i = 0; i < value; i++) {
- REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
- 0xFFFFFFFF);
+ REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2),
+ 0x8003FFFF, 0xFFFFFFFF);
}
} else {
REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
@@ -877,7 +874,6 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Test each interrupt */
for (; i < 10; i++) {
-
/* Interrupt to test */
mask = 1 << i;
@@ -972,10 +968,9 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (rxdr->buffer_info[i].dma)
dma_unmap_single(&pdev->dev,
rxdr->buffer_info[i].dma,
- rxdr->buffer_info[i].length,
+ E1000_RXBUFFER_2048,
DMA_FROM_DEVICE);
- if (rxdr->buffer_info[i].skb)
- dev_kfree_skb(rxdr->buffer_info[i].skb);
+ kfree(rxdr->buffer_info[i].rxbuf.data);
}
}
@@ -1010,7 +1005,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
if (!txdr->count)
txdr->count = E1000_DEFAULT_TXD;
- txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer),
+ txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer),
GFP_KERNEL);
if (!txdr->buffer_info) {
ret_val = 1;
@@ -1069,7 +1064,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
if (!rxdr->count)
rxdr->count = E1000_DEFAULT_RXD;
- rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
+ rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer),
GFP_KERNEL);
if (!rxdr->buffer_info) {
ret_val = 5;
@@ -1099,25 +1094,25 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
for (i = 0; i < rxdr->count; i++) {
struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
- struct sk_buff *skb;
+ u8 *buf;
- skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
- if (!skb) {
+ buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN,
+ GFP_KERNEL);
+ if (!buf) {
ret_val = 7;
goto err_nomem;
}
- skb_reserve(skb, NET_IP_ALIGN);
- rxdr->buffer_info[i].skb = skb;
- rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
+ rxdr->buffer_info[i].rxbuf.data = buf;
+
rxdr->buffer_info[i].dma =
- dma_map_single(&pdev->dev, skb->data,
+ dma_map_single(&pdev->dev,
+ buf + NET_SKB_PAD + NET_IP_ALIGN,
E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
ret_val = 8;
goto err_nomem;
}
rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
- memset(skb->data, 0x00, skb->len);
}
return 0;
@@ -1149,8 +1144,7 @@ static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
*/
e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
phy_reg |= M88E1000_EPSCR_TX_CLK_25;
- e1000_write_phy_reg(hw,
- M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
+ e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
/* In addition, because of the s/w reset above, we need to enable
* CRS on TX. This must be set for both full and half duplex
@@ -1158,8 +1152,7 @@ static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
*/
e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
- e1000_write_phy_reg(hw,
- M88E1000_PHY_SPEC_CTRL, phy_reg);
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
}
static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
@@ -1216,7 +1209,7 @@ static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
/* Check Phy Configuration */
e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
if (phy_reg != 0x4100)
- return 9;
+ return 9;
e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
if (phy_reg != 0x0070)
@@ -1261,7 +1254,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
E1000_CTRL_FD); /* Force Duplex to FULL */
if (hw->media_type == e1000_media_type_copper &&
- hw->phy_type == e1000_phy_m88)
+ hw->phy_type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
else {
/* Set the ILOS bit on the fiber Nic if half
@@ -1299,7 +1292,7 @@ static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
* attempt this 10 times.
*/
while (e1000_nonintegrated_phy_loopback(adapter) &&
- count++ < 10);
+ count++ < 10);
if (count < 11)
return 0;
}
@@ -1348,8 +1341,9 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
ew32(RCTL, rctl);
return 0;
}
- } else if (hw->media_type == e1000_media_type_copper)
+ } else if (hw->media_type == e1000_media_type_copper) {
return e1000_set_phy_loopback(adapter);
+ }
return 7;
}
@@ -1391,13 +1385,13 @@ static void e1000_create_lbtest_frame(struct sk_buff *skb,
memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
}
-static int e1000_check_lbtest_frame(struct sk_buff *skb,
+static int e1000_check_lbtest_frame(const unsigned char *data,
unsigned int frame_size)
{
frame_size &= ~1;
- if (*(skb->data + 3) == 0xFF) {
- if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
- (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+ if (*(data + 3) == 0xFF) {
+ if ((*(data + frame_size / 2 + 10) == 0xBE) &&
+ (*(data + frame_size / 2 + 12) == 0xAF)) {
return 0;
}
}
@@ -1410,7 +1404,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
- int i, j, k, l, lc, good_cnt, ret_val=0;
+ int i, j, k, l, lc, good_cnt, ret_val = 0;
unsigned long time;
ew32(RDT, rxdr->count - 1);
@@ -1429,12 +1423,13 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
for (j = 0; j <= lc; j++) { /* loop count loop */
for (i = 0; i < 64; i++) { /* send the packets */
e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
- 1024);
+ 1024);
dma_sync_single_for_device(&pdev->dev,
txdr->buffer_info[k].dma,
txdr->buffer_info[k].length,
DMA_TO_DEVICE);
- if (unlikely(++k == txdr->count)) k = 0;
+ if (unlikely(++k == txdr->count))
+ k = 0;
}
ew32(TDT, k);
E1000_WRITE_FLUSH();
@@ -1444,15 +1439,17 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
do { /* receive the sent packets */
dma_sync_single_for_cpu(&pdev->dev,
rxdr->buffer_info[l].dma,
- rxdr->buffer_info[l].length,
+ E1000_RXBUFFER_2048,
DMA_FROM_DEVICE);
ret_val = e1000_check_lbtest_frame(
- rxdr->buffer_info[l].skb,
+ rxdr->buffer_info[l].rxbuf.data +
+ NET_SKB_PAD + NET_IP_ALIGN,
1024);
if (!ret_val)
good_cnt++;
- if (unlikely(++l == rxdr->count)) l = 0;
+ if (unlikely(++l == rxdr->count))
+ l = 0;
/* time + 20 msecs (200 msecs on 2.4) is more than
* enough time to complete the receives, if it's
* exceeded, break and error off
@@ -1494,6 +1491,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
if (hw->media_type == e1000_media_type_internal_serdes) {
int i = 0;
+
hw->serdes_has_link = false;
/* On some blade server designs, link establishment
@@ -1512,9 +1510,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
if (hw->autoneg) /* if auto_neg is set wait for it */
msleep(4000);
- if (!(er32(STATUS) & E1000_STATUS_LU)) {
+ if (!(er32(STATUS) & E1000_STATUS_LU))
*data = 1;
- }
}
return *data;
}
@@ -1665,8 +1662,7 @@ static void e1000_get_wol(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC;
+ wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
wol->wolopts = 0;
/* this function will set ->supported = 0 and return 1 if wol is not
@@ -1819,6 +1815,7 @@ static int e1000_set_coalesce(struct net_device *netdev,
static int e1000_nway_reset(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+
if (netif_running(netdev))
e1000_reinit_locked(adapter);
return 0;
@@ -1830,22 +1827,29 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
int i;
char *p = NULL;
+ const struct e1000_stats *stat = e1000_gstrings_stats;
e1000_update_stats(adapter);
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
- switch (e1000_gstrings_stats[i].type) {
+ switch (stat->type) {
case NETDEV_STATS:
- p = (char *) netdev +
- e1000_gstrings_stats[i].stat_offset;
+ p = (char *)netdev + stat->stat_offset;
break;
case E1000_STATS:
- p = (char *) adapter +
- e1000_gstrings_stats[i].stat_offset;
+ p = (char *)adapter + stat->stat_offset;
+ break;
+ default:
+ WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
+ stat->type, i);
break;
}
- data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ if (stat->sizeof_stat == sizeof(u64))
+ data[i] = *(u64 *)p;
+ else
+ data[i] = *(u32 *)p;
+
+ stat++;
}
/* BUG_ON(i != E1000_STATS_LEN); */
}
@@ -1858,8 +1862,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *e1000_gstrings_test,
- sizeof(e1000_gstrings_test));
+ memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
break;
case ETH_SS_STATS:
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
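Editor's note: the reworked e1000_get_ethtool_stats() walks a table of {type, size, offset} records and reads each counter through a char pointer plus offsetof(), picking a u64 or u32 load based on the recorded size. A self-contained userspace sketch of the same pattern; the struct and table contents are made up for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_adapter { uint64_t tx_bytes; uint32_t restarts; };

struct stat_def {
	size_t size;   /* sizeof the field */
	size_t offset; /* offsetof within fake_adapter */
};

static const struct stat_def defs[] = {
	{ sizeof(((struct fake_adapter *)0)->tx_bytes),
	  offsetof(struct fake_adapter, tx_bytes) },
	{ sizeof(((struct fake_adapter *)0)->restarts),
	  offsetof(struct fake_adapter, restarts) },
};

int main(void)
{
	struct fake_adapter a = { .tx_bytes = 123456789ULL, .restarts = 3 };
	uint64_t data[2];

	for (size_t i = 0; i < 2; i++) {
		const char *p = (const char *)&a + defs[i].offset;

		/* widen 32-bit counters so the exported array is uniform */
		if (defs[i].size == sizeof(uint64_t))
			data[i] = *(const uint64_t *)p;
		else
			data[i] = *(const uint32_t *)p;
		printf("stat[%zu] = %llu\n", i, (unsigned long long)data[i]);
	}
	return 0;
}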
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 1acf5034db10..45c8c864104e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -4837,84 +4837,6 @@ void e1000_update_adaptive(struct e1000_hw *hw)
}
/**
- * e1000_tbi_adjust_stats
- * @hw: Struct containing variables accessed by shared code
- * @frame_len: The length of the frame in question
- * @mac_addr: The Ethernet destination address of the frame in question
- *
- * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
- */
-void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
- u32 frame_len, u8 *mac_addr)
-{
- u64 carry_bit;
-
- /* First adjust the frame length. */
- frame_len--;
- /* We need to adjust the statistics counters, since the hardware
- * counters overcount this packet as a CRC error and undercount
- * the packet as a good packet
- */
- /* This packet should not be counted as a CRC error. */
- stats->crcerrs--;
- /* This packet does count as a Good Packet Received. */
- stats->gprc++;
-
- /* Adjust the Good Octets received counters */
- carry_bit = 0x80000000 & stats->gorcl;
- stats->gorcl += frame_len;
- /* If the high bit of Gorcl (the low 32 bits of the Good Octets
- * Received Count) was one before the addition,
- * AND it is zero after, then we lost the carry out,
- * need to add one to Gorch (Good Octets Received Count High).
- * This could be simplified if all environments supported
- * 64-bit integers.
- */
- if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
- stats->gorch++;
- /* Is this a broadcast or multicast? Check broadcast first,
- * since the test for a multicast frame will test positive on
- * a broadcast frame.
- */
- if (is_broadcast_ether_addr(mac_addr))
- /* Broadcast packet */
- stats->bprc++;
- else if (is_multicast_ether_addr(mac_addr))
- /* Multicast packet */
- stats->mprc++;
-
- if (frame_len == hw->max_frame_size) {
- /* In this case, the hardware has overcounted the number of
- * oversize frames.
- */
- if (stats->roc > 0)
- stats->roc--;
- }
-
- /* Adjust the bin counters when the extra byte put the frame in the
- * wrong bin. Remember that the frame_len was adjusted above.
- */
- if (frame_len == 64) {
- stats->prc64++;
- stats->prc127--;
- } else if (frame_len == 127) {
- stats->prc127++;
- stats->prc255--;
- } else if (frame_len == 255) {
- stats->prc255++;
- stats->prc511--;
- } else if (frame_len == 511) {
- stats->prc511++;
- stats->prc1023--;
- } else if (frame_len == 1023) {
- stats->prc1023++;
- stats->prc1522--;
- } else if (frame_len == 1522) {
- stats->prc1522++;
- }
-}
-
-/**
* e1000_get_bus_info
* @hw: Struct containing variables accessed by shared code
*
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
index 11578c8978db..5cf7268cc4e1 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
@@ -393,8 +393,6 @@ s32 e1000_blink_led_start(struct e1000_hw *hw);
/* Everything else */
void e1000_reset_adaptive(struct e1000_hw *hw);
void e1000_update_adaptive(struct e1000_hw *hw);
-void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
- u32 frame_len, u8 * mac_addr);
void e1000_get_bus_info(struct e1000_hw *hw);
void e1000_pci_set_mwi(struct e1000_hw *hw);
void e1000_pci_clear_mwi(struct e1000_hw *hw);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index ad3d5d12173f..5f6aded512f5 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1497,7 +1497,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
struct pci_dev *pdev = adapter->pdev;
int size;
- size = sizeof(struct e1000_buffer) * txdr->count;
+ size = sizeof(struct e1000_tx_buffer) * txdr->count;
txdr->buffer_info = vzalloc(size);
if (!txdr->buffer_info)
return -ENOMEM;
@@ -1687,7 +1687,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
struct pci_dev *pdev = adapter->pdev;
int size, desc_len;
- size = sizeof(struct e1000_buffer) * rxdr->count;
+ size = sizeof(struct e1000_rx_buffer) * rxdr->count;
rxdr->buffer_info = vzalloc(size);
if (!rxdr->buffer_info)
return -ENOMEM;
@@ -1947,8 +1947,9 @@ void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
-static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
- struct e1000_buffer *buffer_info)
+static void
+e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
+ struct e1000_tx_buffer *buffer_info)
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
@@ -1977,7 +1978,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
struct e1000_hw *hw = &adapter->hw;
- struct e1000_buffer *buffer_info;
+ struct e1000_tx_buffer *buffer_info;
unsigned long size;
unsigned int i;
@@ -1989,7 +1990,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
}
netdev_reset_queue(adapter->netdev);
- size = sizeof(struct e1000_buffer) * tx_ring->count;
+ size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
memset(tx_ring->buffer_info, 0, size);
/* Zero out the descriptor ring */
@@ -2053,6 +2054,28 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
+#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+static unsigned int e1000_frag_len(const struct e1000_adapter *a)
+{
+ return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+static void *e1000_alloc_frag(const struct e1000_adapter *a)
+{
+ unsigned int len = e1000_frag_len(a);
+ u8 *data = netdev_alloc_frag(len);
+
+ if (likely(data))
+ data += E1000_HEADROOM;
+ return data;
+}
+
+static void e1000_free_frag(const void *data)
+{
+ put_page(virt_to_head_page(data));
+}
+
/**
* e1000_clean_rx_ring - Free Rx Buffers per Queue
* @adapter: board private structure
@@ -2062,44 +2085,42 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
struct e1000_hw *hw = &adapter->hw;
- struct e1000_buffer *buffer_info;
+ struct e1000_rx_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
unsigned long size;
unsigned int i;
- /* Free all the Rx ring sk_buffs */
+ /* Free all the Rx netfrags */
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
- if (buffer_info->dma &&
- adapter->clean_rx == e1000_clean_rx_irq) {
- dma_unmap_single(&pdev->dev, buffer_info->dma,
- buffer_info->length,
- DMA_FROM_DEVICE);
- } else if (buffer_info->dma &&
- adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
- dma_unmap_page(&pdev->dev, buffer_info->dma,
- buffer_info->length,
- DMA_FROM_DEVICE);
+ if (adapter->clean_rx == e1000_clean_rx_irq) {
+ if (buffer_info->dma)
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ if (buffer_info->rxbuf.data) {
+ e1000_free_frag(buffer_info->rxbuf.data);
+ buffer_info->rxbuf.data = NULL;
+ }
+ } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
+ if (buffer_info->dma)
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ if (buffer_info->rxbuf.page) {
+ put_page(buffer_info->rxbuf.page);
+ buffer_info->rxbuf.page = NULL;
+ }
}
buffer_info->dma = 0;
- if (buffer_info->page) {
- put_page(buffer_info->page);
- buffer_info->page = NULL;
- }
- if (buffer_info->skb) {
- dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
- }
}
/* there also may be some cached data from a chained receive */
- if (rx_ring->rx_skb_top) {
- dev_kfree_skb(rx_ring->rx_skb_top);
- rx_ring->rx_skb_top = NULL;
- }
+ napi_free_frags(&adapter->napi);
+ rx_ring->rx_skb_top = NULL;
- size = sizeof(struct e1000_buffer) * rx_ring->count;
+ size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
memset(rx_ring->buffer_info, 0, size);
/* Zero out the descriptor ring */
@@ -2678,7 +2699,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
__be16 protocol)
{
struct e1000_context_desc *context_desc;
- struct e1000_buffer *buffer_info;
+ struct e1000_tx_buffer *buffer_info;
unsigned int i;
u32 cmd_length = 0;
u16 ipcse = 0, tucse, mss;
@@ -2750,7 +2771,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
__be16 protocol)
{
struct e1000_context_desc *context_desc;
- struct e1000_buffer *buffer_info;
+ struct e1000_tx_buffer *buffer_info;
unsigned int i;
u8 css;
u32 cmd_len = E1000_TXD_CMD_DEXT;
@@ -2809,7 +2830,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
{
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- struct e1000_buffer *buffer_info;
+ struct e1000_tx_buffer *buffer_info;
unsigned int len = skb_headlen(skb);
unsigned int offset = 0, size, count = 0, i;
unsigned int f, bytecount, segs;
@@ -2955,7 +2976,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_tx_desc *tx_desc = NULL;
- struct e1000_buffer *buffer_info;
+ struct e1000_tx_buffer *buffer_info;
u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
unsigned int i;
@@ -3373,7 +3394,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
- struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
+ struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
struct my_u { __le64 a; __le64 b; };
struct my_u *u = (struct my_u *)tx_desc;
const char *type;
@@ -3415,7 +3436,7 @@ rx_ring_summary:
for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
- struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
+ struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
struct my_u { __le64 a; __le64 b; };
struct my_u *u = (struct my_u *)rx_desc;
const char *type;
@@ -3429,7 +3450,7 @@ rx_ring_summary:
pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
i, le64_to_cpu(u->a), le64_to_cpu(u->b),
- (u64)buffer_info->dma, buffer_info->skb, type);
+ (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
} /* for */
/* dump the descriptor caches */
@@ -3811,7 +3832,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct e1000_tx_desc *tx_desc, *eop_desc;
- struct e1000_buffer *buffer_info;
+ struct e1000_tx_buffer *buffer_info;
unsigned int i, eop;
unsigned int count = 0;
unsigned int total_tx_bytes=0, total_tx_packets=0;
@@ -3949,12 +3970,12 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
}
/**
- * e1000_consume_page - helper function
+ * e1000_consume_page - helper function for jumbo Rx path
**/
-static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
+static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
u16 length)
{
- bi->page = NULL;
+ bi->rxbuf.page = NULL;
skb->len += length;
skb->data_len += length;
skb->truesize += PAGE_SIZE;
@@ -3981,6 +4002,113 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
}
/**
+ * e1000_tbi_adjust_stats
+ * @hw: Struct containing variables accessed by shared code
+ * @frame_len: The length of the frame in question
+ * @mac_addr: The Ethernet destination address of the frame in question
+ *
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ */
+static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
+ struct e1000_hw_stats *stats,
+ u32 frame_len, const u8 *mac_addr)
+{
+ u64 carry_bit;
+
+ /* First adjust the frame length. */
+ frame_len--;
+ /* We need to adjust the statistics counters, since the hardware
+ * counters overcount this packet as a CRC error and undercount
+ * the packet as a good packet
+ */
+ /* This packet should not be counted as a CRC error. */
+ stats->crcerrs--;
+ /* This packet does count as a Good Packet Received. */
+ stats->gprc++;
+
+ /* Adjust the Good Octets received counters */
+ carry_bit = 0x80000000 & stats->gorcl;
+ stats->gorcl += frame_len;
+ /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+ * Received Count) was one before the addition,
+ * AND it is zero after, then we lost the carry out,
+ * need to add one to Gorch (Good Octets Received Count High).
+ * This could be simplified if all environments supported
+ * 64-bit integers.
+ */
+ if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
+ stats->gorch++;
+ /* Is this a broadcast or multicast? Check broadcast first,
+ * since the test for a multicast frame will test positive on
+ * a broadcast frame.
+ */
+ if (is_broadcast_ether_addr(mac_addr))
+ stats->bprc++;
+ else if (is_multicast_ether_addr(mac_addr))
+ stats->mprc++;
+
+ if (frame_len == hw->max_frame_size) {
+ /* In this case, the hardware has overcounted the number of
+ * oversize frames.
+ */
+ if (stats->roc > 0)
+ stats->roc--;
+ }
+
+ /* Adjust the bin counters when the extra byte put the frame in the
+ * wrong bin. Remember that the frame_len was adjusted above.
+ */
+ if (frame_len == 64) {
+ stats->prc64++;
+ stats->prc127--;
+ } else if (frame_len == 127) {
+ stats->prc127++;
+ stats->prc255--;
+ } else if (frame_len == 255) {
+ stats->prc255++;
+ stats->prc511--;
+ } else if (frame_len == 511) {
+ stats->prc511++;
+ stats->prc1023--;
+ } else if (frame_len == 1023) {
+ stats->prc1023++;
+ stats->prc1522--;
+ } else if (frame_len == 1522) {
+ stats->prc1522++;
+ }
+}
+
+static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
+ u8 status, u8 errors,
+ u32 length, const u8 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u8 last_byte = *(data + length - 1);
+
+ if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&adapter->stats_lock, irq_flags);
+ e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
+ spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
+
+ return true;
+ }
+
+ return false;
+}
+
+static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
+ unsigned int bufsz)
+{
+ struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz);
+
+ if (unlikely(!skb))
+ adapter->alloc_rx_buff_failed++;
+ return skb;
+}
+
+/**
* e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure
* @rx_ring: ring to clean
@@ -3994,12 +4122,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do)
{
- struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc, *next_rxd;
- struct e1000_buffer *buffer_info, *next_buffer;
- unsigned long irq_flags;
+ struct e1000_rx_buffer *buffer_info, *next_buffer;
u32 length;
unsigned int i;
int cleaned_count = 0;
@@ -4020,8 +4146,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
rmb(); /* read descriptor and rx_buffer_info after status DD */
status = rx_desc->status;
- skb = buffer_info->skb;
- buffer_info->skb = NULL;
if (++i == rx_ring->count) i = 0;
next_rxd = E1000_RX_DESC(*rx_ring, i);
@@ -4032,7 +4156,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
dma_unmap_page(&pdev->dev, buffer_info->dma,
- buffer_info->length, DMA_FROM_DEVICE);
+ adapter->rx_buffer_len, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -4040,25 +4164,15 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* errors is only valid for DD + EOP descriptors */
if (unlikely((status & E1000_RXD_STAT_EOP) &&
(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
- u8 *mapped;
- u8 last_byte;
-
- mapped = page_address(buffer_info->page);
- last_byte = *(mapped + length - 1);
- if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
- last_byte)) {
- spin_lock_irqsave(&adapter->stats_lock,
- irq_flags);
- e1000_tbi_adjust_stats(hw, &adapter->stats,
- length, mapped);
- spin_unlock_irqrestore(&adapter->stats_lock,
- irq_flags);
+ u8 *mapped = page_address(buffer_info->rxbuf.page);
+
+ if (e1000_tbi_should_accept(adapter, status,
+ rx_desc->errors,
+ length, mapped)) {
length--;
+ } else if (netdev->features & NETIF_F_RXALL) {
+ goto process_skb;
} else {
- if (netdev->features & NETIF_F_RXALL)
- goto process_skb;
- /* recycle both page and skb */
- buffer_info->skb = skb;
/* an error means any chain goes out the window
* too
*/
@@ -4075,16 +4189,18 @@ process_skb:
/* this descriptor is only the beginning (or middle) */
if (!rxtop) {
/* this is the beginning of a chain */
- rxtop = skb;
- skb_fill_page_desc(rxtop, 0, buffer_info->page,
+ rxtop = napi_get_frags(&adapter->napi);
+ if (!rxtop)
+ break;
+
+ skb_fill_page_desc(rxtop, 0,
+ buffer_info->rxbuf.page,
0, length);
} else {
/* this is the middle of a chain */
skb_fill_page_desc(rxtop,
skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0, length);
- /* re-use the skb, only consumed the page */
- buffer_info->skb = skb;
+ buffer_info->rxbuf.page, 0, length);
}
e1000_consume_page(buffer_info, rxtop, length);
goto next_desc;
@@ -4093,32 +4209,51 @@ process_skb:
/* end of the chain */
skb_fill_page_desc(rxtop,
skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0, length);
- /* re-use the current skb, we only consumed the
- * page
- */
- buffer_info->skb = skb;
+ buffer_info->rxbuf.page, 0, length);
skb = rxtop;
rxtop = NULL;
e1000_consume_page(buffer_info, skb, length);
} else {
+ struct page *p;
/* no chain, got EOP, this buf is the packet
* copybreak to save the put_page/alloc_page
*/
- if (length <= copybreak &&
- skb_tailroom(skb) >= length) {
+ p = buffer_info->rxbuf.page;
+ if (length <= copybreak) {
u8 *vaddr;
- vaddr = kmap_atomic(buffer_info->page);
+
+ if (likely(!(netdev->features & NETIF_F_RXFCS)))
+ length -= 4;
+ skb = e1000_alloc_rx_skb(adapter,
+ length);
+ if (!skb)
+ break;
+
+ vaddr = kmap_atomic(p);
memcpy(skb_tail_pointer(skb), vaddr,
length);
kunmap_atomic(vaddr);
/* re-use the page, so don't erase
- * buffer_info->page
+ * buffer_info->rxbuf.page
*/
skb_put(skb, length);
+ e1000_rx_checksum(adapter,
+ status | rx_desc->errors << 24,
+ le16_to_cpu(rx_desc->csum), skb);
+
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ e1000_receive_skb(adapter, status,
+ rx_desc->special, skb);
+ goto next_desc;
} else {
- skb_fill_page_desc(skb, 0,
- buffer_info->page, 0,
+ skb = napi_get_frags(&adapter->napi);
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+ skb_fill_page_desc(skb, 0, p, 0,
length);
e1000_consume_page(buffer_info, skb,
length);
@@ -4137,14 +4272,14 @@ process_skb:
pskb_trim(skb, skb->len - 4);
total_rx_packets++;
- /* eth type trans needs skb->data to point to something */
- if (!pskb_may_pull(skb, ETH_HLEN)) {
- e_err(drv, "pskb_may_pull failed.\n");
- dev_kfree_skb(skb);
- goto next_desc;
+ if (status & E1000_RXD_STAT_VP) {
+ __le16 vlan = rx_desc->special;
+ u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
- e1000_receive_skb(adapter, status, rx_desc->special, skb);
+ napi_gro_frags(&adapter->napi);
next_desc:
rx_desc->status = 0;
@@ -4175,25 +4310,25 @@ next_desc:
/* this should improve performance for small packets with large amounts
* of reassembly being done in the stack
*/
-static void e1000_check_copybreak(struct net_device *netdev,
- struct e1000_buffer *buffer_info,
- u32 length, struct sk_buff **skb)
+static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
+ struct e1000_rx_buffer *buffer_info,
+ u32 length, const void *data)
{
- struct sk_buff *new_skb;
+ struct sk_buff *skb;
if (length > copybreak)
- return;
+ return NULL;
- new_skb = netdev_alloc_skb_ip_align(netdev, length);
- if (!new_skb)
- return;
+ skb = e1000_alloc_rx_skb(adapter, length);
+ if (!skb)
+ return NULL;
+
+ dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
+ length, DMA_FROM_DEVICE);
- skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
- (*skb)->data - NET_IP_ALIGN,
- length + NET_IP_ALIGN);
- /* save the skb in buffer_info as good */
- buffer_info->skb = *skb;
- *skb = new_skb;
+ memcpy(skb_put(skb, length), data, length);
+
+ return skb;
}
/**
@@ -4207,12 +4342,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do)
{
- struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc, *next_rxd;
- struct e1000_buffer *buffer_info, *next_buffer;
- unsigned long flags;
+ struct e1000_rx_buffer *buffer_info, *next_buffer;
u32 length;
unsigned int i;
int cleaned_count = 0;
@@ -4225,6 +4358,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
while (rx_desc->status & E1000_RXD_STAT_DD) {
struct sk_buff *skb;
+ u8 *data;
u8 status;
if (*work_done >= work_to_do)
@@ -4233,10 +4367,27 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
rmb(); /* read descriptor and rx_buffer_info after status DD */
status = rx_desc->status;
- skb = buffer_info->skb;
- buffer_info->skb = NULL;
+ length = le16_to_cpu(rx_desc->length);
+
+ data = buffer_info->rxbuf.data;
+ prefetch(data);
+ skb = e1000_copybreak(adapter, buffer_info, length, data);
+ if (!skb) {
+ unsigned int frag_len = e1000_frag_len(adapter);
+
+ skb = build_skb(data - E1000_HEADROOM, frag_len);
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
- prefetch(skb->data - NET_IP_ALIGN);
+ skb_reserve(skb, E1000_HEADROOM);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+ buffer_info->rxbuf.data = NULL;
+ }
if (++i == rx_ring->count) i = 0;
next_rxd = E1000_RX_DESC(*rx_ring, i);
@@ -4246,11 +4397,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- dma_unmap_single(&pdev->dev, buffer_info->dma,
- buffer_info->length, DMA_FROM_DEVICE);
- buffer_info->dma = 0;
- length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
* packet, if that's the case we need to toss it. In fact, we need
* to toss every packet with the EOP bit clear and the next
@@ -4262,29 +4409,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
if (adapter->discarding) {
/* All receives must fit into a single buffer */
- e_dbg("Receive packet consumed multiple buffers\n");
- /* recycle */
- buffer_info->skb = skb;
+ netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
+ dev_kfree_skb(skb);
if (status & E1000_RXD_STAT_EOP)
adapter->discarding = false;
goto next_desc;
}
if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
- u8 last_byte = *(skb->data + length - 1);
- if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
- last_byte)) {
- spin_lock_irqsave(&adapter->stats_lock, flags);
- e1000_tbi_adjust_stats(hw, &adapter->stats,
- length, skb->data);
- spin_unlock_irqrestore(&adapter->stats_lock,
- flags);
+ if (e1000_tbi_should_accept(adapter, status,
+ rx_desc->errors,
+ length, data)) {
length--;
+ } else if (netdev->features & NETIF_F_RXALL) {
+ goto process_skb;
} else {
- if (netdev->features & NETIF_F_RXALL)
- goto process_skb;
- /* recycle */
- buffer_info->skb = skb;
+ dev_kfree_skb(skb);
goto next_desc;
}
}
@@ -4299,9 +4439,10 @@ process_skb:
*/
length -= 4;
- e1000_check_copybreak(netdev, buffer_info, length, &skb);
-
- skb_put(skb, length);
+ if (buffer_info->rxbuf.data == NULL)
+ skb_put(skb, length);
+ else /* copybreak skb */
+ skb_trim(skb, length);
/* Receive Checksum Offload */
e1000_rx_checksum(adapter,
@@ -4347,38 +4488,19 @@ static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring, int cleaned_count)
{
- struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc;
- struct e1000_buffer *buffer_info;
- struct sk_buff *skb;
+ struct e1000_rx_buffer *buffer_info;
unsigned int i;
- unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
while (cleaned_count--) {
- skb = buffer_info->skb;
- if (skb) {
- skb_trim(skb, 0);
- goto check_page;
- }
-
- skb = netdev_alloc_skb_ip_align(netdev, bufsz);
- if (unlikely(!skb)) {
- /* Better luck next round */
- adapter->alloc_rx_buff_failed++;
- break;
- }
-
- buffer_info->skb = skb;
- buffer_info->length = adapter->rx_buffer_len;
-check_page:
/* allocate a new page if necessary */
- if (!buffer_info->page) {
- buffer_info->page = alloc_page(GFP_ATOMIC);
- if (unlikely(!buffer_info->page)) {
+ if (!buffer_info->rxbuf.page) {
+ buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
+ if (unlikely(!buffer_info->rxbuf.page)) {
adapter->alloc_rx_buff_failed++;
break;
}
@@ -4386,17 +4508,15 @@ check_page:
if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
- buffer_info->page, 0,
- buffer_info->length,
+ buffer_info->rxbuf.page, 0,
+ adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- put_page(buffer_info->page);
- dev_kfree_skb(skb);
- buffer_info->page = NULL;
- buffer_info->skb = NULL;
+ put_page(buffer_info->rxbuf.page);
+ buffer_info->rxbuf.page = NULL;
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
- break; /* while !buffer_info->skb */
+ break;
}
}
@@ -4432,11 +4552,9 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
int cleaned_count)
{
struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_rx_desc *rx_desc;
- struct e1000_buffer *buffer_info;
- struct sk_buff *skb;
+ struct e1000_rx_buffer *buffer_info;
unsigned int i;
unsigned int bufsz = adapter->rx_buffer_len;
@@ -4444,57 +4562,52 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info = &rx_ring->buffer_info[i];
while (cleaned_count--) {
- skb = buffer_info->skb;
- if (skb) {
- skb_trim(skb, 0);
- goto map_skb;
- }
+ void *data;
- skb = netdev_alloc_skb_ip_align(netdev, bufsz);
- if (unlikely(!skb)) {
+ if (buffer_info->rxbuf.data)
+ goto skip;
+
+ data = e1000_alloc_frag(adapter);
+ if (!data) {
/* Better luck next round */
adapter->alloc_rx_buff_failed++;
break;
}
/* Fix for errata 23, can't cross 64kB boundary */
- if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
- struct sk_buff *oldskb = skb;
+ if (!e1000_check_64k_bound(adapter, data, bufsz)) {
+ void *olddata = data;
e_err(rx_err, "skb align check failed: %u bytes at "
- "%p\n", bufsz, skb->data);
+ "%p\n", bufsz, data);
/* Try again, without freeing the previous */
- skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+ data = e1000_alloc_frag(adapter);
/* Failed allocation, critical failure */
- if (!skb) {
- dev_kfree_skb(oldskb);
+ if (!data) {
+ e1000_free_frag(olddata);
adapter->alloc_rx_buff_failed++;
break;
}
- if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+ if (!e1000_check_64k_bound(adapter, data, bufsz)) {
/* give up */
- dev_kfree_skb(skb);
- dev_kfree_skb(oldskb);
+ e1000_free_frag(data);
+ e1000_free_frag(olddata);
adapter->alloc_rx_buff_failed++;
- break; /* while !buffer_info->skb */
+ break;
}
/* Use new allocation */
- dev_kfree_skb(oldskb);
+ e1000_free_frag(olddata);
}
- buffer_info->skb = skb;
- buffer_info->length = adapter->rx_buffer_len;
-map_skb:
buffer_info->dma = dma_map_single(&pdev->dev,
- skb->data,
- buffer_info->length,
+ data,
+ adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- dev_kfree_skb(skb);
- buffer_info->skb = NULL;
+ e1000_free_frag(data);
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
- break; /* while !buffer_info->skb */
+ break;
}
/* XXX if it was allocated cleanly it will never map to a
@@ -4508,17 +4621,20 @@ map_skb:
e_err(rx_err, "dma align check failed: %u bytes at "
"%p\n", adapter->rx_buffer_len,
(void *)(unsigned long)buffer_info->dma);
- dev_kfree_skb(skb);
- buffer_info->skb = NULL;
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
+
+ e1000_free_frag(data);
+ buffer_info->rxbuf.data = NULL;
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
- break; /* while !buffer_info->skb */
+ break;
}
+ buffer_info->rxbuf.data = data;
+ skip:
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
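Editor's note: the new e1000 RX path replaces per-buffer sk_buffs with page fragments sized so build_skb() can later wrap them: headroom (NET_SKB_PAD + NET_IP_ALIGN) in front of the hardware buffer, plus room for struct skb_shared_info behind it, both cache-aligned, exactly as e1000_frag_len() above computes. A userspace sketch of that size calculation; the alignment and pad constants are typical stand-ins, not values from a specific kernel config.

#include <stdio.h>

/* Typical values; the kernel derives these from the architecture. */
#define SMP_CACHE_BYTES 64
#define NET_SKB_PAD     64
#define NET_IP_ALIGN    2
#define SHARED_INFO_SZ  320 /* stand-in for sizeof(struct skb_shared_info) */

#define SKB_DATA_ALIGN(x) (((x) + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1))
#define HEADROOM          (NET_SKB_PAD + NET_IP_ALIGN)

/* Mirrors e1000_frag_len(): HW buffer + headroom, plus shinfo, both aligned. */
static unsigned int frag_len(unsigned int rx_buffer_len)
{
	return SKB_DATA_ALIGN(rx_buffer_len + HEADROOM) +
	       SKB_DATA_ALIGN(SHARED_INFO_SZ);
}

int main(void)
{
	unsigned int rx_buffer_len = 2048; /* E1000_RXBUFFER_2048 */

	printf("frag len for %u-byte RX buffers: %u\n",
	       rx_buffer_len, frag_len(rx_buffer_len));
	return 0;
}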
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 801da392a20e..f1e33f896439 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -144,6 +144,8 @@ enum i40e_state_t {
__I40E_PTP_TX_IN_PROGRESS,
__I40E_BAD_EEPROM,
__I40E_DOWN_REQUESTED,
+ __I40E_FD_FLUSH_REQUESTED,
+ __I40E_RESET_FAILED,
};
enum i40e_interrupt_policy {
@@ -250,6 +252,11 @@ struct i40e_pf {
u16 fdir_pf_active_filters;
u16 fd_sb_cnt_idx;
u16 fd_atr_cnt_idx;
+ unsigned long fd_flush_timestamp;
+ u32 fd_flush_cnt;
+ u32 fd_add_err;
+ u32 fd_atr_cnt;
+ u32 fd_tcp_rule;
#ifdef CONFIG_I40E_VXLAN
__be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
@@ -310,6 +317,7 @@ struct i40e_pf {
u32 tx_timeout_count;
u32 tx_timeout_recovery_level;
unsigned long tx_timeout_last_recovery;
+ u32 tx_sluggish_count;
u32 hw_csum_rx_error;
u32 led_status;
u16 corer_count; /* Core reset count */
@@ -608,6 +616,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
int i40e_get_current_fd_count(struct i40e_pf *pf);
int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
+int i40e_get_current_atr_cnt(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index b29c157b1f57..72f5d25a222f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -840,7 +840,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
/* bump the tail */
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
- i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
(hw->aq.asq.next_to_use)++;
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
@@ -891,7 +892,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n");
- i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
/* update the error if time out occurred */
if ((!cmd_completed) &&
@@ -987,7 +988,8 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
e->msg_size);
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
- i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index df43e7c6777c..30056b25d94e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -75,13 +75,15 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
* @mask: debug mask
* @desc: pointer to admin queue descriptor
* @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
*
* Dumps debug log about adminq command with descriptor contents.
**/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
- void *buffer)
+ void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u16 len = le16_to_cpu(aq_desc->datalen);
u8 *aq_buffer = (u8 *)buffer;
u32 data[4];
u32 i = 0;
@@ -105,7 +107,9 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
memset(data, 0, sizeof(data));
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
- for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
+ if (buf_len < len)
+ len = buf_len;
+ for (i = 0; i < len; i++) {
data[((i % 16) / 4)] |=
((u32)aq_buffer[i]) << (8 * (i % 4));
if ((i % 16) == 15) {
@@ -748,6 +752,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
switch (hw->phy.link_info.phy_type) {
case I40E_PHY_TYPE_10GBASE_SR:
case I40E_PHY_TYPE_10GBASE_LR:
+ case I40E_PHY_TYPE_1000BASE_SX:
+ case I40E_PHY_TYPE_1000BASE_LX:
case I40E_PHY_TYPE_40GBASE_SR4:
case I40E_PHY_TYPE_40GBASE_LR4:
media = I40E_MEDIA_TYPE_FIBER;
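Editor's note: the i40e_debug_aq() change above guards the hex dump against a descriptor whose datalen claims more bytes than the caller's buffer actually holds, so the trusted bound becomes min(datalen, buf_len). A small userspace sketch of the same defensive pattern, using a hypothetical helper rather than the driver's code:

#include <stdint.h>
#include <stdio.h>

/* Dump at most buf_len bytes, even if the device-reported length is larger. */
static void dump_buf(const uint8_t *buf, uint16_t reported_len, uint16_t buf_len)
{
	uint16_t len = reported_len;

	if (len > buf_len)
		len = buf_len; /* never read past what we actually own */

	for (uint16_t i = 0; i < len; i++)
		printf("%02x%s", buf[i], (i % 16 == 15) ? "\n" : " ");
	if (len % 16)
		printf("\n");
}

int main(void)
{
	uint8_t buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	dump_buf(buf, 64 /* bogus device-reported length */, sizeof(buf));
	return 0;
}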
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 5a0cabeb35ed..7067f4b9159c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1356,6 +1356,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"emp reset count: %d\n", pf->empr_count);
dev_info(&pf->pdev->dev,
"pf reset count: %d\n", pf->pfr_count);
+ dev_info(&pf->pdev->dev,
+ "pf tx sluggish count: %d\n",
+ pf->tx_sluggish_count);
} else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
struct i40e_aqc_query_port_ets_config_resp *bw_data;
struct i40e_dcbx_config *cfg =
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index e8ba7470700a..1dda467ae1ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -145,6 +145,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_jabber", stats.rx_jabber),
I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
@@ -312,7 +313,10 @@ static int i40e_get_settings(struct net_device *netdev,
break;
case I40E_PHY_TYPE_10GBASE_SR:
case I40E_PHY_TYPE_10GBASE_LR:
+ case I40E_PHY_TYPE_1000BASE_SX:
+ case I40E_PHY_TYPE_1000BASE_LX:
ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
break;
case I40E_PHY_TYPE_10GBASE_CR1_CU:
case I40E_PHY_TYPE_10GBASE_CR1:
@@ -351,7 +355,8 @@ static int i40e_get_settings(struct net_device *netdev,
break;
default:
/* if we got here and link is up something bad is afoot */
- WARN_ON(link_up);
+ netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
+ hw_link_info->phy_type);
}
no_valid_phy_type:
@@ -461,7 +466,8 @@ static int i40e_set_settings(struct net_device *netdev,
if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
- hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE)
+ hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
+ hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
return -EOPNOTSUPP;
/* get our own copy of the bits to check against */
@@ -492,11 +498,10 @@ static int i40e_set_settings(struct net_device *netdev,
if (status)
return -EAGAIN;
- /* Copy link_speed and abilities to config in case they are not
+ /* Copy abilities to config in case autoneg is not
* set below
*/
memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
- config.link_speed = abilities.link_speed;
config.abilities = abilities.abilities;
/* Check autoneg */
@@ -533,42 +538,38 @@ static int i40e_set_settings(struct net_device *netdev,
return -EINVAL;
if (advertise & ADVERTISED_100baseT_Full)
- if (!(abilities.link_speed & I40E_LINK_SPEED_100MB)) {
- config.link_speed |= I40E_LINK_SPEED_100MB;
- change = true;
- }
+ config.link_speed |= I40E_LINK_SPEED_100MB;
if (advertise & ADVERTISED_1000baseT_Full ||
advertise & ADVERTISED_1000baseKX_Full)
- if (!(abilities.link_speed & I40E_LINK_SPEED_1GB)) {
- config.link_speed |= I40E_LINK_SPEED_1GB;
- change = true;
- }
+ config.link_speed |= I40E_LINK_SPEED_1GB;
if (advertise & ADVERTISED_10000baseT_Full ||
advertise & ADVERTISED_10000baseKX4_Full ||
advertise & ADVERTISED_10000baseKR_Full)
- if (!(abilities.link_speed & I40E_LINK_SPEED_10GB)) {
- config.link_speed |= I40E_LINK_SPEED_10GB;
- change = true;
- }
+ config.link_speed |= I40E_LINK_SPEED_10GB;
if (advertise & ADVERTISED_40000baseKR4_Full ||
advertise & ADVERTISED_40000baseCR4_Full ||
advertise & ADVERTISED_40000baseSR4_Full ||
advertise & ADVERTISED_40000baseLR4_Full)
- if (!(abilities.link_speed & I40E_LINK_SPEED_40GB)) {
- config.link_speed |= I40E_LINK_SPEED_40GB;
- change = true;
- }
+ config.link_speed |= I40E_LINK_SPEED_40GB;
- if (change) {
+ if (change || (abilities.link_speed != config.link_speed)) {
/* copy over the rest of the abilities */
config.phy_type = abilities.phy_type;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
- /* If link is up set link and an so changes take effect */
- if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
- config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ /* set link and auto negotiation so changes take effect */
+ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ /* If link is up put link down */
+ if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
+ /* Tell the OS link is going down, the link will go
+ * back up when fw says it is ready asynchronously
+ */
+ netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
/* make the aq call */
status = i40e_aq_set_phy_config(hw, &config, NULL);
@@ -685,6 +686,13 @@ static int i40e_set_pauseparam(struct net_device *netdev,
else
return -EINVAL;
+ /* Tell the OS link is going down, the link will go back up when fw
+ * says it is ready asynchronously
+ */
+ netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n");
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
/* Set the fc mode and only restart AN if link is up */
status = i40e_set_fc(hw, &aq_failures, link_up);
@@ -1977,6 +1985,13 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
struct i40e_pf *pf = vsi->back;
int ret = 0;
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+ return -EBUSY;
+
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ return -EBUSY;
+
ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
i40e_fdir_check_and_reenable(pf);
@@ -2010,6 +2025,13 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
return -ENOSPC;
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+ return -EBUSY;
+
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ return -EBUSY;
+
fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
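Both filter paths now refuse to touch the table while a reset or flush is in flight and let the caller retry. A small sketch of that guard, with hypothetical state bits standing in for the __I40E_* flags:

#include <errno.h>
#include <stdio.h>

/* Hypothetical state bits mirroring the pending-reset / pending-flush
 * checks added in this hunk.
 */
enum {
	STATE_RESET_PENDING = 1u << 0,
	STATE_FLUSH_PENDING = 1u << 1,
};

static int add_filter(unsigned int state)
{
	/* Refuse to touch the filter table while the device is being
	 * reset or the table is being flushed; the caller can retry.
	 */
	if (state & (STATE_RESET_PENDING | STATE_FLUSH_PENDING))
		return -EBUSY;

	/* ... program the filter here ... */
	return 0;
}

int main(void)
{
	printf("idle: %d\n", add_filter(0));
	printf("flushing: %d\n", add_filter(STATE_FLUSH_PENDING));
	return 0;
}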
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index eddec6ba095b..ed5f1c15fb0f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -37,9 +37,9 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
-#define DRV_VERSION_MAJOR 0
-#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 21
+#define DRV_VERSION_MAJOR 1
+#define DRV_VERSION_MINOR 0
+#define DRV_VERSION_BUILD 11
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1239,8 +1239,11 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
* i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
* @vsi: the PF Main VSI - inappropriate for any other VSI
* @macaddr: the MAC address
+ *
+ * Some older firmware configurations set up a default promiscuous VLAN
+ * filter that needs to be removed.
**/
-static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
+static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
struct i40e_aqc_remove_macvlan_element_data element;
struct i40e_pf *pf = vsi->back;
@@ -1248,15 +1251,18 @@ static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
/* Only appropriate for the PF main VSI */
if (vsi->type != I40E_VSI_MAIN)
- return;
+ return -EINVAL;
+ memset(&element, 0, sizeof(element));
ether_addr_copy(element.mac_addr, macaddr);
element.vlan_tag = 0;
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
if (aq_ret)
- dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n");
+ return -ENOENT;
+
+ return 0;
}
/**
@@ -1385,18 +1391,30 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
struct sockaddr *addr = p;
struct i40e_mac_filter *f;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);
+ if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
+ netdev_info(netdev, "already using mac address %pM\n",
+ addr->sa_data);
+ return 0;
+ }
if (test_bit(__I40E_DOWN, &vsi->back->state) ||
test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
return -EADDRNOTAVAIL;
+ if (ether_addr_equal(hw->mac.addr, addr->sa_data))
+ netdev_info(netdev, "returning to hw mac address %pM\n",
+ hw->mac.addr);
+ else
+ netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
+
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
ret = i40e_aq_mac_address_write(&vsi->back->hw,
@@ -1410,25 +1428,34 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
}
}
- f = i40e_find_mac(vsi, addr->sa_data, false, true);
- if (!f) {
- /* In order to be sure to not drop any packets, add the
- * new address first then delete the old one.
- */
- f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
- false, false);
- if (!f)
- return -ENOMEM;
+ if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
+ struct i40e_aqc_remove_macvlan_element_data element;
- i40e_sync_vsi_filters(vsi);
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, netdev->dev_addr);
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+ } else {
i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
false, false);
- i40e_sync_vsi_filters(vsi);
}
- f->is_laa = true;
- if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
+ struct i40e_aqc_add_macvlan_element_data element;
+
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, hw->mac.addr);
+ element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
+ i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+ } else {
+ f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
+ false, false);
+ if (f)
+ f->is_laa = true;
+ }
+
+ i40e_sync_vsi_filters(vsi);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
return 0;
}
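The reworked i40e_set_mac() boils down to classifying the request before touching any filters: a no-op if the address is unchanged, a restore of the hardware (permanent) address, or a new locally administered address. A hedged userspace sketch of just that classification, with illustrative names rather than the driver's helpers:

#include <stdio.h>
#include <string.h>

/* Decide what a set-MAC request means, mirroring the checks added in
 * this hunk: a no-op, a return to the hardware (permanent) address, or
 * a new locally administered address. All names here are illustrative.
 */
enum mac_action { MAC_NOOP, MAC_RESTORE_HW, MAC_SET_LAA };

static enum mac_action classify_mac(const unsigned char cur[6],
				    const unsigned char hw[6],
				    const unsigned char req[6])
{
	if (!memcmp(cur, req, 6))
		return MAC_NOOP;	/* already using this address */
	if (!memcmp(hw, req, 6))
		return MAC_RESTORE_HW;	/* going back to the hw address */
	return MAC_SET_LAA;
}

int main(void)
{
	unsigned char hw[6]  = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };
	unsigned char cur[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };

	printf("%d\n", classify_mac(cur, hw, hw));   /* MAC_RESTORE_HW */
	printf("%d\n", classify_mac(cur, hw, cur));  /* MAC_NOOP */
	return 0;
}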
@@ -1796,9 +1823,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
kfree(add_list);
add_list = NULL;
- if (add_happened && (!aq_ret)) {
- /* do nothing */;
- } else if (add_happened && (aq_ret)) {
+ if (add_happened && aq_ret &&
+ pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
dev_info(&pf->pdev->dev,
"add filter failed, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
@@ -4480,11 +4506,26 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
netif_carrier_on(vsi->netdev);
} else if (vsi->netdev) {
i40e_print_link_message(vsi, false);
+ /* need to check for qualified module here */
+ if ((pf->hw.phy.link_info.link_info &
+ I40E_AQ_MEDIA_AVAILABLE) &&
+ (!(pf->hw.phy.link_info.an_info &
+ I40E_AQ_QUALIFIED_MODULE)))
+ netdev_err(vsi->netdev,
+ "the driver failed to link because an unqualified module was detected.");
}
/* replay FDIR SB filters */
- if (vsi->type == I40E_VSI_FDIR)
+ if (vsi->type == I40E_VSI_FDIR) {
+ /* reset fd counters */
+ pf->fd_add_err = pf->fd_atr_cnt = 0;
+ if (pf->fd_tcp_rule > 0) {
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
+ pf->fd_tcp_rule = 0;
+ }
i40e_fdir_filter_restore(vsi);
+ }
i40e_service_event_schedule(pf);
return 0;
@@ -5125,6 +5166,7 @@ int i40e_get_current_fd_count(struct i40e_pf *pf)
I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
return fcnt_prog;
}
+
/**
* i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
* @pf: board private structure
@@ -5133,15 +5175,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
u32 fcnt_prog, fcnt_avail;
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ return;
+
/* Check if FD SB or ATR was auto disabled and if there is enough room
* to re-enable
*/
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->flags & I40E_FLAG_FD_SB_ENABLED))
- return;
fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
fcnt_avail = pf->fdir_pf_filter_count;
- if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
+ if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
+ (pf->fd_add_err == 0) ||
+ (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
@@ -5158,23 +5202,84 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
}
}
+#define I40E_MIN_FD_FLUSH_INTERVAL 10
+/**
+ * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
+ * @pf: board private structure
+ **/
+static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
+{
+ int flush_wait_retry = 50;
+ int reg;
+
+ if (time_after(jiffies, pf->fd_flush_timestamp +
+ (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
+ set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+ pf->fd_flush_timestamp = jiffies;
+ pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ /* flush all filters */
+ wr32(&pf->hw, I40E_PFQF_CTL_1,
+ I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
+ i40e_flush(&pf->hw);
+ pf->fd_flush_cnt++;
+ pf->fd_add_err = 0;
+ do {
+ /* Check FD flush status every 5-6msec */
+ usleep_range(5000, 6000);
+ reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
+ if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
+ break;
+ } while (flush_wait_retry--);
+ if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
+ dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
+ } else {
+ /* replay sideband filters */
+ i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
+
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+ dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+ }
+ }
+}
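The flush path combines jiffies-based rate limiting with a bounded register-poll loop. A minimal userspace sketch of the poll-with-retries part, where read_flush_busy() is only a stand-in for the I40E_PFQF_CTL_1 read:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Poll a completion flag at a fixed interval with a bounded number of
 * retries, as the flush loop above does with the CLEARFDTABLE bit.
 * read_flush_busy() stands in for the register read.
 */
static bool read_flush_busy(void)
{
	static int countdown = 3;	/* pretend hw finishes after 3 polls */
	return --countdown > 0;
}

static int wait_for_flush(int retries)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 5 * 1000 * 1000 };

	while (retries--) {
		nanosleep(&ts, NULL);	/* ~5 ms between polls */
		if (!read_flush_busy())
			return 0;	/* hardware reports done */
	}
	return -1;			/* still busy: warn and back off */
}

int main(void)
{
	printf("flush %s\n", wait_for_flush(50) ? "timed out" : "completed");
	return 0;
}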
+
+/**
+ * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
+ * @pf: board private structure
+ **/
+int i40e_get_current_atr_cnt(struct i40e_pf *pf)
+{
+ return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
+}
+
+/* We can see up to 256 filter programming descriptors in transit if the
+ * filters are being applied really fast, before we see the first filter
+ * miss error on Rx queue 0. Accumulating enough error messages before
+ * reacting makes sure we don't cause a flush too often.
+ */
+#define I40E_MAX_FD_PROGRAM_ERROR 256
+
/**
* i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
* @pf: board private structure
**/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
- if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
- return;
/* if interface is down do nothing */
if (test_bit(__I40E_DOWN, &pf->state))
return;
+
+ if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
+ (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
+ (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
+ i40e_fdir_flush_and_replay(pf);
+
i40e_fdir_check_and_reenable(pf);
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->flags & I40E_FLAG_FD_SB_ENABLED))
- pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
}
/**
@@ -5184,7 +5289,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
**/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
- if (!vsi)
+ if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
return;
switch (vsi->type) {
@@ -5420,6 +5525,13 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
memcpy(&pf->hw.phy.link_info_old, hw_link_info,
sizeof(pf->hw.phy.link_info_old));
+ /* check for unqualified module, if link is down */
+ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
+ (!(status->link_info & I40E_AQ_LINK_UP)))
+ dev_err(&pf->pdev->dev,
+ "The driver failed to link because an unqualified module was detected.\n");
+
/* update link status */
hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
@@ -5456,6 +5568,10 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
u32 oldval;
u32 val;
+ /* Do not run clean AQ when PF reset fails */
+ if (test_bit(__I40E_RESET_FAILED, &pf->state))
+ return;
+
/* check for error indications */
val = rd32(&pf->hw, pf->hw.aq.arq.len);
oldval = val;
@@ -5861,19 +5977,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
ret = i40e_pf_reset(hw);
if (ret) {
dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
- goto end_core_reset;
+ set_bit(__I40E_RESET_FAILED, &pf->state);
+ goto clear_recovery;
}
pf->pfr_count++;
if (test_bit(__I40E_DOWN, &pf->state))
- goto end_core_reset;
+ goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
ret = i40e_init_adminq(&pf->hw);
if (ret) {
dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
- goto end_core_reset;
+ goto clear_recovery;
}
/* re-verify the eeprom if we just had an EMP reset */
@@ -5991,6 +6108,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
i40e_send_version(pf);
end_core_reset:
+ clear_bit(__I40E_RESET_FAILED, &pf->state);
+clear_recovery:
clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}
@@ -6036,9 +6155,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
I40E_GL_MDET_TX_EVENT_SHIFT;
u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
I40E_GL_MDET_TX_QUEUE_SHIFT;
- dev_info(&pf->pdev->dev,
- "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
- event, queue, pf_num, vf_num);
+ if (netif_msg_tx_err(pf))
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
+ event, queue, pf_num, vf_num);
wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
mdd_detected = true;
}
@@ -6050,9 +6169,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
I40E_GL_MDET_RX_EVENT_SHIFT;
u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
I40E_GL_MDET_RX_QUEUE_SHIFT;
- dev_info(&pf->pdev->dev,
- "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
- event, queue, func);
+ if (netif_msg_rx_err(pf))
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
+ event, queue, func);
wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
mdd_detected = true;
}
@@ -6061,17 +6180,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
reg = rd32(hw, I40E_PF_MDET_TX);
if (reg & I40E_PF_MDET_TX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
- dev_info(&pf->pdev->dev,
- "MDD TX event is for this function 0x%08x, requesting PF reset.\n",
- reg);
+ dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
pf_mdd_detected = true;
}
reg = rd32(hw, I40E_PF_MDET_RX);
if (reg & I40E_PF_MDET_RX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
- dev_info(&pf->pdev->dev,
- "MDD RX event is for this function 0x%08x, requesting PF reset.\n",
- reg);
+ dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
pf_mdd_detected = true;
}
/* Queue belongs to the PF, initiate a reset */
@@ -6088,14 +6203,16 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
if (reg & I40E_VP_MDET_TX_VALID_MASK) {
wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
}
reg = rd32(hw, I40E_VP_MDET_RX(i));
if (reg & I40E_VP_MDET_RX_VALID_MASK) {
wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
+ dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
+ i);
}
if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
@@ -7086,6 +7203,11 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
}
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ /* reset fd counters */
+ pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
+ pf->fdir_pf_active_filters = 0;
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
/* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
@@ -7352,7 +7474,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
- .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofck,
+ .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
#ifdef CONFIG_I40E_VXLAN
.ndo_add_vxlan_port = i40e_add_vxlan_port,
.ndo_del_vxlan_port = i40e_del_vxlan_port,
@@ -7421,14 +7543,14 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
- /* The following two steps are necessary to prevent reception
- * of tagged packets - by default the NVM loads a MAC-VLAN
- * filter that will accept any tagged packet. This is to
- * prevent that during normal operations until a specific
- * VLAN tag filter has been set.
+ /* The following steps are necessary to prevent reception
+ * of tagged packets - some older NVM configurations load a
+ * default MAC-VLAN filter that accepts any tagged packet,
+ * which must be replaced by a normal filter.
*/
- i40e_rm_default_mac_filter(vsi, mac_addr);
- i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
+ if (!i40e_rm_default_mac_filter(vsi, mac_addr))
+ i40e_add_filter(vsi, mac_addr,
+ I40E_VLAN_ANY, false, true);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -7644,7 +7766,22 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
f_count++;
if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
- i40e_aq_mac_address_write(&vsi->back->hw,
+ struct i40e_aqc_remove_macvlan_element_data element;
+
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, f->macaddr);
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ ret = i40e_aq_remove_macvlan(hw, vsi->seid,
+ &element, 1, NULL);
+ if (ret) {
+ /* some older FW has a different default */
+ element.flags |=
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ i40e_aq_remove_macvlan(hw, vsi->seid,
+ &element, 1, NULL);
+ }
+
+ i40e_aq_mac_address_write(hw,
I40E_AQC_WRITE_TYPE_LAA_WOL,
f->macaddr, NULL);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 949a9a01778b..0988b5c1fe87 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -52,10 +52,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
/* debug function for adminq */
-void i40e_debug_aq(struct i40e_hw *hw,
- enum i40e_debug_mask mask,
- void *desc,
- void *buffer);
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
+ void *desc, void *buffer, u16 buf_len);
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 369848e107f8..be039dd6114d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -224,15 +224,19 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
if (ret) {
dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
+ "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
+ fd_data->pctype, fd_data->fd_id, ret);
err = true;
} else {
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
+ if (add)
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d loc = %d\n",
+ fd_data->pctype, fd_data->fd_id);
+ else
+ dev_info(&pf->pdev->dev,
+ "Filter deleted for PCTYPE %d loc = %d\n",
+ fd_data->pctype, fd_data->fd_id);
}
-
return err ? -EOPNOTSUPP : 0;
}
@@ -276,10 +280,18 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
tcp->source = fd_data->src_port;
if (add) {
+ pf->fd_tcp_rule++;
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
+ } else {
+ pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
+ (pf->fd_tcp_rule - 1) : 0;
+ if (pf->fd_tcp_rule == 0) {
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+ }
}
fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
@@ -287,12 +299,17 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
if (ret) {
dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
+ "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
+ fd_data->pctype, fd_data->fd_id, ret);
err = true;
} else {
- dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
+ if (add)
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
+ fd_data->pctype, fd_data->fd_id);
+ else
+ dev_info(&pf->pdev->dev,
+ "Filter deleted for PCTYPE %d loc = %d\n",
+ fd_data->pctype, fd_data->fd_id);
}
return err ? -EOPNOTSUPP : 0;
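The add/remove paths above effectively reference-count sideband TCP rules and gate ATR on that count: the first rule forces ATR off, and removing the last one re-enables it, with a clamp at zero against unbalanced deletes. A small sketch under those assumptions:

#include <stdbool.h>
#include <stdio.h>

/* Count sideband TCP rules and gate the automatic (ATR) mode on the
 * count, mirroring the add/remove paths in this hunk: the first rule
 * forces ATR off, and removing the last one re-enables it. Clamping at
 * zero guards against unbalanced deletes.
 */
struct fd_state {
	unsigned int tcp_rules;
	bool atr_enabled;
};

static void tcp_rule_update(struct fd_state *s, bool add)
{
	if (add) {
		s->tcp_rules++;
		s->atr_enabled = false;
	} else {
		if (s->tcp_rules > 0)
			s->tcp_rules--;
		if (s->tcp_rules == 0)
			s->atr_enabled = true;
	}
}

int main(void)
{
	struct fd_state s = { 0, true };

	tcp_rule_update(&s, true);
	tcp_rule_update(&s, false);
	printf("rules=%u atr=%d\n", s.tcp_rules, s.atr_enabled);
	return 0;
}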
@@ -355,13 +372,18 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
if (ret) {
dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
+ "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
+ fd_data->pctype, fd_data->fd_id, ret);
err = true;
} else {
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
+ if (add)
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d loc = %d\n",
+ fd_data->pctype, fd_data->fd_id);
+ else
+ dev_info(&pf->pdev->dev,
+ "Filter deleted for PCTYPE %d loc = %d\n",
+ fd_data->pctype, fd_data->fd_id);
}
}
@@ -443,8 +465,14 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
- dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
- rx_desc->wb.qword0.hi_dword.fd_id);
+ if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
+ (I40E_DEBUG_FD & pf->hw.debug_mask))
+ dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
+ rx_desc->wb.qword0.hi_dword.fd_id);
+
+ pf->fd_add_err++;
+ /* store the current atr filter count */
+ pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
/* filter programming failed most likely due to table full */
fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
@@ -454,29 +482,21 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
* FD ATR/SB and then re-enable it when there is room.
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
- /* Turn off ATR first */
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
!(pf->auto_disable_flags &
- I40E_FLAG_FD_ATR_ENABLED)) {
- dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
- pf->auto_disable_flags |=
- I40E_FLAG_FD_ATR_ENABLED;
- pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
- } else if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
- !(pf->auto_disable_flags &
I40E_FLAG_FD_SB_ENABLED)) {
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
pf->auto_disable_flags |=
I40E_FLAG_FD_SB_ENABLED;
- pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
}
} else {
- dev_info(&pdev->dev, "FD filter programming error\n");
+ dev_info(&pdev->dev,
+ "FD filter programming failed due to incorrect filter parameters\n");
}
} else if (error ==
(0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n",
+ dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
rx_desc->wb.qword0.hi_dword.fd_id);
}
}
@@ -587,6 +607,7 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
u32 tx_pending = i40e_get_tx_pending(tx_ring);
+ struct i40e_pf *pf = tx_ring->vsi->back;
bool ret = false;
clear_check_for_tx_hang(tx_ring);
@@ -603,10 +624,17 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* pending but without time to complete it yet.
*/
if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- tx_pending) {
+ (tx_pending >= I40E_MIN_DESC_PENDING)) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
+ } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
+ (tx_pending < I40E_MIN_DESC_PENDING) &&
+ (tx_pending > 0)) {
+ if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
+ dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
+ tx_pending, tx_ring->queue_index);
+ pf->tx_sluggish_count++;
} else {
/* update completed stats and disarm the hang check */
tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
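The hang check now distinguishes three cases from one ring snapshot: a real backlog arms the check, a handful of stragglers (fewer than I40E_MIN_DESC_PENDING) is counted as sluggish hardware, and any progress disarms it. A minimal sketch of that classification:

#include <stdio.h>

#define MIN_DESC_PENDING 4

/* Classify a Tx ring snapshot the way the hunk above does: enough
 * stalled descriptors arms the hang check, a handful of stragglers is
 * counted as "sluggish" hardware, and anything else disarms the check.
 */
enum tx_verdict { TX_OK, TX_ARM_HANG_CHECK, TX_SLUGGISH };

static enum tx_verdict check_tx(unsigned long done_old,
				unsigned long packets,
				unsigned int pending)
{
	if (done_old == packets && pending >= MIN_DESC_PENDING)
		return TX_ARM_HANG_CHECK;	/* no progress, real backlog */
	if (done_old == packets && pending > 0)
		return TX_SLUGGISH;	/* fewer than a cacheline's worth */
	return TX_OK;			/* progress made: disarm */
}

int main(void)
{
	printf("%d\n", check_tx(100, 100, 8));	/* arm hang check */
	printf("%d\n", check_tx(100, 100, 2));	/* sluggish */
	printf("%d\n", check_tx(100, 140, 2));	/* ok */
	return 0;
}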
@@ -1213,7 +1241,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
- skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
@@ -1287,6 +1314,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = ipv4_tunnel || ipv6_tunnel;
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 73f4fa425697..d7a625a6a14f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -121,6 +121,7 @@ enum i40e_dyn_idx_t {
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#define I40E_MIN_DESC_PENDING 4
#define I40E_TX_FLAGS_CSUM (u32)(1)
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 3ac6a0d2f143..4eeed267e4b7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -73,7 +73,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
{
struct i40e_pf *pf = vf->pf;
- return qid < pf->vsi[vsi_id]->num_queue_pairs;
+ return qid < pf->vsi[vsi_id]->alloc_queue_pairs;
}
/**
@@ -350,6 +350,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
rx_ctx.prefena = 1;
+ rx_ctx.l2tsel = 1;
/* clear the context in the HMC */
ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
@@ -468,7 +469,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
/* map PF queues to VF queues */
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+ for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) {
u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
@@ -477,7 +478,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
/* map PF queues to VSI */
for (j = 0; j < 7; j++) {
- if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
+ if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) {
reg = 0x07FF07FF; /* unused */
} else {
u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
@@ -584,7 +585,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
if (ret)
goto error_alloc;
- total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
+ total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
/* store the total qps number for the runtime
@@ -706,35 +707,6 @@ complete_reset:
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
i40e_flush(hw);
}
-
-/**
- * i40e_vfs_are_assigned
- * @pf: pointer to the pf structure
- *
- * Determine if any VFs are assigned to VMs
- **/
-static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
-{
- struct pci_dev *pdev = pf->pdev;
- struct pci_dev *vfdev;
-
- /* loop through all the VFs to see if we own any that are assigned */
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF , NULL);
- while (vfdev) {
- /* if we don't own it we don't care */
- if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
- /* if it is assigned we cannot release it */
- if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
- return true;
- }
-
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- I40E_DEV_ID_VF,
- vfdev);
- }
-
- return false;
-}
#ifdef CONFIG_PCI_IOV
/**
@@ -842,7 +814,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
* assigned. Setting the number of VFs to 0 through sysfs is caught
* before this function ever gets called.
*/
- if (!i40e_vfs_are_assigned(pf)) {
+ if (!pci_vfs_assigned(pf->pdev)) {
pci_disable_sriov(pf->pdev);
/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
* work correctly when SR-IOV gets re-enabled.
@@ -979,7 +951,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs)
return i40e_pci_sriov_enable(pdev, num_vfs);
- if (!i40e_vfs_are_assigned(pf)) {
+ if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
@@ -1123,7 +1095,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
vfres->vsi_res[i].num_queue_pairs =
- pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
+ pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
memcpy(vfres->vsi_res[i].default_mac_addr,
vf->default_lan_addr.addr, ETH_ALEN);
i++;
@@ -1209,6 +1181,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_virtchnl_vsi_queue_config_info *qci =
(struct i40e_virtchnl_vsi_queue_config_info *)msg;
struct i40e_virtchnl_queue_pair_info *qpi;
+ struct i40e_pf *pf = vf->pf;
u16 vsi_id, vsi_queue_id;
i40e_status aq_ret = 0;
int i;
@@ -1242,6 +1215,8 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
}
+ /* set vsi num_queue_pairs in use to num configured by vf */
+ pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs;
error_param:
/* send the response to the vf */
@@ -2094,7 +2069,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* Force the VF driver stop so it has to reload with new MAC address */
i40e_vc_disable_vf(pf, vf);
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
- ret = 0;
error_param:
return ret;
@@ -2419,7 +2393,7 @@ error_out:
*
* Enable or disable VF spoof checking
**/
-int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable)
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 63e7e0d81ad2..0adc61e1052d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -122,7 +122,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi);
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
-int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable);
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 003006033614..f206be917842 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -788,7 +788,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
/* bump the tail */
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
- i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
+ i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
(hw->aq.asq.next_to_use)++;
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
@@ -842,7 +843,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n");
- i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
+ i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
+ buff_size);
/* update the error if time out occurred */
if ((!cmd_completed) &&
@@ -938,7 +940,8 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
hw->aq.nvm_busy = false;
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
- i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);
+ i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 4ea90bf239bb..952560551964 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -75,13 +75,15 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
* @mask: debug mask
* @desc: pointer to admin queue descriptor
* @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
*
* Dumps debug log about adminq command with descriptor contents.
**/
void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
- void *buffer)
+ void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u16 len = le16_to_cpu(aq_desc->datalen);
u8 *aq_buffer = (u8 *)buffer;
u32 data[4];
u32 i = 0;
@@ -105,7 +107,9 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
memset(data, 0, sizeof(data));
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
- for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
+ if (buf_len < len)
+ len = buf_len;
+ for (i = 0; i < len; i++) {
data[((i % 16) / 4)] |=
((u32)aq_buffer[i]) << (8 * (i % 4));
if ((i % 16) == 15) {
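The new buf_len argument exists purely to clamp the dump loop so a firmware-supplied datalen can never read past the caller's buffer. A self-contained sketch of the same clamp-then-dump pattern:

#include <stdint.h>
#include <stdio.h>

/* Dump at most buf_len bytes even when the descriptor claims a larger
 * datalen, which is the overread the added buf_len argument prevents.
 */
static void debug_dump(const uint8_t *buf, uint16_t datalen,
		       uint16_t buf_len)
{
	uint16_t len = datalen < buf_len ? datalen : buf_len;
	uint16_t i;

	for (i = 0; i < len; i++)
		printf("%02x%s", buf[i], (i % 16) == 15 ? "\n" : " ");
	if (len % 16)
		printf("\n");
}

int main(void)
{
	uint8_t buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	/* firmware says 32 bytes, but the caller only owns 8 */
	debug_dump(buf, 32, sizeof(buf));
	return 0;
}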
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 849edcc2e398..9173834825ac 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -53,10 +53,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
bool i40evf_asq_done(struct i40e_hw *hw);
/* debug function for adminq */
-void i40evf_debug_aq(struct i40e_hw *hw,
- enum i40e_debug_mask mask,
- void *desc,
- void *buffer);
+void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
+ void *desc, void *buffer, u16 buf_len);
void i40e_idle_aq(struct i40e_hw *hw);
void i40evf_resume_aq(struct i40e_hw *hw);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 95a3ec236b49..04c7c1557a0c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -163,11 +163,13 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* pending but without time to complete it yet.
*/
if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- tx_pending) {
+ (tx_pending >= I40E_MIN_DESC_PENDING)) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
- } else {
+ } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
+ !(tx_pending < I40E_MIN_DESC_PENDING) ||
+ !(tx_pending > 0)) {
/* update completed stats and disarm the hang check */
tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
@@ -744,7 +746,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
- skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
@@ -818,6 +819,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = ipv4_tunnel || ipv6_tunnel;
return;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 8bc6858163b0..f6dcf9dd9290 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -121,6 +121,7 @@ enum i40e_dyn_idx_t {
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#define I40E_MIN_DESC_PENDING 4
#define I40E_TX_FLAGS_CSUM (u32)(1)
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 38429fae4fcf..c51bc7a33bc5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver";
-#define DRV_VERSION "0.9.40"
+#define DRV_VERSION "1.0.5"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 236a6183a865..051ea94bdcd3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2548,11 +2548,13 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
/**
* igb_set_eee_i350 - Enable/disable EEE support
* @hw: pointer to the HW structure
+ * @adv1G: boolean flag enabling 1G EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
*
* Enable/disable EEE based on setting in dev_spec structure.
*
**/
-s32 igb_set_eee_i350(struct e1000_hw *hw)
+s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
{
u32 ipcnfg, eeer;
@@ -2566,7 +2568,16 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
if (!(hw->dev_spec._82575.eee_disable)) {
u32 eee_su = rd32(E1000_EEE_SU);
- ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+ if (adv100M)
+ ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
+ else
+ ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
+
+ if (adv1G)
+ ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
+ else
+ ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
+
eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
E1000_EEER_LPI_FC);
@@ -2593,11 +2604,13 @@ out:
/**
* igb_set_eee_i354 - Enable/disable EEE support
* @hw: pointer to the HW structure
+ * @adv1G: boolean flag enabling 1G EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
*
* Enable/disable EEE legacy mode based on setting in dev_spec structure.
*
**/
-s32 igb_set_eee_i354(struct e1000_hw *hw)
+s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = 0;
@@ -2636,8 +2649,16 @@ s32 igb_set_eee_i354(struct e1000_hw *hw)
if (ret_val)
goto out;
- phy_data |= E1000_EEE_ADV_100_SUPPORTED |
- E1000_EEE_ADV_1000_SUPPORTED;
+ if (adv100M)
+ phy_data |= E1000_EEE_ADV_100_SUPPORTED;
+ else
+ phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
+
+ if (adv1G)
+ phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
+ else
+ phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
+
ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
E1000_EEE_ADV_DEV_I354,
phy_data);
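Both the i350 and i354 paths now set or clear each advertisement bit from its boolean instead of unconditionally enabling both speeds. A sketch of that pattern with illustrative bit positions, not the real E1000_* values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EEE_ADV_100M (1u << 1)	/* illustrative bit positions */
#define EEE_ADV_1G   (1u << 2)

/* Set or clear each advertisement bit from its boolean, as the i350
 * and i354 paths above now do instead of unconditionally enabling both.
 */
static uint32_t apply_eee_adv(uint32_t reg, bool adv1g, bool adv100m)
{
	if (adv100m)
		reg |= EEE_ADV_100M;
	else
		reg &= ~EEE_ADV_100M;

	if (adv1g)
		reg |= EEE_ADV_1G;
	else
		reg &= ~EEE_ADV_1G;

	return reg;
}

int main(void)
{
	printf("0x%x\n", apply_eee_adv(0, true, false));   /* 1G only */
	printf("0x%x\n", apply_eee_adv(~0u, false, true)); /* clear 1G, keep 100M */
	return 0;
}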
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index b407c55738fa..2154aea7aa7e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -263,8 +263,8 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
u16 igb_rxpbs_adjust_82580(u32 data);
s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
-s32 igb_set_eee_i350(struct e1000_hw *);
-s32 igb_set_eee_i354(struct e1000_hw *);
+s32 igb_set_eee_i350(struct e1000_hw *, bool adv1G, bool adv100M);
+s32 igb_set_eee_i354(struct e1000_hw *, bool adv1G, bool adv100M);
s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status);
#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c737d1f40838..02cfd3b14762 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2675,6 +2675,7 @@ static int igb_set_eee(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
struct ethtool_eee eee_curr;
+ bool adv1g_eee = true, adv100m_eee = true;
s32 ret_val;
if ((hw->mac.type < e1000_i350) ||
@@ -2701,12 +2702,14 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
- if (edata->advertised &
- ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+ if (!edata->advertised || (edata->advertised &
+ ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) {
dev_err(&adapter->pdev->dev,
- "EEE Advertisement supports only 100Tx and or 100T full duplex\n");
+ "EEE Advertisement supports only 100Tx and/or 100T full duplex\n");
return -EINVAL;
}
+ adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL);
+ adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL);
} else if (!edata->eee_enabled) {
dev_err(&adapter->pdev->dev,
@@ -2718,10 +2721,6 @@ static int igb_set_eee(struct net_device *netdev,
if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
adapter->flags |= IGB_FLAG_EEE;
- if (hw->mac.type == e1000_i350)
- igb_set_eee_i350(hw);
- else
- igb_set_eee_i354(hw);
/* reset link */
if (netif_running(netdev))
@@ -2730,6 +2729,17 @@ static int igb_set_eee(struct net_device *netdev,
igb_reset(adapter);
}
+ if (hw->mac.type == e1000_i354)
+ ret_val = igb_set_eee_i354(hw, adv1g_eee, adv100m_eee);
+ else
+ ret_val = igb_set_eee_i350(hw, adv1g_eee, adv100m_eee);
+
+ if (ret_val) {
+ dev_err(&adapter->pdev->dev,
+ "Problem setting EEE advertisement options\n");
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index cb14bbdfb056..6cf0c17ad9c4 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2012,10 +2012,10 @@ void igb_reset(struct igb_adapter *adapter)
case e1000_i350:
case e1000_i210:
case e1000_i211:
- igb_set_eee_i350(hw);
+ igb_set_eee_i350(hw, true, true);
break;
case e1000_i354:
- igb_set_eee_i354(hw);
+ igb_set_eee_i354(hw, true, true);
break;
default:
break;
@@ -2619,7 +2619,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case e1000_i210:
case e1000_i211:
/* Enable EEE for internal copper PHY devices */
- err = igb_set_eee_i350(hw);
+ err = igb_set_eee_i350(hw, true, true);
if ((!err) &&
(!hw->dev_spec._82575.eee_disable)) {
adapter->eee_advert =
@@ -2630,7 +2630,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case e1000_i354:
if ((rd32(E1000_CTRL_EXT) &
E1000_CTRL_EXT_LINK_MODE_SGMII)) {
- err = igb_set_eee_i354(hw);
+ err = igb_set_eee_i354(hw, true, true);
if ((!err) &&
(!hw->dev_spec._82575.eee_disable)) {
adapter->eee_advert =
@@ -4813,6 +4813,41 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+ struct net_device *netdev = tx_ring->netdev;
+
+ netif_stop_subqueue(netdev, tx_ring->queue_index);
+
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it.
+ */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available.
+ */
+ if (igb_desc_unused(tx_ring) < size)
+ return -EBUSY;
+
+ /* A reprieve! */
+ netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+ u64_stats_update_begin(&tx_ring->tx_syncp2);
+ tx_ring->tx_stats.restart_queue2++;
+ u64_stats_update_end(&tx_ring->tx_syncp2);
+
+ return 0;
+}
+
+static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+ if (igb_desc_unused(tx_ring) >= size)
+ return 0;
+ return __igb_maybe_stop_tx(tx_ring, size);
+}
+
static void igb_tx_map(struct igb_ring *tx_ring,
struct igb_tx_buffer *first,
const u8 hdr_len)
@@ -4915,13 +4950,17 @@ static void igb_tx_map(struct igb_ring *tx_ring,
tx_ring->next_to_use = i;
- writel(i, tx_ring->tail);
+ /* Make sure there is space in the ring for the next send. */
+ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
- /* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
- */
- mmiowb();
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ writel(i, tx_ring->tail);
+ /* we need this if more than one processor can write to our tail
+ * at a time, it synchronizes IO on IA64/Altix systems
+ */
+ mmiowb();
+ }
return;
dma_error:
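Moving igb_maybe_stop_tx() ahead of the tail write enables the xmit_more batching seen here: the doorbell is skipped while the stack promises more packets and written once at the end of the batch, or immediately if the queue was stopped. A userspace sketch of the pattern, with write_tail() standing in for the MMIO write:

#include <stdbool.h>
#include <stdio.h>

/* Defer the (expensive) tail/doorbell write while the stack promises
 * more packets, flushing when the batch ends or the queue is stopped.
 */
struct tx_ring {
	unsigned int next_to_use;
	unsigned int tail;	/* last value written to hardware */
};

static void write_tail(struct tx_ring *r)
{
	r->tail = r->next_to_use;
	printf("doorbell: tail=%u\n", r->tail);
}

static void tx_map(struct tx_ring *r, bool queue_stopped, bool xmit_more)
{
	r->next_to_use++;	/* descriptor has been filled in */

	if (queue_stopped || !xmit_more)
		write_tail(r);	/* flush the whole batch at once */
}

int main(void)
{
	struct tx_ring r = { 0, 0 };

	tx_map(&r, false, true);	/* batched, no doorbell */
	tx_map(&r, false, true);	/* batched, no doorbell */
	tx_map(&r, false, false);	/* end of batch: one doorbell */
	return 0;
}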
@@ -4941,41 +4980,6 @@ dma_error:
tx_ring->next_to_use = i;
}
-static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
-{
- struct net_device *netdev = tx_ring->netdev;
-
- netif_stop_subqueue(netdev, tx_ring->queue_index);
-
- /* Herbert's original patch had:
- * smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it.
- */
- smp_mb();
-
- /* We need to check again in a case another CPU has just
- * made room available.
- */
- if (igb_desc_unused(tx_ring) < size)
- return -EBUSY;
-
- /* A reprieve! */
- netif_wake_subqueue(netdev, tx_ring->queue_index);
-
- u64_stats_update_begin(&tx_ring->tx_syncp2);
- tx_ring->tx_stats.restart_queue2++;
- u64_stats_update_end(&tx_ring->tx_syncp2);
-
- return 0;
-}
-
-static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
-{
- if (igb_desc_unused(tx_ring) >= size)
- return 0;
- return __igb_maybe_stop_tx(tx_ring, size);
-}
-
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
@@ -5046,9 +5050,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
igb_tx_map(tx_ring, first, hdr_len);
- /* Make sure there is space in the ring for the next send. */
- igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
return NETDEV_TX_OK;
out_drop:
@@ -6768,113 +6769,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,
}
/**
- * igb_get_headlen - determine size of header for LRO/GRO
- * @data: pointer to the start of the headers
- * @max_len: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, and GRO offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- **/
-static unsigned int igb_get_headlen(unsigned char *data,
- unsigned int max_len)
-{
- union {
- unsigned char *network;
- /* l2 headers */
- struct ethhdr *eth;
- struct vlan_hdr *vlan;
- /* l3 headers */
- struct iphdr *ipv4;
- struct ipv6hdr *ipv6;
- } hdr;
- __be16 protocol;
- u8 nexthdr = 0; /* default to not TCP */
- u8 hlen;
-
- /* this should never happen, but better safe than sorry */
- if (max_len < ETH_HLEN)
- return max_len;
-
- /* initialize network frame pointer */
- hdr.network = data;
-
- /* set first protocol and move network header forward */
- protocol = hdr.eth->h_proto;
- hdr.network += ETH_HLEN;
-
- /* handle any vlan tag if present */
- if (protocol == htons(ETH_P_8021Q)) {
- if ((hdr.network - data) > (max_len - VLAN_HLEN))
- return max_len;
-
- protocol = hdr.vlan->h_vlan_encapsulated_proto;
- hdr.network += VLAN_HLEN;
- }
-
- /* handle L3 protocols */
- if (protocol == htons(ETH_P_IP)) {
- if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
- return max_len;
-
- /* access ihl as a u8 to avoid unaligned access on ia64 */
- hlen = (hdr.network[0] & 0x0F) << 2;
-
- /* verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct iphdr))
- return hdr.network - data;
-
- /* record next protocol if header is present */
- if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
- nexthdr = hdr.ipv4->protocol;
- } else if (protocol == htons(ETH_P_IPV6)) {
- if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
- return max_len;
-
- /* record next protocol */
- nexthdr = hdr.ipv6->nexthdr;
- hlen = sizeof(struct ipv6hdr);
- } else {
- return hdr.network - data;
- }
-
- /* relocate pointer to start of L4 header */
- hdr.network += hlen;
-
- /* finally sort out TCP */
- if (nexthdr == IPPROTO_TCP) {
- if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
- return max_len;
-
- /* access doff as a u8 to avoid unaligned access on ia64 */
- hlen = (hdr.network[12] & 0xF0) >> 2;
-
- /* verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct tcphdr))
- return hdr.network - data;
-
- hdr.network += hlen;
- } else if (nexthdr == IPPROTO_UDP) {
- if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
- return max_len;
-
- hdr.network += sizeof(struct udphdr);
- }
-
- /* If everything has gone correctly hdr.network should be the
- * data section of the packet and will be the end of the header.
- * If not then it probably represents the end of the last recognized
- * header.
- */
- if ((hdr.network - data) < max_len)
- return hdr.network - data;
- else
- return max_len;
-}
-
-/**
* igb_pull_tail - igb specific version of skb_pull_tail
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
@@ -6918,7 +6812,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
/* we need the header to contain the greater of either ETH_HLEN or
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
- pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
+ pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index ac9f2148cdc5..673d82095779 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -386,119 +386,87 @@ struct ixgbe_q_vector {
char name[IFNAMSIZ + 9];
#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int state;
-#define IXGBE_QV_STATE_IDLE 0
-#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
-#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */
-#define IXGBE_QV_STATE_DISABLED 4 /* QV is disabled */
-#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
-#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED)
-#define IXGBE_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
-#define IXGBE_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
-#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
-#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
- spinlock_t lock;
+ atomic_t state;
#endif /* CONFIG_NET_RX_BUSY_POLL */
/* for dynamic allocation of rings associated with this q_vector */
struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
+
#ifdef CONFIG_NET_RX_BUSY_POLL
+enum ixgbe_qv_state_t {
+ IXGBE_QV_STATE_IDLE = 0,
+ IXGBE_QV_STATE_NAPI,
+ IXGBE_QV_STATE_POLL,
+ IXGBE_QV_STATE_DISABLE
+};
+
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
-
- spin_lock_init(&q_vector->lock);
- q_vector->state = IXGBE_QV_STATE_IDLE;
+ /* reset state to idle */
+ atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
}
/* called from the device poll routine to get ownership of a q_vector */
static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
{
- int rc = true;
- spin_lock_bh(&q_vector->lock);
- if (q_vector->state & IXGBE_QV_LOCKED) {
- WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
- q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
- rc = false;
+ int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
+ IXGBE_QV_STATE_NAPI);
#ifdef BP_EXTENDED_STATS
+ if (rc != IXGBE_QV_STATE_IDLE)
q_vector->tx.ring->stats.yields++;
#endif
- } else {
- /* we don't care if someone yielded */
- q_vector->state = IXGBE_QV_STATE_NAPI;
- }
- spin_unlock_bh(&q_vector->lock);
- return rc;
+
+ return rc == IXGBE_QV_STATE_IDLE;
}
/* returns true if someone tried to get the qv while napi had it */
-static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
+static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
{
- int rc = false;
- spin_lock_bh(&q_vector->lock);
- WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
- IXGBE_QV_STATE_NAPI_YIELD));
-
- if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
- rc = true;
- /* will reset state to idle, unless QV is disabled */
- q_vector->state &= IXGBE_QV_STATE_DISABLED;
- spin_unlock_bh(&q_vector->lock);
- return rc;
+ WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI);
+
+ /* flush any outstanding Rx frames */
+ if (q_vector->napi.gro_list)
+ napi_gro_flush(&q_vector->napi, false);
+
+ /* reset state to idle */
+ atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
}
/* called from ixgbe_low_latency_poll() */
static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
{
- int rc = true;
- spin_lock_bh(&q_vector->lock);
- if ((q_vector->state & IXGBE_QV_LOCKED)) {
- q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
- rc = false;
+ int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
+ IXGBE_QV_STATE_POLL);
#ifdef BP_EXTENDED_STATS
- q_vector->rx.ring->stats.yields++;
+ if (rc != IXGBE_QV_STATE_IDLE)
+ q_vector->tx.ring->stats.yields++;
#endif
- } else {
- /* preserve yield marks */
- q_vector->state |= IXGBE_QV_STATE_POLL;
- }
- spin_unlock_bh(&q_vector->lock);
- return rc;
+ return rc == IXGBE_QV_STATE_IDLE;
}
/* returns true if someone tried to get the qv while it was locked */
-static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
+static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
{
- int rc = false;
- spin_lock_bh(&q_vector->lock);
- WARN_ON(q_vector->state & (IXGBE_QV_STATE_NAPI));
-
- if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
- rc = true;
- /* will reset state to idle, unless QV is disabled */
- q_vector->state &= IXGBE_QV_STATE_DISABLED;
- spin_unlock_bh(&q_vector->lock);
- return rc;
+ WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL);
+
+ /* reset state to idle */
+ atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
}
/* true if a socket is polling, even if it did not get the lock */
static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
{
- WARN_ON(!(q_vector->state & IXGBE_QV_OWNED));
- return q_vector->state & IXGBE_QV_USER_PEND;
+ return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL;
}
/* false if QV is currently owned */
static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
{
- int rc = true;
- spin_lock_bh(&q_vector->lock);
- if (q_vector->state & IXGBE_QV_OWNED)
- rc = false;
- q_vector->state |= IXGBE_QV_STATE_DISABLED;
- spin_unlock_bh(&q_vector->lock);
-
- return rc;
+ int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
+ IXGBE_QV_STATE_DISABLE);
+
+ return rc == IXGBE_QV_STATE_IDLE;
}
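The busy-poll rework replaces the spinlock-plus-flag scheme with a single-owner state machine driven by compare-and-swap: ownership is taken only by moving the state away from IDLE, and released by storing IDLE back. A runnable C11 sketch of the same idea:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Single-owner state machine using compare-and-swap, the pattern that
 * replaces the spinlock above: ownership is taken only by moving the
 * state from IDLE, and released by storing IDLE back.
 */
enum qv_state { QV_IDLE, QV_NAPI, QV_POLL, QV_DISABLE };

static _Atomic int state = QV_IDLE;

static bool qv_lock(enum qv_state owner)
{
	int expected = QV_IDLE;

	/* succeeds only if nobody owns the vector right now */
	return atomic_compare_exchange_strong(&state, &expected, owner);
}

static void qv_unlock(void)
{
	atomic_store(&state, QV_IDLE);
}

int main(void)
{
	printf("napi lock: %d\n", qv_lock(QV_NAPI));	/* 1: got it */
	printf("poll lock: %d\n", qv_lock(QV_POLL));	/* 0: yields */
	qv_unlock();
	printf("poll lock: %d\n", qv_lock(QV_POLL));	/* 1: got it */
	return 0;
}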
#else /* CONFIG_NET_RX_BUSY_POLL */
@@ -643,9 +611,7 @@ struct ixgbe_adapter {
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0)
#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
-#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2)
#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4)
#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5)
@@ -760,8 +726,6 @@ struct ixgbe_adapter {
u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 wol;
- u16 bd_number;
-
u16 eeprom_verh;
u16 eeprom_verl;
u16 eeprom_cap;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index e4100b5737b6..cff383b1cbb0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1303,7 +1303,7 @@ static const struct ixgbe_reg_test reg_test_82599[] = {
{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
+ { .reg = 0 }
};
/* default 82598 register test */
@@ -1331,7 +1331,7 @@ static const struct ixgbe_reg_test reg_test_82598[] = {
{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
+ { .reg = 0 }
};
static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 2d9451e39686..ce40c77381e9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -696,46 +696,83 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
ixgbe_set_rss_queues(adapter);
}
-static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
- int vectors)
+/**
+ * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
+ * @adapter: board private structure
+ *
+ * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
+ * return a negative error code if unable to acquire MSI-X vectors for any
+ * reason.
+ */
+static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
- int vector_threshold;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, vectors, vector_threshold;
+
+ /* We start by asking for one vector per queue pair */
+ vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
- /* We'll want at least 2 (vector_threshold):
- * 1) TxQ[0] + RxQ[0] handler
- * 2) Other (Link Status Change, etc.)
+ /* It is easy to be greedy for MSI-X vectors. However, it really
+ * doesn't do much good if we have a lot more vectors than CPUs. We'll
+ * be somewhat conservative and only ask for (roughly) the same number
+ * of vectors as there are CPUs.
*/
- vector_threshold = MIN_MSIX_COUNT;
+ vectors = min_t(int, vectors, num_online_cpus());
- /*
- * The more we get, the more we will assign to Tx/Rx Cleanup
- * for the separate queues...where Rx Cleanup >= Tx Cleanup.
- * Right now, we simply care about how many we'll get; we'll
- * set them up later while requesting irq's.
+ /* Some vectors are necessary for non-queue interrupts */
+ vectors += NON_Q_VECTORS;
+
+	/* Hardware can only support a maximum of hw.mac.max_msix_vectors.
+	 * With features such as RSS and VMDq, we can easily surpass the
+	 * number of Rx and Tx descriptor queues supported by our device.
+	 * Thus, we cap the maximum in the rare cases where the CPU count
+	 * also exceeds our vector limit.
*/
+ vectors = min_t(int, vectors, hw->mac.max_msix_vectors);
+
+ /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
+ * handler, and (2) an Other (Link Status Change, etc.) handler.
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ adapter->msix_entries = kcalloc(vectors,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < vectors; i++)
+ adapter->msix_entries[i].entry = i;
+
vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
vector_threshold, vectors);
if (vectors < 0) {
- /* Can't allocate enough MSI-X interrupts? Oh well.
- * This just means we'll go with either a single MSI
- * vector or fall back to legacy interrupts.
+		/* A negative count of allocated vectors indicates an error
+		 * acquiring vectors within the specified MSI-X range
*/
- netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
- "Unable to allocate MSI-X interrupts\n");
+ e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
+ vectors);
+
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- } else {
- adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
- /*
- * Adjust for only the vectors we'll use, which is minimum
- * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
- * vectors we were allocated.
- */
- vectors -= NON_Q_VECTORS;
- adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
+
+ return vectors;
}
+
+ /* we successfully allocated some number of vectors within our
+ * requested range.
+ */
+ adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
+
+	/* Adjust for only the vectors we'll use, which is the minimum
+	 * of max_q_vectors and the number of vectors we were allocated.
+ */
+ vectors -= NON_Q_VECTORS;
+ adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);
+
+ return 0;
}
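
The budgeting in ixgbe_acquire_msix_vectors() is easiest to follow with concrete numbers. A minimal sketch of the arithmetic, assuming a NON_Q_VECTORS value of 1 for illustration (the stand-in names are not driver symbols):

#include <stdio.h>

#define NON_Q_VECTORS	1	/* assumed value for the "other" interrupt */

static int min_int(int a, int b) { return a < b ? a : b; }

static int msix_budget(int rxq, int txq, int cpus, int hw_max)
{
	int vectors = rxq > txq ? rxq : txq;	/* one per queue pair */

	vectors = min_int(vectors, cpus);	/* no more vectors than CPUs */
	vectors += NON_Q_VECTORS;		/* non-queue interrupts */
	return min_int(vectors, hw_max);	/* hardware ceiling */
}

int main(void)
{
	/* 16 Rx/Tx queue pairs on an 8-CPU box with a 64-vector device */
	printf("budget = %d\n", msix_budget(16, 16, 8, 64));	/* 9 */
	return 0;
}
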
static void ixgbe_add_ring(struct ixgbe_ring *ring,
@@ -807,6 +844,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ixgbe_poll, 64);
napi_hash_add(&q_vector->napi);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ /* initialize busy poll */
+ atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE);
+
+#endif
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
q_vector->adapter = adapter;
@@ -1049,46 +1091,20 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
**/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
- int vector, v_budget, err;
+ int err;
- /*
- * It's easy to be greedy for MSI-X vectors, but it really
- * doesn't do us much good if we have a lot more vectors
- * than CPU's. So let's be conservative and only ask for
- * (roughly) the same number of vectors as there are CPU's.
- * The default is to use pairs of vectors.
- */
- v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
- v_budget = min_t(int, v_budget, num_online_cpus());
- v_budget += NON_Q_VECTORS;
+ /* We will try to get MSI-X interrupts first */
+ if (!ixgbe_acquire_msix_vectors(adapter))
+ return;
- /*
- * At the same time, hardware can only support a maximum of
- * hw.mac->max_msix_vectors vectors. With features
- * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
- * descriptor queues supported by our device. Thus, we cap it off in
- * those rare cases where the cpu count also exceeds our vector limit.
+ /* At this point, we do not have MSI-X capabilities. We need to
+ * reconfigure or disable various features which require MSI-X
+ * capability.
*/
- v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
-
- /* A failure in MSI-X entry allocation isn't fatal, but it does
- * mean we disable MSI-X capabilities of the adapter. */
- adapter->msix_entries = kcalloc(v_budget,
- sizeof(struct msix_entry), GFP_KERNEL);
- if (adapter->msix_entries) {
- for (vector = 0; vector < v_budget; vector++)
- adapter->msix_entries[vector].entry = vector;
-
- ixgbe_acquire_msix_vectors(adapter, v_budget);
-
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- return;
- }
- /* disable DCB if number of TCs exceeds 1 */
+ /* Disable DCB unless we only have a single traffic class */
if (netdev_get_num_tc(adapter->netdev) > 1) {
- e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
+ e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
netdev_reset_tc(adapter->netdev);
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
@@ -1098,26 +1114,30 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->temp_dcb_cfg.pfc_mode_enable = false;
adapter->dcb_cfg.pfc_mode_enable = false;
}
+
adapter->dcb_cfg.num_tcs.pg_tcs = 1;
adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
- /* disable SR-IOV */
+ /* Disable SR-IOV support */
+ e_dev_warn("Disabling SR-IOV support\n");
ixgbe_disable_sriov(adapter);
- /* disable RSS */
+ /* Disable RSS */
+ e_dev_warn("Disabling RSS support\n");
adapter->ring_feature[RING_F_RSS].limit = 1;
+ /* recalculate number of queues now that many features have been
+ * changed or disabled.
+ */
ixgbe_set_num_queues(adapter);
adapter->num_q_vectors = 1;
err = pci_enable_msi(adapter->pdev);
- if (err) {
- netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
- "Unable to allocate MSI interrupt, falling back to legacy. Error: %d\n",
- err);
- return;
- }
- adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
+ if (err)
+ e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
+ err);
+ else
+ adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 87bd53fdd209..06ef5a32a893 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -440,7 +440,7 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
{IXGBE_TXDCTL(0), "TXDCTL"},
/* List Terminator */
- {}
+ { .name = NULL }
};
@@ -1521,120 +1521,6 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
ixgbe_release_rx_desc(rx_ring, i);
}
-/**
- * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
- * @data: pointer to the start of the headers
- * @max_len: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, GRO, and RSC offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- **/
-static unsigned int ixgbe_get_headlen(unsigned char *data,
- unsigned int max_len)
-{
- union {
- unsigned char *network;
- /* l2 headers */
- struct ethhdr *eth;
- struct vlan_hdr *vlan;
- /* l3 headers */
- struct iphdr *ipv4;
- struct ipv6hdr *ipv6;
- } hdr;
- __be16 protocol;
- u8 nexthdr = 0; /* default to not TCP */
- u8 hlen;
-
- /* this should never happen, but better safe than sorry */
- if (max_len < ETH_HLEN)
- return max_len;
-
- /* initialize network frame pointer */
- hdr.network = data;
-
- /* set first protocol and move network header forward */
- protocol = hdr.eth->h_proto;
- hdr.network += ETH_HLEN;
-
- /* handle any vlan tag if present */
- if (protocol == htons(ETH_P_8021Q)) {
- if ((hdr.network - data) > (max_len - VLAN_HLEN))
- return max_len;
-
- protocol = hdr.vlan->h_vlan_encapsulated_proto;
- hdr.network += VLAN_HLEN;
- }
-
- /* handle L3 protocols */
- if (protocol == htons(ETH_P_IP)) {
- if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
- return max_len;
-
- /* access ihl as a u8 to avoid unaligned access on ia64 */
- hlen = (hdr.network[0] & 0x0F) << 2;
-
- /* verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct iphdr))
- return hdr.network - data;
-
- /* record next protocol if header is present */
- if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
- nexthdr = hdr.ipv4->protocol;
- } else if (protocol == htons(ETH_P_IPV6)) {
- if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
- return max_len;
-
- /* record next protocol */
- nexthdr = hdr.ipv6->nexthdr;
- hlen = sizeof(struct ipv6hdr);
-#ifdef IXGBE_FCOE
- } else if (protocol == htons(ETH_P_FCOE)) {
- if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
- return max_len;
- hlen = FCOE_HEADER_LEN;
-#endif
- } else {
- return hdr.network - data;
- }
-
- /* relocate pointer to start of L4 header */
- hdr.network += hlen;
-
- /* finally sort out TCP/UDP */
- if (nexthdr == IPPROTO_TCP) {
- if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
- return max_len;
-
- /* access doff as a u8 to avoid unaligned access on ia64 */
- hlen = (hdr.network[12] & 0xF0) >> 2;
-
- /* verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct tcphdr))
- return hdr.network - data;
-
- hdr.network += hlen;
- } else if (nexthdr == IPPROTO_UDP) {
- if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
- return max_len;
-
- hdr.network += sizeof(struct udphdr);
- }
-
- /*
- * If everything has gone correctly hdr.network should be the
- * data section of the packet and will be the end of the header.
- * If not then it probably represents the end of the last recognized
- * header.
- */
- if ((hdr.network - data) < max_len)
- return hdr.network - data;
- else
- return max_len;
-}
-
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
struct sk_buff *skb)
{
@@ -1793,7 +1679,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
* we need the header to contain the greater of either ETH_HLEN or
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
- pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
+ pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
@@ -2191,9 +2077,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
- if (cleaned_count)
- ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
-
return total_rx_packets;
}
@@ -5300,15 +5183,15 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
int orig_node = dev_to_node(dev);
- int numa_node = -1;
+ int ring_node = -1;
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
if (tx_ring->q_vector)
- numa_node = tx_ring->q_vector->numa_node;
+ ring_node = tx_ring->q_vector->numa_node;
- tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
+ tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
if (!tx_ring->tx_buffer_info)
tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
@@ -5320,7 +5203,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- set_dev_node(dev, numa_node);
+ set_dev_node(dev, ring_node);
tx_ring->desc = dma_alloc_coherent(dev,
tx_ring->size,
&tx_ring->dma,
@@ -5384,15 +5267,15 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
int orig_node = dev_to_node(dev);
- int numa_node = -1;
+ int ring_node = -1;
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
if (rx_ring->q_vector)
- numa_node = rx_ring->q_vector->numa_node;
+ ring_node = rx_ring->q_vector->numa_node;
- rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
+ rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
if (!rx_ring->rx_buffer_info)
rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info)
@@ -5404,7 +5287,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- set_dev_node(dev, numa_node);
+ set_dev_node(dev, ring_node);
rx_ring->desc = dma_alloc_coherent(dev,
rx_ring->size,
&rx_ring->dma,
@@ -6319,25 +6202,55 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
ixgbe_ping_all_vfs(adapter);
}
+static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+
+ if (tx_ring->next_to_use != tx_ring->next_to_clean)
+ return true;
+ }
+
+ return false;
+}
+
+static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ int i, j;
+
+ if (!adapter->num_vfs)
+ return false;
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ for (j = 0; j < q_per_pool; j++) {
+ u32 h, t;
+
+ h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
+ t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
+
+ if (h != t)
+ return true;
+ }
+ }
+
+ return false;
+}
+
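
The q_per_pool line above leans on __ALIGN_MASK(), defined in the kernel as ((x) + (mask)) & ~(mask); applying it to the inverted VMDq mask recovers the number of queues per pool. A worked example, assuming a 0x78 mask (an 8-queues-per-pool layout):

#include <stdio.h>
#include <stdint.h>

/* same shape as the kernel's __ALIGN_MASK() */
#define ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(uint32_t)(mask))

int main(void)
{
	uint32_t vmdq_mask = 0x78;	/* assumed: 8 queues per pool */
	uint32_t q_per_pool = ALIGN_MASK(1, ~vmdq_mask);

	printf("q_per_pool = %u\n", (unsigned)q_per_pool);	/* 8 */
	return 0;
}
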
/**
* ixgbe_watchdog_flush_tx - flush queues on link down
* @adapter: pointer to the device adapter structure
**/
static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
{
- int i;
- int some_tx_pending = 0;
-
if (!netif_carrier_ok(adapter->netdev)) {
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
- if (tx_ring->next_to_use != tx_ring->next_to_clean) {
- some_tx_pending = 1;
- break;
- }
- }
-
- if (some_tx_pending) {
+ if (ixgbe_ring_tx_pending(adapter) ||
+ ixgbe_vf_tx_pending(adapter)) {
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
@@ -6837,6 +6750,36 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it.
+ */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available.
+ */
+ if (likely(ixgbe_desc_unused(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
+{
+ if (likely(ixgbe_desc_unused(tx_ring) >= size))
+ return 0;
+
+ return __ixgbe_maybe_stop_tx(tx_ring, size);
+}
+
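
The stop/re-check pattern moved here has a subtle ordering requirement: the queue is stopped first, a full barrier publishes the stop before the free count is re-read, and the queue restarts if the cleanup path freed descriptors in the meantime. A single-threaded userspace sketch of that control flow (the real code relies on smp_mb() and a concurrent cleaner, which this model only approximates):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ring {
	atomic_int unused;	/* free descriptors */
	atomic_bool stopped;
};

static int maybe_stop(struct ring *r, int need)
{
	if (atomic_load(&r->unused) >= need)
		return 0;

	atomic_store(&r->stopped, true);

	/* full fence: publish the stop before re-reading the free count,
	 * standing in for the driver's smp_mb()
	 */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load(&r->unused) < need)
		return -1;	/* stays stopped; -EBUSY in the driver */

	atomic_store(&r->stopped, false);	/* a reprieve */
	return 0;
}

int main(void)
{
	struct ring r = { .unused = 3, .stopped = false };

	printf("need 2 -> %d\n", maybe_stop(&r, 2));	/* 0: enough room */
	printf("need 8 -> %d\n", maybe_stop(&r, 8));	/* -1: stopped */
	return 0;
}
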
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
IXGBE_TXD_CMD_RS)
@@ -6958,8 +6901,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_ring->next_to_use = i;
- /* notify HW of packet */
- ixgbe_write_tail(tx_ring, i);
+ ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ /* notify HW of packet */
+ ixgbe_write_tail(tx_ring, i);
+ }
return;
dma_error:
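
The guarded tail write above is the doorbell deferral: while the stack signals skb->xmit_more, the MMIO write is skipped, so a burst of queued packets costs a single tail update. A toy model of the batching effect (the function names here are stand-ins):

#include <stdbool.h>
#include <stdio.h>

static int tail_writes;

static void xmit(int next_to_use, bool queue_stopped, bool xmit_more)
{
	if (queue_stopped || !xmit_more) {
		tail_writes++;	/* stands in for the ixgbe_write_tail() MMIO */
		printf("doorbell at descriptor %d\n", next_to_use);
	}
}

int main(void)
{
	xmit(1, false, true);	/* deferred */
	xmit(2, false, true);	/* deferred */
	xmit(3, false, false);	/* flushed: one write covers three packets */
	printf("tail writes: %d\n", tail_writes);	/* 1 */
	return 0;
}
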
@@ -7067,32 +7014,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
input, common, ring->queue_index);
}
-static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
-{
- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
- /* Herbert's original patch had:
- * smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it. */
- smp_mb();
-
- /* We need to check again in a case another CPU has just
- * made room available. */
- if (likely(ixgbe_desc_unused(tx_ring) < size))
- return -EBUSY;
-
- /* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
- ++tx_ring->tx_stats.restart_queue;
- return 0;
-}
-
-static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
-{
- if (likely(ixgbe_desc_unused(tx_ring) >= size))
- return 0;
- return __ixgbe_maybe_stop_tx(tx_ring, size);
-}
-
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -7187,9 +7108,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
}
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
- &adapter->state))) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ adapter->ptp_clock &&
+ !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+ &adapter->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
@@ -7261,8 +7183,6 @@ xmit_fcoe:
#endif /* IXGBE_FCOE */
ixgbe_tx_map(tx_ring, first, hdr_len);
- ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
return NETDEV_TX_OK;
out_drop:
@@ -7735,39 +7655,13 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
const unsigned char *addr,
u16 flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- int err;
-
- if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
- return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
-
- /* Hardware does not support aging addresses so if a
- * ndm_state is given only allow permanent addresses
- */
- if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
- pr_info("%s: FDB only supports static addresses\n",
- ixgbe_driver_name);
- return -EINVAL;
- }
-
+ /* guarantee we can provide a unique filter for the unicast address */
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
- u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
-
- if (netdev_uc_count(dev) < rar_uc_entries)
- err = dev_uc_add_excl(dev, addr);
- else
- err = -ENOMEM;
- } else if (is_multicast_ether_addr(addr)) {
- err = dev_mc_add_excl(dev, addr);
- } else {
- err = -EINVAL;
+ if (IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev))
+ return -ENOMEM;
}
- /* Only return duplicate errors if NLM_F_EXCL is set */
- if (err == -EEXIST && !(flags & NLM_F_EXCL))
- err = 0;
-
- return err;
+ return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
}
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
@@ -7830,9 +7724,17 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
{
struct ixgbe_fwd_adapter *fwd_adapter = NULL;
struct ixgbe_adapter *adapter = netdev_priv(pdev);
+ int used_pools = adapter->num_vfs + adapter->num_rx_pools;
unsigned int limit;
int pool, err;
+	/* Hardware has a limited number of available pools. Each VF and the
+	 * PF require a pool. Check to ensure we don't attempt to use more
+	 * than the available number of pools.
+ */
+ if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
+ return ERR_PTR(-EINVAL);
+
#ifdef CONFIG_RPS
if (vdev->num_rx_queues != vdev->num_tx_queues) {
netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
@@ -8080,7 +7982,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbe_adapter *adapter = NULL;
struct ixgbe_hw *hw;
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
- static int cards_found;
int i, err, pci_using_dac, expected_gts;
unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
@@ -8166,8 +8067,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->watchdog_timeo = 5 * HZ;
strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
- adapter->bd_number = cards_found;
-
/* Setup hw api */
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac;
@@ -8451,7 +8350,6 @@ skip_sriov:
ixgbe_add_sanmac_netdev(netdev);
e_dev_info("%s\n", ixgbe_default_device_descr);
- cards_found++;
#ifdef CONFIG_IXGBE_HWMON
if (ixgbe_sysfs_init(adapter))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index c14d4d89672f..706fc69aa0c5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -250,13 +250,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
if (err)
return err;
- /* While the SR-IOV capability structure reports total VFs to be
- * 64 we limit the actual number that can be allocated to 63 so
- * that some transmit/receive resources can be reserved to the
- * PF. The PCI bus driver already checks for other values out of
- * range.
+ /* While the SR-IOV capability structure reports total VFs to be 64,
+ * we have to limit the actual number allocated based on two factors.
+ * First, we reserve some transmit/receive resources for the PF.
+ * Second, VMDQ also uses the same pools that SR-IOV does. We need to
+ * account for this, so that we don't accidentally allocate more VFs
+ * than we have available pools. The PCI bus driver already checks for
+ * other values out of range.
*/
- if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT)
+ if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS)
return -EPERM;
adapter->num_vfs = num_vfs;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index e6b07c2a01fe..dfd55d83bc03 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2194,6 +2194,8 @@ enum {
#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600))
#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
/* Translated register #defines */
+#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
+#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
@@ -2202,6 +2204,11 @@ enum {
#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
(IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDHN(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDTN(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index)))
+
enum ixgbe_fdir_pballoc_type {
IXGBE_FDIR_PBALLOC_NONE = 0,
IXGBE_FDIR_PBALLOC_64K = 1,
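
The translated-register macros above compose linearly: the Tx head register for queue q of VF n lives at pool index q_per_pool * n + q, each pool slot being 0x40 bytes apart. A worked offset computation with assumed illustrative values:

#include <stdio.h>

/* same shape as the IXGBE_PVFTDH/IXGBE_PVFTDHN macros above */
#define PVFTDH(P)		(0x06010 + (0x40 * (P)))
#define PVFTDHN(qpp, vf, q)	PVFTDH((qpp) * (vf) + (q))

int main(void)
{
	/* 2 queues per pool, VF 3, queue 1: pool index 7 */
	printf("PVFTDH = 0x%05X\n", (unsigned)PVFTDHN(2, 3, 1)); /* 0x061D0 */
	return 0;
}
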
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index d420f124633f..cc0e5b7ff041 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -523,7 +523,7 @@ static const struct ixgbevf_reg_test reg_test_vf[] = {
{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
- { 0, 0, 0, 0 }
+ { .reg = 0 }
};
static const u32 register_test_patterns[] = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index a0a1de9ce238..ba96cb5b886d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -385,7 +385,6 @@ struct ixgbevf_adapter {
/* structs defined in ixgbe_vf.h */
struct ixgbe_hw hw;
u16 msg_enable;
- u16 bd_number;
/* Interrupt Throttle Rate */
u32 eitr_param;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index c22a00c3621a..030a219c85e3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3464,7 +3464,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbevf_adapter *adapter = NULL;
struct ixgbe_hw *hw = NULL;
const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
- static int cards_found;
int err, pci_using_dac;
err = pci_enable_device(pdev);
@@ -3525,8 +3524,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ixgbevf_assign_netdev_ops(netdev);
- adapter->bd_number = cards_found;
-
/* Setup hw api */
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac;
@@ -3601,7 +3598,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_dbg(hw, "MAC: %d\n", hw->mac.type);
hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
- cards_found++;
return 0;
err_register:
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 4d44d64ae387..9cddd56d02c3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -434,6 +434,21 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
if (!(links_reg & IXGBE_LINKS_UP))
goto out;
+	/* for SFP+ modules and DA cables on 82599 it can take up to 500 usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ udelay(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 24b242277ea1..264eab7d3b26 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -1107,7 +1107,7 @@ static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
u16 v = 0;
if (__xm_phy_read(hw, port, reg, &v))
- pr_warning("%s: phy read timed out\n", hw->dev[port]->name);
+ pr_warn("%s: phy read timed out\n", hw->dev[port]->name);
return v;
}
@@ -1903,7 +1903,7 @@ static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
return 0;
}
- pr_warning("%s: phy write timeout\n", hw->dev[port]->name);
+ pr_warn("%s: phy write timeout\n", hw->dev[port]->name);
return -EIO;
}
@@ -1931,7 +1931,7 @@ static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
u16 v = 0;
if (__gm_phy_read(hw, port, reg, &v))
- pr_warning("%s: phy read timeout\n", hw->dev[port]->name);
+ pr_warn("%s: phy read timeout\n", hw->dev[port]->name);
return v;
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index dba48a5ce7ab..bd3366267039 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2814,7 +2814,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
default:
if (net_ratelimit())
- pr_warning("unknown status opcode 0x%x\n", opcode);
+ pr_warn("unknown status opcode 0x%x\n", opcode);
}
} while (hw->st_idx != idx);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index abddcf8c40aa..f3032fec8fce 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2459,6 +2459,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
priv->rx_ring_num = prof->rx_ring_num;
priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
+ priv->cqe_size = mdev->dev->caps.cqe_size;
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
spin_lock_init(&priv->stats_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9c909d23f14c..a33048ee9621 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -588,6 +588,8 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
skb_copy_to_linear_data(skb, va, length);
skb->tail += length;
} else {
+ unsigned int pull_len;
+
/* Move relevant fragments to skb */
used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
skb, length);
@@ -597,16 +599,17 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
}
skb_shinfo(skb)->nr_frags = used_frags;
+ pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
/* Copy headers into the skb linear buffer */
- memcpy(skb->data, va, HEADER_COPY_SIZE);
- skb->tail += HEADER_COPY_SIZE;
+ memcpy(skb->data, va, pull_len);
+ skb->tail += pull_len;
/* Skip headers in first fragment */
- skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
+ skb_shinfo(skb)->frags[0].page_offset += pull_len;
/* Adjust size of first fragment */
- skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE);
- skb->data_len = length - HEADER_COPY_SIZE;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
+ skb->data_len = length - pull_len;
}
return skb;
}
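
The fragment bookkeeping above is the part worth double-checking: pull_len header bytes are copied into the linear area, and the first fragment is advanced and shrunk by the same amount so no byte is counted twice. The arithmetic in isolation, with an assumed 54-byte Ethernet + IPv4 + TCP header:

#include <stdio.h>

struct frag { unsigned int offset, size; };

int main(void)
{
	unsigned int length = 1500, pull_len = 54;	/* assumed header len */
	struct frag f = { .offset = 0, .size = length };

	f.offset += pull_len;	/* skip headers in the first fragment */
	f.size -= pull_len;	/* adjust the fragment size to match */

	printf("linear=%u frag(off=%u size=%u) data_len=%u\n",
	       pull_len, f.offset, f.size, length - pull_len);
	return 0;
}
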
@@ -668,7 +671,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* descriptor offset can be deduced from the CQE index instead of
* reading 'cqe->index' */
index = cq->mcq.cons_index & ring->size_mask;
- cqe = &cq->buf[(index << factor) + factor];
+ cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
@@ -769,7 +772,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
if (l2_tunnel)
- gro_skb->encapsulation = 1;
+ gro_skb->csum_level = 1;
if ((cqe->vlan_my_qpn &
cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
@@ -823,8 +826,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring);
- if (l2_tunnel)
- skb->encapsulation = 1;
+ if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
+ skb->csum_level = 1;
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
@@ -855,7 +858,7 @@ next:
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
- cqe = &cq->buf[(index << factor) + factor];
+ cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
if (++polled == budget)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index dae3da6d8dd0..c44f4237b9be 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -319,7 +319,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
}
}
}
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
return tx_info->nr_txbb;
}
@@ -382,7 +382,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
return true;
index = cons_index & size_mask;
- cqe = &buf[(index << factor) + factor];
+ cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
ring_index = ring->cons & size_mask;
stamp_index = ring_index;
@@ -430,7 +430,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
++cons_index;
index = cons_index & size_mask;
- cqe = &buf[(index << factor) + factor];
+ cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 2a004b347e1d..a49c9d11d8a5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -101,21 +101,24 @@ static void eq_set_ci(struct mlx4_eq *eq, int req_not)
mb();
}
-static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor)
+static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor,
+ u8 eqe_size)
{
/* (entry & (eq->nent - 1)) gives us a cyclic array */
- unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor);
- /* CX3 is capable of extending the EQE from 32 to 64 bytes.
- * When this feature is enabled, the first (in the lower addresses)
+ unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;
+ /* CX3 is capable of extending the EQE from 32 to 64 bytes with
+	 * strides of 64B, 128B and 256B.
+ * When 64B EQE is used, the first (in the lower addresses)
* 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
* contain the legacy EQE information.
+ * In all other cases, the first 32B contains the legacy EQE info.
*/
return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}
-static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor)
+static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size)
{
- struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor);
+ struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size);
return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
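
The ownership test in next_eqe_sw() is a phase comparison: hardware flips the owner bit each pass over the ring, and cons_index & nent (nent being a power of two) flips on the same cadence for software; an entry is only consumed when the two phases agree. A sketch of the predicate:

#include <stdbool.h>
#include <stdio.h>

static bool eqe_valid(unsigned char owner, unsigned int cons_index,
		      unsigned int nent)
{
	/* nent is a power of two, so cons_index & nent toggles per pass */
	return (!!(owner & 0x80) ^ !!(cons_index & nent)) == 0;
}

int main(void)
{
	unsigned int nent = 4;

	printf("%d\n", eqe_valid(0x00, 0, nent));	/* 1: first pass */
	printf("%d\n", eqe_valid(0x00, 4, nent));	/* 0: stale entry */
	printf("%d\n", eqe_valid(0x80, 4, nent));	/* 1: second pass */
	return 0;
}
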
@@ -459,8 +462,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
enum slave_port_gen_event gen_event;
unsigned long flags;
struct mlx4_vport_state *s_info;
+ int eqe_size = dev->caps.eqe_size;
- while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) {
+ while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) {
/*
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
@@ -894,8 +898,10 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
eq->dev = dev;
eq->nent = roundup_pow_of_two(max(nent, 2));
- /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
- npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE;
+ /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
+	 * strides of 64B, 128B and 256B.
+ */
+ npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE;
eq->page_list = kmalloc(npages * sizeof *eq->page_list,
GFP_KERNEL);
@@ -997,8 +1003,10 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *mailbox;
int err;
int i;
- /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
- int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE;
+ /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
+	 * strides of 64B, 128B and 256B.
+ */
+ int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 494753e44ae3..13b2e4a51ef4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -137,7 +137,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[8] = "Dynamic QP updates support",
[9] = "Device managed flow steering IPoIB support",
[10] = "TCP/IP offloads/flow-steering for VXLAN support",
- [11] = "MAD DEMUX (Secure-Host) support"
+ [11] = "MAD DEMUX (Secure-Host) support",
+ [12] = "Large cache line (>64B) CQE stride support",
+ [13] = "Large cache line (>64B) EQE stride support"
};
int i;
@@ -557,6 +559,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
+#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
@@ -733,6 +736,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_rq_sg = field;
MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
dev_cap->max_rq_desc_sz = size;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
+ if (field & (1 << 6))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
+ if (field & (1 << 7))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
MLX4_GET(dev_cap->bmme_flags, outbox,
QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
@@ -1376,6 +1384,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
#define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38)
+#define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b)
#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
@@ -1452,11 +1461,25 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
dev->caps.cqe_size = 64;
- dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
+ dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
} else {
dev->caps.cqe_size = 32;
}
+	/* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
+ if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) &&
+ (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) {
+ dev->caps.eqe_size = cache_line_size();
+ dev->caps.cqe_size = cache_line_size();
+ dev->caps.eqe_factor = 0;
+ MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
+ (ilog2(dev->caps.eqe_size) - 5)),
+ INIT_HCA_EQE_CQE_STRIDE_OFFSET);
+
+		/* Userspace still needs to know in order to support CQE > 32B */
+ dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
+ }
+
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
@@ -1616,6 +1639,17 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
if (byte_field & 0x40) /* 64-bytes cqe enabled */
param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
+	/* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
+ MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
+ if (byte_field) {
+ param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
+ param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
+ param->cqe_size = 1 << ((byte_field &
+ MLX4_CQE_SIZE_MASK_STRIDE) + 5);
+ param->eqe_size = 1 << (((byte_field &
+ MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5);
+ }
+
/* TPT attributes */
MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
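
The stride encoding round-trips between the two commands above: INIT_HCA packs (ilog2(size) - 5) into the low nibble for CQEs and the high nibble for EQEs, and QUERY_HCA recovers the sizes with 1 << (field + 5). A self-contained check for an assumed 128-byte cache line:

#include <stdio.h>

#define CQE_SIZE_MASK_STRIDE	0x03
#define EQE_SIZE_MASK_STRIDE	0x30

static int ilog2_u(unsigned int v)
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	unsigned int size = 128;	/* cache_line_size(), assumed */
	unsigned char field = ((ilog2_u(size) - 5) << 4) |
			      (ilog2_u(size) - 5);

	printf("encoded: 0x%02x\n", (unsigned)field);		/* 0x22 */
	printf("cqe: %u\n", 1u << ((field & CQE_SIZE_MASK_STRIDE) + 5));
	printf("eqe: %u\n", 1u << (((field & EQE_SIZE_MASK_STRIDE) >> 4) + 5));
	return 0;
}
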
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 1fce03ebe5c4..9b835aecac96 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -178,6 +178,8 @@ struct mlx4_init_hca_param {
u8 uar_page_sz; /* log pg sz in 4k chunks */
u8 steering_mode; /* for QUERY_HCA */
u64 dev_cap_enabled;
+ u16 cqe_size; /* For use only when CQE stride feature enabled */
+ u16 eqe_size; /* For use only when EQE stride feature enabled */
};
struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7e2d5d57c598..1f10023af1db 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -104,7 +104,8 @@ module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
"Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
-#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE
+#define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \
+ MLX4_FUNC_CAP_EQE_CQE_STRIDE)
static char mlx4_version[] =
DRV_NAME ": Mellanox ConnectX core driver v"
@@ -196,6 +197,40 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
dev->caps.port_mask[i] = dev->caps.port_type[i];
}
+static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
+{
+ struct mlx4_caps *dev_cap = &dev->caps;
+
+	/* Not supported by FW, or cancelled by the user */
+ if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
+ !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
+ return;
+
+	/* Must have 64B CQE/EQE enabled by FW to use a bigger stride.
+	 * When FW has NCSI it may decide not to report 64B CQEs/EQEs.
+ */
+ if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
+ !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
+ dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
+ dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
+ return;
+ }
+
+ if (cache_line_size() == 128 || cache_line_size() == 256) {
+		mlx4_dbg(dev, "Enabling CQE stride: cache line size supported\n");
+ /* Changing the real data inside CQE size to 32B */
+ dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
+ dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
+
+ if (mlx4_is_master(dev))
+ dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
+ } else {
+		mlx4_dbg(dev, "Disabling CQE stride: cache line size unsupported\n");
+ dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
+ dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
+ }
+}
+
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
int err;
@@ -390,6 +425,14 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
}
+
+ if (dev_cap->flags2 &
+ (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
+ MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
+ mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
+ dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
+ dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
+ }
}
if ((dev->caps.flags &
@@ -397,6 +440,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
mlx4_is_master(dev))
dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
+ if (!mlx4_is_slave(dev))
+ mlx4_enable_cqe_eqe_stride(dev);
+
return 0;
}
@@ -724,11 +770,22 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
dev->caps.cqe_size = 64;
- dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
+ dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
} else {
dev->caps.cqe_size = 32;
}
+ if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
+ dev->caps.eqe_size = hca_param.eqe_size;
+ dev->caps.eqe_factor = 0;
+ }
+
+ if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
+ dev->caps.cqe_size = hca_param.cqe_size;
+		/* Userspace still needs to know when CQE > 32B */
+ dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
+ }
+
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index b508c7887ef8..de10dbb2e6ed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -285,6 +285,9 @@ struct mlx4_icm_table {
#define MLX4_MPT_STATUS_SW 0xF0
#define MLX4_MPT_STATUS_HW 0x00
+#define MLX4_CQE_SIZE_MASK_STRIDE 0x3
+#define MLX4_EQE_SIZE_MASK_STRIDE 0x30
+
/*
* Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3de41be49425..e3d71c386da7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -542,6 +542,7 @@ struct mlx4_en_priv {
unsigned max_mtu;
int base_qpn;
int cqe_factor;
+ int cqe_size;
struct mlx4_en_rss_map rss_map;
__be32 ctrl_flags;
@@ -612,6 +613,11 @@ struct mlx4_mac_entry {
struct rcu_head rcu;
};
+static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz)
+{
+ return buf + idx * cqe_sz;
+}
+
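
mlx4_en_get_cqe() makes the stride explicit: an entry starts at idx * cqe_size, and the + factor added by the callers then skips the reserved first half of a 64-byte CQE in units of the legacy 32-byte layout. The offsets, worked out:

#include <stdio.h>
#include <stddef.h>

#define LEGACY_CQE_SIZE	32

static size_t cqe_payload_offset(int idx, int cqe_size, int factor)
{
	return (size_t)idx * cqe_size + (size_t)factor * LEGACY_CQE_SIZE;
}

int main(void)
{
	/* 64B CQEs (factor 1): entry 3 keeps its payload at byte 224 */
	printf("offset = %zu\n", cqe_payload_offset(3, 64, 1));
	/* 32B CQEs (factor 0): entry 3 payload starts at byte 96 */
	printf("offset = %zu\n", cqe_payload_offset(3, 32, 0));
	return 0;
}
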
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 4f40d7b8629e..cc0485e3c621 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3537,7 +3537,7 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
__func__, __LINE__);
- strncpy(buf, dev->name, IFNAMSIZ);
+ strlcpy(buf, dev->name, IFNAMSIZ);
flush_work(&vdev->reset_task);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 925b296d8ab8..f39cae620f61 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1481,7 +1481,7 @@ static int phy_init(struct net_device *dev)
}
/* phy vendor specific configuration */
- if ((np->phy_oui == PHY_OUI_CICADA)) {
+ if (np->phy_oui == PHY_OUI_CICADA) {
if (init_cicada(dev, np, phyinterface)) {
netdev_info(dev, "%s: phy init failed\n",
pci_name(np->pci_dev));
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 2d6b148528dd..fa2db41e02f8 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -693,11 +693,11 @@ static void yellowfin_tx_timeout(struct net_device *dev)
/* Note: these should be KERN_DEBUG. */
if (yellowfin_debug) {
int i;
- pr_warning(" Rx ring %p: ", yp->rx_ring);
+ pr_warn(" Rx ring %p: ", yp->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
pr_cont(" %08x", yp->rx_ring[i].result_status);
pr_cont("\n");
- pr_warning(" Tx ring %p: ", yp->tx_ring);
+ pr_warn(" Tx ring %p: ", yp->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
pr_cont(" %04x /%08x",
yp->tx_status[i].tx_errs,
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 1159031f885b..32456c79cc73 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1477,9 +1477,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 val;
if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
- pr_warning("%s: chip revisions between 0x%x-0x%x "
- "will not be enabled.\n",
- module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
+ pr_warn("%s: chip revisions between 0x%x-0x%x will not be enabled\n",
+ module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
return -ENODEV;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index b84f5ea3d659..e56c1bb36141 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -39,8 +39,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 61
-#define QLCNIC_LINUX_VERSIONID "5.3.61"
+#define _QLCNIC_LINUX_SUBVERSION 62
+#define QLCNIC_LINUX_VERSIONID "5.3.62"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -540,6 +540,8 @@ struct qlcnic_hardware_context {
u8 lb_mode;
u16 vxlan_port;
struct device *hwmon_dev;
+ u32 post_mode;
+ bool run_post;
};
struct qlcnic_adapter_stats {
@@ -2283,6 +2285,7 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
+#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830
#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
@@ -2307,6 +2310,7 @@ static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
bool status;
status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_QLE8830) ||
(device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
(device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
(device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 476e4998ef99..840bf36b5e9d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -35,6 +35,35 @@ static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
#define QLC_SKIP_INACTIVE_PCI_REGS 7
#define QLC_MAX_LEGACY_FUNC_SUPP 8
+/* 83xx Module type */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_LRM 0x1 /* 10GBase-LRM */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_LR 0x2 /* 10GBase-LR */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_SR 0x3 /* 10GBase-SR */
+#define QLC_83XX_MODULE_DA_10GE_PASSIVE_CP 0x4 /* 10GE passive
+						 * copper (compliant)
+ */
+#define QLC_83XX_MODULE_DA_10GE_ACTIVE_CP 0x5 /* 10GE active limiting
+						 * copper (compliant)
+ */
+#define QLC_83XX_MODULE_DA_10GE_LEGACY_CP 0x6 /* 10GE passive copper
+ * (legacy, best effort)
+ */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_SX 0x7 /* 1000Base-SX */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_LX 0x8 /* 1000Base-LX */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_CX 0x9 /* 1000Base-CX */
+#define QLC_83XX_MODULE_TP_1000BASE_T		0xa /* 1000Base-T */
+#define QLC_83XX_MODULE_DA_1GE_PASSIVE_CP 0xb /* 1GE passive copper
+ * (legacy, best effort)
+ */
+#define QLC_83XX_MODULE_UNKNOWN 0xf /* Unknown module type */
+
+/* Port types */
+#define QLC_83XX_10_CAPABLE BIT_8
+#define QLC_83XX_100_CAPABLE BIT_9
+#define QLC_83XX_1G_CAPABLE BIT_10
+#define QLC_83XX_10G_CAPABLE BIT_11
+#define QLC_83XX_AUTONEG_ENABLE BIT_15
+
static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
{QLCNIC_CMD_CONFIG_INTRPT, 18, 34},
@@ -667,6 +696,7 @@ void qlcnic_83xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int status;
status = qlcnic_83xx_get_port_config(adapter);
@@ -674,13 +704,20 @@ int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
dev_err(&adapter->pdev->dev,
"Get Port Info failed\n");
} else {
- if (QLC_83XX_SFP_10G_CAPABLE(adapter->ahw->port_config))
- adapter->ahw->port_type = QLCNIC_XGBE;
- else
- adapter->ahw->port_type = QLCNIC_GBE;
- if (QLC_83XX_AUTONEG(adapter->ahw->port_config))
- adapter->ahw->link_autoneg = AUTONEG_ENABLE;
+ if (ahw->port_config & QLC_83XX_10G_CAPABLE) {
+ ahw->port_type = QLCNIC_XGBE;
+ } else if (ahw->port_config & QLC_83XX_10_CAPABLE ||
+ ahw->port_config & QLC_83XX_100_CAPABLE ||
+ ahw->port_config & QLC_83XX_1G_CAPABLE) {
+ ahw->port_type = QLCNIC_GBE;
+ } else {
+ ahw->port_type = QLCNIC_XGBE;
+ }
+
+ if (QLC_83XX_AUTONEG(ahw->port_config))
+ ahw->link_autoneg = AUTONEG_ENABLE;
}
return status;
}
@@ -2664,7 +2701,7 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
QLC_83XX_FLASH_STATUS_READY)
break;
- msleep(QLC_83XX_FLASH_STATUS_REG_POLL_DELAY);
+ usleep_range(1000, 1100);
} while (--retries);
if (!retries)
@@ -3176,22 +3213,33 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
break;
}
config = cmd.rsp.arg[3];
- if (QLC_83XX_SFP_PRESENT(config)) {
- switch (ahw->module_type) {
- case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
- case LINKEVENT_MODULE_OPTICAL_SRLR:
- case LINKEVENT_MODULE_OPTICAL_LRM:
- case LINKEVENT_MODULE_OPTICAL_SFP_1G:
- ahw->supported_type = PORT_FIBRE;
- break;
- case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
- case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
- case LINKEVENT_MODULE_TWINAX:
- ahw->supported_type = PORT_TP;
- break;
- default:
- ahw->supported_type = PORT_OTHER;
- }
+ switch (QLC_83XX_SFP_MODULE_TYPE(config)) {
+ case QLC_83XX_MODULE_FIBRE_10GBASE_LRM:
+ case QLC_83XX_MODULE_FIBRE_10GBASE_LR:
+ case QLC_83XX_MODULE_FIBRE_10GBASE_SR:
+ ahw->supported_type = PORT_FIBRE;
+ ahw->port_type = QLCNIC_XGBE;
+ break;
+ case QLC_83XX_MODULE_FIBRE_1000BASE_SX:
+ case QLC_83XX_MODULE_FIBRE_1000BASE_LX:
+ case QLC_83XX_MODULE_FIBRE_1000BASE_CX:
+ ahw->supported_type = PORT_FIBRE;
+ ahw->port_type = QLCNIC_GBE;
+ break;
+ case QLC_83XX_MODULE_TP_1000BASE_T:
+ ahw->supported_type = PORT_TP;
+ ahw->port_type = QLCNIC_GBE;
+ break;
+ case QLC_83XX_MODULE_DA_10GE_PASSIVE_CP:
+ case QLC_83XX_MODULE_DA_10GE_ACTIVE_CP:
+ case QLC_83XX_MODULE_DA_10GE_LEGACY_CP:
+ case QLC_83XX_MODULE_DA_1GE_PASSIVE_CP:
+ ahw->supported_type = PORT_DA;
+ ahw->port_type = QLCNIC_XGBE;
+ break;
+ default:
+ ahw->supported_type = PORT_OTHER;
+ ahw->port_type = QLCNIC_XGBE;
}
if (config & 1)
err = 1;
@@ -3204,9 +3252,9 @@ out:
int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
struct ethtool_cmd *ecmd)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 config = 0;
int status = 0;
- struct qlcnic_hardware_context *ahw = adapter->ahw;
if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
/* Get port configuration info */
@@ -3229,20 +3277,41 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
ecmd->autoneg = AUTONEG_DISABLE;
}
- if (ahw->port_type == QLCNIC_XGBE) {
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->advertising = ADVERTISED_10000baseT_Full;
+ ecmd->supported = (SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_Autoneg);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (ahw->port_config & QLC_83XX_10_CAPABLE)
+ ecmd->advertising |= SUPPORTED_10baseT_Full;
+ if (ahw->port_config & QLC_83XX_100_CAPABLE)
+ ecmd->advertising |= SUPPORTED_100baseT_Full;
+ if (ahw->port_config & QLC_83XX_1G_CAPABLE)
+ ecmd->advertising |= SUPPORTED_1000baseT_Full;
+ if (ahw->port_config & QLC_83XX_10G_CAPABLE)
+ ecmd->advertising |= SUPPORTED_10000baseT_Full;
+ if (ahw->port_config & QLC_83XX_AUTONEG_ENABLE)
+ ecmd->advertising |= ADVERTISED_Autoneg;
} else {
- ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full);
- ecmd->advertising = (ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full);
+ switch (ahw->link_speed) {
+ case SPEED_10:
+ ecmd->advertising = SUPPORTED_10baseT_Full;
+ break;
+ case SPEED_100:
+ ecmd->advertising = SUPPORTED_100baseT_Full;
+ break;
+ case SPEED_1000:
+ ecmd->advertising = SUPPORTED_1000baseT_Full;
+ break;
+ case SPEED_10000:
+ ecmd->advertising = SUPPORTED_10000baseT_Full;
+ break;
+ default:
+ break;
+ }
}
switch (ahw->supported_type) {
@@ -3258,6 +3327,12 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
ecmd->port = PORT_TP;
ecmd->transceiver = XCVR_INTERNAL;
break;
+ case PORT_DA:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_DA;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
default:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
@@ -3272,35 +3347,60 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter,
struct ethtool_cmd *ecmd)
{
- int status = 0;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 config = adapter->ahw->port_config;
+ int status = 0;
- if (ecmd->autoneg)
- adapter->ahw->port_config |= BIT_15;
-
- switch (ethtool_cmd_speed(ecmd)) {
- case SPEED_10:
- adapter->ahw->port_config |= BIT_8;
- break;
- case SPEED_100:
- adapter->ahw->port_config |= BIT_9;
- break;
- case SPEED_1000:
- adapter->ahw->port_config |= BIT_10;
- break;
- case SPEED_10000:
- adapter->ahw->port_config |= BIT_11;
- break;
- default:
- return -EINVAL;
+ /* 83xx devices do not support Half duplex */
+ if (ecmd->duplex == DUPLEX_HALF) {
+ netdev_info(adapter->netdev,
+ "Half duplex mode not supported\n");
+ return -EINVAL;
}
+ if (ecmd->autoneg) {
+ ahw->port_config |= QLC_83XX_AUTONEG_ENABLE;
+ ahw->port_config |= (QLC_83XX_100_CAPABLE |
+ QLC_83XX_1G_CAPABLE |
+ QLC_83XX_10G_CAPABLE);
+ } else { /* force speed */
+ ahw->port_config &= ~QLC_83XX_AUTONEG_ENABLE;
+ switch (ethtool_cmd_speed(ecmd)) {
+ case SPEED_10:
+ ahw->port_config &= ~(QLC_83XX_100_CAPABLE |
+ QLC_83XX_1G_CAPABLE |
+ QLC_83XX_10G_CAPABLE);
+ ahw->port_config |= QLC_83XX_10_CAPABLE;
+ break;
+ case SPEED_100:
+ ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
+ QLC_83XX_1G_CAPABLE |
+ QLC_83XX_10G_CAPABLE);
+ ahw->port_config |= QLC_83XX_100_CAPABLE;
+ break;
+ case SPEED_1000:
+ ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
+ QLC_83XX_100_CAPABLE |
+ QLC_83XX_10G_CAPABLE);
+ ahw->port_config |= QLC_83XX_1G_CAPABLE;
+ break;
+ case SPEED_10000:
+ ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
+ QLC_83XX_100_CAPABLE |
+ QLC_83XX_1G_CAPABLE);
+ ahw->port_config |= QLC_83XX_10G_CAPABLE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
status = qlcnic_83xx_set_port_config(adapter);
if (status) {
- dev_info(&adapter->pdev->dev,
- "Failed to Set Link Speed and autoneg.\n");
- adapter->ahw->port_config = config;
+ netdev_info(adapter->netdev,
+ "Failed to set link speed and autoneg\n");
+ ahw->port_config = config;
}
+
return status;
}
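
The forced-speed branch above clears every other speed-capability bit before
setting the one requested. A minimal, runnable userspace model of that
clear-then-set pattern (illustrative only; the CAP_* names are hypothetical,
not driver constants):

#include <stdint.h>
#include <stdio.h>

#define CAP_10   (1u << 0)
#define CAP_100  (1u << 1)
#define CAP_1G   (1u << 2)
#define CAP_10G  (1u << 3)
#define CAP_ALL  (CAP_10 | CAP_100 | CAP_1G | CAP_10G)

static uint32_t force_single_speed(uint32_t config, uint32_t speed_bit)
{
	config &= ~(CAP_ALL & ~speed_bit);	/* drop the other speed bits */
	config |= speed_bit;			/* keep only the forced one  */
	return config;
}

int main(void)
{
	printf("config = 0x%x\n", force_single_speed(CAP_ALL, CAP_1G));	/* 0x4 */
	return 0;
}
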
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 2bf101a47d02..f3346a3779d3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -83,6 +83,7 @@
/* Firmware image definitions */
#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin"
+#define QLC_83XX_POST_FW_FILE_NAME "83xx_post_fw.bin"
#define QLC_84XX_FW_FILE_NAME "84xx_fw.bin"
#define QLC_83XX_BOOT_FROM_FLASH 0
#define QLC_83XX_BOOT_FROM_FILE 0x12345678
@@ -360,7 +361,6 @@ enum qlcnic_83xx_states {
#define QLC_83XX_SFP_MODULE_TYPE(data) (((data) >> 4) & 0x1F)
#define QLC_83XX_SFP_CU_LENGTH(data) (LSB((data) >> 16))
#define QLC_83XX_SFP_TX_FAULT(data) ((data) & BIT_10)
-#define QLC_83XX_SFP_10G_CAPABLE(data) ((data) & BIT_11)
#define QLC_83XX_LINK_STATS(data) ((data) & BIT_0)
#define QLC_83XX_CURRENT_LINK_SPEED(data) (((data) >> 3) & 7)
#define QLC_83XX_LINK_PAUSE(data) (((data) >> 6) & 3)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 86783e1afcf7..9a2cfe4efac6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2075,6 +2075,121 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
}
+/* POST FW related definitions */
+#define QLC_83XX_POST_SIGNATURE_REG 0x41602014
+#define QLC_83XX_POST_MODE_REG 0x41602018
+#define QLC_83XX_POST_FAST_MODE 0
+#define QLC_83XX_POST_MEDIUM_MODE 1
+#define QLC_83XX_POST_SLOW_MODE 2
+
+/* POST Timeout values in milliseconds */
+#define QLC_83XX_POST_FAST_MODE_TIMEOUT 690
+#define QLC_83XX_POST_MED_MODE_TIMEOUT 2930
+#define QLC_83XX_POST_SLOW_MODE_TIMEOUT 7500
+
+/* POST result values */
+#define QLC_83XX_POST_PASS 0xfffffff0
+#define QLC_83XX_POST_ASIC_STRESS_TEST_FAIL 0xffffffff
+#define QLC_83XX_POST_DDR_TEST_FAIL 0xfffffffe
+#define QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL 0xfffffffc
+#define QLC_83XX_POST_FLASH_TEST_FAIL 0xfffffff8
+
+static int qlcnic_83xx_run_post(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+ struct device *dev = &adapter->pdev->dev;
+ int timeout, count, ret = 0;
+ u32 signature;
+
+ /* Set timeout values with extra 2 seconds of buffer */
+ switch (adapter->ahw->post_mode) {
+ case QLC_83XX_POST_FAST_MODE:
+ timeout = QLC_83XX_POST_FAST_MODE_TIMEOUT + 2000;
+ break;
+ case QLC_83XX_POST_MEDIUM_MODE:
+ timeout = QLC_83XX_POST_MED_MODE_TIMEOUT + 2000;
+ break;
+ case QLC_83XX_POST_SLOW_MODE:
+ timeout = QLC_83XX_POST_SLOW_MODE_TIMEOUT + 2000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ strncpy(fw_info->fw_file_name, QLC_83XX_POST_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+
+ ret = request_firmware(&fw_info->fw, fw_info->fw_file_name, dev);
+ if (ret) {
+ dev_err(dev, "POST firmware can not be loaded, skipping POST\n");
+ return 0;
+ }
+
+ ret = qlcnic_83xx_copy_fw_file(adapter);
+ if (ret)
+ return ret;
+
+ /* Clear the POST signature register */
+ qlcnic_ind_wr(adapter, QLC_83XX_POST_SIGNATURE_REG, 0);
+
+ /* Set POST mode */
+ qlcnic_ind_wr(adapter, QLC_83XX_POST_MODE_REG,
+ adapter->ahw->post_mode);
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FILE);
+
+ qlcnic_83xx_start_hw(adapter);
+
+ count = 0;
+ do {
+ msleep(100);
+ count += 100;
+
+ signature = qlcnic_ind_rd(adapter, QLC_83XX_POST_SIGNATURE_REG);
+ if (signature == QLC_83XX_POST_PASS)
+ break;
+ } while (timeout > count);
+
+ if (timeout <= count) {
+ dev_err(dev, "POST timed out, signature = 0x%08x\n", signature);
+ return -EIO;
+ }
+
+ switch (signature) {
+ case QLC_83XX_POST_PASS:
+ dev_info(dev, "POST passed, Signature = 0x%08x\n", signature);
+ break;
+ case QLC_83XX_POST_ASIC_STRESS_TEST_FAIL:
+ dev_err(dev, "POST failed, Test case : ASIC STRESS TEST, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ case QLC_83XX_POST_DDR_TEST_FAIL:
+ dev_err(dev, "POST failed, Test case : DDT TEST, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ case QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL:
+ dev_err(dev, "POST failed, Test case : ASIC MEMORY TEST, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ case QLC_83XX_POST_FLASH_TEST_FAIL:
+ dev_err(dev, "POST failed, Test case : FLASH TEST, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ default:
+ dev_err(dev, "POST failed, Test case : INVALID, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ }
+
+ return ret;
+}
+
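qlcnic_83xx_run_post() polls the POST signature register in 100 ms steps
until the pass value shows up or the mode-specific budget is exhausted. A
runnable userspace model of that poll-with-timeout loop (read_status() is a
hypothetical stand-in for the indirect register read):

#include <stdint.h>
#include <stdio.h>

#define POLL_STEP_MS 100
#define PASS_VALUE   0xfffffff0u

static uint32_t read_status(int elapsed_ms)
{
	return elapsed_ms >= 300 ? PASS_VALUE : 0;	/* toy device: done at 300 ms */
}

static int poll_for_pass(int timeout_ms)
{
	uint32_t status = 0;
	int count = 0;

	do {
		/* the driver msleep()s here */
		count += POLL_STEP_MS;
		status = read_status(count);
		if (status == PASS_VALUE)
			return 0;
	} while (timeout_ms > count);

	fprintf(stderr, "timed out, signature = 0x%08x\n", status);
	return -1;
}

int main(void)
{
	return poll_for_pass(690 + 2000) ? 1 : 0;	/* fast mode + 2 s buffer */
}
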
static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
{
struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
@@ -2119,8 +2234,27 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
if (qlcnic_83xx_copy_bootloader(adapter))
return err;
+
+ /* Check if POST needs to be run */
+ if (adapter->ahw->run_post) {
+ err = qlcnic_83xx_run_post(adapter);
+ if (err)
+ return err;
+
+ /* No need to run POST in next reset sequence */
+ adapter->ahw->run_post = false;
+
+ /* Reset the adapter again to load the regular firmware */
+ qlcnic_83xx_stop_hw(adapter);
+ qlcnic_83xx_init_hw(adapter);
+
+ err = qlcnic_83xx_copy_bootloader(adapter);
+ if (err)
+ return err;
+ }
+
/* Boot either flash image or firmware image from host file system */
- if (qlcnic_load_fw_file) {
+ if (qlcnic_load_fw_file == 1) {
if (qlcnic_83xx_load_fw_image_from_host(adapter))
return err;
} else {
@@ -2284,6 +2418,7 @@ static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
fw_info = ahw->fw_info;
switch (pdev->device) {
case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE8830:
strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
QLC_FW_FILE_NAME_LEN);
break;
@@ -2328,6 +2463,25 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
adapter->rx_mac_learn = false;
ahw->msix_supported = !!qlcnic_use_msi_x;
+ /* Check if POST needs to be run */
+ switch (qlcnic_load_fw_file) {
+ case 2:
+ ahw->post_mode = QLC_83XX_POST_FAST_MODE;
+ ahw->run_post = true;
+ break;
+ case 3:
+ ahw->post_mode = QLC_83XX_POST_MEDIUM_MODE;
+ ahw->run_post = true;
+ break;
+ case 4:
+ ahw->post_mode = QLC_83XX_POST_SLOW_MODE;
+ ahw->run_post = true;
+ break;
+ default:
+ ahw->run_post = false;
+ break;
+ }
+
qlcnic_83xx_init_rings(adapter);
err = qlcnic_83xx_init_mailbox_work(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 851cb4a80d50..8102673cb37f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
}
return -EIO;
}
- msleep(1);
+ usleep_range(1000, 1500);
}
if (id_reg)
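
The msleep(1) -> usleep_range(1000, 1500) conversions in this and the next
few hunks follow the usual kernel guidance for short delays: msleep() is
jiffy-based and a 1 ms request can oversleep by many milliseconds, while
usleep_range() is hrtimer-backed and bounds the wait explicitly. The idiom,
as a minimal kernel-style sketch:

#include <linux/delay.h>

static void short_settle_delay(void)
{
	/* at least 1 ms, at most 1.5 ms; the slack lets timers coalesce */
	usleep_range(1000, 1500);
}
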
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index c4262c23ed7c..be41e4c77b65 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -537,7 +537,7 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
- msleep(1);
+ usleep_range(1000, 1500);
QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
@@ -1198,7 +1198,7 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
flashaddr += 8;
}
}
- msleep(1);
+ usleep_range(1000, 1500);
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
@@ -1295,7 +1295,7 @@ next:
rc = qlcnic_validate_firmware(adapter);
if (rc != 0) {
release_firmware(adapter->fw);
- msleep(1);
+ usleep_range(1000, 1500);
goto next;
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index e45bf09af0c9..18e5de72e9b4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -1753,7 +1753,7 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
if (qlcnic_encap_length(sts_data[1]) &&
skb->ip_summed == CHECKSUM_UNNECESSARY) {
- skb->encapsulation = 1;
+ skb->csum_level = 1;
adapter->stats.encap_rx_csummed++;
}
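
The field swap above reflects the csum_level API: with ip_summed set to
CHECKSUM_UNNECESSARY, skb->csum_level counts how many nested checksums
beyond the outer one the hardware verified, so an encapsulated frame with
both checksums checked reports level 1. A kernel-style sketch of that
marking (an illustration, not driver code):

#include <linux/skbuff.h>

static void mark_encap_csum_verified(struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = 1;	/* outer plus one inner checksum verified */
}
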
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index cf08b2de071e..f5e29f7bdae3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
int qlcnic_load_fw_file;
-MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
+MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file, 2=POST in fast mode, 3=POST in medium mode, 4=POST in slow mode)");
module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -111,6 +111,7 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
static const struct pci_device_id qlcnic_pci_tbl[] = {
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
@@ -228,6 +229,11 @@ static const struct qlcnic_board_info qlcnic_boards[] = {
PCI_DEVICE_ID_QLOGIC_QLE834X,
0x0, 0x0, "8300 Series 1/10GbE Controller" },
{ PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE8830,
+ 0x0,
+ 0x0,
+ "8830 Series 1/10GbE Controller" },
+ { PCI_VENDOR_ID_QLOGIC,
PCI_DEVICE_ID_QLOGIC_QLE824X,
PCI_VENDOR_ID_QLOGIC,
0x203,
@@ -1131,6 +1137,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
*bar = QLCNIC_82XX_BAR0_LENGTH;
break;
case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE8830:
case PCI_DEVICE_ID_QLOGIC_QLE844X:
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
@@ -2474,6 +2481,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
break;
case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE8830:
case PCI_DEVICE_ID_QLOGIC_QLE844X:
qlcnic_83xx_register_map(ahw);
break;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 3e96f269150d..6c904a6cad2a 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1922,7 +1922,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
sbq_desc->p.skb = NULL;
skb_reserve(skb, NET_IP_ALIGN);
}
- while (length > 0) {
+ do {
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
size = (length < rx_ring->lbq_buf_size) ? length :
rx_ring->lbq_buf_size;
@@ -1939,7 +1939,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb->truesize += size;
length -= size;
i++;
- }
+ } while (length > 0);
ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
&hlen);
__pskb_pull_tail(skb, hlen);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 0921302553c6..1d81238fcb93 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -52,6 +52,10 @@
#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw"
#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw"
#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw"
+#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw"
+#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw"
+#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
+#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#ifdef RTL8169_DEBUG
#define assert(expr) \
@@ -147,6 +151,10 @@ enum mac_version {
RTL_GIGA_MAC_VER_42,
RTL_GIGA_MAC_VER_43,
RTL_GIGA_MAC_VER_44,
+ RTL_GIGA_MAC_VER_45,
+ RTL_GIGA_MAC_VER_46,
+ RTL_GIGA_MAC_VER_47,
+ RTL_GIGA_MAC_VER_48,
RTL_GIGA_MAC_NONE = 0xff,
};
@@ -282,6 +290,18 @@ static const struct {
[RTL_GIGA_MAC_VER_44] =
_R("RTL8411", RTL_TD_1, FIRMWARE_8411_2,
JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_45] =
+ _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_46] =
+ _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_47] =
+ _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1,
+ JUMBO_1K, false),
+ [RTL_GIGA_MAC_VER_48] =
+ _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2,
+ JUMBO_1K, false),
};
#undef _R
@@ -410,6 +430,7 @@ enum rtl8168_8101_registers {
#define EPHYAR_DATA_MASK 0xffff
DLLPR = 0xd0,
#define PFM_EN (1 << 6)
+#define TX_10M_PS_EN (1 << 7)
DBG_REG = 0xd1,
#define FIX_NAK_1 (1 << 4)
#define FIX_NAK_2 (1 << 3)
@@ -429,6 +450,8 @@ enum rtl8168_8101_registers {
#define EFUSEAR_REG_MASK 0x03ff
#define EFUSEAR_REG_SHIFT 8
#define EFUSEAR_DATA_MASK 0xff
+ MISC_1 = 0xf2,
+#define PFM_D3COLD_EN (1 << 6)
};
enum rtl8168_registers {
@@ -447,6 +470,7 @@ enum rtl8168_registers {
#define ERIAR_MASK_SHIFT 12
#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
+#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
EPHY_RXER_NUM = 0x7c,
@@ -598,6 +622,9 @@ enum rtl_register_content {
/* DumpCounterCommand */
CounterDump = 0x8,
+
+ /* magic enable v2 */
+ MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */
};
enum rtl_desc_bit {
@@ -823,6 +850,10 @@ MODULE_FIRMWARE(FIRMWARE_8106E_1);
MODULE_FIRMWARE(FIRMWARE_8106E_2);
MODULE_FIRMWARE(FIRMWARE_8168G_2);
MODULE_FIRMWARE(FIRMWARE_8168G_3);
+MODULE_FIRMWARE(FIRMWARE_8168H_1);
+MODULE_FIRMWARE(FIRMWARE_8168H_2);
+MODULE_FIRMWARE(FIRMWARE_8107E_1);
+MODULE_FIRMWARE(FIRMWARE_8107E_2);
static void rtl_lock_work(struct rtl8169_private *tp)
{
@@ -1514,8 +1545,17 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
options = RTL_R8(Config3);
if (options & LinkUp)
wolopts |= WAKE_PHY;
- if (options & MagicPacket)
- wolopts |= WAKE_MAGIC;
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_45:
+ case RTL_GIGA_MAC_VER_46:
+ if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
+ wolopts |= WAKE_MAGIC;
+ break;
+ default:
+ if (options & MagicPacket)
+ wolopts |= WAKE_MAGIC;
+ break;
+ }
options = RTL_R8(Config5);
if (options & UWF)
@@ -1543,24 +1583,48 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
void __iomem *ioaddr = tp->mmio_addr;
- unsigned int i;
+ unsigned int i, tmp;
static const struct {
u32 opt;
u16 reg;
u8 mask;
} cfg[] = {
{ WAKE_PHY, Config3, LinkUp },
- { WAKE_MAGIC, Config3, MagicPacket },
{ WAKE_UCAST, Config5, UWF },
{ WAKE_BCAST, Config5, BWF },
{ WAKE_MCAST, Config5, MWF },
- { WAKE_ANY, Config5, LanWake }
+ { WAKE_ANY, Config5, LanWake },
+ { WAKE_MAGIC, Config3, MagicPacket }
};
u8 options;
RTL_W8(Cfg9346, Cfg9346_Unlock);
- for (i = 0; i < ARRAY_SIZE(cfg); i++) {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_45:
+ case RTL_GIGA_MAC_VER_46:
+ tmp = ARRAY_SIZE(cfg) - 1;
+ if (wolopts & WAKE_MAGIC)
+ rtl_w1w0_eri(tp,
+ 0x0dc,
+ ERIAR_MASK_0100,
+ MagicPacket_v2,
+ 0x0000,
+ ERIAR_EXGMAC);
+ else
+ rtl_w1w0_eri(tp,
+ 0x0dc,
+ ERIAR_MASK_0100,
+ 0x0000,
+ MagicPacket_v2,
+ ERIAR_EXGMAC);
+ break;
+ default:
+ tmp = ARRAY_SIZE(cfg);
+ break;
+ }
+
+ for (i = 0; i < tmp; i++) {
options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
if (wolopts & cfg[i].opt)
options |= cfg[i].mask;
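
Both WOL paths above go through rtl_w1w0_eri(), a write-ones/write-zeros
helper that takes one mask of bits to set and one to clear in a single
read-modify-write. A runnable userspace model of that helper:

#include <stdint.h>
#include <stdio.h>

#define MAGIC_PACKET_V2 (1u << 16)

static uint32_t w1w0(uint32_t reg, uint32_t set, uint32_t clear)
{
	return (reg & ~clear) | set;
}

int main(void)
{
	uint32_t reg = 0;

	reg = w1w0(reg, MAGIC_PACKET_V2, 0);	/* enable magic-packet WOL  */
	printf("0x%08x\n", reg);		/* 0x00010000 */
	reg = w1w0(reg, 0, MAGIC_PACKET_V2);	/* disable magic-packet WOL */
	printf("0x%08x\n", reg);		/* 0x00000000 */
	return 0;
}
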
@@ -2045,6 +2109,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
u32 val;
int mac_version;
} mac_info[] = {
+ /* 8168H family. */
+ { 0x7cf00000, 0x54100000, RTL_GIGA_MAC_VER_46 },
+ { 0x7cf00000, 0x54000000, RTL_GIGA_MAC_VER_45 },
+
/* 8168G family. */
{ 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 },
{ 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 },
@@ -2140,6 +2208,14 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
tp->mac_version = tp->mii.supports_gmii ?
RTL_GIGA_MAC_VER_42 :
RTL_GIGA_MAC_VER_43;
+ } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
+ tp->mac_version = tp->mii.supports_gmii ?
+ RTL_GIGA_MAC_VER_45 :
+ RTL_GIGA_MAC_VER_47;
+ } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
+ tp->mac_version = tp->mii.supports_gmii ?
+ RTL_GIGA_MAC_VER_46 :
+ RTL_GIGA_MAC_VER_48;
}
}
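
The mac_info[] table above is matched by masking the chip identification
register and comparing entry by entry, first hit wins (an assumption read
off the mask/value pairs). A minimal runnable sketch:

#include <stdint.h>
#include <stdio.h>

struct mac_match { uint32_t mask, val; int ver; };

static int lookup_ver(const struct mac_match *tbl, int n, uint32_t reg)
{
	int i;

	for (i = 0; i < n; i++)
		if ((reg & tbl[i].mask) == tbl[i].val)
			return tbl[i].ver;
	return -1;
}

int main(void)
{
	static const struct mac_match tbl[] = {
		{ 0x7cf00000, 0x54100000, 46 },	/* 8168H */
		{ 0x7cf00000, 0x54000000, 45 },	/* 8168H */
	};

	printf("ver %d\n", lookup_ver(tbl, 2, 0x54100000));	/* ver 46 */
	return 0;
}
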
@@ -3465,6 +3541,189 @@ static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
}
+static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ u16 dout_tapbin;
+ u32 data;
+
+ rtl_apply_firmware(tp);
+
+ /* CHN EST parameters adjust - giga master */
+ rtl_writephy(tp, 0x1f, 0x0a43);
+ rtl_writephy(tp, 0x13, 0x809b);
+ rtl_w1w0_phy(tp, 0x14, 0x8000, 0xf800);
+ rtl_writephy(tp, 0x13, 0x80a2);
+ rtl_w1w0_phy(tp, 0x14, 0x8000, 0xff00);
+ rtl_writephy(tp, 0x13, 0x80a4);
+ rtl_w1w0_phy(tp, 0x14, 0x8500, 0xff00);
+ rtl_writephy(tp, 0x13, 0x809c);
+ rtl_w1w0_phy(tp, 0x14, 0xbd00, 0xff00);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* CHN EST parameters adjust - giga slave */
+ rtl_writephy(tp, 0x1f, 0x0a43);
+ rtl_writephy(tp, 0x13, 0x80ad);
+ rtl_w1w0_phy(tp, 0x14, 0x7000, 0xf800);
+ rtl_writephy(tp, 0x13, 0x80b4);
+ rtl_w1w0_phy(tp, 0x14, 0x5000, 0xff00);
+ rtl_writephy(tp, 0x13, 0x80ac);
+ rtl_w1w0_phy(tp, 0x14, 0x4000, 0xff00);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* CHN EST parameters adjust - fnet */
+ rtl_writephy(tp, 0x1f, 0x0a43);
+ rtl_writephy(tp, 0x13, 0x808e);
+ rtl_w1w0_phy(tp, 0x14, 0x1200, 0xff00);
+ rtl_writephy(tp, 0x13, 0x8090);
+ rtl_w1w0_phy(tp, 0x14, 0xe500, 0xff00);
+ rtl_writephy(tp, 0x13, 0x8092);
+ rtl_w1w0_phy(tp, 0x14, 0x9f00, 0xff00);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* enable R-tune & PGA-retune function */
+ dout_tapbin = 0;
+ rtl_writephy(tp, 0x1f, 0x0a46);
+ data = rtl_readphy(tp, 0x13);
+ data &= 3;
+ data <<= 2;
+ dout_tapbin |= data;
+ data = rtl_readphy(tp, 0x12);
+ data &= 0xc000;
+ data >>= 14;
+ dout_tapbin |= data;
+ dout_tapbin = ~(dout_tapbin ^ 0x08);
+ dout_tapbin <<= 12;
+ dout_tapbin &= 0xf000;
+ rtl_writephy(tp, 0x1f, 0x0a43);
+ rtl_writephy(tp, 0x13, 0x827a);
+ rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
+ rtl_writephy(tp, 0x13, 0x827b);
+ rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
+ rtl_writephy(tp, 0x13, 0x827c);
+ rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
+ rtl_writephy(tp, 0x13, 0x827d);
+ rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
+
+ rtl_writephy(tp, 0x1f, 0x0a43);
+ rtl_writephy(tp, 0x13, 0x0811);
+ rtl_w1w0_phy(tp, 0x14, 0x0800, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0a42);
+ rtl_w1w0_phy(tp, 0x16, 0x0002, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* enable GPHY 10M */
+ rtl_writephy(tp, 0x1f, 0x0a44);
+ rtl_w1w0_phy(tp, 0x11, 0x0800, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* SAR ADC performance */
+ rtl_writephy(tp, 0x1f, 0x0bca);
+ rtl_w1w0_phy(tp, 0x17, 0x4000, 0x3000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ rtl_writephy(tp, 0x1f, 0x0a43);
+ rtl_writephy(tp, 0x13, 0x803f);
+ rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
+ rtl_writephy(tp, 0x13, 0x8047);
+ rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
+ rtl_writephy(tp, 0x13, 0x804f);
+ rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
+ rtl_writephy(tp, 0x13, 0x8057);
+ rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
+ rtl_writephy(tp, 0x13, 0x805f);
+ rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
+ rtl_writephy(tp, 0x13, 0x8067);
+ rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
+ rtl_writephy(tp, 0x13, 0x806f);
+ rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* disable phy pfm mode */
+ rtl_writephy(tp, 0x1f, 0x0a44);
+ rtl_w1w0_phy(tp, 0x14, 0x0000, 0x0080);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* Check ALDPS bit, disable it if enabled */
+ rtl_writephy(tp, 0x1f, 0x0a43);
+ if (rtl_readphy(tp, 0x10) & 0x0004)
+ rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
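The writes above repeat one access pattern: PHY register 0x1f selects a
page, 0x13 selects an extended address inside it, and 0x14 carries the
data, with rtl_w1w0_phy() doing the read-modify-write. A hypothetical
wrapper (a sketch reusing the driver's own helpers, not an existing
function):

static void rtl_ext_phy_w1w0(struct rtl8169_private *tp, int page,
			     int addr, int set, int clear)
{
	rtl_writephy(tp, 0x1f, page);	/* select PHY page         */
	rtl_writephy(tp, 0x13, addr);	/* select extended address */
	rtl_w1w0_phy(tp, 0x14, set, clear);
	rtl_writephy(tp, 0x1f, 0x0000);	/* back to page 0          */
}
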
+static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0;
+ u16 rlen;
+ u32 data;
+
+ rtl_apply_firmware(tp);
+
+ /* CHN EST parameter update */
+ rtl_writephy(tp, 0x1f, 0x0a43);
+ rtl_writephy(tp, 0x13, 0x808a);
+ rtl_w1w0_phy(tp, 0x14, 0x000a, 0x003f);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* enable R-tune & PGA-retune function */
+ rtl_writephy(tp, 0x1f, 0x0a43);
+ rtl_writephy(tp, 0x13, 0x0811);
+ rtl_w1w0_phy(tp, 0x14, 0x0800, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0a42);
+ rtl_w1w0_phy(tp, 0x16, 0x0002, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* enable GPHY 10M */
+ rtl_writephy(tp, 0x1f, 0x0a44);
+ rtl_w1w0_phy(tp, 0x11, 0x0800, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
+ data = r8168_mac_ocp_read(tp, 0xdd02);
+ ioffset_p3 = (data & 0x80) >> 7;
+ ioffset_p3 <<= 3;
+
+ data = r8168_mac_ocp_read(tp, 0xdd00);
+ ioffset_p3 |= (data & 0xe000) >> 13;
+ ioffset_p2 = (data & 0x1e00) >> 9;
+ ioffset_p1 = (data & 0x01e0) >> 5;
+ ioffset_p0 = (data & 0x0010) >> 4;
+ ioffset_p0 <<= 3;
+ ioffset_p0 |= data & 0x07;
+ data = (ioffset_p3 << 12) | (ioffset_p2 << 8) | (ioffset_p1 << 4) | ioffset_p0;
+
+ if ((ioffset_p3 != 0x0F) || (ioffset_p2 != 0x0F) ||
+ (ioffset_p1 != 0x0F) || (ioffset_p0 == 0x0F)) {
+ rtl_writephy(tp, 0x1f, 0x0bcf);
+ rtl_writephy(tp, 0x16, data);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ }
+
+ /* Modify rlen (TX LPF corner frequency) level */
+ rtl_writephy(tp, 0x1f, 0x0bcd);
+ data = rtl_readphy(tp, 0x16);
+ data &= 0x000f;
+ rlen = 0;
+ if (data > 3)
+ rlen = data - 3;
+ data = rlen | (rlen << 4) | (rlen << 8) | (rlen << 12);
+ rtl_writephy(tp, 0x17, data);
+ rtl_writephy(tp, 0x1f, 0x0bcd);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* disable phy pfm mode */
+ rtl_writephy(tp, 0x1f, 0x0a44);
+ rtl_w1w0_phy(tp, 0x14, 0x0000, 0x0080);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* Check ALDPS bit, disable it if enabled */
+ rtl_writephy(tp, 0x1f, 0x0a43);
+ if (rtl_readphy(tp, 0x10) & 0x0004)
+ rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
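Worked example of the ioffset assembly above: four 4-bit fields are pulled
out of two OCP words and packed into one 16-bit value, p3 in the top nibble
down to p0. Runnable userspace sketch with sample register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dd02 = 0x0080, dd00 = 0xe1f7;	/* sample register reads */
	uint16_t p3, p2, p1, p0, packed;

	p3 = ((dd02 & 0x80) >> 7) << 3 | ((dd00 & 0xe000) >> 13);
	p2 = (dd00 & 0x1e00) >> 9;
	p1 = (dd00 & 0x01e0) >> 5;
	p0 = ((dd00 & 0x0010) >> 4) << 3 | (dd00 & 0x07);

	packed = (p3 << 12) | (p2 << 8) | (p1 << 4) | p0;
	printf("0x%04x\n", packed);	/* prints 0xf0ff for these samples */
	return 0;
}
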
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3655,6 +3914,14 @@ static void rtl_hw_phy_config(struct net_device *dev)
case RTL_GIGA_MAC_VER_44:
rtl8168g_2_hw_phy_config(tp);
break;
+ case RTL_GIGA_MAC_VER_45:
+ case RTL_GIGA_MAC_VER_47:
+ rtl8168h_1_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_48:
+ rtl8168h_2_hw_phy_config(tp);
+ break;
case RTL_GIGA_MAC_VER_41:
default:
@@ -3866,6 +4133,10 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
case RTL_GIGA_MAC_VER_44:
+ case RTL_GIGA_MAC_VER_45:
+ case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_47:
+ case RTL_GIGA_MAC_VER_48:
ops->write = r8168g_mdio_write;
ops->read = r8168g_mdio_read;
break;
@@ -3920,6 +4191,10 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
case RTL_GIGA_MAC_VER_44:
+ case RTL_GIGA_MAC_VER_45:
+ case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_47:
+ case RTL_GIGA_MAC_VER_48:
RTL_W32(RxConfig, RTL_R32(RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
break;
@@ -3988,6 +4263,10 @@ static void r810x_pll_power_up(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_13:
case RTL_GIGA_MAC_VER_16:
break;
+ case RTL_GIGA_MAC_VER_47:
+ case RTL_GIGA_MAC_VER_48:
+ RTL_W8(PMCH, RTL_R8(PMCH) | 0xC0);
+ break;
default:
RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
break;
@@ -4088,6 +4367,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_31:
case RTL_GIGA_MAC_VER_32:
case RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_45:
+ case RTL_GIGA_MAC_VER_46:
RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
break;
case RTL_GIGA_MAC_VER_40:
@@ -4112,6 +4393,10 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_33:
RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
break;
+ case RTL_GIGA_MAC_VER_45:
+ case RTL_GIGA_MAC_VER_46:
+ RTL_W8(PMCH, RTL_R8(PMCH) | 0xC0);
+ break;
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
@@ -4154,6 +4439,8 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
case RTL_GIGA_MAC_VER_43:
+ case RTL_GIGA_MAC_VER_47:
+ case RTL_GIGA_MAC_VER_48:
ops->down = r810x_pll_power_down;
ops->up = r810x_pll_power_up;
break;
@@ -4183,6 +4470,8 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_44:
+ case RTL_GIGA_MAC_VER_45:
+ case RTL_GIGA_MAC_VER_46:
ops->down = r8168_pll_power_down;
ops->up = r8168_pll_power_up;
break;
@@ -4233,6 +4522,10 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
case RTL_GIGA_MAC_VER_44:
+ case RTL_GIGA_MAC_VER_45:
+ case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_47:
+ case RTL_GIGA_MAC_VER_48:
RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
default:
@@ -4394,6 +4687,10 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
case RTL_GIGA_MAC_VER_44:
+ case RTL_GIGA_MAC_VER_45:
+ case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_47:
+ case RTL_GIGA_MAC_VER_48:
default:
ops->disable = NULL;
ops->enable = NULL;
@@ -4496,15 +4793,19 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
tp->mac_version == RTL_GIGA_MAC_VER_31) {
rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
- tp->mac_version == RTL_GIGA_MAC_VER_35 ||
- tp->mac_version == RTL_GIGA_MAC_VER_36 ||
- tp->mac_version == RTL_GIGA_MAC_VER_37 ||
- tp->mac_version == RTL_GIGA_MAC_VER_40 ||
- tp->mac_version == RTL_GIGA_MAC_VER_41 ||
- tp->mac_version == RTL_GIGA_MAC_VER_42 ||
- tp->mac_version == RTL_GIGA_MAC_VER_43 ||
- tp->mac_version == RTL_GIGA_MAC_VER_44 ||
- tp->mac_version == RTL_GIGA_MAC_VER_38) {
+ tp->mac_version == RTL_GIGA_MAC_VER_35 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_36 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_37 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_38 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_40 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_41 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_42 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_43 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_44 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_45 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_46 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_47 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_48) {
RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
} else {
@@ -5331,6 +5632,105 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
}
+static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+ u16 rg_saw_cnt;
+ u32 data;
+ static const struct ephy_info e_info_8168h_1[] = {
+ { 0x1e, 0x0800, 0x0001 },
+ { 0x1d, 0x0000, 0x0800 },
+ { 0x05, 0xffff, 0x2089 },
+ { 0x06, 0xffff, 0x5881 },
+ { 0x04, 0xffff, 0x154a },
+ { 0x01, 0xffff, 0x068b }
+ };
+
+ /* disable aspm and clock request before access ephy */
+ RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
+ RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+ rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
+
+ RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+
+ rtl_csi_access_enable_1(tp);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC);
+
+ rtl_w1w0_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC);
+
+ rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
+
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
+ RTL_W8(MaxTxPacketSize, EarlySize);
+
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+
+ /* Adjust EEE LED frequency */
+ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+
+ RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
+ RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);
+
+ RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN);
+
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+
+ rtl_pcie_state_l2l3_enable(tp, false);
+
+ rtl_writephy(tp, 0x1f, 0x0c42);
+ rg_saw_cnt = rtl_readphy(tp, 0x13);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ if (rg_saw_cnt > 0) {
+ u16 sw_cnt_1ms_ini;
+
+ sw_cnt_1ms_ini = 16000000 / rg_saw_cnt;
+ sw_cnt_1ms_ini &= 0x0fff;
+ data = r8168_mac_ocp_read(tp, 0xd412);
+ data &= 0x0fff;
+ data |= sw_cnt_1ms_ini;
+ r8168_mac_ocp_write(tp, 0xd412, data);
+ }
+
+ data = r8168_mac_ocp_read(tp, 0xe056);
+ data &= 0xf0;
+ data |= 0x07;
+ r8168_mac_ocp_write(tp, 0xe056, data);
+
+ data = r8168_mac_ocp_read(tp, 0xe052);
+ data &= 0x8008;
+ data |= 0x6000;
+ r8168_mac_ocp_write(tp, 0xe052, data);
+
+ data = r8168_mac_ocp_read(tp, 0xe0d6);
+ data &= 0x01ff;
+ data |= 0x017f;
+ r8168_mac_ocp_write(tp, 0xe0d6, data);
+
+ data = r8168_mac_ocp_read(tp, 0xd420);
+ data &= 0x0fff;
+ data |= 0x047f;
+ r8168_mac_ocp_write(tp, 0xd420, data);
+
+ r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
+ r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
+ r8168_mac_ocp_write(tp, 0xc094, 0x0000);
+ r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
+}
+
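The sw_cnt_1ms_ini computation in rtl_hw_start_8168h_1() above looks like a
per-millisecond tick count derived from a 16 MHz reference divided by the
saw-counter readback and truncated to 12 bits (an assumption; the patch
does not spell out the hardware semantics). Worked example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t rg_saw_cnt = 10000;	/* sample PHY 0x13 readback */
	uint16_t ini = (16000000 / rg_saw_cnt) & 0x0fff;

	printf("sw_cnt_1ms_ini = 0x%03x\n", ini);	/* 0x640 = 1600 */
	return 0;
}
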
static void rtl_hw_start_8168(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -5441,6 +5841,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
rtl_hw_start_8411_2(tp);
break;
+ case RTL_GIGA_MAC_VER_45:
+ case RTL_GIGA_MAC_VER_46:
+ rtl_hw_start_8168h_1(tp);
+ break;
+
default:
printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
dev->name, tp->mac_version);
@@ -5656,6 +6061,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
case RTL_GIGA_MAC_VER_43:
rtl_hw_start_8168g_2(tp);
break;
+ case RTL_GIGA_MAC_VER_47:
+ case RTL_GIGA_MAC_VER_48:
+ rtl_hw_start_8168h_1(tp);
+ break;
}
RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -5896,7 +6305,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
{
struct skb_shared_info *info = skb_shinfo(skb);
unsigned int cur_frag, entry;
- struct TxDesc * uninitialized_var(txd);
+ struct TxDesc *uninitialized_var(txd);
struct device *d = &tp->pci_dev->dev;
entry = tp->cur_tx;
@@ -7111,6 +7520,10 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
case RTL_GIGA_MAC_VER_44:
+ case RTL_GIGA_MAC_VER_45:
+ case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_47:
+ case RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
@@ -7248,8 +7661,19 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
RTL_W8(Cfg9346, Cfg9346_Unlock);
RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
- if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
- tp->features |= RTL_FEATURE_WOL;
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_45:
+ case RTL_GIGA_MAC_VER_46:
+ if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
+ tp->features |= RTL_FEATURE_WOL;
+ if ((RTL_R8(Config3) & LinkUp) != 0)
+ tp->features |= RTL_FEATURE_WOL;
+ break;
+ default:
+ if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
+ tp->features |= RTL_FEATURE_WOL;
+ break;
+ }
if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
tp->features |= RTL_FEATURE_WOL;
tp->features |= rtl_try_msi(tp, cfg);
@@ -7276,6 +7700,18 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u64_stats_init(&tp->tx_stats.syncp);
/* Get MAC address */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_45 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_46 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_47 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_48) {
+ u16 mac_addr[3];
+
+ *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xE0, ERIAR_EXGMAC);
+ *(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xE4, ERIAR_EXGMAC);
+
+ if (is_valid_ether_addr((u8 *)mac_addr))
+ rtl_rar_set(tp, (u8 *)mac_addr);
+ }
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
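
The MAC-address hunk above lays a 32-bit and a 16-bit ERI read back to back
to form the 6-byte address; casting through a u16 array assumes the CPU
byte order matches the register layout. A runnable sketch of the assembly
on a little-endian host, with made-up register values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t lo = 0x89674523;	/* sample ERI 0xE0 read */
	uint16_t hi = 0xcdab;		/* sample ERI 0xE4 read */
	uint8_t mac[6];

	memcpy(&mac[0], &lo, 4);
	memcpy(&mac[4], &hi, 2);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",	/* 23:45:67:89:ab:cd */
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
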
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 65c220f8661d..320609842211 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -78,7 +78,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
if (buffer->flags & EFX_TX_BUF_SKB) {
(*pkts_compl)++;
(*bytes_compl) += buffer->skb->len;
- dev_kfree_skb_any((struct sk_buff *) buffer->skb);
+ dev_consume_skb_any((struct sk_buff *)buffer->skb);
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
"TX queue %d transmission id %x complete\n",
tx_queue->queue, tx_queue->read_count);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index ec632e666c56..3aad413e74b4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -17,6 +17,7 @@
#include <linux/mfd/syscon.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/regmap.h>
@@ -30,6 +31,12 @@
#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2
#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
+#define EMAC_SPLITTER_CTRL_REG 0x0
+#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
+#define EMAC_SPLITTER_CTRL_SPEED_10 0x2
+#define EMAC_SPLITTER_CTRL_SPEED_100 0x3
+#define EMAC_SPLITTER_CTRL_SPEED_1000 0x0
+
struct socfpga_dwmac {
int interface;
u32 reg_offset;
@@ -37,14 +44,46 @@ struct socfpga_dwmac {
struct device *dev;
struct regmap *sys_mgr_base_addr;
struct reset_control *stmmac_rst;
+ void __iomem *splitter_base;
};
+static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
+ void __iomem *splitter_base = dwmac->splitter_base;
+ u32 val;
+
+ if (!splitter_base)
+ return;
+
+ val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
+ val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK;
+
+ switch (speed) {
+ case 1000:
+ val |= EMAC_SPLITTER_CTRL_SPEED_1000;
+ break;
+ case 100:
+ val |= EMAC_SPLITTER_CTRL_SPEED_100;
+ break;
+ case 10:
+ val |= EMAC_SPLITTER_CTRL_SPEED_10;
+ break;
+ default:
+ return;
+ }
+
+ writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
+}
+
static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
{
struct device_node *np = dev->of_node;
struct regmap *sys_mgr_base_addr;
u32 reg_offset, reg_shift;
int ret;
+ struct device_node *np_splitter;
+ struct resource res_splitter;
dwmac->stmmac_rst = devm_reset_control_get(dev,
STMMAC_RESOURCE_NAME);
@@ -73,6 +112,20 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
return -EINVAL;
}
+ np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0);
+ if (np_splitter) {
+ if (of_address_to_resource(np_splitter, 0, &res_splitter)) {
+ dev_info(dev, "Missing emac splitter address\n");
+ return -EINVAL;
+ }
+
+ dwmac->splitter_base = devm_ioremap_resource(dev, &res_splitter);
+ if (IS_ERR(dwmac->splitter_base)) {
+ dev_info(dev, "Failed to mapping emac splitter\n");
+ return PTR_ERR(dwmac->splitter_base);
+ }
+ }
+
dwmac->reg_offset = reg_offset;
dwmac->reg_shift = reg_shift;
dwmac->sys_mgr_base_addr = sys_mgr_base_addr;
@@ -91,6 +144,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
switch (phymode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
break;
case PHY_INTERFACE_MODE_MII:
@@ -102,6 +156,13 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
return -EINVAL;
}
+ /* Overwrite val to GMII if the splitter core is enabled. The phymode
+ * here is the actual phy mode on the phy hardware, but the phy
+ * interface from the EMAC core is GMII.
+ */
+ if (dwmac->splitter_base)
+ val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+
regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
ctrl |= val << reg_shift;
@@ -196,4 +257,5 @@ const struct stmmac_of_data socfpga_gmac_data = {
.setup = socfpga_dwmac_probe,
.init = socfpga_dwmac_init,
.exit = socfpga_dwmac_exit,
+ .fix_mac_speed = socfpga_dwmac_fix_mac_speed,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index cf4f38db1c0a..3a08a1f78c73 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -261,11 +261,11 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
/* Get and convert ADV/LP_ADV from the HW AN registers */
- if (priv->hw->mac->get_adv)
- priv->hw->mac->get_adv(priv->hw, &adv);
- else
+ if (!priv->hw->mac->get_adv)
return -EOPNOTSUPP; /* should never happen */
+ priv->hw->mac->get_adv(priv->hw, &adv);
+
/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
if (adv.pause & STMMAC_PCS_PAUSE)
@@ -340,19 +340,17 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
if (cmd->autoneg != AUTONEG_ENABLE)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE) {
- mask &= (ADVERTISED_1000baseT_Half |
+ mask &= (ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full);
- spin_lock(&priv->lock);
- if (priv->hw->mac->ctrl_ane)
- priv->hw->mac->ctrl_ane(priv->hw, 1);
- spin_unlock(&priv->lock);
- }
+ spin_lock(&priv->lock);
+ if (priv->hw->mac->ctrl_ane)
+ priv->hw->mac->ctrl_ane(priv->hw, 1);
+ spin_unlock(&priv->lock);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6e6ee226de04..9dbb02d9d9c2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -834,7 +834,7 @@ static int stmmac_init_phy(struct net_device *dev)
/* Stop Advertising 1000BASE Capability if interface is not GMII */
if ((interface == PHY_INTERFACE_MODE_MII) ||
(interface == PHY_INTERFACE_MODE_RMII) ||
- (max_speed < 1000 && max_speed > 0))
+ (max_speed < 1000 && max_speed > 0))
phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index a5b1e1b776fe..b735fa22ac95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -253,7 +253,7 @@ int stmmac_mdio_register(struct net_device *ndev)
}
/*
- * If we're going to bind the MAC to this PHY bus,
+ * If we're going to bind the MAC to this PHY bus,
* and no PHY number was provided to the MAC,
* use the one probed here.
*/
@@ -282,7 +282,7 @@ int stmmac_mdio_register(struct net_device *ndev)
}
if (!found) {
- pr_warning("%s: No PHY found\n", ndev->name);
+ pr_warn("%s: No PHY found\n", ndev->name);
mdiobus_unregister(new_bus);
mdiobus_free(new_bus);
return -ENODEV;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 37f87ff28f03..02d370e58110 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4962,7 +4962,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_cmd |= PCI_COMMAND_PARITY;
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
if (pci_try_set_mwi(pdev))
- pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
+ pr_warn("Could not enable MWI for %s\n", pci_name(pdev));
cas_program_bridge(pdev);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 8216be46540f..904fd1ab5f6e 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -8717,8 +8717,8 @@ static void niu_divide_channels(struct niu_parent *parent,
parent->txchan_per_port[i] = 1;
}
if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
- pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
- parent->index, tot_rx, tot_tx);
+ pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
+ parent->index, tot_rx, tot_tx);
}
}
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index f7415b6bf141..fef5dec2cffe 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -115,7 +115,7 @@ static const struct pci_device_id gem_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
-static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
+static u16 __sungem_phy_read(struct gem *gp, int phy_addr, int reg)
{
u32 cmd;
int limit = 10000;
@@ -141,18 +141,18 @@ static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
return cmd & MIF_FRAME_DATA;
}
-static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
+static inline int _sungem_phy_read(struct net_device *dev, int mii_id, int reg)
{
struct gem *gp = netdev_priv(dev);
- return __phy_read(gp, mii_id, reg);
+ return __sungem_phy_read(gp, mii_id, reg);
}
-static inline u16 phy_read(struct gem *gp, int reg)
+static inline u16 sungem_phy_read(struct gem *gp, int reg)
{
- return __phy_read(gp, gp->mii_phy_addr, reg);
+ return __sungem_phy_read(gp, gp->mii_phy_addr, reg);
}
-static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
+static void __sungem_phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
u32 cmd;
int limit = 10000;
@@ -174,15 +174,15 @@ static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
}
}
-static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
+static inline void _sungem_phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
struct gem *gp = netdev_priv(dev);
- __phy_write(gp, mii_id, reg, val & 0xffff);
+ __sungem_phy_write(gp, mii_id, reg, val & 0xffff);
}
-static inline void phy_write(struct gem *gp, int reg, u16 val)
+static inline void sungem_phy_write(struct gem *gp, int reg, u16 val)
{
- __phy_write(gp, gp->mii_phy_addr, reg, val);
+ __sungem_phy_write(gp, gp->mii_phy_addr, reg, val);
}
static inline void gem_enable_ints(struct gem *gp)
@@ -1687,9 +1687,9 @@ static void gem_init_phy(struct gem *gp)
/* Some PHYs used by apple have problem getting back to us,
* we do an additional reset here
*/
- phy_write(gp, MII_BMCR, BMCR_RESET);
+ sungem_phy_write(gp, MII_BMCR, BMCR_RESET);
msleep(20);
- if (phy_read(gp, MII_BMCR) != 0xffff)
+ if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
break;
if (i == 2)
netdev_warn(gp->dev, "GMAC PHY not responding !\n");
@@ -2012,7 +2012,7 @@ static int gem_check_invariants(struct gem *gp)
for (i = 0; i < 32; i++) {
gp->mii_phy_addr = i;
- if (phy_read(gp, MII_BMCR) != 0xffff)
+ if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
break;
}
if (i == 32) {
@@ -2696,13 +2696,13 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Fallthrough... */
case SIOCGMIIREG: /* Read MII PHY register. */
- data->val_out = __phy_read(gp, data->phy_id & 0x1f,
+ data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f,
data->reg_num & 0x1f);
rc = 0;
break;
case SIOCSMIIREG: /* Write MII PHY register. */
- __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
+ __sungem_phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
data->val_in);
rc = 0;
break;
@@ -2933,8 +2933,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Fill up the mii_phy structure (even if we won't use it) */
gp->phy_mii.dev = dev;
- gp->phy_mii.mdio_read = _phy_read;
- gp->phy_mii.mdio_write = _phy_write;
+ gp->phy_mii.mdio_read = _sungem_phy_read;
+ gp->phy_mii.mdio_write = _sungem_phy_write;
#ifdef CONFIG_PPC_PMAC
gp->phy_mii.platform_data = gp->of_node;
#endif
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index f67539650c38..edb860947da4 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -37,6 +37,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
*/
#define VNET_MAX_RETRIES 10
+static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
+
/* Ordered from largest major to lowest */
static struct vio_version vnet_versions[] = {
{ .major = 1, .minor = 0 },
@@ -283,10 +285,18 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
port->raddr[0], port->raddr[1],
port->raddr[2], port->raddr[3],
port->raddr[4], port->raddr[5]);
- err = -ECONNRESET;
+ break;
}
} while (err == -EAGAIN);
+ if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
+ port->stop_rx_idx = end;
+ port->stop_rx = true;
+ } else {
+ port->stop_rx_idx = 0;
+ port->stop_rx = false;
+ }
+
return err;
}
@@ -451,7 +461,7 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
struct net_device *dev;
struct vnet *vp;
u32 end;
-
+ struct vio_net_desc *desc;
if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
return 0;
@@ -459,7 +469,24 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
if (unlikely(!idx_is_pending(dr, end)))
return 0;
+ /* sync for race conditions with vnet_start_xmit() and tell xmit it
+ * is time to send a trigger.
+ */
dr->cons = next_idx(end, dr);
+ desc = vio_dring_entry(dr, dr->cons);
+ if (desc->hdr.state == VIO_DESC_READY && port->start_cons) {
+ /* vnet_start_xmit() just populated this dring but missed
+ * sending the "start" LDC message to the consumer.
+ * Send a "start" trigger on its behalf.
+ */
+ if (__vnet_tx_trigger(port, dr->cons) > 0)
+ port->start_cons = false;
+ else
+ port->start_cons = true;
+ } else {
+ port->start_cons = true;
+ }
+
vp = port->vp;
dev = vp->dev;
@@ -537,7 +564,7 @@ static void vnet_event(void *arg, int event)
}
if (unlikely(event != LDC_EVENT_DATA_READY)) {
- pr_warning("Unexpected LDC event %d\n", event);
+ pr_warn("Unexpected LDC event %d\n", event);
spin_unlock_irqrestore(&vio->lock, flags);
return;
}
@@ -600,7 +627,7 @@ static void vnet_event(void *arg, int event)
local_irq_restore(flags);
}
-static int __vnet_tx_trigger(struct vnet_port *port)
+static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
{
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
struct vio_dring_data hdr = {
@@ -611,12 +638,21 @@ static int __vnet_tx_trigger(struct vnet_port *port)
.sid = vio_send_sid(&port->vio),
},
.dring_ident = dr->ident,
- .start_idx = dr->prod,
+ .start_idx = start,
.end_idx = (u32) -1,
};
int err, delay;
int retries = 0;
+ if (port->stop_rx) {
+ err = vnet_send_ack(port,
+ &port->vio.drings[VIO_DRIVER_RX_RING],
+ port->stop_rx_idx, -1,
+ VIO_DRING_STOPPED);
+ if (err <= 0)
+ return err;
+ }
+
hdr.seq = dr->snd_nxt;
delay = 1;
do {
@@ -737,7 +773,30 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
d->hdr.state = VIO_DESC_READY;
- err = __vnet_tx_trigger(port);
+ /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
+ * to notify the consumer that some descriptors are READY.
+ * After that "start" trigger, no additional triggers are needed until
+ * a DRING_STOPPED is received from the consumer. The dr->cons field
+ * (set up by vnet_ack()) has the value of the next dring index
+ * that has not yet been ack-ed. We send a "start" trigger here
+ * if, and only if, start_cons is true (reset it afterward). Conversely,
+ * vnet_ack() should check if the dring corresponding to cons
+ * is marked READY, but start_cons was false.
+ * If so, vnet_ack() should send out the missed "start" trigger.
+ *
+ * Note that the wmb() above makes sure the cookies et al. are
+ * not globally visible before the VIO_DESC_READY, and that the
+ * stores are ordered correctly by the compiler. The consumer will
+ * not proceed until the VIO_DESC_READY is visible, assuring that
+ * the consumer does not observe anything related to descriptors
+ * out of order. The HV trap from the LDC start trigger is the
+ * producer-to-consumer announcement that work is available to the
+ * consumer.
+ */
+ if (!port->start_cons)
+ goto ldc_start_done; /* previous trigger suffices */
+
+ err = __vnet_tx_trigger(port, dr->cons);
if (unlikely(err < 0)) {
netdev_info(dev, "TX trigger error %d\n", err);
d->hdr.state = VIO_DESC_FREE;
@@ -745,6 +804,9 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto out_dropped_unlock;
}
+ldc_start_done:
+ port->start_cons = false;
+
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
@@ -1038,6 +1100,7 @@ static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
(sizeof(struct ldc_trans_cookie) * 2));
dr->num_entries = VNET_TX_RING_SIZE;
dr->prod = dr->cons = 0;
+ port->start_cons = true; /* need an initial trigger */
dr->pending = VNET_TX_RING_SIZE;
dr->ncookies = ncookies;
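
A minimal userspace model of the start_cons handshake described in the long
comment above: a start trigger is sent only while start_cons is set, stays
suppressed afterwards, and is re-armed when the peer reports DRING_STOPPED:

#include <stdbool.h>
#include <stdio.h>

static bool start_cons = true;

static void xmit(void)
{
	if (start_cons)
		printf("send start trigger\n");
	else
		printf("previous trigger suffices\n");
	start_cons = false;
}

static void peer_stopped(void)
{
	start_cons = true;	/* next xmit must trigger again */
}

int main(void)
{
	xmit();			/* send start trigger         */
	xmit();			/* previous trigger suffices  */
	peer_stopped();
	xmit();			/* send start trigger         */
	return 0;
}
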
diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h
index de5c2c64996f..da4933750d06 100644
--- a/drivers/net/ethernet/sun/sunvnet.h
+++ b/drivers/net/ethernet/sun/sunvnet.h
@@ -40,6 +40,10 @@ struct vnet_port {
struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE];
struct list_head list;
+
+ u32 stop_rx_idx;
+ bool stop_rx;
+ bool start_cons;
};
static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index f9bcf7aa88ca..dd9430043536 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1207,7 +1207,6 @@ static int cpmac_remove(struct platform_device *pdev)
static struct platform_driver cpmac_driver = {
.driver = {
.name = "cpmac",
- .owner = THIS_MODULE,
},
.probe = cpmac_probe,
.remove = cpmac_remove,
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index aa8bf45e53dc..0ea78326cc21 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -211,7 +211,6 @@ static struct platform_driver cpsw_phy_sel_driver = {
.probe = cpsw_phy_sel_probe,
.driver = {
.name = "cpsw-phy-sel",
- .owner = THIS_MODULE,
.of_match_table = cpsw_phy_sel_id_table,
},
};
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index e2a00287f8eb..45ba50e4eaec 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -397,6 +397,8 @@ struct cpsw_priv {
struct cpdma_ctlr *dma;
struct cpdma_chan *txch, *rxch;
struct cpsw_ale *ale;
+ bool rx_pause;
+ bool tx_pause;
/* snapshot of IRQ numbers */
u32 irqs_table[4];
u32 num_irqs;
@@ -855,6 +857,12 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
else if (phy->speed == 10)
mac_control |= BIT(18); /* In Band mode */
+ if (priv->rx_pause)
+ mac_control |= BIT(3);
+
+ if (priv->tx_pause)
+ mac_control |= BIT(4);
+
*link = true;
} else {
mac_control = 0;
@@ -1246,6 +1254,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
/* enable statistics collection only on all ports */
__raw_writel(0x7, &priv->regs->stat_port_en);
+ /* Enable internal fifo flow control */
+ writel(0x7, &priv->regs->flow_control);
+
if (WARN_ON(!priv->data.rx_descs))
priv->data.rx_descs = 128;
@@ -1807,6 +1818,30 @@ static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
}
+static void cpsw_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ pause->autoneg = AUTONEG_DISABLE;
+ pause->rx_pause = priv->rx_pause ? true : false;
+ pause->tx_pause = priv->tx_pause ? true : false;
+}
+
+static int cpsw_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ bool link;
+
+ priv->rx_pause = pause->rx_pause ? true : false;
+ priv->tx_pause = pause->tx_pause ? true : false;
+
+ for_each_slave(priv, _cpsw_adjust_link, priv, &link);
+
+ return 0;
+}
+
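Usage note (illustrative, not part of the patch): once these hooks are
wired into cpsw_ethtool_ops below, pause frames can be toggled from
userspace with the standard ethtool -A eth0 rx on tx on and queried with
ethtool -a eth0; pause autonegotiation is always reported as disabled,
matching cpsw_get_pauseparam() above.
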
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
@@ -1820,6 +1855,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_sset_count = cpsw_get_sset_count,
.get_strings = cpsw_get_strings,
.get_ethtool_stats = cpsw_get_ethtool_stats,
+ .get_pauseparam = cpsw_get_pauseparam,
+ .set_pauseparam = cpsw_set_pauseparam,
.get_wol = cpsw_get_wol,
.set_wol = cpsw_set_wol,
.get_regs_len = cpsw_get_regs_len,
@@ -2255,18 +2292,24 @@ static int cpsw_probe(struct platform_device *pdev)
}
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
- for (i = res->start; i <= res->end; i++) {
- if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
- dev_name(&pdev->dev), priv)) {
- dev_err(priv->dev, "error attaching irq\n");
- goto clean_ale_ret;
- }
- priv->irqs_table[k] = i;
- priv->num_irqs = k + 1;
+ if (k >= ARRAY_SIZE(priv->irqs_table)) {
+ ret = -EINVAL;
+ goto clean_ale_ret;
+ }
+
+ ret = devm_request_irq(&pdev->dev, res->start, cpsw_interrupt,
+ 0, dev_name(&pdev->dev), priv);
+ if (ret < 0) {
+ dev_err(priv->dev, "error attaching irq (%d)\n", ret);
+ goto clean_ale_ret;
}
+
+ priv->irqs_table[k] = res->start;
k++;
}
+ priv->num_irqs = k;
+
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
@@ -2395,7 +2438,6 @@ MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
static struct platform_driver cpsw_driver = {
.driver = {
.name = "cpsw",
- .owner = THIS_MODULE,
.pm = &cpsw_pm_ops,
.of_match_table = cpsw_of_mtable,
},
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 35a139e9a833..ea712512c7d1 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -2083,7 +2083,6 @@ MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
static struct platform_driver davinci_emac_driver = {
.driver = {
.name = "davinci_emac",
- .owner = THIS_MODULE,
.pm = &davinci_emac_pm_ops,
.of_match_table = of_match_ptr(davinci_emac_of_match),
},
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 2791f6f2db11..98655b44b97e 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -481,7 +481,6 @@ MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
static struct platform_driver davinci_mdio_driver = {
.driver = {
.name = "davinci_mdio",
- .owner = THIS_MODULE,
.pm = &davinci_mdio_pm_ops,
.of_match_table = of_match_ptr(davinci_mdio_of_mtable),
},
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 88c712126692..88818d5054ab 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -956,7 +956,7 @@ static int tile_net_open_aux(struct net_device *dev)
*/
if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) {
- pr_warning("Failed to start LIPP/LEPP.\n");
+ pr_warn("Failed to start LIPP/LEPP\n");
return -EIO;
}
@@ -2399,8 +2399,7 @@ static int __init network_cpus_setup(char *str)
{
int rc = cpulist_parse_crop(str, &network_cpus_map);
if (rc != 0) {
- pr_warning("network_cpus=%s: malformed cpu list\n",
- str);
+ pr_warn("network_cpus=%s: malformed cpu list\n", str);
} else {
/* Remove dedicated cpus. */
@@ -2409,8 +2408,7 @@ static int __init network_cpus_setup(char *str)
if (cpumask_empty(&network_cpus_map)) {
- pr_warning("Ignoring network_cpus='%s'.\n",
- str);
+ pr_warn("Ignoring network_cpus='%s'\n", str);
} else {
char buf[1024];
cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index c8fd94133ecd..4ea2d4e6f1d1 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1485,7 +1485,6 @@ static int axienet_of_probe(struct platform_device *op)
if (!ndev)
return -ENOMEM;
- ether_setup(ndev);
platform_set_drvdata(op, ndev);
SET_NETDEV_DEV(ndev, &op->dev);