author    Dmitry Torokhov <dmitry.torokhov@gmail.com>    2014-10-11 11:34:07 -0700
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>    2014-10-11 11:34:07 -0700
commit    9d93551188069a0a21e664b4bfc89ed4a6df1903 (patch)
tree      bcf7334c0e57323a6844b8147ce1c92180fe8cef /drivers/net/ethernet
parent    dd4cae8bf16611053ee7b00e20aa4fa945b92b99 (diff)
parent    bfe01a5ba2490f299e1d2d5508cbbbadd897bbe9 (diff)
Merge tag 'v3.17' into next
Synchronize with mainline to bring in changes to Synaptics and i8042 drivers.
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 50
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 86
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.h | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 8
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 209
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 4
-rw-r--r--  drivers/net/ethernet/apm/xgene/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 53
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 31
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 74
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 57
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 143
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 39
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 1
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 7
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 11
-rw-r--r--  drivers/net/ethernet/calxeda/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 65
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 79
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 4
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 19
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 21
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 38
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 34
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 3
-rw-r--r--  drivers/net/ethernet/octeon/octeon_mgmt.c | 4
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 5
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 67
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 153
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 2
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 7
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 52
72 files changed, 1059 insertions(+), 572 deletions(-)
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 059c7414e303..8ca49f04acec 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
int entry = vp->cur_tx % TX_RING_SIZE;
struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
unsigned long flags;
+ dma_addr_t dma_addr;
if (vortex_debug > 6) {
pr_debug("boomerang_start_xmit()\n");
@@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
if (!skb_shinfo(skb)->nr_frags) {
- vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
- skb->len, PCI_DMA_TODEVICE));
+ dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+ goto out_dma_err;
+
+ vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
} else {
int i;
- vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE));
+ dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+ goto out_dma_err;
+
+ vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+ 0,
+ frag->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+ for(i = i-1; i >= 0; i--)
+ dma_unmap_page(&VORTEX_PCI(vp)->dev,
+ le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
+ le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
+ DMA_TO_DEVICE);
+
+ pci_unmap_single(VORTEX_PCI(vp),
+ le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
+ le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+ PCI_DMA_TODEVICE);
+
+ goto out_dma_err;
+ }
+
vp->tx_ring[entry].frag[i+1].addr =
- cpu_to_le32(pci_map_single(
- VORTEX_PCI(vp),
- (void *)skb_frag_address(frag),
- skb_frag_size(frag), PCI_DMA_TODEVICE));
+ cpu_to_le32(dma_addr);
if (i == skb_shinfo(skb)->nr_frags-1)
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
@@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
#else
- vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+ dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+ goto out_dma_err;
+ vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
#endif
@@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
+out:
return NETDEV_TX_OK;
+out_dma_err:
+ dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+ goto out;
}
/* The interrupt handler does all of the Rx thread work and cleans up
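[Note: the hunk above adds dma_mapping_error() checks to every mapping in the transmit path and, when a mapping fails mid-fragment, unwinds the mappings already made before dropping the packet. A minimal standalone C sketch of that unwind shape; map_frag()/unmap_frag() are hypothetical stubs standing in for the real pci_map_single()/dma_unmap_page() calls, not kernel API.]

#include <stdio.h>
#include <stdbool.h>

#define NR_FRAGS 4

/* Stubs for pci_map_single()/dma_unmap_page(); mapping "fails" on frag 2. */
static bool map_frag(int i)   { printf("map %d\n", i);   return i != 2; }
static void unmap_frag(int i) { printf("unmap %d\n", i); }

static int xmit(void)
{
	int i;

	for (i = 0; i < NR_FRAGS; i++) {
		if (!map_frag(i)) {
			/* Unwind every mapping made before the failure. */
			for (i = i - 1; i >= 0; i--)
				unmap_frag(i);
			return -1;	/* drop the packet */
		}
	}
	return 0;
}

int main(void) { xmit(); return 0; }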
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 23578dfee249..3005155e412b 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -123,6 +123,12 @@ static inline void greth_enable_tx(struct greth_private *greth)
GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
}
+static inline void greth_enable_tx_and_irq(struct greth_private *greth)
+{
+ wmb(); /* BDs must be written to memory before enabling TX */
+ GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
+}
+
static inline void greth_disable_tx(struct greth_private *greth)
{
GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
@@ -447,29 +453,30 @@ out:
return err;
}
+static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
+{
+ if (tx_next < tx_last)
+ return (tx_last - tx_next) - 1;
+ else
+ return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
+}
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
struct greth_bd *bdp;
- u32 status = 0, dma_addr, ctrl;
+ u32 status, dma_addr;
int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
unsigned long flags;
+ u16 tx_last;
nr_frags = skb_shinfo(skb)->nr_frags;
+ tx_last = greth->tx_last;
+ rmb(); /* tx_last is updated by the poll task */
- /* Clean TX Ring */
- greth_clean_tx_gbit(dev);
-
- if (greth->tx_free < nr_frags + 1) {
- spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
- ctrl = GRETH_REGLOAD(greth->regs->control);
- /* Enable TX IRQ only if not already in poll() routine */
- if (ctrl & GRETH_RXI)
- GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
+ if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
netif_stop_queue(dev);
- spin_unlock_irqrestore(&greth->devlock, flags);
err = NETDEV_TX_BUSY;
goto out;
}
@@ -488,6 +495,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
/* Linear buf */
if (nr_frags != 0)
status = GRETH_TXBD_MORE;
+ else
+ status = GRETH_BD_IE;
if (skb->ip_summed == CHECKSUM_PARTIAL)
status |= GRETH_TXBD_CSALL;
@@ -545,14 +554,12 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
/* Enable the descriptor chain by enabling the first descriptor */
bdp = greth->tx_bd_base + greth->tx_next;
- greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
- greth->tx_next = curr_tx;
- greth->tx_free -= nr_frags + 1;
-
- wmb();
+ greth_write_bd(&bdp->stat,
+ greth_read_bd(&bdp->stat) | GRETH_BD_EN);
spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
- greth_enable_tx(greth);
+ greth->tx_next = curr_tx;
+ greth_enable_tx_and_irq(greth);
spin_unlock_irqrestore(&greth->devlock, flags);
return NETDEV_TX_OK;
@@ -648,7 +655,6 @@ static void greth_clean_tx(struct net_device *dev)
if (greth->tx_free > 0) {
netif_wake_queue(dev);
}
-
}
static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
@@ -670,20 +676,22 @@ static void greth_clean_tx_gbit(struct net_device *dev)
{
struct greth_private *greth;
struct greth_bd *bdp, *bdp_last_frag;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
u32 stat;
int nr_frags, i;
+ u16 tx_last;
greth = netdev_priv(dev);
+ tx_last = greth->tx_last;
- while (greth->tx_free < GRETH_TXBD_NUM) {
+ while (tx_last != greth->tx_next) {
- skb = greth->tx_skbuff[greth->tx_last];
+ skb = greth->tx_skbuff[tx_last];
nr_frags = skb_shinfo(skb)->nr_frags;
/* We only clean fully completed SKBs */
- bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
+ bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);
GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
mb();
@@ -692,14 +700,14 @@ static void greth_clean_tx_gbit(struct net_device *dev)
if (stat & GRETH_BD_EN)
break;
- greth->tx_skbuff[greth->tx_last] = NULL;
+ greth->tx_skbuff[tx_last] = NULL;
greth_update_tx_stats(dev, stat);
dev->stats.tx_bytes += skb->len;
- bdp = greth->tx_bd_base + greth->tx_last;
+ bdp = greth->tx_bd_base + tx_last;
- greth->tx_last = NEXT_TX(greth->tx_last);
+ tx_last = NEXT_TX(tx_last);
dma_unmap_single(greth->dev,
greth_read_bd(&bdp->addr),
@@ -708,21 +716,26 @@ static void greth_clean_tx_gbit(struct net_device *dev)
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- bdp = greth->tx_bd_base + greth->tx_last;
+ bdp = greth->tx_bd_base + tx_last;
dma_unmap_page(greth->dev,
greth_read_bd(&bdp->addr),
skb_frag_size(frag),
DMA_TO_DEVICE);
- greth->tx_last = NEXT_TX(greth->tx_last);
+ tx_last = NEXT_TX(tx_last);
}
- greth->tx_free += nr_frags+1;
dev_kfree_skb(skb);
}
+ if (skb) { /* skb is set only if the above while loop was entered */
+ wmb();
+ greth->tx_last = tx_last;
- if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
- netif_wake_queue(dev);
+ if (netif_queue_stopped(dev) &&
+ (greth_num_free_bds(tx_last, greth->tx_next) >
+ (MAX_SKB_FRAGS+1)))
+ netif_wake_queue(dev);
+ }
}
static int greth_rx(struct net_device *dev, int limit)
@@ -965,16 +978,12 @@ static int greth_poll(struct napi_struct *napi, int budget)
greth = container_of(napi, struct greth_private, napi);
restart_txrx_poll:
- if (netif_queue_stopped(greth->netdev)) {
- if (greth->gbit_mac)
- greth_clean_tx_gbit(greth->netdev);
- else
- greth_clean_tx(greth->netdev);
- }
-
if (greth->gbit_mac) {
+ greth_clean_tx_gbit(greth->netdev);
work_done += greth_rx_gbit(greth->netdev, budget - work_done);
} else {
+ if (netif_queue_stopped(greth->netdev))
+ greth_clean_tx(greth->netdev);
work_done += greth_rx(greth->netdev, budget - work_done);
}
@@ -983,7 +992,8 @@ restart_txrx_poll:
spin_lock_irqsave(&greth->devlock, flags);
ctrl = GRETH_REGLOAD(greth->regs->control);
- if (netif_queue_stopped(greth->netdev)) {
+ if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
+ (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
GRETH_REGSAVE(greth->regs->control,
ctrl | GRETH_TXI | GRETH_RXI);
mask = GRETH_INT_RX | GRETH_INT_RE |
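[Note: greth_num_free_bds(), added above, is the usual circular-ring arithmetic: one slot is kept permanently empty so a full ring (tx_next one behind tx_last) is distinguishable from an empty one (tx_next == tx_last). A small self-contained sketch of the same computation, assuming GRETH_TXBD_NUM is 128 as in this driver.]

#include <assert.h>

#define GRETH_TXBD_NUM 128	/* TX ring size used by the driver */

/* Free descriptors between consumer (tx_last) and producer (tx_next),
 * keeping one slot unused so that full != empty. */
static unsigned short num_free_bds(unsigned short tx_last,
				   unsigned short tx_next)
{
	if (tx_next < tx_last)
		return (tx_last - tx_next) - 1;
	return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
}

int main(void)
{
	assert(num_free_bds(0, 0) == GRETH_TXBD_NUM - 1);   /* empty ring */
	assert(num_free_bds(5, 4) == 0);                     /* full ring  */
	assert(num_free_bds(10, 20) == GRETH_TXBD_NUM - 11); /* wrapped    */
	return 0;
}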
diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h
index 232a622a85b7..ae16ac94daf8 100644
--- a/drivers/net/ethernet/aeroflex/greth.h
+++ b/drivers/net/ethernet/aeroflex/greth.h
@@ -107,7 +107,7 @@ struct greth_private {
u16 tx_next;
u16 tx_last;
- u16 tx_free;
+ u16 tx_free; /* only used on 10/100Mbit */
u16 rx_cur;
struct greth_regs *regs; /* Address of controller registers. */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 346592dca33c..a3c11355a34d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -272,8 +272,8 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
struct xgbe_prv_data *pdata = filp->private_data;
unsigned int value;
- value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
- pdata->debugfs_xpcs_reg);
+ value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd,
+ pdata->debugfs_xpcs_reg);
return xgbe_common_read(buffer, count, ppos, value);
}
@@ -290,8 +290,8 @@ static ssize_t xpcs_reg_value_write(struct file *filp,
if (len < 0)
return len;
- pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
- pdata->debugfs_xpcs_reg, value);
+ XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg,
+ value);
return len;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index edaca4496264..ea273836d999 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -348,7 +348,7 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
/* Clear MAC flow control */
max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
- q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
@@ -373,7 +373,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
/* Set MAC flow control */
max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
- q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
@@ -509,8 +509,8 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
/* Enable all counter interrupts */
- XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
- XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
+ XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
+ XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
}
static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
@@ -1633,6 +1633,9 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
unsigned int i, count;
+ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+ return 0;
+
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
@@ -1703,8 +1706,8 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}
-static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
- unsigned char queue_count)
+static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
+ unsigned int queue_count)
{
unsigned int q_fifo_size = 0;
enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
@@ -1748,6 +1751,10 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
q_fifo_size = XGBE_FIFO_SIZE_KB(256);
break;
}
+
+ /* The configured value is not the actual amount of fifo RAM */
+ q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
+
q_fifo_size = q_fifo_size / queue_count;
/* Set the queue fifo size programmable value */
@@ -1947,6 +1954,32 @@ static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
xgbe_disable_rx_vlan_stripping(pdata);
}
+static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
+{
+ bool read_hi;
+ u64 val;
+
+ switch (reg_lo) {
+ /* These registers are always 64 bit */
+ case MMC_TXOCTETCOUNT_GB_LO:
+ case MMC_TXOCTETCOUNT_G_LO:
+ case MMC_RXOCTETCOUNT_GB_LO:
+ case MMC_RXOCTETCOUNT_G_LO:
+ read_hi = true;
+ break;
+
+ default:
+ read_hi = false;
+ };
+
+ val = XGMAC_IOREAD(pdata, reg_lo);
+
+ if (read_hi)
+ val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
+
+ return val;
+}
+
static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
@@ -1954,75 +1987,75 @@ static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
stats->txoctetcount_gb +=
- XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
stats->txframecount_gb +=
- XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
stats->txbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
stats->txmulticastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
stats->tx64octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
stats->tx65to127octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
stats->tx128to255octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
stats->tx256to511octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
stats->tx512to1023octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
stats->tx1024tomaxoctets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
stats->txunicastframes_gb +=
- XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
stats->txmulticastframes_gb +=
- XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
stats->txbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
stats->txunderflowerror +=
- XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
stats->txoctetcount_g +=
- XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
stats->txframecount_g +=
- XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
stats->txpauseframes +=
- XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
stats->txvlanframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}
static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
@@ -2032,95 +2065,95 @@ static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
stats->rxframecount_gb +=
- XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
stats->rxoctetcount_gb +=
- XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
stats->rxoctetcount_g +=
- XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
stats->rxbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
stats->rxmulticastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
stats->rxcrcerror +=
- XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
stats->rxrunterror +=
- XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+ xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
stats->rxjabbererror +=
- XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+ xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
stats->rxundersize_g +=
- XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
stats->rxoversize_g +=
- XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
stats->rx64octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
stats->rx65to127octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
stats->rx128to255octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
stats->rx256to511octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
stats->rx512to1023octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
stats->rx1024tomaxoctets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
stats->rxunicastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
stats->rxlengtherror +=
- XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
stats->rxoutofrangetype +=
- XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+ xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
stats->rxpauseframes +=
- XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
stats->rxfifooverflow +=
- XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+ xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
stats->rxvlanframes_gb +=
- XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
stats->rxwatchdogerror +=
- XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+ xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}
static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
@@ -2131,127 +2164,127 @@ static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
stats->txoctetcount_gb +=
- XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
stats->txframecount_gb +=
- XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
stats->txbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
stats->txmulticastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
stats->tx64octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
stats->tx65to127octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
stats->tx128to255octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
stats->tx256to511octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
stats->tx512to1023octets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
stats->tx1024tomaxoctets_gb +=
- XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
stats->txunicastframes_gb +=
- XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
stats->txmulticastframes_gb +=
- XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
stats->txbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
stats->txunderflowerror +=
- XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+ xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
stats->txoctetcount_g +=
- XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
stats->txframecount_g +=
- XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
stats->txpauseframes +=
- XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
stats->txvlanframes_g +=
- XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
stats->rxframecount_gb +=
- XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
stats->rxoctetcount_gb +=
- XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
stats->rxoctetcount_g +=
- XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
stats->rxbroadcastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
stats->rxmulticastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
stats->rxcrcerror +=
- XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
stats->rxrunterror +=
- XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+ xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
stats->rxjabbererror +=
- XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+ xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
stats->rxundersize_g +=
- XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
stats->rxoversize_g +=
- XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+ xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
stats->rx64octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
stats->rx65to127octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
stats->rx128to255octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
stats->rx256to511octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
stats->rx512to1023octets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
stats->rx1024tomaxoctets_gb +=
- XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
stats->rxunicastframes_g +=
- XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+ xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
stats->rxlengtherror +=
- XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+ xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
stats->rxoutofrangetype +=
- XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+ xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
stats->rxpauseframes +=
- XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+ xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
stats->rxfifooverflow +=
- XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+ xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
stats->rxvlanframes_gb +=
- XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+ xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
stats->rxwatchdogerror +=
- XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+ xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
/* Un-freeze counters */
XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
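[Note: xgbe_mmc_read(), added above, hides the difference between 32-bit and 64-bit MMC counters: for the four octet counters the low word is read first and the adjacent high word is folded in. Reading the low half first matters on hardware that latches the high half on the low read, though that latching behavior is an assumption here, not something this patch states. A rough standalone sketch of the split read against a fake register file; ioread32() is a local stub, not the kernel helper.]

#include <stdint.h>
#include <stdio.h>

/* Fake register file standing in for the XGMAC MMC block:
 * low word at offset 0, high word at offset 4. */
static uint32_t regs[2] = { 0xdeadbeef, 0x12 };

static uint32_t ioread32(unsigned int off) { return regs[off / 4]; }

/* Read a 64-bit counter exposed as two 32-bit registers, low word first. */
static uint64_t mmc_read64(unsigned int reg_lo)
{
	uint64_t val = ioread32(reg_lo);

	val |= (uint64_t)ioread32(reg_lo + 4) << 32;
	return val;
}

int main(void)
{
	printf("counter = %#llx\n", (unsigned long long)mmc_read64(0));
	return 0;
}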
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index dc84f7193c2d..b26d75856553 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -361,6 +361,8 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
memset(hw_feat, 0, sizeof(*hw_feat));
+ hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
+
/* Hardware feature register 0 */
hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index a076aca138a1..46f613028e9c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -361,15 +361,16 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
- XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
- XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
- XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
drvinfo->n_stats = XGBE_STATS_COUNT;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 8aa6a9353f7b..bdf9cfa70e88 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -172,7 +172,7 @@ static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
}
if (i < pdata->rx_ring_count) {
- spin_lock_init(&tx_ring->lock);
+ spin_lock_init(&rx_ring->lock);
channel->rx_ring = rx_ring++;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 07bf70a82908..e9fe6e6ddcc3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -183,6 +183,7 @@
#define XGMAC_DRIVER_CONTEXT 1
#define XGMAC_IOCTL_CONTEXT 2
+#define XGBE_FIFO_MAX 81920
#define XGBE_FIFO_SIZE_B(x) (x)
#define XGBE_FIFO_SIZE_KB(x) (x * 1024)
@@ -526,6 +527,9 @@ struct xgbe_desc_if {
* or configurations are present in the device.
*/
struct xgbe_hw_features {
+ /* HW Version */
+ unsigned int version;
+
/* HW Feature Register0 */
unsigned int gmii; /* 1000 Mbps support */
unsigned int vlhash; /* VLAN Hash Filter */
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index 616dff6d3f5f..f4054d242f3c 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -1,5 +1,6 @@
config NET_XGENE
tristate "APM X-Gene SoC Ethernet Driver"
+ depends on HAS_DMA
select PHYLIB
help
This is the Ethernet driver for the on-chip ethernet interface on the
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index fe5cfeace6e3..5919394d9f58 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -30,6 +30,17 @@
#define DRV_VERSION "1.0"
/**
+ * arc_emac_tx_avail - Return the number of available slots in the tx ring.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: the number of slots available for transmission in the tx ring.
+ */
+static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
+{
+ return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
+}
+
+/**
* arc_emac_adjust_link - Adjust the PHY link duplex.
* @ndev: Pointer to the net_device structure.
*
@@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
txbd->info = 0;
*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
- if (netif_queue_stopped(ndev))
- netif_wake_queue(ndev);
}
+
+ /* Ensure that txbd_dirty is visible to tx() before checking
+ * for queue stopped.
+ */
+ smp_mb();
+
+ if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
+ netif_wake_queue(ndev);
}
/**
@@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
work_done = arc_emac_rx(ndev, budget);
if (work_done < budget) {
napi_complete(napi);
- arc_reg_or(priv, R_ENABLE, RXINT_MASK);
+ arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
}
return work_done;
@@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
/* Reset all flags except "MDIO complete" */
arc_reg_set(priv, R_STATUS, status);
- if (status & RXINT_MASK) {
+ if (status & (RXINT_MASK | TXINT_MASK)) {
if (likely(napi_schedule_prep(&priv->napi))) {
- arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
+ arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
__napi_schedule(&priv->napi);
}
}
@@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev)
arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
/* Enable interrupts */
- arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+ arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
/* Set CONTROL */
arc_reg_set(priv, R_CTRL,
@@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev)
netif_stop_queue(ndev);
/* Disable interrupts */
- arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+ arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
/* Disable EMAC */
arc_reg_clr(priv, R_CTRL, EN_MASK);
@@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
len = max_t(unsigned int, ETH_ZLEN, skb->len);
- /* EMAC still holds this buffer in its possession.
- * CPU must not modify this buffer descriptor
- */
- if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+ if (unlikely(!arc_emac_tx_avail(priv))) {
netif_stop_queue(ndev);
+ netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
/* Increment index to point to the next BD */
*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
- /* Get "info" of the next BD */
- info = &priv->txbd[*txbd_curr].info;
+ /* Ensure that tx_clean() sees the new txbd_curr before
+ * checking the queue status. This prevents an unneeded wake
+ * of the queue in tx_clean().
+ */
+ smp_mb();
- /* Check if if Tx BD ring is full - next BD is still owned by EMAC */
- if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+ if (!arc_emac_tx_avail(priv)) {
netif_stop_queue(ndev);
+ /* Refresh tx_dirty */
+ smp_mb();
+ if (arc_emac_tx_avail(priv))
+ netif_start_queue(ndev);
+ }
arc_reg_set(priv, R_STATUS, TXPL_MASK);
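[Note: the arc_emac changes above are the standard fix for the tx stop/wake race: the producer publishes txbd_curr with smp_mb() before deciding to stop the queue, the completion path publishes txbd_dirty with smp_mb() before deciding to wake it, and the producer re-checks after stopping in case the cleaner ran in between. A single-threaded sketch of the paired pattern; stop_queue()/wake_queue() are illustrative stand-ins for netif_stop_queue()/netif_wake_queue(), and __sync_synchronize() stands in for smp_mb().]

#include <stdio.h>

#define TX_BD_NUM 64
#define smp_mb() __sync_synchronize()

static int queue_stopped;
static void stop_queue(void) { queue_stopped = 1; puts("queue stopped"); }
static void wake_queue(void) { queue_stopped = 0; puts("queue woken"); }

static unsigned int txbd_curr, txbd_dirty;	/* producer / consumer */

static int tx_avail(void)
{
	return (txbd_dirty + TX_BD_NUM - txbd_curr - 1) % TX_BD_NUM;
}

/* Producer (ndo_start_xmit): publish the new producer index before
 * re-checking for space. */
static void xmit_one(void)
{
	txbd_curr = (txbd_curr + 1) % TX_BD_NUM;
	smp_mb();		/* pairs with the barrier in tx_clean_one() */
	if (!tx_avail()) {
		stop_queue();
		smp_mb();	/* re-check: the cleaner may have just run */
		if (tx_avail())
			wake_queue();
	}
}

/* Consumer (tx completion): publish the new consumer index before
 * checking whether the queue needs waking. */
static void tx_clean_one(void)
{
	txbd_dirty = (txbd_dirty + 1) % TX_BD_NUM;
	smp_mb();		/* pairs with the barrier in xmit_one() */
	if (queue_stopped && tx_avail())
		wake_queue();
}

int main(void)
{
	while (!queue_stopped)
		xmit_one();	/* fills the ring; stops the queue */
	tx_clean_one();		/* frees a slot; wakes the queue */
	return 0;
}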
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 7dcfb19a31c8..d8d07a818b89 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -84,7 +84,7 @@ config BNX2
config CNIC
tristate "QLogic CNIC support"
- depends on PCI
+ depends on PCI && (IPV6 || IPV6=n)
select BNX2
select UIO
---help---
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 4a7028d65912..d588136b23b9 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1697,7 +1697,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
hwstat->tx_underruns +
hwstat->tx_excessive_cols +
hwstat->tx_late_cols);
- nstat->multicast = hwstat->tx_multicast_pkts;
+ nstat->multicast = hwstat->rx_multicast_pkts;
nstat->collisions = hwstat->tx_total_cols;
nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 6f4e18644bd4..d9b9170ed2fc 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -534,6 +534,25 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
while ((processed < to_process) && (processed < budget)) {
cb = &priv->rx_cbs[priv->rx_read_ptr];
skb = cb->skb;
+
+ processed++;
+ priv->rx_read_ptr++;
+
+ if (priv->rx_read_ptr == priv->num_rx_bds)
+ priv->rx_read_ptr = 0;
+
+ /* We do not have a backing SKB, so we do not have a corresponding
+ * DMA mapping for this incoming packet since
+ * bcm_sysport_rx_refill always either has both skb and mapping
+ * or none.
+ */
+ if (unlikely(!skb)) {
+ netif_err(priv, rx_err, ndev, "out of memory!\n");
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ goto refill;
+ }
+
dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
RX_BUF_LENGTH, DMA_FROM_DEVICE);
@@ -543,23 +562,11 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
DESC_STATUS_MASK;
- processed++;
- priv->rx_read_ptr++;
- if (priv->rx_read_ptr == priv->num_rx_bds)
- priv->rx_read_ptr = 0;
-
netif_dbg(priv, rx_status, ndev,
"p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
p_index, priv->rx_c_index, priv->rx_read_ptr,
len, status);
- if (unlikely(!skb)) {
- netif_err(priv, rx_err, ndev, "out of memory!\n");
- ndev->stats.rx_dropped++;
- ndev->stats.rx_errors++;
- goto refill;
- }
-
if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
netif_err(priv, rx_status, ndev, "fragmented packet!\n");
ndev->stats.rx_dropped++;
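[Note: the reordering above makes the rx loop advance its ring index before any early exit, so a descriptor whose refill previously failed (cb->skb == NULL, hence no DMA mapping either) is counted, skipped, and re-refilled instead of being unmapped or wedging the ring on the same slot. A toy skeleton of that shape, with the kernel details stubbed out.]

#include <stdio.h>

#define NUM_RX_BDS 8

static void *rx_skb[NUM_RX_BDS];	/* NULL = earlier refill failed */
static unsigned int rx_read_ptr;

/* Advance the ring index first, then bail out on an empty slot. */
static void desc_rx(int budget)
{
	int processed;

	for (processed = 0; processed < budget; processed++) {
		void *skb = rx_skb[rx_read_ptr];
		unsigned int slot = rx_read_ptr;

		rx_read_ptr = (rx_read_ptr + 1) % NUM_RX_BDS;

		if (!skb) {
			printf("slot %u empty, refilling\n", slot);
			continue;	/* "goto refill" in the driver */
		}
		printf("processed packet in slot %u\n", slot);
	}
}

int main(void)
{
	rx_skb[1] = "pkt";	/* only slot 1 has a backing buffer */
	desc_rx(3);
	return 0;
}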
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2fee73b878c2..823d01c5684c 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3236,8 +3236,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
skb->protocol = eth_type_trans(skb, bp->dev);
- if ((len > (bp->dev->mtu + ETH_HLEN)) &&
- (ntohs(skb->protocol) != 0x8100)) {
+ if (len > (bp->dev->mtu + ETH_HLEN) &&
+ skb->protocol != htons(0x8100) &&
+ skb->protocol != htons(ETH_P_8021AD)) {
dev_kfree_skb(skb);
goto next_rx;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5ba8af50c84f..c4daa068f1db 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -2233,7 +2233,12 @@ struct shmem2_region {
u32 reserved3; /* Offset 0x14C */
u32 reserved4; /* Offset 0x150 */
u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
- #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0)
+ #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001
+ #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00
+ #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8
+ #define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000
+ #define LINK_SFP_EEPROM_COMP_CODE_LR 0x00002000
+ #define LINK_SFP_EEPROM_COMP_CODE_LRM 0x00004000
u32 reserved5[2];
u32 reserved6[PORT_MAX];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 53fb4fa61b40..549549eaf580 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -154,15 +154,22 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
+ #define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0
#define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
#define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
-#define SFP_EEPROM_COMP_CODE_ADDR 0x3
- #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4)
- #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5)
- #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6)
+#define SFP_EEPROM_10G_COMP_CODE_ADDR 0x3
+ #define SFP_EEPROM_10G_COMP_CODE_SR_MASK (1<<4)
+ #define SFP_EEPROM_10G_COMP_CODE_LR_MASK (1<<5)
+ #define SFP_EEPROM_10G_COMP_CODE_LRM_MASK (1<<6)
+
+#define SFP_EEPROM_1G_COMP_CODE_ADDR 0x6
+ #define SFP_EEPROM_1G_COMP_CODE_SX (1<<0)
+ #define SFP_EEPROM_1G_COMP_CODE_LX (1<<1)
+ #define SFP_EEPROM_1G_COMP_CODE_CX (1<<2)
+ #define SFP_EEPROM_1G_COMP_CODE_BASE_T (1<<3)
#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
@@ -3633,8 +3640,8 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
reg_set[i].val);
/* Start KR2 work-around timer which handles BCM8073 link-parner */
- vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
- bnx2x_update_link_attr(params, vars->link_attr_sync);
+ params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
}
static void bnx2x_disable_kr2(struct link_params *params,
@@ -3666,8 +3673,8 @@ static void bnx2x_disable_kr2(struct link_params *params,
for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
- vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
- bnx2x_update_link_attr(params, vars->link_attr_sync);
+ params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
}
@@ -4810,7 +4817,7 @@ void bnx2x_link_status_update(struct link_params *params,
~FEATURE_CONFIG_PFC_ENABLED;
if (SHMEM2_HAS(bp, link_attr_sync))
- vars->link_attr_sync = SHMEM2_RD(bp,
+ params->link_attr_sync = SHMEM2_RD(bp,
link_attr_sync[params->port]);
DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n",
@@ -8057,21 +8064,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u32 sync_offset = 0, phy_idx, media_types;
- u8 gport, val[2], check_limiting_mode = 0;
+ u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0;
*edc_mode = EDC_MODE_LIMITING;
phy->media_type = ETH_PHY_UNSPECIFIED;
/* First check for copper cable */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
I2C_DEV_ADDR_A0,
- SFP_EEPROM_CON_TYPE_ADDR,
- 2,
+ 0,
+ SFP_EEPROM_FC_TX_TECH_ADDR + 1,
(u8 *)val) != 0) {
DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
return -EINVAL;
}
-
- switch (val[0]) {
+ params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK;
+ params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] <<
+ LINK_SFP_EEPROM_COMP_CODE_SHIFT;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
+ switch (val[SFP_EEPROM_CON_TYPE_ADDR]) {
case SFP_EEPROM_CON_TYPE_VAL_COPPER:
{
u8 copper_module_type;
@@ -8079,17 +8089,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
/* Check if its active cable (includes SFP+ module)
* of passive cable
*/
- if (bnx2x_read_sfp_module_eeprom(phy,
- params,
- I2C_DEV_ADDR_A0,
- SFP_EEPROM_FC_TX_TECH_ADDR,
- 1,
- &copper_module_type) != 0) {
- DP(NETIF_MSG_LINK,
- "Failed to read copper-cable-type"
- " from SFP+ EEPROM\n");
- return -EINVAL;
- }
+ copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR];
if (copper_module_type &
SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
@@ -8115,16 +8115,18 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
}
break;
}
+ case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN:
case SFP_EEPROM_CON_TYPE_VAL_LC:
case SFP_EEPROM_CON_TYPE_VAL_RJ45:
check_limiting_mode = 1;
- if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
- SFP_EEPROM_COMP_CODE_LR_MASK |
- SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
+ if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] &
+ (SFP_EEPROM_10G_COMP_CODE_SR_MASK |
+ SFP_EEPROM_10G_COMP_CODE_LR_MASK |
+ SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) {
DP(NETIF_MSG_LINK, "1G SFP module detected\n");
- gport = params->port;
phy->media_type = ETH_PHY_SFP_1G_FIBER;
if (phy->req_line_speed != SPEED_1000) {
+ u8 gport = params->port;
phy->req_line_speed = SPEED_1000;
if (!CHIP_IS_E1x(bp)) {
gport = BP_PATH(bp) +
@@ -8134,6 +8136,12 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
"Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
gport);
}
+ if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] &
+ SFP_EEPROM_1G_COMP_CODE_BASE_T) {
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+ msleep(40);
+ bnx2x_sfp_set_transmitter(params, phy, 1);
+ }
} else {
int idx, cfg_idx = 0;
DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8149,7 +8157,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
break;
default:
DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
- val[0]);
+ val[SFP_EEPROM_CON_TYPE_ADDR]);
return -EINVAL;
}
sync_offset = params->shmem_base +
@@ -13507,7 +13515,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
sigdet = bnx2x_warpcore_get_sigdet(phy, params);
if (!sigdet) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
bnx2x_kr2_recovery(params, vars, phy);
DP(NETIF_MSG_LINK, "No sigdet\n");
}
@@ -13525,7 +13533,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* CL73 has not begun yet */
if (base_page == 0) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
bnx2x_kr2_recovery(params, vars, phy);
DP(NETIF_MSG_LINK, "No BP\n");
}
@@ -13541,7 +13549,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
((next_page & 0xe0) == 0x20))));
/* In case KR2 is already disabled, check if we need to re-enable it */
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
if (!not_kr2_device) {
DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
next_page);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 389f5f8cb0a3..d9cce4c3899b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -323,6 +323,9 @@ struct link_params {
#define LINK_FLAGS_INT_DISABLED (1<<0)
#define PHY_INITIALIZED (1<<1)
u32 lfa_base;
+
+ /* The same definitions as the shmem2 parameter */
+ u32 link_attr_sync;
};
/* Output parameters */
@@ -364,8 +367,6 @@ struct link_vars {
u8 rx_tx_asic_rst;
u8 turn_to_run_wc_rt;
u16 rsrv2;
- /* The same definitions as the shmem2 parameter */
- u32 link_attr_sync;
};
/***********************************************************/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 900cab420810..d1c093dcb054 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6849,6 +6849,37 @@ static void bnx2x__common_init_phy(struct bnx2x *bp)
bnx2x_release_phy_lock(bp);
}
+static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
+{
+ REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
+ REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
+
+ /* make sure this value is 0 */
+ REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+
+ REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
+ REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
+ REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
+ REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
+}
+
+static void bnx2x_set_endianity(struct bnx2x *bp)
+{
+#ifdef __BIG_ENDIAN
+ bnx2x_config_endianity(bp, 1);
+#else
+ bnx2x_config_endianity(bp, 0);
+#endif
+}
+
+static void bnx2x_reset_endianity(struct bnx2x *bp)
+{
+ bnx2x_config_endianity(bp, 0);
+}
+
/**
* bnx2x_init_hw_common - initialize the HW at the COMMON phase.
*
@@ -6915,23 +6946,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
bnx2x_init_pxp(bp);
-
-#ifdef __BIG_ENDIAN
- REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
- /* make sure this value is 0 */
- REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
-
-/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
- REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
- REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
- REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
- REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
-#endif
-
+ bnx2x_set_endianity(bp);
bnx2x_ilt_init_page_size(bp, INITOP_SET);
if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
@@ -13169,9 +13184,15 @@ static void __bnx2x_remove(struct pci_dev *pdev,
bnx2x_iov_remove_one(bp);
/* Power on: we can't let PCI layer write to us while we are in D3 */
- if (IS_PF(bp))
+ if (IS_PF(bp)) {
bnx2x_set_power_state(bp, PCI_D0);
+ /* Set endianity registers to reset values in case next driver
+ * boots in a different endianity environment.
+ */
+ bnx2x_reset_endianity(bp);
+ }
+
/* Disable MSI/MSI-X */
bnx2x_disable_msi(bp);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 27861a6c7ca5..a6a9f284c8dd 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -31,7 +31,7 @@
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
@@ -3685,7 +3685,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
struct dst_entry **dst)
{
-#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_IPV6)
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
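[Note: both cnic changes above replace the open-coded defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) test with IS_ENABLED(), which is true for built-in (=y) and modular (=m) options alike. Below is a simplified re-implementation of the macro machinery from <linux/kconfig.h>, runnable standalone; the local CONFIG_VLAN_8021Q_MODULE define merely simulates an =m config and is not a real build setting.]

#include <stdio.h>

/* Simplified IS_ENABLED(): 1 if the option expands to 1 (=y) or if
 * its _MODULE twin does (=m), 0 otherwise. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) \
	(__is_defined(option) || __is_defined(option##_MODULE))

#define CONFIG_VLAN_8021Q_MODULE 1	/* pretend the option is =m */

int main(void)
{
	printf("IS_ENABLED(CONFIG_VLAN_8021Q) = %d\n",
	       IS_ENABLED(CONFIG_VLAN_8021Q));	/* prints 1 */
	return 0;
}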
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 3f9d4de8173c..5cc9cae21ed5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -875,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
int last_tx_cn, last_c_index, num_tx_bds;
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
+ unsigned int bds_compl;
unsigned int c_index;
/* Compute how many buffers are transmitted since last xmit call */
@@ -899,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
/* Reclaim transmitted buffers */
while (last_tx_cn-- > 0) {
tx_cb_ptr = ring->cbs + last_c_index;
+ bds_compl = 0;
if (tx_cb_ptr->skb) {
+ bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
dev->stats.tx_bytes += tx_cb_ptr->skb->len;
dma_unmap_single(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -916,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
}
dev->stats.tx_packets++;
- ring->free_bds += 1;
+ ring->free_bds += bds_compl;
last_c_index++;
last_c_index &= (num_tx_bds - 1);
@@ -1274,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
while ((rxpktprocessed < rxpkttoprocess) &&
(rxpktprocessed < budget)) {
+ cb = &priv->rx_cbs[priv->rx_read_ptr];
+ skb = cb->skb;
+
+ rxpktprocessed++;
+
+ priv->rx_read_ptr++;
+ priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+
+ /* We do not have a backing SKB, so we do not have a
+ * corresponding DMA mapping for this incoming packet since
+ * bcmgenet_rx_refill always either has both skb and mapping or
+ * none.
+ */
+ if (unlikely(!skb)) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+ goto refill;
+ }
+
/* Unmap the packet contents such that we can use the
* RSV from the 64 bytes descriptor when enabled and save
* a 32-bits register read
*/
- cb = &priv->rx_cbs[priv->rx_read_ptr];
- skb = cb->skb;
dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
priv->rx_buf_len, DMA_FROM_DEVICE);
@@ -1307,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
__func__, p_index, priv->rx_c_index,
priv->rx_read_ptr, dma_length_status);
- rxpktprocessed++;
-
- priv->rx_read_ptr++;
- priv->rx_read_ptr &= (priv->num_rx_bds - 1);
-
- /* out of memory, just drop packets at the hardware level */
- if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
- dev->stats.rx_errors++;
- goto refill;
- }
-
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
@@ -1736,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev)
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
+static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+{
+ int ret = 0;
+ int timeout = 0;
+ u32 reg;
+
+ /* Disable TDMA to stop add more frames in TX DMA */
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check TDMA status register to confirm TDMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+ if (reg & DMA_DISABLED)
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout == DMA_TIMEOUT_VAL) {
+ netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
+ ret = -ETIMEDOUT;
+ }
+
+ /* Wait 10ms for packet drain in both tx and rx dma */
+ usleep_range(10000, 20000);
+
+ /* Disable RDMA */
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~DMA_EN;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ timeout = 0;
+ /* Check RDMA status register to confirm RDMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+ if (reg & DMA_DISABLED)
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout == DMA_TIMEOUT_VAL) {
+ netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
int i;
/* disable DMA */
- bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
- bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
+ bcmgenet_dma_teardown(priv);
for (i = 0; i < priv->num_tx_bds; i++) {
if (priv->tx_cbs[i].skb != NULL) {
@@ -2101,57 +2159,6 @@ err_clk_disable:
return ret;
}
-static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
-{
- int ret = 0;
- int timeout = 0;
- u32 reg;
-
- /* Disable TDMA to stop add more frames in TX DMA */
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- /* Check TDMA status register to confirm TDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
- netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
- ret = -ETIMEDOUT;
- }
-
- /* Wait 10ms for packet drain in both tx and rx dma */
- usleep_range(10000, 20000);
-
- /* Disable RDMA */
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- timeout = 0;
- /* Check RDMA status register to confirm RDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
- netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
- ret = -ETIMEDOUT;
- }
-
- return ret;
-}
-
static void bcmgenet_netif_stop(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
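
bcmgenet_dma_teardown() above is the usual disable-then-poll idiom: clear the enable bit, then spin on a status register until the block reports itself disabled or a bounded number of polls elapses, returning -ETIMEDOUT on failure. A compilable sketch of the idiom against a simulated register pair; the register model and names are assumptions for illustration, not the GENET programming interface:

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    #define DMA_EN          (1u << 0)
    #define DMA_DISABLED    (1u << 0)
    #define DMA_TIMEOUT_VAL 1000

    static uint32_t ctrl_reg = DMA_EN;
    static uint32_t status_reg;

    static void write_ctrl(uint32_t v)
    {
        ctrl_reg = v;
        /* the simulated block reports DMA_DISABLED once DMA_EN is clear */
        if (!(ctrl_reg & DMA_EN))
            status_reg |= DMA_DISABLED;
    }

    static int dma_disable(void)
    {
        int timeout = 0;

        write_ctrl(ctrl_reg & ~DMA_EN);

        /* poll the status register with a bounded retry count */
        while (timeout++ < DMA_TIMEOUT_VAL) {
            if (status_reg & DMA_DISABLED)
                return 0;
            /* the real driver does udelay(1) here */
        }
        return -ETIMEDOUT;
    }

    int main(void)
    {
        printf("dma_disable() = %d\n", dma_disable());
        return 0;
    }

Replacing the bare register writes in bcmgenet_fini_dma() with this teardown gives in-flight packets a chance to drain before the rings are freed.
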
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3ac5d23454a8..ba499489969a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6918,7 +6918,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
skb->protocol = eth_type_trans(skb, tp->dev);
if (len > (tp->dev->mtu + ETH_HLEN) &&
- skb->protocol != htons(ETH_P_8021Q)) {
+ skb->protocol != htons(ETH_P_8021Q) &&
+ skb->protocol != htons(ETH_P_8021AD)) {
dev_kfree_skb_any(skb);
goto drop_it_no_recycle;
}
@@ -7914,8 +7915,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
entry = tnapi->tx_prod;
base_flags = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- base_flags |= TXD_FLAG_TCPUDP_CSUM;
mss = skb_shinfo(skb)->gso_size;
if (mss) {
@@ -7929,6 +7928,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
+ /* The HW/FW cannot correctly segment packets that have been
+ * VLAN encapsulated.
+ */
+ if (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD))
+ return tg3_tso_bug(tp, tnapi, txq, skb);
+
if (!skb_is_gso_v6(skb)) {
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
tg3_flag(tp, TSO_BUG))
@@ -7979,6 +7985,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
base_flags |= tsflags << 12;
}
}
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ /* The HW/FW cannot correctly checksum packets that have been
+ * VLAN encapsulated.
+ */
+ if (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD)) {
+ if (skb_checksum_help(skb))
+ goto drop;
+ } else {
+ base_flags |= TXD_FLAG_TCPUDP_CSUM;
+ }
}
if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
@@ -11617,6 +11634,12 @@ static int tg3_open(struct net_device *dev)
struct tg3 *tp = netdev_priv(dev);
int err;
+ if (tp->pcierr_recovery) {
+ netdev_err(dev, "Failed to open device. PCI error recovery "
+ "in progress\n");
+ return -EAGAIN;
+ }
+
if (tp->fw_needed) {
err = tg3_request_firmware(tp);
if (tg3_asic_rev(tp) == ASIC_REV_57766) {
@@ -11674,6 +11697,12 @@ static int tg3_close(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
+ if (tp->pcierr_recovery) {
+ netdev_err(dev, "Failed to close device. PCI error recovery "
+ "in progress\n");
+ return -EAGAIN;
+ }
+
tg3_ptp_fini(tp);
tg3_stop(tp);
@@ -17561,6 +17590,7 @@ static int tg3_init_one(struct pci_dev *pdev,
tp->rx_mode = TG3_DEF_RX_MODE;
tp->tx_mode = TG3_DEF_TX_MODE;
tp->irq_sync = 1;
+ tp->pcierr_recovery = false;
if (tg3_debug > 0)
tp->msg_enable = tg3_debug;
@@ -18071,6 +18101,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
+ tp->pcierr_recovery = true;
+
/* We probably don't have netdev yet */
if (!netdev || !netif_running(netdev))
goto done;
@@ -18195,6 +18227,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
tg3_phy_start(tp);
done:
+ tp->pcierr_recovery = false;
rtnl_unlock();
}
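
The tg3 hunks above route VLAN-encapsulated frames away from the broken hardware offloads: GSO packets take the tg3_tso_bug() software-segmentation path, and CHECKSUM_PARTIAL packets get skb_checksum_help() instead of TXD_FLAG_TCPUDP_CSUM. The gating test reduces to an ethertype check, sketched here in user space with the standard 802.1Q/802.1ad ethertype values (constants restated for a standalone build):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define ETH_P_8021Q  0x8100   /* 802.1Q single-tagged */
    #define ETH_P_8021AD 0x88A8   /* 802.1ad service-tagged (QinQ) */

    /* true when the HW/FW cannot segment or checksum the frame itself */
    static bool needs_sw_fallback(uint16_t protocol)
    {
        return protocol == ETH_P_8021Q || protocol == ETH_P_8021AD;
    }

    int main(void)
    {
        const uint16_t protos[] = { 0x0800 /* IPv4 */, ETH_P_8021Q, ETH_P_8021AD };

        for (unsigned int i = 0; i < 3; i++)
            printf("proto 0x%04x -> %s\n", protos[i],
                   needs_sw_fallback(protos[i]) ? "software path"
                                                : "hardware offload");
        return 0;
    }
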
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 461accaf0aa4..31c9f8295953 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3407,6 +3407,7 @@ struct tg3 {
struct device *hwmon_dev;
bool link_up;
+ bool pcierr_recovery;
};
/* Accessor macros for chip and asic attributes
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ff8cae5e2535..ffc92a41d75b 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2506,7 +2506,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
* For TSO, the TCP checksum field is seeded with pseudo-header sum
* excluding the length field.
*/
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
/* Do we really need these? */
@@ -2870,12 +2870,13 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ __be16 net_proto = vlan_get_protocol(skb);
u8 proto = 0;
- if (skb->protocol == htons(ETH_P_IP))
+ if (net_proto == htons(ETH_P_IP))
proto = ip_hdr(skb)->protocol;
#ifdef NETIF_F_IPV6_CSUM
- else if (skb->protocol == htons(ETH_P_IPV6)) {
+ else if (net_proto == htons(ETH_P_IPV6)) {
/* nexthdr may not be TCP immediately. */
proto = ipv6_hdr(skb)->nexthdr;
}
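
bnad, and several drivers later in this series (ehea, e1000, e1000e, i40e, mvneta, qlge), switch from skb->protocol to vlan_get_protocol(), because for a VLAN-tagged frame skb->protocol holds ETH_P_8021Q or ETH_P_8021AD rather than the L3 protocol the checksum and TSO paths need. A simplified user-space model of what the helper resolves; the real implementation lives in <linux/if_vlan.h> and also handles tags still present in the skb data:

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_P_IP     0x0800
    #define ETH_P_8021Q  0x8100
    #define ETH_P_8021AD 0x88A8

    /* Walk past up to two VLAN tags to the encapsulated ethertype.
     * "proto" is the outer ethertype; "tags" lists the ethertypes found
     * inside each successive tag, outermost first. */
    static uint16_t vlan_get_protocol_sim(uint16_t proto, const uint16_t *tags)
    {
        int depth = 0;

        while ((proto == ETH_P_8021Q || proto == ETH_P_8021AD) && depth < 2)
            proto = tags[depth++];
        return proto;
    }

    int main(void)
    {
        const uint16_t tagged[] = { ETH_P_IP };
        const uint16_t qinq[]   = { ETH_P_8021Q, ETH_P_IP };

        printf("0x%04x\n", vlan_get_protocol_sim(ETH_P_8021Q, tagged)); /* 0x0800 */
        printf("0x%04x\n", vlan_get_protocol_sim(ETH_P_8021AD, qinq));  /* 0x0800 */
        return 0;
    }
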
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ca5d7798b265..e1e02fba4fcc 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -30,7 +30,6 @@
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/pinctrl/consumer.h>
#include "macb.h"
@@ -2071,7 +2070,6 @@ static int __init macb_probe(struct platform_device *pdev)
struct phy_device *phydev;
u32 config;
int err = -ENXIO;
- struct pinctrl *pinctrl;
const char *mac;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2080,15 +2078,6 @@ static int __init macb_probe(struct platform_device *pdev)
goto err_out;
}
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl)) {
- err = PTR_ERR(pinctrl);
- if (err == -EPROBE_DEFER)
- goto err_out;
-
- dev_warn(&pdev->dev, "No pinctrl provided\n");
- }
-
err = -ENOMEM;
dev = alloc_etherdev(sizeof(*bp));
if (!dev)
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
index 184a063bed5f..07d2201530d2 100644
--- a/drivers/net/ethernet/calxeda/Kconfig
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -1,6 +1,7 @@
config NET_CALXEDA_XGMAC
tristate "Calxeda 1G/10G XGMAC Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
+ depends on ARCH_HIGHBANK || COMPILE_TEST
select CRC32
help
This is the driver for the XGMAC Ethernet IP block found on Calxeda
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 18fb9c61d7ba..e5be511a3c38 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1253,7 +1253,9 @@ freeout: t4_free_sge_resources(adap);
goto freeout;
}
- t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
+ t4_write_reg(adap, is_t4(adap->params.chip) ?
+ MPS_TRC_RSS_CONTROL :
+ MPS_T5_TRC_RSS_CONTROL,
RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
return 0;
@@ -1761,7 +1763,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0xd004, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0xea7c,
- 0xf000, 0x11190,
+ 0xf000, 0x11110,
+ 0x11118, 0x11190,
0x19040, 0x1906c,
0x19078, 0x19080,
0x1908c, 0x19124,
@@ -1968,7 +1971,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0xd004, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0x11088,
- 0x1109c, 0x1117c,
+ 0x1109c, 0x11110,
+ 0x11118, 0x1117c,
0x11190, 0x11204,
0x19040, 0x1906c,
0x19078, 0x19080,
@@ -5955,7 +5959,8 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(CQ_END);
params[4] = FW_PARAM_PFVF(OCQ_START);
params[5] = FW_PARAM_PFVF(OCQ_END);
- ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
+ val);
if (ret < 0)
goto bye;
adap->vres.qp.start = val[0];
@@ -5967,7 +5972,8 @@ static int adap_init0(struct adapter *adap)
params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
- ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
+ val);
if (ret < 0) {
adap->params.max_ordird_qp = 8;
adap->params.max_ird_adapter = 32 * adap->tids.ntids;
@@ -6472,6 +6478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct port_info *pi;
bool highdma = false;
struct adapter *adapter = NULL;
+ void __iomem *regs;
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -6488,19 +6495,35 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_release_regions;
}
+ regs = pci_ioremap_bar(pdev, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ err = -ENOMEM;
+ goto out_disable_device;
+ }
+
+ /* We control everything through one PF */
+ func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
+ if (func != ent->driver_data) {
+ iounmap(regs);
+ pci_disable_device(pdev);
+ pci_save_state(pdev); /* to restore SR-IOV later */
+ goto sriov;
+ }
+
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
highdma = true;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
"coherent allocations\n");
- goto out_disable_device;
+ goto out_unmap_bar0;
}
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto out_disable_device;
+ goto out_unmap_bar0;
}
}
@@ -6512,7 +6535,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
err = -ENOMEM;
- goto out_disable_device;
+ goto out_unmap_bar0;
}
adapter->workq = create_singlethread_workqueue("cxgb4");
@@ -6524,20 +6547,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* PCI device has been enabled */
adapter->flags |= DEV_ENABLED;
- adapter->regs = pci_ioremap_bar(pdev, 0);
- if (!adapter->regs) {
- dev_err(&pdev->dev, "cannot map device registers\n");
- err = -ENOMEM;
- goto out_free_adapter;
- }
-
- /* We control everything through one PF */
- func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI));
- if (func != ent->driver_data) {
- pci_save_state(pdev); /* to restore SR-IOV later */
- goto sriov;
- }
-
+ adapter->regs = regs;
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
adapter->mbox = func;
@@ -6554,7 +6564,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = t4_prep_adapter(adapter);
if (err)
- goto out_unmap_bar0;
+ goto out_free_adapter;
+
if (!is_t4(adapter->params.chip)) {
s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
@@ -6571,14 +6582,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev,
"Incorrect number of egress queues per page\n");
err = -EINVAL;
- goto out_unmap_bar0;
+ goto out_free_adapter;
}
adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
pci_resource_len(pdev, 2));
if (!adapter->bar2) {
dev_err(&pdev->dev, "cannot map device bar2 region\n");
err = -ENOMEM;
- goto out_unmap_bar0;
+ goto out_free_adapter;
}
}
@@ -6716,13 +6727,13 @@ sriov:
out_unmap_bar:
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
- out_unmap_bar0:
- iounmap(adapter->regs);
out_free_adapter:
if (adapter->workq)
destroy_workqueue(adapter->workq);
kfree(adapter);
+ out_unmap_bar0:
+ iounmap(regs);
out_disable_device:
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
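
The init_one() rework above is mostly an exercise in unwind ordering: BAR0 is now mapped and the PF identity checked before the adapter structure exists, so the error labels must release resources in the reverse of acquisition order, which is why out_unmap_bar0 moves below out_free_adapter. The classic goto-ladder shape, as a compilable sketch with hypothetical acquire and release steps standing in for the PCI calls:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    static void *map_regs(void)        { return malloc(16); } /* pci_ioremap_bar */
    static void  unmap_regs(void *r)   { free(r); }           /* iounmap */
    static void *alloc_adapter(void)   { return malloc(64); }
    static void  free_adapter(void *a) { free(a); }
    static int   prep_adapter(void)    { return -EIO; }       /* force the error path */

    static int probe(void)
    {
        void *regs, *adapter;
        int err;

        regs = map_regs();
        if (!regs)
            return -ENOMEM;

        adapter = alloc_adapter();
        if (!adapter) {
            err = -ENOMEM;
            goto out_unmap;        /* only regs held so far */
        }

        err = prep_adapter();
        if (err)
            goto out_free;         /* unwind in reverse acquisition order */

        return 0;

    out_free:
        free_adapter(adapter);
    out_unmap:
        unmap_regs(regs);
        return err;
    }

    int main(void)
    {
        printf("probe() = %d\n", probe());
        return 0;
    }
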
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a853133d8db8..41d04462b72e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -168,6 +168,34 @@ void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
}
/*
+ * t4_report_fw_error - report firmware error
+ * @adap: the adapter
+ *
+ * The adapter firmware can indicate error conditions to the host.
+ * If the firmware has indicated an error, print out the reason for
+ * the firmware error.
+ */
+static void t4_report_fw_error(struct adapter *adap)
+{
+ static const char *const reason[] = {
+ "Crash", /* PCIE_FW_EVAL_CRASH */
+ "During Device Preparation", /* PCIE_FW_EVAL_PREP */
+ "During Device Configuration", /* PCIE_FW_EVAL_CONF */
+ "During Device Initialization", /* PCIE_FW_EVAL_INIT */
+ "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
+ "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
+ "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
+ "Reserved", /* reserved */
+ };
+ u32 pcie_fw;
+
+ pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
+ if (pcie_fw & FW_PCIE_FW_ERR)
+ dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
+ reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
+}
+
+/*
* Get the reply to a mailbox command and store it in @rpl in big-endian order.
*/
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
@@ -300,6 +328,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
dump_mbox(adap, mbox, data_reg);
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
+ t4_report_fw_error(adap);
return -ETIMEDOUT;
}
@@ -566,6 +595,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 1024
+#define CHELSIO_VPD_UNIQUE_ID 0x82
/**
* t4_seeprom_wp - enable/disable EEPROM write protection
@@ -603,7 +633,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
if (ret < 0)
goto out;
- addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
+
+ /* The VPD shall have a unique identifier specified by the PCI SIG.
+ * For Chelsio adapters that identifier is CHELSIO_VPD_UNIQUE_ID
+ * (0x82), so it is the first byte of the VPD; the VPD programming
+ * software is expected to put this entry at the beginning of the VPD.
+ */
+ addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
if (ret < 0)
@@ -667,6 +704,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
+ i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->pn, vpd + pn, min(i, PN_LEN));
strim(p->pn);
@@ -1394,15 +1432,18 @@ static void pcie_intr_handler(struct adapter *adapter)
int fat;
- fat = t4_handle_intr_status(adapter,
- PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
- sysbus_intr_info) +
- t4_handle_intr_status(adapter,
- PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
- pcie_port_intr_info) +
- t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
- is_t4(adapter->params.chip) ?
- pcie_intr_info : t5_pcie_intr_info);
+ if (is_t4(adapter->params.chip))
+ fat = t4_handle_intr_status(adapter,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ sysbus_intr_info) +
+ t4_handle_intr_status(adapter,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ pcie_port_intr_info) +
+ t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+ pcie_intr_info);
+ else
+ fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+ t5_pcie_intr_info);
if (fat)
t4_fatal_err(adapter);
@@ -1521,6 +1562,9 @@ static void cim_intr_handler(struct adapter *adapter)
int fat;
+ if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
+ t4_report_fw_error(adapter);
+
fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
cim_intr_info) +
t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
@@ -1768,10 +1812,16 @@ static void ma_intr_handler(struct adapter *adap)
{
u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
- if (status & MEM_PERR_INT_CAUSE)
+ if (status & MEM_PERR_INT_CAUSE) {
dev_alert(adap->pdev_dev,
"MA parity error, parity status %#x\n",
t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
+ if (is_t5(adap->params.chip))
+ dev_alert(adap->pdev_dev,
+ "MA parity error, parity status %#x\n",
+ t4_read_reg(adap,
+ MA_PARITY_ERROR_STATUS2));
+ }
if (status & MEM_WRAP_INT_CAUSE) {
v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
dev_alert(adap->pdev_dev, "MA address wrap-around error by "
@@ -2733,12 +2783,16 @@ retry:
/*
* Issue the HELLO command to the firmware. If it's not successful
* but indicates that we got a "busy" or "timeout" condition, retry
- * the HELLO until we exhaust our retry limit.
+ * the HELLO until we exhaust our retry limit. If we do exceed our
+ * retry limit, check to see if the firmware left us any error
+ * information and report that if so.
*/
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret < 0) {
if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
goto retry;
+ if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
+ t4_report_fw_error(adap);
return ret;
}
@@ -3742,6 +3796,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
lc->link_ok = link_ok;
lc->speed = speed;
lc->fc = fc;
+ lc->supported = be16_to_cpu(p->u.info.pcap);
t4_os_link_changed(adap, port, link_ok);
}
if (mod != pi->mod_type) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index e3146e83df20..39fb325474f7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -511,6 +511,7 @@
#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
#define MA_PCIE_FW 0x30b8
#define MA_PARITY_ERROR_STATUS 0x77f4
+#define MA_PARITY_ERROR_STATUS2 0x7804
#define MA_EXT_MEMORY1_BAR 0x7808
#define EDC_0_BASE_ADDR 0x7900
@@ -959,6 +960,7 @@
#define TRCMULTIFILTER 0x00000001U
#define MPS_TRC_RSS_CONTROL 0x9808
+#define MPS_T5_TRC_RSS_CONTROL 0xa00c
#define RSSCONTROL_MASK 0x00ff0000U
#define RSSCONTROL_SHIFT 16
#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 5f2729ebadbe..3409756a85b9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2228,6 +2228,10 @@ struct fw_debug_cmd {
#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT)
#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
FW_PCIE_FW_MASTER_MASK)
+#define FW_PCIE_FW_EVAL_MASK 0x7
+#define FW_PCIE_FW_EVAL_SHIFT 24
+#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \
+ FW_PCIE_FW_EVAL_MASK)
struct fw_hdr {
u8 ver;
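
FW_PCIE_FW_EVAL_GET() above follows the header's usual field-accessor pattern: the evaluation code sits in bits 26:24 of the PCIE_FW value, so extraction is shift-then-mask, and t4_report_fw_error() indexes its reason[] table with the result. A standalone sketch reusing the new macro definitions; the register value below is fabricated purely for the example:

    #include <stdio.h>
    #include <stdint.h>

    #define FW_PCIE_FW_EVAL_MASK   0x7
    #define FW_PCIE_FW_EVAL_SHIFT  24
    #define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \
                                    FW_PCIE_FW_EVAL_MASK)

    int main(void)
    {
        /* fabricated value with EVAL = 5, "Insufficient Airflow" in the
         * reason[] table of t4_report_fw_error() */
        uint32_t pcie_fw = 5u << FW_PCIE_FW_EVAL_SHIFT;

        printf("eval field = %u\n", FW_PCIE_FW_EVAL_GET(pcie_fw));
        return 0;
    }
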
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 9b33057a9477..70089c29d307 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1399,7 +1399,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
const void *mac_addr;
if (!IS_ENABLED(CONFIG_OF) || !np)
- return NULL;
+ return ERR_PTR(-ENXIO);
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
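
Returning ERR_PTR(-ENXIO) instead of NULL lets dm9000's caller tell "device-tree parsing failed" apart from a NULL it might treat as merely absent platform data, using the kernel's pointer-encoded error convention. A user-space model of the convention; the real helpers live in <linux/err.h>:

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    /* the top 4095 addresses encode errnos, as in the kernel */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long err)      { return (void *)err; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    static void *parse_dt(int have_of_node)
    {
        static int pdata = 42;

        if (!have_of_node)
            return ERR_PTR(-ENXIO);   /* a real error, not just "absent" */
        return &pdata;
    }

    int main(void)
    {
        void *p = parse_dt(0);

        if (IS_ERR(p))
            printf("parse_dt failed: %ld\n", PTR_ERR(p));
        return 0;
    }
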
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index a0b418e007a0..566b17db135a 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1994,7 +1994,7 @@ static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
- if (skb->protocol != htons(ETH_P_IP))
+ if (vlan_get_protocol(skb) != htons(ETH_P_IP))
return;
if (skb->ip_summed == CHECKSUM_PARTIAL)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index cbc330b301cd..ad3d5d12173f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2674,7 +2674,8 @@ set_itr_now:
#define E1000_TX_FLAGS_VLAN_SHIFT 16
static int e1000_tso(struct e1000_adapter *adapter,
- struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+ struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
+ __be16 protocol)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
@@ -2692,7 +2693,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -2702,7 +2703,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -2745,7 +2746,8 @@ static int e1000_tso(struct e1000_adapter *adapter,
}
static bool e1000_tx_csum(struct e1000_adapter *adapter,
- struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+ struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
+ __be16 protocol)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
@@ -2756,7 +2758,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
if (skb->ip_summed != CHECKSUM_PARTIAL)
return false;
- switch (skb->protocol) {
+ switch (protocol) {
case cpu_to_be16(ETH_P_IP):
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
cmd_len |= E1000_TXD_CMD_TCP;
@@ -3097,6 +3099,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
int count = 0;
int tso;
unsigned int f;
+ __be16 protocol = vlan_get_protocol(skb);
/* This goes back to the question of how to logically map a Tx queue
* to a flow. Right now, performance is impacted slightly negatively
@@ -3210,7 +3213,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
first = tx_ring->next_to_use;
- tso = e1000_tso(adapter, tx_ring, skb);
+ tso = e1000_tso(adapter, tx_ring, skb, protocol);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -3220,10 +3223,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (likely(hw->mac_type != e1000_82544))
tx_ring->last_tx_tso = true;
tx_flags |= E1000_TX_FLAGS_TSO;
- } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+ } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
tx_flags |= E1000_TX_FLAGS_CSUM;
- if (likely(skb->protocol == htons(ETH_P_IP)))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= E1000_TX_FLAGS_IPV4;
if (unlikely(skb->no_fcs))
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 65c3aef2bd36..247335d2c7ec 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5164,7 +5164,8 @@ link_up:
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16
-static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
+static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
+ __be16 protocol)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
@@ -5183,7 +5184,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -5231,7 +5232,8 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
return 1;
}
-static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
+static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
+ __be16 protocol)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_context_desc *context_desc;
@@ -5239,16 +5241,10 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
unsigned int i;
u8 css;
u32 cmd_len = E1000_TXD_CMD_DEXT;
- __be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return false;
- if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
- protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
- else
- protocol = skb->protocol;
-
switch (protocol) {
case cpu_to_be16(ETH_P_IP):
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -5546,6 +5542,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
int count = 0;
int tso;
unsigned int f;
+ __be16 protocol = vlan_get_protocol(skb);
if (test_bit(__E1000_DOWN, &adapter->state)) {
dev_kfree_skb_any(skb);
@@ -5620,7 +5617,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
first = tx_ring->next_to_use;
- tso = e1000_tso(tx_ring, skb);
+ tso = e1000_tso(tx_ring, skb, protocol);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -5628,14 +5625,14 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (tso)
tx_flags |= E1000_TX_FLAGS_TSO;
- else if (e1000_tx_csum(tx_ring, skb))
+ else if (e1000_tx_csum(tx_ring, skb, protocol))
tx_flags |= E1000_TX_FLAGS_CSUM;
/* Old method was to assume IPv4 packet by default if TSO was enabled.
* 82571 hardware supports TSO capabilities for IPv6 as well...
* no longer assume, we must.
*/
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= E1000_TX_FLAGS_IPV4;
if (unlikely(skb->no_fcs))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a51aa37b7b5a..369848e107f8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2295,7 +2295,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
goto out_drop;
/* obtain protocol of skb */
- protocol = skb->protocol;
+ protocol = vlan_get_protocol(skb);
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_bi[tx_ring->next_to_use];
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 79bf96ca6489..95a3ec236b49 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1597,7 +1597,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
goto out_drop;
/* obtain protocol of skb */
- protocol = skb->protocol;
+ protocol = vlan_get_protocol(skb);
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_bi[tx_ring->next_to_use];
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index c9f1d1b7ef37..ade067de1689 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -20,6 +20,7 @@
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
@@ -1371,15 +1372,16 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
+ __be16 l3_proto = vlan_get_protocol(skb);
u8 l4_proto;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
struct iphdr *ip4h = ip_hdr(skb);
/* Calculate IPv4 checksum and L4 checksum */
ip_hdr_len = ip4h->ihl;
l4_proto = ip4h->protocol;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
/* Read l4_protocol from one of IPv6 extra headers */
@@ -1390,7 +1392,7 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
return MVNETA_TX_L4_CSUM_NOT;
return mvneta_txq_desc_csum(skb_network_offset(skb),
- skb->protocol, ip_hdr_len, l4_proto);
+ l3_proto, ip_hdr_len, l4_proto);
}
return MVNETA_TX_L4_CSUM_NOT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 65a4a0f88ea0..02a2e90d581a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2389,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
+static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
+{
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+ int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
+ + 1;
+ int max_port = min_port +
+ bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+
+ if (port < min_port)
+ port = min_port;
+ else if (port >= max_port)
+ port = max_port - 1;
+
+ return port;
+}
+
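
mlx4_slaves_closest_port() clamps a caller-supplied port number into the slave's active-port window, a half-open range [min_port, max_port). The clamp on its own, runnable, with a fabricated window for illustration:

    #include <stdio.h>

    /* clamp port into the half-open window [min_port, max_port) */
    static int closest_port(int port, int min_port, int max_port)
    {
        if (port < min_port)
            port = min_port;
        else if (port >= max_port)
            port = max_port - 1;
        return port;
    }

    int main(void)
    {
        /* e.g. a slave whose only active port is 2: window [2, 3) */
        printf("%d %d %d\n",
               closest_port(1, 2, 3),    /* -> 2 */
               closest_port(2, 2, 3),    /* -> 2 */
               closest_port(5, 2, 3));   /* -> 2 */
        return 0;
    }
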
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2402,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
if (slave < 0)
return -EINVAL;
+ port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->mac = mac;
mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
@@ -2428,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
if (slave < 0)
return -EINVAL;
+ port = mlx4_slaves_closest_port(dev, slave, port);
vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
if ((0 == vlan) && (0 == qos))
@@ -2455,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
struct mlx4_priv *priv;
priv = mlx4_priv(dev);
+ port = mlx4_slaves_closest_port(dev, slave, port);
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (MLX4_VGT != vp_oper->state.default_vlan) {
@@ -2482,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
if (slave < 0)
return -EINVAL;
+ port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->spoofchk = setting;
@@ -2535,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
if (slave < 0)
return -EINVAL;
+ port = mlx4_slaves_closest_port(dev, slave, port);
switch (link_state) {
case IFLA_VF_LINK_STATE_AUTO:
/* get current link state */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index e22f24f784fc..35ff2925110a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -487,6 +487,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
struct mlx4_en_dev *mdev = priv->mdev;
int err;
+ if (pause->autoneg)
+ return -EINVAL;
+
priv->prof->tx_pause = pause->tx_pause != 0;
priv->prof->rx_pause = pause->rx_pause != 0;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index bb536aa613f4..abddcf8c40aa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -474,39 +474,12 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
int qpn, u64 *reg_id)
{
int err;
- struct mlx4_spec_list spec_eth_outer = { {NULL} };
- struct mlx4_spec_list spec_vxlan = { {NULL} };
- struct mlx4_spec_list spec_eth_inner = { {NULL} };
-
- struct mlx4_net_trans_rule rule = {
- .queue_mode = MLX4_NET_TRANS_Q_FIFO,
- .exclusive = 0,
- .allow_loopback = 1,
- .promisc_mode = MLX4_FS_REGULAR,
- .priority = MLX4_DOMAIN_NIC,
- };
-
- __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return 0; /* do nothing */
- rule.port = priv->port;
- rule.qpn = qpn;
- INIT_LIST_HEAD(&rule.list);
-
- spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
- memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
- memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
-
- spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
- spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
-
- list_add_tail(&spec_eth_outer.list, &rule.list);
- list_add_tail(&spec_vxlan.list, &rule.list);
- list_add_tail(&spec_eth_inner.list, &rule.list);
-
- err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+ err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
+ MLX4_DOMAIN_NIC, reg_id);
if (err) {
en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7e2d5d57c598..871e3a5bda38 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -78,13 +78,13 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#endif /* CONFIG_PCI_MSI */
static uint8_t num_vfs[3] = {0, 0, 0};
-static int num_vfs_argc = 3;
+static int num_vfs_argc;
module_param_array(num_vfs, byte , &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
"num_vfs=port1,port2,port1+2");
static uint8_t probe_vf[3] = {0, 0, 0};
-static int probe_vfs_argc = 3;
+static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
"probe_vf=port1,port2,port1+2");
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index d80e7a6fac74..ca0f98c95105 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1020,6 +1020,44 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+ int port, int qpn, u16 prio, u64 *reg_id)
+{
+ int err;
+ struct mlx4_spec_list spec_eth_outer = { {NULL} };
+ struct mlx4_spec_list spec_vxlan = { {NULL} };
+ struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_REGULAR,
+ };
+
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ rule.port = port;
+ rule.qpn = qpn;
+ rule.priority = prio;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+ memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+ spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
+ spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
+
+ list_add_tail(&spec_eth_outer.list, &rule.list);
+ list_add_tail(&spec_vxlan.list, &rule.list);
+ list_add_tail(&spec_eth_inner.list, &rule.list);
+
+ err = mlx4_flow_attach(dev, &rule, reg_id);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_tunnel_steer_add);
+
int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
u32 max_range_qpn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 7d717eccb7b0..193a6adb5d04 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -298,6 +298,7 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
+/* The caller must protect against concurrent access */
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
struct mlx4_mpt_entry ***mpt_entry)
{
@@ -305,13 +306,10 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
struct mlx4_cmd_mailbox *mailbox = NULL;
- /* Make sure that at this point we have single-threaded access only */
-
if (mmr->enabled != MLX4_MPT_EN_HW)
return -EINVAL;
err = mlx4_HW2SW_MPT(dev, NULL, key);
-
if (err) {
mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
@@ -333,7 +331,6 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
0, MLX4_CMD_QUERY_MPT,
MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
-
if (err)
goto free_mailbox;
@@ -378,9 +375,10 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
err = mlx4_SW2HW_MPT(dev, mailbox, key);
}
- mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
- if (!err)
+ if (!err) {
+ mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
mmr->enabled = MLX4_MPT_EN_HW;
+ }
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
@@ -400,11 +398,12 @@ EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);
int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
u32 pdn)
{
- u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags);
+ u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
/* The wrapper function will put the slave's id here */
if (mlx4_is_mfunc(dev))
pd_flags &= ~MLX4_MPT_PD_VF_MASK;
- mpt_entry->pd_flags = cpu_to_be32((pd_flags & ~MLX4_MPT_PD_MASK) |
+
+ mpt_entry->pd_flags = cpu_to_be32(pd_flags |
(pdn & MLX4_MPT_PD_MASK)
| MLX4_MPT_PD_FLAG_EN_INV);
return 0;
@@ -600,14 +599,18 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
{
int err;
- mpt_entry->start = cpu_to_be64(mr->iova);
- mpt_entry->length = cpu_to_be64(mr->size);
- mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+ mpt_entry->start = cpu_to_be64(iova);
+ mpt_entry->length = cpu_to_be64(size);
+ mpt_entry->entity_size = cpu_to_be32(page_shift);
err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
if (err)
return err;
+ mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
+ MLX4_MPT_PD_FLAG_EN_INV);
+ mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
+ MLX4_MPT_FLAG_SW_OWNS);
if (mr->mtt.order < 0) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
mpt_entry->mtt_addr = 0;
@@ -617,6 +620,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
if (mr->mtt.page_shift == 0)
mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
}
+ if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+ /* fast register MR in free state */
+ mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+ mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
+ MLX4_MPT_PD_FLAG_RAE);
+ } else {
+ mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+ }
mr->enabled = MLX4_MPT_EN_SW;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 9ba0c1ca10d5..94eeb2c7d7e4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -103,7 +103,8 @@ static int find_index(struct mlx4_dev *dev,
int i;
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
- if ((mac & MLX4_MAC_MASK) ==
+ if (table->refs[i] &&
+ (MLX4_MAC_MASK & mac) ==
(MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
return i;
}
@@ -165,12 +166,14 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
mutex_lock(&table->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
- if (free < 0 && !table->entries[i]) {
- free = i;
+ if (!table->refs[i]) {
+ if (free < 0)
+ free = i;
continue;
}
- if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+ if ((MLX4_MAC_MASK & mac) ==
+ (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
/* MAC already registered, increment ref count */
err = i;
++table->refs[i];
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 0dc31d85fc3b..2301365c79c7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -390,13 +390,14 @@ err_icm:
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_update_qp_context *cmd;
u64 pri_addr_path_mask = 0;
+ u64 qp_mask = 0;
int err = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
}
+ if (attr & MLX4_UPDATE_QP_VSD) {
+ qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
+ if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
+ cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
+ }
+
cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+ cmd->qp_mask = cpu_to_be64(qp_mask);
- err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+ err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1089367fed22..5d2498dcf536 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_qp_context *qpc = inbox->buf + 8;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;
+ u32 qp_type;
int port;
port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
priv = mlx4_priv(dev);
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
if (MLX4_VGT != vp_oper->state.default_vlan) {
/* the reserved QPs (special, proxy, tunnel)
@@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
if (mlx4_is_qp_reserved(dev, qpn))
return 0;
- /* force strip vlan by clear vsd */
- qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+ /* Force VLAN stripping by clearing VSD; an MLX QP refers to Raw Ethernet */
+ if (qp_type == MLX4_QP_ST_UD ||
+ (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
+ if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
+ *(__be32 *)inbox->buf =
+ cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
+ MLX4_QP_OPTPAR_VLAN_STRIPPING);
+ qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+ } else {
+ struct mlx4_update_qp_params params = {.flags = 0};
+
+ mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+ }
+ }
if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
@@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
}
port = (rqp->sched_queue >> 6 & 1) + 1;
- smac_index = cmd->qp_context.pri_path.grh_mylmc;
- err = mac_find_smac_ix_in_slave(dev, slave, port,
- smac_index, &mac);
- if (err) {
- mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
- qpn, smac_index);
- goto err_mac;
+
+ if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
+ smac_index = cmd->qp_context.pri_path.grh_mylmc;
+ err = mac_find_smac_ix_in_slave(dev, slave, port,
+ smac_index, &mac);
+
+ if (err) {
+ mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+ qpn, smac_index);
+ goto err_mac;
+ }
}
err = mlx4_cmd(dev, inbox->dma,
@@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
upd_context = mailbox->buf;
- upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
+ upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 5020fd47825d..2f12c88c66ab 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -206,7 +206,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
int rx_head = priv->rx_head;
int rx = 0;
- while (1) {
+ while (rx < budget) {
desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
desc0 = readl(desc + RX_REG_OFFSET_DESC0);
@@ -218,7 +218,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
net_dbg_ratelimited("packet error\n");
priv->stats.rx_dropped++;
priv->stats.rx_errors++;
- continue;
+ goto rx_next;
}
len = desc0 & RX_DESC0_FRAME_LEN_MASK;
@@ -226,13 +226,19 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (len > RX_BUF_SIZE)
len = RX_BUF_SIZE;
- skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
+ dma_sync_single_for_cpu(&ndev->dev,
+ priv->rx_mapping[rx_head],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ skb = netdev_alloc_skb_ip_align(ndev, len);
+
if (unlikely(!skb)) {
- net_dbg_ratelimited("build_skb failed\n");
+ net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
priv->stats.rx_dropped++;
priv->stats.rx_errors++;
+ goto rx_next;
}
+ memcpy(skb->data, priv->rx_buf[rx_head], len);
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, ndev);
napi_gro_receive(&priv->napi, skb);
@@ -244,18 +250,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (desc0 & RX_DESC0_MULTICAST)
priv->stats.multicast++;
+rx_next:
writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
rx_head = RX_NEXT(rx_head);
priv->rx_head = rx_head;
-
- if (rx >= budget)
- break;
}
if (rx < budget) {
- napi_gro_flush(napi, false);
- __napi_complete(napi);
+ napi_complete(napi);
}
priv->reg_imr |= RPKT_FINISH_M;
@@ -346,10 +349,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
len = ETH_ZLEN;
}
- txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
- txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
- txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
- txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
+ dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+ priv->tx_buf_size, DMA_TO_DEVICE);
+
+ txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
+ if (tx_head == TX_DESC_NUM_MASK)
+ txdes1 |= TX_DESC1_END;
writel(txdes1, desc + TX_REG_OFFSET_DESC1);
writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
@@ -465,8 +470,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
spin_lock_init(&priv->txlock);
priv->tx_buf_size = TX_BUF_SIZE;
- priv->rx_buf_size = RX_BUF_SIZE +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ priv->rx_buf_size = RX_BUF_SIZE;
priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
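
The moxart changes convert the poll loop to the canonical NAPI contract: process at most budget packets per invocation, and only complete NAPI (re-enabling interrupts) when the ring drained before the budget ran out. A user-space skeleton of the contract with a fake ring; napi_complete() and the interrupt re-enable are represented by printouts:

    #include <stdio.h>
    #include <stdbool.h>

    static int ring_pending = 10;   /* packets waiting in the fake RX ring */

    static bool rx_one(void)        /* false once the ring is empty */
    {
        if (!ring_pending)
            return false;
        ring_pending--;
        return true;
    }

    static int poll(int budget)
    {
        int rx = 0;

        while (rx < budget && rx_one())
            rx++;

        if (rx < budget)    /* drained: napi_complete() + re-enable IRQs */
            printf("complete after %d packets\n", rx);
        else                /* budget spent: stay scheduled for another poll */
            printf("budget exhausted at %d packets\n", rx);
        return rx;
    }

    int main(void)
    {
        while (poll(4) == 4)   /* the NAPI core re-polls until under budget */
            ;
        return 0;
    }
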
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8706c0dbd0c3..a44a03c45014 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1220,6 +1220,9 @@ static int lpc_eth_open(struct net_device *ndev)
__lpc_eth_clock_enable(pldat, true);
+ /* A suspended PHY makes the LPC Ethernet core block, so resume it now */
+ phy_resume(pldat->phy_dev);
+
/* Reset and initialize */
__lpc_eth_reset(pldat);
__lpc_eth_init(pldat);
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 979c6980639f..a42293092ea4 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -290,9 +290,11 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
/* Read the hardware TX timestamp if one was recorded */
if (unlikely(re.s.tstamp)) {
struct skb_shared_hwtstamps ts;
+ u64 ns;
+
memset(&ts, 0, sizeof(ts));
/* Read the timestamp */
- u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
+ ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
/* Remove the timestamp from the FIFO */
cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
/* Tell the kernel about the timestamp */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 44c8be1c6805..5f7a35212796 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -7,6 +7,7 @@ config PCH_GBE
depends on PCI && (X86_32 || COMPILE_TEST)
select MII
select PTP_1588_CLOCK_PCH
+ select NET_PTP_CLASSIFY
---help---
This is a gigabit ethernet driver for EG20T PCH.
EG20T PCH is the platform controller hub that is used in Intel's
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 32058614151a..5c4068353f66 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -135,6 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
int i, j;
struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+ spin_lock(&adapter->tx_clean_lock);
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
@@ -158,6 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
}
cmd_buf++;
}
+ spin_unlock(&adapter->tx_clean_lock);
}
void netxen_free_sw_resources(struct netxen_adapter *adapter)
@@ -1792,9 +1794,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
break;
}
- if (count && netif_running(netdev)) {
- tx_ring->sw_consumer = sw_consumer;
+ tx_ring->sw_consumer = sw_consumer;
+ if (count && netif_running(netdev)) {
smp_mb();
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 1159031f885b..5ec5a2b0e989 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1186,7 +1186,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
return;
smp_mb();
- spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
@@ -1204,7 +1203,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
netxen_napi_disable(adapter);
netxen_release_tx_buffers(adapter);
- spin_unlock(&adapter->tx_clean_lock);
}
/* Usage: During suspend and firmware recovery module */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 86783e1afcf7..3172cdf591fe 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1177,9 +1177,8 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
{
u32 idc_params, val;
- if (qlcnic_83xx_lockless_flash_read32(adapter,
- QLC_83XX_IDC_FLASH_PARAM_ADDR,
- (u8 *)&idc_params, 1)) {
+ if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR,
+ (u8 *)&idc_params, 1)) {
dev_info(&adapter->pdev->dev,
"%s:failed to get IDC params from flash\n", __func__);
adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 141f116eb868..494e8105adee 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1333,21 +1333,21 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_esw_statistics port_stats;
struct qlcnic_mac_statistics mac_stats;
- int index, ret, length, size, tx_size, ring;
+ int index, ret, length, size, ring;
char *p;
- tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN;
+ memset(data, 0, stats->n_stats * sizeof(u64));
- memset(data, 0, tx_size * sizeof(u64));
for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
- if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
tx_ring = &adapter->tx_ring[ring];
data = qlcnic_fill_tx_queue_stats(data, tx_ring);
qlcnic_update_stats(adapter);
+ } else {
+ data += QLCNIC_TX_STATS_LEN;
}
}
- memset(data, 0, stats->n_stats * sizeof(u64));
length = QLCNIC_STATS_LEN;
for (index = 0; index < length; index++) {
p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 188626e2a861..3e96f269150d 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2556,6 +2556,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
if (skb_is_gso(skb)) {
int err;
+ __be16 l3_proto = vlan_get_protocol(skb);
err = skb_cow_head(skb, 0);
if (err < 0)
@@ -2572,7 +2573,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
<< OB_MAC_TRANSPORT_HDR_SHIFT);
mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
- if (likely(skb->protocol == htons(ETH_P_IP))) {
+ if (likely(l3_proto == htons(ETH_P_IP))) {
struct iphdr *iph = ip_hdr(skb);
iph->check = 0;
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
@@ -2580,7 +2581,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
iph->daddr, 0,
IPPROTO_TCP,
0);
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 91652e7235e4..0921302553c6 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1783,33 +1783,31 @@ static void __rtl8169_set_features(struct net_device *dev,
netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
- netdev_features_t changed = features ^ dev->features;
void __iomem *ioaddr = tp->mmio_addr;
+ u32 rx_config;
- if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_RX)))
- return;
+ rx_config = RTL_R32(RxConfig);
+ if (features & NETIF_F_RXALL)
+ rx_config |= (AcceptErr | AcceptRunt);
+ else
+ rx_config &= ~(AcceptErr | AcceptRunt);
- if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
- if (features & NETIF_F_RXCSUM)
- tp->cp_cmd |= RxChkSum;
- else
- tp->cp_cmd &= ~RxChkSum;
+ RTL_W32(RxConfig, rx_config);
- if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
- tp->cp_cmd |= RxVlan;
- else
- tp->cp_cmd &= ~RxVlan;
+ if (features & NETIF_F_RXCSUM)
+ tp->cp_cmd |= RxChkSum;
+ else
+ tp->cp_cmd &= ~RxChkSum;
- RTL_W16(CPlusCmd, tp->cp_cmd);
- RTL_R16(CPlusCmd);
- }
- if (changed & NETIF_F_RXALL) {
- int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
- if (features & NETIF_F_RXALL)
- tmp |= (AcceptErr | AcceptRunt);
- RTL_W32(RxConfig, tmp);
- }
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ tp->cp_cmd |= RxVlan;
+ else
+ tp->cp_cmd &= ~RxVlan;
+
+ tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum);
+
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ RTL_R16(CPlusCmd);
}
static int rtl8169_set_features(struct net_device *dev,
@@ -1817,8 +1815,11 @@ static int rtl8169_set_features(struct net_device *dev,
{
struct rtl8169_private *tp = netdev_priv(dev);
+ features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
+
rtl_lock_work(tp);
- __rtl8169_set_features(dev, features);
+ if (features ^ dev->features)
+ __rtl8169_set_features(dev, features);
rtl_unlock_work(tp);
return 0;
@@ -7118,8 +7119,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
}
}
-static int
-rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
const unsigned int region = cfg->region;
@@ -7194,7 +7194,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_mwi_2;
}
- tp->cp_cmd = RxChkSum;
+ tp->cp_cmd = 0;
if ((sizeof(dma_addr_t) > 4) &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -7235,13 +7235,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- /*
- * Pretend we are using VLANs; This bypasses a nasty bug where
- * Interrupts stop flowing on high load on 8110SCd controllers.
- */
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- tp->cp_cmd |= RxVlan;
-
rtl_init_mdio_ops(tp);
rtl_init_pll_power_ops(tp);
rtl_init_jumbo_ops(tp);
@@ -7302,8 +7295,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
+ tp->cp_cmd |= RxChkSum | RxVlan;
+
+ /*
+ * Pretend we are using VLANs; This bypasses a nasty bug where
+ * Interrupts stop flowing on high load on 8110SCd controllers.
+ */
if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- /* 8110SCd requires hardware Rx VLAN - disallow toggling */
+ /* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
if (tp->txd_version == RTL_TD_0)
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 9e757c792d84..196e98a2d93b 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -5,6 +5,7 @@
config SH_ETH
tristate "Renesas SuperH Ethernet support"
depends on HAS_DMA
+ depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
select CRC32
select MII
select MDIO_BITBANG
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 0537381cd2f6..6859437b59fb 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2933,6 +2933,9 @@ void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
u32 crc;
int bit;
+ if (!efx_dev_registered(efx))
+ return;
+
netif_addr_lock_bh(net_dev);
efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index c553f6b5a913..cf28daba4346 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -28,7 +28,7 @@
#include "stmmac.h"
-static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_priv *priv = (struct stmmac_priv *)p;
unsigned int txsize = priv->dma_tx_size;
@@ -47,7 +47,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device, skb->data,
bmax, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
while (len != 0) {
@@ -59,7 +61,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device,
(skb->data + bmax * i),
bmax, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
@@ -69,7 +73,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device,
(skb->data + bmax * i), len,
DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
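The pattern these stmmac hunks introduce is worth calling out: dma_map_single() can fail (for example under SWIOTLB exhaustion or behind an IOMMU), and the returned handle must be checked with dma_mapping_error() before it is written into a DMA descriptor. The shape of each fixed site, condensed:

	desc->des2 = dma_map_single(priv->device, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, desc->des2))
		return -1;	/* caller frees the skb and counts the drop */
	priv->tx_skbuff_dma[entry].buf = desc->des2;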
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index de507c32036c..593e6c4144a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -220,10 +220,10 @@ enum dma_irq_status {
handle_tx = 0x8,
};
-#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1)
-#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2)
-#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3)
-#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4)
+#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0)
+#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1)
+#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
+#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
#define CORE_PCS_ANE_COMPLETE (1 << 5)
#define CORE_PCS_LINK_STATUS (1 << 6)
@@ -287,7 +287,7 @@ struct dma_features {
/* Default LPI timers */
#define STMMAC_DEFAULT_LIT_LS 0x3E8
-#define STMMAC_DEFAULT_TWT_LS 0x0
+#define STMMAC_DEFAULT_TWT_LS 0x1E
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
@@ -425,7 +425,7 @@ struct stmmac_mode_ops {
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
- unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+ int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
int (*set_16kib_bfsize)(int mtu);
void (*init_desc3)(struct dma_desc *p);
void (*refill_desc3) (void *priv, struct dma_desc *p);
@@ -445,6 +445,7 @@ struct mac_device_info {
int multicast_filter_bins;
int unicast_filter_entries;
int mcast_bits_log2;
+ unsigned int rx_csum;
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 71b5419256c1..64d8f56a9c17 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -153,7 +153,7 @@ enum inter_frame_gap {
#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
- GMAC_CONTROL_BE)
+ GMAC_CONTROL_BE | GMAC_CONTROL_DCRS)
/* GMAC Frame Filter defines */
#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index d8ef18786a1c..5efe60ea6526 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -58,7 +58,11 @@ static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw)
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
- value |= GMAC_CONTROL_IPC;
+ if (hw->rx_csum)
+ value |= GMAC_CONTROL_IPC;
+ else
+ value &= ~GMAC_CONTROL_IPC;
+
writel(value, ioaddr + GMAC_CONTROL);
value = readl(ioaddr + GMAC_CONTROL);
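With rx_csum now stored in mac_device_info, rx_ipc_enable() becomes a true toggle rather than an unconditional enable, which is what lets the ndo_set_features hook added further down actually switch the checksum engine off at runtime. The underlying operation is just a conditional bit update; a plain-C model:

/* Set or clear one control bit according to a software flag. */
static unsigned int apply_flag(unsigned int reg, unsigned int bit, int on)
{
	return on ? (reg | bit) : (reg & ~bit);
}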
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 8607488cbcfc..192c2491330b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -68,7 +68,7 @@ struct stmmac_counters {
unsigned int mmc_rx_octetcount_g;
unsigned int mmc_rx_broadcastframe_g;
unsigned int mmc_rx_multicastframe_g;
- unsigned int mmc_rx_crc_errror;
+ unsigned int mmc_rx_crc_error;
unsigned int mmc_rx_align_error;
unsigned int mmc_rx_run_error;
unsigned int mmc_rx_jabber_error;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 50617c5a0bdb..08c483bd2ec7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -196,7 +196,7 @@ void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
- mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR);
+ mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR);
mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 650a4be6bce5..5dd50c6cda5b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -28,7 +28,7 @@
#include "stmmac.h"
-static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_priv *priv = (struct stmmac_priv *)p;
unsigned int txsize = priv->dma_tx_size;
@@ -53,7 +53,10 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device, skb->data,
bmax, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
STMMAC_RING_MODE);
@@ -68,7 +71,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device, skb->data + bmax,
len, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
STMMAC_RING_MODE);
@@ -77,7 +82,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ return -1;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
STMMAC_RING_MODE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index ca01035634a7..58097c0e2ad5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -34,6 +34,11 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/reset.h>
+struct stmmac_tx_info {
+ dma_addr_t buf;
+ bool map_as_page;
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -45,7 +50,7 @@ struct stmmac_priv {
u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
- dma_addr_t *tx_skbuff_dma;
+ struct stmmac_tx_info *tx_skbuff_dma;
dma_addr_t dma_tx_phy;
int tx_coalesce;
int hwts_tx_en;
@@ -105,6 +110,8 @@ struct stmmac_priv {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_ops;
unsigned int default_addend;
+ struct clk *clk_ptp_ref;
+ unsigned int clk_ptp_rate;
u32 adv_ts;
int use_riwt;
int irq_wake;
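The reason tx_skbuff_dma grows from a bare dma_addr_t into struct stmmac_tx_info: linear skb data is mapped with dma_map_single(), while fragments go through skb_frag_dma_map(), which is a page mapping, and the DMA API requires the matching unmap call for each. The map_as_page flag records which call created the mapping so teardown can choose correctly, as the later hunks do:

	if (priv->tx_skbuff_dma[i].map_as_page)
		dma_unmap_page(priv->device, priv->tx_skbuff_dma[i].buf,
			       len, DMA_TO_DEVICE);
	else
		dma_unmap_single(priv->device, priv->tx_skbuff_dma[i].buf,
				 len, DMA_TO_DEVICE);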
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 9af50bae4dde..cf4f38db1c0a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -175,7 +175,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_octetcount_g),
STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
- STMMAC_MMC_STAT(mmc_rx_crc_errror),
+ STMMAC_MMC_STAT(mmc_rx_crc_error),
STMMAC_MMC_STAT(mmc_rx_align_error),
STMMAC_MMC_STAT(mmc_rx_run_error),
STMMAC_MMC_STAT(mmc_rx_jabber_error),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 08addd653728..b0c1521e08a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -275,6 +275,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
*/
bool stmmac_eee_init(struct stmmac_priv *priv)
{
+ char *phy_bus_name = priv->plat->phy_bus_name;
bool ret = false;
/* Using PCS we cannot deal with the phy registers at this stage
@@ -284,6 +285,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
(priv->pcs == STMMAC_PCS_RTBI))
goto out;
+ /* Never init EEE when a switch is attached */
+ if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
+ goto out;
+
/* MAC core supports the EEE feature. */
if (priv->dma_cap.eee) {
int tx_lpi_timer = priv->tx_lpi_timer;
@@ -316,10 +321,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
priv->hw->mac->set_eee_timer(priv->hw,
STMMAC_DEFAULT_LIT_LS,
tx_lpi_timer);
- } else
- /* Set HW EEE according to the speed */
- priv->hw->mac->set_eee_pls(priv->hw,
- priv->phydev->link);
+ }
+ /* Set HW EEE according to the speed */
+ priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
@@ -603,16 +607,16 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
/* calculate the default addend value:
* formula is:
* addend = (2^32)/freq_div_ratio;
- * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
- * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
- * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
+ * where, freq_div_ratio = clk_ptp_ref_i/50MHz
+ * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i;
+ * NOTE: clk_ptp_ref_i should be >= 50MHz to
* achieve 20ns accuracy.
*
* 2^x * y == (y << x), hence
* 2^32 * 50000000 ==> (50000000 << 32)
*/
temp = (u64) (50000000ULL << 32);
- priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
+ priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
priv->hw->ptp->config_addend(priv->ioaddr,
priv->default_addend);
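A quick sanity check of the formula using the old hard-coded 62.5 MHz value that this hunk retires: addend = 2^32 * 50 MHz / 62.5 MHz = 0.8 * 2^32 = 0xCCCCCCCC. In code:

	u64 temp = 50000000ULL << 32;
	u32 addend = div_u64(temp, 62500000);	/* 0xCCCCCCCC = 0.8 * 2^32 */

With clk_ptp_rate the same formula now tracks whatever reference clock the platform actually provides.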
@@ -638,6 +642,16 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
+ /* Fall back to the main clock if no PTP ref clock is passed */
+ priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
+ if (IS_ERR(priv->clk_ptp_ref)) {
+ priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
+ priv->clk_ptp_ref = NULL;
+ } else {
+ clk_prepare_enable(priv->clk_ptp_ref);
+ priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
+ }
+
priv->adv_ts = 0;
if (priv->dma_cap.atime_stamp && priv->extend_desc)
priv->adv_ts = 1;
@@ -657,6 +671,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
+ if (priv->clk_ptp_ref)
+ clk_disable_unprepare(priv->clk_ptp_ref);
stmmac_ptp_unregister(priv);
}
@@ -1061,7 +1077,8 @@ static int init_dma_desc_rings(struct net_device *dev)
else
p = priv->dma_tx + i;
p->des2 = 0;
- priv->tx_skbuff_dma[i] = 0;
+ priv->tx_skbuff_dma[i].buf = 0;
+ priv->tx_skbuff_dma[i].map_as_page = false;
priv->tx_skbuff[i] = NULL;
}
@@ -1100,17 +1117,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
else
p = priv->dma_tx + i;
- if (priv->tx_skbuff_dma[i]) {
- dma_unmap_single(priv->device,
- priv->tx_skbuff_dma[i],
- priv->hw->desc->get_tx_len(p),
- DMA_TO_DEVICE);
- priv->tx_skbuff_dma[i] = 0;
+ if (priv->tx_skbuff_dma[i].buf) {
+ if (priv->tx_skbuff_dma[i].map_as_page)
+ dma_unmap_page(priv->device,
+ priv->tx_skbuff_dma[i].buf,
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[i].buf,
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
}
if (priv->tx_skbuff[i] != NULL) {
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
+ priv->tx_skbuff_dma[i].buf = 0;
+ priv->tx_skbuff_dma[i].map_as_page = false;
}
}
}
@@ -1131,7 +1155,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
if (!priv->rx_skbuff)
goto err_rx_skbuff;
- priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+ priv->tx_skbuff_dma = kmalloc_array(txsize,
+ sizeof(*priv->tx_skbuff_dma),
GFP_KERNEL);
if (!priv->tx_skbuff_dma)
goto err_tx_skbuff_dma;
@@ -1293,12 +1318,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
pr_debug("%s: curr %d, dirty %d\n", __func__,
priv->cur_tx, priv->dirty_tx);
- if (likely(priv->tx_skbuff_dma[entry])) {
- dma_unmap_single(priv->device,
- priv->tx_skbuff_dma[entry],
- priv->hw->desc->get_tx_len(p),
- DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = 0;
+ if (likely(priv->tx_skbuff_dma[entry].buf)) {
+ if (priv->tx_skbuff_dma[entry].map_as_page)
+ dma_unmap_page(priv->device,
+ priv->tx_skbuff_dma[entry].buf,
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[entry].buf,
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry].buf = 0;
+ priv->tx_skbuff_dma[entry].map_as_page = false;
}
priv->hw->mode->clean_desc3(priv, p);
@@ -1637,6 +1669,13 @@ static int stmmac_hw_setup(struct net_device *dev)
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->hw, dev->mtu);
+ ret = priv->hw->mac->rx_ipc(priv->hw);
+ if (!ret) {
+ pr_warn(" RX IPC Checksum Offload disabled\n");
+ priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+ priv->hw->rx_csum = 0;
+ }
+
/* Enable the MAC Rx/Tx */
stmmac_set_mac(priv->ioaddr, true);
@@ -1887,12 +1926,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(!is_jumbo)) {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ goto dma_map_err;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
csum_insertion, priv->mode);
} else {
desc = first;
entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+ if (unlikely(entry < 0))
+ goto dma_map_err;
}
for (i = 0; i < nfrags; i++) {
@@ -1908,7 +1951,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
- priv->tx_skbuff_dma[entry] = desc->des2;
+ if (dma_mapping_error(priv->device, desc->des2))
+ goto dma_map_err; /* should reuse desc w/o issues */
+
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].map_as_page = true;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
priv->mode);
wmb();
@@ -1975,7 +2022,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
spin_unlock(&priv->tx_lock);
+ return NETDEV_TX_OK;
+dma_map_err:
+ dev_err(priv->device, "Tx dma map failed\n");
+ dev_kfree_skb(skb);
+ priv->dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
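One subtlety in the new dma_map_err exit above: it still returns NETDEV_TX_OK. Returning NETDEV_TX_BUSY would tell the core to requeue an skb the driver has already freed; the ndo_start_xmit contract for an unrecoverable per-packet error is to consume the skb and account the drop. Condensed into a hypothetical helper:

static netdev_tx_t xmit_consume_and_drop(struct net_device *dev,
					 struct sk_buff *skb)
{
	dev_kfree_skb(skb);		/* packet is consumed, not requeued */
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;		/* "handled", even though dropped */
}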
@@ -2028,7 +2080,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
priv->rx_skbuff_dma[entry] =
dma_map_single(priv->device, skb->data, bfsize,
DMA_FROM_DEVICE);
-
+ if (dma_mapping_error(priv->device,
+ priv->rx_skbuff_dma[entry])) {
+ dev_err(priv->device, "Rx dma map failed\n");
+ dev_kfree_skb(skb);
+ break;
+ }
p->des2 = priv->rx_skbuff_dma[entry];
priv->hw->mode->refill_desc3(priv, p);
@@ -2055,7 +2112,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
unsigned int entry = priv->cur_rx % rxsize;
unsigned int next_entry;
unsigned int count = 0;
- int coe = priv->plat->rx_coe;
+ int coe = priv->hw->rx_csum;
if (netif_msg_rx_status(priv)) {
pr_debug("%s: descriptor ring:\n", __func__);
@@ -2276,8 +2333,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
features &= ~NETIF_F_RXCSUM;
- else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
- features &= ~NETIF_F_IPV6_CSUM;
+
if (!priv->plat->tx_coe)
features &= ~NETIF_F_ALL_CSUM;
@@ -2292,6 +2348,24 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
return features;
}
+static int stmmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+ /* Keep the COE type if RX checksum offload is supported */
+ if (features & NETIF_F_RXCSUM)
+ priv->hw->rx_csum = priv->plat->rx_coe;
+ else
+ priv->hw->rx_csum = 0;
+ /* No return-value check needed: rx_coe was set earlier and
+ * rx_ipc() corrects it itself if there is a problem.
+ */
+ priv->hw->mac->rx_ipc(priv->hw);
+
+ return 0;
+}
+
/**
* stmmac_interrupt - main ISR
* @irq: interrupt number.
@@ -2572,6 +2646,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_stop = stmmac_release,
.ndo_change_mtu = stmmac_change_mtu,
.ndo_fix_features = stmmac_fix_features,
+ .ndo_set_features = stmmac_set_features,
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
@@ -2592,7 +2667,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
*/
static int stmmac_hw_init(struct stmmac_priv *priv)
{
- int ret;
struct mac_device_info *mac;
/* Identify the MAC HW device */
@@ -2649,15 +2723,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
/* To use alternate (extended) or normal descriptor structures */
stmmac_selec_desc_mode(priv);
- ret = priv->hw->mac->rx_ipc(priv->hw);
- if (!ret) {
- pr_warn(" RX IPC Checksum Offload not configured.\n");
- priv->plat->rx_coe = STMMAC_RX_COE_NONE;
- }
-
- if (priv->plat->rx_coe)
+ if (priv->plat->rx_coe) {
+ priv->hw->rx_csum = priv->plat->rx_coe;
pr_info(" RX Checksum Offload Engine supported (type %d)\n",
priv->plat->rx_coe);
+ }
if (priv->plat->tx_coe)
pr_info(" TX Checksum insertion supported\n");
@@ -2716,8 +2786,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
if (IS_ERR(priv->stmmac_clk)) {
dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
__func__);
- ret = PTR_ERR(priv->stmmac_clk);
- goto error_clk_get;
+ /* If we failed to obtain stmmac_clk and no specific clk_csr
+ * value was passed from the platform, fail the probe.
+ */
+ if (!priv->plat->clk_csr) {
+ ret = PTR_ERR(priv->stmmac_clk);
+ goto error_clk_get;
+ } else {
+ priv->stmmac_clk = NULL;
+ }
}
clk_prepare_enable(priv->stmmac_clk);
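Storing NULL into priv->stmmac_clk is safe here because the common clock framework treats a NULL clk as a dummy: clk_prepare_enable(NULL) succeeds and clk_get_rate(NULL) returns 0, so the unconditional clk_prepare_enable() above needs no extra guard. For illustration:

	struct clk *c = NULL;
	WARN_ON(clk_prepare_enable(c) != 0);	/* no-op, returns 0 */
	WARN_ON(clk_get_rate(c) != 0);		/* dummy rate: 0 */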
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b7ad3565566c..c5ee79d8a8c5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -206,6 +206,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv)
{
if (priv->ptp_clock) {
ptp_clock_unregister(priv->ptp_clock);
+ priv->ptp_clock = NULL;
pr_debug("Removed PTP HW clock successfully on %s\n",
priv->dev->name);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 3dbc047622fa..4535df37c227 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -25,8 +25,6 @@
#ifndef __STMMAC_PTP_H__
#define __STMMAC_PTP_H__
-#define STMMAC_SYSCLOCK 62500000
-
/* IEEE 1588 PTP register offsets */
#define PTP_TCR 0x0700 /* Timestamp Control Reg */
#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 23c89ab5a6ad..f67539650c38 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -350,14 +350,17 @@ static int vnet_walk_rx_one(struct vnet_port *port,
if (IS_ERR(desc))
return PTR_ERR(desc);
+ if (desc->hdr.state != VIO_DESC_READY)
+ return 1;
+
+ rmb();
+
viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
desc->hdr.state, desc->hdr.ack,
desc->size, desc->ncookies,
desc->cookies[0].cookie_addr,
desc->cookies[0].cookie_size);
- if (desc->hdr.state != VIO_DESC_READY)
- return 1;
err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
if (err == -ECONNRESET)
return err;
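The sunvnet reorder is a memory-ordering fix, not a cosmetic move: the descriptor state must be checked first, and only after the read barrier may the payload fields (size, cookies) be loaded; otherwise a weakly ordered CPU could speculate those loads ahead of the READY check and see stale data. The required shape, schematically (process_desc() is a stand-in for the real consumer):

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;
	rmb();		/* order the state check before payload loads */
	process_desc(desc->size, desc->cookies, desc->ncookies);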
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 999fb72688d2..e2a00287f8eb 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -699,6 +699,28 @@ static void cpsw_rx_handler(void *token, int len, int status)
cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
+ bool ndev_status = false;
+ struct cpsw_slave *slave = priv->slaves;
+ int n;
+
+ if (priv->data.dual_emac) {
+ /* In dual emac mode check for all interfaces */
+ for (n = priv->data.slaves; n; n--, slave++)
+ if (netif_running(slave->ndev))
+ ndev_status = true;
+ }
+
+ if (ndev_status && (status >= 0)) {
+ /* The received packet is for an interface that is
+ * already down while another interface is still up
+ * and running; instead of freeing it, which would
+ * shrink the pool of rx descriptors in the DMA
+ * engine, requeue the skb back to cpdma.
+ */
+ new_skb = skb;
+ goto requeue;
+ }
+
/* the interface is going down, skbs are purged */
dev_kfree_skb_any(skb);
return;
@@ -717,6 +739,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
new_skb = skb;
}
+requeue:
ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
skb_tailroom(new_skb), 0);
if (WARN_ON(ret < 0))
@@ -2311,10 +2334,19 @@ static int cpsw_suspend(struct device *dev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
- if (netif_running(ndev))
- cpsw_ndo_stop(ndev);
+ if (priv->data.dual_emac) {
+ int i;
- for_each_slave(priv, soft_reset_slave);
+ for (i = 0; i < priv->data.slaves; i++) {
+ if (netif_running(priv->slaves[i].ndev))
+ cpsw_ndo_stop(priv->slaves[i].ndev);
+ soft_reset_slave(priv->slaves + i);
+ }
+ } else {
+ if (netif_running(ndev))
+ cpsw_ndo_stop(ndev);
+ for_each_slave(priv, soft_reset_slave);
+ }
pm_runtime_put_sync(&pdev->dev);
@@ -2328,14 +2360,24 @@ static int cpsw_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct cpsw_priv *priv = netdev_priv(ndev);
pm_runtime_get_sync(&pdev->dev);
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- if (netif_running(ndev))
- cpsw_ndo_open(ndev);
+ if (priv->data.dual_emac) {
+ int i;
+
+ for (i = 0; i < priv->data.slaves; i++) {
+ if (netif_running(priv->slaves[i].ndev))
+ cpsw_ndo_open(priv->slaves[i].ndev);
+ }
+ } else {
+ if (netif_running(ndev))
+ cpsw_ndo_open(ndev);
+ }
return 0;
}