author    David S. Miller <davem@davemloft.net>  2012-02-10 23:32:28 -0500
committer David S. Miller <davem@davemloft.net>  2012-02-10 23:32:28 -0500
commit    d5ef8a4d87ab21d575ac86366599c9152a28028d (patch)
tree      8b1be85ad1af7ee6a0e3e36c77ae738c966c1f21 /drivers/net/ethernet
parent    d9dd966d7fc088a6bed991c2b1e2fba4485e0a31 (diff)
parent    8df54d622a120058ee8bec38743c9b8f091c8e58 (diff)
download  blackbird-op-linux-d5ef8a4d87ab21d575ac86366599c9152a28028d.tar.gz
          blackbird-op-linux-d5ef8a4d87ab21d575ac86366599c9152a28028d.zip
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/infiniband/hw/nes/nes_cm.c

Simple whitespace conflict.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c    |  3
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c    | 10
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c      | 10
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c          |  3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c    | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c   | 88
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c      | 18
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c     |  3
-rw-r--r--  drivers/net/ethernet/marvell/skge.c                | 71
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c           | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c     |  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c         | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c           | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h       |  3
-rw-r--r--  drivers/net/ethernet/micrel/Kconfig                |  1
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c              |  4
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h              |  3
-rw-r--r--  drivers/net/ethernet/toshiba/Kconfig               |  2
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c            |  3
19 files changed, 127 insertions(+), 145 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 6e6a684359b5..518ec5c6872d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -523,7 +523,6 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb = build_skb(data);
if (likely(skb)) {
-
#ifdef BNX2X_STOP_ON_ERROR
if (pad + len > fp->rx_buf_size) {
BNX2X_ERR("skb_put is about to fail... "
@@ -557,7 +556,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return;
}
-
+ kfree(new_data);
drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index a27c601af3d1..ab753d7334a6 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -946,7 +946,7 @@ bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
if (!flash_attr)
- return -ENOMEM;
+ return 0;
fcomp.bnad = bnad;
fcomp.comp_status = 0;
@@ -958,7 +958,7 @@ bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
if (ret != BFA_STATUS_OK) {
spin_unlock_irqrestore(&bnad->bna_lock, flags);
kfree(flash_attr);
- goto out_err;
+ return 0;
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
wait_for_completion(&fcomp.comp);
@@ -978,8 +978,6 @@ bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
}
kfree(flash_attr);
return flash_part;
-out_err:
- return -EINVAL;
}
static int
@@ -1006,7 +1004,7 @@ bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
/* Query the flash partition based on the offset */
flash_part = bnad_get_flash_partition_by_offset(bnad,
eeprom->offset, &base_offset);
- if (flash_part <= 0)
+ if (flash_part == 0)
return -EFAULT;
fcomp.bnad = bnad;
@@ -1048,7 +1046,7 @@ bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
/* Query the flash partition based on the offset */
flash_part = bnad_get_flash_partition_by_offset(bnad,
eeprom->offset, &base_offset);
- if (flash_part <= 0)
+ if (flash_part == 0)
return -EFAULT;
fcomp.bnad = bnad;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1f86a70b88af..e72dc8175955 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4962,12 +4962,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
e1000_setup_rctl(adapter);
e1000_set_rx_mode(netdev);
+ rctl = er32(RCTL);
+
/* turn on all-multi mode if wake on multicast is enabled */
- if (wufc & E1000_WUFC_MC) {
- rctl = er32(RCTL);
+ if (wufc & E1000_WUFC_MC)
rctl |= E1000_RCTL_MPE;
- ew32(RCTL, rctl);
- }
+
+ /* enable receives in the hardware */
+ ew32(RCTL, rctl | E1000_RCTL_EN);
if (hw->mac_type >= e1000_82540) {
ctrl = er32(CTRL);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fd911cac1ac5..fda824735e18 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5014,7 +5014,8 @@ static int igb_find_enabled_vfs(struct igb_adapter *adapter)
vf_devfn = pdev->devfn + 0x80;
pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
while (pvfdev) {
- if (pvfdev->devfn == vf_devfn)
+ if (pvfdev->devfn == vf_devfn &&
+ (pvfdev->bus->number >= pdev->bus->number))
vfs_found++;
vf_devfn += vf_stride;
pvfdev = pci_get_device(hw->vendor_id,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 005e5f25600d..79a92fe987b9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -112,6 +112,8 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
u8 err = 0;
+ u8 prio_tc[MAX_USER_PRIORITY] = {0};
+ int i;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
/* Fail command if not in CEE mode */
@@ -122,10 +124,15 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
return err;
- if (state > 0)
+ if (state > 0) {
err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
- else
+ ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
+ } else {
err = ixgbe_setup_tc(netdev, 0);
+ }
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
return err;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 1f31a04d3c91..a62975480e37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -120,19 +120,23 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
#endif /* IXGBE_FCOE */
};
-#define IXGBE_QUEUE_STATS_LEN \
- ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
- ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
+ * we set the num_rx_queues to evaluate to num_tx_queues. This is
+ * used because we do not have a good way to get the max number of
+ * rx queues with CONFIG_RPS disabled.
+ */
+#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
+
+#define IXGBE_QUEUE_STATS_LEN ( \
+ (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
- (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
- IXGBE_FLAG_DCB_ENABLED) ? \
- (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
- / sizeof(u64) : 0)
+ (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
+ / sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
IXGBE_PB_STATS_LEN + \
IXGBE_QUEUE_STATS_LEN)
@@ -1078,8 +1082,15 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < adapter->num_tx_queues; j++) {
+ for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
ring = adapter->tx_ring[j];
+ if (!ring) {
+ data[i] = 0;
+ data[i+1] = 0;
+ i += 2;
+ continue;
+ }
+
do {
start = u64_stats_fetch_begin_bh(&ring->syncp);
data[i] = ring->stats.packets;
@@ -1087,8 +1098,15 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
i += 2;
}
- for (j = 0; j < adapter->num_rx_queues; j++) {
+ for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
ring = adapter->rx_ring[j];
+ if (!ring) {
+ data[i] = 0;
+ data[i+1] = 0;
+ i += 2;
+ continue;
+ }
+
do {
start = u64_stats_fetch_begin_bh(&ring->syncp);
data[i] = ring->stats.packets;
@@ -1096,22 +1114,20 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
i += 2;
}
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
- data[i++] = adapter->stats.pxontxc[j];
- data[i++] = adapter->stats.pxofftxc[j];
- }
- for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
- data[i++] = adapter->stats.pxonrxc[j];
- data[i++] = adapter->stats.pxoffrxc[j];
- }
+
+ for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
+ data[i++] = adapter->stats.pxontxc[j];
+ data[i++] = adapter->stats.pxofftxc[j];
+ }
+ for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
+ data[i++] = adapter->stats.pxonrxc[j];
+ data[i++] = adapter->stats.pxoffrxc[j];
}
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
char *p = (char *)data;
int i;
@@ -1126,31 +1142,29 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < adapter->num_tx_queues; i++) {
+ for (i = 0; i < netdev->num_tx_queues; i++) {
sprintf(p, "tx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < adapter->num_rx_queues; i++) {
+ for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
- sprintf(p, "tx_pb_%u_pxon", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_pb_%u_pxoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
- sprintf(p, "rx_pb_%u_pxon", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_pb_%u_pxoff", i);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
+ sprintf(p, "tx_pb_%u_pxon", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_pb_%u_pxoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
+ sprintf(p, "rx_pb_%u_pxon", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_pb_%u_pxoff", i);
+ p += ETH_GSTRING_LEN;
}
/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ecc46ce8b2c3..6441acc34c23 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2638,22 +2638,22 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
/*
* we must limit the number of descriptors so that the
* total size of max desc * buf_len is not greater
- * than 65535
+ * than 65536
*/
if (ring_is_ps_enabled(ring)) {
-#if (MAX_SKB_FRAGS > 16)
+#if (PAGE_SIZE < 8192)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (MAX_SKB_FRAGS > 8)
+#elif (PAGE_SIZE < 16384)
rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#elif (MAX_SKB_FRAGS > 4)
+#elif (PAGE_SIZE < 32768)
rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
#else
rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
#endif
} else {
- if (rx_buf_len < IXGBE_RXBUFFER_4K)
+ if (rx_buf_len <= IXGBE_RXBUFFER_4K)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
- else if (rx_buf_len < IXGBE_RXBUFFER_8K)
+ else if (rx_buf_len <= IXGBE_RXBUFFER_8K)
rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
else
rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
@@ -2835,7 +2835,7 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
vf_shift = adapter->num_vfs % 32;
- reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
+ reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;
/* Enable only the PF's pool for Tx/Rx */
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
@@ -4335,6 +4335,10 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
adapter->num_tx_queues = 1;
done:
+ if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
+ (adapter->netdev->reg_state == NETREG_UNREGISTERING))
+ return 0;
+
/* Notify the stack of the (possibly) reduced queue counts. */
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
return netif_set_real_num_rx_queues(adapter->netdev,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 8d8cdbc22df0..b01ecb4d2bb1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -67,7 +67,8 @@ static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
vf_devfn = pdev->devfn + 0x80;
pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
while (pvfdev) {
- if (pvfdev->devfn == vf_devfn)
+ if (pvfdev->devfn == vf_devfn &&
+ (pvfdev->bus->number >= pdev->bus->number))
vfs_found++;
vf_devfn += 2;
pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 1d61eaac8587..5a30bf823099 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,20 +931,17 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
}
/* Allocate and setup a new buffer for receiving */
-static int skge_rx_setup(struct pci_dev *pdev,
- struct skge_element *e,
- struct sk_buff *skb, unsigned int bufsize)
+static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+ struct sk_buff *skb, unsigned int bufsize)
{
struct skge_rx_desc *rd = e->desc;
- dma_addr_t map;
+ u64 map;
- map = pci_map_single(pdev, skb->data, bufsize,
+ map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, map))
- goto mapping_error;
- rd->dma_lo = lower_32_bits(map);
- rd->dma_hi = upper_32_bits(map);
+ rd->dma_lo = map;
+ rd->dma_hi = map >> 32;
e->skb = skb;
rd->csum1_start = ETH_HLEN;
rd->csum2_start = ETH_HLEN;
@@ -956,13 +953,6 @@ static int skge_rx_setup(struct pci_dev *pdev,
rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, bufsize);
- return 0;
-
-mapping_error:
- if (net_ratelimit())
- dev_warn(&pdev->dev, "%s: rx mapping error\n",
- skb->dev->name);
- return -EIO;
}
/* Resume receiving using existing skb,
@@ -1024,11 +1014,7 @@ static int skge_rx_fill(struct net_device *dev)
return -ENOMEM;
skb_reserve(skb, NET_IP_ALIGN);
- if (skge_rx_setup(skge->hw->pdev, e, skb, skge->rx_buf_size)) {
- kfree_skb(skb);
- return -ENOMEM;
- }
-
+ skge_rx_setup(skge, e, skb, skge->rx_buf_size);
} while ((e = e->next) != ring->start);
ring->to_clean = ring->start;
@@ -2743,7 +2729,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
struct skge_tx_desc *td;
int i;
u32 control, len;
- dma_addr_t map;
+ u64 map;
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
@@ -2757,14 +2743,11 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
e->skb = skb;
len = skb_headlen(skb);
map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(hw->pdev, map))
- goto mapping_error;
-
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, len);
- td->dma_lo = lower_32_bits(map);
- td->dma_hi = upper_32_bits(map);
+ td->dma_lo = map;
+ td->dma_hi = map >> 32;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
const int offset = skb_checksum_start_offset(skb);
@@ -2795,16 +2778,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
- if (dma_mapping_error(&hw->pdev->dev, map))
- goto mapping_unwind;
e = e->next;
e->skb = skb;
tf = e->desc;
BUG_ON(tf->control & BMU_OWN);
- tf->dma_lo = lower_32_bits(map);
- tf->dma_hi = upper_32_bits(map);
+ tf->dma_lo = map;
+ tf->dma_hi = (u64) map >> 32;
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, skb_frag_size(frag));
@@ -2834,28 +2815,6 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
}
return NETDEV_TX_OK;
-
-mapping_unwind:
- /* unroll any pages that were already mapped. */
- if (e != skge->tx_ring.to_use) {
- struct skge_element *u;
-
- for (u = skge->tx_ring.to_use->next; u != e; u = u->next)
- pci_unmap_page(hw->pdev, dma_unmap_addr(u, mapaddr),
- dma_unmap_len(u, maplen),
- PCI_DMA_TODEVICE);
- e = skge->tx_ring.to_use;
- }
- /* undo the mapping for the skb header */
- pci_unmap_single(hw->pdev, dma_unmap_addr(e, mapaddr),
- dma_unmap_len(e, maplen),
- PCI_DMA_TODEVICE);
-mapping_error:
- /* mapping error causes error message and packet to be discarded. */
- if (net_ratelimit())
- dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
}
@@ -3099,17 +3058,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
if (!nskb)
goto resubmit;
- if (unlikely(skge_rx_setup(skge->hw->pdev, e, nskb, skge->rx_buf_size))) {
- dev_kfree_skb(nskb);
- goto resubmit;
- }
-
pci_unmap_single(skge->hw->pdev,
dma_unmap_addr(e, mapaddr),
dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
skb = e->skb;
prefetch(skb->data);
+ skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
}
skb_put(skb, len);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 1831ddeebc42..48d5c48d7ce8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1616,12 +1616,12 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
}
kfree(priv->mfunc.master.slave_state);
- iounmap(priv->mfunc.comm);
- dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
- priv->mfunc.vhcr,
- priv->mfunc.vhcr_dma);
- priv->mfunc.vhcr = NULL;
}
+
+ iounmap(priv->mfunc.comm);
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
+ priv->mfunc.vhcr = NULL;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 25e6480479df..9fe4f94c6da7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -892,7 +892,8 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
for (i = 0; i < priv->rx_ring_num; i++) {
if (priv->rx_ring[i].rx_info)
- mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
+ mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
+ priv->prof->rx_ring_size, priv->stride);
if (priv->rx_cq[i].buf)
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index fb6e899f38d3..d703ef2c9c91 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -168,8 +168,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
return 0;
err:
- while (i--)
+ while (i--) {
+ dma_addr_t dma = be64_to_cpu(rx_desc->data[i].addr);
+ pci_unmap_single(priv->mdev->pdev, dma, skb_frags[i].size,
+ PCI_DMA_FROMDEVICE);
put_page(skb_frags[i].page);
+ }
return -ENOMEM;
}
@@ -379,12 +383,12 @@ err_allocator:
}
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring)
+ struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
mlx4_en_unmap_buffer(&ring->wqres.buf);
- mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 0785d9b2a265..ca574d850b39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -136,7 +136,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port,
u32 prot;
int err;
- s_steer = &mlx4_priv(dev)->steer[0];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
if (!new_entry)
return -ENOMEM;
@@ -220,7 +220,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
- s_steer = &mlx4_priv(dev)->steer[0];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
pqp = get_promisc_qp(dev, 0, steer, qpn);
if (!pqp)
@@ -265,7 +265,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *dqp, *tmp_dqp;
- s_steer = &mlx4_priv(dev)->steer[0];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
/* if qp is not promisc, it cannot be duplicated */
if (!get_promisc_qp(dev, 0, steer, qpn))
@@ -306,7 +306,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
bool ret = false;
int i;
- s_steer = &mlx4_priv(dev)->steer[0];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -361,7 +361,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
- s_steer = &mlx4_priv(dev)->steer[0];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
@@ -466,7 +466,7 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
int loc, i;
int err;
- s_steer = &mlx4_priv(dev)->steer[0];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
pqp = get_promisc_qp(dev, 0, steer, qpn);
@@ -1004,7 +1004,7 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
return 0;
if (mlx4_is_mfunc(dev))
@@ -1016,7 +1016,7 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
return 0;
if (mlx4_is_mfunc(dev))
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 35f08840813c..d60335f3c473 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -528,7 +528,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
u32 size, u16 stride);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring);
+ struct mlx4_en_rx_ring *ring,
+ u32 size, u16 stride);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring);
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index 1ea811cf515b..fe42fc00d8d3 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -42,7 +42,6 @@ config KS8851
select NET_CORE
select MII
select CRC32
- select MISC_DEVICES
select EEPROM_93CX6
---help---
SPI driver for Micrel KS8851 SPI attached network chip.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 1a69504ed9c5..cfad09107054 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -38,6 +38,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
#include <linux/sh_eth.h>
#include "sh_eth.h"
@@ -816,7 +817,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
sh_eth_write(ndev, 0, TRIMD);
/* Recv frame limit set register */
- sh_eth_write(ndev, RFLR_VALUE, RFLR);
+ sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
+ RFLR);
sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index ba72976926fd..cb07add8638f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -575,9 +575,6 @@ enum RPADIR_BIT {
RPADIR_PADR = 0x0003f,
};
-/* RFLR */
-#define RFLR_VALUE 0x1000
-
/* FDR */
#define DEFAULT_FDR_INIT 0x00000707
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
index 051764704559..74acb5cf6099 100644
--- a/drivers/net/ethernet/toshiba/Kconfig
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_TOSHIBA
bool "Toshiba devices"
default y
- depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB) || PPC_PS3
+ depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB || MIPS) || PPC_PS3
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index cf69cbf6fec3..8a5d7c100a5e 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2491,9 +2491,6 @@ static int velocity_close(struct net_device *dev)
if (dev->irq != 0)
free_irq(dev->irq, dev);
- /* Power down the chip */
- pci_set_power_state(vptr->pdev, PCI_D3hot);
-
velocity_free_rings(vptr);
vptr->flags &= (~VELOCITY_FLAGS_OPENED);