author     Linus Torvalds <torvalds@linux-foundation.org>   2009-03-26 15:54:36 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-03-26 15:54:36 -0700
commit     13220a94d35708d5378114e96ffcc88d0a74fe99 (patch)
tree       be6530677d5f9536c7211e05ba012923e4c0b307 /drivers/net/e1000
parent     8690d8a9f6c2d5728a9c9f68231f1bb4de109e3a (diff)
parent     08abe18af1f78ee80c3c3a5ac47c3e0ae0beadf6 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1750 commits)
  ixgbe: Allow Priority Flow Control settings to survive a device reset
  net: core: remove unneeded include in net/core/utils.c.
  e1000e: update version number
  e1000e: fix close interrupt race
  e1000e: fix loss of multicast packets
  e1000e: commonize tx cleanup routine to match e1000 & igb
  netfilter: fix nf_logger name in ebt_ulog.
  netfilter: fix warning in ebt_ulog init function.
  netfilter: fix warning about invalid const usage
  e1000: fix close race with interrupt
  e1000: cleanup clean_tx_irq routine so that it completely cleans ring
  e1000: fix tx hang detect logic and address dma mapping issues
  bridge: bad error handling when adding invalid ether address
  bonding: select current active slave when enslaving device for mode tlb and alb
  gianfar: reallocate skb when headroom is not enough for fcb
  Bump release date to 25Mar2009 and version to 0.22
  r6040: Fix second PHY address
  qeth: fix wait_event_timeout handling
  qeth: check for completion of a running recovery
  qeth: unregister MAC addresses during recovery.
  ...

Manually fixed up conflicts in:
  drivers/infiniband/hw/cxgb3/cxio_hal.h
  drivers/infiniband/hw/nes/nes_nic.c
Diffstat (limited to 'drivers/net/e1000')
-rw-r--r--  drivers/net/e1000/e1000.h      |   2
-rw-r--r--  drivers/net/e1000/e1000_main.c | 162
2 files changed, 81 insertions(+), 83 deletions(-)
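
For e1000 itself, the headline changes in this merge are the removal of the NETIF_F_LLTX/tx_lock transmit path, a switch to the skb_dma_map()/skb_dma_unmap() helpers for TX buffer mapping, the rename to napi_schedule_prep()/__napi_schedule(), and a bounded TX cleanup loop. A minimal sketch of the skb_dma_map() pattern, assuming the 2.6.29-era API present in this tree (these helpers were later removed upstream) and a hypothetical my_buffer bookkeeping struct rather than the driver's real types:

/* Hedged sketch, not the driver's code: skb_dma_map() maps the linear head
 * and every page fragment in one call; skb_dma_unmap() undoes all of it. */
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

struct my_buffer {                     /* hypothetical per-descriptor state */
	dma_addr_t dma;
	struct sk_buff *skb;
};

static int my_map_tx_skb(struct device *dev, struct sk_buff *skb,
			 struct my_buffer *buf)
{
	/* on failure nothing stays mapped, so the skb can simply be dropped */
	if (skb_dma_map(dev, skb, DMA_TO_DEVICE))
		return -ENOMEM;

	/* dma_maps[0] is the head; dma_maps[1..nr_frags] are the fragments */
	buf->dma = skb_shinfo(skb)->dma_maps[0];
	buf->skb = skb;
	return 0;
}

static void my_unmap_tx_skb(struct device *dev, struct my_buffer *buf)
{
	buf->dma = 0;
	if (buf->skb) {
		skb_dma_unmap(dev, buf->skb, DMA_TO_DEVICE);
		dev_kfree_skb_any(buf->skb);
		buf->skb = NULL;
	}
}

The rewritten e1000_tx_map() and e1000_unmap_and_free_tx_resource() in the diff below follow this shape: per-fragment DMA addresses come from the shared-info dma_maps[] array instead of individual pci_map_single()/pci_map_page() calls.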
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index f5581de04757..e9a416f40162 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -182,7 +182,6 @@ struct e1000_tx_ring {
/* array of buffer information structs */
struct e1000_buffer *buffer_info;
- spinlock_t tx_lock;
u16 tdh;
u16 tdt;
bool last_tx_tso;
@@ -238,7 +237,6 @@ struct e1000_adapter {
u16 link_speed;
u16 link_duplex;
spinlock_t stats_lock;
- spinlock_t tx_queue_lock;
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 6bd63cc67b3e..93b861d032b5 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -577,12 +577,30 @@ out:
void e1000_down(struct e1000_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
+ u32 rctl, tctl;
/* signal that we're down so the interrupt handler does not
* reschedule our watchdog timer */
set_bit(__E1000_DOWN, &adapter->flags);
+ /* disable receives in the hardware */
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ /* flush and sleep below */
+
+ /* can be netif_tx_disable when NETIF_F_LLTX is removed */
+ netif_stop_queue(netdev);
+
+ /* disable transmits in the hardware */
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_EN;
+ ew32(TCTL, tctl);
+ /* flush both disables and wait for them to finish */
+ E1000_WRITE_FLUSH();
+ msleep(10);
+
napi_disable(&adapter->napi);
e1000_irq_disable(adapter);
@@ -595,7 +613,6 @@ void e1000_down(struct e1000_adapter *adapter)
adapter->link_speed = 0;
adapter->link_duplex = 0;
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
e1000_reset(adapter);
e1000_clean_all_tx_rings(adapter);
@@ -1048,8 +1065,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->features |= NETIF_F_LLTX;
-
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_HW_CSUM;
@@ -1368,8 +1383,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
return -ENOMEM;
}
- spin_lock_init(&adapter->tx_queue_lock);
-
/* Explicitly disable IRQ since the NIC can be in any state. */
e1000_irq_disable(adapter);
@@ -1624,7 +1637,6 @@ setup_tx_desc_die:
txdr->next_to_use = 0;
txdr->next_to_clean = 0;
- spin_lock_init(&txdr->tx_lock);
return 0;
}
@@ -1882,8 +1894,6 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
* e1000_setup_rctl - configure the receive control registers
* @adapter: Board private structure
**/
-#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
- (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -2056,17 +2066,14 @@ void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info)
{
- if (buffer_info->dma) {
- pci_unmap_page(adapter->pdev,
- buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
- buffer_info->dma = 0;
- }
+ buffer_info->dma = 0;
if (buffer_info->skb) {
+ skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(buffer_info->skb);
buffer_info->skb = NULL;
}
+ buffer_info->time_stamp = 0;
/* buffer_info must be completely set up in the transmit path */
}
@@ -2865,11 +2872,11 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
return false;
switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
+ case cpu_to_be16(ETH_P_IP):
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
cmd_len |= E1000_TXD_CMD_TCP;
break;
- case __constant_htons(ETH_P_IPV6):
+ case cpu_to_be16(ETH_P_IPV6):
/* XXX not handling all IPV6 headers */
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
cmd_len |= E1000_TXD_CMD_TCP;
@@ -2915,13 +2922,21 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_buffer *buffer_info;
- unsigned int len = skb->len;
- unsigned int offset = 0, size, count = 0, i;
+ unsigned int len = skb_headlen(skb);
+ unsigned int offset, size, count = 0, i;
unsigned int f;
- len -= skb->data_len;
+ dma_addr_t *map;
i = tx_ring->next_to_use;
+ if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+ dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+ return 0;
+ }
+
+ map = skb_shinfo(skb)->dma_maps;
+ offset = 0;
+
while (len) {
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
@@ -2956,18 +2971,18 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
size -= 4;
buffer_info->length = size;
- buffer_info->dma =
- pci_map_single(adapter->pdev,
- skb->data + offset,
- size,
- PCI_DMA_TODEVICE);
+ buffer_info->dma = map[0] + offset;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
len -= size;
offset += size;
count++;
- if (unlikely(++i == tx_ring->count)) i = 0;
+ if (len) {
+ i++;
+ if (unlikely(i == tx_ring->count))
+ i = 0;
+ }
}
for (f = 0; f < nr_frags; f++) {
@@ -2975,9 +2990,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
frag = &skb_shinfo(skb)->frags[f];
len = frag->size;
- offset = frag->page_offset;
+ offset = 0;
while (len) {
+ i++;
+ if (unlikely(i == tx_ring->count))
+ i = 0;
+
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
/* Workaround for premature desc write-backs
@@ -2993,23 +3012,16 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
size -= 4;
buffer_info->length = size;
- buffer_info->dma =
- pci_map_page(adapter->pdev,
- frag->page,
- offset,
- size,
- PCI_DMA_TODEVICE);
+ buffer_info->dma = map[f + 1] + offset;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
len -= size;
offset += size;
count++;
- if (unlikely(++i == tx_ring->count)) i = 0;
}
}
- i = (i == 0) ? tx_ring->count - 1 : i - 1;
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[first].next_to_watch = i;
@@ -3185,7 +3197,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
unsigned int len = skb->len - skb->data_len;
- unsigned long flags;
unsigned int nr_frags;
unsigned int mss;
int count = 0;
@@ -3290,22 +3301,15 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
(hw->mac_type == e1000_82573))
e1000_transfer_dhcp_info(adapter, skb);
- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
-
/* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time */
- if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
return NETDEV_TX_BUSY;
- }
if (unlikely(hw->mac_type == e1000_82547)) {
if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
netif_stop_queue(netdev);
mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_BUSY;
}
}
@@ -3320,7 +3324,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tso = e1000_tso(adapter, tx_ring, skb);
if (tso < 0) {
dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_OK;
}
@@ -3336,16 +3339,21 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (likely(skb->protocol == htons(ETH_P_IP)))
tx_flags |= E1000_TX_FLAGS_IPV4;
- e1000_tx_queue(adapter, tx_ring, tx_flags,
- e1000_tx_map(adapter, tx_ring, skb, first,
- max_per_txd, nr_frags, mss));
+ count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
+ nr_frags, mss);
- netdev->trans_start = jiffies;
+ if (count) {
+ e1000_tx_queue(adapter, tx_ring, tx_flags, count);
+ netdev->trans_start = jiffies;
+ /* Make sure there is space in the ring for the next send. */
+ e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
- /* Make sure there is space in the ring for the next send. */
- e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+ } else {
+ dev_kfree_skb_any(skb);
+ tx_ring->buffer_info[first].time_stamp = 0;
+ tx_ring->next_to_use = first;
+ }
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_OK;
}
@@ -3687,12 +3695,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (likely(netif_rx_schedule_prep(&adapter->napi))) {
+ if (likely(napi_schedule_prep(&adapter->napi))) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
} else
e1000_irq_enable(adapter);
@@ -3747,16 +3755,18 @@ static irqreturn_t e1000_intr(int irq, void *data)
ew32(IMC, ~0);
E1000_WRITE_FLUSH();
}
- if (likely(netif_rx_schedule_prep(&adapter->napi))) {
+ if (likely(napi_schedule_prep(&adapter->napi))) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(&adapter->napi);
- } else
+ __napi_schedule(&adapter->napi);
+ } else {
/* this really should not happen! if it does it is basically a
* bug, but not a hard error, so enable ints and continue */
- e1000_irq_enable(adapter);
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
+ }
return IRQ_HANDLED;
}
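
The interrupt-handler hunks above do two things: they move from the old netif_rx_schedule names to napi_schedule_prep()/__napi_schedule(), and they only re-enable interrupts on the "poll already scheduled" path when the interface is not being torn down. A minimal sketch of that handshake, with hypothetical my_* names standing in for the driver's adapter and helpers:

/* Hedged sketch, not the driver's code: schedule NAPI from the ISR, and
 * never re-arm interrupts once the device has been marked as going down. */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_adapter {                    /* hypothetical stand-in */
	struct napi_struct napi;
	unsigned long flags;
};
#define __MY_DOWN 0

static void my_irq_enable(struct my_adapter *adapter) { /* stub */ }

static irqreturn_t my_intr(int irq, void *data)
{
	struct my_adapter *adapter = data;

	if (likely(napi_schedule_prep(&adapter->napi))) {
		/* we won the right to poll; the driver also resets its
		 * byte/packet counters here before scheduling the poll */
		__napi_schedule(&adapter->napi);
	} else if (!test_bit(__MY_DOWN, &adapter->flags)) {
		/* a poll is already pending; only re-enable interrupts
		 * if the interface is not in the middle of being downed */
		my_irq_enable(adapter);
	}
	return IRQ_HANDLED;
}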
@@ -3773,28 +3783,21 @@ static int e1000_clean(struct napi_struct *napi, int budget)
adapter = netdev_priv(poll_dev);
- /* e1000_clean is called per-cpu. This lock protects
- * tx_ring[0] from being cleaned by multiple cpus
- * simultaneously. A failure obtaining the lock means
- * tx_ring[0] is currently being cleaned anyway. */
- if (spin_trylock(&adapter->tx_queue_lock)) {
- tx_cleaned = e1000_clean_tx_irq(adapter,
- &adapter->tx_ring[0]);
- spin_unlock(&adapter->tx_queue_lock);
- }
+ tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
adapter->clean_rx(adapter, &adapter->rx_ring[0],
&work_done, budget);
- if (tx_cleaned)
+ if (!tx_cleaned)
work_done = budget;
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
if (likely(adapter->itr_setting & 3))
e1000_set_itr(adapter);
- netif_rx_complete(napi);
- e1000_irq_enable(adapter);
+ napi_complete(napi);
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
}
return work_done;
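
The poll routine above pairs with the bounded cleanup further down: e1000_clean_tx_irq() now stops after at most one full ring's worth of descriptors and returns whether it finished, and e1000_clean() reports the full budget when it did not, so NAPI schedules another round instead of stranding TX work. A minimal sketch of that budget interplay, using the same hypothetical my_adapter shape as the interrupt sketch (repeated here so the block stands alone) and stubbing the cleanup helpers:

/* Hedged sketch, not the driver's code: if the bounded TX clean did not
 * finish, claim the whole budget so the NAPI core polls this queue again. */
#include <linux/netdevice.h>

struct my_adapter {
	struct napi_struct napi;
	unsigned long flags;
};
#define __MY_DOWN 0

static bool my_clean_tx(struct my_adapter *adapter) { return true; }   /* stub */
static void my_clean_rx(struct my_adapter *adapter, int *work_done,
			int budget) { }                                /* stub */
static void my_irq_enable(struct my_adapter *adapter) { }              /* stub */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_adapter *adapter = container_of(napi, struct my_adapter, napi);
	bool tx_clean_complete;
	int work_done = 0;

	/* bounded: returns false if it hit the one-ring cap with work left */
	tx_clean_complete = my_clean_tx(adapter);
	my_clean_rx(adapter, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;            /* force another poll round */

	if (work_done < budget) {
		napi_complete(napi);
		/* as in the interrupt path, do not re-arm during teardown */
		if (!test_bit(__MY_DOWN, &adapter->flags))
			my_irq_enable(adapter);
	}
	return work_done;
}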
@@ -3813,15 +3816,16 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info;
unsigned int i, eop;
unsigned int count = 0;
- bool cleaned = false;
+ bool cleaned;
unsigned int total_tx_bytes=0, total_tx_packets=0;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
- while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
- for (cleaned = false; !cleaned; ) {
+ while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ (count < tx_ring->count)) {
+ for (cleaned = false; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
@@ -3844,10 +3848,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
-#define E1000_TX_WEIGHT 64
- /* weight of a sort for tx, to avoid endless transmit cleanup */
- if (count++ == E1000_TX_WEIGHT)
- break;
}
tx_ring->next_to_clean = i;
@@ -3869,8 +3869,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
/* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */
adapter->detect_tx_hung = false;
- if (tx_ring->buffer_info[eop].dma &&
- time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
+ if (tx_ring->buffer_info[i].time_stamp &&
+ time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
(adapter->tx_timeout_factor * HZ))
&& !(er32(STATUS) & E1000_STATUS_TXOFF)) {
@@ -3892,7 +3892,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
readl(hw->hw_addr + tx_ring->tdt),
tx_ring->next_to_use,
tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
+ tx_ring->buffer_info[i].time_stamp,
eop,
jiffies,
eop_desc->upper.fields.status);
@@ -3903,7 +3903,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
adapter->total_tx_packets += total_tx_packets;
adapter->net_stats.tx_bytes += total_tx_bytes;
adapter->net_stats.tx_packets += total_tx_packets;
- return cleaned;
+ return (count < tx_ring->count);
}
/**