author     Michael Chan <mchan@broadcom.com>      2007-12-12 11:17:01 -0800
committer  David S. Miller <davem@davemloft.net>  2008-01-28 14:57:28 -0800
commit     85833c6269016d009ada17b04ac288e2ab9c37ea
tree       d102f33d3cb4c07506cc3429f124dcef063e5930 /drivers
parent     e343d55c0a624c5bb88cd6821a17586474f20271
[BNX2]: Restructure RX fast path handling.
Add a new function to handle new SKB allocation and to prepare the
completed SKB. This makes it easier to add support for non-linear
SKBs.
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
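For context on the argument encoding used by the new helper: bnx2_rx_skb() receives the ring consumer and producer indices packed into a single u32, (sw_ring_cons << 16) | sw_ring_prod, and splits them back out with a shift and a mask. Below is a minimal standalone sketch of just that pack/unpack step; it is illustrative only (plain C with stdint types and a demo main(), none of which is driver code).

/* Illustrative sketch of the (cons << 16) | prod encoding used by
 * bnx2_rx_skb() in this patch; not driver code. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_ring_idx(uint16_t cons, uint16_t prod)
{
        /* high 16 bits: consumer index, low 16 bits: producer index */
        return ((uint32_t)cons << 16) | prod;
}

static void unpack_ring_idx(uint32_t ring_idx, uint16_t *cons, uint16_t *prod)
{
        *prod = ring_idx & 0xffff;          /* mirrors: prod = ring_idx & 0xffff */
        *cons = (uint16_t)(ring_idx >> 16); /* mirrors: (u16) (ring_idx >> 16)  */
}

int main(void)
{
        uint16_t cons, prod;

        unpack_ring_idx(pack_ring_idx(0x0012, 0x0034), &cons, &prod);
        printf("cons=0x%04x prod=0x%04x\n", cons, prod);
        return 0;
}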
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/bnx2.c | 46
1 file changed, 30 insertions(+), 16 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index dfe50c286d95..14119fb5964d 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2379,6 +2379,27 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
 }
 
+static int
+bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
+	    dma_addr_t dma_addr, u32 ring_idx)
+{
+	int err;
+	u16 prod = ring_idx & 0xffff;
+
+	err = bnx2_alloc_rx_skb(bp, prod);
+	if (unlikely(err)) {
+		bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
+		return err;
+	}
+
+	skb_reserve(skb, bp->rx_offset);
+	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
+			 PCI_DMA_FROMDEVICE);
+
+	skb_put(skb, len);
+	return 0;
+}
+
 static inline u16
 bnx2_get_hw_rx_cons(struct bnx2 *bp)
 {
@@ -2434,7 +2455,8 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 				       L2_FHDR_ERRORS_TOO_SHORT |
 				       L2_FHDR_ERRORS_GIANT_FRAME)) {
 
-			goto reuse_rx;
+			bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
+			goto next_rx;
 		}
 
 		/* Since we don't have a jumbo ring, copy small packets
@@ -2444,8 +2466,11 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 			struct sk_buff *new_skb;
 
 			new_skb = netdev_alloc_skb(bp->dev, len + 2);
-			if (new_skb == NULL)
-				goto reuse_rx;
+			if (new_skb == NULL) {
+				bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
+						  sw_ring_prod);
+				goto next_rx;
+			}
 
 			/* aligned copy */
 			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
@@ -2457,20 +2482,9 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 					      sw_ring_cons, sw_ring_prod);
 
 			skb = new_skb;
-		}
-		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
-			pci_unmap_single(bp->pdev, dma_addr,
-				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
-
-			skb_reserve(skb, bp->rx_offset);
-			skb_put(skb, len);
-		}
-		else {
-reuse_rx:
-			bnx2_reuse_rx_skb(bp, skb,
-				sw_ring_cons, sw_ring_prod);
+		} else if (unlikely(bnx2_rx_skb(bp, skb, len, dma_addr,
+			   (sw_ring_cons << 16) | sw_ring_prod)))
 			goto next_rx;
-		}
 
 		skb->protocol = eth_type_trans(skb, bp->dev);
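Read as a whole rather than hunk by hunk, the patch removes the reuse_rx label: every failure path now recycles its buffer and jumps straight to next_rx, and the successful non-copy case goes through the single helper (which recycles internally if refilling the ring fails). The skeleton below is a rough, hypothetical rendering of that control flow with stand-in names and types; it is not the driver's code.

/* Hypothetical stand-in for the restructured RX loop; names, types and
 * predicates are invented for illustration only. */
#include <stdbool.h>
#include <stdio.h>

struct rx_buf { int id; };

/* Stand-in predicates and actions using trivial rules. */
static bool frame_has_errors(const struct rx_buf *b) { return b->id < 0; }
static bool frame_is_small(const struct rx_buf *b)   { return b->id < 10; }
static int  copy_small_frame(struct rx_buf *b)       { (void)b; return 0; }
/* Stand-in for bnx2_rx_skb(); the real helper recycles on failure itself. */
static int  refill_and_prepare(struct rx_buf *b)     { (void)b; return 0; }
static void recycle(struct rx_buf *b)                { printf("recycled %d\n", b->id); }
static void deliver(struct rx_buf *b)                { printf("delivered %d\n", b->id); }

static void rx_loop(struct rx_buf *bufs, int n)
{
        for (int i = 0; i < n; i++) {
                struct rx_buf *b = &bufs[i];

                if (frame_has_errors(b)) {
                        recycle(b);             /* was: goto reuse_rx */
                        goto next_rx;
                }

                if (frame_is_small(b)) {
                        if (copy_small_frame(b)) {
                                recycle(b);     /* copy buffer allocation failed */
                                goto next_rx;
                        }
                } else if (refill_and_prepare(b)) {
                        goto next_rx;           /* helper already recycled */
                }

                deliver(b);
next_rx:
                ;
        }
}

int main(void)
{
        struct rx_buf bufs[] = { { -1 }, { 5 }, { 42 } };

        rx_loop(bufs, 3);
        return 0;
}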