author     Govindarajulu Varadarajan <_govind@gmx.com>   2014-11-19 12:59:32 +0530
committer  David S. Miller <davem@davemloft.net>         2014-11-21 12:17:54 -0500
commit     f8e34d246c7d89e13399b410f92234acd2f7411a
tree       a761fe599db9b9095f96656ddc949160f761e9ab
parent     3819ffdff70da4c0d3bab0f8becabfc6936a230c
enic: support skb->xmit_more
Check and update posted_index (the hardware doorbell) only when skb->xmit_more is 0 or the tx queue is full.
v2:
use txq_map instead of skb_get_queue_mapping(skb)
Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
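
The mechanism relied on here is the generic xmit_more contract: the stack sets skb->xmit_more when more packets are queued behind the current one, so a driver may defer the doorbell MMIO write until the last skb of a batch, provided it still flushes when the queue is stopped. Below is a minimal sketch of that pattern, not enic code; struct my_txq and the my_*() helpers are hypothetical placeholders:

/* Hypothetical sketch of the xmit_more doorbell-batching pattern; struct
 * my_txq and the my_*() helpers are placeholders, not enic code.
 */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int index = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, index);
	struct my_txq *ring = my_txq_lookup(dev, index);

	my_queue_desc(ring, skb);		/* write descriptor(s); no doorbell yet */

	if (my_desc_avail(ring) < MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(txq);	/* ring nearly full; stop the queue */

	/* Ring the doorbell only for the last skb of a batch, or when the
	 * queue was just stopped (no later xmit call will arrive to flush
	 * the pending descriptors).
	 */
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		my_ring_doorbell(ring);

	return NETDEV_TX_OK;
}

The netif_xmit_stopped() check is what keeps the optimization safe: without it, descriptors queued just before the ring filled up could sit unposted indefinitely.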
-rw-r--r--   drivers/net/ethernet/cisco/enic/enic_main.c |  8
-rw-r--r--   drivers/net/ethernet/cisco/enic/vnic_wq.h   | 20
2 files changed, 17 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 5afe360c7e89..b9cda2fc5ae8 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -533,6 +533,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	struct vnic_wq *wq;
 	unsigned long flags;
 	unsigned int txq_map;
+	struct netdev_queue *txq;
 
 	if (skb->len <= 0) {
 		dev_kfree_skb_any(skb);
@@ -541,6 +542,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 
 	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
 	wq = &enic->wq[txq_map];
+	txq = netdev_get_tx_queue(netdev, txq_map);
 
 	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
 	 * which is very likely.  In the off chance it's going to take
@@ -558,7 +560,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 
 	if (vnic_wq_desc_avail(wq) <
 	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
-		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
+		netif_tx_stop_queue(txq);
 		/* This is a hard error, log it */
 		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
 		spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
@@ -568,7 +570,9 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	enic_queue_wq_skb(enic, wq, skb);
 
 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
-		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
+		netif_tx_stop_queue(txq);
+	if (!skb->xmit_more || netif_xmit_stopped(txq))
+		vnic_wq_doorbell(wq);
 
 	spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
 
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 2c6c70804a39..816f1ad6072f 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -104,6 +104,17 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
 	return wq->to_use->desc;
 }
 
+static inline void vnic_wq_doorbell(struct vnic_wq *wq)
+{
+	/* Adding write memory barrier prevents compiler and/or CPU
+	 * reordering, thus avoiding descriptor posting before
+	 * descriptor is initialized. Otherwise, hardware can read
+	 * stale descriptor fields.
+	 */
+	wmb();
+	iowrite32(wq->to_use->index, &wq->ctrl->posted_index);
+}
+
 static inline void vnic_wq_post(struct vnic_wq *wq,
 	void *os_buf, dma_addr_t dma_addr,
 	unsigned int len, int sop, int eop,
@@ -122,15 +133,6 @@ static inline void vnic_wq_post(struct vnic_wq *wq,
 	buf->wr_id = wrid;
 
 	buf = buf->next;
-	if (eop) {
-		/* Adding write memory barrier prevents compiler and/or CPU
-		 * reordering, thus avoiding descriptor posting before
-		 * descriptor is initialized. Otherwise, hardware can read
-		 * stale descriptor fields.
-		 */
-		wmb();
-		iowrite32(buf->index, &wq->ctrl->posted_index);
-	}
 	wq->to_use = buf;
 
 	wq->ring.desc_avail -= desc_skip_cnt;
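
With the posted_index write moved out of vnic_wq_post() and into vnic_wq_doorbell(), one wmb() and one MMIO write can cover a whole burst of descriptors instead of one per EOP. A rough sketch of the resulting caller pattern follows; fill_desc() and the trailing vnic_wq_post() argument values are illustrative placeholders, not the real enic call sites:

/* Illustrative only: post several descriptors, then flush them with a
 * single doorbell. fill_desc() and the trailing vnic_wq_post() argument
 * values are placeholders.
 */
for (i = 0; i < n; i++) {
	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);

	fill_desc(desc, &frags[i]);
	vnic_wq_post(wq, frags[i].os_buf, frags[i].dma_addr, frags[i].len,
		     i == 0,		/* sop */
		     i == n - 1,	/* eop */
		     1, 1, 0, wrid);	/* placeholder skip/cq/flag values */
}
vnic_wq_doorbell(wq);	/* one wmb() + one posted_index write for the burst */

Note that vnic_wq_doorbell() writes wq->to_use->index, the index of the next descriptor to be used, which is exactly what posted_index must point past once all queued descriptors are handed to hardware.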