Diffstat (limited to 'drivers/net/xen-netback/netback.c')
-rw-r--r--  drivers/net/xen-netback/netback.c | 153
1 file changed, 34 insertions, 119 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7dc2d64db3cb..997cf0901ac2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
static void make_tx_response(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
s8 st);
+static void push_tx_responses(struct xenvif_queue *queue);
static inline int tx_work_todo(struct xenvif_queue *queue);
@@ -233,51 +234,6 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
}
}
-/*
- * Returns true if we should start a new receive buffer instead of
- * adding 'size' bytes to a buffer which currently contains 'offset'
- * bytes.
- */
-static bool start_new_rx_buffer(int offset, unsigned long size, int head,
- bool full_coalesce)
-{
- /* simple case: we have completely filled the current buffer. */
- if (offset == MAX_BUFFER_OFFSET)
- return true;
-
- /*
- * complex case: start a fresh buffer if the current frag
- * would overflow the current buffer but only if:
- * (i) this frag would fit completely in the next buffer
- * and (ii) there is already some data in the current buffer
- * and (iii) this is not the head buffer.
- * and (iv) there is no need to fully utilize the buffers
- *
- * Where:
- * - (i) stops us splitting a frag into two copies
- * unless the frag is too large for a single buffer.
- * - (ii) stops us from leaving a buffer pointlessly empty.
- * - (iii) stops us leaving the first buffer
- * empty. Strictly speaking this is already covered
- * by (ii) but is explicitly checked because
- * netfront relies on the first buffer being
- * non-empty and can crash otherwise.
- * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
- * slots
- *
- * This means we will effectively linearise small
- * frags but do not needlessly split large buffers
- * into multiple copies, and tend to give large
- * frags their own buffers as before.
- */
- BUG_ON(size > MAX_BUFFER_OFFSET);
- if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
- !full_coalesce)
- return true;
-
- return false;
-}
-
struct netrx_pending_operations {
unsigned copy_prod, copy_cons;
unsigned meta_prod, meta_cons;
@@ -336,24 +292,13 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
BUG_ON(offset >= PAGE_SIZE);
BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
- bytes = PAGE_SIZE - offset;
+ if (npo->copy_off == MAX_BUFFER_OFFSET)
+ meta = get_next_rx_buffer(queue, npo);
+ bytes = PAGE_SIZE - offset;
if (bytes > size)
bytes = size;
- if (start_new_rx_buffer(npo->copy_off,
- bytes,
- *head,
- XENVIF_RX_CB(skb)->full_coalesce)) {
- /*
- * Netfront requires there to be some data in the head
- * buffer.
- */
- BUG_ON(*head);
-
- meta = get_next_rx_buffer(queue, npo);
- }
-
if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
bytes = MAX_BUFFER_OFFSET - npo->copy_off;
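Taken together, the RX hunks above replace the start_new_rx_buffer() heuristic with a much simpler rule: open a fresh buffer only when the current one is completely full, and never let a single copy chunk cross a buffer boundary. A minimal sketch of the resulting per-chunk logic, paraphrasing the new loop body (min_t is the kernel's type-checked min macro):

	/* Current buffer exactly full: advance to the next ring slot. */
	if (npo->copy_off == MAX_BUFFER_OFFSET)
		meta = get_next_rx_buffer(queue, npo);

	/* Copy at most to the end of the source page... */
	bytes = min_t(unsigned long, PAGE_SIZE - offset, size);

	/* ...and never past the end of the destination buffer. */
	if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
		bytes = MAX_BUFFER_OFFSET - npo->copy_off;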
@@ -570,60 +515,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
&& (skb = xenvif_rx_dequeue(queue)) != NULL) {
- RING_IDX max_slots_needed;
RING_IDX old_req_cons;
RING_IDX ring_slots_used;
- int i;
queue->last_rx_time = jiffies;
- /* We need a cheap worst-case estimate for the number of
- * slots we'll use.
- */
-
- max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
- skb_headlen(skb),
- PAGE_SIZE);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- unsigned int size;
- unsigned int offset;
-
- size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
- offset = skb_shinfo(skb)->frags[i].page_offset;
-
- /* For a worst-case estimate we need to factor in
- * the fragment page offset as this will affect the
- * number of times xenvif_gop_frag_copy() will
- * call start_new_rx_buffer().
- */
- max_slots_needed += DIV_ROUND_UP(offset + size,
- PAGE_SIZE);
- }
-
- /* To avoid the estimate becoming too pessimal for some
- * frontends that limit posted rx requests, cap the estimate
- * at MAX_SKB_FRAGS. In this case netback will fully coalesce
- * the skb into the provided slots.
- */
- if (max_slots_needed > MAX_SKB_FRAGS) {
- max_slots_needed = MAX_SKB_FRAGS;
- XENVIF_RX_CB(skb)->full_coalesce = true;
- } else {
- XENVIF_RX_CB(skb)->full_coalesce = false;
- }
-
- /* We may need one more slot for GSO metadata */
- if (skb_is_gso(skb) &&
- (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
- skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
- max_slots_needed++;
-
old_req_cons = queue->rx.req_cons;
XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
ring_slots_used = queue->rx.req_cons - old_req_cons;
- BUG_ON(ring_slots_used > max_slots_needed);
-
__skb_queue_tail(&rxq, skb);
}
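For a sense of what the removed estimate used to compute, consider a hypothetical worked example (numbers invented for illustration, assuming PAGE_SIZE is 4096): an skb with 100 bytes of linear data starting at page offset 4000, plus a single 8000-byte frag at page offset 2000.

	/* Hypothetical figures for the removed worst-case math:           */
	max_slots_needed  = DIV_ROUND_UP(4000 + 100, 4096);  /* head:  2   */
	max_slots_needed += DIV_ROUND_UP(2000 + 8000, 4096); /* frag: +3=5 */
	/* Plus one more slot if the skb is TCPv4/TCPv6 GSO, then capped at
	 * MAX_SKB_FRAGS with full_coalesce set.  The new code drops the
	 * estimate entirely and simply consumes ring slots as
	 * xenvif_gop_skb() goes.                                          */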
@@ -758,6 +658,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
do {
spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+ push_tx_responses(queue);
spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
break;
@@ -1444,7 +1345,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
{
unsigned int offset = skb_headlen(skb);
skb_frag_t frags[MAX_SKB_FRAGS];
- int i;
+ int i, f;
struct ubuf_info *uarg;
struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
@@ -1484,23 +1385,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
frags[i].page_offset = 0;
skb_frag_size_set(&frags[i], len);
}
- /* swap out with old one */
- memcpy(skb_shinfo(skb)->frags,
- frags,
- i * sizeof(skb_frag_t));
- skb_shinfo(skb)->nr_frags = i;
- skb->truesize += i * PAGE_SIZE;
- /* remove traces of mapped pages and frag_list */
+ /* Copied all the bits from the frag list -- free it. */
skb_frag_list_init(skb);
+ xenvif_skb_zerocopy_prepare(queue, nskb);
+ kfree_skb(nskb);
+
+ /* Release all the original (foreign) frags. */
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ skb_frag_unref(skb, f);
uarg = skb_shinfo(skb)->destructor_arg;
/* increase inflight counter to offset decrement in callback */
atomic_inc(&queue->inflight_packets);
uarg->callback(uarg, true);
skb_shinfo(skb)->destructor_arg = NULL;
- xenvif_skb_zerocopy_prepare(queue, nskb);
- kfree_skb(nskb);
+ /* Fill the skb with the new (local) frags. */
+ memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
+ skb_shinfo(skb)->nr_frags = i;
+ skb->truesize += i * PAGE_SIZE;
return 0;
}
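The reordering in this hunk is the point: the frag_list and the original foreign (grant-mapped) frags must be released, and the zerocopy callback completed, before the shinfo frag array is overwritten with the freshly allocated local pages. In sketch form, the sequence the new code establishes (names as in the diff):

	/* 1. copy all data into newly allocated local pages (frags[])
	 * 2. skb_frag_list_init(skb) + kfree_skb(nskb)  -- drop frag_list
	 * 3. skb_frag_unref(skb, f) for each old frag   -- drop foreign pages
	 * 4. uarg->callback(uarg, true)                 -- finish zerocopy
	 * 5. memcpy frags[] into skb_shinfo(skb)->frags,
	 *    then fix up nr_frags and truesize
	 */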
@@ -1753,13 +1656,20 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
unsigned long flags;
pending_tx_info = &queue->pending_tx_info[pending_idx];
+
spin_lock_irqsave(&queue->response_lock, flags);
+
make_tx_response(queue, &pending_tx_info->req, status);
- index = pending_index(queue->pending_prod);
+
+ /* Release the pending index before pushing the Tx response so
+ * it's available before a new Tx request is pushed by the
+ * frontend.
+ */
+ index = pending_index(queue->pending_prod++);
queue->pending_ring[index] = pending_idx;
- /* TX shouldn't use the index before we give it back here */
- mb();
- queue->pending_prod++;
+
+ push_tx_responses(queue);
+
spin_unlock_irqrestore(&queue->response_lock, flags);
}
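Note why the explicit mb() could be dropped here: RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() already issues a write barrier before updating rsp_prod (assuming the standard Xen shared-ring macro semantics), so bumping pending_prod before the push guarantees the recycled index is visible by the time the frontend can observe the response and submit a new request. A sketch of the ordering, all under queue->response_lock:

	make_tx_response(queue, &pending_tx_info->req, status); /* fill slot  */
	index = pending_index(queue->pending_prod++);           /* recycle id */
	queue->pending_ring[index] = pending_idx;
	push_tx_responses(queue); /* wmb() inside the ring macro publishes
	                           * both stores before rsp_prod moves.      */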
@@ -1770,7 +1680,6 @@ static void make_tx_response(struct xenvif_queue *queue,
{
RING_IDX i = queue->tx.rsp_prod_pvt;
struct xen_netif_tx_response *resp;
- int notify;
resp = RING_GET_RESPONSE(&queue->tx, i);
resp->id = txp->id;
@@ -1780,6 +1689,12 @@ static void make_tx_response(struct xenvif_queue *queue,
RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
queue->tx.rsp_prod_pvt = ++i;
+}
+
+static void push_tx_responses(struct xenvif_queue *queue)
+{
+ int notify;
+
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
notify_remote_via_irq(queue->tx_irq);
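With make_tx_response() and push_tx_responses() now split, each caller pairs the two under the response lock, as the xenvif_tx_err hunk above already shows. The resulting pattern, in sketch form:

	spin_lock_irqsave(&queue->response_lock, flags);
	make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); /* queue response  */
	push_tx_responses(queue);    /* publish it, notify frontend if needed */
	spin_unlock_irqrestore(&queue->response_lock, flags);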