author     Ian Campbell <Ian.Campbell@citrix.com>    2012-11-21 02:02:16 +0000
committer  David S. Miller <davem@davemloft.net>     2012-11-21 11:50:31 -0500
commit     f36c374782e40a3812f729838b5b80d2ce601725
tree       7d300779d8188842abf5eb16b51bb3c6db8bd2fc
parent     f30a944392d701228f5076c0397c83bf871153d2
xen/netfront: handle compound page fragments on transmit
An SKB paged fragment can consist of a compound page with order > 0.
However the netchannel protocol deals only in PAGE_SIZE frames.

Handle this in xennet_make_frags by iterating over the frames which
make up the page.

This is the netfront equivalent to 6a8ed462f16b for netback.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: netdev@vger.kernel.org
Cc: xen-devel@lists.xen.org
Cc: Eric Dumazet <edumazet@google.com>
Cc: Konrad Rzeszutek Wilk <konrad@kernel.org>
Cc: ANNIE LI <annie.li@oracle.com>
Cc: Sander Eikelenboom <linux@eikelenboom.it>
Cc: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
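To make the new transmit path easier to follow, the sketch below walks one paged fragment in PAGE_SIZE steps the same way the patched xennet_make_frags() does: skip the frames fully covered by the starting offset, then emit one (frame, offset, length) chunk per grant reference/tx request until the fragment is exhausted. This is a minimal userspace illustration only; the PAGE_SHIFT value and the chunk_fragment() helper are assumptions made for the example, not part of the driver.

    /* chunk_fragment.c - userspace illustration of the per-frame walk */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    static void chunk_fragment(unsigned long offset, unsigned long len)
    {
            /* Skip the frames that the starting offset already covers. */
            unsigned long frame = offset >> PAGE_SHIFT;

            offset &= ~PAGE_MASK;

            while (len > 0) {
                    unsigned long bytes = PAGE_SIZE - offset;

                    if (bytes > len)
                            bytes = len;

                    /* In the driver, each chunk becomes one grant ref + tx request. */
                    printf("frame %lu: offset %lu, %lu bytes\n", frame, offset, bytes);

                    offset += bytes;
                    len -= bytes;

                    /* Step to the next frame of the compound page. */
                    if (offset == PAGE_SIZE && len) {
                            frame++;
                            offset = 0;
                    }
            }
    }

    int main(void)
    {
            /* e.g. a fragment starting 1000 bytes into an order-2 (4-frame) page */
            chunk_fragment(1000, 3 * PAGE_SIZE + 500);
            return 0;
    }

Run against the example input, this prints four chunks, matching the four PAGE_SIZE frames the fragment touches and therefore the four ring slots the real code would consume.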
-rw-r--r--  drivers/net/xen-netfront.c  98
1 file changed, 77 insertions(+), 21 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index caa011008cd0..fc24eb9b3948 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -452,29 +452,85 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
         /* Grant backend access to each skb fragment page. */
         for (i = 0; i < frags; i++) {
                 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+                struct page *page = skb_frag_page(frag);
 
-                tx->flags |= XEN_NETTXF_more_data;
+                len = skb_frag_size(frag);
+                offset = frag->page_offset;
 
-                id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
-                np->tx_skbs[id].skb = skb_get(skb);
-                tx = RING_GET_REQUEST(&np->tx, prod++);
-                tx->id = id;
-                ref = gnttab_claim_grant_reference(&np->gref_tx_head);
-                BUG_ON((signed short)ref < 0);
+                /* Data must not cross a page boundary. */
+                BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
 
-                mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
-                gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
-                                                mfn, GNTMAP_readonly);
+                /* Skip unused frames from start of page */
+                page += offset >> PAGE_SHIFT;
+                offset &= ~PAGE_MASK;
 
-                tx->gref = np->grant_tx_ref[id] = ref;
-                tx->offset = frag->page_offset;
-                tx->size = skb_frag_size(frag);
-                tx->flags = 0;
+                while (len > 0) {
+                        unsigned long bytes;
+
+                        BUG_ON(offset >= PAGE_SIZE);
+
+                        bytes = PAGE_SIZE - offset;
+                        if (bytes > len)
+                                bytes = len;
+
+                        tx->flags |= XEN_NETTXF_more_data;
+
+                        id = get_id_from_freelist(&np->tx_skb_freelist,
+                                                  np->tx_skbs);
+                        np->tx_skbs[id].skb = skb_get(skb);
+                        tx = RING_GET_REQUEST(&np->tx, prod++);
+                        tx->id = id;
+                        ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+                        BUG_ON((signed short)ref < 0);
+
+                        mfn = pfn_to_mfn(page_to_pfn(page));
+                        gnttab_grant_foreign_access_ref(ref,
+                                                        np->xbdev->otherend_id,
+                                                        mfn, GNTMAP_readonly);
+
+                        tx->gref = np->grant_tx_ref[id] = ref;
+                        tx->offset = offset;
+                        tx->size = bytes;
+                        tx->flags = 0;
+
+                        offset += bytes;
+                        len -= bytes;
+
+                        /* Next frame */
+                        if (offset == PAGE_SIZE && len) {
+                                BUG_ON(!PageCompound(page));
+                                page++;
+                                offset = 0;
+                        }
+                }
         }
 
         np->tx.req_prod_pvt = prod;
 }
 
+/*
+ * Count how many ring slots are required to send the frags of this
+ * skb. Each frag might be a compound page.
+ */
+static int xennet_count_skb_frag_slots(struct sk_buff *skb)
+{
+        int i, frags = skb_shinfo(skb)->nr_frags;
+        int pages = 0;
+
+        for (i = 0; i < frags; i++) {
+                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+                unsigned long size = skb_frag_size(frag);
+                unsigned long offset = frag->page_offset;
+
+                /* Skip unused frames from start of page */
+                offset &= ~PAGE_MASK;
+
+                pages += PFN_UP(offset + size);
+        }
+
+        return pages;
+}
+
 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         unsigned short id;
@@ -487,23 +543,23 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
         grant_ref_t ref;
         unsigned long mfn;
         int notify;
-        int frags = skb_shinfo(skb)->nr_frags;
+        int slots;
         unsigned int offset = offset_in_page(data);
         unsigned int len = skb_headlen(skb);
         unsigned long flags;
 
-        frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
-        if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
-                printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
-                       frags);
-                dump_stack();
+        slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
+                xennet_count_skb_frag_slots(skb);
+        if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
+                net_alert_ratelimited(
+                        "xennet: skb rides the rocket: %d slots\n", slots);
                 goto drop;
         }
 
         spin_lock_irqsave(&np->tx_lock, flags);
 
         if (unlikely(!netif_carrier_ok(dev) ||
-                     (frags > 1 && !xennet_can_sg(dev)) ||
+                     (slots > 1 && !xennet_can_sg(dev)) ||
                      netif_needs_gso(skb, netif_skb_features(skb)))) {
                 spin_unlock_irqrestore(&np->tx_lock, flags);
                 goto drop;
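For reference, the slot accounting added by xennet_count_skb_frag_slots() reduces to PFN_UP(offset-within-frame + size) for each fragment. The minimal userspace sketch below works that arithmetic through an example; the struct frag type, the count_slots() helper, and the local PAGE_SIZE/PFN_UP definitions are illustrative stand-ins for the kernel macros, not driver code.

    /* count_slots.c - userspace illustration of the slot arithmetic */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    /* Round a byte count up to whole page frames, as the kernel's PFN_UP does. */
    #define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    /* One (offset, size) pair standing in for an skb paged fragment. */
    struct frag {
            unsigned long offset;   /* offset into the (possibly compound) page */
            unsigned long size;     /* length of the fragment in bytes */
    };

    static int count_slots(const struct frag *frags, int nr_frags)
    {
            int i, pages = 0;

            for (i = 0; i < nr_frags; i++) {
                    unsigned long size = frags[i].size;
                    /* Keep only the sub-frame part of the offset, as the patch does. */
                    unsigned long offset = frags[i].offset & ~PAGE_MASK;

                    pages += PFN_UP(offset + size);
            }

            return pages;
    }

    int main(void)
    {
            struct frag frags[] = {
                    { .offset = PAGE_SIZE - 50, .size = 100 },      /* straddles 2 frames */
                    { .offset = 0, .size = 3 * PAGE_SIZE },         /* 3 full frames */
            };

            printf("slots needed: %d\n", count_slots(frags, 2));    /* prints 5 */
            return 0;
    }

This mirrors why xennet_start_xmit now compares the computed slot count, rather than nr_frags, against MAX_SKB_FRAGS + 1 before committing the skb to the ring.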