Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--  drivers/net/xen-netback/netback.c  21
1 file changed, 8 insertions, 13 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0d2594395ffb..5485f91294e7 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -52,7 +52,7 @@
* event channels are limited resource. Split event channels are
* enabled by default.
*/
-bool separate_tx_rx_irq = 1;
+bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);
/* The time that packets can stay on the guest Rx internal queue
@@ -515,14 +515,9 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
&& (skb = xenvif_rx_dequeue(queue)) != NULL) {
- RING_IDX old_req_cons;
- RING_IDX ring_slots_used;
-
queue->last_rx_time = jiffies;
- old_req_cons = queue->rx.req_cons;
XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
- ring_slots_used = queue->rx.req_cons - old_req_cons;
__skb_queue_tail(&rxq, skb);
}
@@ -753,7 +748,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
slots++;
if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
- netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
+ netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
txp->offset, txp->size);
xenvif_fatal_tx_err(queue->vif);
return -EINVAL;
@@ -879,7 +874,7 @@ static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
if (unlikely(queue->grant_tx_handle[pending_idx] !=
NETBACK_INVALID_HANDLE)) {
netdev_err(queue->vif->dev,
- "Trying to overwrite active handle! pending_idx: %x\n",
+ "Trying to overwrite active handle! pending_idx: 0x%x\n",
pending_idx);
BUG();
}
@@ -892,7 +887,7 @@ static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
if (unlikely(queue->grant_tx_handle[pending_idx] ==
NETBACK_INVALID_HANDLE)) {
netdev_err(queue->vif->dev,
- "Trying to unmap invalid handle! pending_idx: %x\n",
+ "Trying to unmap invalid handle! pending_idx: 0x%x\n",
pending_idx);
BUG();
}
@@ -1248,7 +1243,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
netdev_err(queue->vif->dev,
- "txreq.offset: %x, size: %u, end: %lu\n",
+ "txreq.offset: %u, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size);
xenvif_fatal_tx_err(queue->vif);
@@ -1598,12 +1593,12 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
queue->pages_to_unmap,
gop - queue->tx_unmap_ops);
if (ret) {
- netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
+ netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
gop - queue->tx_unmap_ops, ret);
for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
if (gop[i].status != GNTST_okay)
netdev_err(queue->vif->dev,
- " host_addr: %llx handle: %x status: %d\n",
+ " host_addr: 0x%llx handle: 0x%x status: %d\n",
gop[i].host_addr,
gop[i].handle,
gop[i].status);
@@ -1736,7 +1731,7 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
&queue->mmap_pages[pending_idx], 1);
if (ret) {
netdev_err(queue->vif->dev,
- "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
+ "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
ret,
pending_idx,
tx_unmap_op.host_addr,