author     Ingo Molnar <mingo@kernel.org>  2016-10-16 11:31:39 +0200
committer  Ingo Molnar <mingo@kernel.org>  2016-10-16 11:31:39 +0200
commit     1d33369db25eb7f37b7a8bd22d736888b4501a9c (patch)
tree       116d764339be1bca928870151decbedc53a9e1d1  /drivers/net/xen-netback/common.h
parent     23446cb66c073b827779e5eb3dec301623299b32 (diff)
parent     1001354ca34179f3db924eb66672442a173147dc (diff)
Merge tag 'v4.9-rc1' into x86/urgent, to pick up updates
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/net/xen-netback/common.h')
-rw-r--r--  drivers/net/xen-netback/common.h  33
1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 84d6cbdd11b2..3ce1f7da8647 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -91,13 +91,6 @@ struct xenvif_rx_meta {
  */
 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 
-/* It's possible for an skb to have a maximal number of frags
- * but still be less than MAX_BUFFER_OFFSET in size. Thus the
- * worst-case number of copy operations is MAX_XEN_SKB_FRAGS per
- * ring slot.
- */
-#define MAX_GRANT_COPY_OPS (MAX_XEN_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
-
 #define NETBACK_INVALID_HANDLE -1
 
 /* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
@@ -133,6 +126,15 @@ struct xenvif_stats {
 	unsigned long tx_frag_overflow;
 };
 
+#define COPY_BATCH_SIZE 64
+
+struct xenvif_copy_state {
+	struct gnttab_copy op[COPY_BATCH_SIZE];
+	RING_IDX idx[COPY_BATCH_SIZE];
+	unsigned int num;
+	struct sk_buff_head *completed;
+};
+
 struct xenvif_queue { /* Per-queue data for xenvif */
 	unsigned int id; /* Queue ID, 0-based */
 	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
@@ -189,12 +191,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	unsigned long last_rx_time;
 	bool stalled;
 
-	struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
-
-	/* We create one meta structure per ring request we consume, so
-	 * the maximum number is the same as the ring size.
-	 */
-	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
+	struct xenvif_copy_state rx_copy;
 
 	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
 	unsigned long credit_bytes;
@@ -260,7 +257,6 @@ struct xenvif {
 
 	/* Frontend feature information. */
 	int gso_mask;
-	int gso_prefix_mask;
 
 	u8 can_sg:1;
 	u8 ip_csum:1;
@@ -292,8 +288,6 @@ struct xenvif {
 #endif
 
 	struct xen_netif_ctrl_back_ring ctrl;
-	struct task_struct *ctrl_task;
-	wait_queue_head_t ctrl_wq;
 	unsigned int ctrl_irq;
 
 	/* Miscellaneous private stuff. */
@@ -359,8 +353,9 @@ void xenvif_kick_thread(struct xenvif_queue *queue);
 
 int xenvif_dealloc_kthread(void *data);
 
-int xenvif_ctrl_kthread(void *data);
+irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
 
+void xenvif_rx_action(struct xenvif_queue *queue);
 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
 void xenvif_carrier_on(struct xenvif *vif);
@@ -412,4 +407,8 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
 
 void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);
 
+#ifdef CONFIG_DEBUG_FS
+void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
+#endif
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
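For orientation: the header change above drops the per-queue worst-case grant_copy_op[] and meta[] arrays and replaces them with a small fixed batch of grant-copy operations (struct xenvif_copy_state, COPY_BATCH_SIZE = 64). The sketch below shows one plausible way such a batch could be flushed with a single batched grant-copy hypercall; the helper name rx_copy_flush_sketch() and its error handling are illustrative assumptions, not the actual xen-netback RX code, which is not part of this header-only diffstat.

```c
/* Illustrative sketch only -- not the xen-netback implementation.
 * Assumes the struct xenvif_copy_state definition added in common.h above.
 */
#include <linux/printk.h>
#include <xen/grant_table.h>	/* gnttab_batch_copy(), struct gnttab_copy */
#include "common.h"		/* struct xenvif_copy_state (added above) */

static void rx_copy_flush_sketch(struct xenvif_copy_state *cp)
{
	unsigned int i;

	/* Issue all queued copies in one batched GNTTABOP_copy hypercall,
	 * rather than sizing a per-queue array for the worst case.
	 */
	gnttab_batch_copy(cp->op, cp->num);

	for (i = 0; i < cp->num; i++) {
		if (cp->op[i].status != GNTST_okay)
			pr_err_ratelimited("rx copy op %u failed: %d\n",
					   i, cp->op[i].status);
		/* cp->idx[i] records which RX ring slot each op serves,
		 * so the corresponding ring response could be finalised here.
		 */
	}

	cp->num = 0;	/* the batch is empty again */
}
```

A caller would presumably append gnttab_copy entries to op[] as it builds guest RX responses and invoke a flush like this whenever num reaches COPY_BATCH_SIZE or a packet completes; the real batching and flush logic lives in xen-netback's guest-RX path rather than in this header.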