author    | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-17 16:26:30 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-17 16:26:30 -0700
commit    | a7fd20d1c476af4563e66865213474a2f9f473a4 (patch)
tree      | fb1399e2f82842450245fb058a8fb23c52865f43 /drivers/net/ethernet/intel/i40evf/i40e_txrx.h
parent    | b80fed9595513384424cd141923c9161c4b5021b (diff)
parent    | 917fa5353da05e8a0045b8acacba8d50400d5b12 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
"Highlights:
1) Support SPI based w5100 devices, from Akinobu Mita.
2) Partial Segmentation Offload, from Alexander Duyck.
3) Add GMAC4 support to stmmac driver, from Alexandre TORGUE.
4) Allow cls_flower stats offload, from Amir Vadai.
5) Implement bpf blinding, from Daniel Borkmann.
 6) Optimize _ASYNC_ bit twiddling on sockets; unless the socket is
    actually using FASYNC these atomics are superfluous. From Eric
    Dumazet.
7) Run TCP more preemptibly, also from Eric Dumazet.
8) Support LED blinking, EEPROM dumps, and rxvlan offloading in mlx5e
driver, from Gal Pressman.
9) Allow creating ppp devices via rtnetlink, from Guillaume Nault.
10) Improve BPF usage documentation, from Jesper Dangaard Brouer.
11) Support tunneling offloads in qed, from Manish Chopra.
12) aRFS offloading in mlx5e, from Maor Gottlieb.
13) Add RFS and RPS support to SCTP protocol, from Marcelo Ricardo
Leitner.
14) Add MSG_EOR support to TCP; this allows controlling packet
    coalescing on application record boundaries for more accurate
    socket timestamp sampling. From Martin KaFai Lau. (A minimal
    usage sketch follows this quote.)
15) Fix alignment of 64-bit netlink attributes across the board, from
Nicolas Dichtel.
16) Per-vlan stats in bridging, from Nikolay Aleksandrov.
17) Several conversions of drivers to ethtool ksettings, from Philippe
Reynes.
18) Checksum neutral ILA in ipv6, from Tom Herbert.
19) Factorize all of the various Marvell DSA drivers into one, from
    Vivien Didelot.
20) Add VF support to qed driver, from Yuval Mintz"
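For item 14 above, here is the promised minimal sketch of the new TCP MSG_EOR semantics. This is an illustration of my own, not code from the series; fd is assumed to be an already-connected TCP socket:

	#include <sys/types.h>
	#include <sys/socket.h>

	/* Send one application-level record.  MSG_EOR on the final byte
	 * range marks a record boundary: TCP will not coalesce a later
	 * send() into the skb carrying these bytes, so a TX timestamp
	 * requested for this send maps exactly to the record's last byte.
	 */
	static ssize_t send_record(int fd, const void *rec, size_t len)
	{
		return send(fd, rec, len, MSG_EOR);
	}

Without MSG_EOR, TCP is free to merge consecutive small writes into one segment, which blurs which bytes a sampled timestamp refers to.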
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1649 commits)
Revert "phy dp83867: Fix compilation with CONFIG_OF_MDIO=m"
Revert "phy dp83867: Make rgmii parameters optional"
r8169: default to 64-bit DMA on recent PCIe chips
phy dp83867: Make rgmii parameters optional
phy dp83867: Fix compilation with CONFIG_OF_MDIO=m
bpf: arm64: remove callee-save registers use for tmp registers
asix: Fix offset calculation in asix_rx_fixup() causing slow transmissions
switchdev: pass pointer to fib_info instead of copy
net_sched: close another race condition in tcf_mirred_release()
tipc: fix nametable publication field in nl compat
drivers: net: Don't print unpopulated net_device name
qed: add support for dcbx.
ravb: Add missing free_irq() calls to ravb_close()
qed: Remove a stray tab
net: ethernet: fec-mpc52xx: use phy_ethtool_{get|set}_link_ksettings
net: ethernet: fec-mpc52xx: use phydev from struct net_device
bpf, doc: fix typo on bpf_asm descriptions
stmmac: hardware TX COE doesn't work when force_thresh_dma_mode is set
net: ethernet: fs-enet: use phy_ethtool_{get|set}_link_ksettings
net: ethernet: fs-enet: use phydev from struct net_device
...
Diffstat (limited to 'drivers/net/ethernet/intel/i40evf/i40e_txrx.h')
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 114 |
1 file changed, 77 insertions, 37 deletions
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 0429553fe887..0112277e5882 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t {
 	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
 	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
 
-/* Supported Rx Buffer Sizes */
-#define I40E_RXBUFFER_512   512    /* Used for packet split */
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256   256
 #define I40E_RXBUFFER_2048  2048
 #define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
 #define I40E_RXBUFFER_4096  4096
@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t {
  * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
  * this adds up to 512 bytes of extra data meaning the smallest allocation
  * we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
  */
-#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+				     const u64 stat_err_bits)
+{
+	return !!(rx_desc->wb.qword1.status_error_len &
+		  cpu_to_le64(stat_err_bits));
+}
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define I40E_RX_BUFFER_WRITE	16	/* Must be power of 2 */
@@ -142,14 +161,41 @@ enum i40e_dyn_idx_t {
 	prefetch((n));				\
 	} while (0)
 
-#define i40e_rx_desc i40e_32byte_rx_desc
-
 #define I40E_MAX_BUFFER_TXD	8
 #define I40E_MIN_TX_LEN		17
-#define I40E_MAX_DATA_PER_TXD	8192
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE		4096
+#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
+ * the value I40E_MAX_DATA_PER_TXD_ALIGNED.  It is needed due to the fact
+ * that 12K is not a power of 2 and division is expensive.  It is used to
+ * approximate the number of descriptors used per linear buffer.  Note
+ * that this will overestimate in some cases as it doesn't account for the
+ * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
+ * the error should not impact things much as large buffers usually mean
+ * we will use fewer descriptors than there are frags in an skb.
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+	const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
+	const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
+	unsigned int adjust = ~(u32)0;
+
+	/* if we rounded up on the reciprocal pull down the adjustment */
+	if ((max * reciprocal) > adjust)
+		adjust = ~(u32)(reciprocal - 1);
+
+	return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+}
 
 /* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING	4
 
@@ -183,10 +229,8 @@ struct i40e_tx_buffer {
 
 struct i40e_rx_buffer {
 	struct sk_buff *skb;
-	void *hdr_buf;
 	dma_addr_t dma;
 	struct page *page;
-	dma_addr_t page_dma;
 	unsigned int page_offset;
 };
 
@@ -215,22 +259,18 @@ struct i40e_rx_queue_stats {
 enum i40e_ring_state_t {
 	__I40E_TX_FDIR_INIT_DONE,
 	__I40E_TX_XPS_INIT_DONE,
-	__I40E_RX_PS_ENABLED,
-	__I40E_RX_16BYTE_DESC_ENABLED,
 };
 
-#define ring_is_ps_enabled(ring) \
-	test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
-	set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
-	clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define ring_is_16byte_desc_enabled(ring) \
-	test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define set_ring_16byte_desc_enabled(ring) \
-	set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define clear_ring_16byte_desc_enabled(ring) \
-	clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+/* some useful defines for virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT      0
+#define I40E_RX_DTYPE_HEADER_SPLIT  1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
+#define I40E_RX_SPLIT_L2      0x1
+#define I40E_RX_SPLIT_IP      0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP    0x8
 
 /* struct that defines a descriptor ring, associated with a VSI */
 struct i40e_ring {
@@ -249,16 +289,7 @@ struct i40e_ring {
 
 	u16 count;			/* Number of descriptors */
 	u16 reg_idx;			/* HW register index of the ring */
-	u16 rx_hdr_len;
 	u16 rx_buf_len;
-	u8  dtype;
-#define I40E_RX_DTYPE_NO_SPLIT      0
-#define I40E_RX_DTYPE_HEADER_SPLIT  1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
-#define I40E_RX_SPLIT_L2      0x1
-#define I40E_RX_SPLIT_IP      0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP    0x8
 
 	/* used in interrupt processing */
 	u16 next_to_use;
@@ -290,6 +321,7 @@ struct i40e_ring {
 	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */
 
 	struct rcu_head rcu;		/* to avoid race on free */
+	u16 next_to_alloc;
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
@@ -313,9 +345,7 @@ struct i40e_ring_container {
 #define i40e_for_each_ring(pos, head) \
 	for (pos = (head).ring; pos != NULL; pos = pos->next)
 
-bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
-void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
 void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
@@ -359,7 +389,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
 	int count = 0, size = skb_headlen(skb);
 
 	for (;;) {
-		count += TXD_USE_COUNT(size);
+		count += i40e_txd_use_count(size);
 		if (!nr_frags--)
 			break;
@@ -405,4 +435,14 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 	/* we can support up to 8 data buffers for a single send */
 	return count != I40E_MAX_BUFFER_TXD;
 }
+
+/**
+ * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
+ * @ptype: the packet type field from Rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fcoe(u16 ptype)
+{
+	return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
+	       (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
+}
 #endif /* _I40E_TXRX_H_ */
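A usage note on the new i40e_test_staterr() helper: because qword1 of the write-back descriptor is kept in little-endian form and status_error_len starts at bit 0, a caller can test any status or error bit with a plain mask and no shift. The wrapper below is a hypothetical sketch modeled on the driver's Rx clean path, not part of this patch; I40E_RX_DESC_STATUS_DD_SHIFT and BIT() are existing kernel definitions:

	/* Hypothetical wrapper: true once hardware has written this
	 * descriptor back.  DD (descriptor done) is bit 0 of
	 * status_error_len, which is why no shift is needed at all.
	 */
	static inline bool i40e_rx_desc_done(union i40e_rx_desc *rx_desc)
	{
		return i40e_test_staterr(rx_desc,
					 BIT(I40E_RX_DESC_STATUS_DD_SHIFT));
	}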
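The reciprocal trick in i40e_txd_use_count() is easy to sanity-check outside the kernel. Note first that I40E_MAX_DATA_PER_TXD_ALIGNED works out to (16 * 1024 - 1) & ~4095 = 12288, i.e. 12K. The standalone program below is my own test harness (plain stdint types stand in for the kernel's u32/u64): it compares the function against true round-up division for every size up to 64K, the largest linear buffer a single skb fragment can span. As the comment in the patch warns, the approximation may round one descriptor high, but it must never undercount:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_ALIGNED 12288u  /* I40E_MAX_DATA_PER_TXD_ALIGNED */

	static unsigned int txd_use_count(unsigned int size)
	{
		const uint32_t max = MAX_ALIGNED;
		const uint32_t reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
		uint32_t adjust = ~(uint32_t)0;

		/* 32-bit product, wrapping on overflow, as in the driver */
		if ((uint32_t)(max * reciprocal) > adjust)
			adjust = ~(uint32_t)(reciprocal - 1);

		return (uint32_t)((((uint64_t)size * reciprocal) + adjust) >> 32);
	}

	int main(void)
	{
		for (unsigned int size = 1; size <= 65536; size++) {
			unsigned int want = (size + MAX_ALIGNED - 1) / MAX_ALIGNED;
			unsigned int got = txd_use_count(size);

			/* rounding high wastes a descriptor slot; rounding
			 * low could run the Tx ring out of descriptors
			 * mid-frame, so only got >= want is acceptable
			 */
			if (got < want || got > want + 1) {
				printf("size %u: got %u, want %u\n", size, got, want);
				return 1;
			}
		}
		puts("never undercounts; at most one descriptor high");
		return 0;
	}

The overestimate shows up at exact multiples of 12K (for example size 12288 yields 2 rather than 1), which matches the "this will overestimate in some cases" caveat in the patch comment and is harmless for descriptor accounting.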