Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_txrx.c')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 104
1 file changed, 81 insertions(+), 23 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 7ccd05bf4b06..f174c72480ab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -336,7 +336,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
 		    I40E_DEBUG_FD & pf->hw.debug_mask)
 			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
-		pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
+		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
 	} else {
 		pf->fd_tcp4_filter_cnt--;
 	}
@@ -594,8 +594,14 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
 
 		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
-		    pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
-			pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
+		    test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
+			/* These set_bit() calls aren't atomic with the
+			 * test_bit() here, but worst case we potentially
+			 * disable ATR and queue a flush right after SB
+			 * support is re-enabled. That shouldn't cause an
+			 * issue in practice
+			 */
+			set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
 			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
 		}
 
@@ -608,11 +614,10 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 		 */
 		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
 			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
-			    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) {
-				pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED;
+			    !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
+					      pf->state))
 				if (I40E_DEBUG_FD & pf->hw.debug_mask)
 					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
-			}
 		}
 	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
@@ -1583,9 +1588,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = i40e_rx_offset(rx_ring);
-
-	/* initialize pagecnt_bias to 1 representing we fully own page */
-	bi->pagecnt_bias = 1;
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;
 
 	return true;
 }
@@ -1951,8 +1955,8 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
 	 * the pagecnt_bias and page count so that we fully restock the
 	 * number of references the driver holds.
 	 */
-	if (unlikely(!pagecnt_bias)) {
-		page_ref_add(page, USHRT_MAX);
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX - 1);
 		rx_buffer->pagecnt_bias = USHRT_MAX;
 	}
 
@@ -2210,7 +2214,7 @@ static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
 				    struct xdp_buff *xdp)
 {
-	int result = I40E_XDP_PASS;
+	int err, result = I40E_XDP_PASS;
 	struct i40e_ring *xdp_ring;
 	struct bpf_prog *xdp_prog;
 	u32 act;
@@ -2229,6 +2233,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 		result = i40e_xmit_xdp_ring(xdp, xdp_ring);
 		break;
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+		result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 	case XDP_ABORTED:
@@ -2264,6 +2272,15 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
 #endif
 }
 
+static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
+{
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.
+	 */
+	wmb();
+	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
+}
+
 /**
  * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
  * @rx_ring: rx descriptor ring to transact packets on
@@ -2398,16 +2415,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 	}
 
 	if (xdp_xmit) {
-		struct i40e_ring *xdp_ring;
+		struct i40e_ring *xdp_ring =
+			rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 
-		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
-
-		/* Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch.
-		 */
-		wmb();
-
-		writel(xdp_ring->next_to_use, xdp_ring->tail);
+		i40e_xdp_ring_update_tail(xdp_ring);
+		xdp_do_flush_map();
 	}
 
 	rx_ring->skb = skb;
@@ -2647,7 +2659,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
 		return;
 
-	if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)
+	if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
 		return;
 
 	/* if sampling is disabled do nothing */
@@ -2687,7 +2699,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	th = (struct tcphdr *)(hdr.network + hlen);
 
 	/* Due to lack of space, no more new filters can be programmed */
-	if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
+	if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
 		return;
 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
 		/* HW ATR eviction will take care of removing filters on FIN
@@ -3655,3 +3667,49 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	return i40e_xmit_frame_ring(skb, tx_ring);
 }
+
+/**
+ * i40e_xdp_xmit - Implements ndo_xdp_xmit
+ * @dev: netdev
+ * @xdp: XDP buffer
+ *
+ * Returns Zero if sent, else an error code
+ **/
+int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	unsigned int queue_index = smp_processor_id();
+	struct i40e_vsi *vsi = np->vsi;
+	int err;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return -ENETDOWN;
+
+	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+		return -ENXIO;
+
+	err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]);
+	if (err != I40E_XDP_TX)
+		return -ENOSPC;
+
+	return 0;
+}
+
+/**
+ * i40e_xdp_flush - Implements ndo_xdp_flush
+ * @dev: netdev
+ **/
+void i40e_xdp_flush(struct net_device *dev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	unsigned int queue_index = smp_processor_id();
+	struct i40e_vsi *vsi = np->vsi;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return;
+
+	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+		return;
+
+	i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]);
+}
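
A note on the flag conversion: pf->flags was updated with plain ORs ("pf->flags |= ..."), which is a non-atomic read-modify-write, so concurrent updaters could lose a bit. Moving the two AUTO_DISABLED flags into pf->state lets the driver use the kernel's atomic bitops. A minimal sketch of the difference (illustration only, not driver code; both helper names are made up):

#include <linux/types.h>
#include <linux/bitops.h>

/* Two CPUs doing "flags |= A" and "flags |= B" concurrently can
 * each load the same old value, and one of the ORs can be lost.
 */
static void racy_set(unsigned long *flags, unsigned long bit)
{
	*flags |= BIT(bit);			/* load + OR + store, not atomic */
}

/* set_bit() performs the same update as one atomic RMW, and
 * test_and_set_bit() folds the "was it already set?" check into
 * that RMW, which is why the FD_SB_AUTO_DISABLED hunk can drop
 * the surrounding braces and the separate assignment.
 */
static bool set_once(unsigned long *state, unsigned long bit)
{
	return !test_and_set_bit(bit, state);	/* true only for the winner */
}

As the in-diff comment notes, the test_bit()/set_bit() pair in i40e_fd_handle_status() is still not one atomic unit; the patch argues the remaining window is harmless in practice.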
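A note on the pagecnt_bias change: instead of taking one page reference per received frame, the Rx path now charges each page with USHRT_MAX references up front (the allocator already holds one, hence the "- 1") and pays usage out of a local, non-atomic pagecnt_bias counter, so the atomic page refcount is touched roughly once per 64K reuses. A sketch of the scheme under assumed names (struct rx_buf and the helpers are hypothetical, not the driver's types):

#include <linux/kernel.h>
#include <linux/mm.h>

struct rx_buf {
	struct page *page;
	unsigned short pagecnt_bias;	/* references the driver still owns */
};

static void rx_buf_charge(struct rx_buf *buf)
{
	/* One atomic bump covers up to USHRT_MAX frames. */
	page_ref_add(buf->page, USHRT_MAX - 1);
	buf->pagecnt_bias = USHRT_MAX;
}

static void rx_buf_consume(struct rx_buf *buf)
{
	buf->pagecnt_bias--;		/* hot path: no atomics at all */
}

static void rx_buf_maybe_recharge(struct rx_buf *buf)
{
	/* Recharge while one reference is still in hand, matching the
	 * change from "!pagecnt_bias" to "pagecnt_bias == 1" above;
	 * 1 + (USHRT_MAX - 1) restores the bias to exactly USHRT_MAX.
	 */
	if (unlikely(buf->pagecnt_bias == 1))
		rx_buf_charge(buf);
}

The same invariant holds at allocation time: the fresh page arrives with refcount 1, so adding USHRT_MAX - 1 leaves refcount and bias equal.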
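A note on XDP_REDIRECT: xdp_do_redirect() only queues the frame (for example onto a per-CPU map flush list); nothing is guaranteed to leave the box until xdp_do_flush_map() runs, which is why i40e_clean_rx_irq() calls it once after the poll loop, next to the single tail bump, rather than per packet. A compressed sketch of the action handling under placeholder names (my_run_xdp and the MY_XDP_* codes stand in for the driver's internals):

#include <linux/filter.h>	/* bpf_prog_run_xdp(), xdp_do_redirect() */

enum { MY_XDP_PASS, MY_XDP_TX, MY_XDP_CONSUMED };

static int my_run_xdp(struct net_device *dev, struct bpf_prog *prog,
		      struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		return MY_XDP_PASS;
	case XDP_REDIRECT:
		/* Success means "queued", not "sent": the frame sits on
		 * a per-CPU flush list until xdp_do_flush_map() runs.
		 */
		return xdp_do_redirect(dev, xdp, prog) ?
			MY_XDP_CONSUMED : MY_XDP_TX;
	default:
		return MY_XDP_CONSUMED;
	}
}

Reporting I40E_XDP_TX for a successful redirect is what lets the existing xdp_xmit bookkeeping in the poll loop cover both locally transmitted and redirected frames with one doorbell and one flush per NAPI poll.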
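A note on the two new exports: they are the receiving half of redirect. xdp_do_redirect() invoked on some other device ends up calling the target's ndo_xdp_xmit once per frame and ndo_xdp_flush once per completed poll, which is why i40e_xdp_xmit() only queues onto the per-CPU XDP Tx ring (indexed by smp_processor_id(), so no locking is needed, with the queue_index bound check guarding CPUs beyond the VSI's queue count) while i40e_xdp_flush() performs the deferred tail bump. Wiring them up would look roughly like the sketch below (illustrative only; the real i40e_netdev_ops table lives in i40e_main.c with many more callbacks, using the prototypes from the driver's headers):

#include <linux/netdevice.h>

static const struct net_device_ops my_netdev_ops = {
	.ndo_start_xmit	= i40e_lan_xmit_frame,
	.ndo_xdp_xmit	= i40e_xdp_xmit,	/* queue one frame */
	.ndo_xdp_flush	= i40e_xdp_flush,	/* one doorbell per poll */
};

The split also explains why the new i40e_xdp_ring_update_tail() helper uses writel_relaxed(): the explicit wmb() already orders the descriptor writes ahead of the doorbell, so the relaxed MMIO write avoids the additional barrier that plain writel() implies.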