Diffstat (limited to 'drivers/net/ethernet/qlogic/qede')
-rw-r--r--  drivers/net/ethernet/qlogic/qede/Makefile          2
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h          173
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c  362
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c    1514
4 files changed, 1296 insertions, 755 deletions
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 28dc58919c85..048a230c3ce0 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
-qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o
+qede-$(CONFIG_QED_RDMA) += qede_roce.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 28c0e9f42c9e..c79dc78746fc 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -16,6 +16,7 @@
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
+#include <linux/bpf.h>
#include <linux/io.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/eth_common.h>
@@ -127,10 +128,9 @@ struct qede_dev {
const struct qed_eth_ops *ops;
- struct qed_dev_eth_info dev_info;
+ struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
-#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \
- (edev)->dev_info.num_tc)
+#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
struct qede_fastpath *fp_array;
u8 req_num_tx;
@@ -139,17 +139,9 @@ struct qede_dev {
u8 fp_num_rx;
u16 req_queues;
u16 num_queues;
- u8 num_tc;
#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
-#define QEDE_TSS_COUNT(edev) (((edev)->num_queues - (edev)->fp_num_rx) * \
- (edev)->num_tc)
-#define QEDE_TX_IDX(edev, txqidx) ((edev)->fp_num_rx + (txqidx) % \
- QEDE_TSS_COUNT(edev))
-#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / QEDE_TSS_COUNT(edev))
-#define QEDE_TX_QUEUE(edev, txqidx) \
- (&(edev)->fp_array[QEDE_TX_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX(\
- (edev), (txqidx))])
+#define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx)
struct qed_int_info int_info;
unsigned char primary_mac[ETH_ALEN];
@@ -193,7 +185,11 @@ struct qede_dev {
u16 vxlan_dst_port;
u16 geneve_dst_port;
+ bool wol_enabled;
+
struct qede_rdma_dev rdma_info;
+
+ struct bpf_prog *xdp_prog;
};
enum QEDE_STATE {
@@ -223,39 +219,67 @@ enum qede_agg_state {
};
struct qede_agg_info {
- struct sw_rx_data replace_buf;
- dma_addr_t replace_buf_mapping;
- struct sw_rx_data start_buf;
- dma_addr_t start_buf_mapping;
- struct eth_fast_path_rx_tpa_start_cqe start_cqe;
- enum qede_agg_state agg_state;
+ /* rx_buf is a data buffer that can be placed / consumed from rx bd
+ * chain. It has two purposes: We will preallocate the data buffer
+ * for each aggregation when we open the interface and will place this
+ * buffer on the rx-bd-ring when we receive TPA_START. We don't want
+ * to be in a state where allocation fails, as we can't reuse the
+ * consumer buffer in the rx-chain since FW may still be writing to it
+ * (since header needs to be modified for TPA).
+ * The second purpose is to keep a pointer to the bd buffer during
+ * aggregation.
+ */
+ struct sw_rx_data buffer;
+ dma_addr_t buffer_mapping;
+
struct sk_buff *skb;
- int frag_id;
+
+ /* We need some structs from the start cookie until termination */
u16 vlan_tag;
+ u16 start_cqe_bd_len;
+ u8 start_cqe_placement_offset;
+
+ u8 state;
+ u8 frag_id;
+
+ u8 tunnel_type;
};
struct qede_rx_queue {
- __le16 *hw_cons_ptr;
- struct sw_rx_data *sw_rx_ring;
- u16 sw_rx_cons;
- u16 sw_rx_prod;
- struct qed_chain rx_bd_ring;
- struct qed_chain rx_comp_ring;
- void __iomem *hw_rxq_prod_addr;
+ __le16 *hw_cons_ptr;
+ void __iomem *hw_rxq_prod_addr;
+
+ /* Required for the allocation of replacement buffers */
+ struct device *dev;
+
+ struct bpf_prog *xdp_prog;
+
+ u16 sw_rx_cons;
+ u16 sw_rx_prod;
+
+ u16 num_rx_buffers; /* Slowpath */
+ u8 data_direction;
+ u8 rxq_id;
+
+ u32 rx_buf_size;
+ u32 rx_buf_seg_size;
+
+ u64 rcv_pkts;
+
+ struct sw_rx_data *sw_rx_ring;
+ struct qed_chain rx_bd_ring;
+ struct qed_chain rx_comp_ring ____cacheline_aligned;
/* GRO */
- struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
+ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
- int rx_buf_size;
- unsigned int rx_buf_seg_size;
+ u64 rx_hw_errors;
+ u64 rx_alloc_errors;
+ u64 rx_ip_frags;
- u16 num_rx_buffers;
- u16 rxq_id;
+ u64 xdp_no_pass;
- u64 rcv_pkts;
- u64 rx_hw_errors;
- u64 rx_alloc_errors;
- u64 rx_ip_frags;
+ void *handle;
};
union db_prod {
@@ -271,20 +295,39 @@ struct sw_tx_bd {
};
struct qede_tx_queue {
- int index; /* Queue index */
- __le16 *hw_cons_ptr;
- struct sw_tx_bd *sw_tx_ring;
- u16 sw_tx_cons;
- u16 sw_tx_prod;
- struct qed_chain tx_pbl;
- void __iomem *doorbell_addr;
- union db_prod tx_db;
-
- u16 num_tx_buffers;
- u64 xmit_pkts;
- u64 stopped_cnt;
-
- bool is_legacy;
+ u8 is_xdp;
+ bool is_legacy;
+ u16 sw_tx_cons;
+ u16 sw_tx_prod;
+ u16 num_tx_buffers; /* Slowpath only */
+
+ u64 xmit_pkts;
+ u64 stopped_cnt;
+
+ __le16 *hw_cons_ptr;
+
+ /* Needed for the mapping of packets */
+ struct device *dev;
+
+ void __iomem *doorbell_addr;
+ union db_prod tx_db;
+ int index; /* Slowpath only */
+#define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
+ QEDE_MAX_TSS_CNT(edev))
+#define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
+
+ /* Regular Tx requires skb + metadata for release purpose,
+ * while XDP requires only the pages themselves.
+ */
+ union {
+ struct sw_tx_bd *skbs;
+ struct page **pages;
+ } sw_tx_ring;
+
+ struct qed_chain tx_pbl;
+
+ /* Slowpath; Should be kept in end [unless missing padding] */
+ void *handle;
};
#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
@@ -301,13 +344,16 @@ struct qede_fastpath {
struct qede_dev *edev;
#define QEDE_FASTPATH_TX BIT(0)
#define QEDE_FASTPATH_RX BIT(1)
+#define QEDE_FASTPATH_XDP BIT(2)
#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
u8 type;
u8 id;
+ u8 xdp_xmit;
struct napi_struct napi;
struct qed_sb_info *sb_info;
struct qede_rx_queue *rxq;
- struct qede_tx_queue *txqs;
+ struct qede_tx_queue *txq;
+ struct qede_tx_queue *xdp_tx;
#define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
char name[VEC_NAME_SIZE];
@@ -320,6 +366,7 @@ struct qede_fastpath {
#define XMIT_L4_CSUM BIT(0)
#define XMIT_LSO BIT(1)
#define XMIT_ENC BIT(2)
+#define XMIT_ENC_GSO_L4_CSUM BIT(3)
#define QEDE_CSUM_ERROR BIT(0)
#define QEDE_CSUM_UNNECESSARY BIT(1)
@@ -329,8 +376,13 @@ struct qede_fastpath {
#define QEDE_SP_VXLAN_PORT_CONFIG 2
#define QEDE_SP_GENEVE_PORT_CONFIG 3
-union qede_reload_args {
- u16 mtu;
+struct qede_reload_args {
+ void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
+ union {
+ netdev_features_t features;
+ struct bpf_prog *new_prog;
+ u16 mtu;
+ } u;
};
#ifdef CONFIG_DCB
@@ -339,21 +391,21 @@ void qede_set_dcbnl_ops(struct net_device *ndev);
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_reload(struct qede_dev *edev,
- void (*func)(struct qede_dev *edev,
- union qede_reload_args *args),
- union qede_reload_args *args);
+ struct qede_reload_args *args, bool is_locked);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
+void __qede_lock(struct qede_dev *edev);
+void __qede_unlock(struct qede_dev *edev);
bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
-void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
- u8 count);
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
-#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
+#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
@@ -361,8 +413,9 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
#define NUM_TX_BDS_MIN 128
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
-#define QEDE_MIN_PKT_LEN 64
-#define QEDE_RX_HDR_SIZE 256
+#define QEDE_MIN_PKT_LEN 64
+#define QEDE_RX_HDR_SIZE 256
+#define QEDE_MAX_JUMBO_PACKET_SIZE 9600
#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
#endif /* _QEDE_H_ */
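The qede.h changes above replace the old "union qede_reload_args" with a
struct that carries both a callback and a payload union, so MTU, feature
and XDP-program updates can all funnel through a single qede_reload()
call. A minimal caller sketch, mirroring the qede_change_mtu() flow shown
further down (qede_update_mtu() is real driver code from this patch; the
exact behaviour behind is_locked == false is inferred from the signature):

	static int example_set_mtu(struct net_device *ndev, int new_mtu)
	{
		struct qede_dev *edev = netdev_priv(ndev);
		struct qede_reload_args args;

		/* Fill the payload and the callback that applies it while
		 * the device is quiesced; qede_reload() unloads, invokes
		 * args.func, then re-loads the interface.
		 */
		args.u.mtu = new_mtu;
		args.func = &qede_update_mtu;
		qede_reload(edev, &args, false);

		return 0;
	}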
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 25a9b293ee8f..1c48f445c93b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -16,13 +16,6 @@
#include <linux/capability.h>
#include "qede.h"
-#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
-#define QEDE_STAT_STRING(stat_name) (#stat_name)
-#define _QEDE_STAT(stat_name, pf_only) \
- {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
-#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
-#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)
-
#define QEDE_RQSTAT_OFFSET(stat_name) \
(offsetof(struct qede_rx_queue, stat_name))
#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
@@ -39,12 +32,10 @@ static const struct {
QEDE_RQSTAT(rx_hw_errors),
QEDE_RQSTAT(rx_alloc_errors),
QEDE_RQSTAT(rx_ip_frags),
+ QEDE_RQSTAT(xdp_no_pass),
};
#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
-#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
- (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
- qede_rqstats_arr[(sindex)].offset)))
#define QEDE_TQSTAT_OFFSET(stat_name) \
(offsetof(struct qede_tx_queue, stat_name))
#define QEDE_TQSTAT_STRING(stat_name) (#stat_name)
@@ -59,10 +50,12 @@ static const struct {
QEDE_TQSTAT(stopped_cnt),
};
-#define QEDE_TQSTATS_DATA(dev, sindex, tssid, tcid) \
- (*((u64 *)(((void *)(&dev->fp_array[tssid].txqs[tcid])) +\
- qede_tqstats_arr[(sindex)].offset)))
-
+#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
+#define QEDE_STAT_STRING(stat_name) (#stat_name)
+#define _QEDE_STAT(stat_name, pf_only) \
+ {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
+#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
+#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)
static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
@@ -136,10 +129,6 @@ static const struct {
QEDE_STAT(coalesced_bytes),
};
-#define QEDE_STATS_DATA(dev, index) \
- (*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
- + qede_stats_arr[(index)].offset)))
-
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
enum {
@@ -157,6 +146,7 @@ enum qede_ethtool_tests {
QEDE_ETHTOOL_MEMORY_TEST,
QEDE_ETHTOOL_REGISTER_TEST,
QEDE_ETHTOOL_CLOCK_TEST,
+ QEDE_ETHTOOL_NVRAM_TEST,
QEDE_ETHTOOL_TEST_MAX
};
@@ -166,34 +156,63 @@ static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
"Memory (online)\t\t",
"Register (online)\t",
"Clock (online)\t\t",
+ "Nvram (online)\t\t",
};
+static void qede_get_strings_stats_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq, u8 **buf)
+{
+ int i;
+
+ for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
+ if (txq->is_xdp)
+ sprintf(*buf, "%d [XDP]: %s",
+ QEDE_TXQ_XDP_TO_IDX(edev, txq),
+ qede_tqstats_arr[i].string);
+ else
+ sprintf(*buf, "%d: %s", txq->index,
+ qede_tqstats_arr[i].string);
+ *buf += ETH_GSTRING_LEN;
+ }
+}
+
+static void qede_get_strings_stats_rxq(struct qede_dev *edev,
+ struct qede_rx_queue *rxq, u8 **buf)
+{
+ int i;
+
+ for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
+ sprintf(*buf, "%d: %s", rxq->rxq_id,
+ qede_rqstats_arr[i].string);
+ *buf += ETH_GSTRING_LEN;
+ }
+}
+
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
- int i, j, k;
-
- for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
- int tc;
-
- for (j = 0; j < QEDE_NUM_RQSTATS; j++)
- sprintf(buf + (k + j) * ETH_GSTRING_LEN,
- "%d: %s", i, qede_rqstats_arr[j].string);
- k += QEDE_NUM_RQSTATS;
- for (tc = 0; tc < edev->num_tc; tc++) {
- for (j = 0; j < QEDE_NUM_TQSTATS; j++)
- sprintf(buf + (k + j) * ETH_GSTRING_LEN,
- "%d.%d: %s", i, tc,
- qede_tqstats_arr[j].string);
- k += QEDE_NUM_TQSTATS;
- }
+ struct qede_fastpath *fp;
+ int i;
+
+ /* Account for queue statistics */
+ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+ fp = &edev->fp_array[i];
+
+ if (fp->type & QEDE_FASTPATH_RX)
+ qede_get_strings_stats_rxq(edev, fp->rxq, &buf);
+
+ if (fp->type & QEDE_FASTPATH_XDP)
+ qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);
+
+ if (fp->type & QEDE_FASTPATH_TX)
+ qede_get_strings_stats_txq(edev, fp->txq, &buf);
}
- for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+ /* Account for non-queue statistics */
+ for (i = 0; i < QEDE_NUM_STATS; i++) {
if (IS_VF(edev) && qede_stats_arr[i].pf_only)
continue;
- strcpy(buf + (k + j) * ETH_GSTRING_LEN,
- qede_stats_arr[i].string);
- j++;
+ strcpy(buf, qede_stats_arr[i].string);
+ buf += ETH_GSTRING_LEN;
}
}
@@ -219,42 +238,61 @@ static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
}
+static void qede_get_ethtool_stats_txq(struct qede_tx_queue *txq, u64 **buf)
+{
+ int i;
+
+ for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
+ **buf = *((u64 *)(((void *)txq) + qede_tqstats_arr[i].offset));
+ (*buf)++;
+ }
+}
+
+static void qede_get_ethtool_stats_rxq(struct qede_rx_queue *rxq, u64 **buf)
+{
+ int i;
+
+ for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
+ **buf = *((u64 *)(((void *)rxq) + qede_rqstats_arr[i].offset));
+ (*buf)++;
+ }
+}
+
static void qede_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *buf)
{
struct qede_dev *edev = netdev_priv(dev);
- int sidx, cnt = 0;
- int qid;
+ struct qede_fastpath *fp;
+ int i;
qede_fill_by_demand_stats(edev);
- mutex_lock(&edev->qede_lock);
+ /* Need to protect the access to the fastpath array */
+ __qede_lock(edev);
- for (qid = 0; qid < QEDE_QUEUE_CNT(edev); qid++) {
- int tc;
+ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+ fp = &edev->fp_array[i];
- if (edev->fp_array[qid].type & QEDE_FASTPATH_RX) {
- for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++)
- buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid);
- }
+ if (fp->type & QEDE_FASTPATH_RX)
+ qede_get_ethtool_stats_rxq(fp->rxq, &buf);
- if (edev->fp_array[qid].type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < edev->num_tc; tc++) {
- for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++)
- buf[cnt++] = QEDE_TQSTATS_DATA(edev,
- sidx,
- qid, tc);
- }
- }
+ if (fp->type & QEDE_FASTPATH_XDP)
+ qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);
+
+ if (fp->type & QEDE_FASTPATH_TX)
+ qede_get_ethtool_stats_txq(fp->txq, &buf);
}
- for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
- if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
+ for (i = 0; i < QEDE_NUM_STATS; i++) {
+ if (IS_VF(edev) && qede_stats_arr[i].pf_only)
continue;
- buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+ *buf = *((u64 *)(((void *)&edev->stats) +
+ qede_stats_arr[i].offset));
+
+ buf++;
}
- mutex_unlock(&edev->qede_lock);
+ __qede_unlock(edev);
}
static int qede_get_sset_count(struct net_device *dev, int stringset)
@@ -271,8 +309,18 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
if (qede_stats_arr[i].pf_only)
num_stats--;
}
- return num_stats + QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS +
- QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->num_tc;
+
+ /* Account for the Regular Tx statistics */
+ num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
+
+ /* Account for the Regular Rx statistics */
+ num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;
+
+ /* Account for XDP statistics [if needed] */
+ if (edev->xdp_prog)
+ num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_TQSTATS;
+ return num_stats;
+
case ETH_SS_PRIV_FLAGS:
return QEDE_PRI_FLAG_LEN;
case ETH_SS_TEST:
@@ -318,7 +366,7 @@ static const struct qede_link_mode_mapping qed_lm_map[] = {
{ \
int i; \
\
- for (i = 0; i < QED_LM_COUNT; i++) { \
+ for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \
if ((caps) & (qed_lm_map[i].qed_link_mode)) \
__set_bit(qed_lm_map[i].ethtool_link_mode,\
lk_ksettings->link_modes.name); \
@@ -329,7 +377,7 @@ static const struct qede_link_mode_mapping qed_lm_map[] = {
{ \
int i; \
\
- for (i = 0; i < QED_LM_COUNT; i++) { \
+ for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \
if (test_bit(qed_lm_map[i].ethtool_link_mode, \
lk_ksettings->link_modes.name)) \
caps |= qed_lm_map[i].qed_link_mode; \
@@ -343,6 +391,8 @@ static int qede_get_link_ksettings(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
+ __qede_lock(edev);
+
memset(&current_link, 0, sizeof(current_link));
edev->ops->common->get_link(edev->cdev, &current_link);
@@ -362,6 +412,9 @@ static int qede_get_link_ksettings(struct net_device *dev,
base->speed = SPEED_UNKNOWN;
base->duplex = DUPLEX_UNKNOWN;
}
+
+ __qede_unlock(edev);
+
base->port = current_link.port;
base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
AUTONEG_DISABLE;
@@ -481,6 +534,45 @@ static void qede_get_drvinfo(struct net_device *ndev,
strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
}
+static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ if (edev->dev_info.common.wol_support) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = edev->wol_enabled ? WAKE_MAGIC : 0;
+ }
+}
+
+static int qede_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ bool wol_requested;
+ int rc;
+
+ if (wol->wolopts & ~WAKE_MAGIC) {
+ DP_INFO(edev,
+ "Can't support WoL options other than magic-packet\n");
+ return -EINVAL;
+ }
+
+ wol_requested = !!(wol->wolopts & WAKE_MAGIC);
+ if (wol_requested == edev->wol_enabled)
+ return 0;
+
+ /* Need to actually change configuration */
+ if (!edev->dev_info.common.wol_support) {
+ DP_INFO(edev, "Device doesn't support WoL\n");
+ return -EINVAL;
+ }
+
+ rc = edev->ops->common->update_wol(edev->cdev, wol_requested);
+ if (!rc)
+ edev->wol_enabled = wol_requested;
+
+ return rc;
+}
+
static u32 qede_get_msglevel(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
@@ -631,8 +723,7 @@ static int qede_set_ringparam(struct net_device *dev,
edev->q_num_rx_buffers = ering->rx_pending;
edev->q_num_tx_buffers = ering->tx_pending;
- if (netif_running(edev->ndev))
- qede_reload(edev, NULL, NULL);
+ qede_reload(edev, NULL, false);
return 0;
}
@@ -717,35 +808,27 @@ static int qede_get_regs_len(struct net_device *ndev)
return -EINVAL;
}
-static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
+static void qede_update_mtu(struct qede_dev *edev,
+ struct qede_reload_args *args)
{
- edev->ndev->mtu = args->mtu;
+ edev->ndev->mtu = args->u.mtu;
}
/* Netdevice NDOs */
-#define ETH_MAX_JUMBO_PACKET_SIZE 9600
-#define ETH_MIN_PACKET_SIZE 60
int qede_change_mtu(struct net_device *ndev, int new_mtu)
{
struct qede_dev *edev = netdev_priv(ndev);
- union qede_reload_args args;
-
- if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
- ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
- DP_ERR(edev, "Can't support requested MTU size\n");
- return -EINVAL;
- }
+ struct qede_reload_args args;
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"Configuring MTU size of %d\n", new_mtu);
- /* Set the mtu field and re-start the interface if needed*/
- args.mtu = new_mtu;
+ /* Set the mtu field and re-start the interface if needed */
+ args.u.mtu = new_mtu;
+ args.func = &qede_update_mtu;
+ qede_reload(edev, &args, false);
- if (netif_running(edev->ndev))
- qede_reload(edev, &qede_update_mtu, &args);
-
- qede_update_mtu(edev, &args);
+ edev->ops->common->update_mtu(edev->cdev, new_mtu);
return 0;
}
@@ -756,6 +839,8 @@ static void qede_get_channels(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
channels->max_combined = QEDE_MAX_RSS_CNT(edev);
+ channels->max_rx = QEDE_MAX_RSS_CNT(edev);
+ channels->max_tx = QEDE_MAX_RSS_CNT(edev);
channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
edev->fp_num_rx;
channels->tx_count = edev->fp_num_tx;
@@ -820,8 +905,14 @@ static int qede_set_channels(struct net_device *dev,
edev->req_queues = count;
edev->req_num_tx = channels->tx_count;
edev->req_num_rx = channels->rx_count;
- if (netif_running(dev))
- qede_reload(edev, NULL, NULL);
+ /* Reset the indirection table if rx queue count is updated */
+ if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
+ edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
+ memset(&edev->rss_params.rss_ind_table, 0,
+ sizeof(edev->rss_params.rss_ind_table));
+ }
+
+ qede_reload(edev, NULL, false);
return 0;
}
@@ -1053,6 +1144,12 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
struct qede_dev *edev = netdev_priv(dev);
int i;
+ if (edev->dev_info.common.num_hwfns > 1) {
+ DP_INFO(edev,
+ "RSS configuration is not supported for 100G devices\n");
+ return -EOPNOTSUPP;
+ }
+
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
@@ -1121,7 +1218,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
- txq = edev->fp_array[i].txqs;
+ txq = edev->fp_array[i].txq;
break;
}
}
@@ -1133,7 +1230,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
- txq->sw_tx_ring[idx].skb = skb;
+ txq->sw_tx_ring.skbs[idx].skb = skb;
first_bd = qed_chain_produce(&txq->tx_pbl);
memset(first_bd, 0, sizeof(*first_bd));
val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
@@ -1184,10 +1281,10 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
}
first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
txq->sw_tx_cons++;
- txq->sw_tx_ring[idx].skb = NULL;
+ txq->sw_tx_ring.skbs[idx].skb = NULL;
return 0;
}
@@ -1199,8 +1296,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
+ int i, rc = 0;
u8 *data_ptr;
- int i;
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
@@ -1219,46 +1316,60 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
* queue and that the loopback traffic is not IP.
*/
for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
- if (qede_has_rx_work(rxq))
+ if (!qede_has_rx_work(rxq)) {
+ usleep_range(100, 200);
+ continue;
+ }
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ /* Memory barrier to prevent the CPU from doing speculative
+ * reads of CQE/BD before reading hw_comp_cons. If the CQE is
+ * read before it is written by FW, then FW writes CQE and SB,
+ * and then the CPU reads the hw_comp_cons, it will use an old
+ * CQE.
+ */
+ rmb();
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+ fp_cqe->placement_offset +
+ sw_rx_data->page_offset);
+ if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
+ ether_addr_equal(data_ptr + ETH_ALEN,
+ edev->ndev->dev_addr)) {
+ for (i = ETH_HLEN; i < len; i++)
+ if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+ rc = -1;
+ break;
+ }
+
+ qede_recycle_rx_bd_ring(rxq, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
break;
- usleep_range(100, 200);
+ }
+
+ DP_INFO(edev, "Not the transmitted packet\n");
+ qede_recycle_rx_bd_ring(rxq, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
}
- if (!qede_has_rx_work(rxq)) {
+ if (i == QEDE_SELFTEST_POLL_COUNT) {
DP_NOTICE(edev, "Failed to receive the traffic\n");
return -1;
}
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
-
- /* Memory barrier to prevent the CPU from doing speculative reads of CQE
- * / BD before reading hw_comp_cons. If the CQE is read before it is
- * written by FW, then FW writes CQE and SB, and then the CPU reads the
- * hw_comp_cons, it will use an old CQE.
- */
- rmb();
-
- /* Get the CQE from the completion ring */
- cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
-
- /* Get the data from the SW ring */
- sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
- sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
- fp_cqe = &cqe->fast_path_regular;
- len = le16_to_cpu(fp_cqe->len_on_first_bd);
- data_ptr = (u8 *)(page_address(sw_rx_data->data) +
- fp_cqe->placement_offset + sw_rx_data->page_offset);
- for (i = ETH_HLEN; i < len; i++)
- if (data_ptr[i] != (unsigned char)(i & 0xff)) {
- DP_NOTICE(edev, "Loopback test failed\n");
- qede_recycle_rx_bd_ring(rxq, edev, 1);
- return -1;
- }
+ qede_update_rx_prod(edev, rxq);
- qede_recycle_rx_bd_ring(rxq, edev, 1);
-
- return 0;
+ return rc;
}
static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
@@ -1369,6 +1480,11 @@ static void qede_self_test(struct net_device *dev,
buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
+
+ if (edev->ops->common->selftest->selftest_nvram(edev->cdev)) {
+ buf[QEDE_ETHTOOL_NVRAM_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
}
static int qede_set_tunable(struct net_device *dev,
@@ -1419,6 +1535,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_drvinfo = qede_get_drvinfo,
.get_regs_len = qede_get_regs_len,
.get_regs = qede_get_regs,
+ .get_wol = qede_get_wol,
+ .set_wol = qede_set_wol,
.get_msglevel = qede_get_msglevel,
.set_msglevel = qede_set_msglevel,
.nway_reset = qede_nway_reset,
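The statistics rework in qede_ethtool.c above keeps the offsetof()-table
pattern but moves the pointer arithmetic into per-queue helpers such as
qede_get_ethtool_stats_rxq(). The core trick, shown as a self-contained
sketch rather than driver code (all names here are illustrative):

	#include <stddef.h>

	struct demo_rxq {
		unsigned long long rcv_pkts;
		unsigned long long rx_hw_errors;
	};

	static const struct {
		size_t offset;
		const char *string;
	} demo_stats[] = {
		{ offsetof(struct demo_rxq, rcv_pkts), "rcv_pkts" },
		{ offsetof(struct demo_rxq, rx_hw_errors), "rx_hw_errors" },
	};

	/* Read one counter through its table offset - the same access
	 * qede_get_ethtool_stats_rxq() performs with qede_rqstats_arr[].
	 */
	static unsigned long long demo_read(struct demo_rxq *rxq, int i)
	{
		return *(unsigned long long *)
			((char *)rxq + demo_stats[i].offset);
	}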
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 343038ca047d..aecdd1c5c0ea 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -94,11 +94,26 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
#define TX_TIMEOUT (5 * HZ)
+/* Utilize last protocol index for XDP */
+#define XDP_PI 11
+
static void qede_remove(struct pci_dev *pdev);
-static int qede_alloc_rx_buffer(struct qede_dev *edev,
- struct qede_rx_queue *rxq);
+static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
+/* The qede lock is used to protect driver state change and driver flows that
+ * are not reentrant.
+ */
+void __qede_lock(struct qede_dev *edev)
+{
+ mutex_lock(&edev->qede_lock);
+}
+
+void __qede_unlock(struct qede_dev *edev)
+{
+ mutex_unlock(&edev->qede_lock);
+}
+
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
@@ -166,15 +181,20 @@ static struct pci_driver qede_pci_driver = {
.id_table = qede_pci_tbl,
.probe = qede_probe,
.remove = qede_remove,
+ .shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
.sriov_configure = qede_sriov_configure,
#endif
};
-static void qede_force_mac(void *dev, u8 *mac)
+static void qede_force_mac(void *dev, u8 *mac, bool forced)
{
struct qede_dev *edev = dev;
+ /* MAC hints take effect only if we haven't set one already */
+ if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
+ return;
+
ether_addr_copy(edev->ndev->dev_addr, mac);
ether_addr_copy(edev->primary_mac, mac);
}
@@ -284,12 +304,12 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
struct qede_tx_queue *txq, int *len)
{
u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
- struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+ struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
struct eth_tx_1st_bd *first_bd;
struct eth_tx_bd *tx_data_bd;
int bds_consumed = 0;
int nbds;
- bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
+ bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
int i, split_bd_len = 0;
if (unlikely(!skb)) {
@@ -313,8 +333,8 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
split_bd_len = BD_UNMAP_LEN(split);
bds_consumed++;
}
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
@@ -329,20 +349,19 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
/* Free skb */
dev_kfree_skb_any(skb);
- txq->sw_tx_ring[idx].skb = NULL;
- txq->sw_tx_ring[idx].flags = 0;
+ txq->sw_tx_ring.skbs[idx].skb = NULL;
+ txq->sw_tx_ring.skbs[idx].flags = 0;
return 0;
}
/* Unmap the data and free skb when mapping failed during start_xmit */
-static void qede_free_failed_tx_pkt(struct qede_dev *edev,
- struct qede_tx_queue *txq,
+static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
struct eth_tx_1st_bd *first_bd,
int nbd, bool data_split)
{
u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
- struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+ struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
struct eth_tx_bd *tx_data_bd;
int i, split_bd_len = 0;
@@ -359,15 +378,15 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
nbd--;
}
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+ dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
for (i = 0; i < nbd; i++) {
tx_data_bd = (struct eth_tx_bd *)
qed_chain_produce(&txq->tx_pbl);
if (tx_data_bd->nbytes)
- dma_unmap_page(&edev->pdev->dev,
+ dma_unmap_page(txq->dev,
BD_UNMAP_ADDR(tx_data_bd),
BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
}
@@ -378,12 +397,11 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
/* Free skb */
dev_kfree_skb_any(skb);
- txq->sw_tx_ring[idx].skb = NULL;
- txq->sw_tx_ring[idx].flags = 0;
+ txq->sw_tx_ring.skbs[idx].skb = NULL;
+ txq->sw_tx_ring.skbs[idx].flags = 0;
}
-static u32 qede_xmit_type(struct qede_dev *edev,
- struct sk_buff *skb, int *ipv6_ext)
+static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
{
u32 rc = XMIT_L4_CSUM;
__be16 l3_proto;
@@ -396,8 +414,19 @@ static u32 qede_xmit_type(struct qede_dev *edev,
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
*ipv6_ext = 1;
- if (skb->encapsulation)
+ if (skb->encapsulation) {
rc |= XMIT_ENC;
+ if (skb_is_gso(skb)) {
+ unsigned short gso_type = skb_shinfo(skb)->gso_type;
+
+ if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
+ (gso_type & SKB_GSO_GRE_CSUM))
+ rc |= XMIT_ENC_GSO_L4_CSUM;
+
+ rc |= XMIT_LSO;
+ return rc;
+ }
+ }
if (skb_is_gso(skb))
rc |= XMIT_LSO;
@@ -439,18 +468,16 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}
-static int map_frag_to_bd(struct qede_dev *edev,
+static int map_frag_to_bd(struct qede_tx_queue *txq,
skb_frag_t *frag, struct eth_tx_bd *bd)
{
dma_addr_t mapping;
/* Map skb non-linear frag data for DMA */
- mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
+ mapping = skb_frag_dma_map(txq->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
- DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
+ if (unlikely(dma_mapping_error(txq->dev, mapping)))
return -ENOMEM;
- }
/* Setup the data pointer of the frag data */
BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
@@ -470,8 +497,7 @@ static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
-static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
- u8 xmit_type)
+static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
{
int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
@@ -507,6 +533,47 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
mmiowb();
}
+static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
+ struct sw_rx_data *metadata, u16 padding, u16 length)
+{
+ struct qede_tx_queue *txq = fp->xdp_tx;
+ u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ struct eth_tx_1st_bd *first_bd;
+
+ if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
+ txq->stopped_cnt++;
+ return -ENOMEM;
+ }
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+ memset(first_bd, 0, sizeof(*first_bd));
+ first_bd->data.bd_flags.bitfields =
+ BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
+ first_bd->data.bitfields |=
+ (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ first_bd->data.nbds = 1;
+
+ /* We can safely ignore the offset, as it's 0 for XDP */
+ BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
+
+ /* Synchronize the buffer back to device, as program [probably]
+ * has changed it.
+ */
+ dma_sync_single_for_device(&edev->pdev->dev,
+ metadata->mapping + padding,
+ length, PCI_DMA_TODEVICE);
+
+ txq->sw_tx_ring.pages[idx] = metadata->data;
+ txq->sw_tx_prod++;
+
+ /* Mark the fastpath for future XDP doorbell */
+ fp->xdp_xmit = 1;
+
+ return 0;
+}
+
/* Main transmit function */
static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
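Note that qede_xdp_xmit() above only produces BDs and sets fp->xdp_xmit
rather than ringing the doorbell per packet. The flush itself is outside
this excerpt; a hedged sketch of the implied coalescing, assuming it runs
once at the end of the NAPI poll (example_xdp_flush is a hypothetical
name, not driver code):

	static void example_xdp_flush(struct qede_fastpath *fp)
	{
		if (fp->xdp_xmit) {
			fp->xdp_xmit = 0;
			/* One producer/doorbell update covers every BD
			 * queued by qede_xdp_xmit() during this poll.
			 */
			qede_update_tx_producer(fp->xdp_tx);
		}
	}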
@@ -530,15 +597,15 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Get tx-queue context and netdev index */
txq_index = skb_get_queue_mapping(skb);
WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
- txq = QEDE_TX_QUEUE(edev, txq_index);
+ txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
netdev_txq = netdev_get_tx_queue(ndev, txq_index);
WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
- xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
+ xmit_type = qede_xmit_type(skb, &ipv6_ext);
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
- if (qede_pkt_req_lin(edev, skb, xmit_type)) {
+ if (qede_pkt_req_lin(skb, xmit_type)) {
if (skb_linearize(skb)) {
DP_NOTICE(edev,
"SKB linearization failed - silently dropping this SKB\n");
@@ -550,7 +617,7 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
- txq->sw_tx_ring[idx].skb = skb;
+ txq->sw_tx_ring.skbs[idx].skb = skb;
first_bd = (struct eth_tx_1st_bd *)
qed_chain_produce(&txq->tx_pbl);
memset(first_bd, 0, sizeof(*first_bd));
@@ -558,11 +625,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
/* Map skb linear data for DMA and set in the first BD */
- mapping = dma_map_single(&edev->pdev->dev, skb->data,
+ mapping = dma_map_single(txq->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ if (unlikely(dma_mapping_error(txq->dev, mapping))) {
DP_NOTICE(edev, "SKB mapping failed\n");
- qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+ qede_free_failed_tx_pkt(txq, first_bd, 0, false);
qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
@@ -633,6 +700,12 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (unlikely(xmit_type & XMIT_ENC)) {
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+
+ if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
+ u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+
+ first_bd->data.bd_flags.bitfields |= 1 << tmp;
+ }
hlen = qede_get_skb_hlen(skb, true);
} else {
first_bd->data.bd_flags.bitfields |=
@@ -664,7 +737,7 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* this marks the BD as one that has no
* individual mapping
*/
- txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
+ txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
first_bd->nbytes = cpu_to_le16(hlen);
@@ -680,12 +753,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Handle fragmented skb */
/* special handle for frags inside 2nd and 3rd bds.. */
while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
- rc = map_frag_to_bd(edev,
+ rc = map_frag_to_bd(txq,
&skb_shinfo(skb)->frags[frag_idx],
tx_data_bd);
if (rc) {
- qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
- data_split);
+ qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
@@ -705,12 +777,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
memset(tx_data_bd, 0, sizeof(*tx_data_bd));
- rc = map_frag_to_bd(edev,
+ rc = map_frag_to_bd(txq,
&skb_shinfo(skb)->frags[frag_idx],
tx_data_bd);
if (rc) {
- qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
- data_split);
+ qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
@@ -775,6 +846,27 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}
+static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ struct eth_tx_1st_bd *bd;
+ u16 hw_bd_cons;
+
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ barrier();
+
+ while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+ bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
+ NUM_TX_BDS_MAX]);
+
+ txq->sw_tx_cons++;
+ txq->xmit_pkts++;
+ }
+}
+
static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
@@ -858,16 +950,6 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq)
return hw_comp_cons != sw_comp_cons;
}
-static bool qede_has_tx_work(struct qede_fastpath *fp)
-{
- u8 tc;
-
- for (tc = 0; tc < fp->edev->num_tc; tc++)
- if (qede_txq_has_work(&fp->txqs[tc]))
- return true;
- return false;
-}
-
static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
qed_chain_consume(&rxq->rx_bd_ring);
@@ -877,8 +959,7 @@ static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
/* This function reuses the buffer(from an offset) from
* consumer index to producer index in the bd ring
*/
-static inline void qede_reuse_page(struct qede_dev *edev,
- struct qede_rx_queue *rxq,
+static inline void qede_reuse_page(struct qede_rx_queue *rxq,
struct sw_rx_data *curr_cons)
{
struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
@@ -900,27 +981,62 @@ static inline void qede_reuse_page(struct qede_dev *edev,
/* In case of allocation failures reuse buffers
* from consumer index to produce buffers for firmware
*/
-void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
- struct qede_dev *edev, u8 count)
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
{
struct sw_rx_data *curr_cons;
for (; count > 0; count--) {
curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
- qede_reuse_page(edev, rxq, curr_cons);
+ qede_reuse_page(rxq, curr_cons);
qede_rx_bd_ring_consume(rxq);
}
}
-static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
- struct qede_rx_queue *rxq,
+static int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
+{
+ struct sw_rx_data *sw_rx_data;
+ struct eth_rx_bd *rx_bd;
+ dma_addr_t mapping;
+ struct page *data;
+
+ data = alloc_pages(GFP_ATOMIC, 0);
+ if (unlikely(!data))
+ return -ENOMEM;
+
+ /* Map the entire page as it would be used
+ * for multiple RX buffer segment size mapping.
+ */
+ mapping = dma_map_page(rxq->dev, data, 0,
+ PAGE_SIZE, rxq->data_direction);
+ if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
+ __free_page(data);
+ return -ENOMEM;
+ }
+
+ sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ sw_rx_data->page_offset = 0;
+ sw_rx_data->data = data;
+ sw_rx_data->mapping = mapping;
+
+ /* Advance PROD and get BD pointer */
+ rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+ WARN_ON(!rx_bd);
+ rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+ rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+
+ rxq->sw_rx_prod++;
+
+ return 0;
+}
+
+static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
struct sw_rx_data *curr_cons)
{
/* Move to the next segment in the page */
curr_cons->page_offset += rxq->rx_buf_seg_size;
if (curr_cons->page_offset == PAGE_SIZE) {
- if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+ if (unlikely(qede_alloc_rx_buffer(rxq))) {
/* Since we failed to allocate new buffer
* current buffer can be used again.
*/
@@ -929,22 +1045,21 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
return -ENOMEM;
}
- dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(rxq->dev, curr_cons->mapping,
+ PAGE_SIZE, rxq->data_direction);
} else {
/* Increment refcount of the page as we don't want
* network stack to take the ownership of the page
* which can be recycled multiple times by the driver.
*/
page_ref_inc(curr_cons->data);
- qede_reuse_page(edev, rxq, curr_cons);
+ qede_reuse_page(rxq, curr_cons);
}
return 0;
}
-static inline void qede_update_rx_prod(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
@@ -972,22 +1087,20 @@ static inline void qede_update_rx_prod(struct qede_dev *edev,
mmiowb();
}
-static u32 qede_get_rxhash(struct qede_dev *edev,
- u8 bitfields,
- __le32 rss_hash, enum pkt_hash_types *rxhash_type)
+static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
{
+ enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
enum rss_hash_type htype;
+ u32 hash = 0;
htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
-
- if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
- *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
- (htype == RSS_HASH_TYPE_IPV6)) ?
- PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
- return le32_to_cpu(rss_hash);
+ if (htype) {
+ hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+ (htype == RSS_HASH_TYPE_IPV6)) ?
+ PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+ hash = le32_to_cpu(rss_hash);
}
- *rxhash_type = PKT_HASH_TYPE_NONE;
- return 0;
+ skb_set_hash(skb, hash, hash_type);
}
static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
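The buffer scheme in qede_alloc_rx_buffer()/qede_realloc_rx_buffer()
above maps one full page and then hands it out in rx_buf_seg_size
slices, taking a page reference per slice so the stack and the ring can
hold the page simultaneously. A worked example of the arithmetic
(numbers are illustrative, assuming 4K pages and 2K segments):

	/* PAGE_SIZE == 4096, rxq->rx_buf_seg_size == 2048:
	 *
	 *   page_offset 0    -> BD posted at mapping + 0
	 *   page_offset 2048 -> page_ref_inc() + qede_reuse_page(),
	 *                       BD posted at mapping + 2048
	 *   page_offset 4096 -> page exhausted: qede_alloc_rx_buffer()
	 *                       posts a fresh page and the old one is
	 *                       dma_unmap_page()'d for the stack to own
	 */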
@@ -1003,12 +1116,14 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
static inline void qede_skb_receive(struct qede_dev *edev,
struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq,
struct sk_buff *skb, u16 vlan_tag)
{
if (vlan_tag)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
napi_gro_receive(&fp->napi, skb);
+ fp->rxq->rcv_pkts++;
}
static void qede_set_gro_params(struct qede_dev *edev,
@@ -1036,7 +1151,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
struct sk_buff *skb = tpa_info->skb;
- if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
+ if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
goto out;
/* Add one frag and update the appropriate fields in the skb */
@@ -1044,7 +1159,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
current_bd->data, current_bd->page_offset,
len_on_bd);
- if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
+ if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
/* Incr page ref count to reuse on allocation failure
* so that it doesn't get freed while freeing SKB.
*/
@@ -1062,8 +1177,9 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
return 0;
out:
- tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
- qede_recycle_rx_bd_ring(rxq, edev, 1);
+ tpa_info->state = QEDE_AGG_STATE_ERROR;
+ qede_recycle_rx_bd_ring(rxq, 1);
+
return -ENOMEM;
}
@@ -1074,12 +1190,10 @@ static void qede_tpa_start(struct qede_dev *edev,
struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
- struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
- dma_addr_t mapping = tpa_info->replace_buf_mapping;
+ struct sw_rx_data *replace_buf = &tpa_info->buffer;
+ dma_addr_t mapping = tpa_info->buffer_mapping;
struct sw_rx_data *sw_rx_data_cons;
struct sw_rx_data *sw_rx_data_prod;
- enum pkt_hash_types rxhash_type;
- u32 rxhash;
sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
@@ -1100,11 +1214,11 @@ static void qede_tpa_start(struct qede_dev *edev,
/* move partial skb from cons to pool (don't unmap yet)
* save mapping, in case we drop the packet later on.
*/
- tpa_info->start_buf = *sw_rx_data_cons;
+ tpa_info->buffer = *sw_rx_data_cons;
mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
le32_to_cpu(rx_bd_cons->addr.lo));
- tpa_info->start_buf_mapping = mapping;
+ tpa_info->buffer_mapping = mapping;
rxq->sw_rx_cons++;
/* set tpa state to start only if we are able to allocate skb
@@ -1115,27 +1229,27 @@ static void qede_tpa_start(struct qede_dev *edev,
le16_to_cpu(cqe->len_on_first_bd));
if (unlikely(!tpa_info->skb)) {
DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
- tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ tpa_info->state = QEDE_AGG_STATE_ERROR;
goto cons_buf;
}
- skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
- memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));
-
/* Start filling in the aggregation info */
+ skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
tpa_info->frag_id = 0;
- tpa_info->agg_state = QEDE_AGG_STATE_START;
+ tpa_info->state = QEDE_AGG_STATE_START;
- rxhash = qede_get_rxhash(edev, cqe->bitfields,
- cqe->rss_hash, &rxhash_type);
- skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
+ /* Store some information from first CQE */
+ tpa_info->start_cqe_placement_offset = cqe->placement_offset;
+ tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
if ((le16_to_cpu(cqe->pars_flags.flags) >>
PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
- PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
else
tpa_info->vlan_tag = 0;
+ qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
+
/* This is needed in order to enable forwarding support */
qede_set_gro_params(edev, tpa_info->skb, cqe);
@@ -1147,7 +1261,7 @@ cons_buf: /* We still need to handle bd_len_list to consume buffers */
if (unlikely(cqe->ext_bd_len_list[1])) {
DP_ERR(edev,
"Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
- tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ tpa_info->state = QEDE_AGG_STATE_ERROR;
}
}
@@ -1198,7 +1312,7 @@ static void qede_gro_receive(struct qede_dev *edev,
#ifdef CONFIG_INET
if (skb_shinfo(skb)->gso_size) {
- skb_set_network_header(skb, 0);
+ skb_reset_network_header(skb);
switch (skb->protocol) {
case htons(ETH_P_IP):
@@ -1217,7 +1331,7 @@ static void qede_gro_receive(struct qede_dev *edev,
send_skb:
skb_record_rx_queue(skb, fp->rxq->rxq_id);
- qede_skb_receive(edev, fp, skb, vlan_tag);
+ qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
}
static inline void qede_tpa_cont(struct qede_dev *edev,
@@ -1254,7 +1368,7 @@ static void qede_tpa_end(struct qede_dev *edev,
DP_ERR(edev,
"Strange - TPA emd with more than a single len_list entry\n");
- if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
+ if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
goto err;
/* Sanity */
@@ -1268,14 +1382,9 @@ static void qede_tpa_end(struct qede_dev *edev,
le16_to_cpu(cqe->total_packet_len), skb->len);
memcpy(skb->data,
- page_address(tpa_info->start_buf.data) +
- tpa_info->start_cqe.placement_offset +
- tpa_info->start_buf.page_offset,
- le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));
-
- /* Recycle [mapped] start buffer for the next replacement */
- tpa_info->replace_buf = tpa_info->start_buf;
- tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
+ page_address(tpa_info->buffer.data) +
+ tpa_info->start_cqe_placement_offset +
+ tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
/* Finalize the SKB */
skb->protocol = eth_type_trans(skb, edev->ndev);
@@ -1288,18 +1397,11 @@ static void qede_tpa_end(struct qede_dev *edev,
qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
- tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+ tpa_info->state = QEDE_AGG_STATE_NONE;
return;
err:
- /* The BD starting the aggregation is still mapped; Re-use it for
- * future aggregations [as replacement buffer]
- */
- memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
- sizeof(struct sw_rx_data));
- tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
- tpa_info->start_buf.data = NULL;
- tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+ tpa_info->state = QEDE_AGG_STATE_NONE;
dev_kfree_skb_any(tpa_info->skb);
tpa_info->skb = NULL;
}
@@ -1381,238 +1483,364 @@ static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
return false;
}
-static int qede_rx_int(struct qede_fastpath *fp, int budget)
+/* Return true iff packet is to be passed to stack */
+static bool qede_rx_xdp(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq,
+ struct bpf_prog *prog,
+ struct sw_rx_data *bd,
+ struct eth_fast_path_rx_reg_cqe *cqe)
{
- struct qede_dev *edev = fp->edev;
- struct qede_rx_queue *rxq = fp->rxq;
-
- u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
- int rx_pkt = 0;
- u8 csum_flag;
+ u16 len = le16_to_cpu(cqe->len_on_first_bd);
+ struct xdp_buff xdp;
+ enum xdp_action act;
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+ xdp.data = page_address(bd->data) + cqe->placement_offset;
+ xdp.data_end = xdp.data + len;
- /* Memory barrier to prevent the CPU from doing speculative reads of CQE
- * / BD in the while-loop before reading hw_comp_cons. If the CQE is
- * read before it is written by FW, then FW writes CQE and SB, and then
- * the CPU reads the hw_comp_cons, it will use an old CQE.
+ /* Queues always have a full reset currently, so for the time
+ * being until there's atomic program replace just mark read
+ * side for map helpers.
*/
- rmb();
+ rcu_read_lock();
+ act = bpf_prog_run_xdp(prog, &xdp);
+ rcu_read_unlock();
- /* Loop to complete all indicated BDs */
- while (sw_comp_cons != hw_comp_cons) {
- struct eth_fast_path_rx_reg_cqe *fp_cqe;
- enum pkt_hash_types rxhash_type;
- enum eth_rx_cqe_type cqe_type;
- struct sw_rx_data *sw_rx_data;
- union eth_rx_cqe *cqe;
- struct sk_buff *skb;
- struct page *data;
- __le16 flags;
- u16 len, pad;
- u32 rx_hash;
-
- /* Get the CQE from the completion ring */
- cqe = (union eth_rx_cqe *)
- qed_chain_consume(&rxq->rx_comp_ring);
- cqe_type = cqe->fast_path_regular.type;
-
- if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
- edev->ops->eth_cqe_completion(
- edev->cdev, fp->id,
- (struct eth_slow_path_rx_cqe *)cqe);
- goto next_cqe;
+ if (act == XDP_PASS)
+ return true;
+
+ /* Count number of packets not to be passed to stack */
+ rxq->xdp_no_pass++;
+
+ switch (act) {
+ case XDP_TX:
+ /* We need the replacement buffer before transmit. */
+ if (qede_alloc_rx_buffer(rxq)) {
+ qede_recycle_rx_bd_ring(rxq, 1);
+ return false;
}
- if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
- switch (cqe_type) {
- case ETH_RX_CQE_TYPE_TPA_START:
- qede_tpa_start(edev, rxq,
- &cqe->fast_path_tpa_start);
- goto next_cqe;
- case ETH_RX_CQE_TYPE_TPA_CONT:
- qede_tpa_cont(edev, rxq,
- &cqe->fast_path_tpa_cont);
- goto next_cqe;
- case ETH_RX_CQE_TYPE_TPA_END:
- qede_tpa_end(edev, fp,
- &cqe->fast_path_tpa_end);
- goto next_rx_only;
- default:
- break;
- }
+ /* Now if there's a transmission problem, we'd still have to
+ * throw current buffer, as replacement was already allocated.
+ */
+ if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
+ dma_unmap_page(rxq->dev, bd->mapping,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(bd->data);
}
- /* Get the data from the SW ring */
- sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
- sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
- data = sw_rx_data->data;
-
- fp_cqe = &cqe->fast_path_regular;
- len = le16_to_cpu(fp_cqe->len_on_first_bd);
- pad = fp_cqe->placement_offset;
- flags = cqe->fast_path_regular.pars_flags.flags;
-
- /* If this is an error packet then drop it */
- parse_flag = le16_to_cpu(flags);
-
- csum_flag = qede_check_csum(parse_flag);
- if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
- if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
- parse_flag)) {
- rxq->rx_ip_frags++;
- goto alloc_skb;
- }
+ /* Regardless, we've consumed an Rx BD */
+ qede_rx_bd_ring_consume(rxq);
+ return false;
- DP_NOTICE(edev,
- "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
- sw_comp_cons, parse_flag);
- rxq->rx_hw_errors++;
- qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
- goto next_cqe;
- }
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ case XDP_DROP:
+ qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
+ }
-alloc_skb:
- skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
- if (unlikely(!skb)) {
- DP_NOTICE(edev,
- "skb allocation failed, dropping incoming packet\n");
- qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
- rxq->rx_alloc_errors++;
- goto next_cqe;
+ return false;
+}
+
+static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *bd, u16 len,
+ u16 pad)
+{
+ unsigned int offset = bd->page_offset;
+ struct skb_frag_struct *frag;
+ struct page *page = bd->data;
+ unsigned int pull_len;
+ struct sk_buff *skb;
+ unsigned char *va;
+
+ /* Allocate a new SKB with a sufficiently large header len */
+ skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Copy data into SKB - if it's small, we can simply copy it and
+ * re-use the already allocated & mapped memory.
+ */
+ if (len + pad <= edev->rx_copybreak) {
+ memcpy(skb_put(skb, len),
+ page_address(page) + pad + offset, len);
+ qede_reuse_page(rxq, bd);
+ goto out;
+ }
+
+ frag = &skb_shinfo(skb)->frags[0];
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, pad + offset, len, rxq->rx_buf_seg_size);
+
+ va = skb_frag_address(frag);
+ pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
+
+ /* Align the pull_len to optimize memcpy */
+ memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
+
+ /* Correct the skb & frag sizes offset after the pull */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+
+ if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
+ /* Incr page ref count to reuse on allocation failure so
+ * that it doesn't get freed while freeing SKB [as its
+ * already mapped there].
+ */
+ page_ref_inc(page);
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+out:
+ /* We've consumed the first BD and prepared an SKB */
+ qede_rx_bd_ring_consume(rxq);
+ return skb;
+}
+
+static int qede_rx_build_jumbo(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sk_buff *skb,
+ struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 first_bd_len)
+{
+ u16 pkt_len = le16_to_cpu(cqe->pkt_len);
+ struct sw_rx_data *bd;
+ u16 bd_cons_idx;
+ u8 num_frags;
+
+ pkt_len -= first_bd_len;
+
+ /* We've already used one BD for the SKB. Now take care of the rest */
+ for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
+ u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
+ pkt_len;
+
+ if (unlikely(!cur_size)) {
+ DP_ERR(edev,
+ "Still got %d BDs for mapping jumbo, but length became 0\n",
+ num_frags);
+ goto out;
}
- /* Copy data into SKB */
- if (len + pad <= edev->rx_copybreak) {
- memcpy(skb_put(skb, len),
- page_address(data) + pad +
- sw_rx_data->page_offset, len);
- qede_reuse_page(edev, rxq, sw_rx_data);
+ /* We need a replacement buffer for each BD */
+ if (unlikely(qede_alloc_rx_buffer(rxq)))
+ goto out;
+
+ /* Now that we've allocated the replacement buffer,
+ * we can safely consume the next BD and map it to the SKB.
+ */
+ bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ bd = &rxq->sw_rx_ring[bd_cons_idx];
+ qede_rx_bd_ring_consume(rxq);
+
+ dma_unmap_page(rxq->dev, bd->mapping,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+ bd->data, 0, cur_size);
+
+ skb->truesize += PAGE_SIZE;
+ skb->data_len += cur_size;
+ skb->len += cur_size;
+ pkt_len -= cur_size;
+ }
+
+ if (unlikely(pkt_len))
+ DP_ERR(edev,
+ "Mapped all BDs of jumbo, but still have %d bytes\n",
+ pkt_len);
+
+out:
+ return num_frags;
+}
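To make the BD accounting concrete (illustrative numbers): with 4K pages, a 9,018-byte jumbo frame that placed 3,018 bytes on its first BD enters this loop with pkt_len = 6,000 and bd_num - 1 = 2, consuming two further BDs of 4,096 and 1,904 bytes; num_frags and pkt_len reach zero together, so the function returns 0 on the clean path.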
+
+static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq,
+ union eth_rx_cqe *cqe,
+ enum eth_rx_cqe_type type)
+{
+ switch (type) {
+ case ETH_RX_CQE_TYPE_TPA_START:
+ qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
+ return 0;
+ case ETH_RX_CQE_TYPE_TPA_CONT:
+ qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
+ return 0;
+ case ETH_RX_CQE_TYPE_TPA_END:
+ qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int qede_rx_process_cqe(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ u16 len, pad, bd_cons_idx, parse_flag;
+ enum eth_rx_cqe_type cqe_type;
+ union eth_rx_cqe *cqe;
+ struct sw_rx_data *bd;
+ struct sk_buff *skb;
+ __le16 flags;
+ u8 csum_flag;
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+ cqe_type = cqe->fast_path_regular.type;
+
+ /* Process an unlikely slowpath event */
+ if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+ struct eth_slow_path_rx_cqe *sp_cqe;
+
+ sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
+ edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
+ return 0;
+ }
+
+ /* Handle TPA cqes */
+ if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
+ return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
+
+ /* Get the data from the SW ring; Consume it only after it's evident
+ * we won't recycle it.
+ */
+ bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ bd = &rxq->sw_rx_ring[bd_cons_idx];
+
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ pad = fp_cqe->placement_offset;
+
+ /* Run eBPF program if one is attached */
+ if (xdp_prog)
+ if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
+ return 1;
+
+ /* If this is an error packet then drop it */
+ flags = cqe->fast_path_regular.pars_flags.flags;
+ parse_flag = le16_to_cpu(flags);
+
+ csum_flag = qede_check_csum(parse_flag);
+ if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+ if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
+ rxq->rx_ip_frags++;
} else {
- struct skb_frag_struct *frag;
- unsigned int pull_len;
- unsigned char *va;
-
- frag = &skb_shinfo(skb)->frags[0];
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
- pad + sw_rx_data->page_offset,
- len, rxq->rx_buf_seg_size);
-
- va = skb_frag_address(frag);
- pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
-
- /* Align the pull_len to optimize memcpy */
- memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
-
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-
- if (unlikely(qede_realloc_rx_buffer(edev, rxq,
- sw_rx_data))) {
- DP_ERR(edev, "Failed to allocate rx buffer\n");
- /* Incr page ref count to reuse on allocation
- * failure so that it doesn't get freed while
- * freeing SKB.
- */
-
- page_ref_inc(sw_rx_data->data);
- rxq->rx_alloc_errors++;
- qede_recycle_rx_bd_ring(rxq, edev,
- fp_cqe->bd_num);
- dev_kfree_skb_any(skb);
- goto next_cqe;
- }
+ DP_NOTICE(edev,
+ "CQE has error, flags = %x, dropping incoming packet\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
+ return 0;
}
+ }
- qede_rx_bd_ring_consume(rxq);
+ /* Basic validation passed; we need to prepare an SKB. On success,
+ * this also guarantees the first BD is finally consumed.
+ */
+ skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
+ if (!skb) {
+ rxq->rx_alloc_errors++;
+ qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
+ return 0;
+ }
- if (fp_cqe->bd_num != 1) {
- u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
- u8 num_frags;
-
- pkt_len -= len;
-
- for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
- num_frags--) {
- u16 cur_size = pkt_len > rxq->rx_buf_size ?
- rxq->rx_buf_size : pkt_len;
- if (unlikely(!cur_size)) {
- DP_ERR(edev,
- "Still got %d BDs for mapping jumbo, but length became 0\n",
- num_frags);
- qede_recycle_rx_bd_ring(rxq, edev,
- num_frags);
- dev_kfree_skb_any(skb);
- goto next_cqe;
- }
-
- if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
- qede_recycle_rx_bd_ring(rxq, edev,
- num_frags);
- dev_kfree_skb_any(skb);
- goto next_cqe;
- }
-
- sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
- sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
- qede_rx_bd_ring_consume(rxq);
-
- dma_unmap_page(&edev->pdev->dev,
- sw_rx_data->mapping,
- PAGE_SIZE, DMA_FROM_DEVICE);
-
- skb_fill_page_desc(skb,
- skb_shinfo(skb)->nr_frags++,
- sw_rx_data->data, 0,
- cur_size);
-
- skb->truesize += PAGE_SIZE;
- skb->data_len += cur_size;
- skb->len += cur_size;
- pkt_len -= cur_size;
- }
+ /* In case of a jumbo packet, several PAGE_SIZE'd buffers are pointed to
+ * by a single CQE.
+ */
+ if (fp_cqe->bd_num > 1) {
+ u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
+ fp_cqe, len);
- if (unlikely(pkt_len))
- DP_ERR(edev,
- "Mapped all BDs of jumbo, but still have %d bytes\n",
- pkt_len);
+ if (unlikely(unmapped_frags > 0)) {
+ qede_recycle_rx_bd_ring(rxq, unmapped_frags);
+ dev_kfree_skb_any(skb);
+ return 0;
}
+ }
- skb->protocol = eth_type_trans(skb, edev->ndev);
+ /* The SKB contains all the data; prepare the remaining metadata */
+ skb->protocol = eth_type_trans(skb, edev->ndev);
+ qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
+ qede_set_skb_csum(skb, csum_flag);
+ skb_record_rx_queue(skb, rxq->rxq_id);
- rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
- fp_cqe->rss_hash, &rxhash_type);
+ /* SKB is prepared - pass it to stack */
+ qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
- skb_set_hash(skb, rx_hash, rxhash_type);
+ return 1;
+}
- qede_set_skb_csum(skb, csum_flag);
+static int qede_rx_int(struct qede_fastpath *fp, int budget)
+{
+ struct qede_rx_queue *rxq = fp->rxq;
+ struct qede_dev *edev = fp->edev;
+ u16 hw_comp_cons, sw_comp_cons;
+ int work_done = 0;
- skb_record_rx_queue(skb, fp->rxq->rxq_id);
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
- qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-next_rx_only:
- rx_pkt++;
+ /* Memory barrier to prevent the CPU from speculatively reading the CQE
+ * / BD in the while-loop before reading hw_comp_cons. If the CQE were
+ * read before FW writes it, and FW then wrote the CQE and SB just
+ * before the CPU reads hw_comp_cons, the loop would process a stale CQE.
+ */
+ rmb();
-next_cqe: /* don't consume bd rx buffer */
+ /* Loop to complete all indicated BDs */
+ while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
+ qede_rx_process_cqe(edev, fp, rxq);
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
- /* CR TPA - revisit how to handle budget in TPA perhaps
- * increase on "end"
- */
- if (rx_pkt == budget)
- break;
- } /* repeat while sw_comp_cons != hw_comp_cons... */
+ work_done++;
+ }
/* Update producers */
qede_update_rx_prod(edev, rxq);
- rxq->rcv_pkts += rx_pkt;
+ return work_done;
+}
+
+static bool qede_poll_is_more_work(struct qede_fastpath *fp)
+{
+ qed_sb_update_sb_idx(fp->sb_info);
+
+ /* *_has_*_work() reads the status block, so we need to ensure that the
+ * status block indices have actually been read (qed_sb_update_sb_idx)
+ * before this check (*_has_*_work); otherwise, if a DMA arrived right
+ * after qede_has_rx_work and there were no rmb, the read
+ * (qed_sb_update_sb_idx) could be postponed until just before *_ack_sb
+ * and we would write a "newer" status block value to HW. In that case
+ * there would never be another interrupt until the next status block
+ * update, while unhandled work still remains.
+ */
+ rmb();
+
+ if (likely(fp->type & QEDE_FASTPATH_RX))
+ if (qede_has_rx_work(fp->rxq))
+ return true;
- return rx_pkt;
+ if (fp->type & QEDE_FASTPATH_XDP)
+ if (qede_txq_has_work(fp->xdp_tx))
+ return true;
+
+ if (likely(fp->type & QEDE_FASTPATH_TX))
+ if (qede_txq_has_work(fp->txq))
+ return true;
+
+ return false;
}
static int qede_poll(struct napi_struct *napi, int budget)
@@ -1621,48 +1849,35 @@ static int qede_poll(struct napi_struct *napi, int budget)
napi);
struct qede_dev *edev = fp->edev;
int rx_work_done = 0;
- u8 tc;
- for (tc = 0; tc < edev->num_tc; tc++)
- if (likely(fp->type & QEDE_FASTPATH_TX) &&
- qede_txq_has_work(&fp->txqs[tc]))
- qede_tx_int(edev, &fp->txqs[tc]);
+ if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
+ qede_tx_int(edev, fp->txq);
+
+ if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
+ qede_xdp_tx_int(edev, fp->xdp_tx);
rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
qede_has_rx_work(fp->rxq)) ?
qede_rx_int(fp, budget) : 0;
if (rx_work_done < budget) {
- qed_sb_update_sb_idx(fp->sb_info);
- /* *_has_*_work() reads the status block,
- * thus we need to ensure that status block indices
- * have been actually read (qed_sb_update_sb_idx)
- * prior to this check (*_has_*_work) so that
- * we won't write the "newer" value of the status block
- * to HW (if there was a DMA right after
- * qede_has_rx_work and if there is no rmb, the memory
- * reading (qed_sb_update_sb_idx) may be postponed
- * to right before *_ack_sb). In this case there
- * will never be another interrupt until there is
- * another update of the status block, while there
- * is still unhandled work.
- */
- rmb();
-
- /* Fall out from the NAPI loop if needed */
- if (!((likely(fp->type & QEDE_FASTPATH_RX) &&
- qede_has_rx_work(fp->rxq)) ||
- (likely(fp->type & QEDE_FASTPATH_TX) &&
- qede_has_tx_work(fp)))) {
+ if (!qede_poll_is_more_work(fp)) {
napi_complete(napi);
/* Update and reenable interrupts */
- qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
- 1 /*update*/);
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
} else {
rx_work_done = budget;
}
}
+ if (fp->xdp_xmit) {
+ u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
+
+ fp->xdp_xmit = 0;
+ fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
+ qede_update_tx_producer(fp->xdp_tx);
+ }
+
return rx_work_done;
}
@@ -1913,7 +2128,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct qede_dev *edev = netdev_priv(dev);
struct qede_vlan *vlan, *tmp;
- int rc;
+ int rc = 0;
DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
@@ -1937,6 +2152,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
}
/* If interface is down, cache this VLAN ID and return */
+ __qede_lock(edev);
if (edev->state != QEDE_STATE_OPEN) {
DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
"Interface is down, VLAN %d will be configured when interface is up\n",
@@ -1944,8 +2160,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
if (vid != 0)
edev->non_configured_vlans++;
list_add(&vlan->list, &edev->vlan_list);
-
- return 0;
+ goto out;
}
/* Check for the filter limit.
@@ -1961,7 +2176,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
DP_ERR(edev, "Failed to configure VLAN %d\n",
vlan->vid);
kfree(vlan);
- return -EINVAL;
+ goto out;
}
vlan->configured = true;
@@ -1978,7 +2193,9 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
list_add(&vlan->list, &edev->vlan_list);
- return 0;
+out:
+ __qede_unlock(edev);
+ return rc;
}
static void qede_del_vlan_from_list(struct qede_dev *edev,
@@ -2055,11 +2272,12 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct qede_dev *edev = netdev_priv(dev);
struct qede_vlan *vlan = NULL;
- int rc;
+ int rc = 0;
DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
/* Find whether entry exists */
+ __qede_lock(edev);
list_for_each_entry(vlan, &edev->vlan_list, list)
if (vlan->vid == vid)
break;
@@ -2067,7 +2285,7 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
if (!vlan || (vlan->vid != vid)) {
DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
"Vlan isn't configured\n");
- return 0;
+ goto out;
}
if (edev->state != QEDE_STATE_OPEN) {
@@ -2077,7 +2295,7 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
"Interface is down, removing VLAN from list only\n");
qede_del_vlan_from_list(edev, vlan);
- return 0;
+ goto out;
}
/* Remove vlan */
@@ -2086,7 +2304,7 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
vid);
if (rc) {
DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
- return -EINVAL;
+ goto out;
}
}
@@ -2097,6 +2315,8 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
*/
rc = qede_configure_vlan_filters(edev);
+out:
+ __qede_unlock(edev);
return rc;
}
@@ -2126,7 +2346,13 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
edev->accept_any_vlan = false;
}
-static int qede_set_features(struct net_device *dev, netdev_features_t features)
+static void qede_set_features_reload(struct qede_dev *edev,
+ struct qede_reload_args *args)
+{
+ edev->ndev->features = args->u.features;
+}
+
+int qede_set_features(struct net_device *dev, netdev_features_t features)
{
struct qede_dev *edev = netdev_priv(dev);
netdev_features_t changes = features ^ dev->features;
@@ -2140,9 +2366,23 @@ static int qede_set_features(struct net_device *dev, netdev_features_t features)
need_reload = edev->gro_disable;
}
- if (need_reload && netif_running(edev->ndev)) {
- dev->features = features;
- qede_reload(edev, NULL, NULL);
+ if (need_reload) {
+ struct qede_reload_args args;
+
+ args.u.features = features;
+ args.func = &qede_set_features_reload;
+
+ /* Make sure we actually need to reload.
+ * If an eBPF program is attached there are no FW aggregations,
+ * so no reload is needed.
+ */
+ __qede_lock(edev);
+ if (edev->xdp_prog)
+ args.func(edev, &args);
+ else
+ qede_reload(edev, &args, true);
+ __qede_unlock(edev);
+
return 1;
}
@@ -2219,6 +2459,82 @@ static void qede_udp_tunnel_del(struct net_device *dev,
schedule_delayed_work(&edev->sp_task, 0);
}
+/* 8B udp header + 8B base tunnel header + 32B option length */
+#define QEDE_MAX_TUN_HDR_LEN 48
+
+static netdev_features_t qede_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb->encapsulation) {
+ u8 l4_proto = 0;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_proto = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ /* Disable offloads for geneve tunnels, as the HW can't parse
+ * a geneve header whose option length exceeds 32B.
+ */
+ if ((l4_proto == IPPROTO_UDP) &&
+ ((skb_inner_mac_header(skb) -
+ skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
+ return features & ~(NETIF_F_CSUM_MASK |
+ NETIF_F_GSO_MASK);
+ }
+
+ return features;
+}
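Worked example of the check above: a VXLAN packet carries 8B UDP + 8B VXLAN header, so inner_mac - transport = 16B <= 48 and offloads are kept; a geneve packet with 40B of options yields 8 + 8 + 40 = 56B > 48, so checksum and GSO offloads are stripped and the stack falls back to software.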
+
+static void qede_xdp_reload_func(struct qede_dev *edev,
+ struct qede_reload_args *args)
+{
+ struct bpf_prog *old;
+
+ old = xchg(&edev->xdp_prog, args->u.new_prog);
+ if (old)
+ bpf_prog_put(old);
+}
+
+static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
+{
+ struct qede_reload_args args;
+
+ if (prog && prog->xdp_adjust_head) {
+ DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* If we're called, there was already a bpf reference increment */
+ args.func = &qede_xdp_reload_func;
+ args.u.new_prog = prog;
+ qede_reload(edev, &args, false);
+
+ return 0;
+}
+
+static int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return qede_xdp_set(edev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = !!edev->xdp_prog;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
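For context, the simplest consumer of the new ndo is an XDP program returning a per-frame verdict. A minimal drop-everything example is below; it is illustrative only, the section name and the loader (e.g. ip(8) or a libbpf-based tool) are outside this patch, and note that qede rejects programs using bpf_xdp_adjust_head():

	#include <linux/bpf.h>

	/* Drops every frame on the Rx queues it is attached to.
	 * Typically built with: clang -O2 -target bpf -c drop.c -o drop.o
	 */
	__attribute__((section("xdp"), used))
	int xdp_drop_all(struct xdp_md *ctx)
	{
		return XDP_DROP;
	}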
+
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -2243,6 +2559,8 @@ static const struct net_device_ops qede_netdev_ops = {
#endif
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
.ndo_udp_tunnel_del = qede_udp_tunnel_del,
+ .ndo_features_check = qede_features_check,
+ .ndo_xdp = qede_xdp,
};
/* -------------------------------------------------------------------------
@@ -2283,8 +2601,6 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
memset(&edev->stats, 0, sizeof(edev->stats));
memcpy(&edev->dev_info, info, sizeof(*info));
- edev->num_tc = edev->dev_info.num_tc;
-
INIT_LIST_HEAD(&edev->vlan_list);
return edev;
@@ -2309,6 +2625,8 @@ static void qede_init_ndev(struct qede_dev *edev)
qede_set_ethtool_ops(ndev);
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
/* user-changeable features */
hw_features = NETIF_F_GRO | NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -2316,11 +2634,14 @@ static void qede_init_ndev(struct qede_dev *edev)
/* Encap features */
hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_TSO_ECN;
+ NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
NETIF_F_TSO6 | NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
NETIF_F_HIGHDMA;
@@ -2330,8 +2651,14 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->hw_features = hw_features;
+ /* MTU range: 46 - 9600 */
+ ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
+ ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
+
/* Set network device HW mac */
ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+
+ ndev->mtu = edev->dev_info.common.mtu;
}
/* This function converts from 32b param to two params of level and module
@@ -2371,7 +2698,8 @@ static void qede_free_fp_array(struct qede_dev *edev)
kfree(fp->sb_info);
kfree(fp->rxq);
- kfree(fp->txqs);
+ kfree(fp->xdp_tx);
+ kfree(fp->txq);
}
kfree(edev->fp_array);
}
@@ -2404,7 +2732,7 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
for_each_queue(i) {
fp = &edev->fp_array[i];
- fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
+ fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
if (!fp->sb_info) {
DP_NOTICE(edev, "sb info struct allocation failed\n");
goto err;
@@ -2421,21 +2749,22 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
}
if (fp->type & QEDE_FASTPATH_TX) {
- fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs),
- GFP_KERNEL);
- if (!fp->txqs) {
- DP_NOTICE(edev,
- "TXQ array allocation failed\n");
+ fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
+ if (!fp->txq)
goto err;
- }
}
if (fp->type & QEDE_FASTPATH_RX) {
- fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
- if (!fp->rxq) {
- DP_NOTICE(edev,
- "RXQ struct allocation failed\n");
+ fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
+ if (!fp->rxq)
goto err;
+
+ if (edev->xdp_prog) {
+ fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
+ GFP_KERNEL);
+ if (!fp->xdp_tx)
+ goto err;
+ fp->type |= QEDE_FASTPATH_XDP;
}
}
}
@@ -2452,12 +2781,11 @@ static void qede_sp_task(struct work_struct *work)
sp_task.work);
struct qed_dev *cdev = edev->cdev;
- mutex_lock(&edev->qede_lock);
+ __qede_lock(edev);
- if (edev->state == QEDE_STATE_OPEN) {
- if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+ if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+ if (edev->state == QEDE_STATE_OPEN)
qede_config_rx_mode(edev->ndev);
- }
if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
struct qed_tunn_params tunn_params;
@@ -2477,16 +2805,16 @@ static void qede_sp_task(struct work_struct *work)
qed_ops->tunn_config(cdev, &tunn_params);
}
- mutex_unlock(&edev->qede_lock);
+ __qede_unlock(edev);
}
static void qede_update_pf_params(struct qed_dev *cdev)
{
struct qed_pf_params pf_params;
- /* 64 rx + 64 tx */
+ /* 64 rx + 64 tx + 64 XDP */
memset(&pf_params, 0, sizeof(struct qed_pf_params));
- pf_params.eth_pf_params.num_cons = 128;
+ pf_params.eth_pf_params.num_cons = 192;
qed_ops->common->update_pf_params(cdev, &pf_params);
}
@@ -2635,10 +2963,16 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
pci_set_drvdata(pdev, NULL);
+ /* Release edev's reference to the XDP bpf program, if one exists */
+ if (edev->xdp_prog)
+ bpf_prog_put(edev->xdp_prog);
+
free_netdev(ndev);
/* Use global ops since we've freed edev */
qed_ops->common->slowpath_stop(cdev);
+ if (system_state == SYSTEM_POWER_OFF)
+ return;
qed_ops->common->remove(cdev);
dev_info(&pdev->dev, "Ending qede_remove successfully\n");
@@ -2649,6 +2983,11 @@ static void qede_remove(struct pci_dev *pdev)
__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}
+static void qede_shutdown(struct pci_dev *pdev)
+{
+ __qede_remove(pdev, QEDE_REMOVE_NORMAL);
+}
+
/* -------------------------------------------------------------------------
* START OF LOAD / UNLOAD
* -------------------------------------------------------------------------
@@ -2732,7 +3071,7 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
data = rx_buf->data;
dma_unmap_page(&edev->pdev->dev,
- rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE);
+ rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
rx_buf->data = NULL;
__free_page(data);
@@ -2748,7 +3087,7 @@ static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
- struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+ struct sw_rx_data *replace_buf = &tpa_info->buffer;
if (replace_buf->data) {
dma_unmap_page(&edev->pdev->dev,
@@ -2774,52 +3113,15 @@ static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}
-static int qede_alloc_rx_buffer(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
-{
- struct sw_rx_data *sw_rx_data;
- struct eth_rx_bd *rx_bd;
- dma_addr_t mapping;
- struct page *data;
-
- data = alloc_pages(GFP_ATOMIC, 0);
- if (unlikely(!data)) {
- DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
- return -ENOMEM;
- }
-
- /* Map the entire page as it would be used
- * for multiple RX buffer segment size mapping.
- */
- mapping = dma_map_page(&edev->pdev->dev, data, 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
- __free_page(data);
- DP_NOTICE(edev, "Failed to map Rx buffer\n");
- return -ENOMEM;
- }
-
- sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
- sw_rx_data->page_offset = 0;
- sw_rx_data->data = data;
- sw_rx_data->mapping = mapping;
-
- /* Advance PROD and get BD pointer */
- rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
- WARN_ON(!rx_bd);
- rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
- rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
-
- rxq->sw_rx_prod++;
-
- return 0;
-}
-
static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
dma_addr_t mapping;
int i;
+ /* Don't perform FW aggregations in case of XDP */
+ if (edev->xdp_prog)
+ edev->gro_disable = 1;
+
if (edev->gro_disable)
return 0;
@@ -2830,7 +3132,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
- struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+ struct sw_rx_data *replace_buf = &tpa_info->buffer;
replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
if (unlikely(!replace_buf->data)) {
@@ -2840,7 +3142,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
}
mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
- rxq->rx_buf_size, DMA_FROM_DEVICE);
+ PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
DP_NOTICE(edev,
"Failed to map TPA replacement buffer\n");
@@ -2848,10 +3150,9 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
}
replace_buf->mapping = mapping;
- tpa_info->replace_buf.page_offset = 0;
-
- tpa_info->replace_buf_mapping = mapping;
- tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+ tpa_info->buffer.page_offset = 0;
+ tpa_info->buffer_mapping = mapping;
+ tpa_info->state = QEDE_AGG_STATE_NONE;
}
return 0;
@@ -2873,8 +3174,13 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
if (rxq->rx_buf_size > PAGE_SIZE)
rxq->rx_buf_size = PAGE_SIZE;
- /* Segment size to spilt a page in multiple equal parts */
- rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
+ /* Segment size to split a page into multiple equal parts,
+ * unless XDP is used, in which case we use the entire page.
+ */
+ if (!edev->xdp_prog)
+ rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
+ else
+ rxq->rx_buf_seg_size = PAGE_SIZE;
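	/* Editorial worked example, not part of the patch: with 4K pages,
	 * an rx_buf_size of ~1,700B rounds up to a 2,048B segment, so each
	 * page backs two buffers; under XDP the full 4,096B page backs a
	 * single buffer, leaving the program free to write anywhere in it.
	 */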
/* Allocate the parallel driver ring for Rx buffers */
size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
@@ -2910,7 +3216,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
/* Allocate buffers for the Rx ring */
for (i = 0; i < rxq->num_rx_buffers; i++) {
- rc = qede_alloc_rx_buffer(edev, rxq);
+ rc = qede_alloc_rx_buffer(rxq);
if (rc) {
DP_ERR(edev,
"Rx buffers allocation failed at index %d\n", i);
@@ -2926,7 +3232,10 @@ err:
static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
/* Free the parallel SW ring */
- kfree(txq->sw_tx_ring);
+ if (txq->is_xdp)
+ kfree(txq->sw_tx_ring.pages);
+ else
+ kfree(txq->sw_tx_ring.skbs);
/* Free the real RQ ring used by FW */
edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
@@ -2935,24 +3244,29 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
- int size, rc;
union eth_tx_bd_types *p_virt;
+ int size, rc;
txq->num_tx_buffers = edev->q_num_tx_buffers;
/* Allocate the parallel driver ring for Tx buffers */
- size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
- txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
- if (!txq->sw_tx_ring) {
- DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
- goto err;
+ if (txq->is_xdp) {
+ size = sizeof(*txq->sw_tx_ring.pages) * TX_RING_SIZE;
+ txq->sw_tx_ring.pages = kzalloc(size, GFP_KERNEL);
+ if (!txq->sw_tx_ring.pages)
+ goto err;
+ } else {
+ size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
+ txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
+ if (!txq->sw_tx_ring.skbs)
+ goto err;
}
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- NUM_TX_BDS_MAX,
+ TX_RING_SIZE,
sizeof(*p_virt), &txq->tx_pbl);
if (rc)
goto err;
@@ -2967,16 +3281,13 @@ err:
/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
- int tc;
-
qede_free_mem_sb(edev, fp->sb_info);
if (fp->type & QEDE_FASTPATH_RX)
qede_free_mem_rxq(edev, fp->rxq);
if (fp->type & QEDE_FASTPATH_TX)
- for (tc = 0; tc < edev->num_tc; tc++)
- qede_free_mem_txq(edev, &fp->txqs[tc]);
+ qede_free_mem_txq(edev, fp->txq);
}
/* This function allocates all memory needed for a single fp (i.e. an entity
@@ -2984,28 +3295,31 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
*/
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
- int rc, tc;
+ int rc = 0;
rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
if (rc)
- goto err;
+ goto out;
if (fp->type & QEDE_FASTPATH_RX) {
rc = qede_alloc_mem_rxq(edev, fp->rxq);
if (rc)
- goto err;
+ goto out;
+ }
+
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
+ if (rc)
+ goto out;
}
if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < edev->num_tc; tc++) {
- rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
- if (rc)
- goto err;
- }
+ rc = qede_alloc_mem_txq(edev, fp->txq);
+ if (rc)
+ goto out;
}
- return 0;
-err:
+out:
return rc;
}
@@ -3044,7 +3358,7 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
- int queue_id, rxq_index = 0, txq_index = 0, tc;
+ int queue_id, rxq_index = 0, txq_index = 0;
struct qede_fastpath *fp;
for_each_queue(queue_id) {
@@ -3053,25 +3367,28 @@ static void qede_init_fp(struct qede_dev *edev)
fp->edev = edev;
fp->id = queue_id;
- memset((void *)&fp->napi, 0, sizeof(fp->napi));
-
- memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
+ rxq_index);
+ fp->xdp_tx->is_xdp = 1;
+ }
if (fp->type & QEDE_FASTPATH_RX) {
- memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
fp->rxq->rxq_id = rxq_index++;
+
+ /* Determine how to map buffers for this queue */
+ if (fp->type & QEDE_FASTPATH_XDP)
+ fp->rxq->data_direction = DMA_BIDIRECTIONAL;
+ else
+ fp->rxq->data_direction = DMA_FROM_DEVICE;
+ fp->rxq->dev = &edev->pdev->dev;
}
if (fp->type & QEDE_FASTPATH_TX) {
- memset((void *)fp->txqs, 0,
- (edev->num_tc * sizeof(*fp->txqs)));
- for (tc = 0; tc < edev->num_tc; tc++) {
- fp->txqs[tc].index = txq_index +
- tc * QEDE_TSS_COUNT(edev);
- if (edev->dev_info.is_legacy)
- fp->txqs[tc].is_legacy = true;
- }
- txq_index++;
+ fp->txq->index = txq_index++;
+ if (edev->dev_info.is_legacy)
+ fp->txq->is_legacy = 1;
+ fp->txq->dev = &edev->pdev->dev;
}
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -3239,11 +3556,18 @@ static int qede_drain_txq(struct qede_dev *edev,
return 0;
}
+static int qede_stop_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq, int rss_id)
+{
+ return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
+}
+
static int qede_stop_queues(struct qede_dev *edev)
{
struct qed_update_vport_params vport_update_params;
struct qed_dev *cdev = edev->cdev;
- int rc, tc, i;
+ struct qede_fastpath *fp;
+ int rc, i;
/* Disable the vport */
memset(&vport_update_params, 0, sizeof(vport_update_params));
@@ -3260,53 +3584,49 @@ static int qede_stop_queues(struct qede_dev *edev)
/* Flush Tx queues. If needed, request drain from MCP */
for_each_queue(i) {
- struct qede_fastpath *fp = &edev->fp_array[i];
+ fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qede_tx_queue *txq = &fp->txqs[tc];
+ rc = qede_drain_txq(edev, fp->txq, true);
+ if (rc)
+ return rc;
+ }
- rc = qede_drain_txq(edev, txq, true);
- if (rc)
- return rc;
- }
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ rc = qede_drain_txq(edev, fp->xdp_tx, true);
+ if (rc)
+ return rc;
}
}
/* Stop all Queues in reverse order */
for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
- struct qed_stop_rxq_params rx_params;
+ fp = &edev->fp_array[i];
/* Stop the Tx Queue(s) */
- if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qed_stop_txq_params tx_params;
- u8 val;
-
- tx_params.rss_id = i;
- val = edev->fp_array[i].txqs[tc].index;
- tx_params.tx_queue_id = val;
- rc = edev->ops->q_tx_stop(cdev, &tx_params);
- if (rc) {
- DP_ERR(edev, "Failed to stop TXQ #%d\n",
- tx_params.tx_queue_id);
- return rc;
- }
- }
+ if (fp->type & QEDE_FASTPATH_TX) {
+ rc = qede_stop_txq(edev, fp->txq, i);
+ if (rc)
+ return rc;
}
/* Stop the Rx Queue */
- if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
- memset(&rx_params, 0, sizeof(rx_params));
- rx_params.rss_id = i;
- rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id;
-
- rc = edev->ops->q_rx_stop(cdev, &rx_params);
+ if (fp->type & QEDE_FASTPATH_RX) {
+ rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
if (rc) {
DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
return rc;
}
}
+
+ /* Stop the XDP forwarding queue */
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ rc = qede_stop_txq(edev, fp->xdp_tx, i);
+ if (rc)
+ return rc;
+
+ bpf_prog_put(fp->rxq->xdp_prog);
+ }
}
/* Stop the vport */
@@ -3317,9 +3637,55 @@ static int qede_stop_queues(struct qede_dev *edev)
return rc;
}
+static int qede_start_txq(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
+{
+ dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
+ u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
+ struct qed_queue_start_common_params params;
+ struct qed_txq_start_ret_params ret_params;
+ int rc;
+
+ memset(&params, 0, sizeof(params));
+ memset(&ret_params, 0, sizeof(ret_params));
+
+ /* Let the XDP queue share the queue-zone with one of the regular txqs.
+ * We don't really care about its coalescing.
+ */
+ if (txq->is_xdp)
+ params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
+ else
+ params.queue_id = txq->index;
+
+ params.sb = fp->sb_info->igu_sb_id;
+ params.sb_idx = sb_idx;
+
+ rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
+ page_cnt, &ret_params);
+ if (rc) {
+ DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
+ return rc;
+ }
+
+ txq->doorbell_addr = ret_params.p_doorbell;
+ txq->handle = ret_params.p_handle;
+
+ /* Determine the associated FW consumer address */
+ txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
+
+ /* Prepare the doorbell parameters */
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ETH_TX_BD_PROD_CMD);
+ txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+
+ return rc;
+}
+
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
- int rc, tc, i;
int vlan_removal_en = 1;
struct qed_dev *cdev = edev->cdev;
struct qed_update_vport_params vport_update_params;
@@ -3327,6 +3693,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
struct qed_dev_info *qed_info = &edev->dev_info.common;
struct qed_start_vport_params start = {0};
bool reset_rss_indir = false;
+ int rc, i;
if (!edev->num_queues) {
DP_ERR(edev,
@@ -3358,11 +3725,12 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
u32 page_cnt;
if (fp->type & QEDE_FASTPATH_RX) {
+ struct qed_rxq_start_ret_params ret_params;
struct qede_rx_queue *rxq = fp->rxq;
__le16 *val;
+ memset(&ret_params, 0, sizeof(ret_params));
memset(&q_params, 0, sizeof(q_params));
- q_params.rss_id = i;
q_params.queue_id = rxq->rxq_id;
q_params.vport_id = 0;
q_params.sb = fp->sb_info->igu_sb_id;
@@ -3372,60 +3740,44 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
- rc = edev->ops->q_rx_start(cdev, &q_params,
+ rc = edev->ops->q_rx_start(cdev, i, &q_params,
rxq->rx_buf_size,
rxq->rx_bd_ring.p_phys_addr,
p_phys_table,
- page_cnt,
- &rxq->hw_rxq_prod_addr);
+ page_cnt, &ret_params);
if (rc) {
DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
rc);
return rc;
}
+ /* Use the return parameters */
+ rxq->hw_rxq_prod_addr = ret_params.p_prod;
+ rxq->handle = ret_params.p_handle;
+
val = &fp->sb_info->sb_virt->pi_array[RX_PI];
rxq->hw_cons_ptr = val;
qede_update_rx_prod(edev, rxq);
}
- if (!(fp->type & QEDE_FASTPATH_TX))
- continue;
-
- for (tc = 0; tc < edev->num_tc; tc++) {
- struct qede_tx_queue *txq = &fp->txqs[tc];
-
- p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
- page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
-
- memset(&q_params, 0, sizeof(q_params));
- q_params.rss_id = i;
- q_params.queue_id = txq->index;
- q_params.vport_id = 0;
- q_params.sb = fp->sb_info->igu_sb_id;
- q_params.sb_idx = TX_PI(tc);
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
+ if (rc)
+ return rc;
- rc = edev->ops->q_tx_start(cdev, &q_params,
- p_phys_table, page_cnt,
- &txq->doorbell_addr);
- if (rc) {
- DP_ERR(edev, "Start TXQ #%d failed %d\n",
- txq->index, rc);
+ fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
+ if (IS_ERR(fp->rxq->xdp_prog)) {
+ rc = PTR_ERR(fp->rxq->xdp_prog);
+ fp->rxq->xdp_prog = NULL;
return rc;
}
+ }
- txq->hw_cons_ptr =
- &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
- SET_FIELD(txq->tx_db.data.params,
- ETH_DB_DATA_DEST, DB_DEST_XCM);
- SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
- DB_AGG_CMD_SET);
- SET_FIELD(txq->tx_db.data.params,
- ETH_DB_DATA_AGG_VAL_SEL,
- DQ_XCM_ETH_TX_BD_PROD_CMD);
-
- txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ if (fp->type & QEDE_FASTPATH_TX) {
+ rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
+ if (rc)
+ return rc;
}
}
@@ -3520,15 +3872,18 @@ enum qede_unload_mode {
QEDE_UNLOAD_NORMAL,
};
-static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
+static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
+ bool is_locked)
{
struct qed_link_params link_params;
int rc;
DP_INFO(edev, "Starting qede unload\n");
+ if (!is_locked)
+ __qede_lock(edev);
+
qede_roce_dev_event_close(edev);
- mutex_lock(&edev->qede_lock);
edev->state = QEDE_STATE_CLOSED;
/* Close OS Tx */
@@ -3560,7 +3915,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
qede_free_fp_array(edev);
out:
- mutex_unlock(&edev->qede_lock);
+ if (!is_locked)
+ __qede_unlock(edev);
DP_INFO(edev, "Ending qede unload\n");
}
@@ -3569,7 +3925,8 @@ enum qede_load_mode {
QEDE_LOAD_RELOAD,
};
-static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
+static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
+ bool is_locked)
{
struct qed_link_params link_params;
struct qed_link_output link_output;
@@ -3577,21 +3934,24 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
DP_INFO(edev, "Starting qede load\n");
+ if (!is_locked)
+ __qede_lock(edev);
+
rc = qede_set_num_queues(edev);
if (rc)
- goto err0;
+ goto out;
rc = qede_alloc_fp_array(edev);
if (rc)
- goto err0;
+ goto out;
qede_init_fp(edev);
rc = qede_alloc_mem_load(edev);
if (rc)
goto err1;
- DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
- QEDE_QUEUE_CNT(edev), edev->num_tc);
+ DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
+ QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
rc = qede_set_real_num_queues(edev);
if (rc)
@@ -3613,10 +3973,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
/* Add primary mac and set Rx filters */
ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
- mutex_lock(&edev->qede_lock);
- edev->state = QEDE_STATE_OPEN;
- mutex_unlock(&edev->qede_lock);
-
/* Program un-configured VLANs */
qede_configure_vlan_filters(edev);
@@ -3631,10 +3987,12 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
qede_roce_dev_event_open(edev);
qede_link_update(edev, &link_output);
+ edev->state = QEDE_STATE_OPEN;
+
DP_INFO(edev, "Ending successfully qede load\n");
- return 0;
+ goto out;
err4:
qede_sync_free_irqs(edev);
memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
@@ -3648,26 +4006,40 @@ err1:
edev->num_queues = 0;
edev->fp_num_tx = 0;
edev->fp_num_rx = 0;
-err0:
+out:
+ if (!is_locked)
+ __qede_unlock(edev);
+
return rc;
}
+/* 'func' should be able to run between unload and reload, assuming the
+ * interface is actually running, or afterwards in case it's currently DOWN.
+ */
void qede_reload(struct qede_dev *edev,
- void (*func)(struct qede_dev *, union qede_reload_args *),
- union qede_reload_args *args)
+ struct qede_reload_args *args, bool is_locked)
{
- qede_unload(edev, QEDE_UNLOAD_NORMAL);
- /* Call function handler to update parameters
- * needed for function load.
- */
- if (func)
- func(edev, args);
+ if (!is_locked)
+ __qede_lock(edev);
- qede_load(edev, QEDE_LOAD_RELOAD);
+ /* Since qede_lock is held, the internal state can't change even if the
+ * netdev state starts transitioning. If the current internal
+ * configuration indicates the device is up, reload it.
+ */
+ if (edev->state == QEDE_STATE_OPEN) {
+ qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
+ if (args)
+ args->func(edev, args);
+ qede_load(edev, QEDE_LOAD_RELOAD, true);
+
+ /* Since no one is going to do it for us, re-configure */
+ qede_config_rx_mode(edev->ndev);
+ } else if (args) {
+ args->func(edev, args);
+ }
- mutex_lock(&edev->qede_lock);
- qede_config_rx_mode(edev->ndev);
- mutex_unlock(&edev->qede_lock);
+ if (!is_locked)
+ __qede_unlock(edev);
}
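The contract mirrors the two callers added earlier (features and XDP): stash the new value in qede_reload_args and let qede_reload() decide whether a full unload/load cycle is needed. A hypothetical additional caller might look like the sketch below; 'u.mtu' is assumed for illustration and is not introduced by this hunk:

	/* Sketch of the qede_reload() callback pattern. */
	static void qede_mtu_reload_sketch(struct qede_dev *edev,
					   struct qede_reload_args *args)
	{
		edev->ndev->mtu = args->u.mtu;
	}

	static void qede_change_mtu_sketch(struct qede_dev *edev, u16 mtu)
	{
		struct qede_reload_args args = {
			.func = &qede_mtu_reload_sketch,
			.u.mtu = mtu,
		};

		/* Runs the callback between unload and load if the device is
		 * open; otherwise applies it directly under qede_lock.
		 */
		qede_reload(edev, &args, false);
	}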
/* called with rtnl_lock */
@@ -3680,13 +4052,14 @@ static int qede_open(struct net_device *ndev)
edev->ops->common->set_power_state(edev->cdev, PCI_D0);
- rc = qede_load(edev, QEDE_LOAD_NORMAL);
-
+ rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
if (rc)
return rc;
udp_tunnel_get_rx_info(ndev);
+ edev->ops->common->update_drv_state(edev->cdev, true);
+
return 0;
}
@@ -3694,7 +4067,9 @@ static int qede_close(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
- qede_unload(edev, QEDE_UNLOAD_NORMAL);
+ qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
+
+ edev->ops->common->update_drv_state(edev->cdev, false);
return 0;
}
@@ -3756,6 +4131,8 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p)
if (rc)
return rc;
+ edev->ops->common->update_mac(edev->cdev, addr->sa_data);
+
/* Add MAC filter according to the new unicast HW MAC address */
ether_addr_copy(edev->primary_mac, ndev->dev_addr);
return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
@@ -3822,15 +4199,8 @@ static void qede_set_rx_mode(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
- DP_INFO(edev, "qede_set_rx_mode called\n");
-
- if (edev->state != QEDE_STATE_OPEN) {
- DP_INFO(edev,
- "qede_set_rx_mode called while interface is down\n");
- } else {
- set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
- schedule_delayed_work(&edev->sp_task, 0);
- }
+ set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
}
/* Must be called with qede_lock held */
@@ -3878,7 +4248,7 @@ static void qede_config_rx_mode(struct net_device *ndev)
/* Check for promiscuous */
if ((ndev->flags & IFF_PROMISC) ||
- (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
+ (uc_count > edev->dev_info.num_mac_filters - 1)) {
accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
} else {
/* Add MAC filters according to the unicast secondary macs */