Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/Makefile | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 27
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 34
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 97
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c | 464
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h | 310
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 217
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 8
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 104
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 38
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 32
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 38
-rw-r--r--  drivers/net/ethernet/intel/i40e/Makefile | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 22
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 1012
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.h | 232
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 115
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 34
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 247
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 4
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 72
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.c | 2
-rw-r--r--  drivers/net/geneve.c | 11
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 7
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 5
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 16
-rw-r--r--  drivers/net/phy/at803x.c | 10
-rw-r--r--  drivers/net/phy/mdio-sun4i.c | 4
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 4
-rw-r--r--  drivers/net/rionet.c | 277
-rw-r--r--  drivers/net/usb/lan78xx.c | 49
-rw-r--r--  drivers/net/vxlan.c | 9
54 files changed, 3384 insertions, 312 deletions
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 3ce6095ced3d..6619178ed77b 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2959,7 +2959,7 @@ static int macb_probe(struct platform_device *pdev)
int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
if (gpio_is_valid(gpio))
bp->reset_gpio = gpio_to_desc(gpio);
- gpiod_set_value(bp->reset_gpio, GPIOD_OUT_HIGH);
+ gpiod_direction_output(bp->reset_gpio, 1);
}
of_node_put(phy_node);
@@ -3029,7 +3029,7 @@ static int macb_remove(struct platform_device *pdev)
mdiobus_free(bp->mii_bus);
/* Shutdown the PHY if there is a GPIO reset */
- gpiod_set_value(bp->reset_gpio, GPIOD_OUT_LOW);
+ gpiod_set_value(bp->reset_gpio, 0);
unregister_netdev(dev);
clk_disable_unprepare(bp->tx_clk);
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 4d187f22c48b..4686a85a8a22 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -96,6 +96,17 @@ config CHELSIO_T4_DCB
If unsure, say N.
+config CHELSIO_T4_UWIRE
+ bool "Unified Wire Support for Chelsio T5 cards"
+ default n
+ depends on CHELSIO_T4
+ ---help---
+ Enable unified-wire offload features.
+ Say Y here if you want to enable unified-wire over Ethernet
+ in the driver.
+
+ If unsure, say N.
+
config CHELSIO_T4_FCOE
bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards"
default n
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index ace0ab98d0f1..85c92821b239 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
+cxgb4-$(CONFIG_CHELSIO_T4_UWIRE) += cxgb4_ppm.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1dac6c6111bf..984a3cc26f86 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -404,6 +404,9 @@ enum {
MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */
+
+ /* # of streaming iSCSIT Rx queues */
+ MAX_ISCSIT_QUEUES = MAX_OFLD_QSETS,
};
enum {
@@ -420,8 +423,8 @@ enum {
enum {
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
- MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
- + MAX_RDMA_CIQS + INGQ_EXTRAS,
+ MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES +
+ MAX_RDMA_CIQS + MAX_ISCSIT_QUEUES + INGQ_EXTRAS,
};
struct adapter;
@@ -508,6 +511,15 @@ struct pkt_gl {
typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
+typedef void (*rspq_flush_handler_t)(struct sge_rspq *q);
+/* LRO related declarations for ULD */
+struct t4_lro_mgr {
+#define MAX_LRO_SESSIONS 64
+ u8 lro_session_cnt; /* # of sessions to aggregate */
+ unsigned long lro_pkts; /* # of LRO super packets */
+ unsigned long lro_merged; /* # of wire packets merged by LRO */
+ struct sk_buff_head lroq; /* list of aggregated sessions */
+};
struct sge_rspq { /* state for an SGE response queue */
struct napi_struct napi;
@@ -532,6 +544,8 @@ struct sge_rspq { /* state for an SGE response queue */
struct adapter *adap;
struct net_device *netdev; /* associated net device */
rspq_handler_t handler;
+ rspq_flush_handler_t flush_handler;
+ struct t4_lro_mgr lro_mgr;
#ifdef CONFIG_NET_RX_BUSY_POLL
#define CXGB_POLL_STATE_IDLE 0
#define CXGB_POLL_STATE_NAPI BIT(0) /* NAPI owns this poll */
@@ -641,6 +655,7 @@ struct sge {
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS];
+ struct sge_ofld_rxq iscsitrxq[MAX_ISCSIT_QUEUES];
struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
@@ -652,9 +667,11 @@ struct sge {
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
u16 iscsiqsets; /* # of active iSCSI queue sets */
+ u16 niscsitq; /* # of available iSCSIT Rx queues */
u16 rdmaqs; /* # of available RDMA Rx queues */
u16 rdmaciqs; /* # of available RDMA concentrator IQs */
u16 iscsi_rxq[MAX_OFLD_QSETS];
+ u16 iscsit_rxq[MAX_ISCSIT_QUEUES];
u16 rdma_rxq[MAX_RDMA_QUEUES];
u16 rdma_ciq[MAX_RDMA_CIQS];
u16 timer_val[SGE_NTIMERS];
@@ -681,6 +698,7 @@ struct sge {
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++)
+#define for_each_iscsitrxq(sge, i) for (i = 0; i < (sge)->niscsitq; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
@@ -747,6 +765,8 @@ struct adapter {
struct list_head rcu_node;
struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
+ void *iscsi_ppm;
+
struct tid_info tids;
void **tid_release_head;
spinlock_t tid_release_lock;
@@ -1113,7 +1133,8 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct net_device *dev, int intr_idx,
- struct sge_fl *fl, rspq_handler_t hnd, int cong);
+ struct sge_fl *fl, rspq_handler_t hnd,
+ rspq_flush_handler_t flush_handler, int cong);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *netdevq,
unsigned int iqid);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index e6a4072b494b..0bb41e9b9b1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2334,12 +2334,14 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
struct adapter *adap = seq->private;
int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4);
+ int iscsit_entries = DIV_ROUND_UP(adap->sge.niscsitq, 4);
int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
int i, r = (uintptr_t)v - 1;
int iscsi_idx = r - eth_entries;
- int rdma_idx = iscsi_idx - iscsi_entries;
+ int iscsit_idx = iscsi_idx - iscsi_entries;
+ int rdma_idx = iscsit_idx - iscsit_entries;
int ciq_idx = rdma_idx - rdma_entries;
int ctrl_idx = ciq_idx - ciq_entries;
int fq_idx = ctrl_idx - ctrl_entries;
@@ -2453,6 +2455,35 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
+ } else if (iscsit_idx < iscsit_entries) {
+ const struct sge_ofld_rxq *rx =
+ &adap->sge.iscsitrxq[iscsit_idx * 4];
+ int n = min(4, adap->sge.niscsitq - 4 * iscsit_idx);
+
+ S("QType:", "iSCSIT");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:",
+ adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxImmPkts:", stats.imm);
+ RL("RxNoMem:", stats.nomem);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLMapErr:", fl.mapping_err);
+ RL("FLLow:", fl.low);
+ RL("FLStarving:", fl.starving);
+
} else if (rdma_idx < rdma_entries) {
const struct sge_ofld_rxq *rx =
&adap->sge.rdmarxq[rdma_idx * 4];
@@ -2543,6 +2574,7 @@ static int sge_queue_entries(const struct adapter *adap)
{
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
DIV_ROUND_UP(adap->sge.iscsiqsets, 4) +
+ DIV_ROUND_UP(adap->sge.niscsitq, 4) +
DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index adad73f7c8cd..d1e3f0997d6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -227,7 +227,7 @@ static DEFINE_MUTEX(uld_mutex);
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
-static const char *uld_str[] = { "RDMA", "iSCSI" };
+static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };
static void link_report(struct net_device *dev)
{
@@ -664,6 +664,13 @@ out:
return 0;
}
+/* Flush the aggregated lro sessions */
+static void uldrx_flush_handler(struct sge_rspq *q)
+{
+ if (ulds[q->uld].lro_flush)
+ ulds[q->uld].lro_flush(&q->lro_mgr);
+}
+
/**
* uldrx_handler - response queue handler for ULD queues
* @q: the response queue that received the packet
@@ -677,6 +684,7 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl)
{
struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
+ int ret;
/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
*/
@@ -684,10 +692,19 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
rsp += 2;
- if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
+ if (q->flush_handler)
+ ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
+ rsp, gl, &q->lro_mgr,
+ &q->napi);
+ else
+ ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
+ rsp, gl);
+
+ if (ret) {
rxq->stats.nomem++;
return -1;
}
+
if (gl == NULL)
rxq->stats.imm++;
else if (gl == CXGB4_MSG_AN)
@@ -754,6 +771,10 @@ static void name_msix_vecs(struct adapter *adap)
snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
adap->port[0]->name, i);
+ for_each_iscsitrxq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
+ adap->port[0]->name, i);
+
for_each_rdmarxq(&adap->sge, i)
snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
adap->port[0]->name, i);
@@ -767,6 +788,7 @@ static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
+ int iscsitqidx = 0;
int msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
@@ -792,6 +814,15 @@ static int request_msix_queue_irqs(struct adapter *adap)
goto unwind;
msi_index++;
}
+ for_each_iscsitrxq(s, iscsitqidx) {
+ err = request_irq(adap->msix_info[msi_index].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info[msi_index].desc,
+ &s->iscsitrxq[iscsitqidx].rspq);
+ if (err)
+ goto unwind;
+ msi_index++;
+ }
for_each_rdmarxq(s, rdmaqidx) {
err = request_irq(adap->msix_info[msi_index].vec,
t4_sge_intr_msix, 0,
@@ -819,6 +850,9 @@ unwind:
while (--rdmaqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->rdmarxq[rdmaqidx].rspq);
+ while (--iscsitqidx >= 0)
+ free_irq(adap->msix_info[--msi_index].vec,
+ &s->iscsitrxq[iscsitqidx].rspq);
while (--iscsiqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->iscsirxq[iscsiqidx].rspq);
@@ -840,6 +874,9 @@ static void free_msix_queue_irqs(struct adapter *adap)
for_each_iscsirxq(s, i)
free_irq(adap->msix_info[msi_index++].vec,
&s->iscsirxq[i].rspq);
+ for_each_iscsitrxq(s, i)
+ free_irq(adap->msix_info[msi_index++].vec,
+ &s->iscsitrxq[i].rspq);
for_each_rdmarxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
for_each_rdmaciq(s, i)
@@ -984,7 +1021,7 @@ static void enable_rx(struct adapter *adap)
static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
unsigned int nq, unsigned int per_chan, int msi_idx,
- u16 *ids)
+ u16 *ids, bool lro)
{
int i, err;
@@ -994,7 +1031,9 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
adap->port[i / per_chan],
msi_idx, q->fl.size ? &q->fl : NULL,
- uldrx_handler, 0);
+ uldrx_handler,
+ lro ? uldrx_flush_handler : NULL,
+ 0);
if (err)
return err;
memset(&q->stats, 0, sizeof(q->stats));
@@ -1024,7 +1063,7 @@ static int setup_sge_queues(struct adapter *adap)
msi_idx = 1; /* vector 0 is for non-queue interrupts */
else {
err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
- NULL, NULL, -1);
+ NULL, NULL, NULL, -1);
if (err)
return err;
msi_idx = -((int)s->intrq.abs_id + 1);
@@ -1044,7 +1083,7 @@ static int setup_sge_queues(struct adapter *adap)
* new/deleted queues.
*/
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
- msi_idx, NULL, fwevtq_handler, -1);
+ msi_idx, NULL, fwevtq_handler, NULL, -1);
if (err) {
freeout: t4_free_sge_resources(adap);
return err;
@@ -1062,6 +1101,7 @@ freeout: t4_free_sge_resources(adap);
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
msi_idx, &q->fl,
t4_ethrx_handler,
+ NULL,
t4_get_mps_bg_map(adap,
pi->tx_chan));
if (err)
@@ -1087,18 +1127,19 @@ freeout: t4_free_sge_resources(adap);
goto freeout;
}
-#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
- err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
+#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
+ err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \
if (err) \
goto freeout; \
if (msi_idx > 0) \
msi_idx += nq; \
} while (0)
- ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq);
- ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
+ ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
+ ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
+ ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
- ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);
+ ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);
#undef ALLOC_OFLD_RXQS
@@ -2430,6 +2471,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
} else if (uld == CXGB4_ULD_ISCSI) {
lli.rxq_ids = adap->sge.iscsi_rxq;
lli.nrxq = adap->sge.iscsiqsets;
+ } else if (uld == CXGB4_ULD_ISCSIT) {
+ lli.rxq_ids = adap->sge.iscsit_rxq;
+ lli.nrxq = adap->sge.niscsitq;
}
lli.ntxq = adap->sge.iscsiqsets;
lli.nchan = adap->params.nports;
@@ -2437,6 +2481,10 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.wr_cred = adap->params.ofldq_wr_cred;
lli.adapter_type = adap->params.chip;
lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
+ lli.iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
+ lli.iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
+ lli.iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
+ lli.iscsi_ppm = &adap->iscsi_ppm;
lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
lli.udb_density = 1 << adap->params.sge.eq_qpp;
lli.ucq_density = 1 << adap->params.sge.iq_qpp;
@@ -4336,6 +4384,9 @@ static void cfg_queues(struct adapter *adap)
s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
adap->params.nports;
s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
+
+ if (!is_t4(adap->params.chip))
+ s->niscsitq = s->iscsiqsets;
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -4362,6 +4413,16 @@ static void cfg_queues(struct adapter *adap)
r->fl.size = 72;
}
+ if (!is_t4(adap->params.chip)) {
+ for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
+ struct sge_ofld_rxq *r = &s->iscsitrxq[i];
+
+ init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
+ r->rspq.uld = CXGB4_ULD_ISCSIT;
+ r->fl.size = 72;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
struct sge_ofld_rxq *r = &s->rdmarxq[i];
@@ -4436,9 +4497,13 @@ static int enable_msix(struct adapter *adap)
want = s->max_ethqsets + EXTRA_VECS;
if (is_offload(adap)) {
- want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets;
+ want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
+ s->niscsitq;
/* need nchan for each possible ULD */
- ofld_need = 3 * nchan;
+ if (is_t4(adap->params.chip))
+ ofld_need = 3 * nchan;
+ else
+ ofld_need = 4 * nchan;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
@@ -4470,12 +4535,16 @@ static int enable_msix(struct adapter *adap)
if (allocated < want) {
s->rdmaqs = nchan;
s->rdmaciqs = nchan;
+
+ if (!is_t4(adap->params.chip))
+ s->niscsitq = nchan;
}
/* leftovers go to OFLD */
i = allocated - EXTRA_VECS - s->max_ethqsets -
- s->rdmaqs - s->rdmaciqs;
+ s->rdmaqs - s->rdmaciqs - s->niscsitq;
s->iscsiqsets = (i / nchan) * nchan; /* round down */
+
}
for (i = 0; i < allocated; ++i)
adap->msix_info[i].vec = entries[i].vector;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c
new file mode 100644
index 000000000000..d88a7a7b2400
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c
@@ -0,0 +1,464 @@
+/*
+ * cxgb4_ppm.c: Chelsio common library for T4/T5 iSCSI PagePod Manager
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+
+#include "cxgb4_ppm.h"
+
+/* Direct Data Placement -
+ * Directly place the iSCSI Data-In or Data-Out PDU's payload into
+ * pre-posted final destination host-memory buffers based on the
+ * Initiator Task Tag (ITT) in Data-In or Target Task Tag (TTT)
+ * in Data-Out PDUs. The host memory address is programmed into
+ * h/w in the format of pagepod entries. The location of the
+ * pagepod entry is encoded into ddp tag which is used as the base
+ * for ITT/TTT.
+ */
+
+/* Direct-Data Placement page size adjustment
+ */
+int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz)
+{
+ struct cxgbi_tag_format *tformat = &ppm->tformat;
+ int i;
+
+ for (i = 0; i < DDP_PGIDX_MAX; i++) {
+ if (pgsz == 1UL << (DDP_PGSZ_BASE_SHIFT +
+ tformat->pgsz_order[i])) {
+ pr_debug("%s: %s ppm, pgsz %lu -> idx %d.\n",
+ __func__, ppm->ndev->name, pgsz, i);
+ return i;
+ }
+ }
+ pr_info("ippm: ddp page size %lu not supported.\n", pgsz);
+ return DDP_PGIDX_MAX;
+}
+
+/* DDP setup & teardown
+ */
+static int ppm_find_unused_entries(unsigned long *bmap,
+ unsigned int max_ppods,
+ unsigned int start,
+ unsigned int nr,
+ unsigned int align_mask)
+{
+ unsigned long i;
+
+ i = bitmap_find_next_zero_area(bmap, max_ppods, start, nr, align_mask);
+
+ if (unlikely(i >= max_ppods) && (start > nr))
+ i = bitmap_find_next_zero_area(bmap, max_ppods, 0, start - 1,
+ align_mask);
+ if (unlikely(i >= max_ppods))
+ return -ENOSPC;
+
+ bitmap_set(bmap, i, nr);
+ return (int)i;
+}
+
+static void ppm_mark_entries(struct cxgbi_ppm *ppm, int i, int count,
+ unsigned long caller_data)
+{
+ struct cxgbi_ppod_data *pdata = ppm->ppod_data + i;
+
+ pdata->caller_data = caller_data;
+ pdata->npods = count;
+
+ if (pdata->color == ((1 << PPOD_IDX_SHIFT) - 1))
+ pdata->color = 0;
+ else
+ pdata->color++;
+}
+
+static int ppm_get_cpu_entries(struct cxgbi_ppm *ppm, unsigned int count,
+ unsigned long caller_data)
+{
+ struct cxgbi_ppm_pool *pool;
+ unsigned int cpu;
+ int i;
+
+ cpu = get_cpu();
+ pool = per_cpu_ptr(ppm->pool, cpu);
+ spin_lock_bh(&pool->lock);
+ put_cpu();
+
+ i = ppm_find_unused_entries(pool->bmap, ppm->pool_index_max,
+ pool->next, count, 0);
+ if (i < 0) {
+ pool->next = 0;
+ spin_unlock_bh(&pool->lock);
+ return -ENOSPC;
+ }
+
+ pool->next = i + count;
+ if (pool->next >= ppm->pool_index_max)
+ pool->next = 0;
+
+ spin_unlock_bh(&pool->lock);
+
+ pr_debug("%s: cpu %u, idx %d + %d (%d), next %u.\n",
+ __func__, cpu, i, count, i + cpu * ppm->pool_index_max,
+ pool->next);
+
+ i += cpu * ppm->pool_index_max;
+ ppm_mark_entries(ppm, i, count, caller_data);
+
+ return i;
+}
+
+static int ppm_get_entries(struct cxgbi_ppm *ppm, unsigned int count,
+ unsigned long caller_data)
+{
+ int i;
+
+ spin_lock_bh(&ppm->map_lock);
+ i = ppm_find_unused_entries(ppm->ppod_bmap, ppm->bmap_index_max,
+ ppm->next, count, 0);
+ if (i < 0) {
+ ppm->next = 0;
+ spin_unlock_bh(&ppm->map_lock);
+ pr_debug("ippm: NO suitable entries %u available.\n",
+ count);
+ return -ENOSPC;
+ }
+
+ ppm->next = i + count;
+ if (ppm->next >= ppm->bmap_index_max)
+ ppm->next = 0;
+
+ spin_unlock_bh(&ppm->map_lock);
+
+ pr_debug("%s: idx %d + %d (%d), next %u, caller_data 0x%lx.\n",
+ __func__, i, count, i + ppm->pool_rsvd, ppm->next,
+ caller_data);
+
+ i += ppm->pool_rsvd;
+ ppm_mark_entries(ppm, i, count, caller_data);
+
+ return i;
+}
+
+static void ppm_unmark_entries(struct cxgbi_ppm *ppm, int i, int count)
+{
+ pr_debug("%s: idx %d + %d.\n", __func__, i, count);
+
+ if (i < ppm->pool_rsvd) {
+ unsigned int cpu;
+ struct cxgbi_ppm_pool *pool;
+
+ cpu = i / ppm->pool_index_max;
+ i %= ppm->pool_index_max;
+
+ pool = per_cpu_ptr(ppm->pool, cpu);
+ spin_lock_bh(&pool->lock);
+ bitmap_clear(pool->bmap, i, count);
+
+ if (i < pool->next)
+ pool->next = i;
+ spin_unlock_bh(&pool->lock);
+
+ pr_debug("%s: cpu %u, idx %d, next %u.\n",
+ __func__, cpu, i, pool->next);
+ } else {
+ spin_lock_bh(&ppm->map_lock);
+
+ i -= ppm->pool_rsvd;
+ bitmap_clear(ppm->ppod_bmap, i, count);
+
+ if (i < ppm->next)
+ ppm->next = i;
+ spin_unlock_bh(&ppm->map_lock);
+
+ pr_debug("%s: idx %d, next %u.\n", __func__, i, ppm->next);
+ }
+}
+
+void cxgbi_ppm_ppod_release(struct cxgbi_ppm *ppm, u32 idx)
+{
+ struct cxgbi_ppod_data *pdata;
+
+ if (idx >= ppm->ppmax) {
+ pr_warn("ippm: idx too big %u > %u.\n", idx, ppm->ppmax);
+ return;
+ }
+
+ pdata = ppm->ppod_data + idx;
+ if (!pdata->npods) {
+ pr_warn("ippm: idx %u, npods 0.\n", idx);
+ return;
+ }
+
+ pr_debug("release idx %u, npods %u.\n", idx, pdata->npods);
+ ppm_unmark_entries(ppm, idx, pdata->npods);
+}
+EXPORT_SYMBOL(cxgbi_ppm_ppod_release);
+
+int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *ppm, unsigned short nr_pages,
+ u32 per_tag_pg_idx, u32 *ppod_idx,
+ u32 *ddp_tag, unsigned long caller_data)
+{
+ struct cxgbi_ppod_data *pdata;
+ unsigned int npods;
+ int idx = -1;
+ unsigned int hwidx;
+ u32 tag;
+
+ npods = (nr_pages + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+ if (!npods) {
+ pr_warn("%s: pages %u -> npods %u, full.\n",
+ __func__, nr_pages, npods);
+ return -EINVAL;
+ }
+
+ /* grab from cpu pool first */
+ idx = ppm_get_cpu_entries(ppm, npods, caller_data);
+ /* try the general pool */
+ if (idx < 0)
+ idx = ppm_get_entries(ppm, npods, caller_data);
+ if (idx < 0) {
+ pr_debug("ippm: pages %u, nospc %u, nxt %u, 0x%lx.\n",
+ nr_pages, npods, ppm->next, caller_data);
+ return idx;
+ }
+
+ pdata = ppm->ppod_data + idx;
+ hwidx = ppm->base_idx + idx;
+
+ tag = cxgbi_ppm_make_ddp_tag(hwidx, pdata->color);
+
+ if (per_tag_pg_idx)
+ tag |= (per_tag_pg_idx << 30) & 0xC0000000;
+
+ *ppod_idx = idx;
+ *ddp_tag = tag;
+
+ pr_debug("ippm: sg %u, tag 0x%x(%u,%u), data 0x%lx.\n",
+ nr_pages, tag, idx, npods, caller_data);
+
+ return npods;
+}
+EXPORT_SYMBOL(cxgbi_ppm_ppods_reserve);
+
+void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag,
+ unsigned int tid, unsigned int offset,
+ unsigned int length,
+ struct cxgbi_pagepod_hdr *hdr)
+{
+ /* The ddp tag in pagepod should be with bit 31:30 set to 0.
+ * The ddp Tag on the wire should be with non-zero 31:30 to the peer
+ */
+ tag &= 0x3FFFFFFF;
+
+ hdr->vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
+
+ hdr->rsvd = 0;
+ hdr->pgsz_tag_clr = htonl(tag & ppm->tformat.idx_clr_mask);
+ hdr->max_offset = htonl(length);
+ hdr->page_offset = htonl(offset);
+
+ pr_debug("ippm: tag 0x%x, tid 0x%x, xfer %u, off %u.\n",
+ tag, tid, length, offset);
+}
+EXPORT_SYMBOL(cxgbi_ppm_make_ppod_hdr);
+
+static void ppm_free(struct cxgbi_ppm *ppm)
+{
+ vfree(ppm);
+}
+
+static void ppm_destroy(struct kref *kref)
+{
+ struct cxgbi_ppm *ppm = container_of(kref,
+ struct cxgbi_ppm,
+ refcnt);
+ pr_info("ippm: kref 0, destroy %s ppm 0x%p.\n",
+ ppm->ndev->name, ppm);
+
+ *ppm->ppm_pp = NULL;
+
+ free_percpu(ppm->pool);
+ ppm_free(ppm);
+}
+
+int cxgbi_ppm_release(struct cxgbi_ppm *ppm)
+{
+ if (ppm) {
+ int rv;
+
+ rv = kref_put(&ppm->refcnt, ppm_destroy);
+ return rv;
+ }
+ return 1;
+}
+
+static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
+ unsigned int *pcpu_ppmax)
+{
+ struct cxgbi_ppm_pool *pools;
+ unsigned int ppmax = (*total) / num_possible_cpus();
+ unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
+ unsigned int bmap;
+ unsigned int alloc_sz;
+ unsigned int count = 0;
+ unsigned int cpu;
+
+ /* make sure per cpu pool fits into PCPU_MIN_UNIT_SIZE */
+ if (ppmax > max)
+ ppmax = max;
+
+ /* pool size must be multiple of unsigned long */
+ bmap = BITS_TO_LONGS(ppmax);
+ ppmax = (bmap * sizeof(unsigned long)) << 3;
+
+ alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
+ pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));
+
+ if (!pools)
+ return NULL;
+
+ for_each_possible_cpu(cpu) {
+ struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);
+
+ memset(ppool, 0, alloc_sz);
+ spin_lock_init(&ppool->lock);
+ count += ppmax;
+ }
+
+ *total = count;
+ *pcpu_ppmax = ppmax;
+
+ return pools;
+}
+
+int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
+ struct pci_dev *pdev, void *lldev,
+ struct cxgbi_tag_format *tformat,
+ unsigned int ppmax,
+ unsigned int llimit,
+ unsigned int start,
+ unsigned int reserve_factor)
+{
+ struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
+ struct cxgbi_ppm_pool *pool = NULL;
+ unsigned int ppmax_pool = 0;
+ unsigned int pool_index_max = 0;
+ unsigned int alloc_sz;
+ unsigned int ppod_bmap_size;
+
+ if (ppm) {
+ pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
+ ndev->name, ppm_pp, ppm, ppm->ppmax, ppmax);
+ kref_get(&ppm->refcnt);
+ return 1;
+ }
+
+ if (reserve_factor) {
+ ppmax_pool = ppmax / reserve_factor;
+ pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+
+ pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
+ ndev->name, ppmax, ppmax_pool, pool_index_max);
+ }
+
+ ppod_bmap_size = BITS_TO_LONGS(ppmax - ppmax_pool);
+ alloc_sz = sizeof(struct cxgbi_ppm) +
+ ppmax * (sizeof(struct cxgbi_ppod_data)) +
+ ppod_bmap_size * sizeof(unsigned long);
+
+ ppm = vmalloc(alloc_sz);
+ if (!ppm)
+ goto release_ppm_pool;
+
+ memset(ppm, 0, alloc_sz);
+
+ ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]);
+
+ if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) {
+ unsigned int start = ppmax - ppmax_pool;
+ unsigned int end = ppod_bmap_size >> 3;
+
+ bitmap_set(ppm->ppod_bmap, ppmax, end - start);
+ pr_info("%s: %u - %u < %u * 8, mask extra bits %u, %u.\n",
+ __func__, ppmax, ppmax_pool, ppod_bmap_size, start,
+ end);
+ }
+
+ spin_lock_init(&ppm->map_lock);
+ kref_init(&ppm->refcnt);
+
+ memcpy(&ppm->tformat, tformat, sizeof(struct cxgbi_tag_format));
+
+ ppm->ppm_pp = ppm_pp;
+ ppm->ndev = ndev;
+ ppm->pdev = pdev;
+ ppm->lldev = lldev;
+ ppm->ppmax = ppmax;
+ ppm->next = 0;
+ ppm->llimit = llimit;
+ ppm->base_idx = start > llimit ?
+ (start - llimit + 1) >> PPOD_SIZE_SHIFT : 0;
+ ppm->bmap_index_max = ppmax - ppmax_pool;
+
+ ppm->pool = pool;
+ ppm->pool_rsvd = ppmax_pool;
+ ppm->pool_index_max = pool_index_max;
+
+ /* check one more time */
+ if (*ppm_pp) {
+ ppm_free(ppm);
+ ppm = (struct cxgbi_ppm *)(*ppm_pp);
+
+ pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
+ ndev->name, ppm_pp, *ppm_pp, ppm->ppmax, ppmax);
+
+ kref_get(&ppm->refcnt);
+ return 1;
+ }
+ *ppm_pp = ppm;
+
+ ppm->tformat.pgsz_idx_dflt = cxgbi_ppm_find_page_index(ppm, PAGE_SIZE);
+
+ pr_info("ippm %s: ppm 0x%p, 0x%p, base %u/%u, pg %lu,%u, rsvd %u,%u.\n",
+ ndev->name, ppm_pp, ppm, ppm->base_idx, ppm->ppmax, PAGE_SIZE,
+ ppm->tformat.pgsz_idx_dflt, ppm->pool_rsvd,
+ ppm->pool_index_max);
+
+ return 0;
+
+release_ppm_pool:
+ free_percpu(pool);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(cxgbi_ppm_init);
+
+unsigned int cxgbi_tagmask_set(unsigned int ppmax)
+{
+ unsigned int bits = fls(ppmax);
+
+ if (bits > PPOD_IDX_MAX_SIZE)
+ bits = PPOD_IDX_MAX_SIZE;
+
+ pr_info("ippm: ppmax %u/0x%x -> bits %u, tagmask 0x%x.\n",
+ ppmax, ppmax, bits, 1 << (bits + PPOD_IDX_SHIFT));
+
+ return 1 << (bits + PPOD_IDX_SHIFT);
+}
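
The pagepod manager above packs a pagepod index and a rolling color into the low bits of each DDP tag (PPOD_IDX_SHIFT is 6), and cxgbi_ppm_ppods_reserve() folds an optional page-size selector into bits 31:30. A minimal stand-alone sketch of that tag arithmetic, using made-up index/color/page-selector values and an assumed 16-bit index mask:

/* Illustrative only: mirrors cxgbi_ppm_make_ddp_tag() and the
 * per_tag_pg_idx handling in cxgbi_ppm_ppods_reserve(); the numeric
 * values and the 0xFFFF index mask are assumptions, not from the patch.
 */
#include <stdio.h>

#define PPOD_IDX_SHIFT	6	/* low 6 bits of a DDP tag carry the color */

int main(void)
{
	unsigned int hw_idx = 0x1234;	/* ppod index + ppm->base_idx (assumed) */
	unsigned int color  = 5;	/* rolling color from cxgbi_ppod_data */
	unsigned int pg_idx = 2;	/* per-tag page-size selector (assumed) */
	unsigned int tag;

	tag = (hw_idx << PPOD_IDX_SHIFT) | color;	/* 0x00048d05 */
	tag |= (pg_idx << 30) & 0xC0000000;		/* 0x80048d05 */

	/* recover the fields the way cxgbi_ppm_ddp_tag_get_idx() would */
	printf("tag 0x%08x -> idx 0x%x, color %u\n",
	       tag, (tag >> PPOD_IDX_SHIFT) & 0xFFFF, tag & 0x3F);
	return 0;
}
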
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h
new file mode 100644
index 000000000000..d48732673b75
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h
@@ -0,0 +1,310 @@
+/*
+ * cxgb4_ppm.h: Chelsio common library for T4/T5 iSCSI ddp operation
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#ifndef __CXGB4PPM_H__
+#define __CXGB4PPM_H__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
+
+struct cxgbi_pagepod_hdr {
+ u32 vld_tid;
+ u32 pgsz_tag_clr;
+ u32 max_offset;
+ u32 page_offset;
+ u64 rsvd;
+};
+
+#define PPOD_PAGES_MAX 4
+struct cxgbi_pagepod {
+ struct cxgbi_pagepod_hdr hdr;
+ u64 addr[PPOD_PAGES_MAX + 1];
+};
+
+/* ddp tag format
+ * for a 32-bit tag:
+ * bit #
+ * 31 ..... ..... 0
+ * X Y...Y Z...Z, where
+ * ^ ^^^^^ ^^^^
+ * | | |____ when ddp bit = 0: color bits
+ * | |
+ * | |____ when ddp bit = 0: idx into the ddp memory region
+ * |
+ * |____ ddp bit: 0 - ddp tag, 1 - non-ddp tag
+ *
+ * [page selector:2] [sw/free bits] [0] [idx] [color:6]
+ */
+
+#define DDP_PGIDX_MAX 4
+#define DDP_PGSZ_BASE_SHIFT 12 /* base page 4K */
+
+struct cxgbi_task_tag_info {
+ unsigned char flags;
+#define CXGBI_PPOD_INFO_FLAG_VALID 0x1
+#define CXGBI_PPOD_INFO_FLAG_MAPPED 0x2
+ unsigned char cid;
+ unsigned short pg_shift;
+ unsigned int npods;
+ unsigned int idx;
+ unsigned int tag;
+ struct cxgbi_pagepod_hdr hdr;
+ int nents;
+ int nr_pages;
+ struct scatterlist *sgl;
+};
+
+struct cxgbi_tag_format {
+ unsigned char pgsz_order[DDP_PGIDX_MAX];
+ unsigned char pgsz_idx_dflt;
+ unsigned char free_bits:4;
+ unsigned char color_bits:4;
+ unsigned char idx_bits;
+ unsigned char rsvd_bits;
+ unsigned int no_ddp_mask;
+ unsigned int idx_mask;
+ unsigned int color_mask;
+ unsigned int idx_clr_mask;
+ unsigned int rsvd_mask;
+};
+
+struct cxgbi_ppod_data {
+ unsigned char pg_idx:2;
+ unsigned char color:6;
+ unsigned char chan_id;
+ unsigned short npods;
+ unsigned long caller_data;
+};
+
+/* per cpu ppm pool */
+struct cxgbi_ppm_pool {
+ unsigned int base; /* base index */
+ unsigned int next; /* next possible free index */
+ spinlock_t lock; /* ppm pool lock */
+ unsigned long bmap[0];
+} ____cacheline_aligned_in_smp;
+
+struct cxgbi_ppm {
+ struct kref refcnt;
+ struct net_device *ndev; /* net_device, 1st port */
+ struct pci_dev *pdev;
+ void *lldev;
+ void **ppm_pp;
+ struct cxgbi_tag_format tformat;
+ unsigned int ppmax;
+ unsigned int llimit;
+ unsigned int base_idx;
+
+ unsigned int pool_rsvd;
+ unsigned int pool_index_max;
+ struct cxgbi_ppm_pool __percpu *pool;
+ /* map lock */
+ spinlock_t map_lock; /* ppm map lock */
+ unsigned int bmap_index_max;
+ unsigned int next;
+ unsigned long *ppod_bmap;
+ struct cxgbi_ppod_data ppod_data[0];
+};
+
+#define DDP_THRESHOLD 512
+
+#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */
+
+#define IPPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */
+#define PPOD_SIZE_SHIFT 6
+
+/* page pods are allocated in groups of this size (must be power of 2) */
+#define PPOD_CLUSTER_SIZE 16U
+
+#define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
+#define ULPMEM_IDATA_MAX_NPPODS 3 /* (PPOD_SIZE * 3 + ulptx hdr) < 256B */
+#define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
+
+#define PPOD_COLOR_SHIFT 0
+#define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT)
+
+#define PPOD_IDX_SHIFT 6
+#define PPOD_IDX_MAX_SIZE 24
+
+#define PPOD_TID_SHIFT 0
+#define PPOD_TID(x) ((x) << PPOD_TID_SHIFT)
+
+#define PPOD_TAG_SHIFT 6
+#define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT)
+
+#define PPOD_VALID_SHIFT 24
+#define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT)
+#define PPOD_VALID_FLAG PPOD_VALID(1U)
+
+#define PPOD_PI_EXTRACT_CTL_SHIFT 31
+#define PPOD_PI_EXTRACT_CTL(x) ((x) << PPOD_PI_EXTRACT_CTL_SHIFT)
+#define PPOD_PI_EXTRACT_CTL_FLAG V_PPOD_PI_EXTRACT_CTL(1U)
+
+#define PPOD_PI_TYPE_SHIFT 29
+#define PPOD_PI_TYPE_MASK 0x3
+#define PPOD_PI_TYPE(x) ((x) << PPOD_PI_TYPE_SHIFT)
+
+#define PPOD_PI_CHECK_CTL_SHIFT 27
+#define PPOD_PI_CHECK_CTL_MASK 0x3
+#define PPOD_PI_CHECK_CTL(x) ((x) << PPOD_PI_CHECK_CTL_SHIFT)
+
+#define PPOD_PI_REPORT_CTL_SHIFT 25
+#define PPOD_PI_REPORT_CTL_MASK 0x3
+#define PPOD_PI_REPORT_CTL(x) ((x) << PPOD_PI_REPORT_CTL_SHIFT)
+
+static inline int cxgbi_ppm_is_ddp_tag(struct cxgbi_ppm *ppm, u32 tag)
+{
+ return !(tag & ppm->tformat.no_ddp_mask);
+}
+
+static inline int cxgbi_ppm_sw_tag_is_usable(struct cxgbi_ppm *ppm,
+ u32 tag)
+{
+ /* the sw tag must be using <= 31 bits */
+ return !(tag & 0x80000000U);
+}
+
+static inline int cxgbi_ppm_make_non_ddp_tag(struct cxgbi_ppm *ppm,
+ u32 sw_tag,
+ u32 *final_tag)
+{
+ struct cxgbi_tag_format *tformat = &ppm->tformat;
+
+ if (!cxgbi_ppm_sw_tag_is_usable(ppm, sw_tag)) {
+ pr_info("sw_tag 0x%x NOT usable.\n", sw_tag);
+ return -EINVAL;
+ }
+
+ if (!sw_tag) {
+ *final_tag = tformat->no_ddp_mask;
+ } else {
+ unsigned int shift = tformat->idx_bits + tformat->color_bits;
+ u32 lower = sw_tag & tformat->idx_clr_mask;
+ u32 upper = (sw_tag >> shift) << (shift + 1);
+
+ *final_tag = upper | tformat->no_ddp_mask | lower;
+ }
+ return 0;
+}
+
+static inline u32 cxgbi_ppm_decode_non_ddp_tag(struct cxgbi_ppm *ppm,
+ u32 tag)
+{
+ struct cxgbi_tag_format *tformat = &ppm->tformat;
+ unsigned int shift = tformat->idx_bits + tformat->color_bits;
+ u32 lower = tag & tformat->idx_clr_mask;
+ u32 upper = (tag >> tformat->rsvd_bits) << shift;
+
+ return upper | lower;
+}
+
+static inline u32 cxgbi_ppm_ddp_tag_get_idx(struct cxgbi_ppm *ppm,
+ u32 ddp_tag)
+{
+ u32 hw_idx = (ddp_tag >> PPOD_IDX_SHIFT) &
+ ppm->tformat.idx_mask;
+
+ return hw_idx - ppm->base_idx;
+}
+
+static inline u32 cxgbi_ppm_make_ddp_tag(unsigned int hw_idx,
+ unsigned char color)
+{
+ return (hw_idx << PPOD_IDX_SHIFT) | ((u32)color);
+}
+
+static inline unsigned long
+cxgbi_ppm_get_tag_caller_data(struct cxgbi_ppm *ppm,
+ u32 ddp_tag)
+{
+ u32 idx = cxgbi_ppm_ddp_tag_get_idx(ppm, ddp_tag);
+
+ return ppm->ppod_data[idx].caller_data;
+}
+
+/* sw bits are the free bits */
+static inline int cxgbi_ppm_ddp_tag_update_sw_bits(struct cxgbi_ppm *ppm,
+ u32 val, u32 orig_tag,
+ u32 *final_tag)
+{
+ struct cxgbi_tag_format *tformat = &ppm->tformat;
+ u32 v = val >> tformat->free_bits;
+
+ if (v) {
+ pr_info("sw_bits 0x%x too large, avail bits %u.\n",
+ val, tformat->free_bits);
+ return -EINVAL;
+ }
+ if (!cxgbi_ppm_is_ddp_tag(ppm, orig_tag))
+ return -EINVAL;
+
+ *final_tag = (val << tformat->rsvd_bits) |
+ (orig_tag & ppm->tformat.rsvd_mask);
+ return 0;
+}
+
+static inline void cxgbi_ppm_ppod_clear(struct cxgbi_pagepod *ppod)
+{
+ ppod->hdr.vld_tid = 0U;
+}
+
+static inline void cxgbi_tagmask_check(unsigned int tagmask,
+ struct cxgbi_tag_format *tformat)
+{
+ unsigned int bits = fls(tagmask);
+
+ /* reserve top most 2 bits for page selector */
+ tformat->free_bits = 32 - 2 - bits;
+ tformat->rsvd_bits = bits;
+ tformat->color_bits = PPOD_IDX_SHIFT;
+ tformat->idx_bits = bits - 1 - PPOD_IDX_SHIFT;
+ tformat->no_ddp_mask = 1 << (bits - 1);
+ tformat->idx_mask = (1 << tformat->idx_bits) - 1;
+ tformat->color_mask = (1 << PPOD_IDX_SHIFT) - 1;
+ tformat->idx_clr_mask = (1 << (bits - 1)) - 1;
+ tformat->rsvd_mask = (1 << bits) - 1;
+
+ pr_info("ippm: tagmask 0x%x, rsvd %u=%u+%u+1, mask 0x%x,0x%x, "
+ "pg %u,%u,%u,%u.\n",
+ tagmask, tformat->rsvd_bits, tformat->idx_bits,
+ tformat->color_bits, tformat->no_ddp_mask, tformat->rsvd_mask,
+ tformat->pgsz_order[0], tformat->pgsz_order[1],
+ tformat->pgsz_order[2], tformat->pgsz_order[3]);
+}
+
+int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz);
+void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag,
+ unsigned int tid, unsigned int offset,
+ unsigned int length,
+ struct cxgbi_pagepod_hdr *hdr);
+void cxgbi_ppm_ppod_release(struct cxgbi_ppm *, u32 idx);
+int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *, unsigned short nr_pages,
+ u32 per_tag_pg_idx, u32 *ppod_idx, u32 *ddp_tag,
+ unsigned long caller_data);
+int cxgbi_ppm_init(void **ppm_pp, struct net_device *, struct pci_dev *,
+ void *lldev, struct cxgbi_tag_format *,
+ unsigned int ppmax, unsigned int llimit,
+ unsigned int start,
+ unsigned int reserve_factor);
+int cxgbi_ppm_release(struct cxgbi_ppm *ppm);
+void cxgbi_tagmask_check(unsigned int tagmask, struct cxgbi_tag_format *);
+unsigned int cxgbi_tagmask_set(unsigned int ppmax);
+
+#endif /*__CXGB4PPM_H__*/
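
An iSCSI driver built on this header would reserve pods for an I/O, use the returned tag as its ITT/TTT, and release the pods on completion. A hedged usage sketch of the exported API follows; the helper names, the ppm origin and the error handling are assumptions, only the prototypes come from cxgb4_ppm.h:

#include "cxgb4_ppm.h"

/* Hypothetical caller; "ppm" is assumed to come from cxgbi_ppm_init(). */
static int example_ddp_reserve(struct cxgbi_ppm *ppm, unsigned short nr_pages,
			       unsigned long io_cookie, u32 *ddp_tag)
{
	u32 ppod_idx;
	int npods;

	/* pods cover 4 pages each (PPOD_PAGES_SHIFT); the library rounds up */
	npods = cxgbi_ppm_ppods_reserve(ppm, nr_pages, 0, &ppod_idx,
					ddp_tag, io_cookie);
	if (npods < 0)
		return npods;	/* -ENOSPC/-EINVAL: fall back to a non-DDP tag */

	/* ... build and write npods pagepods, then use *ddp_tag on the wire ... */
	return 0;
}

static void example_ddp_release(struct cxgbi_ppm *ppm, u32 ddp_tag)
{
	/* map the wire tag back to its pod index and free the pods */
	cxgbi_ppm_ppod_release(ppm, cxgbi_ppm_ddp_tag_get_idx(ppm, ddp_tag));
}
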
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index cf711d5f15be..f3c58aaa932d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -191,6 +191,7 @@ static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
enum cxgb4_uld {
CXGB4_ULD_RDMA,
CXGB4_ULD_ISCSI,
+ CXGB4_ULD_ISCSIT,
CXGB4_ULD_MAX
};
@@ -212,6 +213,7 @@ struct l2t_data;
struct net_device;
struct pkt_gl;
struct tp_tcp_stats;
+struct t4_lro_mgr;
struct cxgb4_range {
unsigned int start;
@@ -273,6 +275,10 @@ struct cxgb4_lld_info {
unsigned int max_ordird_qp; /* Max ORD/IRD depth per RDMA QP */
unsigned int max_ird_adapter; /* Max IRD memory per adapter */
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
+ unsigned int iscsi_tagmask; /* iscsi ddp tag mask */
+ unsigned int iscsi_pgsz_order; /* iscsi ddp page size orders */
+ unsigned int iscsi_llimit; /* chip's iscsi region llimit */
+ void **iscsi_ppm; /* iscsi page pod manager */
int nodeid; /* device numa node id */
};
@@ -283,6 +289,11 @@ struct cxgb4_uld_info {
const struct pkt_gl *gl);
int (*state_change)(void *handle, enum cxgb4_state new_state);
int (*control)(void *handle, enum cxgb4_control control, ...);
+ int (*lro_rx_handler)(void *handle, const __be64 *rsp,
+ const struct pkt_gl *gl,
+ struct t4_lro_mgr *lro_mgr,
+ struct napi_struct *napi);
+ void (*lro_flush)(struct t4_lro_mgr *);
};
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
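
The new lro_rx_handler/lro_flush callbacks are how an upper-layer driver opts into the per-queue LRO state introduced in this series. A sketch of what a CXGB4_ULD_ISCSIT registration could look like; the handler names and bodies are hypothetical, and only the struct members and cxgb4_register_uld() come from this header:

/* Hypothetical ULD-side wiring; mandatory callbacks other than the ones
 * shown in this patch are omitted for brevity.
 */
static int my_iscsit_rx(void *handle, const __be64 *rsp,
			const struct pkt_gl *gl,
			struct t4_lro_mgr *lro_mgr,
			struct napi_struct *napi)
{
	/* parse the CPL; either aggregate onto lro_mgr->lroq or deliver now */
	return 0;	/* non-zero is counted as rxq->stats.nomem by cxgb4 */
}

static void my_iscsit_lro_flush(struct t4_lro_mgr *lro_mgr)
{
	/* complete any sessions still parked on lro_mgr->lroq */
}

static struct cxgb4_uld_info my_iscsit_uld_info = {
	/* .rx_handler would also be set for queues without a flush handler */
	.lro_rx_handler	= my_iscsit_rx,
	.lro_flush	= my_iscsit_lro_flush,
};

static int __init my_iscsit_init(void)
{
	return cxgb4_register_uld(CXGB4_ULD_ISCSIT, &my_iscsit_uld_info);
}
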
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 5b0f3ef348e9..60a26037a1c6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -48,8 +48,6 @@
#include "t4_regs.h"
#include "t4_values.h"
-#define VLAN_NONE 0xfff
-
/* identifies sync vs async L2T_WRITE_REQs */
#define SYNC_WR_S 12
#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 4e2d47ac102b..79665bd8f881 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -39,6 +39,8 @@
#include <linux/if_ether.h>
#include <linux/atomic.h>
+#define VLAN_NONE 0xfff
+
enum { L2T_SIZE = 4096 }; /* # of L2T entries */
enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index deca4a2956cc..13b144bcf725 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2157,8 +2157,11 @@ static int process_responses(struct sge_rspq *q, int budget)
while (likely(budget_left)) {
rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
- if (!is_new_response(rc, q))
+ if (!is_new_response(rc, q)) {
+ if (q->flush_handler)
+ q->flush_handler(q);
break;
+ }
dma_rmb();
rsp_type = RSPD_TYPE_G(rc->type_gen);
@@ -2544,7 +2547,8 @@ static void __iomem *bar2_address(struct adapter *adapter,
*/
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct net_device *dev, int intr_idx,
- struct sge_fl *fl, rspq_handler_t hnd, int cong)
+ struct sge_fl *fl, rspq_handler_t hnd,
+ rspq_flush_handler_t flush_hnd, int cong)
{
int ret, flsz = 0;
struct fw_iq_cmd c;
@@ -2648,6 +2652,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->size--; /* subtract status entry */
iq->netdev = dev;
iq->handler = hnd;
+ iq->flush_handler = flush_hnd;
+
+ memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
+ skb_queue_head_init(&iq->lro_mgr.lroq);
/* set offset to -1 to distinguish ingress queues without FL */
iq->offset = fl ? 0 : -1;
@@ -2992,6 +3000,7 @@ void t4_free_sge_resources(struct adapter *adap)
/* clean up RDMA and iSCSI Rx queues */
t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq);
+ t4_free_ofld_rxqs(adap, adap->sge.niscsitq, adap->sge.iscsitrxq);
t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
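
With the process_responses() hunk above, the SGE calls q->flush_handler(q) once the response queue runs dry, so sessions the ULD queued on lro_mgr->lroq get completed within the same NAPI pass. Purely as an illustration of that contract (the real ULD may hand the skbs to GRO instead, and the counter update is an assumption), a flush callback could be as small as:

/* Illustrative lro_flush body: drain the per-queue session list. */
static void example_lro_flush(struct t4_lro_mgr *lro_mgr)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&lro_mgr->lroq)) != NULL) {
		lro_mgr->lro_pkts++;		/* one aggregated super-packet */
		netif_receive_skb(skb);
	}
}
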
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 1d2d1da40c80..80417fc564d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -51,6 +51,7 @@ enum {
CPL_TX_PKT = 0xE,
CPL_L2T_WRITE_REQ = 0x12,
CPL_TID_RELEASE = 0x1A,
+ CPL_TX_DATA_ISO = 0x1F,
CPL_CLOSE_LISTSRV_RPL = 0x20,
CPL_L2T_WRITE_RPL = 0x23,
@@ -344,6 +345,87 @@ struct cpl_pass_open_rpl {
u8 status;
};
+struct tcp_options {
+ __be16 mss;
+ __u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:4;
+ __u8 unknown:1;
+ __u8:1;
+ __u8 sack:1;
+ __u8 tstamp:1;
+#else
+ __u8 tstamp:1;
+ __u8 sack:1;
+ __u8:1;
+ __u8 unknown:1;
+ __u8:4;
+#endif
+};
+
+struct cpl_pass_accept_req {
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+ __be32 hdr_len;
+ __be16 vlan;
+ __be16 l2info;
+ __be32 tos_stid;
+ struct tcp_options tcpopt;
+};
+
+/* cpl_pass_accept_req.hdr_len fields */
+#define SYN_RX_CHAN_S 0
+#define SYN_RX_CHAN_M 0xF
+#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
+#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
+
+#define TCP_HDR_LEN_S 10
+#define TCP_HDR_LEN_M 0x3F
+#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
+#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
+
+#define IP_HDR_LEN_S 16
+#define IP_HDR_LEN_M 0x3FF
+#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
+#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
+
+#define ETH_HDR_LEN_S 26
+#define ETH_HDR_LEN_M 0x1F
+#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
+#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
+
+/* cpl_pass_accept_req.l2info fields */
+#define SYN_MAC_IDX_S 0
+#define SYN_MAC_IDX_M 0x1FF
+#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
+#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
+
+#define SYN_XACT_MATCH_S 9
+#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
+#define SYN_XACT_MATCH_F SYN_XACT_MATCH_V(1U)
+
+#define SYN_INTF_S 12
+#define SYN_INTF_M 0xF
+#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
+#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
+
+enum { /* TCP congestion control algorithms */
+ CONG_ALG_RENO,
+ CONG_ALG_TAHOE,
+ CONG_ALG_NEWRENO,
+ CONG_ALG_HIGHSPEED
+};
+
+#define CONG_CNTRL_S 14
+#define CONG_CNTRL_M 0x3
+#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
+#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
+
+#define T5_ISS_S 18
+#define T5_ISS_V(x) ((x) << T5_ISS_S)
+#define T5_ISS_F T5_ISS_V(1U)
+
struct cpl_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
@@ -818,6 +900,110 @@ struct cpl_iscsi_hdr {
#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S)
#define ISCSI_DDP_F ISCSI_DDP_V(1U)
+struct cpl_rx_data_ddp {
+ union opcode_tid ot;
+ __be16 urg;
+ __be16 len;
+ __be32 seq;
+ union {
+ __be32 nxt_seq;
+ __be32 ddp_report;
+ };
+ __be32 ulp_crc;
+ __be32 ddpvld;
+};
+
+#define cpl_rx_iscsi_ddp cpl_rx_data_ddp
+
+struct cpl_iscsi_data {
+ union opcode_tid ot;
+ __u8 rsvd0[2];
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+ __u8 rsvd1;
+ __u8 status;
+};
+
+struct cpl_tx_data_iso {
+ __be32 op_to_scsi;
+ __u8 reserved1;
+ __u8 ahs_len;
+ __be16 mpdu;
+ __be32 burst_size;
+ __be32 len;
+ __be32 reserved2_seglen_offset;
+ __be32 datasn_offset;
+ __be32 buffer_offset;
+ __be32 reserved3;
+
+ /* encapsulated CPL_TX_DATA follows here */
+};
+
+/* cpl_tx_data_iso.op_to_scsi fields */
+#define CPL_TX_DATA_ISO_OP_S 24
+#define CPL_TX_DATA_ISO_OP_M 0xff
+#define CPL_TX_DATA_ISO_OP_V(x) ((x) << CPL_TX_DATA_ISO_OP_S)
+#define CPL_TX_DATA_ISO_OP_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_OP_S) & CPL_TX_DATA_ISO_OP_M)
+
+#define CPL_TX_DATA_ISO_FIRST_S 23
+#define CPL_TX_DATA_ISO_FIRST_M 0x1
+#define CPL_TX_DATA_ISO_FIRST_V(x) ((x) << CPL_TX_DATA_ISO_FIRST_S)
+#define CPL_TX_DATA_ISO_FIRST_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_FIRST_S) & CPL_TX_DATA_ISO_FIRST_M)
+#define CPL_TX_DATA_ISO_FIRST_F CPL_TX_DATA_ISO_FIRST_V(1U)
+
+#define CPL_TX_DATA_ISO_LAST_S 22
+#define CPL_TX_DATA_ISO_LAST_M 0x1
+#define CPL_TX_DATA_ISO_LAST_V(x) ((x) << CPL_TX_DATA_ISO_LAST_S)
+#define CPL_TX_DATA_ISO_LAST_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_LAST_S) & CPL_TX_DATA_ISO_LAST_M)
+#define CPL_TX_DATA_ISO_LAST_F CPL_TX_DATA_ISO_LAST_V(1U)
+
+#define CPL_TX_DATA_ISO_CPLHDRLEN_S 21
+#define CPL_TX_DATA_ISO_CPLHDRLEN_M 0x1
+#define CPL_TX_DATA_ISO_CPLHDRLEN_V(x) ((x) << CPL_TX_DATA_ISO_CPLHDRLEN_S)
+#define CPL_TX_DATA_ISO_CPLHDRLEN_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_CPLHDRLEN_S) & CPL_TX_DATA_ISO_CPLHDRLEN_M)
+#define CPL_TX_DATA_ISO_CPLHDRLEN_F CPL_TX_DATA_ISO_CPLHDRLEN_V(1U)
+
+#define CPL_TX_DATA_ISO_HDRCRC_S 20
+#define CPL_TX_DATA_ISO_HDRCRC_M 0x1
+#define CPL_TX_DATA_ISO_HDRCRC_V(x) ((x) << CPL_TX_DATA_ISO_HDRCRC_S)
+#define CPL_TX_DATA_ISO_HDRCRC_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_HDRCRC_S) & CPL_TX_DATA_ISO_HDRCRC_M)
+#define CPL_TX_DATA_ISO_HDRCRC_F CPL_TX_DATA_ISO_HDRCRC_V(1U)
+
+#define CPL_TX_DATA_ISO_PLDCRC_S 19
+#define CPL_TX_DATA_ISO_PLDCRC_M 0x1
+#define CPL_TX_DATA_ISO_PLDCRC_V(x) ((x) << CPL_TX_DATA_ISO_PLDCRC_S)
+#define CPL_TX_DATA_ISO_PLDCRC_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_PLDCRC_S) & CPL_TX_DATA_ISO_PLDCRC_M)
+#define CPL_TX_DATA_ISO_PLDCRC_F CPL_TX_DATA_ISO_PLDCRC_V(1U)
+
+#define CPL_TX_DATA_ISO_IMMEDIATE_S 18
+#define CPL_TX_DATA_ISO_IMMEDIATE_M 0x1
+#define CPL_TX_DATA_ISO_IMMEDIATE_V(x) ((x) << CPL_TX_DATA_ISO_IMMEDIATE_S)
+#define CPL_TX_DATA_ISO_IMMEDIATE_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_IMMEDIATE_S) & CPL_TX_DATA_ISO_IMMEDIATE_M)
+#define CPL_TX_DATA_ISO_IMMEDIATE_F CPL_TX_DATA_ISO_IMMEDIATE_V(1U)
+
+#define CPL_TX_DATA_ISO_SCSI_S 16
+#define CPL_TX_DATA_ISO_SCSI_M 0x3
+#define CPL_TX_DATA_ISO_SCSI_V(x) ((x) << CPL_TX_DATA_ISO_SCSI_S)
+#define CPL_TX_DATA_ISO_SCSI_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_SCSI_S) & CPL_TX_DATA_ISO_SCSI_M)
+
+/* cpl_tx_data_iso.reserved2_seglen_offset fields */
+#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_S 0
+#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_M 0xffffff
+#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_V(x) \
+ ((x) << CPL_TX_DATA_ISO_SEGLEN_OFFSET_S)
+#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_SEGLEN_OFFSET_S) & \
+ CPL_TX_DATA_ISO_SEGLEN_OFFSET_M)
+
struct cpl_rx_data {
union opcode_tid ot;
__be16 rsvd;
@@ -854,6 +1040,15 @@ struct cpl_rx_data_ack {
#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S)
#define RX_FORCE_ACK_F RX_FORCE_ACK_V(1U)
+#define RX_DACK_MODE_S 29
+#define RX_DACK_MODE_M 0x3
+#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
+#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
+
+#define RX_DACK_CHANGE_S 31
+#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
+#define RX_DACK_CHANGE_F RX_DACK_CHANGE_V(1U)
+
struct cpl_rx_pkt {
struct rss_header rsshdr;
u8 opcode;
@@ -1090,6 +1285,12 @@ struct cpl_fw4_ack {
__be64 rsvd1;
};
+enum {
+ CPL_FW4_ACK_FLAGS_SEQVAL = 0x1, /* seqn valid */
+ CPL_FW4_ACK_FLAGS_CH = 0x2, /* channel change complete */
+ CPL_FW4_ACK_FLAGS_FLOWC = 0x4, /* fw_flowc_wr complete */
+};
+
struct cpl_fw6_msg {
u8 opcode;
u8 type;
@@ -1115,6 +1316,17 @@ struct cpl_fw6_msg_ofld_connection_wr_rpl {
__u8 rsvd[2];
};
+struct cpl_tx_data {
+ union opcode_tid ot;
+ __be32 len;
+ __be32 rsvd;
+ __be32 flags;
+};
+
+/* cpl_tx_data.flags field */
+#define TX_FORCE_S 13
+#define TX_FORCE_V(x) ((x) << TX_FORCE_S)
+
enum {
ULP_TX_MEM_READ = 2,
ULP_TX_MEM_WRITE = 3,
@@ -1143,6 +1355,11 @@ struct ulptx_sgl {
struct ulptx_sge_pair sge[0];
};
+struct ulptx_idata {
+ __be32 cmd_more;
+ __be32 len;
+};
+
#define ULPTX_NSGE_S 0
#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index c8661c77b4e3..7ad6d4e75b2a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -101,6 +101,7 @@ enum fw_wr_opcodes {
FW_RI_BIND_MW_WR = 0x18,
FW_RI_FR_NSMR_WR = 0x19,
FW_RI_INV_LSTAG_WR = 0x1a,
+ FW_ISCSI_TX_DATA_WR = 0x45,
FW_LASTC2E_WR = 0x70
};
@@ -561,7 +562,12 @@ enum fw_flowc_mnem {
FW_FLOWC_MNEM_SNDBUF,
FW_FLOWC_MNEM_MSS,
FW_FLOWC_MNEM_TXDATAPLEN_MAX,
- FW_FLOWC_MNEM_SCHEDCLASS = 11,
+ FW_FLOWC_MNEM_TCPSTATE,
+ FW_FLOWC_MNEM_EOSTATE,
+ FW_FLOWC_MNEM_SCHEDCLASS,
+ FW_FLOWC_MNEM_DCBPRIO,
+ FW_FLOWC_MNEM_SND_SCALE,
+ FW_FLOWC_MNEM_RCV_SCALE,
};
struct fw_flowc_mnemval {
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 79a210aaf0bb..ea83712a6d62 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -35,6 +35,7 @@
#include "fman.h"
#include "fman_muram.h"
+#include <linux/fsl/guts.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -1871,6 +1872,90 @@ err_fm_state:
return -EINVAL;
}
+static int fman_reset(struct fman *fman)
+{
+ u32 count;
+ int err = 0;
+
+ if (fman->state->rev_info.major < 6) {
+ iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+ /* Wait for reset completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+ FPM_RSTC_FM_RESET) && --count);
+ if (count == 0)
+ err = -EBUSY;
+
+ goto _return;
+ } else {
+ struct device_node *guts_node;
+ struct ccsr_guts __iomem *guts_regs;
+ u32 devdisr2, reg;
+
+ /* Errata A007273 */
+ guts_node =
+ of_find_compatible_node(NULL, NULL,
+ "fsl,qoriq-device-config-2.0");
+ if (!guts_node) {
+ dev_err(fman->dev, "%s: Couldn't find guts node\n",
+ __func__);
+ goto guts_node;
+ }
+
+ guts_regs = of_iomap(guts_node, 0);
+ if (!guts_regs) {
+ dev_err(fman->dev, "%s: Couldn't map %s regs\n",
+ __func__, guts_node->full_name);
+ goto guts_regs;
+ }
+#define FMAN1_ALL_MACS_MASK 0xFCC00000
+#define FMAN2_ALL_MACS_MASK 0x000FCC00
+ /* Read current state */
+ devdisr2 = ioread32be(&guts_regs->devdisr2);
+ if (fman->dts_params.id == 0)
+ reg = devdisr2 & ~FMAN1_ALL_MACS_MASK;
+ else
+ reg = devdisr2 & ~FMAN2_ALL_MACS_MASK;
+
+ /* Enable all MACs */
+ iowrite32be(reg, &guts_regs->devdisr2);
+
+ /* Perform FMan reset */
+ iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+
+ /* Wait for reset completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+ FPM_RSTC_FM_RESET) && --count);
+ if (count == 0) {
+ iounmap(guts_regs);
+ of_node_put(guts_node);
+ err = -EBUSY;
+ goto _return;
+ }
+
+ /* Restore devdisr2 value */
+ iowrite32be(devdisr2, &guts_regs->devdisr2);
+
+ iounmap(guts_regs);
+ of_node_put(guts_node);
+
+ goto _return;
+
+guts_regs:
+ of_node_put(guts_node);
+guts_node:
+ dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
+ __func__);
+ }
+_return:
+ return err;
+}
+
static int fman_init(struct fman *fman)
{
struct fman_cfg *cfg = NULL;
@@ -1914,22 +1999,9 @@ static int fman_init(struct fman *fman)
fman->liodn_base[i] = liodn_base;
}
- /* FMan Reset (supported only for FMan V2) */
- if (fman->state->rev_info.major >= 6) {
- /* Errata A007273 */
- dev_dbg(fman->dev, "%s: FManV3 reset is not supported!\n",
- __func__);
- } else {
- iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
- /* Wait for reset completion */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
- FPM_RSTC_FM_RESET) && --count);
- if (count == 0)
- return -EBUSY;
- }
+ err = fman_reset(fman);
+ if (err)
+ return err;
if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) {
resume(fman->fpm_regs);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 1cbcb9fa3fb5..37d0cce392be 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -147,6 +147,8 @@ enum hnae_led_state {
#define HNSV2_TXD_BUFNUM_S 0
#define HNSV2_TXD_BUFNUM_M (0x7 << HNSV2_TXD_BUFNUM_S)
+#define HNSV2_TXD_PORTID_S 4
+#define HNSV2_TXD_PORTID_M (0X7 << HNSV2_TXD_PORTID_S)
#define HNSV2_TXD_RI_B 1
#define HNSV2_TXD_L4CS_B 2
#define HNSV2_TXD_L3CS_B 3
@@ -516,6 +518,7 @@ struct hnae_handle {
int q_num;
int vf_id;
u32 eport_id;
+ u32 dport_id; /* v2 tx bd should fill the dport_id */
enum hnae_port_type port_type;
struct list_head node; /* list to hnae_ae_dev->handle_list */
struct hnae_buf_ops *bops; /* operation for the buffer */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index d4f92ed322d6..285c893ab135 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -175,6 +175,7 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
ae_handle->phy_node = vf_cb->mac_cb->phy_node;
ae_handle->if_support = vf_cb->mac_cb->if_support;
ae_handle->port_type = vf_cb->mac_cb->mac_type;
+ ae_handle->dport_id = port_idx;
return ae_handle;
vf_id_err:
@@ -419,7 +420,10 @@ static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en)
{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en);
+ hns_mac_set_promisc(mac_cb, (u8)!!en);
}
static int hns_ae_get_autoneg(struct hnae_handle *handle)
@@ -787,7 +791,8 @@ static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key,
memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
/* update the current hash->queue mappings from the shadow RSS table */
- memcpy(indir, ppe_cb->rss_indir_table, HNS_PPEV2_RSS_IND_TBL_SIZE);
+ memcpy(indir, ppe_cb->rss_indir_table,
+ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
return 0;
}
@@ -799,10 +804,11 @@ static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir,
 /* set the RSS Hash Key if specified by the user */
if (key)
- hns_ppe_set_rss_key(ppe_cb, (int *)key);
+ hns_ppe_set_rss_key(ppe_cb, (u32 *)key);
/* update the shadow RSS table with user specified qids */
- memcpy(ppe_cb->rss_indir_table, indir, HNS_PPEV2_RSS_IND_TBL_SIZE);
+ memcpy(ppe_cb->rss_indir_table, indir,
+ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
/* now update the hardware */
hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
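The two memcpy length changes above fix a sizing bug: the RSS indirection table is an array of u32 queue ids, so passing HNS_PPEV2_RSS_IND_TBL_SIZE as a byte count copied only a quarter of the table. A minimal standalone sketch of the difference, using an illustrative entry count rather than the driver's constant:

	#include <stdint.h>
	#include <string.h>

	#define IND_TBL_ENTRIES 256	/* stand-in for HNS_PPEV2_RSS_IND_TBL_SIZE */

	static void copy_indir_table(uint32_t *dst, const uint32_t *src)
	{
		/* buggy: treats the entry count as a byte count, copying 1/4 of the table */
		/* memcpy(dst, src, IND_TBL_ENTRIES); */

		/* fixed: scale by the element size so every u32 entry is copied */
		memcpy(dst, src, IND_TBL_ENTRIES * sizeof(*dst));
	}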
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index b8517b00e706..6e2b76ede075 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -290,6 +290,24 @@ static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
return 0;
}
+static void hns_gmac_set_uc_match(void *mac_drv, u16 en)
+{
+ struct mac_driver *drv = mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_REC_FILT_CONTROL_REG,
+ GMAC_UC_MATCH_EN_B, !en);
+ dsaf_set_dev_bit(drv, GMAC_STATION_ADDR_HIGH_2_REG,
+ GMAC_ADDR_EN_B, !en);
+}
+
+static void hns_gmac_set_promisc(void *mac_drv, u8 en)
+{
+ struct mac_driver *drv = mac_drv;
+
+ if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG)
+ hns_gmac_set_uc_match(mac_drv, en);
+}
+
static void hns_gmac_init(void *mac_drv)
{
u32 port;
@@ -305,6 +323,8 @@ static void hns_gmac_init(void *mac_drv)
mdelay(10);
hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
hns_gmac_tx_loop_pkt_dis(mac_drv);
+ if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG)
+ hns_gmac_set_uc_match(mac_drv, 0);
}
void hns_gmac_update_stats(void *mac_drv)
@@ -402,14 +422,17 @@ static void hns_gmac_set_mac_addr(void *mac_drv, char *mac_addr)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
- if (drv->mac_id >= DSAF_SERVICE_NW_NUM) {
- u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
+ u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
- u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
- | (mac_addr[3] << 16) | (mac_addr[2] << 24);
- dsaf_write_dev(drv, GMAC_STATION_ADDR_LOW_2_REG, low_val);
- dsaf_write_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG, high_val);
- }
+ u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
+ | (mac_addr[3] << 16) | (mac_addr[2] << 24);
+
+ u32 val = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG);
+ u32 sta_addr_en = dsaf_get_bit(val, GMAC_ADDR_EN_B);
+
+ dsaf_write_dev(drv, GMAC_STATION_ADDR_LOW_2_REG, low_val);
+ dsaf_write_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG,
+ high_val | (sta_addr_en << GMAC_ADDR_EN_B));
}
static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode,
@@ -699,6 +722,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
mac_drv->get_sset_count = hns_gmac_get_sset_count;
mac_drv->get_strings = hns_gmac_get_strings;
mac_drv->update_stats = hns_gmac_update_stats;
+ mac_drv->set_promiscuous = hns_gmac_set_promisc;
return (void *)mac_drv;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 5ef0e96e918a..a38084a22bf2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -467,8 +467,13 @@ int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu)
struct mac_driver *drv = hns_mac_get_drv(mac_cb);
u32 buf_size = mac_cb->dsaf_dev->buf_size;
u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ u32 max_frm = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver) ?
+ MAC_MAX_MTU : MAC_MAX_MTU_V2;
- if ((new_mtu < MAC_MIN_MTU) || (new_frm > MAC_MAX_MTU) ||
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+ max_frm = MAC_MAX_MTU_DBG;
+
+ if ((new_mtu < MAC_MIN_MTU) || (new_frm > max_frm) ||
(new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size))
return -EINVAL;
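As a worked example of the check above: with new_mtu = 9000, new_frm = 9000 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 9022 bytes, which fits under both MAC_MAX_MTU (9600, V1 hardware) and MAC_MAX_MTU_V2 (9728), but a debug port remains limited to MAC_MAX_MTU_DBG, i.e. the default frame size, so the same MTU is rejected there.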
@@ -861,6 +866,14 @@ int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset)
return mac_ctrl_drv->get_sset_count(stringset);
}
+void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_ctrl_drv->set_promiscuous)
+ mac_ctrl_drv->set_promiscuous(mac_ctrl_drv, en);
+}
+
int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb)
{
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 0b052191d751..823b6e78c8aa 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -26,7 +26,9 @@ struct dsaf_device;
#define MAC_DEFAULT_MTU (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
#define MAC_MAX_MTU 9600
+#define MAC_MAX_MTU_V2 9728
#define MAC_MIN_MTU 68
+#define MAC_MAX_MTU_DBG MAC_DEFAULT_MTU
#define MAC_DEFAULT_PAUSE_TIME 0xff
@@ -365,7 +367,7 @@ struct mac_driver {
/*config rx pause enable*/
void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable);
/* config rx mode for promiscuous*/
- int (*set_promiscuous)(void *mac_drv, u8 enable);
+ void (*set_promiscuous)(void *mac_drv, u8 enable);
/* get mac id */
void (*mac_get_id)(void *mac_drv, u8 *mac_id);
void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en);
@@ -453,4 +455,6 @@ int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb);
void hns_set_led_opt(struct hns_mac_cb *mac_cb);
int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status);
+void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en);
+
#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 38fc5be3870c..5c1ac9ba1bf2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -748,8 +748,9 @@ static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev)
*/
static void hns_dsaf_rocee_bp_en(struct dsaf_device *dsaf_dev)
{
- dsaf_set_dev_bit(dsaf_dev, DSAF_XGE_CTRL_SIG_CFG_0_REG,
- DSAF_FC_XGE_TX_PAUSE_S, 1);
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ dsaf_set_dev_bit(dsaf_dev, DSAF_XGE_CTRL_SIG_CFG_0_REG,
+ DSAF_FC_XGE_TX_PAUSE_S, 1);
}
/* set msk for dsaf exception irq*/
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index f302ef9073c6..5b7ae5ff43e8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -27,7 +27,7 @@ void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value)
void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb,
const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM])
{
- int key_item = 0;
+ u32 key_item;
for (key_item = 0; key_item < HNS_PPEV2_RSS_KEY_NUM; key_item++)
dsaf_write_dev(ppe_cb, PPEV2_RSS_KEY_REG + key_item * 0x4,
@@ -343,6 +343,9 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
hns_ppe_set_vlan_strip(ppe_cb, 0);
+ dsaf_write_dev(ppe_cb, PPE_CFG_MAX_FRAME_LEN_REG,
+ HNS_PPEV2_MAX_FRAME_LEN);
+
/* set default RSS key in h/w */
hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index 0f5cb6962acf..e9c0ec2fa0dd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -30,6 +30,8 @@
#define HNS_PPEV2_RSS_KEY_SIZE 40 /* in bytes or 320 bits */
#define HNS_PPEV2_RSS_KEY_NUM (HNS_PPEV2_RSS_KEY_SIZE / sizeof(u32))
+#define HNS_PPEV2_MAX_FRAME_LEN 0x980
+
enum ppe_qid_mode {
PPE_QID_MODE0 = 0, /* fixed queue id mode */
PPE_QID_MODE1, /* switch:128VM non switch:6Port/4VM/4TC */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 60d695daa471..bf62687e5ea7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -922,6 +922,8 @@
#define GMAC_LP_REG_CF2MI_LP_EN_B 2
#define GMAC_MODE_CHANGE_EB_B 0
+#define GMAC_UC_MATCH_EN_B 0
+#define GMAC_ADDR_EN_B 16
#define GMAC_RECV_CTRL_STRIP_PAD_EN_B 3
#define GMAC_RECV_CTRL_RUNT_PKT_EN_B 4
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 802d55457f19..fd90f3737963 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -7,7 +7,7 @@
* (at your option) any later version.
*/
-#include <asm-generic/io-64-nonatomic-hi-lo.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/of_mdio.h>
#include "hns_dsaf_main.h"
#include "hns_dsaf_mac.h"
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 3f77ff77abbc..71aa37b4b338 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -48,7 +48,6 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
struct iphdr *iphdr;
struct ipv6hdr *ipv6hdr;
struct sk_buff *skb;
- int skb_tmp_len;
__be16 protocol;
u8 bn_pid = 0;
u8 rrcfv = 0;
@@ -66,10 +65,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
desc->addr = cpu_to_le64(dma);
desc->tx.send_size = cpu_to_le16((u16)size);
- /*config bd buffer end */
+ /* config bd buffer end */
hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);
+ /* fill port_id in the tx bd for sending management pkts */
+ hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
+ HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);
+
if (type == DESC_TYPE_SKB) {
skb = (struct sk_buff *)priv;
@@ -90,13 +93,13 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
/* check for tcp/udp header */
- if (iphdr->protocol == IPPROTO_TCP) {
+ if (iphdr->protocol == IPPROTO_TCP &&
+ skb_is_gso(skb)) {
hnae_set_bit(tvsvsn,
HNSV2_TXD_TSE_B, 1);
- skb_tmp_len = SKB_TMP_LEN(skb);
l4_len = tcp_hdrlen(skb);
- mss = mtu - skb_tmp_len - ETH_FCS_LEN;
- paylen = skb->len - skb_tmp_len;
+ mss = skb_shinfo(skb)->gso_size;
+ paylen = skb->len - SKB_TMP_LEN(skb);
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
@@ -104,13 +107,13 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
/* check for tcp/udp header */
- if (ipv6hdr->nexthdr == IPPROTO_TCP) {
+ if (ipv6hdr->nexthdr == IPPROTO_TCP &&
+ skb_is_gso(skb) && skb_is_gso_v6(skb)) {
hnae_set_bit(tvsvsn,
HNSV2_TXD_TSE_B, 1);
- skb_tmp_len = SKB_TMP_LEN(skb);
l4_len = tcp_hdrlen(skb);
- mss = mtu - skb_tmp_len - ETH_FCS_LEN;
- paylen = skb->len - skb_tmp_len;
+ mss = skb_shinfo(skb)->gso_size;
+ paylen = skb->len - SKB_TMP_LEN(skb);
}
}
desc->tx.ip_offset = ip_offset;
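The change above ties the TSE bit to skb_is_gso() and takes the MSS from the skb itself instead of recomputing it from the MTU, so non-GSO TCP frames are no longer segmented and GSO frames use the segment size the stack chose. Condensed into a rough sketch of the IPv4 path (an illustrative restatement, not additional driver code):

	if (iphdr->protocol == IPPROTO_TCP && skb_is_gso(skb)) {
		hnae_set_bit(tvsvsn, HNSV2_TXD_TSE_B, 1);
		l4_len = tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;	/* segment size chosen by the stack */
		paylen = skb->len - SKB_TMP_LEN(skb);
	}
	/* non-GSO frames keep TSE clear and rely on checksum offload only */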
@@ -564,6 +567,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
struct sk_buff *skb;
struct hnae_desc *desc;
struct hnae_desc_cb *desc_cb;
+ struct ethhdr *eh;
unsigned char *va;
int bnum, length, i;
int pull_len;
@@ -670,6 +674,14 @@ out_bnum_err:
return -EFAULT;
}
+ /* filter out multicast pkt with the same src mac as this port */
+ eh = eth_hdr(skb);
+ if (unlikely(is_multicast_ether_addr(eh->h_dest) &&
+ ether_addr_equal(ndev->dev_addr, eh->h_source))) {
+ dev_kfree_skb_any(skb);
+ return -EFAULT;
+ }
+
ring->stats.rx_pkts++;
ring->stats.rx_bytes += skb->len;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3c4a3bc31a89..9c3ba65988e1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1173,18 +1173,15 @@ hns_get_rss_key_size(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_ae_ops *ops;
- u32 ret;
if (AE_IS_VER1(priv->enet_ver)) {
netdev_err(netdev,
"RSS feature is not supported on this hardware\n");
- return -EOPNOTSUPP;
+ return 0;
}
ops = priv->ae_handle->dev->ops;
- ret = ops->get_rss_key_size(priv->ae_handle);
-
- return ret;
+ return ops->get_rss_key_size(priv->ae_handle);
}
static u32
@@ -1192,18 +1189,15 @@ hns_get_rss_indir_size(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_ae_ops *ops;
- u32 ret;
if (AE_IS_VER1(priv->enet_ver)) {
netdev_err(netdev,
"RSS feature is not supported on this hardware\n");
- return -EOPNOTSUPP;
+ return 0;
}
ops = priv->ae_handle->dev->ops;
- ret = ops->get_rss_indir_size(priv->ae_handle);
-
- return ret;
+ return ops->get_rss_indir_size(priv->ae_handle);
}
static int
@@ -1211,7 +1205,6 @@ hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_ae_ops *ops;
- int ret;
if (AE_IS_VER1(priv->enet_ver)) {
netdev_err(netdev,
@@ -1224,9 +1217,7 @@ hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
if (!indir)
return 0;
- ret = ops->get_rss(priv->ae_handle, indir, key, hfunc);
-
- return 0;
+ return ops->get_rss(priv->ae_handle, indir, key, hfunc);
}
static int
@@ -1235,7 +1226,6 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_ae_ops *ops;
- int ret;
if (AE_IS_VER1(priv->enet_ver)) {
netdev_err(netdev,
@@ -1252,7 +1242,22 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
if (!indir)
return 0;
- ret = ops->set_rss(priv->ae_handle, indir, key, hfunc);
+ return ops->set_rss(priv->ae_handle, indir, key, hfunc);
+}
+
+static int hns_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->ae_handle->q_num;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
return 0;
}
@@ -1280,6 +1285,7 @@ static struct ethtool_ops hns_ethtool_ops = {
.get_rxfh_indir_size = hns_get_rss_indir_size,
.get_rxfh = hns_get_rss,
.set_rxfh = hns_set_rss,
+ .get_rxnfc = hns_get_rxnfc,
};
void hns_ethtool_set_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index b4729ba57c9c..3b3c63e54ed6 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -41,6 +41,7 @@ i40e-objs := i40e_main.o \
i40e_diag.o \
i40e_txrx.o \
i40e_ptp.o \
+ i40e_client.o \
i40e_virtchnl_pf.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2f6210ae8ba0..1ce6e9c0427d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -58,6 +58,7 @@
#ifdef I40E_FCOE
#include "i40e_fcoe.h"
#endif
+#include "i40e_client.h"
#include "i40e_virtchnl.h"
#include "i40e_virtchnl_pf.h"
#include "i40e_txrx.h"
@@ -190,6 +191,7 @@ struct i40e_lump_tracking {
u16 search_hint;
u16 list[0];
#define I40E_PILE_VALID_BIT 0x8000
+#define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
};
#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
@@ -282,6 +284,8 @@ struct i40e_pf {
#endif /* I40E_FCOE */
u16 num_lan_qps; /* num lan queues this PF has set up */
u16 num_lan_msix; /* num queue vectors for the base PF vsi */
+ u16 num_iwarp_msix; /* num of iwarp vectors for this PF */
+ int iwarp_base_vector;
int queues_left; /* queues left unclaimed */
u16 alloc_rss_size; /* allocated RSS queues */
u16 rss_size_max; /* HW defined max RSS queues */
@@ -329,6 +333,7 @@ struct i40e_pf {
#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13)
#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
+#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
#define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17)
#define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18)
#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19)
@@ -571,6 +576,8 @@ struct i40e_vsi {
struct kobject *kobj; /* sysfs object */
bool current_isup; /* Sync 'link up' logging */
+ void *priv; /* client driver data reference. */
+
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
@@ -728,6 +735,10 @@ void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt,
u8 enabled_tc, bool is_add);
#endif
+void i40e_service_event_schedule(struct i40e_pf *pf);
+void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
+ u8 *msg, u16 len);
+
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
@@ -750,6 +761,17 @@ static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
+/* needed by client drivers */
+int i40e_lan_add_device(struct i40e_pf *pf);
+int i40e_lan_del_device(struct i40e_pf *pf);
+void i40e_client_subtask(struct i40e_pf *pf);
+void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
+void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi);
+void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
+void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
+void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
+ enum i40e_client_type type);
/**
* i40e_irq_dynamic_enable - Enable default interrupt generation settings
* @vsi: pointer to a vsi
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
new file mode 100644
index 000000000000..0e6ac841321c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -0,0 +1,1012 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <linux/errno.h>
+
+#include "i40e.h"
+#include "i40e_prototype.h"
+#include "i40e_client.h"
+
+static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
+
+static LIST_HEAD(i40e_devices);
+static DEFINE_MUTEX(i40e_device_mutex);
+
+static LIST_HEAD(i40e_clients);
+static DEFINE_MUTEX(i40e_client_mutex);
+
+static LIST_HEAD(i40e_client_instances);
+static DEFINE_MUTEX(i40e_client_instance_mutex);
+
+static int i40e_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 vf_id, u8 *msg, u16 len);
+
+static int i40e_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info);
+
+static void i40e_client_request_reset(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 reset_level);
+
+static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ struct i40e_client *client,
+ bool is_vf, u32 vf_id,
+ u32 flag, u32 valid_flag);
+
+static struct i40e_ops i40e_lan_ops = {
+ .virtchnl_send = i40e_client_virtchnl_send,
+ .setup_qvlist = i40e_client_setup_qvlist,
+ .request_reset = i40e_client_request_reset,
+ .update_vsi_ctxt = i40e_client_update_vsi_ctxt,
+};
+
+/**
+ * i40e_client_type_to_vsi_type - convert client type to vsi type
+ * @client_type: the i40e_client type
+ *
+ * returns the related vsi type value
+ **/
+static
+enum i40e_vsi_type i40e_client_type_to_vsi_type(enum i40e_client_type type)
+{
+ switch (type) {
+ case I40E_CLIENT_IWARP:
+ return I40E_VSI_IWARP;
+
+ case I40E_CLIENT_VMDQ2:
+ return I40E_VSI_VMDQ2;
+
+ default:
+ pr_err("i40e: Client type unknown\n");
+ return I40E_VSI_TYPE_UNKNOWN;
+ }
+}
+
+/**
+ * i40e_client_get_params - Get the params that can change at runtime
+ * @vsi: the VSI to get the parameters from
+ * @params: client param struct
+ *
+ **/
+static
+int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
+{
+ struct i40e_dcbx_config *dcb_cfg = &vsi->back->hw.local_dcbx_config;
+ int i = 0;
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ u8 tc = dcb_cfg->etscfg.prioritytable[i];
+ u16 qs_handle;
+
+ /* If TC is not enabled for VSI use TC0 for UP */
+ if (!(vsi->tc_config.enabled_tc & BIT(tc)))
+ tc = 0;
+
+ qs_handle = le16_to_cpu(vsi->info.qs_handle[tc]);
+ params->qos.prio_qos[i].tc = tc;
+ params->qos.prio_qos[i].qs_handle = qs_handle;
+ if (qs_handle == I40E_AQ_VSI_QS_HANDLE_INVALID) {
+ dev_err(&vsi->back->pdev->dev, "Invalid queue set handle for TC = %d, vsi id = %d\n",
+ tc, vsi->id);
+ return -EINVAL;
+ }
+ }
+
+ params->mtu = vsi->netdev->mtu;
+ return 0;
+}
+
+/**
+ * i40e_notify_client_of_vf_msg - call the client vf message callback
+ * @vsi: the VSI with the message
+ * @vf_id: the absolute VF id that sent the message
+ * @msg: message buffer
+ * @len: length of the message
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void
+i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
+{
+ struct i40e_client_instance *cdev;
+
+ if (!vsi)
+ return;
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if (cdev->lan_info.pf == vsi->back) {
+ if (!cdev->client ||
+ !cdev->client->ops ||
+ !cdev->client->ops->virtchnl_receive) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance virtual channel receive routine\n");
+ continue;
+ }
+ cdev->client->ops->virtchnl_receive(&cdev->lan_info,
+ cdev->client,
+ vf_id, msg, len);
+ }
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_notify_client_of_l2_param_changes - call the client notify callback
+ * @vsi: the VSI with l2 param changes
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
+{
+ struct i40e_client_instance *cdev;
+ struct i40e_params params;
+
+ if (!vsi)
+ return;
+ memset(&params, 0, sizeof(params));
+ i40e_client_get_params(vsi, &params);
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if (cdev->lan_info.pf == vsi->back) {
+ if (!cdev->client ||
+ !cdev->client->ops ||
+ !cdev->client->ops->l2_param_change) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance l2_param_change routine\n");
+ continue;
+ }
+ cdev->lan_info.params = params;
+ cdev->client->ops->l2_param_change(&cdev->lan_info,
+ cdev->client,
+ &params);
+ }
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_notify_client_of_netdev_open - call the client open callback
+ * @vsi: the VSI with netdev opened
+ *
+ * If there is a client to this netdev, call the client with open
+ **/
+void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
+{
+ struct i40e_client_instance *cdev;
+
+ if (!vsi)
+ return;
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if (cdev->lan_info.netdev == vsi->netdev) {
+ if (!cdev->client ||
+ !cdev->client->ops || !cdev->client->ops->open) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance open routine\n");
+ continue;
+ }
+ cdev->client->ops->open(&cdev->lan_info, cdev->client);
+ }
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_client_release_qvlist
+ * @ldev: pointer to L2 context.
+ *
+ **/
+static void i40e_client_release_qvlist(struct i40e_info *ldev)
+{
+ struct i40e_qvlist_info *qvlist_info = ldev->qvlist_info;
+ u32 i;
+
+ if (!ldev->qvlist_info)
+ return;
+
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_qv_info *qv_info;
+ u32 reg_idx;
+
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ reg_idx = I40E_PFINT_LNKLSTN(qv_info->v_idx - 1);
+ wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ }
+ kfree(ldev->qvlist_info);
+ ldev->qvlist_info = NULL;
+}
+
+/**
+ * i40e_notify_client_of_netdev_close - call the client close callback
+ * @vsi: the VSI with netdev closed
+ * @reset: true when close called due to a reset pending
+ *
+ * If there is a client to this netdev, call the client with close
+ **/
+void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
+{
+ struct i40e_client_instance *cdev;
+
+ if (!vsi)
+ return;
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if (cdev->lan_info.netdev == vsi->netdev) {
+ if (!cdev->client ||
+ !cdev->client->ops || !cdev->client->ops->close) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance close routine\n");
+ continue;
+ }
+ cdev->client->ops->close(&cdev->lan_info, cdev->client,
+ reset);
+ i40e_client_release_qvlist(&cdev->lan_info);
+ }
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_notify_client_of_vf_reset - call the client vf reset callback
+ * @pf: PF device pointer
+ * @vf_id: absolute id of VF being reset
+ *
+ * If there is a client attached to this PF, notify when a VF is reset
+ **/
+void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
+{
+ struct i40e_client_instance *cdev;
+
+ if (!pf)
+ return;
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if (cdev->lan_info.pf == pf) {
+ if (!cdev->client ||
+ !cdev->client->ops ||
+ !cdev->client->ops->vf_reset) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF reset routine\n");
+ continue;
+ }
+ cdev->client->ops->vf_reset(&cdev->lan_info,
+ cdev->client, vf_id);
+ }
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_notify_client_of_vf_enable - call the client vf notification callback
+ * @pf: PF device pointer
+ * @num_vfs: the number of VFs currently enabled, 0 for disable
+ *
+ * If there is a client attached to this PF, call its VF notification routine
+ **/
+void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
+{
+ struct i40e_client_instance *cdev;
+
+ if (!pf)
+ return;
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if (cdev->lan_info.pf == pf) {
+ if (!cdev->client ||
+ !cdev->client->ops ||
+ !cdev->client->ops->vf_enable) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF enable routine\n");
+ continue;
+ }
+ cdev->client->ops->vf_enable(&cdev->lan_info,
+ cdev->client, num_vfs);
+ }
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_vf_client_capable - ask the client whether it supports the specified VF
+ * @pf: PF device pointer
+ * @vf_id: the VF in question
+ *
+ * If there is a client of the specified type attached to this PF, call
+ * its vf_capable routine
+ **/
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
+ enum i40e_client_type type)
+{
+ struct i40e_client_instance *cdev;
+ int capable = false;
+
+ if (!pf)
+ return false;
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if (cdev->lan_info.pf == pf) {
+ if (!cdev->client ||
+ !cdev->client->ops ||
+ !cdev->client->ops->vf_capable ||
+ !(cdev->client->type == type)) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF capability routine\n");
+ continue;
+ }
+ capable = cdev->client->ops->vf_capable(&cdev->lan_info,
+ cdev->client,
+ vf_id);
+ break;
+ }
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+ return capable;
+}
+
+/**
+ * i40e_vsi_lookup - finds a matching VSI from the PF list starting at start_vsi
+ * @pf: board private structure
+ * @type: vsi type
+ * @start_vsi: a VSI pointer from where to start the search
+ *
+ * Returns non NULL on success or NULL for failure
+ **/
+struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
+ enum i40e_vsi_type type,
+ struct i40e_vsi *start_vsi)
+{
+ struct i40e_vsi *vsi;
+ int i = 0;
+
+ if (start_vsi) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ vsi = pf->vsi[i];
+ if (vsi == start_vsi)
+ break;
+ }
+ }
+ for (; i < pf->num_alloc_vsi; i++) {
+ vsi = pf->vsi[i];
+ if (vsi && vsi->type == type)
+ return vsi;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40e_client_add_instance - add a client instance struct to the instance list
+ * @pf: pointer to the board struct
+ * @client: pointer to a client struct in the client list.
+ *
+ * Returns cdev ptr on success, NULL on failure
+ **/
+static
+struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
+ struct i40e_client *client)
+{
+ struct i40e_client_instance *cdev;
+ struct netdev_hw_addr *mac = NULL;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
+ cdev = NULL;
+ goto out;
+ }
+ }
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ goto out;
+
+ cdev->lan_info.pf = (void *)pf;
+ cdev->lan_info.netdev = vsi->netdev;
+ cdev->lan_info.pcidev = pf->pdev;
+ cdev->lan_info.fid = pf->hw.pf_id;
+ cdev->lan_info.ftype = I40E_CLIENT_FTYPE_PF;
+ cdev->lan_info.hw_addr = pf->hw.hw_addr;
+ cdev->lan_info.ops = &i40e_lan_ops;
+ cdev->lan_info.version.major = I40E_CLIENT_VERSION_MAJOR;
+ cdev->lan_info.version.minor = I40E_CLIENT_VERSION_MINOR;
+ cdev->lan_info.version.build = I40E_CLIENT_VERSION_BUILD;
+ cdev->lan_info.fw_maj_ver = pf->hw.aq.fw_maj_ver;
+ cdev->lan_info.fw_min_ver = pf->hw.aq.fw_min_ver;
+ cdev->lan_info.fw_build = pf->hw.aq.fw_build;
+ set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state);
+
+ if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
+ kfree(cdev);
+ cdev = NULL;
+ goto out;
+ }
+
+ cdev->lan_info.msix_count = pf->num_iwarp_msix;
+ cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
+
+ mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+ if (mac)
+ ether_addr_copy(cdev->lan_info.lanmac, mac->addr);
+ else
+ dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
+
+ cdev->client = client;
+ INIT_LIST_HEAD(&cdev->list);
+ list_add(&cdev->list, &i40e_client_instances);
+out:
+ mutex_unlock(&i40e_client_instance_mutex);
+ return cdev;
+}
+
+/**
+ * i40e_client_del_instance - removes a client instance from the list
+ * @pf: pointer to the board struct
+ * @client: client to delete
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+static
+int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client)
+{
+ struct i40e_client_instance *cdev, *tmp;
+ int ret = -ENODEV;
+
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
+ if ((cdev->lan_info.pf != pf) || (cdev->client != client))
+ continue;
+
+ dev_info(&pf->pdev->dev, "Deleted instance of Client %s, of dev %d bus=0x%02x func=0x%02x\n",
+ client->name, pf->hw.pf_id,
+ pf->hw.bus.device, pf->hw.bus.func);
+ list_del(&cdev->list);
+ kfree(cdev);
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+ return ret;
+}
+
+/**
+ * i40e_client_subtask - client maintenance work
+ * @pf: board private structure
+ **/
+void i40e_client_subtask(struct i40e_pf *pf)
+{
+ struct i40e_client_instance *cdev;
+ struct i40e_client *client;
+ int ret = 0;
+
+ if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
+ return;
+ pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+
+ /* If we're down or resetting, just bail */
+ if (test_bit(__I40E_DOWN, &pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ return;
+
+ /* Check client state and instantiate client if client registered */
+ mutex_lock(&i40e_client_mutex);
+ list_for_each_entry(client, &i40e_clients, list) {
+ /* first check client is registered */
+ if (!test_bit(__I40E_CLIENT_REGISTERED, &client->state))
+ continue;
+
+ /* Unless the client launches on probe, the LAN VSI must also be up */
+ if (!(client->flags & I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE)) {
+ /* check if L2 VSI is up, if not we are not ready */
+ if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
+ continue;
+ }
+
+ /* Add the client instance to the instance list */
+ cdev = i40e_client_add_instance(pf, client);
+ if (!cdev)
+ continue;
+
+ /* Also bump the ref_cnt for the number of instances of this client */
+ atomic_inc(&client->ref_cnt);
+ dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
+ client->name, pf->hw.pf_id,
+ pf->hw.bus.device, pf->hw.bus.func);
+
+ /* Send an Open request to the client */
+ atomic_inc(&cdev->ref_cnt);
+ if (client->ops && client->ops->open)
+ ret = client->ops->open(&cdev->lan_info, client);
+ atomic_dec(&cdev->ref_cnt);
+ if (!ret) {
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ } else {
+ /* remove client instance */
+ i40e_client_del_instance(pf, client);
+ atomic_dec(&client->ref_cnt);
+ continue;
+ }
+ }
+ mutex_unlock(&i40e_client_mutex);
+}
+
+/**
+ * i40e_lan_add_device - add a lan device struct to the list of lan devices
+ * @pf: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40e_lan_add_device(struct i40e_pf *pf)
+{
+ struct i40e_device *ldev;
+ int ret = 0;
+
+ mutex_lock(&i40e_device_mutex);
+ list_for_each_entry(ldev, &i40e_devices, list) {
+ if (ldev->pf == pf) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ldev->pf = pf;
+ INIT_LIST_HEAD(&ldev->list);
+ list_add(&ldev->list, &i40e_devices);
+ dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x func=0x%02x\n",
+ pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
+
+ /* A client may already have registered before this device was added, so
+ * schedule a subtask to go and initiate the clients if any.
+ */
+ pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ i40e_service_event_schedule(pf);
+
+out:
+ mutex_unlock(&i40e_device_mutex);
+ return ret;
+}
+
+/**
+ * i40e_lan_del_device - removes a lan device from the device list
+ * @pf: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40e_lan_del_device(struct i40e_pf *pf)
+{
+ struct i40e_device *ldev, *tmp;
+ int ret = -ENODEV;
+
+ mutex_lock(&i40e_device_mutex);
+ list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
+ if (ldev->pf == pf) {
+ dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x func=0x%02x\n",
+ pf->hw.pf_id, pf->hw.bus.device,
+ pf->hw.bus.func);
+ list_del(&ldev->list);
+ kfree(ldev);
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&i40e_device_mutex);
+ return ret;
+}
+
+/**
+ * i40e_client_release - release client specific resources
+ * @client: pointer to the registered client
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_release(struct i40e_client *client)
+{
+ struct i40e_client_instance *cdev, *tmp;
+ struct i40e_pf *pf = NULL;
+ int ret = 0;
+
+ LIST_HEAD(cdevs_tmp);
+
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
+ if (strncmp(cdev->client->name, client->name,
+ I40E_CLIENT_STR_LENGTH))
+ continue;
+ if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ if (atomic_read(&cdev->ref_cnt) > 0) {
+ ret = I40E_ERR_NOT_READY;
+ goto out;
+ }
+ pf = (struct i40e_pf *)cdev->lan_info.pf;
+ if (client->ops && client->ops->close)
+ client->ops->close(&cdev->lan_info, client,
+ false);
+ i40e_client_release_qvlist(&cdev->lan_info);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+
+ dev_warn(&pf->pdev->dev,
+ "Client %s instance for PF id %d closed\n",
+ client->name, pf->hw.pf_id);
+ }
+ /* delete the client instance from the list */
+ list_del(&cdev->list);
+ list_add(&cdev->list, &cdevs_tmp);
+ atomic_dec(&client->ref_cnt);
+ dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
+ client->name);
+ }
+out:
+ mutex_unlock(&i40e_client_instance_mutex);
+
+ /* free the client device and release its vsi */
+ list_for_each_entry_safe(cdev, tmp, &cdevs_tmp, list) {
+ kfree(cdev);
+ }
+ return ret;
+}
+
+/**
+ * i40e_client_prepare - prepare client specific resources
+ * @client: pointer to the registered client
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_prepare(struct i40e_client *client)
+{
+ struct i40e_device *ldev;
+ struct i40e_pf *pf;
+ int ret = 0;
+
+ mutex_lock(&i40e_device_mutex);
+ list_for_each_entry(ldev, &i40e_devices, list) {
+ pf = ldev->pf;
+ /* Start the client subtask */
+ pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ i40e_service_event_schedule(pf);
+ }
+ mutex_unlock(&i40e_device_mutex);
+ return ret;
+}
+
+/**
+ * i40e_client_virtchnl_send - send a message to a VF over the virtual channel
+ * @ldev: pointer to L2 context
+ * @client: Client pointer
+ * @vf_id: absolute VF identifier
+ * @msg: message buffer
+ * @len: length of message buffer
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 vf_id, u8 *msg, u16 len)
+{
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status err;
+
+ err = i40e_aq_send_msg_to_vf(hw, vf_id, I40E_VIRTCHNL_OP_IWARP,
+ 0, msg, len, NULL);
+ if (err)
+ dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
+ err, hw->aq.asq_last_status);
+
+ return err;
+}
+
+/**
+ * i40e_client_setup_qvlist
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @qvlist_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info)
+{
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_qv_info *qv_info;
+ u32 v_idx, i, reg_idx, reg;
+ u32 size;
+
+ size = sizeof(struct i40e_qvlist_info) +
+ (sizeof(struct i40e_qv_info) * (qvlist_info->num_vectors - 1));
+ ldev->qvlist_info = kzalloc(size, GFP_KERNEL);
+ if (!ldev->qvlist_info)
+ 	return -ENOMEM;
+ ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
+
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ v_idx = qv_info->v_idx;
+
+ /* Validate vector id belongs to this client */
+ if ((v_idx >= (pf->iwarp_base_vector + pf->num_iwarp_msix)) ||
+ (v_idx < pf->iwarp_base_vector))
+ goto err;
+
+ ldev->qvlist_info->qv_info[i] = *qv_info;
+ reg_idx = I40E_PFINT_LNKLSTN(v_idx - 1);
+
+ if (qv_info->ceq_idx == I40E_QUEUE_INVALID_IDX) {
+ /* Special case - No CEQ mapped on this vector */
+ wr32(hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ } else {
+ reg = (qv_info->ceq_idx &
+ I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
+ (I40E_QUEUE_TYPE_PE_CEQ <<
+ I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+ wr32(hw, reg_idx, reg);
+
+ reg = (I40E_PFINT_CEQCTL_CAUSE_ENA_MASK |
+ (v_idx << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) |
+ (qv_info->itr_idx <<
+ I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) |
+ (I40E_QUEUE_END_OF_LIST <<
+ I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT));
+ wr32(hw, I40E_PFINT_CEQCTL(qv_info->ceq_idx), reg);
+ }
+ if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
+ reg = (I40E_PFINT_AEQCTL_CAUSE_ENA_MASK |
+ (v_idx << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) |
+ (qv_info->itr_idx <<
+ I40E_PFINT_AEQCTL_ITR_INDX_SHIFT));
+
+ wr32(hw, I40E_PFINT_AEQCTL, reg);
+ }
+ }
+
+ return 0;
+err:
+ kfree(ldev->qvlist_info);
+ ldev->qvlist_info = NULL;
+ return -EINVAL;
+}
+
+/**
+ * i40e_client_request_reset
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @reset_level: reset level
+ **/
+static void i40e_client_request_reset(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 reset_level)
+{
+ struct i40e_pf *pf = ldev->pf;
+
+ switch (reset_level) {
+ case I40E_CLIENT_RESET_LEVEL_PF:
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ break;
+ case I40E_CLIENT_RESET_LEVEL_CORE:
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ break;
+ default:
+ dev_warn(&pf->pdev->dev,
+ "Client %s instance for PF id %d requested an unsupported reset: %d.\n",
+ client->name, pf->hw.pf_id, reset_level);
+ break;
+ }
+
+ i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_client_update_vsi_ctxt
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @is_vf: if this for the VF
+ * @vf_id: if is_vf true this carries the vf_id
+ * @flag: Any device level setting that needs to be done for PE
+ * @valid_flag: Bits in this match up and enable changing of flag bits
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ struct i40e_client *client,
+ bool is_vf, u32 vf_id,
+ u32 flag, u32 valid_flag)
+{
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ bool update = true;
+ i40e_status err;
+
+ /* TODO: for now do not allow setting VF's VSI setting */
+ if (is_vf)
+ return -EINVAL;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ err = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get PF vsi config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return -ENOENT;
+ }
+
+ if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
+ (flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+ ctxt.info.valid_sections =
+ cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
+ !(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+ ctxt.info.valid_sections =
+ cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ } else {
+ update = false;
+ dev_warn(&pf->pdev->dev,
+ "Client %s instance for PF id %d requested an unsupported config: %x.\n",
+ client->name, pf->hw.pf_id, flag);
+ }
+
+ if (update) {
+ err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "update VSI ctxt for PE failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
+ }
+ return err;
+}
+
+/**
+ * i40e_register_client - Register a i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40e_register_client(struct i40e_client *client)
+{
+ int ret = 0;
+ enum i40e_vsi_type vsi_type;
+
+ if (!client) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (strlen(client->name) == 0) {
+ pr_info("i40e: Failed to register client with no name\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ mutex_lock(&i40e_client_mutex);
+ if (i40e_client_is_registered(client)) {
+ pr_info("i40e: Client %s has already been registered!\n",
+ client->name);
+ mutex_unlock(&i40e_client_mutex);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ if ((client->version.major != I40E_CLIENT_VERSION_MAJOR) ||
+ (client->version.minor != I40E_CLIENT_VERSION_MINOR)) {
+ pr_info("i40e: Failed to register client %s due to mismatched client interface version\n",
+ client->name);
+ pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
+ client->version.major, client->version.minor,
+ client->version.build,
+ i40e_client_interface_version_str);
+ mutex_unlock(&i40e_client_mutex);
+ ret = -EIO;
+ goto out;
+ }
+
+ vsi_type = i40e_client_type_to_vsi_type(client->type);
+ if (vsi_type == I40E_VSI_TYPE_UNKNOWN) {
+ pr_info("i40e: Failed to register client %s due to unknown client type %d\n",
+ client->name, client->type);
+ mutex_unlock(&i40e_client_mutex);
+ ret = -EIO;
+ goto out;
+ }
+ list_add(&client->list, &i40e_clients);
+ set_bit(__I40E_CLIENT_REGISTERED, &client->state);
+ mutex_unlock(&i40e_client_mutex);
+
+ if (i40e_client_prepare(client)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ pr_info("i40e: Registered client %s with return code %d\n",
+ client->name, ret);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(i40e_register_client);
+
+/**
+ * i40e_unregister_client - Unregister a i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40e_unregister_client(struct i40e_client *client)
+{
+ int ret = 0;
+
+ /* When an unregister request comes through we would have to send
+ * a close for each of the client instances that were opened.
+ * client_release function is called to handle this.
+ */
+ if (!client || i40e_client_release(client)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* TODO: check if device is in reset, or if that matters? */
+ mutex_lock(&i40e_client_mutex);
+ if (!i40e_client_is_registered(client)) {
+ pr_info("i40e: Client %s has not been registered\n",
+ client->name);
+ mutex_unlock(&i40e_client_mutex);
+ ret = -ENODEV;
+ goto out;
+ }
+ if (atomic_read(&client->ref_cnt) == 0) {
+ clear_bit(__I40E_CLIENT_REGISTERED, &client->state);
+ list_del(&client->list);
+ pr_info("i40e: Unregistered client %s with return code %d\n",
+ client->name, ret);
+ } else {
+ ret = I40E_ERR_NOT_READY;
+ pr_err("i40e: Client %s failed unregister - client has open instances\n",
+ client->name);
+ }
+
+ mutex_unlock(&i40e_client_mutex);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(i40e_unregister_client);
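To see how a client is expected to use the ops exported above, here is a hedged sketch of an ->open() callback that maps a single CEQ onto the first iWARP MSI-X vector handed out in lan_info. The function name and the one-vector layout are illustrative assumptions; only the structures and setup_qvlist() come from this file:

	/* Illustrative only: a minimal client open() using the qvlist interface.
	 * Assumes the client needs exactly one CEQ/vector pair and no AEQ mapping.
	 */
	static int example_client_open(struct i40e_info *ldev, struct i40e_client *client)
	{
		struct i40e_qvlist_info qvlist = { .num_vectors = 1 };

		/* msix_entries[] holds the vectors reserved for iWARP by the PF */
		qvlist.qv_info[0].v_idx = ldev->msix_entries[0].entry;
		qvlist.qv_info[0].ceq_idx = 0;
		qvlist.qv_info[0].aeq_idx = I40E_QUEUE_INVALID_IDX;
		qvlist.qv_info[0].itr_idx = 0;

		/* setup_qvlist copies the list and programs PFINT_LNKLSTN/CEQCTL */
		return ldev->ops->setup_qvlist(ldev, client, &qvlist);
	}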
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
new file mode 100644
index 000000000000..bf6b453d93a1
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -0,0 +1,232 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_CLIENT_H_
+#define _I40E_CLIENT_H_
+
+#define I40E_CLIENT_STR_LENGTH 10
+
+/* Client interface version should be updated anytime there is a change in the
+ * existing APIs or data structures.
+ */
+#define I40E_CLIENT_VERSION_MAJOR 0
+#define I40E_CLIENT_VERSION_MINOR 01
+#define I40E_CLIENT_VERSION_BUILD 00
+#define I40E_CLIENT_VERSION_STR \
+ XSTRINGIFY(I40E_CLIENT_VERSION_MAJOR) "." \
+ XSTRINGIFY(I40E_CLIENT_VERSION_MINOR) "." \
+ XSTRINGIFY(I40E_CLIENT_VERSION_BUILD)
+
+struct i40e_client_version {
+ u8 major;
+ u8 minor;
+ u8 build;
+ u8 rsvd;
+};
+
+enum i40e_client_state {
+ __I40E_CLIENT_NULL,
+ __I40E_CLIENT_REGISTERED
+};
+
+enum i40e_client_instance_state {
+ __I40E_CLIENT_INSTANCE_NONE,
+ __I40E_CLIENT_INSTANCE_OPENED,
+};
+
+enum i40e_client_type {
+ I40E_CLIENT_IWARP,
+ I40E_CLIENT_VMDQ2
+};
+
+struct i40e_ops;
+struct i40e_client;
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ 0x80
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct i40e_qvlist_info {
+ u32 num_vectors;
+ struct i40e_qv_info qv_info[1];
+};
+
+#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
+
+/* set of LAN parameters useful for clients managed by LAN */
+
+/* Struct to hold per priority info */
+struct i40e_prio_qos_params {
+ u16 qs_handle; /* qs handle for prio */
+ u8 tc; /* TC mapped to prio */
+ u8 reserved;
+};
+
+#define I40E_CLIENT_MAX_USER_PRIORITY 8
+/* Struct to hold Client QoS */
+struct i40e_qos_params {
+ struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
+};
+
+struct i40e_params {
+ struct i40e_qos_params qos;
+ u16 mtu;
+};
+
+/* Structure to hold Lan device info for a client device */
+struct i40e_info {
+ struct i40e_client_version version;
+ u8 lanmac[6];
+ struct net_device *netdev;
+ struct pci_dev *pcidev;
+ u8 __iomem *hw_addr;
+ u8 fid; /* function id, PF id or VF id */
+#define I40E_CLIENT_FTYPE_PF 0
+#define I40E_CLIENT_FTYPE_VF 1
+ u8 ftype; /* function type, PF or VF */
+ void *pf;
+
+ /* All L2 params that could change during the life span of the PF
+ * and needs to be communicated to the client when they change
+ */
+ struct i40e_qvlist_info *qvlist_info;
+ struct i40e_params params;
+ struct i40e_ops *ops;
+
+ u16 msix_count; /* number of msix vectors*/
+ /* Array down below will be dynamically allocated based on msix_count */
+ struct msix_entry *msix_entries;
+ u16 itr_index; /* Which ITR index the PE driver is supposed to use */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
+};
+
+#define I40E_CLIENT_RESET_LEVEL_PF 1
+#define I40E_CLIENT_RESET_LEVEL_CORE 2
+#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE BIT(1)
+
+struct i40e_ops {
+ /* setup_qvlist enables queues with a particular vector */
+ int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
+ struct i40e_qvlist_info *qv_info);
+
+ int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
+ u32 vf_id, u8 *msg, u16 len);
+
+ /* If the PE Engine is unresponsive, RDMA driver can request a reset.
+ * The level helps determine the level of reset being requested.
+ */
+ void (*request_reset)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 level);
+
+ /* API for the RDMA driver to set certain VSI flags that control
+ * PE Engine.
+ */
+ int (*update_vsi_ctxt)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ bool is_vf, u32 vf_id,
+ u32 flag, u32 valid_flag);
+};
+
+struct i40e_client_ops {
+ /* Should be called from register_client() or whenever PF is ready
+ * to create a specific client instance.
+ */
+ int (*open)(struct i40e_info *ldev, struct i40e_client *client);
+
+ /* Should be called when netdev is unavailable or when unregister
+ * call comes in. If the close is happening due to a reset being
+ * triggered set the reset bit to true.
+ */
+ void (*close)(struct i40e_info *ldev, struct i40e_client *client,
+ bool reset);
+
+ /* called when an L2-managed parameter changes, e.g. the MTU */
+ void (*l2_param_change)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_params *params);
+
+ int (*virtchnl_receive)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 vf_id,
+ u8 *msg, u16 len);
+
+ /* called when a VF is reset by the PF */
+ void (*vf_reset)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 vf_id);
+
+ /* called when the number of VFs changes */
+ void (*vf_enable)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 num_vfs);
+
+ /* returns true if VF is capable of specified offload */
+ int (*vf_capable)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 vf_id);
+};
+
+/* Client device */
+struct i40e_client_instance {
+ struct list_head list;
+ struct i40e_info lan_info;
+ struct i40e_client *client;
+ unsigned long state;
+ /* A count of all the in-progress calls to the client */
+ atomic_t ref_cnt;
+};
+
+struct i40e_client {
+ struct list_head list; /* list of registered clients */
+ char name[I40E_CLIENT_STR_LENGTH];
+ struct i40e_client_version version;
+ unsigned long state; /* client state */
+ atomic_t ref_cnt; /* Count of all the client devices of this kind */
+ u32 flags;
+#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
+#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
+ enum i40e_client_type type;
+ struct i40e_client_ops *ops; /* client ops provided by the client */
+};
+
+static inline bool i40e_client_is_registered(struct i40e_client *client)
+{
+ return test_bit(__I40E_CLIENT_REGISTERED, &client->state);
+}
+
+/* used by clients */
+int i40e_register_client(struct i40e_client *client);
+int i40e_unregister_client(struct i40e_client *client);
+
+#endif /* _I40E_CLIENT_H_ */
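A hedged sketch of how an RDMA-style client module would consume this header: the module, callback, and variable names below are illustrative, and only the i40e_client fields, the version macros, and i40e_register_client()/i40e_unregister_client() come from the interface itself.

	/* Minimal illustrative client skeleton (not shipped with this patch). */
	static struct i40e_client_ops example_client_ops = {
		.open  = example_client_open,	/* e.g. the sketch shown after i40e_client.c */
		.close = example_client_close,	/* hypothetical teardown callback */
	};

	static struct i40e_client example_client = {
		.name    = "example",
		.version = { I40E_CLIENT_VERSION_MAJOR,
			     I40E_CLIENT_VERSION_MINOR,
			     I40E_CLIENT_VERSION_BUILD },
		.type    = I40E_CLIENT_IWARP,
		.ops     = &example_client_ops,
	};

	static int __init example_client_init(void)
	{
		/* Registration schedules the PF service task, which later calls ->open() */
		return i40e_register_client(&example_client);
	}

	static void __exit example_client_exit(void)
	{
		i40e_unregister_client(&example_client);
	}
	module_init(example_client_init);
	module_exit(example_client_exit);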
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 70d9605a0d9e..67006431726a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -289,7 +289,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
*
* If not already scheduled, this puts the task into the work queue
**/
-static void i40e_service_event_schedule(struct i40e_pf *pf)
+void i40e_service_event_schedule(struct i40e_pf *pf)
{
if (!test_bit(__I40E_DOWN, &pf->state) &&
!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
@@ -2230,7 +2230,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
-
+ i40e_notify_client_of_l2_param_changes(vsi);
return 0;
}
@@ -4169,6 +4169,9 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
free_irq(pf->msix_entries[0].vector, pf);
}
+ i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
+ I40E_IWARP_IRQ_PILE_ID);
+
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
@@ -4212,12 +4215,17 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
**/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
+ bool reset = false;
+
if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
i40e_down(vsi);
i40e_vsi_free_irq(vsi);
i40e_vsi_free_tx_resources(vsi);
i40e_vsi_free_rx_resources(vsi);
vsi->current_netdev_flags = 0;
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ reset = true;
+ i40e_notify_client_of_netdev_close(vsi, reset);
}
/**
@@ -4850,6 +4858,12 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ctxt.info = vsi->info;
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+ if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ }
+
/* Update the VSI after updating the VSI queue-mapping information */
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
@@ -4993,6 +5007,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
if (pf->vsi[v]->netdev)
i40e_dcbnl_set_all(pf->vsi[v]);
}
+ i40e_notify_client_of_l2_param_changes(pf->vsi[v]);
}
}
@@ -5191,6 +5206,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
}
i40e_fdir_filter_restore(vsi);
}
+
+ /* On the next run of the service_task, notify any clients of the new
+ * opened netdev
+ */
+ pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
i40e_service_event_schedule(pf);
return 0;
@@ -5379,6 +5399,8 @@ int i40e_open(struct net_device *netdev)
geneve_get_rx_port(netdev);
#endif
+ i40e_notify_client_of_netdev_open(vsi);
+
return 0;
}
@@ -6043,6 +6065,7 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
case I40E_VSI_CTRL:
+ case I40E_VSI_IWARP:
case I40E_VSI_MIRROR:
default:
/* there is no notification for other VSIs */
@@ -7148,6 +7171,7 @@ static void i40e_service_task(struct work_struct *work)
i40e_vc_process_vflr_event(pf);
i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf);
+ i40e_client_subtask(pf);
i40e_sync_filters_subtask(pf);
i40e_sync_udp_filters_subtask(pf);
i40e_clean_adminq_subtask(pf);
@@ -7550,6 +7574,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
int vectors_left;
int v_budget, i;
int v_actual;
+ int iwarp_requested = 0;
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
return -ENODEV;
@@ -7563,6 +7588,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
* is governed by number of cpus in the system.
* - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs
+ * - The CPU count within the NUMA node if iWARP is enabled
#ifdef I40E_FCOE
* - The number of FCOE qps.
#endif
@@ -7609,6 +7635,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
#endif
+ /* can we reserve enough for iWARP? */
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (!vectors_left)
+ pf->num_iwarp_msix = 0;
+ else if (vectors_left < pf->num_iwarp_msix)
+ pf->num_iwarp_msix = 1;
+ v_budget += pf->num_iwarp_msix;
+ vectors_left -= pf->num_iwarp_msix;
+ }
+
/* any vectors left over go for VMDq support */
if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
@@ -7643,6 +7679,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
* of these features based on the policy and at the end disable
* the features that did not get any vectors.
*/
+ iwarp_requested = pf->num_iwarp_msix;
+ pf->num_iwarp_msix = 0;
#ifdef I40E_FCOE
pf->num_fcoe_qps = 0;
pf->num_fcoe_msix = 0;
@@ -7681,17 +7719,33 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_lan_msix = 1;
break;
case 3:
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ pf->num_lan_msix = 1;
+ pf->num_iwarp_msix = 1;
+ } else {
+ pf->num_lan_msix = 2;
+ }
#ifdef I40E_FCOE
/* give one vector to FCoE */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
pf->num_lan_msix = 1;
pf->num_fcoe_msix = 1;
}
-#else
- pf->num_lan_msix = 2;
#endif
break;
default:
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ pf->num_iwarp_msix = min_t(int, (vec / 3),
+ iwarp_requested);
+ pf->num_vmdq_vsis = min_t(int, (vec / 3),
+ I40E_DEFAULT_NUM_VMDQ_VSI);
+ } else {
+ pf->num_vmdq_vsis = min_t(int, (vec / 2),
+ I40E_DEFAULT_NUM_VMDQ_VSI);
+ }
+ pf->num_lan_msix = min_t(int,
+ (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
+ pf->num_lan_msix);
#ifdef I40E_FCOE
/* give one vector to FCoE */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
@@ -7699,8 +7753,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
vec--;
}
#endif
- /* give the rest to the PF */
- pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
break;
}
}
@@ -7710,6 +7762,12 @@ static int i40e_init_msix(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
}
+
+ if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+ (pf->num_iwarp_msix == 0)) {
+ dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
+ pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+ }
#ifdef I40E_FCOE
if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
@@ -7801,6 +7859,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
vectors = i40e_init_msix(pf);
if (vectors < 0) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
+ I40E_FLAG_IWARP_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
@@ -8474,6 +8533,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
}
+ if (pf->hw.func_caps.iwarp) {
+ pf->flags |= I40E_FLAG_IWARP_ENABLED;
+ /* IWARP needs one extra vector for CQP just like MISC.*/
+ pf->num_iwarp_msix = (int)num_online_cpus() + 1;
+ }
+
#ifdef I40E_FCOE
i40e_init_pf_fcoe(pf);
@@ -9328,6 +9393,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
}
+ if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ ctxt.info.queueing_opt_flags |=
+ I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ }
+
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
if (pf->vf[vsi->vf_id].spoofchk) {
@@ -9351,6 +9423,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
break;
#endif /* I40E_FCOE */
+ case I40E_VSI_IWARP:
+ /* send down message to iWARP */
+ break;
+
default:
return -ENODEV;
}
@@ -10467,6 +10543,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
/* make sure all the fancies are disabled */
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_IWARP_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
@@ -10484,6 +10561,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= pf->num_lan_qps;
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_IWARP_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
@@ -11059,7 +11137,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif /* CONFIG_PCI_IOV */
- pfs_found++;
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
+ pf->num_iwarp_msix,
+ I40E_IWARP_IRQ_PILE_ID);
+ if (pf->iwarp_base_vector < 0) {
+ dev_info(&pdev->dev,
+ "failed to get tracking for %d vectors for IWARP err=%d\n",
+ pf->num_iwarp_msix, pf->iwarp_base_vector);
+ pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+ }
+ }
i40e_dbg_pf_init(pf);
@@ -11070,6 +11158,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
+ /* add this PF to client device list and launch a client service task */
+ err = i40e_lan_add_device(pf);
+ if (err)
+ dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
+ err);
+
#ifdef I40E_FCOE
/* create FCoE interface */
i40e_fcoe_vsi_setup(pf);
@@ -11245,6 +11339,13 @@ static void i40e_remove(struct pci_dev *pdev)
if (pf->vsi[pf->lan_vsi])
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
+ /* remove attached clients */
+ ret_code = i40e_lan_del_device(pf);
+ if (ret_code) {
+ dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+ ret_code);
+ }
+
/* shutdown and destroy the HMC */
if (hw->hmc.hmc_obj) {
ret_code = i40e_shutdown_lan_hmc(hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 0a0baf71041b..3335f9d13374 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -78,7 +78,7 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_FD = 0x00001000,
-
+ I40E_DEBUG_IWARP = 0x00F00000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
@@ -160,6 +160,7 @@ enum i40e_vsi_type {
I40E_VSI_MIRROR = 5,
I40E_VSI_SRIOV = 6,
I40E_VSI_FDIR = 7,
+ I40E_VSI_IWARP = 8,
I40E_VSI_TYPE_UNKNOWN
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 3226946bf3d4..ab866cf3dc18 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -81,6 +81,9 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_IWARP = 20,
+ I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
+ I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -348,6 +351,37 @@ struct i40e_virtchnl_pf_event {
int severity;
};
+/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have an AEQ and CEQ attached to it although
+ * there is a single AEQ per VF IWARP instance in which case
+ * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+*/
+#define I40E_QUEUE_TYPE_PE_AEQ 0x80
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct i40e_virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct i40e_virtchnl_iwarp_qv_info qv_info[1];
+};
+
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
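
The qvlist message defined above is variable-length: num_vectors elements follow, the first of which is already embedded in the structure. A hypothetical VF-side sketch of sizing and filling it before sending I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP might look like the following (build_qvlist() and the chosen vector/queue indices are invented for illustration):

#include <linux/slab.h>

/*
 * Illustrative sketch only: allocate and populate the variable-length
 * qvlist message.  The mapping shown (one CEQ per vector, a single AEQ
 * attached to the first vector, data vectors starting at index 1) is an
 * assumption for this example, not something mandated by the patch.
 */
static struct i40e_virtchnl_iwarp_qvlist_info *build_qvlist(u32 num_vectors)
{
	struct i40e_virtchnl_iwarp_qvlist_info *qvlist;
	size_t len;
	u32 i;

	/* one qv_info element is already embedded in the struct */
	len = sizeof(*qvlist) +
	      (num_vectors - 1) * sizeof(struct i40e_virtchnl_iwarp_qv_info);
	qvlist = kzalloc(len, GFP_KERNEL);
	if (!qvlist)
		return NULL;

	qvlist->num_vectors = num_vectors;
	for (i = 0; i < num_vectors; i++) {
		qvlist->qv_info[i].v_idx = i + 1;	/* data vectors follow the misc vector */
		qvlist->qv_info[i].ceq_idx = i;		/* one CEQ per vector */
		/* single AEQ per VF instance: attach it to the first vector only */
		qvlist->qv_info[i].aeq_idx = i ? I40E_QUEUE_INVALID_IDX : 0;
		qvlist->qv_info[i].itr_idx = 0;
	}
	return qvlist;
}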
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index acd2693a4e97..816c6bbf7093 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -352,6 +352,136 @@ irq_list_done:
}
/**
+ * i40e_release_iwarp_qvlist
+ * @vf: pointer to the VF.
+ *
+ **/
+static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
+ u32 msix_vf;
+ u32 i;
+
+ if (!vf->qvlist_info)
+ return;
+
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ struct i40e_virtchnl_iwarp_qv_info *qv_info;
+ u32 next_q_index, next_q_type;
+ struct i40e_hw *hw = &pf->hw;
+ u32 v_idx, reg_idx, reg;
+
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ v_idx = qv_info->v_idx;
+ if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
+ /* Figure out the queue after CEQ and make that the
+ * first queue.
+ */
+ reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
+ reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
+ next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
+ >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
+ next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
+ >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
+
+ reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
+ reg = (next_q_index &
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
+ (next_q_type <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
+ }
+ }
+ kfree(vf->qvlist_info);
+ vf->qvlist_info = NULL;
+}
+
+/**
+ * i40e_config_iwarp_qvlist
+ * @vf: pointer to the VF info
+ * @qvlist_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
+ struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_virtchnl_iwarp_qv_info *qv_info;
+ u32 v_idx, i, reg_idx, reg;
+ u32 next_q_idx, next_q_type;
+ u32 msix_vf, size;
+
+ size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
+ (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
+ (qvlist_info->num_vectors - 1));
+ vf->qvlist_info = kzalloc(size, GFP_KERNEL);
+ vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
+
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ v_idx = qv_info->v_idx;
+
+ /* Validate vector id belongs to this vf */
+ if (!i40e_vc_isvalid_vector_id(vf, v_idx))
+ goto err;
+
+ vf->qvlist_info->qv_info[i] = *qv_info;
+
+ reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
+ /* We might be sharing the interrupt, so get the first queue
+ * index and type, push it down the list by adding the new
+ * queue on top. Also link it with the new queue in CEQCTL.
+ */
+ reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
+ next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
+ next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+
+ if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
+ reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
+ reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
+ (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
+ (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
+ (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
+ (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
+ wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
+
+ reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
+ reg = (qv_info->ceq_idx &
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
+ (I40E_QUEUE_TYPE_PE_CEQ <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+ wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
+ }
+
+ if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
+ reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
+ (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
+ (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
+
+ wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
+ }
+ }
+
+ return 0;
+err:
+ kfree(vf->qvlist_info);
+ vf->qvlist_info = NULL;
+ return -EINVAL;
+}
+
+/**
* i40e_config_vsi_tx_queue
* @vf: pointer to the VF info
* @vsi_id: id of VSI as provided by the FW
@@ -850,9 +980,11 @@ complete_reset:
/* reallocate VF resources to reset the VSI state */
i40e_free_vf_res(vf);
if (!i40e_alloc_vf_res(vf)) {
+ int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
i40e_enable_vf_mappings(vf);
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ i40e_notify_client_of_vf_reset(pf, abs_vf_id);
}
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -877,11 +1009,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
usleep_range(1000, 2000);
- for (i = 0; i < pf->num_alloc_vfs; i++)
- if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
- i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
- false);
-
+ i40e_notify_client_of_vf_enable(pf, 0);
for (i = 0; i < pf->num_alloc_vfs; i++)
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
@@ -953,6 +1081,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
goto err_iov;
}
}
+ i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
/* allocate memory */
vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
if (!vfs) {
@@ -1206,6 +1335,13 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi->info.pvid)
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ if (i40e_vf_client_capable(pf, vf->vf_id, I40E_CLIENT_IWARP) &&
+ (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
+ set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
+ }
+
if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
vfres->vf_offload_flags |=
@@ -1827,6 +1963,72 @@ error_param:
}
/**
+ * i40e_vc_iwarp_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the VF for the iwarp msgs
+ **/
+static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_pf *pf = vf->pf;
+ int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
+ msg, msglen);
+
+error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_IWARP,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_iwarp_qvmap_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @config: config qvmap or release it
+ *
+ * called from the VF for the iwarp msgs
+ **/
+static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
+ bool config)
+{
+ struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info =
+ (struct i40e_virtchnl_iwarp_qvlist_info *)msg;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (config) {
+ if (i40e_config_iwarp_qvlist(vf, qvlist_info))
+ aq_ret = I40E_ERR_PARAM;
+ } else {
+ i40e_release_iwarp_qvlist(vf);
+ }
+
+error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf,
+ config ? I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
+ I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ aq_ret);
+}
+
+/**
* i40e_vc_validate_vf_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1921,6 +2123,32 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
case I40E_VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct i40e_virtchnl_queue_select);
break;
+ case I40E_VIRTCHNL_OP_IWARP:
+ /* These messages are opaque to us and will be validated in
+ * the RDMA client code. We just need to check for nonzero
+ * length. The firmware will enforce max length restrictions.
+ */
+ if (msglen)
+ valid_len = msglen;
+ else
+ err_msg_format = true;
+ break;
+ case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ valid_len = 0;
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ valid_len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_iwarp_qvlist_info *qv =
+ (struct i40e_virtchnl_iwarp_qvlist_info *)msg;
+ if (qv->num_vectors == 0) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += ((qv->num_vectors - 1) *
+ sizeof(struct i40e_virtchnl_iwarp_qv_info));
+ }
+ break;
/* These are always errors coming from the VF. */
case I40E_VIRTCHNL_OP_EVENT:
case I40E_VIRTCHNL_OP_UNKNOWN:
@@ -2010,6 +2238,15 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
case I40E_VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg, msglen);
break;
+ case I40E_VIRTCHNL_OP_IWARP:
+ ret = i40e_vc_iwarp_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
+ break;
+ case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
+ break;
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index e74642a0c42e..e7b2fba0309e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -58,6 +58,7 @@ enum i40e_queue_ctrl {
enum i40e_vf_states {
I40E_VF_STAT_INIT = 0,
I40E_VF_STAT_ACTIVE,
+ I40E_VF_STAT_IWARPENA,
I40E_VF_STAT_FCOEENA,
I40E_VF_STAT_DISABLED,
};
@@ -66,6 +67,7 @@ enum i40e_vf_states {
enum i40e_vf_capabilities {
I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
I40E_VIRTCHNL_VF_CAP_L2,
+ I40E_VIRTCHNL_VF_CAP_IWARP,
};
/* VF information structure */
@@ -106,6 +108,8 @@ struct i40e_vf {
bool link_forced;
bool link_up; /* only valid if VF link is forced */
bool spoofchk;
+ /* RDMA Client */
+ struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info;
};
void i40e_free_vfs(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 7f2126b6a179..e0b68afea56e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1690,8 +1690,8 @@ static int mtk_probe(struct platform_device *pdev)
return -ENOMEM;
eth->base = devm_ioremap_resource(&pdev->dev, res);
- if (!eth->base)
- return -EADDRNOTAVAIL;
+ if (IS_ERR(eth->base))
+ return PTR_ERR(eth->base);
spin_lock_init(&eth->page_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 42d8de892bfe..6aa73972d478 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -39,8 +39,6 @@
#include "mlx4.h"
-static const u8 zero_gid[16]; /* automatically initialized to 0 */
-
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
return 1 << dev->oper_log_mgm_entry_size;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 97f5114fc113..eb926e1ee71c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -407,6 +407,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
const char *mlx5_command_str(int command)
{
switch (command) {
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ return "QUERY_HCA_VPORT_CONTEXT";
+
+ case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
+ return "MODIFY_HCA_VPORT_CONTEXT";
+
case MLX5_CMD_OP_QUERY_HCA_CAP:
return "QUERY_HCA_CAP";
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index aa1ab4702385..75c7ae6a5cc4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -98,88 +98,55 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
int err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
- if (err)
- return err;
-
- err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
if (err)
return err;
if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
- HCA_CAP_OPMOD_GET_CUR);
- if (err)
- return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
- HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, pg)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
- HCA_CAP_OPMOD_GET_CUR);
- if (err)
- return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
- HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, atomic)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
- HCA_CAP_OPMOD_GET_CUR);
- if (err)
- return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
- HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, roce)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
- HCA_CAP_OPMOD_GET_CUR);
- if (err)
- return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
- HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, nic_flow_table)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
- HCA_CAP_OPMOD_GET_CUR);
- if (err)
- return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
- HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, vport_group_manager) &&
MLX5_CAP_GEN(dev, eswitch_flow_table)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
- HCA_CAP_OPMOD_GET_CUR);
- if (err)
- return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
- HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
- HCA_CAP_OPMOD_GET_CUR);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
if (err)
return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
- HCA_CAP_OPMOD_GET_MAX);
+ }
+
+ if (MLX5_CAP_GEN(dev, vector_calc)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 72a94e72ee25..3f3b2fae4991 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -341,8 +341,9 @@ static u16 to_fw_pkey_sz(u32 size)
}
}
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
- enum mlx5_cap_mode cap_mode)
+static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
+ enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode)
{
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
@@ -392,6 +393,16 @@ query_ex:
return err;
}
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
+{
+ int ret;
+
+ ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
+ if (ret)
+ return ret;
+ return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
+}
+
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
{
u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
@@ -419,8 +430,7 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
int err;
if (MLX5_CAP_GEN(dev, atomic)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
- HCA_CAP_OPMOD_GET_CUR);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
if (err)
return err;
} else {
@@ -462,11 +472,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
if (!set_ctx)
goto query_ex;
- err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
- if (err)
- goto query_ex;
-
- err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
if (err)
goto query_ex;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 90ab09e375b8..bd518405859e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -852,7 +852,8 @@ int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
- u8 port_num, void *out, size_t out_sz)
+ int vf, u8 port_num, void *out,
+ size_t out_sz)
{
int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
int is_group_manager;
@@ -871,7 +872,7 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
if (other_vport) {
if (is_group_manager) {
MLX5_SET(query_vport_counter_in, in, other_vport, 1);
- MLX5_SET(query_vport_counter_in, in, vport_number, 0);
+ MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
} else {
err = -EPERM;
goto free;
@@ -890,3 +891,70 @@ free:
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
+
+int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
+ u8 other_vport, u8 port_num,
+ int vf,
+ struct mlx5_hca_vport_context *req)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
+ u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
+ int is_group_manager;
+ void *in;
+ int err;
+ void *ctx;
+
+ mlx5_core_dbg(dev, "vf %d\n", vf);
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+ in = kzalloc(in_sz, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ memset(out, 0, sizeof(out));
+ MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
+ } else {
+ err = -EPERM;
+ goto ex;
+ }
+ }
+
+ if (MLX5_CAP_GEN(dev, num_ports) > 1)
+ MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);
+
+ ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
+ MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
+ MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
+ MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
+ MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
+ MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
+ MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
+ MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
+ MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
+ MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
+ MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
+ MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
+ MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
+ MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
+ MLX5_SET(hca_vport_context, ctx, lid, req->lid);
+ MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
+ MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
+ MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
+ MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
+ MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
+ MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
+ MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
+ err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+ if (err)
+ goto ex;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+
+ex:
+ kfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index ab264e1bccd0..75683fb26734 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -45,7 +45,7 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
-#include <asm-generic/io-64-nonatomic-hi-lo.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include "nfp_net_ctrl.h"
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 3f5711061432..a733868a43aa 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1791,9 +1791,11 @@ static int smc911x_probe(struct net_device *dev)
unsigned int val, chip_id, revision;
const char *version_string;
unsigned long irq_flags;
+#ifdef SMC_USE_DMA
struct dma_slave_config config;
dma_cap_mask_t mask;
struct pxad_param param;
+#endif
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 192631a345df..bc168894bda3 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -843,8 +843,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
if (info) {
fl6->daddr = info->key.u.ipv6.dst;
fl6->saddr = info->key.u.ipv6.src;
- fl6->flowi6_tos = RT_TOS(info->key.tos);
- fl6->flowlabel = info->key.label;
+ fl6->flowlabel = ip6_make_flowinfo(RT_TOS(info->key.tos),
+ info->key.label);
dst_cache = &info->dst_cache;
} else {
prio = geneve->tos;
@@ -855,8 +855,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
use_cache = false;
}
- fl6->flowi6_tos = RT_TOS(prio);
- fl6->flowlabel = geneve->label;
+ fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
+ geneve->label);
fl6->daddr = geneve->remote.sin6.sin6_addr;
dst_cache = &geneve->dst_cache;
}
@@ -1049,7 +1049,8 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (unlikely(err))
goto err;
- prio = ip_tunnel_ecn_encap(fl6.flowi6_tos, iip, skb);
+ prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
+ iip, skb);
ttl = geneve->ttl;
if (!ttl && ipv6_addr_is_multicast(&fl6.daddr))
ttl = 1;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index b4c68783dfc3..8b3bd8ecd1c4 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -619,6 +619,7 @@ struct nvsp_message {
#define NETVSC_PACKET_SIZE 4096
#define VRSS_SEND_TAB_SIZE 16
+#define VRSS_CHANNEL_MAX 64
#define RNDIS_MAX_PKT_DEFAULT 8
#define RNDIS_PKT_ALIGN_DEFAULT 8
@@ -700,13 +701,13 @@ struct netvsc_device {
struct net_device *ndev;
- struct vmbus_channel *chn_table[NR_CPUS];
+ struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
u32 send_table[VRSS_SEND_TAB_SIZE];
u32 max_chn;
u32 num_chn;
spinlock_t sc_lock; /* Protects num_sc_offered variable */
u32 num_sc_offered;
- atomic_t queue_sends[NR_CPUS];
+ atomic_t queue_sends[VRSS_CHANNEL_MAX];
/* Holds rndis device info */
void *extension;
@@ -718,7 +719,7 @@ struct netvsc_device {
/* The sub channel callback buffer */
unsigned char *sub_cb_buf;
- struct multi_send_data msd[NR_CPUS];
+ struct multi_send_data msd[VRSS_CHANNEL_MAX];
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 08608499fa17..b8121eba33ff 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -858,6 +858,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
struct netvsc_device *nvdev = hv_get_drvdata(hdev);
struct netvsc_device_info device_info;
int limit = ETH_DATA_LEN;
+ u32 num_chn;
int ret = 0;
if (nvdev == NULL || nvdev->destroy)
@@ -873,6 +874,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (ret)
goto out;
+ num_chn = nvdev->num_chn;
+
nvdev->start_remove = true;
rndis_filter_device_remove(hdev);
@@ -883,7 +886,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
- device_info.num_chn = nvdev->num_chn;
+ device_info.num_chn = num_chn;
device_info.max_num_vrss_chns = max_num_vrss_chns;
rndis_filter_device_add(hdev, &device_info);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 47d07c576a34..c4e1e0408433 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -986,12 +986,6 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
- spin_lock_irqsave(&nvscdev->sc_lock, flags);
- nvscdev->num_sc_offered--;
- spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
- if (nvscdev->num_sc_offered == 0)
- complete(&nvscdev->channel_init_wait);
-
if (chn_index >= nvscdev->num_chn)
return;
@@ -1004,6 +998,12 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
if (ret == 0)
nvscdev->chn_table[chn_index] = new_sc;
+
+ spin_lock_irqsave(&nvscdev->sc_lock, flags);
+ nvscdev->num_sc_offered--;
+ spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
+ if (nvscdev->num_sc_offered == 0)
+ complete(&nvscdev->channel_init_wait);
}
int rndis_filter_device_add(struct hv_device *dev,
@@ -1113,9 +1113,9 @@ int rndis_filter_device_add(struct hv_device *dev,
if (ret || rsscap.num_recv_que < 2)
goto out;
- num_rss_qs = min(device_info->max_num_vrss_chns, rsscap.num_recv_que);
+ net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, rsscap.num_recv_que);
- net_device->max_chn = rsscap.num_recv_que;
+ num_rss_qs = min(device_info->max_num_vrss_chns, net_device->max_chn);
/*
* We will limit the VRSS channels to the number CPUs in the NUMA node
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 1e901c7cfaac..b3ffaee30858 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -277,12 +277,16 @@ static int at803x_probe(struct phy_device *phydev)
if (!priv)
return -ENOMEM;
- gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (phydev->drv->phy_id != ATH8030_PHY_ID)
+ goto does_not_require_reset_workaround;
+
+ gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpiod_reset))
return PTR_ERR(gpiod_reset);
priv->gpiod_reset = gpiod_reset;
+does_not_require_reset_workaround:
phydev->priv = priv;
return 0;
@@ -362,10 +366,10 @@ static void at803x_link_change_notify(struct phy_device *phydev)
at803x_context_save(phydev, &context);
- gpiod_set_value(priv->gpiod_reset, 0);
- msleep(1);
gpiod_set_value(priv->gpiod_reset, 1);
msleep(1);
+ gpiod_set_value(priv->gpiod_reset, 0);
+ msleep(1);
at803x_context_restore(phydev, &context);
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index f70522c35163..135296508a7e 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -122,6 +122,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
dev_info(&pdev->dev, "no regulator found\n");
+ data->regulator = NULL;
} else {
ret = regulator_enable(data->regulator);
if (ret)
@@ -137,7 +138,8 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
return 0;
err_out_disable_regulator:
- regulator_disable(data->regulator);
+ if (data->regulator)
+ regulator_disable(data->regulator);
err_out_free_mdiobus:
mdiobus_free(bus);
return ret;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 4fd861063ed4..f572b31a2b20 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2307,7 +2307,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
pch->ppp = NULL;
pch->chan = chan;
- pch->chan_net = net;
+ pch->chan_net = get_net(net);
chan->ppp = pch;
init_ppp_file(&pch->file, CHANNEL);
pch->file.hdrlen = chan->hdrlen;
@@ -2404,6 +2404,8 @@ ppp_unregister_channel(struct ppp_channel *chan)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
+ put_net(pch->chan_net);
+ pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 01f08a7751f7..9cfe6aeac84e 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -24,6 +24,7 @@
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
+#include <linux/reboot.h>
#define DRV_NAME "rionet"
#define DRV_VERSION "0.3"
@@ -48,6 +49,8 @@ MODULE_LICENSE("GPL");
#define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE
#define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE
#define RIONET_MAX_NETS 8
+#define RIONET_MSG_SIZE RIO_MAX_MSG_SIZE
+#define RIONET_MAX_MTU (RIONET_MSG_SIZE - ETH_HLEN)
struct rionet_private {
struct rio_mport *mport;
@@ -60,6 +63,7 @@ struct rionet_private {
spinlock_t lock;
spinlock_t tx_lock;
u32 msg_enable;
+ bool open;
};
struct rionet_peer {
@@ -71,6 +75,7 @@ struct rionet_peer {
struct rionet_net {
struct net_device *ndev;
struct list_head peers;
+ spinlock_t lock; /* net info access lock */
struct rio_dev **active;
int nact; /* number of active peers */
};
@@ -232,26 +237,32 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
struct net_device *ndev = dev_id;
struct rionet_private *rnet = netdev_priv(ndev);
struct rionet_peer *peer;
+ unsigned char netid = rnet->mport->id;
if (netif_msg_intr(rnet))
printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
DRV_NAME, sid, tid, info);
if (info == RIONET_DOORBELL_JOIN) {
- if (!nets[rnet->mport->id].active[sid]) {
- list_for_each_entry(peer,
- &nets[rnet->mport->id].peers, node) {
+ if (!nets[netid].active[sid]) {
+ spin_lock(&nets[netid].lock);
+ list_for_each_entry(peer, &nets[netid].peers, node) {
if (peer->rdev->destid == sid) {
- nets[rnet->mport->id].active[sid] =
- peer->rdev;
- nets[rnet->mport->id].nact++;
+ nets[netid].active[sid] = peer->rdev;
+ nets[netid].nact++;
}
}
+ spin_unlock(&nets[netid].lock);
+
rio_mport_send_doorbell(mport, sid,
RIONET_DOORBELL_JOIN);
}
} else if (info == RIONET_DOORBELL_LEAVE) {
- nets[rnet->mport->id].active[sid] = NULL;
- nets[rnet->mport->id].nact--;
+ spin_lock(&nets[netid].lock);
+ if (nets[netid].active[sid]) {
+ nets[netid].active[sid] = NULL;
+ nets[netid].nact--;
+ }
+ spin_unlock(&nets[netid].lock);
} else {
if (netif_msg_intr(rnet))
printk(KERN_WARNING "%s: unhandled doorbell\n",
@@ -280,7 +291,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
struct net_device *ndev = dev_id;
struct rionet_private *rnet = netdev_priv(ndev);
- spin_lock(&rnet->lock);
+ spin_lock(&rnet->tx_lock);
if (netif_msg_intr(rnet))
printk(KERN_INFO
@@ -299,14 +310,16 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
netif_wake_queue(ndev);
- spin_unlock(&rnet->lock);
+ spin_unlock(&rnet->tx_lock);
}
static int rionet_open(struct net_device *ndev)
{
int i, rc = 0;
- struct rionet_peer *peer, *tmp;
+ struct rionet_peer *peer;
struct rionet_private *rnet = netdev_priv(ndev);
+ unsigned char netid = rnet->mport->id;
+ unsigned long flags;
if (netif_msg_ifup(rnet))
printk(KERN_INFO "%s: open\n", DRV_NAME);
@@ -345,20 +358,13 @@ static int rionet_open(struct net_device *ndev)
netif_carrier_on(ndev);
netif_start_queue(ndev);
- list_for_each_entry_safe(peer, tmp,
- &nets[rnet->mport->id].peers, node) {
- if (!(peer->res = rio_request_outb_dbell(peer->rdev,
- RIONET_DOORBELL_JOIN,
- RIONET_DOORBELL_LEAVE)))
- {
- printk(KERN_ERR "%s: error requesting doorbells\n",
- DRV_NAME);
- continue;
- }
-
+ spin_lock_irqsave(&nets[netid].lock, flags);
+ list_for_each_entry(peer, &nets[netid].peers, node) {
/* Send a join message */
rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
}
+ spin_unlock_irqrestore(&nets[netid].lock, flags);
+ rnet->open = true;
out:
return rc;
@@ -367,7 +373,9 @@ static int rionet_open(struct net_device *ndev)
static int rionet_close(struct net_device *ndev)
{
struct rionet_private *rnet = netdev_priv(ndev);
- struct rionet_peer *peer, *tmp;
+ struct rionet_peer *peer;
+ unsigned char netid = rnet->mport->id;
+ unsigned long flags;
int i;
if (netif_msg_ifup(rnet))
@@ -375,18 +383,21 @@ static int rionet_close(struct net_device *ndev)
netif_stop_queue(ndev);
netif_carrier_off(ndev);
+ rnet->open = false;
for (i = 0; i < RIONET_RX_RING_SIZE; i++)
kfree_skb(rnet->rx_skb[i]);
- list_for_each_entry_safe(peer, tmp,
- &nets[rnet->mport->id].peers, node) {
- if (nets[rnet->mport->id].active[peer->rdev->destid]) {
+ spin_lock_irqsave(&nets[netid].lock, flags);
+ list_for_each_entry(peer, &nets[netid].peers, node) {
+ if (nets[netid].active[peer->rdev->destid]) {
rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
- nets[rnet->mport->id].active[peer->rdev->destid] = NULL;
+ nets[netid].active[peer->rdev->destid] = NULL;
}
- rio_release_outb_dbell(peer->rdev, peer->res);
+ if (peer->res)
+ rio_release_outb_dbell(peer->rdev, peer->res);
}
+ spin_unlock_irqrestore(&nets[netid].lock, flags);
rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
RIONET_DOORBELL_LEAVE);
@@ -400,22 +411,38 @@ static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
{
struct rio_dev *rdev = to_rio_dev(dev);
unsigned char netid = rdev->net->hport->id;
- struct rionet_peer *peer, *tmp;
+ struct rionet_peer *peer;
+ int state, found = 0;
+ unsigned long flags;
- if (dev_rionet_capable(rdev)) {
- list_for_each_entry_safe(peer, tmp, &nets[netid].peers, node) {
- if (peer->rdev == rdev) {
- if (nets[netid].active[rdev->destid]) {
- nets[netid].active[rdev->destid] = NULL;
- nets[netid].nact--;
+ if (!dev_rionet_capable(rdev))
+ return;
+
+ spin_lock_irqsave(&nets[netid].lock, flags);
+ list_for_each_entry(peer, &nets[netid].peers, node) {
+ if (peer->rdev == rdev) {
+ list_del(&peer->node);
+ if (nets[netid].active[rdev->destid]) {
+ state = atomic_read(&rdev->state);
+ if (state != RIO_DEVICE_GONE &&
+ state != RIO_DEVICE_INITIALIZING) {
+ rio_send_doorbell(rdev,
+ RIONET_DOORBELL_LEAVE);
}
-
- list_del(&peer->node);
- kfree(peer);
- break;
+ nets[netid].active[rdev->destid] = NULL;
+ nets[netid].nact--;
}
+ found = 1;
+ break;
}
}
+ spin_unlock_irqrestore(&nets[netid].lock, flags);
+
+ if (found) {
+ if (peer->res)
+ rio_release_outb_dbell(rdev, peer->res);
+ kfree(peer);
+ }
}
static void rionet_get_drvinfo(struct net_device *ndev,
@@ -443,6 +470,17 @@ static void rionet_set_msglevel(struct net_device *ndev, u32 value)
rnet->msg_enable = value;
}
+static int rionet_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > RIONET_MAX_MTU)) {
+ printk(KERN_ERR "%s: Invalid MTU size %d\n",
+ ndev->name, new_mtu);
+ return -EINVAL;
+ }
+ ndev->mtu = new_mtu;
+ return 0;
+}
+
static const struct ethtool_ops rionet_ethtool_ops = {
.get_drvinfo = rionet_get_drvinfo,
.get_msglevel = rionet_get_msglevel,
@@ -454,7 +492,7 @@ static const struct net_device_ops rionet_netdev_ops = {
.ndo_open = rionet_open,
.ndo_stop = rionet_close,
.ndo_start_xmit = rionet_start_xmit,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = rionet_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
@@ -478,6 +516,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
/* Set up private area */
rnet = netdev_priv(ndev);
rnet->mport = mport;
+ rnet->open = false;
/* Set the default MAC address */
device_id = rio_local_get_device_id(mport);
@@ -489,7 +528,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
ndev->dev_addr[5] = device_id & 0xff;
ndev->netdev_ops = &rionet_netdev_ops;
- ndev->mtu = RIO_MAX_MSG_SIZE - 14;
+ ndev->mtu = RIONET_MAX_MTU;
ndev->features = NETIF_F_LLTX;
SET_NETDEV_DEV(ndev, &mport->dev);
ndev->ethtool_ops = &rionet_ethtool_ops;
@@ -500,8 +539,11 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;
rc = register_netdev(ndev);
- if (rc != 0)
+ if (rc != 0) {
+ free_pages((unsigned long)nets[mport->id].active,
+ get_order(rionet_active_bytes));
goto out;
+ }
printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n",
ndev->name,
@@ -515,8 +557,6 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
return rc;
}
-static unsigned long net_table[RIONET_MAX_NETS/sizeof(unsigned long) + 1];
-
static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
{
int rc = -ENODEV;
@@ -525,19 +565,16 @@ static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
struct net_device *ndev = NULL;
struct rio_dev *rdev = to_rio_dev(dev);
unsigned char netid = rdev->net->hport->id;
- int oldnet;
if (netid >= RIONET_MAX_NETS)
return rc;
- oldnet = test_and_set_bit(netid, net_table);
-
/*
* If first time through this net, make sure local device is rionet
* capable and setup netdev (this step will be skipped in later probes
* on the same net).
*/
- if (!oldnet) {
+ if (!nets[netid].ndev) {
rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
&lsrc_ops);
rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
@@ -555,30 +592,56 @@ static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
rc = -ENOMEM;
goto out;
}
- nets[netid].ndev = ndev;
+
rc = rionet_setup_netdev(rdev->net->hport, ndev);
if (rc) {
printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n",
DRV_NAME, rc);
+ free_netdev(ndev);
goto out;
}
INIT_LIST_HEAD(&nets[netid].peers);
+ spin_lock_init(&nets[netid].lock);
nets[netid].nact = 0;
- } else if (nets[netid].ndev == NULL)
- goto out;
+ nets[netid].ndev = ndev;
+ }
/*
* If the remote device has mailbox/doorbell capabilities,
* add it to the peer list.
*/
if (dev_rionet_capable(rdev)) {
- if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) {
+ struct rionet_private *rnet;
+ unsigned long flags;
+
+ rnet = netdev_priv(nets[netid].ndev);
+
+ peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ if (!peer) {
rc = -ENOMEM;
goto out;
}
peer->rdev = rdev;
+ peer->res = rio_request_outb_dbell(peer->rdev,
+ RIONET_DOORBELL_JOIN,
+ RIONET_DOORBELL_LEAVE);
+ if (!peer->res) {
+ pr_err("%s: error requesting doorbells\n", DRV_NAME);
+ kfree(peer);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock_irqsave(&nets[netid].lock, flags);
list_add_tail(&peer->node, &nets[netid].peers);
+ spin_unlock_irqrestore(&nets[netid].lock, flags);
+ pr_debug("%s: %s add peer %s\n",
+ DRV_NAME, __func__, rio_name(rdev));
+
+ /* If netdev is already opened, send join request to new peer */
+ if (rnet->open)
+ rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
}
return 0;
@@ -586,6 +649,61 @@ out:
return rc;
}
+static int rionet_shutdown(struct notifier_block *nb, unsigned long code,
+ void *unused)
+{
+ struct rionet_peer *peer;
+ unsigned long flags;
+ int i;
+
+ pr_debug("%s: %s\n", DRV_NAME, __func__);
+
+ for (i = 0; i < RIONET_MAX_NETS; i++) {
+ if (!nets[i].ndev)
+ continue;
+
+ spin_lock_irqsave(&nets[i].lock, flags);
+ list_for_each_entry(peer, &nets[i].peers, node) {
+ if (nets[i].active[peer->rdev->destid]) {
+ rio_send_doorbell(peer->rdev,
+ RIONET_DOORBELL_LEAVE);
+ nets[i].active[peer->rdev->destid] = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&nets[i].lock, flags);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void rionet_remove_mport(struct device *dev,
+ struct class_interface *class_intf)
+{
+ struct rio_mport *mport = to_rio_mport(dev);
+ struct net_device *ndev;
+ int id = mport->id;
+
+ pr_debug("%s %s\n", __func__, mport->name);
+
+ WARN(nets[id].nact, "%s called when connected to %d peers\n",
+ __func__, nets[id].nact);
+ WARN(!nets[id].ndev, "%s called for mport without NDEV\n",
+ __func__);
+
+ if (nets[id].ndev) {
+ ndev = nets[id].ndev;
+ netif_stop_queue(ndev);
+ unregister_netdev(ndev);
+
+ free_pages((unsigned long)nets[id].active,
+ get_order(sizeof(void *) *
+ RIO_MAX_ROUTE_ENTRIES(mport->sys_size)));
+ nets[id].active = NULL;
+ free_netdev(ndev);
+ nets[id].ndev = NULL;
+ }
+}
+
#ifdef MODULE
static struct rio_device_id rionet_id_table[] = {
{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
@@ -602,40 +720,43 @@ static struct subsys_interface rionet_interface = {
.remove_dev = rionet_remove_dev,
};
+static struct notifier_block rionet_notifier = {
+ .notifier_call = rionet_shutdown,
+};
+
+/* the rio_mport_interface is used to handle local mport devices */
+static struct class_interface rio_mport_interface __refdata = {
+ .class = &rio_mport_class,
+ .add_dev = NULL,
+ .remove_dev = rionet_remove_mport,
+};
+
static int __init rionet_init(void)
{
+ int ret;
+
+ ret = register_reboot_notifier(&rionet_notifier);
+ if (ret) {
+ pr_err("%s: failed to register reboot notifier (err=%d)\n",
+ DRV_NAME, ret);
+ return ret;
+ }
+
+ ret = class_interface_register(&rio_mport_interface);
+ if (ret) {
+ pr_err("%s: class_interface_register error: %d\n",
+ DRV_NAME, ret);
+ return ret;
+ }
+
return subsys_interface_register(&rionet_interface);
}
static void __exit rionet_exit(void)
{
- struct rionet_private *rnet;
- struct net_device *ndev;
- struct rionet_peer *peer, *tmp;
- int i;
-
- for (i = 0; i < RIONET_MAX_NETS; i++) {
- if (nets[i].ndev != NULL) {
- ndev = nets[i].ndev;
- rnet = netdev_priv(ndev);
- unregister_netdev(ndev);
-
- list_for_each_entry_safe(peer,
- tmp, &nets[i].peers, node) {
- list_del(&peer->node);
- kfree(peer);
- }
-
- free_pages((unsigned long)nets[i].active,
- get_order(sizeof(void *) *
- RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size)));
- nets[i].active = NULL;
-
- free_netdev(ndev);
- }
- }
-
+ unregister_reboot_notifier(&rionet_notifier);
subsys_interface_unregister(&rionet_interface);
+ class_interface_unregister(&rio_mport_interface);
}
late_initcall(rionet_init);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index d36d5ebf37f3..f20890ee03f3 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3261,54 +3261,6 @@ void lan78xx_tx_timeout(struct net_device *net)
tasklet_schedule(&dev->bh);
}
-struct rtnl_link_stats64 *lan78xx_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *storage)
-{
- struct lan78xx_net *dev = netdev_priv(netdev);
- struct lan78xx_statstage64 stats;
-
- /* curr_stat is updated by timer.
- * periodic reading from HW will prevent from entering USB auto suspend.
- * if autosuspend is disabled, read from HW.
- */
- if (!dev->udev->dev.power.runtime_auto)
- lan78xx_update_stats(dev);
-
- mutex_lock(&dev->stats.access_lock);
- memcpy(&stats, &dev->stats.curr_stat, sizeof(stats));
- mutex_unlock(&dev->stats.access_lock);
-
- /* calc by driver */
- storage->rx_packets = (__u64)netdev->stats.rx_packets;
- storage->tx_packets = (__u64)netdev->stats.tx_packets;
- storage->rx_bytes = (__u64)netdev->stats.rx_bytes;
- storage->tx_bytes = (__u64)netdev->stats.tx_bytes;
-
- /* use counter */
- storage->rx_length_errors = stats.rx_undersize_frame_errors +
- stats.rx_oversize_frame_errors;
- storage->rx_crc_errors = stats.rx_fcs_errors;
- storage->rx_frame_errors = stats.rx_alignment_errors;
- storage->rx_fifo_errors = stats.rx_dropped_frames;
- storage->rx_over_errors = stats.rx_oversize_frame_errors;
- storage->rx_errors = stats.rx_fcs_errors +
- stats.rx_alignment_errors +
- stats.rx_fragment_errors +
- stats.rx_jabber_errors +
- stats.rx_undersize_frame_errors +
- stats.rx_oversize_frame_errors +
- stats.rx_dropped_frames;
-
- storage->tx_carrier_errors = stats.tx_carrier_errors;
- storage->tx_errors = stats.tx_fcs_errors +
- stats.tx_excess_deferral_errors +
- stats.tx_carrier_errors;
-
- storage->multicast = stats.rx_multicast_frames;
-
- return storage;
-}
-
static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_open = lan78xx_open,
.ndo_stop = lan78xx_stop,
@@ -3322,7 +3274,6 @@ static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_set_features = lan78xx_set_features,
.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
- .ndo_get_stats64 = lan78xx_get_stats64,
};
static void lan78xx_stat_monitor(unsigned long param)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 800106a7246c..1c0fa364323e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1143,7 +1143,7 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
static bool vxlan_remcsum(struct vxlanhdr *unparsed,
struct sk_buff *skb, u32 vxflags)
{
- size_t start, offset, plen;
+ size_t start, offset;
if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
goto out;
@@ -1151,9 +1151,7 @@ static bool vxlan_remcsum(struct vxlanhdr *unparsed,
start = vxlan_rco_start(unparsed->vx_vni);
offset = start + vxlan_rco_offset(unparsed->vx_vni);
- plen = sizeof(struct vxlanhdr) + offset + sizeof(u16);
-
- if (!pskb_may_pull(skb, plen))
+ if (!pskb_may_pull(skb, offset + sizeof(u16)))
return false;
skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
@@ -1810,10 +1808,9 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = oif;
- fl6.flowi6_tos = RT_TOS(tos);
fl6.daddr = *daddr;
fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
- fl6.flowlabel = label;
+ fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;