Diffstat (limited to 'drivers/net/ethernet/chelsio')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/common.h           |   5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c            |  18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/pm3393.c           |   8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/vsc7326.c          |   5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c      |   4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h           |  19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c   |  12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c      | 114
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c    |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c       | 154
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h       |  17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c           |   4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c             | 122
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c           |   1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h   |   3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c  |  16
16 files changed, 332 insertions(+), 172 deletions(-)
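Two themes run through this series. First, per-driver MTU validation in ndo_change_mtu() moves into the core: each driver now publishes its legal range through the net_device min_mtu/max_mtu fields, which dev_set_mtu() checks before the driver callback is ever invoked. Second, cxgb4's statically sized offload Tx queue array becomes dynamically allocated, per-ULD state. A minimal sketch of the MTU convention the drivers below are adopting (the function name is illustrative, not from this patch):

	/* Old style: each ndo_change_mtu() duplicated the range check. */
	static int example_change_mtu(struct net_device *dev, int new_mtu)
	{
		if (new_mtu < 81 || new_mtu > 9600)	/* per-driver bounds */
			return -EINVAL;
		dev->mtu = new_mtu;
		return 0;
	}

	/* New style: publish the range once at probe time and let the
	 * core (dev_set_mtu()) reject out-of-range values, so the
	 * callback can drop its own check.
	 */
	netdev->min_mtu = 81;
	netdev->max_mtu = 9600;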
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 53b1f9478383..6916c62f2487 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -85,6 +85,11 @@ struct t1_rx_mode {
 #define SPEED_INVALID 0xffff
 #define DUPLEX_INVALID 0xff
 
+/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
+#define PM3393_MAX_FRAME_SIZE 9600
+
+#define VSC7326_MAX_MTU 9600
+
 enum {
 	CHBT_BOARD_N110,
 	CHBT_BOARD_N210,
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index f5f1b0b51ebd..81d1d0bc7553 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -825,8 +825,6 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
 	if (!mac->ops->set_mtu)
 		return -EOPNOTSUPP;
-	if (new_mtu < 68)
-		return -EINVAL;
 	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
 		return ret;
 	dev->mtu = new_mtu;
@@ -1101,6 +1099,22 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
 
 		netdev->ethtool_ops = &t1_ethtool_ops;
+
+		switch (bi->board) {
+		case CHBT_BOARD_CHT110:
+		case CHBT_BOARD_N110:
+		case CHBT_BOARD_N210:
+		case CHBT_BOARD_CHT210:
+			netdev->max_mtu = PM3393_MAX_FRAME_SIZE -
+					  (ETH_HLEN + ETH_FCS_LEN);
+			break;
+		case CHBT_BOARD_CHN204:
+			netdev->max_mtu = VSC7326_MAX_MTU;
+			break;
+		default:
+			netdev->max_mtu = ETH_DATA_LEN;
+			break;
+		}
 	}
 
 	if (t1_init_sw_modules(adapter, bi) < 0) {
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index eb462d7db427..c27908e66f5e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -47,9 +47,6 @@
 
 #define OFFSET(REG_ADDR)    ((REG_ADDR) << 2)
 
-/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
-#define MAX_FRAME_SIZE  9600
-
 #define IPG 12
 #define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
 	SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
@@ -331,10 +328,7 @@ static int pm3393_set_mtu(struct cmac *cmac, int mtu)
 {
 	int enabled = cmac->instance->enabled;
 
-	/* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
-	mtu += 14 + 4;
-	if (mtu > MAX_FRAME_SIZE)
-		return -EINVAL;
+	mtu += ETH_HLEN + ETH_FCS_LEN;
 
 	/* Disable Rx/Tx MAC before configuring it. */
 	if (enabled)
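The cxgb2 max_mtu values introduced above fall straight out of the MAC frame-size limit: PM3393_MAX_FRAME_SIZE counts the whole Ethernet frame, so the largest payload is the frame limit minus the L2 overhead that pm3393_set_mtu() now adds back with ETH_HLEN + ETH_FCS_LEN. The arithmetic, spelled out (a sketch, not code from the patch):

	#include <linux/if_ether.h>	/* ETH_HLEN == 14, ETH_FCS_LEN == 4 */

	/* Largest MTU a PM3393-based board can accept:
	 * 9600 (max frame) - 14 (header) - 4 (FCS) = 9582
	 */
	static const int pm3393_max_mtu = 9600 - (ETH_HLEN + ETH_FCS_LEN);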
diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
index 6f30b6f78553..bdc895bd2a46 100644
--- a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
@@ -11,8 +11,6 @@
 /* 30 minutes for full statistics update */
 #define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS)
 
-#define MAX_MTU 9600
-
 /* The egress WM value 0x01a01fff should be used only when the
  * interface is down (MAC port disabled). This is a workaround
  * for disabling the T2/MAC flow-control. When the interface is
@@ -452,9 +450,6 @@ static int mac_set_mtu(struct cmac *mac, int mtu)
 {
 	int port = mac->instance->index;
 
-	if (mtu > MAX_MTU)
-		return -EINVAL;
-
 	/* max_len includes header and FCS */
 	vsc_write(mac->adapter, REG_MAX_LEN(port), mtu + 14 + 4);
 	return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 43da891fab97..092b3c16440b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2531,8 +2531,6 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
 	struct adapter *adapter = pi->adapter;
 	int ret;
 
-	if (new_mtu < 81)	/* accommodate SACK */
-		return -EINVAL;
 	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
 		return ret;
 	dev->mtu = new_mtu;
@@ -3295,6 +3293,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 		netdev->netdev_ops = &cxgb_netdev_ops;
 		netdev->ethtool_ops = &cxgb_ethtool_ops;
+		netdev->min_mtu = 81;
+		netdev->max_mtu = ETH_MAX_MTU;
 	}
 
 	pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 2125903043fb..0bce1bf9ca0f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -635,6 +635,7 @@ struct tx_sw_desc;
 
 struct sge_txq {
 	unsigned int  in_use;       /* # of in-use Tx descriptors */
+	unsigned int  q_type;       /* Q type Eth/Ctrl/Ofld */
 	unsigned int  size;         /* # of descriptors */
 	unsigned int  cidx;         /* SW consumer index */
 	unsigned int  pidx;         /* producer index */
@@ -665,7 +666,7 @@ struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
 	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
 } ____cacheline_aligned_in_smp;
 
-struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
+struct sge_uld_txq {               /* state for an SGE offload Tx queue */
 	struct sge_txq q;
 	struct adapter *adap;
 	struct sk_buff_head sendq;  /* list of backpressured packets */
@@ -693,14 +694,20 @@ struct sge_uld_rxq_info {
 	u8 uld;			/* uld type */
 };
 
+struct sge_uld_txq_info {
+	struct sge_uld_txq *uldtxq; /* Txq's for ULD */
+	atomic_t users;		/* num users */
+	u16 ntxq;		/* # of egress uld queues */
+};
+
 struct sge {
 	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
-	struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
 	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
 
 	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
 	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
 	struct sge_uld_rxq_info **uld_rxq_info;
+	struct sge_uld_txq_info **uld_txq_info;
 
 	struct sge_rspq intrq ____cacheline_aligned_in_smp;
 	spinlock_t intrq_lock;
@@ -1298,8 +1305,9 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 			  unsigned int cmplqid);
 int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
 			unsigned int cmplqid);
-int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
-			  struct net_device *dev, unsigned int iqid);
+int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
+			 struct net_device *dev, unsigned int iqid,
+			 unsigned int uld_type);
 irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
 int t4_sge_init(struct adapter *adap);
 void t4_sge_start(struct adapter *adap);
@@ -1661,4 +1669,7 @@ int t4_uld_mem_alloc(struct adapter *adap);
 void t4_uld_clean_up(struct adapter *adap);
 void t4_register_netevent_notifier(void);
 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
+void free_tx_desc(struct adapter *adap, struct sge_txq *q,
+		  unsigned int n, bool unmap);
+void free_txq(struct adapter *adap, struct sge_txq *q);
 #endif /* __CXGB4_H__ */
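The 81-byte floor that cxgb3 registers here (and cxgb4/cxgb4vf below) is the old "accommodate SACK" bound. One plausible reading of that comment, shown as arithmetic (my interpretation; the patch itself only carries the number):

	/* An IPv4 header is 20 bytes and a TCP header with a full
	 * 40 bytes of options (e.g. SACK blocks) is 60 bytes, so an
	 * MTU of 81 still guarantees room for one byte of payload:
	 */
	static const int min_mtu_sack = 20 + 60 + 1;	/* == 81 */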
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 20455d082cb8..acc231293e4d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2512,18 +2512,6 @@ do { \
 		RL("FLLow:", fl.low);
 		RL("FLStarving:", fl.starving);
 
-	} else if (ofld_idx < ofld_entries) {
-		const struct sge_ofld_txq *tx =
-			&adap->sge.ofldtxq[ofld_idx * 4];
-		int n = min(4, adap->sge.ofldqsets - 4 * ofld_idx);
-
-		S("QType:", "OFLD-Txq");
-		T("TxQ ID:", q.cntxt_id);
-		T("TxQ size:", q.size);
-		T("TxQ inuse:", q.in_use);
-		T("TxQ CIDX:", q.cidx);
-		T("TxQ PIDX:", q.pidx);
-
 	} else if (ctrl_idx < ctrl_entries) {
 		const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
 		int n = min(4, adap->params.nports - 4 * ctrl_idx);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index f320497368f4..66c37fac59b2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -134,24 +134,6 @@ MODULE_FIRMWARE(FW5_FNAME);
 MODULE_FIRMWARE(FW6_FNAME);
 
 /*
- * Normally we're willing to become the firmware's Master PF but will be happy
- * if another PF has already become the Master and initialized the adapter.
- * Setting "force_init" will cause this driver to forcibly establish itself as
- * the Master PF and initialize the adapter.
- */
-static uint force_init;
-
-module_param(force_init, uint, 0644);
-MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter,"
-		 "deprecated parameter");
-
-static int dflt_msg_enable = DFLT_MSG_ENABLE;
-
-module_param(dflt_msg_enable, int, 0644);
-MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, "
-		 "deprecated parameter");
-
-/*
  * The driver uses the best interrupt scheme available on a platform in the
  * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
  * of these schemes the driver may consider as follows:
@@ -179,16 +161,6 @@ MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
  */
 static int rx_dma_offset = 2;
 
-#ifdef CONFIG_PCI_IOV
-/* Configure the number of PCI-E Virtual Function which are to be instantiated
- * on SR-IOV Capable Physical Functions.
- */
-static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
-
-module_param_array(num_vf, uint, NULL, 0644);
-MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3, deprecated parameter - please use the pci sysfs interface.");
-#endif
-
 /* TX Queue select used to determine what algorithm to use for selecting TX
  * queue. Select between the kernel provided function (select_queue=0) or user
  * cxgb_select_queue function (select_queue=1)
@@ -530,15 +502,15 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
 			txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
 			txq->restarts++;
-			if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
+			if (txq->q_type == CXGB4_TXQ_ETH) {
 				struct sge_eth_txq *eq;
 
 				eq = container_of(txq, struct sge_eth_txq, q);
 				netif_tx_wake_queue(eq->txq);
 			} else {
-				struct sge_ofld_txq *oq;
+				struct sge_uld_txq *oq;
 
-				oq = container_of(txq, struct sge_ofld_txq, q);
+				oq = container_of(txq, struct sge_uld_txq, q);
 				tasklet_schedule(&oq->qresume_tsk);
 			}
 		} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
@@ -885,15 +857,6 @@ static int setup_sge_queues(struct adapter *adap)
 		}
 	}
 
-	j = s->ofldqsets / adap->params.nports; /* iscsi queues per channel */
-	for_each_ofldtxq(s, i) {
-		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
-					    adap->port[i / j],
-					    s->fw_evtq.cntxt_id);
-		if (err)
-			goto freeout;
-	}
-
 	for_each_port(adap, i) {
 		/* Note that cmplqid below is 0 if we don't
 		 * have RDMA queues, and that's the right value.
@@ -1922,8 +1885,18 @@ static void disable_dbs(struct adapter *adap)
 
 	for_each_ethrxq(&adap->sge, i)
 		disable_txq_db(&adap->sge.ethtxq[i].q);
-	for_each_ofldtxq(&adap->sge, i)
-		disable_txq_db(&adap->sge.ofldtxq[i].q);
+	if (is_offload(adap)) {
+		struct sge_uld_txq_info *txq_info =
+			adap->sge.uld_txq_info[CXGB4_TX_OFLD];
+
+		if (txq_info) {
+			for_each_ofldtxq(&adap->sge, i) {
+				struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+				disable_txq_db(&txq->q);
+			}
+		}
+	}
 	for_each_port(adap, i)
 		disable_txq_db(&adap->sge.ctrlq[i].q);
 }
@@ -1934,8 +1907,18 @@ static void enable_dbs(struct adapter *adap)
 
 	for_each_ethrxq(&adap->sge, i)
 		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
-	for_each_ofldtxq(&adap->sge, i)
-		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
+	if (is_offload(adap)) {
+		struct sge_uld_txq_info *txq_info =
+			adap->sge.uld_txq_info[CXGB4_TX_OFLD];
+
+		if (txq_info) {
+			for_each_ofldtxq(&adap->sge, i) {
+				struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+				enable_txq_db(adap, &txq->q);
+			}
+		}
+	}
 	for_each_port(adap, i)
 		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
 }
@@ -2006,8 +1989,17 @@ static void recover_all_queues(struct adapter *adap)
 
 	for_each_ethrxq(&adap->sge, i)
 		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
-	for_each_ofldtxq(&adap->sge, i)
-		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
+	if (is_offload(adap)) {
+		struct sge_uld_txq_info *txq_info =
+			adap->sge.uld_txq_info[CXGB4_TX_OFLD];
+		if (txq_info) {
+			for_each_ofldtxq(&adap->sge, i) {
+				struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+				sync_txq_pidx(adap, &txq->q);
+			}
+		}
+	}
 	for_each_port(adap, i)
 		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
 }
@@ -2502,8 +2494,6 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
 	int ret;
 	struct port_info *pi = netdev_priv(dev);
 
-	if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
-		return -EINVAL;
 	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
 			    -1, -1, -1, true);
 	if (!ret)
@@ -3993,7 +3983,7 @@ static inline bool is_x_10g_port(const struct link_config *lc)
 static void cfg_queues(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
-	int i, n10g = 0, qidx = 0;
+	int i = 0, n10g = 0, qidx = 0;
 #ifndef CONFIG_CHELSIO_T4_DCB
 	int q10g = 0;
 #endif
@@ -4008,8 +3998,7 @@ static void cfg_queues(struct adapter *adap)
 		adap->params.crypto = 0;
 	}
 
-	for_each_port(adap, i)
-		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+	n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
 #ifdef CONFIG_CHELSIO_T4_DCB
 	/* For Data Center Bridging support we need to be able to support up
 	 * to 8 Traffic Priorities; each of which will be assigned to its
@@ -4057,7 +4046,7 @@ static void cfg_queues(struct adapter *adap)
 		 * capped by the number of available cores.
 		 */
 		if (n10g) {
-			i = num_online_cpus();
+			i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
 			s->ofldqsets = roundup(i, adap->params.nports);
 		} else {
 			s->ofldqsets = adap->params.nports;
@@ -4077,9 +4066,6 @@ static void cfg_queues(struct adapter *adap)
 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
 		s->ctrlq[i].q.size = 512;
 
-	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
-		s->ofldtxq[i].q.size = 1024;
-
 	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
 	init_rspq(adap, &s->intrq, 0, 1, 512, 64);
 }
@@ -4715,7 +4701,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->name = pci_name(pdev);
 	adapter->mbox = func;
 	adapter->pf = func;
-	adapter->msg_enable = dflt_msg_enable;
+	adapter->msg_enable = DFLT_MSG_ENABLE;
 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
 
 	spin_lock_init(&adapter->stats_lock);
@@ -4803,6 +4789,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 		netdev->priv_flags |= IFF_UNICAST_FLT;
 
+		/* MTU range: 81 - 9600 */
+		netdev->min_mtu = 81;
+		netdev->max_mtu = MAX_MTU;
+
 		netdev->netdev_ops = &cxgb4_netdev_ops;
 #ifdef CONFIG_CHELSIO_T4_DCB
 		netdev->dcbnl_ops = &cxgb4_dcb_ops;
@@ -4931,6 +4921,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 	for_each_port(adapter, i) {
 		pi = adap2pinfo(adapter, i);
+		adapter->port[i]->dev_port = pi->lport;
 		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
 		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
 
@@ -4970,17 +4961,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 sriov:
 #ifdef CONFIG_PCI_IOV
-	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) {
-		dev_warn(&pdev->dev,
-			 "Enabling SR-IOV VFs using the num_vf module "
-			 "parameter is deprecated - please use the pci sysfs "
-			 "interface instead.\n");
-		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
-			dev_info(&pdev->dev,
-				 "instantiated %u virtual functions\n",
-				 num_vf[func]);
-	}
-
 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
 	if (!adapter) {
 		err = -ENOMEM;
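The fwevtq_handler() hunk above is the reason struct sge_txq grew a q_type field: the old code told Ethernet and offload queues apart by comparing the queue's address against the start of the ofldtxq[] array, which only worked because ethtxq[] sat before ofldtxq[] inside struct sge. With the offload queues now allocated separately (see cxgb4_uld.c below), that address ordering no longer exists, so each queue carries an explicit tag and is dispatched with container_of(). A minimal restatement of the new pattern, using the enum from cxgb4_uld.h:

	/* txq is a generic egress queue recovered from egr_map[]. */
	if (txq->q_type == CXGB4_TXQ_ETH) {
		struct sge_eth_txq *eq = container_of(txq, struct sge_eth_txq, q);

		netif_tx_wake_queue(eq->txq);		/* restart netdev queue */
	} else {
		struct sge_uld_txq *oq = container_of(txq, struct sge_uld_txq, q);

		tasklet_schedule(&oq->qresume_tsk);	/* restart ULD queue */
	}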
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 49d2debb334e..52af62e0ecb6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -113,7 +113,7 @@ static int fill_action_fields(struct adapter *adap,
 		}
 
 		/* Re-direct to specified port in hardware. */
-		if (is_tcf_mirred_redirect(a)) {
+		if (is_tcf_mirred_egress_redirect(a)) {
 			struct net_device *n_dev;
 			unsigned int i, index;
 			bool found = false;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 0945fa49a5dd..8098902c094a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -135,15 +135,17 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
 }
 
 static int alloc_uld_rxqs(struct adapter *adap,
-			  struct sge_uld_rxq_info *rxq_info,
-			  unsigned int nq, unsigned int offset, bool lro)
+			  struct sge_uld_rxq_info *rxq_info, bool lro)
 {
 	struct sge *s = &adap->sge;
-	struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
-	unsigned short *ids = rxq_info->rspq_id + offset;
-	unsigned int per_chan = nq / adap->params.nports;
+	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
+	struct sge_ofld_rxq *q = rxq_info->uldrxq;
+	unsigned short *ids = rxq_info->rspq_id;
 	unsigned int bmap_idx = 0;
-	int i, err, msi_idx;
+	unsigned int per_chan;
+	int i, err, msi_idx, que_idx = 0;
+
+	per_chan = rxq_info->nrxq / adap->params.nports;
 
 	if (adap->flags & USING_MSIX)
 		msi_idx = 1;
@@ -151,12 +153,18 @@ static int alloc_uld_rxqs(struct adapter *adap,
 		msi_idx = -((int)s->intrq.abs_id + 1);
 
 	for (i = 0; i < nq; i++, q++) {
+		if (i == rxq_info->nrxq) {
+			/* start allocation of concentrator queues */
+			per_chan = rxq_info->nciq / adap->params.nports;
+			que_idx = 0;
+		}
+
 		if (msi_idx >= 0) {
 			bmap_idx = get_msix_idx_from_bmap(adap);
 			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
 		}
 		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
-				       adap->port[i / per_chan],
+				       adap->port[que_idx++ / per_chan],
 				       msi_idx,
 				       q->fl.size ? &q->fl : NULL,
 				       uldrx_handler,
@@ -165,29 +173,19 @@ static int alloc_uld_rxqs(struct adapter *adap,
 		if (err)
 			goto freeout;
 		if (msi_idx >= 0)
-			rxq_info->msix_tbl[i + offset] = bmap_idx;
+			rxq_info->msix_tbl[i] = bmap_idx;
 		memset(&q->stats, 0, sizeof(q->stats));
 		if (ids)
 			ids[i] = q->rspq.abs_id;
 	}
 	return 0;
 freeout:
-	q = rxq_info->uldrxq + offset;
+	q = rxq_info->uldrxq;
 	for ( ; i; i--, q++) {
 		if (q->rspq.desc)
 			free_rspq_fl(adap, &q->rspq,
 				     q->fl.size ? &q->fl : NULL);
 	}
-
-	/* We need to free rxq also in case of ciq allocation failure */
-	if (offset) {
-		q = rxq_info->uldrxq + offset;
-		for ( ; i; i--, q++) {
-			if (q->rspq.desc)
-				free_rspq_fl(adap, &q->rspq,
-					     q->fl.size ? &q->fl : NULL);
-		}
-	}
 	return err;
 }
 
@@ -205,9 +203,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
 			return -ENOMEM;
 	}
 
-	ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
-		!alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
-				rxq_info->nrxq, lro));
+	ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
 
 	/* Tell uP to route control queue completions to rdma rspq */
 	if (adap->flags & FULL_INIT_DONE &&
@@ -451,6 +447,106 @@ static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
 		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
 }
 
+static void
+free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
+{
+	int nq = txq_info->ntxq;
+	int i;
+
+	for (i = 0; i < nq; i++) {
+		struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+		if (txq && txq->q.desc) {
+			tasklet_kill(&txq->qresume_tsk);
+			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
+					txq->q.cntxt_id);
+			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
+			kfree(txq->q.sdesc);
+			__skb_queue_purge(&txq->sendq);
+			free_txq(adap, &txq->q);
+		}
+	}
+}
+
+static int
+alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
+		  unsigned int uld_type)
+{
+	struct sge *s = &adap->sge;
+	int nq = txq_info->ntxq;
+	int i, j, err;
+
+	j = nq / adap->params.nports;
+	for (i = 0; i < nq; i++) {
+		struct sge_uld_txq *txq = &txq_info->uldtxq[i];
+
+		txq->q.size = 1024;
+		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
+					   s->fw_evtq.cntxt_id, uld_type);
+		if (err)
+			goto freeout;
+	}
+	return 0;
+freeout:
+	free_sge_txq_uld(adap, txq_info);
+	return err;
+}
+
+static void
+release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
+{
+	struct sge_uld_txq_info *txq_info = NULL;
+	int tx_uld_type = TX_ULD(uld_type);
+
+	txq_info = adap->sge.uld_txq_info[tx_uld_type];
+
+	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
+		free_sge_txq_uld(adap, txq_info);
+		kfree(txq_info->uldtxq);
+		kfree(txq_info);
+		adap->sge.uld_txq_info[tx_uld_type] = NULL;
+	}
+}
+
+static int
+setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
+		  const struct cxgb4_uld_info *uld_info)
+{
+	struct sge_uld_txq_info *txq_info = NULL;
+	int tx_uld_type, i;
+
+	tx_uld_type = TX_ULD(uld_type);
+	txq_info = adap->sge.uld_txq_info[tx_uld_type];
+
+	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
+	    (atomic_inc_return(&txq_info->users) > 1))
+		return 0;
+
+	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
+	if (!txq_info)
+		return -ENOMEM;
+
+	i = min_t(int, uld_info->ntxq, num_online_cpus());
+	txq_info->ntxq = roundup(i, adap->params.nports);
+
+	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
+				   GFP_KERNEL);
+	if (!txq_info->uldtxq) {
+		kfree(txq_info);
+		return -ENOMEM;
+	}
+
+	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
+		kfree(txq_info->uldtxq);
+		kfree(txq_info);
+		return -ENOMEM;
+	}
+
+	atomic_inc(&txq_info->users);
+	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
+	return 0;
+}
+
 static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
 			   struct cxgb4_lld_info *lli)
 {
@@ -476,7 +572,15 @@ int t4_uld_mem_alloc(struct adapter *adap)
 	if (!s->uld_rxq_info)
 		goto err_uld;
 
+	s->uld_txq_info = kzalloc(CXGB4_TX_MAX *
+				  sizeof(struct sge_uld_txq_info *),
+				  GFP_KERNEL);
+	if (!s->uld_txq_info)
+		goto err_uld_rx;
 	return 0;
+
+err_uld_rx:
+	kfree(s->uld_rxq_info);
 err_uld:
 	kfree(adap->uld);
 	return -ENOMEM;
@@ -486,6 +590,7 @@ void t4_uld_mem_free(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
 
+	kfree(s->uld_txq_info);
 	kfree(s->uld_rxq_info);
 	kfree(adap->uld);
 }
 
@@ -620,6 +725,9 @@ int cxgb4_register_uld(enum cxgb4_uld type,
 			ret = -EBUSY;
 			goto free_irq;
 		}
+		ret = setup_sge_txq_uld(adap, type, p);
+		if (ret)
+			goto free_irq;
 		adap->uld[type] = *p;
 		uld_attach(adap, type);
 		adap_idx++;
@@ -648,6 +756,7 @@ out:
 			break;
 		adap->uld[type].handle = NULL;
 		adap->uld[type].add = NULL;
+		release_sge_txq_uld(adap, type);
 		if (adap->flags & FULL_INIT_DONE)
 			quiesce_rx_uld(adap, type);
 		if (adap->flags & USING_MSIX)
@@ -683,6 +792,7 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
 			continue;
 		adap->uld[type].handle = NULL;
 		adap->uld[type].add = NULL;
+		release_sge_txq_uld(adap, type);
 		if (adap->flags & FULL_INIT_DONE)
 			quiesce_rx_uld(adap, type);
 		if (adap->flags & USING_MSIX)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 2996793b1aaa..4c856605fdfa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -77,6 +77,8 @@ enum {
 
 /* Special asynchronous notification message */
 #define CXGB4_MSG_AN ((void *)1)
+#define TX_ULD(uld) (((uld) != CXGB4_ULD_CRYPTO) ? CXGB4_TX_OFLD :\
+		      CXGB4_TX_CRYPTO)
 
 struct serv_entry {
 	void *data;
@@ -223,6 +225,19 @@ enum cxgb4_uld {
 	CXGB4_ULD_MAX
 };
 
+enum cxgb4_tx_uld {
+	CXGB4_TX_OFLD,
+	CXGB4_TX_CRYPTO,
+	CXGB4_TX_MAX
+};
+
+enum cxgb4_txq_type {
+	CXGB4_TXQ_ETH,
+	CXGB4_TXQ_ULD,
+	CXGB4_TXQ_CTRL,
+	CXGB4_TXQ_MAX
+};
+
 enum cxgb4_state {
 	CXGB4_STATE_UP,
 	CXGB4_STATE_START_RECOVERY,
@@ -316,6 +331,7 @@ struct cxgb4_uld_info {
 	void *handle;
 	unsigned int nrxq;
 	unsigned int rxq_size;
+	unsigned int ntxq;
 	bool ciq;
 	bool lro;
 	void *(*add)(const struct cxgb4_lld_info *p);
@@ -333,6 +349,7 @@ struct cxgb4_uld_info {
 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
 int cxgb4_unregister_uld(enum cxgb4_uld type);
 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
+int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb);
 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
 unsigned int cxgb4_port_chan(const struct net_device *dev);
 unsigned int cxgb4_port_viid(const struct net_device *dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 539de764bbd3..cbd68a8fe2e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -210,8 +210,10 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
 
 	/* Unbind queue from any existing class */
 	err = t4_sched_queue_unbind(pi, p);
-	if (err)
+	if (err) {
+		t4_free_mem(qe);
 		goto out;
+	}
 
 	/* Bind queue to specified class */
 	memset(qe, 0, sizeof(*qe));
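The ownership model for the new Tx queue sets, as implemented in cxgb4_uld.c above: TX_ULD() folds every ULD onto one of two queue classes: crypto gets CXGB4_TX_CRYPTO to itself, and all other ULDs share CXGB4_TX_OFLD. The shared class is reference-counted, so the queues are allocated by the first ULD to register and freed by the last one to unregister. Condensed from setup_sge_txq_uld()/release_sge_txq_uld():

	/* Registration: reuse the shared OFLD queues if they exist. */
	if (tx_uld_type == CXGB4_TX_OFLD && txq_info &&
	    atomic_inc_return(&txq_info->users) > 1)
		return 0;	/* second and later ULDs just share */
	/* ...first user kzalloc()s txq_info and allocates the queues... */

	/* Unregistration: the last reference tears everything down. */
	if (atomic_dec_and_test(&txq_info->users))
		free_sge_txq_uld(adap, txq_info);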
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1e74fd6085df..9f606478c29c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -377,8 +377,8 @@ unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
-static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
-			 unsigned int n, bool unmap)
+void free_tx_desc(struct adapter *adap, struct sge_txq *q,
+		  unsigned int n, bool unmap)
 {
 	struct tx_sw_desc *d;
 	unsigned int cidx = q->cidx;
@@ -1543,7 +1543,7 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
 *	inability to map packets.  A periodic timer attempts to restart
 *	queues so marked.
 */
-static void txq_stop_maperr(struct sge_ofld_txq *q)
+static void txq_stop_maperr(struct sge_uld_txq *q)
 {
 	q->mapping_err++;
 	q->q.stops++;
@@ -1559,7 +1559,7 @@ static void txq_stop_maperr(struct sge_ofld_txq *q)
 *	Stops an offload Tx queue that has become full and modifies the packet
 *	being written to request a wakeup.
 */
-static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
+static void ofldtxq_stop(struct sge_uld_txq *q, struct sk_buff *skb)
 {
 	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
 
@@ -1586,7 +1586,7 @@ static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
 *	boolean "service_ofldq_running" to make sure that only one instance
 *	is ever running at a time ...
 */
-static void service_ofldq(struct sge_ofld_txq *q)
+static void service_ofldq(struct sge_uld_txq *q)
 {
 	u64 *pos, *before, *end;
 	int credits;
@@ -1706,7 +1706,7 @@ static void service_ofldq(struct sge_ofld_txq *q)
 *
 *	Send an offload packet through an SGE offload queue.
 */
-static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
+static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
 {
 	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
 	spin_lock(&q->sendq.lock);
@@ -1735,7 +1735,7 @@ static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
 */
 static void restart_ofldq(unsigned long data)
 {
-	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
+	struct sge_uld_txq *q = (struct sge_uld_txq *)data;
 
 	spin_lock(&q->sendq.lock);
 	q->full = 0;            /* the queue actually is completely empty now */
@@ -1767,17 +1767,23 @@ static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
 	return skb->queue_mapping & 1;
 }
 
-static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
+static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
+			   unsigned int tx_uld_type)
 {
+	struct sge_uld_txq_info *txq_info;
+	struct sge_uld_txq *txq;
 	unsigned int idx = skb_txq(skb);
 
+	txq_info = adap->sge.uld_txq_info[tx_uld_type];
+	txq = &txq_info->uldtxq[idx];
+
 	if (unlikely(is_ctrl_pkt(skb))) {
 		/* Single ctrl queue is a requirement for LE workaround path */
 		if (adap->tids.nsftids)
 			idx = 0;
 		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
 	}
-	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
+	return ofld_xmit(txq, skb);
 }
 
 /**
@@ -1794,7 +1800,7 @@ int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
 	int ret;
 
 	local_bh_disable();
-	ret = ofld_send(adap, skb);
+	ret = uld_send(adap, skb, CXGB4_TX_OFLD);
 	local_bh_enable();
 	return ret;
 }
@@ -1813,6 +1819,39 @@ int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(cxgb4_ofld_send);
 
+/**
+ *	t4_crypto_send - send crypto packet
+ *	@adap: the adapter
+ *	@skb: the packet
+ *
+ *	Sends crypto packet.  We use the packet queue_mapping to select the
+ *	appropriate Tx queue as follows: bit 0 indicates whether the packet
+ *	should be sent as regular or control, bits 1-15 select the queue.
+ */
+static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
+{
+	int ret;
+
+	local_bh_disable();
+	ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
+	local_bh_enable();
+	return ret;
+}
+
+/**
+ *	cxgb4_crypto_send - send crypto packet
+ *	@dev: the net device
+ *	@skb: the packet
+ *
+ *	Sends crypto packet.  This is an exported version of @t4_crypto_send,
+ *	intended for ULDs.
+ */
+int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
+{
+	return t4_crypto_send(netdev2adap(dev), skb);
+}
+EXPORT_SYMBOL(cxgb4_crypto_send);
+
 static inline void copy_frags(struct sk_buff *skb,
 			      const struct pkt_gl *gl, unsigned int offset)
 {
@@ -2479,7 +2518,7 @@ static void sge_tx_timer_cb(unsigned long data)
 	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
 		for (m = s->txq_maperr[i]; m; m &= m - 1) {
 			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
-			struct sge_ofld_txq *txq = s->egr_map[id];
+			struct sge_uld_txq *txq = s->egr_map[id];
 
 			clear_bit(id, s->txq_maperr);
 			tasklet_schedule(&txq->qresume_tsk);
@@ -2799,6 +2838,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 		return ret;
 	}
 
+	txq->q.q_type = CXGB4_TXQ_ETH;
 	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
 	txq->txq = netdevq;
 	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
@@ -2852,6 +2892,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 		return ret;
 	}
 
+	txq->q.q_type = CXGB4_TXQ_CTRL;
 	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
 	txq->adap = adap;
 	skb_queue_head_init(&txq->sendq);
@@ -2872,13 +2913,15 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
 	return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
 }
 
-int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
-			  struct net_device *dev, unsigned int iqid)
+int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
+			 struct net_device *dev, unsigned int iqid,
+			 unsigned int uld_type)
 {
 	int ret, nentries;
 	struct fw_eq_ofld_cmd c;
 	struct sge *s = &adap->sge;
 	struct port_info *pi = netdev_priv(dev);
+	int cmd = FW_EQ_OFLD_CMD;
 
 	/* Add status entries */
 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
@@ -2891,7 +2934,9 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
 		return -ENOMEM;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
+	if (unlikely(uld_type == CXGB4_TX_CRYPTO))
+		cmd = FW_EQ_CTRL_CMD;
+	c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
 			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
 			    FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
 			    FW_EQ_OFLD_CMD_VFN_V(0));
@@ -2919,6 +2964,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
 		return ret;
 	}
 
+	txq->q.q_type = CXGB4_TXQ_ULD;
 	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
 	txq->adap = adap;
 	skb_queue_head_init(&txq->sendq);
@@ -2928,7 +2974,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
 	return 0;
 }
 
-static void free_txq(struct adapter *adap, struct sge_txq *q)
+void free_txq(struct adapter *adap, struct sge_txq *q)
 {
 	struct sge *s = &adap->sge;
 
@@ -2951,7 +2997,6 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
 		   rq->cntxt_id, fl_id, 0xffff);
 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
 			  rq->desc, rq->phys_addr);
-	napi_hash_del(&rq->napi);
 	netif_napi_del(&rq->napi);
 	rq->netdev = NULL;
 	rq->cntxt_id = rq->abs_id = 0;
@@ -3026,21 +3071,6 @@ void t4_free_sge_resources(struct adapter *adap)
 		}
 	}
 
-	/* clean up offload Tx queues */
-	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
-		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
-
-		if (q->q.desc) {
-			tasklet_kill(&q->qresume_tsk);
-			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
-					q->q.cntxt_id);
-			free_tx_desc(adap, &q->q, q->q.in_use, false);
-			kfree(q->q.sdesc);
-			__skb_queue_purge(&q->sendq);
-			free_txq(adap, &q->q);
-		}
-	}
-
 	/* clean up control Tx queues */
 	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
 		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
@@ -3093,12 +3123,34 @@ void t4_sge_stop(struct adapter *adap)
 	if (s->tx_timer.function)
 		del_timer_sync(&s->tx_timer);
 
-	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
-		struct sge_ofld_txq *q = &s->ofldtxq[i];
+	if (is_offload(adap)) {
+		struct sge_uld_txq_info *txq_info;
+
+		txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
+		if (txq_info) {
+			struct sge_uld_txq *txq = txq_info->uldtxq;
 
-		if (q->q.desc)
-			tasklet_kill(&q->qresume_tsk);
+			for_each_ofldtxq(&adap->sge, i) {
+				if (txq->q.desc)
+					tasklet_kill(&txq->qresume_tsk);
+			}
+		}
+	}
+
+	if (is_pci_uld(adap)) {
+		struct sge_uld_txq_info *txq_info;
+
+		txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
+		if (txq_info) {
+			struct sge_uld_txq *txq = txq_info->uldtxq;
+
+			for_each_ofldtxq(&adap->sge, i) {
+				if (txq->q.desc)
+					tasklet_kill(&txq->qresume_tsk);
+			}
+		}
+	}
+
 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
 		struct sge_ctrl_txq *cq = &s->ctrlq[i];
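Both t4_ofld_send() and the new t4_crypto_send() rely on the queue_mapping convention described in their kernel-doc: bit 0 marks a control packet (routed to a ctrl queue), and bits 1-15 carry the Tx queue index. A ULD encodes that before handing the skb over; this mirrors the set_wr_txq() helper already in cxgb4_uld.h (my helper name is illustrative):

	/* Encode "packet on ULD Tx queue qidx" into the skb. */
	static inline void encode_uld_txq(struct sk_buff *skb,
					  unsigned int qidx, bool is_ctrl)
	{
		skb->queue_mapping = (qidx << 1) | (is_ctrl ? 1 : 0);
	}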
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 20dec85da63d..e8139514d32c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -7851,7 +7851,6 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
 			return ret;
 
 		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
-		adap->port[i]->dev_port = j;
 		j++;
 	}
 	return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 50812a1d67bd..ecf3ccc257bc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -168,6 +168,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
 	CH_PCI_ID_TABLE_FENTRY(0x509a),	/* Custom T520-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x509b),	/* Custom T540-CR LOM */
 	CH_PCI_ID_TABLE_FENTRY(0x509c),	/* Custom T520-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x509d),	/* Custom T540-CR */
 
 	/* T6 adapters:
 	 */
@@ -178,9 +179,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
 	CH_PCI_ID_TABLE_FENTRY(0x6005),
 	CH_PCI_ID_TABLE_FENTRY(0x6006),
 	CH_PCI_ID_TABLE_FENTRY(0x6007),
+	CH_PCI_ID_TABLE_FENTRY(0x6008),
 	CH_PCI_ID_TABLE_FENTRY(0x6009),
 	CH_PCI_ID_TABLE_FENTRY(0x600d),
-	CH_PCI_ID_TABLE_FENTRY(0x6010),
 	CH_PCI_ID_TABLE_FENTRY(0x6011),
 	CH_PCI_ID_TABLE_FENTRY(0x6014),
 	CH_PCI_ID_TABLE_FENTRY(0x6015),
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 100b2cc064a3..0d1a134c8174 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -70,13 +70,6 @@
 	NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
 	NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
 
-static int dflt_msg_enable = DFLT_MSG_ENABLE;
-
-module_param(dflt_msg_enable, int, 0644);
-MODULE_PARM_DESC(dflt_msg_enable,
-		 "default adapter ethtool message level bitmap, "
-		 "deprecated parameter");
-
 /*
  * The driver uses the best interrupt scheme available on a platform in the
  * order MSI-X then MSI.  This parameter determines which of these schemes the
@@ -1108,10 +1101,6 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
 	int ret;
 	struct port_info *pi = netdev_priv(dev);
 
-	/* accommodate SACK */
-	if (new_mtu < 81)
-		return -EINVAL;
-
 	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
 			      -1, -1, -1, -1, true);
 	if (!ret)
@@ -2895,7 +2884,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
 	 * Initialize adapter level features.
 	 */
 	adapter->name = pci_name(pdev);
-	adapter->msg_enable = dflt_msg_enable;
+	adapter->msg_enable = DFLT_MSG_ENABLE;
 	err = adap_init0(adapter);
 	if (err)
 		goto err_unmap_bar;
@@ -2966,9 +2955,12 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
 			netdev->features |= NETIF_F_HIGHDMA;
 
 		netdev->priv_flags |= IFF_UNICAST_FLT;
+		netdev->min_mtu = 81;
+		netdev->max_mtu = ETH_MAX_MTU;
 
 		netdev->netdev_ops = &cxgb4vf_netdev_ops;
 		netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
+		netdev->dev_port = pi->port_id;
 
 		/*
 		 * Initialize the hardware/software state for the port.
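Taken together, the series lets a PCI ULD such as a crypto driver ask for its own Tx queue set by filling the new ntxq field and transmit with cxgb4_crypto_send(). A hypothetical registration sketch (all my_* names are illustrative; only the fields and entry points come from this patch):

	static struct cxgb4_uld_info my_crypto_uld_info = {
		.name       = "my_crypto",	/* hypothetical ULD */
		.nrxq       = 4,
		.rxq_size   = 1024,
		.ntxq       = 4,		/* new in this series */
		.add        = my_uld_add,
		.rx_handler = my_uld_rx_handler,
	};

	/* cxgb4 allocates (crypto) or shares (offload) the Tx queues
	 * in setup_sge_txq_uld() during registration:
	 */
	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &my_crypto_uld_info);

	/* Work requests then go out on the dedicated queue set: */
	cxgb4_crypto_send(netdev, skb);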