Diffstat (limited to 'drivers/net/ethernet/qlogic/qede')
-rw-r--r--   drivers/net/ethernet/qlogic/qede/Makefile       |   1
-rw-r--r--   drivers/net/ethernet/qlogic/qede/qede.h         |  48
-rw-r--r--   drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 314
-rw-r--r--   drivers/net/ethernet/qlogic/qede/qede_main.c    | 526
-rw-r--r--   drivers/net/ethernet/qlogic/qede/qede_roce.c    | 314
5 files changed, 910 insertions, 293 deletions
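
The bulk of this diff reworks the qede fastpath so that an interrupt vector can be Rx-only, Tx-only, or combined. As a standalone, hypothetical sketch (plain C, not part of the commit), the loop below mirrors the ordering rule that qede_alloc_fp_array() establishes in the patch — Rx fastpaths first, then combined, then Tx — which is what keeps the QEDE_RSS_COUNT()/QEDE_TSS_COUNT() ranges contiguous:

/* Standalone sketch (not driver code): mimics the Rx-first, combined,
 * then Tx ordering that qede_alloc_fp_array() establishes below.
 */
#include <stdio.h>

#define BIT(n)                 (1U << (n))
#define QEDE_FASTPATH_TX       BIT(0)
#define QEDE_FASTPATH_RX       BIT(1)
#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)

int main(void)
{
    /* Example split: 2 Rx-only, 1 Tx-only, 4 vectors total */
    unsigned int num_queues = 4, fp_num_rx = 2, fp_num_tx = 1;
    unsigned int fp_rx = fp_num_rx;
    unsigned int fp_combined = num_queues - fp_num_rx - fp_num_tx;
    unsigned int type[8];

    for (unsigned int i = 0; i < num_queues; i++) {
        if (fp_rx) {
            type[i] = QEDE_FASTPATH_RX;
            fp_rx--;
        } else if (fp_combined) {
            type[i] = QEDE_FASTPATH_COMBINED;
            fp_combined--;
        } else {
            type[i] = QEDE_FASTPATH_TX;
        }
        printf("fp[%u]: %s%s\n", i,
               (type[i] & QEDE_FASTPATH_RX) ? "RX" : "",
               (type[i] & QEDE_FASTPATH_TX) ? "TX" : "");
    }
    /* Per the qede.h macros below (with num_tc == 1):
     * QEDE_RSS_COUNT() == num_queues - fp_num_tx == 3 Rx-capable,
     * QEDE_TSS_COUNT() == num_queues - fp_num_rx == 2 Tx-capable.
     */
    return 0;
}

With this split the sketch prints fp[0]=RX, fp[1]=RX, fp[2]=RXTX, fp[3]=TX, matching the count macros introduced in qede.h below.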
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile index 74a49850d74d..28dc58919c85 100644 --- a/drivers/net/ethernet/qlogic/qede/Makefile +++ b/drivers/net/ethernet/qlogic/qede/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o qede-y := qede_main.o qede_ethtool.o qede-$(CONFIG_DCB) += qede_dcbnl.o +qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 02b06d4e40ae..28c0e9f42c9e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -25,7 +25,7 @@ #define QEDE_MAJOR_VERSION 8 #define QEDE_MINOR_VERSION 10 -#define QEDE_REVISION_VERSION 1 +#define QEDE_REVISION_VERSION 9 #define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ __stringify(QEDE_MINOR_VERSION) "." \ @@ -36,6 +36,8 @@ struct qede_stats { u64 no_buff_discards; + u64 packet_too_big_discard; + u64 ttl0_discard; u64 rx_ucast_bytes; u64 rx_mcast_bytes; u64 rx_bcast_bytes; @@ -104,6 +106,13 @@ struct qede_vlan { bool configured; }; +struct qede_rdma_dev { + struct qedr_dev *qedr_dev; + struct list_head entry; + struct list_head roce_event_list; + struct workqueue_struct *roce_wq; +}; + struct qede_dev { struct qed_dev *cdev; struct net_device *ndev; @@ -124,16 +133,22 @@ struct qede_dev { (edev)->dev_info.num_tc) struct qede_fastpath *fp_array; - u16 req_rss; - u16 num_rss; + u8 req_num_tx; + u8 fp_num_tx; + u8 req_num_rx; + u8 fp_num_rx; + u16 req_queues; + u16 num_queues; u8 num_tc; -#define QEDE_RSS_CNT(edev) ((edev)->num_rss) -#define QEDE_TSS_CNT(edev) ((edev)->num_rss * \ - (edev)->num_tc) -#define QEDE_TSS_IDX(edev, txqidx) ((txqidx) % (edev)->num_rss) -#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / (edev)->num_rss) +#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues) +#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx) +#define QEDE_TSS_COUNT(edev) (((edev)->num_queues - (edev)->fp_num_rx) * \ + (edev)->num_tc) +#define QEDE_TX_IDX(edev, txqidx) ((edev)->fp_num_rx + (txqidx) % \ + QEDE_TSS_COUNT(edev)) +#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / QEDE_TSS_COUNT(edev)) #define QEDE_TX_QUEUE(edev, txqidx) \ - (&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \ + (&(edev)->fp_array[QEDE_TX_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX(\ (edev), (txqidx))]) struct qed_int_info int_info; @@ -177,6 +192,8 @@ struct qede_dev { unsigned long sp_flags; u16 vxlan_dst_port; u16 geneve_dst_port; + + struct qede_rdma_dev rdma_info; }; enum QEDE_STATE { @@ -235,6 +252,7 @@ struct qede_rx_queue { u16 num_rx_buffers; u16 rxq_id; + u64 rcv_pkts; u64 rx_hw_errors; u64 rx_alloc_errors; u64 rx_ip_frags; @@ -263,6 +281,10 @@ struct qede_tx_queue { union db_prod tx_db; u16 num_tx_buffers; + u64 xmit_pkts; + u64 stopped_cnt; + + bool is_legacy; }; #define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \ @@ -277,7 +299,11 @@ struct qede_tx_queue { struct qede_fastpath { struct qede_dev *edev; - u8 rss_id; +#define QEDE_FASTPATH_TX BIT(0) +#define QEDE_FASTPATH_RX BIT(1) +#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX) + u8 type; + u8 id; struct napi_struct napi; struct qed_sb_info *sb_info; struct qede_rx_queue *rxq; @@ -337,6 +363,6 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, #define QEDE_MIN_PKT_LEN 64 #define QEDE_RX_HDR_SIZE 256 -#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++) +#define for_each_queue(i) 
for (i = 0; i < edev->num_queues; i++) #endif /* _QEDE_H_ */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index f8492cac9290..25a9b293ee8f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -35,6 +35,7 @@ static const struct { u64 offset; char string[ETH_GSTRING_LEN]; } qede_rqstats_arr[] = { + QEDE_RQSTAT(rcv_pkts), QEDE_RQSTAT(rx_hw_errors), QEDE_RQSTAT(rx_alloc_errors), QEDE_RQSTAT(rx_ip_frags), @@ -44,6 +45,24 @@ static const struct { #define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \ (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\ qede_rqstats_arr[(sindex)].offset))) +#define QEDE_TQSTAT_OFFSET(stat_name) \ + (offsetof(struct qede_tx_queue, stat_name)) +#define QEDE_TQSTAT_STRING(stat_name) (#stat_name) +#define QEDE_TQSTAT(stat_name) \ + {QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)} +#define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr) +static const struct { + u64 offset; + char string[ETH_GSTRING_LEN]; +} qede_tqstats_arr[] = { + QEDE_TQSTAT(xmit_pkts), + QEDE_TQSTAT(stopped_cnt), +}; + +#define QEDE_TQSTATS_DATA(dev, sindex, tssid, tcid) \ + (*((u64 *)(((void *)(&dev->fp_array[tssid].txqs[tcid])) +\ + qede_tqstats_arr[(sindex)].offset))) + static const struct { u64 offset; char string[ETH_GSTRING_LEN]; @@ -107,6 +126,8 @@ static const struct { QEDE_PF_STAT(mftag_filter_discards), QEDE_PF_STAT(mac_filter_discards), QEDE_STAT(tx_err_drop_pkts), + QEDE_STAT(ttl0_discard), + QEDE_STAT(packet_too_big_discard), QEDE_STAT(coalesced_pkts), QEDE_STAT(coalesced_events), @@ -151,17 +172,29 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) { int i, j, k; + for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) { + int tc; + + for (j = 0; j < QEDE_NUM_RQSTATS; j++) + sprintf(buf + (k + j) * ETH_GSTRING_LEN, + "%d: %s", i, qede_rqstats_arr[j].string); + k += QEDE_NUM_RQSTATS; + for (tc = 0; tc < edev->num_tc; tc++) { + for (j = 0; j < QEDE_NUM_TQSTATS; j++) + sprintf(buf + (k + j) * ETH_GSTRING_LEN, + "%d.%d: %s", i, tc, + qede_tqstats_arr[j].string); + k += QEDE_NUM_TQSTATS; + } + } + for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) { if (IS_VF(edev) && qede_stats_arr[i].pf_only) continue; - strcpy(buf + j * ETH_GSTRING_LEN, + strcpy(buf + (k + j) * ETH_GSTRING_LEN, qede_stats_arr[i].string); j++; } - - for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++) - strcpy(buf + j * ETH_GSTRING_LEN, - qede_rqstats_arr[k].string); } static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf) @@ -197,19 +230,30 @@ static void qede_get_ethtool_stats(struct net_device *dev, mutex_lock(&edev->qede_lock); + for (qid = 0; qid < QEDE_QUEUE_CNT(edev); qid++) { + int tc; + + if (edev->fp_array[qid].type & QEDE_FASTPATH_RX) { + for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) + buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid); + } + + if (edev->fp_array[qid].type & QEDE_FASTPATH_TX) { + for (tc = 0; tc < edev->num_tc; tc++) { + for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++) + buf[cnt++] = QEDE_TQSTATS_DATA(edev, + sidx, + qid, tc); + } + } + } + for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) { if (IS_VF(edev) && qede_stats_arr[sidx].pf_only) continue; buf[cnt++] = QEDE_STATS_DATA(edev, sidx); } - for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) { - buf[cnt] = 0; - for (qid = 0; qid < edev->num_rss; qid++) - buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid); - cnt++; - } - mutex_unlock(&edev->qede_lock); } @@ -227,7 +271,8 @@ static int 
qede_get_sset_count(struct net_device *dev, int stringset) if (qede_stats_arr[i].pf_only) num_stats--; } - return num_stats + QEDE_NUM_RQSTATS; + return num_stats + QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS + + QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->num_tc; case ETH_SS_PRIV_FLAGS: return QEDE_PRI_FLAG_LEN; case ETH_SS_TEST: @@ -249,78 +294,150 @@ static u32 qede_get_priv_flags(struct net_device *dev) return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT; } -static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +struct qede_link_mode_mapping { + u32 qed_link_mode; + u32 ethtool_link_mode; +}; + +static const struct qede_link_mode_mapping qed_lm_map[] = { + {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, + {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, + {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT}, + {QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, + {QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT}, + {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, + {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, + {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, + {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, + {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, + {QED_LM_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, +}; + +#define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name) \ +{ \ + int i; \ + \ + for (i = 0; i < QED_LM_COUNT; i++) { \ + if ((caps) & (qed_lm_map[i].qed_link_mode)) \ + __set_bit(qed_lm_map[i].ethtool_link_mode,\ + lk_ksettings->link_modes.name); \ + } \ +} + +#define QEDE_ETHTOOL_TO_DRV_CAPS(caps, lk_ksettings, name) \ +{ \ + int i; \ + \ + for (i = 0; i < QED_LM_COUNT; i++) { \ + if (test_bit(qed_lm_map[i].ethtool_link_mode, \ + lk_ksettings->link_modes.name)) \ + caps |= qed_lm_map[i].qed_link_mode; \ + } \ +} + +static int qede_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { + struct ethtool_link_settings *base = &cmd->base; struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; memset(¤t_link, 0, sizeof(current_link)); edev->ops->common->get_link(edev->cdev, ¤t_link); - cmd->supported = current_link.supported_caps; - cmd->advertising = current_link.advertised_caps; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported) + + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + QEDE_DRV_TO_ETHTOOL_CAPS(current_link.advertised_caps, cmd, advertising) + + ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); + QEDE_DRV_TO_ETHTOOL_CAPS(current_link.lp_caps, cmd, lp_advertising) + if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) { - ethtool_cmd_speed_set(cmd, current_link.speed); - cmd->duplex = current_link.duplex; + base->speed = current_link.speed; + base->duplex = current_link.duplex; } else { - cmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + base->speed = SPEED_UNKNOWN; + base->duplex = DUPLEX_UNKNOWN; } - cmd->port = current_link.port; - cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE : - AUTONEG_DISABLE; - cmd->lp_advertising = current_link.lp_caps; + base->port = current_link.port; + base->autoneg = (current_link.autoneg) ? 
AUTONEG_ENABLE : + AUTONEG_DISABLE; return 0; } -static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int qede_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { + const struct ethtool_link_settings *base = &cmd->base; struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; struct qed_link_params params; - u32 speed; if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { - DP_INFO(edev, - "Link settings are not allowed to be changed\n"); + DP_INFO(edev, "Link settings are not allowed to be changed\n"); return -EOPNOTSUPP; } - memset(¤t_link, 0, sizeof(current_link)); memset(¶ms, 0, sizeof(params)); edev->ops->common->get_link(edev->cdev, ¤t_link); - speed = ethtool_cmd_speed(cmd); params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS; params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG; - if (cmd->autoneg == AUTONEG_ENABLE) { + if (base->autoneg == AUTONEG_ENABLE) { params.autoneg = true; params.forced_speed = 0; - params.adv_speeds = cmd->advertising; - } else { /* forced speed */ + QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising) + } else { /* forced speed */ params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED; params.autoneg = false; - params.forced_speed = speed; - switch (speed) { + params.forced_speed = base->speed; + switch (base->speed) { case SPEED_10000: if (!(current_link.supported_caps & - SUPPORTED_10000baseKR_Full)) { + QED_LM_10000baseKR_Full_BIT)) { DP_INFO(edev, "10G speed not supported\n"); return -EINVAL; } - params.adv_speeds = SUPPORTED_10000baseKR_Full; + params.adv_speeds = QED_LM_10000baseKR_Full_BIT; + break; + case SPEED_25000: + if (!(current_link.supported_caps & + QED_LM_25000baseKR_Full_BIT)) { + DP_INFO(edev, "25G speed not supported\n"); + return -EINVAL; + } + params.adv_speeds = QED_LM_25000baseKR_Full_BIT; break; case SPEED_40000: if (!(current_link.supported_caps & - SUPPORTED_40000baseLR4_Full)) { + QED_LM_40000baseLR4_Full_BIT)) { DP_INFO(edev, "40G speed not supported\n"); return -EINVAL; } - params.adv_speeds = SUPPORTED_40000baseLR4_Full; + params.adv_speeds = QED_LM_40000baseLR4_Full_BIT; + break; + case SPEED_50000: + if (!(current_link.supported_caps & + QED_LM_50000baseKR2_Full_BIT)) { + DP_INFO(edev, "50G speed not supported\n"); + return -EINVAL; + } + params.adv_speeds = QED_LM_50000baseKR2_Full_BIT; + break; + case SPEED_100000: + if (!(current_link.supported_caps & + QED_LM_100000baseKR4_Full_BIT)) { + DP_INFO(edev, "100G speed not supported\n"); + return -EINVAL; + } + params.adv_speeds = QED_LM_100000baseKR4_Full_BIT; break; default: - DP_INFO(edev, "Unsupported speed %u\n", speed); + DP_INFO(edev, "Unsupported speed %u\n", base->speed); return -EINVAL; } } @@ -368,8 +485,7 @@ static u32 qede_get_msglevel(struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); - return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | - edev->dp_module; + return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module; } static void qede_set_msglevel(struct net_device *ndev, u32 level) @@ -393,8 +509,7 @@ static int qede_nway_reset(struct net_device *dev) struct qed_link_params link_params; if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { - DP_INFO(edev, - "Link settings are not allowed to be changed\n"); + DP_INFO(edev, "Link settings are not allowed to be changed\n"); return -EOPNOTSUPP; } @@ -467,7 +582,7 @@ static int qede_set_coalesce(struct net_device *dev, rxc = 
(u16)coal->rx_coalesce_usecs; txc = (u16)coal->tx_coalesce_usecs; - for_each_rss(i) { + for_each_queue(i) { sb_id = edev->fp_array[i].sb_info->igu_sb_id; rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc, (u8)i, sb_id); @@ -563,7 +678,7 @@ static int qede_set_pauseparam(struct net_device *dev, memset(¶ms, 0, sizeof(params)); params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; if (epause->autoneg) { - if (!(current_link.supported_caps & SUPPORTED_Autoneg)) { + if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) { DP_INFO(edev, "autoneg not supported\n"); return -EINVAL; } @@ -580,6 +695,28 @@ static int qede_set_pauseparam(struct net_device *dev, return 0; } +static void qede_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *buffer) +{ + struct qede_dev *edev = netdev_priv(ndev); + + regs->version = 0; + memset(buffer, 0, regs->len); + + if (edev->ops && edev->ops->common) + edev->ops->common->dbg_all_data(edev->cdev, buffer); +} + +static int qede_get_regs_len(struct net_device *ndev) +{ + struct qede_dev *edev = netdev_priv(ndev); + + if (edev->ops && edev->ops->common) + return edev->ops->common->dbg_all_data_size(edev->cdev); + else + return -EINVAL; +} + static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args) { edev->ndev->mtu = args->mtu; @@ -619,45 +756,70 @@ static void qede_get_channels(struct net_device *dev, struct qede_dev *edev = netdev_priv(dev); channels->max_combined = QEDE_MAX_RSS_CNT(edev); - channels->combined_count = QEDE_RSS_CNT(edev); + channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx - + edev->fp_num_rx; + channels->tx_count = edev->fp_num_tx; + channels->rx_count = edev->fp_num_rx; } static int qede_set_channels(struct net_device *dev, struct ethtool_channels *channels) { struct qede_dev *edev = netdev_priv(dev); + u32 count; DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", channels->rx_count, channels->tx_count, channels->other_count, channels->combined_count); - /* We don't support separate rx / tx, nor `other' channels. 
*/ - if (channels->rx_count || channels->tx_count || - channels->other_count || (channels->combined_count == 0) || - (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) { + count = channels->rx_count + channels->tx_count + + channels->combined_count; + + /* We don't support `other' channels */ + if (channels->other_count) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "command parameters not supported\n"); return -EINVAL; } + if (!(channels->combined_count || (channels->rx_count && + channels->tx_count))) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "need to request at least one transmit and one receive channel\n"); + return -EINVAL; + } + + if (count > QEDE_MAX_RSS_CNT(edev)) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "requested channels = %d max supported channels = %d\n", + count, QEDE_MAX_RSS_CNT(edev)); + return -EINVAL; + } + /* Check if there was a change in the active parameters */ - if (channels->combined_count == QEDE_RSS_CNT(edev)) { + if ((count == QEDE_QUEUE_CNT(edev)) && + (channels->tx_count == edev->fp_num_tx) && + (channels->rx_count == edev->fp_num_rx)) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "No change in active parameters\n"); return 0; } /* We need the number of queues to be divisible between the hwfns */ - if (channels->combined_count % edev->dev_info.common.num_hwfns) { + if ((count % edev->dev_info.common.num_hwfns) || + (channels->tx_count % edev->dev_info.common.num_hwfns) || + (channels->rx_count % edev->dev_info.common.num_hwfns)) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), - "Number of channels must be divisable by %04x\n", + "Number of channels must be divisible by %04x\n", edev->dev_info.common.num_hwfns); return -EINVAL; } /* Set number of queues and reload if necessary */ - edev->req_rss = channels->combined_count; + edev->req_queues = count; + edev->req_num_tx = channels->tx_count; + edev->req_num_rx = channels->rx_count; if (netif_running(dev)) qede_reload(edev, NULL, NULL); @@ -727,7 +889,7 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, switch (info->cmd) { case ETHTOOL_GRXRINGS: - info->data = edev->num_rss; + info->data = QEDE_RSS_COUNT(edev); return 0; case ETHTOOL_GRXFH: return qede_get_rss_flags(edev, info); @@ -930,7 +1092,7 @@ static void qede_netif_start(struct qede_dev *edev) if (!netif_running(edev->ndev)) return; - for_each_rss(i) { + for_each_queue(i) { /* Update and reenable interrupts */ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1); napi_enable(&edev->fp_array[i].napi); @@ -942,7 +1104,7 @@ static void qede_netif_stop(struct qede_dev *edev) { int i; - for_each_rss(i) { + for_each_queue(i) { napi_disable(&edev->fp_array[i].napi); /* Disable interrupts */ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0); @@ -952,11 +1114,23 @@ static void qede_netif_stop(struct qede_dev *edev) static int qede_selftest_transmit_traffic(struct qede_dev *edev, struct sk_buff *skb) { - struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0]; + struct qede_tx_queue *txq = NULL; struct eth_tx_1st_bd *first_bd; dma_addr_t mapping; int i, idx, val; + for_each_queue(i) { + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { + txq = edev->fp_array[i].txqs; + break; + } + } + + if (!txq) { + DP_NOTICE(edev, "Tx path is not available\n"); + return -1; + } + /* Fill the entry in the SW ring and the BDs in the FW ring */ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; txq->sw_tx_ring[idx].skb = skb; @@ -1020,14 +1194,26 @@ static int 
qede_selftest_transmit_traffic(struct qede_dev *edev, static int qede_selftest_receive_traffic(struct qede_dev *edev) { - struct qede_rx_queue *rxq = edev->fp_array[0].rxq; u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len; struct eth_fast_path_rx_reg_cqe *fp_cqe; + struct qede_rx_queue *rxq = NULL; struct sw_rx_data *sw_rx_data; union eth_rx_cqe *cqe; u8 *data_ptr; int i; + for_each_queue(i) { + if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { + rxq = edev->fp_array[i].rxq; + break; + } + } + + if (!rxq) { + DP_NOTICE(edev, "Rx path is not available\n"); + return -1; + } + /* The packet is expected to receive on rx-queue 0 even though RSS is * enabled. This is because the queue 0 is configured as the default * queue and that the loopback traffic is not IP. @@ -1228,9 +1414,11 @@ static int qede_get_tunable(struct net_device *dev, } static const struct ethtool_ops qede_ethtool_ops = { - .get_settings = qede_get_settings, - .set_settings = qede_set_settings, + .get_link_ksettings = qede_get_link_ksettings, + .set_link_ksettings = qede_set_link_ksettings, .get_drvinfo = qede_get_drvinfo, + .get_regs_len = qede_get_regs_len, + .get_regs = qede_get_regs, .get_msglevel = qede_get_msglevel, .set_msglevel = qede_set_msglevel, .nway_reset = qede_nway_reset, @@ -1260,7 +1448,7 @@ static const struct ethtool_ops qede_ethtool_ops = { }; static const struct ethtool_ops qede_vf_ethtool_ops = { - .get_settings = qede_get_settings, + .get_link_ksettings = qede_get_link_ksettings, .get_drvinfo = qede_get_drvinfo, .get_msglevel = qede_get_msglevel, .set_msglevel = qede_set_msglevel, diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index e4bd02e46e57..343038ca047d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -36,7 +36,7 @@ #include <linux/random.h> #include <net/ip6_checksum.h> #include <linux/bitops.h> - +#include <linux/qed/qede_roce.h> #include "qede.h" static char version[] = @@ -100,7 +100,8 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, static void qede_link_update(void *dev, struct qed_link_output *link); #ifdef CONFIG_QED_SRIOV -static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos) +static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto) { struct qede_dev *edev = netdev_priv(ndev); @@ -109,6 +110,9 @@ static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos) return -EINVAL; } + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n", vlan, vf); @@ -189,8 +193,7 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event, struct ethtool_drvinfo drvinfo; struct qede_dev *edev; - /* Currently only support name change */ - if (event != NETDEV_CHANGENAME) + if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR) goto done; /* Check whether this is a qede device */ @@ -203,11 +206,18 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event, goto done; edev = netdev_priv(ndev); - /* Notify qed of the name change */ - if (!edev->ops || !edev->ops->common) - goto done; - edev->ops->common->set_id(edev->cdev, edev->ndev->name, - "qede"); + switch (event) { + case NETDEV_CHANGENAME: + /* Notify qed of the name change */ + if (!edev->ops || !edev->ops->common) + goto done; + edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede"); + break; + case 
NETDEV_CHANGEADDR: + edev = netdev_priv(ndev); + qede_roce_event_changeaddr(edev); + break; + } done: return NOTIFY_DONE; @@ -222,7 +232,7 @@ int __init qede_init(void) { int ret; - pr_notice("qede_init: %s\n", version); + pr_info("qede_init: %s\n", version); qed_ops = qed_get_eth_ops(); if (!qed_ops) { @@ -253,7 +263,8 @@ int __init qede_init(void) static void __exit qede_cleanup(void) { - pr_notice("qede_cleanup called\n"); + if (debug & QED_LOG_INFO_MASK) + pr_info("qede_cleanup called\n"); unregister_netdevice_notifier(&qede_netdev_notifier); pci_unregister_driver(&qede_pci_driver); @@ -270,8 +281,7 @@ module_exit(qede_cleanup); /* Unmap the data and free skb */ static int qede_free_tx_pkt(struct qede_dev *edev, - struct qede_tx_queue *txq, - int *len) + struct qede_tx_queue *txq, int *len) { u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX; struct sk_buff *skb = txq->sw_tx_ring[idx].skb; @@ -329,8 +339,7 @@ static int qede_free_tx_pkt(struct qede_dev *edev, static void qede_free_failed_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, struct eth_tx_1st_bd *first_bd, - int nbd, - bool data_split) + int nbd, bool data_split) { u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; struct sk_buff *skb = txq->sw_tx_ring[idx].skb; @@ -339,8 +348,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev, /* Return prod to its position before this skb was handled */ qed_chain_set_prod(&txq->tx_pbl, - le16_to_cpu(txq->tx_db.data.bd_prod), - first_bd); + le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); @@ -366,8 +374,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev, /* Return again prod to its position before this skb was handled */ qed_chain_set_prod(&txq->tx_pbl, - le16_to_cpu(txq->tx_db.data.bd_prod), - first_bd); + le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); /* Free skb */ dev_kfree_skb_any(skb); @@ -376,8 +383,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev, } static u32 qede_xmit_type(struct qede_dev *edev, - struct sk_buff *skb, - int *ipv6_ext) + struct sk_buff *skb, int *ipv6_ext) { u32 rc = XMIT_L4_CSUM; __be16 l3_proto; @@ -434,15 +440,13 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, } static int map_frag_to_bd(struct qede_dev *edev, - skb_frag_t *frag, - struct eth_tx_bd *bd) + skb_frag_t *frag, struct eth_tx_bd *bd) { dma_addr_t mapping; /* Map skb non-linear frag data for DMA */ mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0, - skb_frag_size(frag), - DMA_TO_DEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { DP_NOTICE(edev, "Unable to map frag - dropping packet\n"); return -ENOMEM; @@ -504,9 +508,8 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq) } /* Main transmit function */ -static -netdev_tx_t qede_start_xmit(struct sk_buff *skb, - struct net_device *ndev) +static netdev_tx_t qede_start_xmit(struct sk_buff *skb, + struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); struct netdev_queue *netdev_txq; @@ -526,12 +529,11 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, /* Get tx-queue context and netdev index */ txq_index = skb_get_queue_mapping(skb); - WARN_ON(txq_index >= QEDE_TSS_CNT(edev)); + WARN_ON(txq_index >= QEDE_TSS_COUNT(edev)); txq = QEDE_TX_QUEUE(edev, txq_index); netdev_txq = netdev_get_tx_queue(ndev, txq_index); - WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < - (MAX_SKB_FRAGS + 1)); + WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < 
(MAX_SKB_FRAGS + 1)); xmit_type = qede_xmit_type(edev, skb, &ipv6_ext); @@ -606,6 +608,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; } + /* Legacy FW had flipped behavior in regard to this bit - + * I.e., needed to set to prevent FW from touching encapsulated + * packets when it didn't need to. + */ + if (unlikely(txq->is_legacy)) + first_bd->data.bitfields ^= + 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; + /* If the packet is IPv6 with extension header, indicate that * to FW and pass few params, since the device cracker doesn't * support parsing IPv6 with extension header/s. @@ -722,12 +732,16 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, txq->tx_db.data.bd_prod = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); - if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq)) + if (!skb->xmit_more || netif_xmit_stopped(netdev_txq)) qede_update_tx_producer(txq); if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1))) { + if (skb->xmit_more) + qede_update_tx_producer(txq); + netif_tx_stop_queue(netdev_txq); + txq->stopped_cnt++; DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, "Stop queue was called\n"); /* paired memory barrier is in qede_tx_int(), we have to keep @@ -761,8 +775,7 @@ int qede_txq_has_work(struct qede_tx_queue *txq) return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); } -static int qede_tx_int(struct qede_dev *edev, - struct qede_tx_queue *txq) +static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) { struct netdev_queue *netdev_txq; u16 hw_bd_cons; @@ -788,6 +801,7 @@ static int qede_tx_int(struct qede_dev *edev, bytes_compl += len; pkts_compl++; txq->sw_tx_cons++; + txq->xmit_pkts++; } netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); @@ -960,8 +974,7 @@ static inline void qede_update_rx_prod(struct qede_dev *edev, static u32 qede_get_rxhash(struct qede_dev *edev, u8 bitfields, - __le32 rss_hash, - enum pkt_hash_types *rxhash_type) + __le32 rss_hash, enum pkt_hash_types *rxhash_type) { enum rss_hash_type htype; @@ -990,12 +1003,10 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) static inline void qede_skb_receive(struct qede_dev *edev, struct qede_fastpath *fp, - struct sk_buff *skb, - u16 vlan_tag) + struct sk_buff *skb, u16 vlan_tag) { if (vlan_tag) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), - vlan_tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); napi_gro_receive(&fp->napi, skb); } @@ -1018,8 +1029,7 @@ static void qede_set_gro_params(struct qede_dev *edev, static int qede_fill_frag_skb(struct qede_dev *edev, struct qede_rx_queue *rxq, - u8 tpa_agg_index, - u16 len_on_bd) + u8 tpa_agg_index, u16 len_on_bd) { struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; @@ -1206,7 +1216,7 @@ static void qede_gro_receive(struct qede_dev *edev, #endif send_skb: - skb_record_rx_queue(skb, fp->rss_id); + skb_record_rx_queue(skb, fp->rxq->rxq_id); qede_skb_receive(edev, fp, skb, vlan_tag); } @@ -1410,7 +1420,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { edev->ops->eth_cqe_completion( - edev->cdev, fp->rss_id, + edev->cdev, fp->id, (struct eth_slow_path_rx_cqe *)cqe); goto next_cqe; } @@ -1467,7 +1477,7 @@ alloc_skb: skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); if (unlikely(!skb)) { DP_NOTICE(edev, - "Build_skb failed, dropping incoming packet\n"); + "skb allocation failed, dropping incoming packet\n"); qede_recycle_rx_bd_ring(rxq, 
edev, fp_cqe->bd_num); rxq->rx_alloc_errors++; goto next_cqe; @@ -1575,14 +1585,13 @@ alloc_skb: skb->protocol = eth_type_trans(skb, edev->ndev); rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields, - fp_cqe->rss_hash, - &rxhash_type); + fp_cqe->rss_hash, &rxhash_type); skb_set_hash(skb, rx_hash, rxhash_type); qede_set_skb_csum(skb, csum_flag); - skb_record_rx_queue(skb, fp->rss_id); + skb_record_rx_queue(skb, fp->rxq->rxq_id); qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); next_rx_only: @@ -1601,6 +1610,8 @@ next_cqe: /* don't consume bd rx buffer */ /* Update producers */ qede_update_rx_prod(edev, rxq); + rxq->rcv_pkts += rx_pkt; + return rx_pkt; } @@ -1613,10 +1624,12 @@ static int qede_poll(struct napi_struct *napi, int budget) u8 tc; for (tc = 0; tc < edev->num_tc; tc++) - if (qede_txq_has_work(&fp->txqs[tc])) + if (likely(fp->type & QEDE_FASTPATH_TX) && + qede_txq_has_work(&fp->txqs[tc])) qede_tx_int(edev, &fp->txqs[tc]); - rx_work_done = qede_has_rx_work(fp->rxq) ? + rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && + qede_has_rx_work(fp->rxq)) ? qede_rx_int(fp, budget) : 0; if (rx_work_done < budget) { qed_sb_update_sb_idx(fp->sb_info); @@ -1636,8 +1649,10 @@ static int qede_poll(struct napi_struct *napi, int budget) rmb(); /* Fall out from the NAPI loop if needed */ - if (!(qede_has_rx_work(fp->rxq) || - qede_has_tx_work(fp))) { + if (!((likely(fp->type & QEDE_FASTPATH_RX) && + qede_has_rx_work(fp->rxq)) || + (likely(fp->type & QEDE_FASTPATH_TX) && + qede_has_tx_work(fp)))) { napi_complete(napi); /* Update and reenable interrupts */ @@ -1708,6 +1723,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) edev->ops->get_vport_stats(edev->cdev, &stats); edev->stats.no_buff_discards = stats.no_buff_discards; + edev->stats.packet_too_big_discard = stats.packet_too_big_discard; + edev->stats.ttl0_discard = stats.ttl0_discard; edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes; edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes; edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes; @@ -1787,9 +1804,9 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames; } -static struct rtnl_link_stats64 *qede_get_stats64( - struct net_device *dev, - struct rtnl_link_stats64 *stats) +static +struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct qede_dev *edev = netdev_priv(dev); @@ -2103,14 +2120,13 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) } DP_VERBOSE(edev, NETIF_MSG_IFDOWN, - "marked vlan %d as non-configured\n", - vlan->vid); + "marked vlan %d as non-configured\n", vlan->vid); } edev->accept_any_vlan = false; } -int qede_set_features(struct net_device *dev, netdev_features_t features) +static int qede_set_features(struct net_device *dev, netdev_features_t features) { struct qede_dev *edev = netdev_priv(dev); netdev_features_t changes = features ^ dev->features; @@ -2146,7 +2162,7 @@ static void qede_udp_tunnel_add(struct net_device *dev, edev->vxlan_dst_port = t_port; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", t_port); set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); @@ -2157,7 +2173,7 @@ static void qede_udp_tunnel_add(struct net_device *dev, edev->geneve_dst_port = t_port; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n", t_port); set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); break; @@ 
-2181,7 +2197,7 @@ static void qede_udp_tunnel_del(struct net_device *dev, edev->vxlan_dst_port = 0; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n", t_port); set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); @@ -2192,7 +2208,7 @@ static void qede_udp_tunnel_del(struct net_device *dev, edev->geneve_dst_port = 0; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n", t_port); set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); break; @@ -2237,15 +2253,13 @@ static const struct net_device_ops qede_netdev_ops = { static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, struct pci_dev *pdev, struct qed_dev_eth_info *info, - u32 dp_module, - u8 dp_level) + u32 dp_module, u8 dp_level) { struct net_device *ndev; struct qede_dev *edev; ndev = alloc_etherdev_mqs(sizeof(*edev), - info->num_queues, - info->num_queues); + info->num_queues, info->num_queues); if (!ndev) { pr_err("etherdev allocation failed\n"); return NULL; @@ -2261,6 +2275,9 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, edev->q_num_rx_buffers = NUM_RX_BDS_DEF; edev->q_num_tx_buffers = NUM_TX_BDS_DEF; + DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n", + info->num_queues, info->num_queues); + SET_NETDEV_DEV(ndev, &pdev->dev); memset(&edev->stats, 0, sizeof(edev->stats)); @@ -2349,7 +2366,7 @@ static void qede_free_fp_array(struct qede_dev *edev) struct qede_fastpath *fp; int i; - for_each_rss(i) { + for_each_queue(i) { fp = &edev->fp_array[i]; kfree(fp->sb_info); @@ -2358,22 +2375,33 @@ static void qede_free_fp_array(struct qede_dev *edev) } kfree(edev->fp_array); } - edev->num_rss = 0; + + edev->num_queues = 0; + edev->fp_num_tx = 0; + edev->fp_num_rx = 0; } static int qede_alloc_fp_array(struct qede_dev *edev) { + u8 fp_combined, fp_rx = edev->fp_num_rx; struct qede_fastpath *fp; int i; - edev->fp_array = kcalloc(QEDE_RSS_CNT(edev), + edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev), sizeof(*edev->fp_array), GFP_KERNEL); if (!edev->fp_array) { DP_NOTICE(edev, "fp array allocation failed\n"); goto err; } - for_each_rss(i) { + fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx; + + /* Allocate the FP elements for Rx queues followed by combined and then + * the Tx. This ordering should be maintained so that the respective + * queues (Rx or Tx) will be together in the fastpath array and the + * associated ids will be sequential. 
+ */ + for_each_queue(i) { fp = &edev->fp_array[i]; fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL); @@ -2382,16 +2410,33 @@ static int qede_alloc_fp_array(struct qede_dev *edev) goto err; } - fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL); - if (!fp->rxq) { - DP_NOTICE(edev, "RXQ struct allocation failed\n"); - goto err; + if (fp_rx) { + fp->type = QEDE_FASTPATH_RX; + fp_rx--; + } else if (fp_combined) { + fp->type = QEDE_FASTPATH_COMBINED; + fp_combined--; + } else { + fp->type = QEDE_FASTPATH_TX; } - fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL); - if (!fp->txqs) { - DP_NOTICE(edev, "TXQ array allocation failed\n"); - goto err; + if (fp->type & QEDE_FASTPATH_TX) { + fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), + GFP_KERNEL); + if (!fp->txqs) { + DP_NOTICE(edev, + "TXQ array allocation failed\n"); + goto err; + } + } + + if (fp->type & QEDE_FASTPATH_RX) { + fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL); + if (!fp->rxq) { + DP_NOTICE(edev, + "RXQ struct allocation failed\n"); + goto err; + } } } @@ -2453,7 +2498,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, bool is_vf, enum qede_probe_mode mode) { struct qed_probe_params probe_params; - struct qed_slowpath_params params; + struct qed_slowpath_params sp_params; struct qed_dev_eth_info dev_info; struct qede_dev *edev; struct qed_dev *cdev; @@ -2476,14 +2521,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, qede_update_pf_params(cdev); /* Start the Slowpath-process */ - memset(¶ms, 0, sizeof(struct qed_slowpath_params)); - params.int_mode = QED_INT_MODE_MSIX; - params.drv_major = QEDE_MAJOR_VERSION; - params.drv_minor = QEDE_MINOR_VERSION; - params.drv_rev = QEDE_REVISION_VERSION; - params.drv_eng = QEDE_ENGINEERING_VERSION; - strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE); - rc = qed_ops->common->slowpath_start(cdev, ¶ms); + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.int_mode = QED_INT_MODE_MSIX; + sp_params.drv_major = QEDE_MAJOR_VERSION; + sp_params.drv_minor = QEDE_MINOR_VERSION; + sp_params.drv_rev = QEDE_REVISION_VERSION; + sp_params.drv_eng = QEDE_ENGINEERING_VERSION; + strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE); + rc = qed_ops->common->slowpath_start(cdev, &sp_params); if (rc) { pr_notice("Cannot start slowpath\n"); goto err1; @@ -2506,10 +2551,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, qede_init_ndev(edev); + rc = qede_roce_dev_add(edev); + if (rc) + goto err3; + rc = register_netdev(edev->ndev); if (rc) { DP_NOTICE(edev, "Cannot register net-device\n"); - goto err3; + goto err4; } edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION); @@ -2517,7 +2566,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, edev->ops->register_ops(cdev, &qede_ll_ops, edev); #ifdef CONFIG_DCB - qede_set_dcbnl_ops(edev->ndev); + if (!IS_VF(edev)) + qede_set_dcbnl_ops(edev->ndev); #endif INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); @@ -2528,6 +2578,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, return 0; +err4: + qede_roce_dev_remove(edev); err3: free_netdev(edev->ndev); err2: @@ -2574,8 +2626,11 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) DP_INFO(edev, "Starting qede_remove\n"); cancel_delayed_work_sync(&edev->sp_task); + unregister_netdev(ndev); + qede_roce_dev_remove(edev); + edev->ops->common->set_power_state(cdev, PCI_D0); pci_set_drvdata(pdev, NULL); @@ -2586,7 
+2641,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) qed_ops->common->slowpath_stop(cdev); qed_ops->common->remove(cdev); - pr_notice("Ending successfully qede_remove\n"); + dev_info(&pdev->dev, "Ending qede_remove successfully\n"); } static void qede_remove(struct pci_dev *pdev) @@ -2605,8 +2660,8 @@ static int qede_set_num_queues(struct qede_dev *edev) u16 rss_num; /* Setup queues according to possible resources*/ - if (edev->req_rss) - rss_num = edev->req_rss; + if (edev->req_queues) + rss_num = edev->req_queues; else rss_num = netif_get_num_default_rss_queues() * edev->dev_info.common.num_hwfns; @@ -2616,11 +2671,15 @@ static int qede_set_num_queues(struct qede_dev *edev) rc = edev->ops->common->set_fp_int(edev->cdev, rss_num); if (rc > 0) { /* Managed to request interrupts for our queues */ - edev->num_rss = rc; + edev->num_queues = rc; DP_INFO(edev, "Managed %d [of %d] RSS queues\n", - QEDE_RSS_CNT(edev), rss_num); + QEDE_QUEUE_CNT(edev), rss_num); rc = 0; } + + edev->fp_num_tx = edev->req_num_tx; + edev->fp_num_rx = edev->req_num_rx; + return rc; } @@ -2634,16 +2693,14 @@ static void qede_free_mem_sb(struct qede_dev *edev, /* This function allocates fast-path status block memory */ static int qede_alloc_mem_sb(struct qede_dev *edev, - struct qed_sb_info *sb_info, - u16 sb_id) + struct qed_sb_info *sb_info, u16 sb_id) { struct status_block *sb_virt; dma_addr_t sb_phys; int rc; sb_virt = dma_alloc_coherent(&edev->pdev->dev, - sizeof(*sb_virt), - &sb_phys, GFP_KERNEL); + sizeof(*sb_virt), &sb_phys, GFP_KERNEL); if (!sb_virt) { DP_ERR(edev, "Status block allocation failed\n"); return -ENOMEM; @@ -2675,16 +2732,15 @@ static void qede_free_rx_buffers(struct qede_dev *edev, data = rx_buf->data; dma_unmap_page(&edev->pdev->dev, - rx_buf->mapping, - PAGE_SIZE, DMA_FROM_DEVICE); + rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE); rx_buf->data = NULL; __free_page(data); } } -static void qede_free_sge_mem(struct qede_dev *edev, - struct qede_rx_queue *rxq) { +static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) +{ int i; if (edev->gro_disable) @@ -2703,8 +2759,7 @@ static void qede_free_sge_mem(struct qede_dev *edev, } } -static void qede_free_mem_rxq(struct qede_dev *edev, - struct qede_rx_queue *rxq) +static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { qede_free_sge_mem(edev, rxq); @@ -2726,9 +2781,6 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, struct eth_rx_bd *rx_bd; dma_addr_t mapping; struct page *data; - u16 rx_buf_size; - - rx_buf_size = rxq->rx_buf_size; data = alloc_pages(GFP_ATOMIC, 0); if (unlikely(!data)) { @@ -2763,8 +2815,7 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, return 0; } -static int qede_alloc_sge_mem(struct qede_dev *edev, - struct qede_rx_queue *rxq) +static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) { dma_addr_t mapping; int i; @@ -2811,15 +2862,14 @@ err: } /* This function allocates all memory needed per Rx queue */ -static int qede_alloc_mem_rxq(struct qede_dev *edev, - struct qede_rx_queue *rxq) +static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { int i, rc, size; rxq->num_rx_buffers = edev->q_num_rx_buffers; - rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + - edev->ndev->mtu; + rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu; + if (rxq->rx_buf_size > PAGE_SIZE) rxq->rx_buf_size = PAGE_SIZE; @@ -2873,8 +2923,7 @@ err: return rc; } -static void qede_free_mem_txq(struct qede_dev 
*edev, - struct qede_tx_queue *txq) +static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) { /* Free the parallel SW ring */ kfree(txq->sw_tx_ring); @@ -2884,8 +2933,7 @@ static void qede_free_mem_txq(struct qede_dev *edev, } /* This function allocates all memory needed per Tx queue */ -static int qede_alloc_mem_txq(struct qede_dev *edev, - struct qede_tx_queue *txq) +static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) { int size, rc; union eth_tx_bd_types *p_virt; @@ -2917,41 +2965,45 @@ err: } /* This function frees all memory of a single fp */ -static void qede_free_mem_fp(struct qede_dev *edev, - struct qede_fastpath *fp) +static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) { int tc; qede_free_mem_sb(edev, fp->sb_info); - qede_free_mem_rxq(edev, fp->rxq); + if (fp->type & QEDE_FASTPATH_RX) + qede_free_mem_rxq(edev, fp->rxq); - for (tc = 0; tc < edev->num_tc; tc++) - qede_free_mem_txq(edev, &fp->txqs[tc]); + if (fp->type & QEDE_FASTPATH_TX) + for (tc = 0; tc < edev->num_tc; tc++) + qede_free_mem_txq(edev, &fp->txqs[tc]); } /* This function allocates all memory needed for a single fp (i.e. an entity - * which contains status block, one rx queue and multiple per-TC tx queues. + * which contains status block, one rx queue and/or multiple per-TC tx queues. */ -static int qede_alloc_mem_fp(struct qede_dev *edev, - struct qede_fastpath *fp) +static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) { int rc, tc; - rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id); + rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id); if (rc) goto err; - rc = qede_alloc_mem_rxq(edev, fp->rxq); - if (rc) - goto err; - - for (tc = 0; tc < edev->num_tc; tc++) { - rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]); + if (fp->type & QEDE_FASTPATH_RX) { + rc = qede_alloc_mem_rxq(edev, fp->rxq); if (rc) goto err; } + if (fp->type & QEDE_FASTPATH_TX) { + for (tc = 0; tc < edev->num_tc; tc++) { + rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]); + if (rc) + goto err; + } + } + return 0; err: return rc; @@ -2961,7 +3013,7 @@ static void qede_free_mem_load(struct qede_dev *edev) { int i; - for_each_rss(i) { + for_each_queue(i) { struct qede_fastpath *fp = &edev->fp_array[i]; qede_free_mem_fp(edev, fp); @@ -2971,16 +3023,16 @@ static void qede_free_mem_load(struct qede_dev *edev) /* This function allocates all qede memory at NIC load. 
*/ static int qede_alloc_mem_load(struct qede_dev *edev) { - int rc = 0, rss_id; + int rc = 0, queue_id; - for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) { - struct qede_fastpath *fp = &edev->fp_array[rss_id]; + for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) { + struct qede_fastpath *fp = &edev->fp_array[queue_id]; rc = qede_alloc_mem_fp(edev, fp); if (rc) { DP_ERR(edev, "Failed to allocate memory for fastpath - rss id = %d\n", - rss_id); + queue_id); qede_free_mem_load(edev); return rc; } @@ -2992,30 +3044,38 @@ static int qede_alloc_mem_load(struct qede_dev *edev) /* This function inits fp content and resets the SB, RXQ and TXQ structures */ static void qede_init_fp(struct qede_dev *edev) { - int rss_id, txq_index, tc; + int queue_id, rxq_index = 0, txq_index = 0, tc; struct qede_fastpath *fp; - for_each_rss(rss_id) { - fp = &edev->fp_array[rss_id]; + for_each_queue(queue_id) { + fp = &edev->fp_array[queue_id]; fp->edev = edev; - fp->rss_id = rss_id; + fp->id = queue_id; memset((void *)&fp->napi, 0, sizeof(fp->napi)); memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info)); - memset((void *)fp->rxq, 0, sizeof(*fp->rxq)); - fp->rxq->rxq_id = rss_id; + if (fp->type & QEDE_FASTPATH_RX) { + memset((void *)fp->rxq, 0, sizeof(*fp->rxq)); + fp->rxq->rxq_id = rxq_index++; + } - memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs))); - for (tc = 0; tc < edev->num_tc; tc++) { - txq_index = tc * QEDE_RSS_CNT(edev) + rss_id; - fp->txqs[tc].index = txq_index; + if (fp->type & QEDE_FASTPATH_TX) { + memset((void *)fp->txqs, 0, + (edev->num_tc * sizeof(*fp->txqs))); + for (tc = 0; tc < edev->num_tc; tc++) { + fp->txqs[tc].index = txq_index + + tc * QEDE_TSS_COUNT(edev); + if (edev->dev_info.is_legacy) + fp->txqs[tc].is_legacy = true; + } + txq_index++; } snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", - edev->ndev->name, rss_id); + edev->ndev->name, queue_id); } edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO); @@ -3025,12 +3085,13 @@ static int qede_set_real_num_queues(struct qede_dev *edev) { int rc = 0; - rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev)); + rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev)); if (rc) { DP_NOTICE(edev, "Failed to set real number of Tx queues\n"); return rc; } - rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev)); + + rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev)); if (rc) { DP_NOTICE(edev, "Failed to set real number of Rx queues\n"); return rc; @@ -3043,7 +3104,7 @@ static void qede_napi_disable_remove(struct qede_dev *edev) { int i; - for_each_rss(i) { + for_each_queue(i) { napi_disable(&edev->fp_array[i].napi); netif_napi_del(&edev->fp_array[i].napi); @@ -3055,7 +3116,7 @@ static void qede_napi_add_enable(struct qede_dev *edev) int i; /* Add NAPI objects */ - for_each_rss(i) { + for_each_queue(i) { netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll, NAPI_POLL_WEIGHT); napi_enable(&edev->fp_array[i].napi); @@ -3084,14 +3145,14 @@ static int qede_req_msix_irqs(struct qede_dev *edev) int i, rc; /* Sanitize number of interrupts == number of prepared RSS queues */ - if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) { + if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) { DP_ERR(edev, "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n", - QEDE_RSS_CNT(edev), edev->int_info.msix_cnt); + QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt); return -EINVAL; } - for (i = 0; i < QEDE_RSS_CNT(edev); i++) { + for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) { rc = 
request_irq(edev->int_info.msix[i].vector, qede_msix_fp_int, 0, edev->fp_array[i].name, &edev->fp_array[i]); @@ -3136,18 +3197,17 @@ static int qede_setup_irqs(struct qede_dev *edev) /* qed should learn receive the RSS ids and callbacks */ ops = edev->ops->common; - for (i = 0; i < QEDE_RSS_CNT(edev); i++) + for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) ops->simd_handler_config(edev->cdev, &edev->fp_array[i], i, qede_simd_fp_handler); - edev->int_info.used_cnt = QEDE_RSS_CNT(edev); + edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev); } return 0; } static int qede_drain_txq(struct qede_dev *edev, - struct qede_tx_queue *txq, - bool allow_drain) + struct qede_tx_queue *txq, bool allow_drain) { int rc, cnt = 1000; @@ -3199,45 +3259,53 @@ static int qede_stop_queues(struct qede_dev *edev) } /* Flush Tx queues. If needed, request drain from MCP */ - for_each_rss(i) { + for_each_queue(i) { struct qede_fastpath *fp = &edev->fp_array[i]; - for (tc = 0; tc < edev->num_tc; tc++) { - struct qede_tx_queue *txq = &fp->txqs[tc]; + if (fp->type & QEDE_FASTPATH_TX) { + for (tc = 0; tc < edev->num_tc; tc++) { + struct qede_tx_queue *txq = &fp->txqs[tc]; - rc = qede_drain_txq(edev, txq, true); - if (rc) - return rc; + rc = qede_drain_txq(edev, txq, true); + if (rc) + return rc; + } } } - /* Stop all Queues in reverse order*/ - for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) { + /* Stop all Queues in reverse order */ + for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) { struct qed_stop_rxq_params rx_params; - /* Stop the Tx Queue(s)*/ - for (tc = 0; tc < edev->num_tc; tc++) { - struct qed_stop_txq_params tx_params; - - tx_params.rss_id = i; - tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i; - rc = edev->ops->q_tx_stop(cdev, &tx_params); - if (rc) { - DP_ERR(edev, "Failed to stop TXQ #%d\n", - tx_params.tx_queue_id); - return rc; + /* Stop the Tx Queue(s) */ + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { + for (tc = 0; tc < edev->num_tc; tc++) { + struct qed_stop_txq_params tx_params; + u8 val; + + tx_params.rss_id = i; + val = edev->fp_array[i].txqs[tc].index; + tx_params.tx_queue_id = val; + rc = edev->ops->q_tx_stop(cdev, &tx_params); + if (rc) { + DP_ERR(edev, "Failed to stop TXQ #%d\n", + tx_params.tx_queue_id); + return rc; + } } } - /* Stop the Rx Queue*/ - memset(&rx_params, 0, sizeof(rx_params)); - rx_params.rss_id = i; - rx_params.rx_queue_id = i; + /* Stop the Rx Queue */ + if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { + memset(&rx_params, 0, sizeof(rx_params)); + rx_params.rss_id = i; + rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id; - rc = edev->ops->q_rx_stop(cdev, &rx_params); - if (rc) { - DP_ERR(edev, "Failed to stop RXQ #%d\n", i); - return rc; + rc = edev->ops->q_rx_stop(cdev, &rx_params); + if (rc) { + DP_ERR(edev, "Failed to stop RXQ #%d\n", i); + return rc; + } } } @@ -3260,7 +3328,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) struct qed_start_vport_params start = {0}; bool reset_rss_indir = false; - if (!edev->num_rss) { + if (!edev->num_queues) { DP_ERR(edev, "Cannot update V-VPORT as active as there are no Rx queues\n"); return -EINVAL; @@ -3284,50 +3352,66 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n", start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); - for_each_rss(i) { + for_each_queue(i) { struct qede_fastpath *fp = &edev->fp_array[i]; - dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table; - - memset(&q_params, 0, 
sizeof(q_params)); - q_params.rss_id = i; - q_params.queue_id = i; - q_params.vport_id = 0; - q_params.sb = fp->sb_info->igu_sb_id; - q_params.sb_idx = RX_PI; - - rc = edev->ops->q_rx_start(cdev, &q_params, - fp->rxq->rx_buf_size, - fp->rxq->rx_bd_ring.p_phys_addr, - phys_table, - fp->rxq->rx_comp_ring.page_cnt, - &fp->rxq->hw_rxq_prod_addr); - if (rc) { - DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc); - return rc; - } + dma_addr_t p_phys_table; + u32 page_cnt; + + if (fp->type & QEDE_FASTPATH_RX) { + struct qede_rx_queue *rxq = fp->rxq; + __le16 *val; + + memset(&q_params, 0, sizeof(q_params)); + q_params.rss_id = i; + q_params.queue_id = rxq->rxq_id; + q_params.vport_id = 0; + q_params.sb = fp->sb_info->igu_sb_id; + q_params.sb_idx = RX_PI; + + p_phys_table = + qed_chain_get_pbl_phys(&rxq->rx_comp_ring); + page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring); + + rc = edev->ops->q_rx_start(cdev, &q_params, + rxq->rx_buf_size, + rxq->rx_bd_ring.p_phys_addr, + p_phys_table, + page_cnt, + &rxq->hw_rxq_prod_addr); + if (rc) { + DP_ERR(edev, "Start RXQ #%d failed %d\n", i, + rc); + return rc; + } - fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI]; + val = &fp->sb_info->sb_virt->pi_array[RX_PI]; + rxq->hw_cons_ptr = val; - qede_update_rx_prod(edev, fp->rxq); + qede_update_rx_prod(edev, rxq); + } + + if (!(fp->type & QEDE_FASTPATH_TX)) + continue; for (tc = 0; tc < edev->num_tc; tc++) { struct qede_tx_queue *txq = &fp->txqs[tc]; - int txq_index = tc * QEDE_RSS_CNT(edev) + i; + + p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl); + page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl); memset(&q_params, 0, sizeof(q_params)); q_params.rss_id = i; - q_params.queue_id = txq_index; + q_params.queue_id = txq->index; q_params.vport_id = 0; q_params.sb = fp->sb_info->igu_sb_id; q_params.sb_idx = TX_PI(tc); rc = edev->ops->q_tx_start(cdev, &q_params, - txq->tx_pbl.pbl.p_phys_table, - txq->tx_pbl.page_cnt, + p_phys_table, page_cnt, &txq->doorbell_addr); if (rc) { DP_ERR(edev, "Start TXQ #%d failed %d\n", - txq_index, rc); + txq->index, rc); return rc; } @@ -3358,13 +3442,13 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) } /* Fill struct with RSS params */ - if (QEDE_RSS_CNT(edev) > 1) { + if (QEDE_RSS_COUNT(edev) > 1) { vport_update_params.update_rss_flg = 1; /* Need to validate current RSS config uses valid entries */ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { if (edev->rss_params.rss_ind_table[i] >= - edev->num_rss) { + QEDE_RSS_COUNT(edev)) { reset_rss_indir = true; break; } @@ -3377,7 +3461,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { u16 indir_val; - val = QEDE_RSS_CNT(edev); + val = QEDE_RSS_COUNT(edev); indir_val = ethtool_rxfh_indir_default(i, val); edev->rss_params.rss_ind_table[i] = indir_val; } @@ -3443,6 +3527,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode) DP_INFO(edev, "Starting qede unload\n"); + qede_roce_dev_event_close(edev); mutex_lock(&edev->qede_lock); edev->state = QEDE_STATE_CLOSED; @@ -3506,7 +3591,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) if (rc) goto err1; DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n", - QEDE_RSS_CNT(edev), edev->num_tc); + QEDE_QUEUE_CNT(edev), edev->num_tc); rc = qede_set_real_num_queues(edev); if (rc) @@ -3543,6 +3628,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) /* Query whether link is already-up */ memset(&link_output, 0, 
@@ -3543,6 +3628,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)

	/* Query whether link is already-up */
	memset(&link_output, 0, sizeof(link_output));
	edev->ops->common->get_link(edev->cdev, &link_output);
+	qede_roce_dev_event_open(edev);
	qede_link_update(edev, &link_output);

	DP_INFO(edev, "Ending successfully qede load\n");
@@ -3559,7 +3645,9 @@ err2:
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
-	edev->num_rss = 0;
+	edev->num_queues = 0;
+	edev->fp_num_tx = 0;
+	edev->fp_num_rx = 0;
err0:
	return rc;
}
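qede_load() and qede_unload() above now bracket the data path with qede_roce_dev_event_open()/qede_roce_dev_event_close(); the new qede_roce.c below supplies those hooks. Its central contract: each qede device enlists on a global list, and a RoCE driver that registers later is walked across every device that probed before it, so no function is missed regardless of module load order. The toy program below models just that contract in plain userspace C; every name in it is invented for the illustration.

/* Build: cc -o late_reg late_reg.c */
#include <stdio.h>

struct toy_dev {
	const char *name;
	int up;			/* stands in for netif_running() && netif_oper_up() */
	struct toy_dev *next;
};

struct toy_drv {
	void (*add)(struct toy_dev *dev);
	void (*notify)(struct toy_dev *dev, const char *event);
};

static struct toy_dev *dev_list;
static struct toy_drv *cur_drv;

/* A driver registering late is replayed over all pre-existing devices. */
static void toy_register_driver(struct toy_drv *drv)
{
	struct toy_dev *dev;

	cur_drv = drv;
	for (dev = dev_list; dev; dev = dev->next) {
		drv->add(dev);
		if (dev->up)
			drv->notify(dev, "UP");
	}
}

static void add_cb(struct toy_dev *d) { printf("add %s\n", d->name); }
static void notify_cb(struct toy_dev *d, const char *e)
{
	printf("%s: %s\n", d->name, e);
}

int main(void)
{
	struct toy_dev d1 = { "eth0", 1, 0 }, d0 = { "eth1", 0, &d1 };
	struct toy_drv drv = { add_cb, notify_cb };

	dev_list = &d0;	/* both devices probed before the driver loads */
	toy_register_driver(&drv);
	return 0;
}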
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c
new file mode 100644
index 000000000000..9867f960b063
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_roce.c
@@ -0,0 +1,314 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/qed/qede_roce.h>
+#include "qede.h"
+
+static struct qedr_driver *qedr_drv;
+static LIST_HEAD(qedr_dev_list);
+static DEFINE_MUTEX(qedr_dev_list_lock);
+
+bool qede_roce_supported(struct qede_dev *dev)
+{
+	return dev->dev_info.common.rdma_supported;
+}
+
+static void _qede_roce_dev_add(struct qede_dev *edev)
+{
+	if (!qedr_drv)
+		return;
+
+	edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
+						 edev->ndev);
+}
+
+static int qede_roce_create_wq(struct qede_dev *edev)
+{
+	INIT_LIST_HEAD(&edev->rdma_info.roce_event_list);
+	edev->rdma_info.roce_wq = create_singlethread_workqueue("roce_wq");
+	if (!edev->rdma_info.roce_wq) {
+		DP_NOTICE(edev, "qedr: Could not create workqueue\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void qede_roce_cleanup_event(struct qede_dev *edev)
+{
+	struct list_head *head = &edev->rdma_info.roce_event_list;
+	struct qede_roce_event_work *event_node;
+
+	flush_workqueue(edev->rdma_info.roce_wq);
+	while (!list_empty(head)) {
+		event_node = list_entry(head->next, struct qede_roce_event_work,
+					list);
+		cancel_work_sync(&event_node->work);
+		list_del(&event_node->list);
+		kfree(event_node);
+	}
+}
+
+static void qede_roce_destroy_wq(struct qede_dev *edev)
+{
+	qede_roce_cleanup_event(edev);
+	destroy_workqueue(edev->rdma_info.roce_wq);
+}
+
+int qede_roce_dev_add(struct qede_dev *edev)
+{
+	int rc = 0;
+
+	if (qede_roce_supported(edev)) {
+		rc = qede_roce_create_wq(edev);
+		if (rc)
+			return rc;
+
+		INIT_LIST_HEAD(&edev->rdma_info.entry);
+		mutex_lock(&qedr_dev_list_lock);
+		list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
+		_qede_roce_dev_add(edev);
+		mutex_unlock(&qedr_dev_list_lock);
+	}
+
+	return rc;
+}
+
+static void _qede_roce_dev_remove(struct qede_dev *edev)
+{
+	if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
+		qedr_drv->remove(edev->rdma_info.qedr_dev);
+	edev->rdma_info.qedr_dev = NULL;
+}
+
+void qede_roce_dev_remove(struct qede_dev *edev)
+{
+	if (!qede_roce_supported(edev))
+		return;
+
+	qede_roce_destroy_wq(edev);
+	mutex_lock(&qedr_dev_list_lock);
+	_qede_roce_dev_remove(edev);
+	list_del(&edev->rdma_info.entry);
+	mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void _qede_roce_dev_open(struct qede_dev *edev)
+{
+	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
+}
+
+static void qede_roce_dev_open(struct qede_dev *edev)
+{
+	if (!qede_roce_supported(edev))
+		return;
+
+	mutex_lock(&qedr_dev_list_lock);
+	_qede_roce_dev_open(edev);
+	mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void _qede_roce_dev_close(struct qede_dev *edev)
+{
+	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
+}
+
+static void qede_roce_dev_close(struct qede_dev *edev)
+{
+	if (!qede_roce_supported(edev))
+		return;
+
+	mutex_lock(&qedr_dev_list_lock);
+	_qede_roce_dev_close(edev);
+	mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void qede_roce_dev_shutdown(struct qede_dev *edev)
+{
+	if (!qede_roce_supported(edev))
+		return;
+
+	mutex_lock(&qedr_dev_list_lock);
+	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
+	mutex_unlock(&qedr_dev_list_lock);
+}
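Every wrapper above takes qedr_dev_list_lock around both the qedr_drv checks and the notify() call itself, so the driver pointer can never be cleared while a notification is in flight. A minimal pthread analogue of that guard pattern (illustrative only, not the driver's API) is sketched here.

/* Build: cc -o notify_guard notify_guard.c -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t drv_lock = PTHREAD_MUTEX_INITIALIZER;
static void (*notify)(const char *event);	/* NULL when unregistered */

static void do_notify(const char *event)
{
	pthread_mutex_lock(&drv_lock);
	if (notify)		/* same guard as "qedr_drv && ... ->notify" */
		notify(event);
	pthread_mutex_unlock(&drv_lock);
}

static void cb(const char *e) { printf("event: %s\n", e); }

int main(void)
{
	pthread_mutex_lock(&drv_lock);
	notify = cb;		/* register under the lock */
	pthread_mutex_unlock(&drv_lock);

	do_notify("UP");

	pthread_mutex_lock(&drv_lock);
	notify = 0;		/* unregister; no call can be mid-flight */
	pthread_mutex_unlock(&drv_lock);
	do_notify("DOWN");	/* safely dropped */
	return 0;
}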
+int qede_roce_register_driver(struct qedr_driver *drv)
+{
+	struct qede_dev *edev;
+	u8 qedr_counter = 0;
+
+	mutex_lock(&qedr_dev_list_lock);
+	if (qedr_drv) {
+		mutex_unlock(&qedr_dev_list_lock);
+		return -EINVAL;
+	}
+	qedr_drv = drv;
+
+	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
+		struct net_device *ndev;
+
+		qedr_counter++;
+		_qede_roce_dev_add(edev);
+		ndev = edev->ndev;
+		if (netif_running(ndev) && netif_oper_up(ndev))
+			_qede_roce_dev_open(edev);
+	}
+	mutex_unlock(&qedr_dev_list_lock);
+
+	DP_INFO(edev, "qedr: discovered and registered %d RoCE funcs\n",
+		qedr_counter);
+
+	return 0;
+}
+EXPORT_SYMBOL(qede_roce_register_driver);
+
+void qede_roce_unregister_driver(struct qedr_driver *drv)
+{
+	struct qede_dev *edev;
+
+	mutex_lock(&qedr_dev_list_lock);
+	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
+		if (edev->rdma_info.qedr_dev)
+			_qede_roce_dev_remove(edev);
+	}
+	qedr_drv = NULL;
+	mutex_unlock(&qedr_dev_list_lock);
+}
+EXPORT_SYMBOL(qede_roce_unregister_driver);
+
+static void qede_roce_changeaddr(struct qede_dev *edev)
+{
+	if (!qede_roce_supported(edev))
+		return;
+
+	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
+}
+
+struct qede_roce_event_work *qede_roce_get_free_event_node(struct qede_dev
+							   *edev)
+{
+	struct qede_roce_event_work *event_node = NULL;
+	struct list_head *list_node = NULL;
+	bool found = false;
+
+	list_for_each(list_node, &edev->rdma_info.roce_event_list) {
+		event_node = list_entry(list_node, struct qede_roce_event_work,
+					list);
+		if (!work_pending(&event_node->work)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
+		if (!event_node) {
+			DP_NOTICE(edev,
+				  "qedr: Could not allocate memory for roce work\n");
+			return NULL;
+		}
+		list_add_tail(&event_node->list,
+			      &edev->rdma_info.roce_event_list);
+	}
+
+	return event_node;
+}
+
+static void qede_roce_handle_event(struct work_struct *work)
+{
+	struct qede_roce_event_work *event_node;
+	enum qede_roce_event event;
+	struct qede_dev *edev;
+
+	event_node = container_of(work, struct qede_roce_event_work, work);
+	event = event_node->event;
+	edev = event_node->ptr;
+
+	switch (event) {
+	case QEDE_UP:
+		qede_roce_dev_open(edev);
+		break;
+	case QEDE_DOWN:
+		qede_roce_dev_close(edev);
+		break;
+	case QEDE_CLOSE:
+		qede_roce_dev_shutdown(edev);
+		break;
+	case QEDE_CHANGE_ADDR:
+		qede_roce_changeaddr(edev);
+		break;
+	default:
+		DP_NOTICE(edev, "Invalid roce event %d", event);
+	}
+}
+
+static void qede_roce_add_event(struct qede_dev *edev,
+				enum qede_roce_event event)
+{
+	struct qede_roce_event_work *event_node;
+
+	if (!edev->rdma_info.qedr_dev)
+		return;
+
+	event_node = qede_roce_get_free_event_node(edev);
+	if (!event_node)
+		return;
+
+	event_node->event = event;
+	event_node->ptr = edev;
+
+	INIT_WORK(&event_node->work, qede_roce_handle_event);
+	queue_work(edev->rdma_info.roce_wq, &event_node->work);
+}
+
+void qede_roce_dev_event_open(struct qede_dev *edev)
+{
+	qede_roce_add_event(edev, QEDE_UP);
+}
+
+void qede_roce_dev_event_close(struct qede_dev *edev)
+{
+	qede_roce_add_event(edev, QEDE_DOWN);
+}
+
+void qede_roce_event_changeaddr(struct qede_dev *edev)
+{
+	qede_roce_add_event(edev, QEDE_CHANGE_ADDR);
+}
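qede_roce_get_free_event_node() above recycles work items rather than allocating one per event: a node whose work is no longer pending is reused in place, and a fresh node is allocated (and kept on the list for later reuse) only when every existing one is busy. A compact userspace sketch of the same idea follows; the names and the single pending flag are invented stand-ins for the driver's list_head/work_pending() machinery.

/* Build: cc -o node_pool node_pool.c */
#include <stdio.h>
#include <stdlib.h>

struct event_node {
	int pending;		/* stands in for work_pending() */
	int event;
	struct event_node *next;
};

static struct event_node *pool;

static struct event_node *get_free_node(void)
{
	struct event_node *n;

	for (n = pool; n; n = n->next)
		if (!n->pending)
			return n;	/* recycle an idle node */

	n = calloc(1, sizeof(*n));
	if (!n)
		return 0;
	n->next = pool;			/* keep it around for future reuse */
	pool = n;
	return n;
}

int main(void)
{
	struct event_node *a = get_free_node(), *b;

	a->pending = 1;			/* queued, cannot be reused */
	b = get_free_node();
	b->pending = 1;
	printf("second node distinct: %d\n", b != a);
	a->pending = 0;			/* a's work has completed */
	printf("a recycled: %d\n", get_free_node() == a);
	return 0;
}

The trade-off mirrors the driver's: events are rare and short-lived, so a tiny ever-growing pool avoids an allocation on every link flap while qede_roce_cleanup_event() can still free everything at teardown.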