Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
-rw-r--r--    drivers/net/ethernet/broadcom/bnxt/bnxt.c    521
1 file changed, 338 insertions(+), 183 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1500243b9886..1991f0c7bc0e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1439,7 +1439,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
     (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
        u16 vlan_proto = tpa_info->metadata >>
            RX_CMP_FLAGS2_METADATA_TPID_SFT;
-       u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
+       u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
 
        __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
    }
@@ -1623,7 +1623,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
        u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
-       u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
+       u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
        u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
 
        __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
@@ -1922,7 +1922,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
        /* Sync BD data before updating doorbell */
        wmb();
 
-       bnxt_db_write(bp, db, DB_KEY_TX | prod);
+       bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod);
    }
 
    cpr->cp_raw_cons = raw_cons;
@@ -2317,6 +2317,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
        if (rc)
            return rc;
 
+       ring->grp_idx = i;
        rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
        mem_size = rxr->rx_agg_bmap_size / 8;
        rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
@@ -2389,6 +2390,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
        if (rc)
            return rc;
 
+       ring->grp_idx = txr->bnapi->index;
        if (bp->tx_push_size) {
            dma_addr_t mapping;
 
@@ -2442,8 +2444,10 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
 
 static int bnxt_alloc_cp_rings(struct bnxt *bp)
 {
-   int i, rc;
+   int i, rc, ulp_base_vec, ulp_msix;
 
+   ulp_msix = bnxt_get_ulp_msix_num(bp);
+   ulp_base_vec = bnxt_get_ulp_msix_base(bp);
    for (i = 0; i < bp->cp_nr_rings; i++) {
        struct bnxt_napi *bnapi = bp->bnapi[i];
        struct bnxt_cp_ring_info *cpr;
@@ -2458,6 +2462,11 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
        rc = bnxt_alloc_ring(bp, ring);
        if (rc)
            return rc;
+
+       if (ulp_msix && i >= ulp_base_vec)
+           ring->map_idx = i + ulp_msix;
+       else
+           ring->map_idx = i;
    }
    return 0;
 }
@@ -3059,12 +3068,21 @@ static void bnxt_free_stats(struct bnxt *bp)
    u32 size, i;
    struct pci_dev *pdev = bp->pdev;
 
+   bp->flags &= ~BNXT_FLAG_PORT_STATS;
+   bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
+
    if (bp->hw_rx_port_stats) {
        dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
                  bp->hw_rx_port_stats,
                  bp->hw_rx_port_stats_map);
        bp->hw_rx_port_stats = NULL;
-       bp->flags &= ~BNXT_FLAG_PORT_STATS;
+   }
+
+   if (bp->hw_rx_port_stats_ext) {
+       dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
+                 bp->hw_rx_port_stats_ext,
+                 bp->hw_rx_port_stats_ext_map);
+       bp->hw_rx_port_stats_ext = NULL;
    }
 
    if (!bp->bnapi)
@@ -3120,6 +3138,21 @@ static int bnxt_alloc_stats(struct bnxt *bp)
        bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
                       sizeof(struct rx_port_stats) + 512;
        bp->flags |= BNXT_FLAG_PORT_STATS;
+
+       /* Display extended statistics only if FW supports it */
+       if (bp->hwrm_spec_code < 0x10804 ||
+           bp->hwrm_spec_code == 0x10900)
+           return 0;
+
+       bp->hw_rx_port_stats_ext =
+           dma_zalloc_coherent(&pdev->dev,
+                       sizeof(struct rx_port_stats_ext),
+                       &bp->hw_rx_port_stats_ext_map,
+                       GFP_KERNEL);
+       if (!bp->hw_rx_port_stats_ext)
+           return 0;
+
+       bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
    }
    return 0;
 }
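[Annotation, not part of the patch] The first two hunks widen the RX metadata
mask from VID-only to the full 802.1Q TCI before the tag is handed to
__vlan_hwaccel_put_tag(). A minimal sketch of the decode; the function name
and the literal mask value here are illustrative, the real constants live in
bnxt.h:

    /* RX completion metadata: TPID in bits 31:16, TCI in bits 15:0.
     * The TCI keeps PCP and DEI, so masking with a 12-bit VID mask
     * would have dropped the priority bits on their way to the stack.
     */
    static u16 example_decode_tci(u32 meta_data)
    {
        return meta_data & 0xffff;  /* PCP + DEI + VID */
    }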
@@ -3357,6 +3390,15 @@ static void bnxt_disable_int(struct bnxt *bp)
    }
 }
 
+static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
+{
+   struct bnxt_napi *bnapi = bp->bnapi[n];
+   struct bnxt_cp_ring_info *cpr;
+
+   cpr = &bnapi->cp_ring;
+   return cpr->cp_ring_struct.map_idx;
+}
+
 static void bnxt_disable_int_sync(struct bnxt *bp)
 {
    int i;
@@ -3364,8 +3406,11 @@ static void bnxt_disable_int_sync(struct bnxt *bp)
    atomic_inc(&bp->intr_sem);
 
    bnxt_disable_int(bp);
-   for (i = 0; i < bp->cp_nr_rings; i++)
-       synchronize_irq(bp->irq_tbl[i].vector);
+   for (i = 0; i < bp->cp_nr_rings; i++) {
+       int map_idx = bnxt_cp_num_to_irq_num(bp, i);
+
+       synchronize_irq(bp->irq_tbl[map_idx].vector);
+   }
 }
 
 static void bnxt_enable_int(struct bnxt *bp)
@@ -3398,7 +3443,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
    int i, intr_process, rc, tmo_count;
    struct input *req = msg;
    u32 *data = msg;
-   __le32 *resp_len, *valid;
+   __le32 *resp_len;
+   u8 *valid;
    u16 cp_ring_id, len = 0;
    struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
    u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
@@ -3450,6 +3496,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 
    i = 0;
    tmo_count = timeout * 40;
+   resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
    if (intr_process) {
        /* Wait until hwrm response cmpl interrupt is processed */
        while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
@@ -3462,9 +3509,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
                   le16_to_cpu(req->req_type));
            return -1;
        }
+       len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
+             HWRM_RESP_LEN_SFT;
+       valid = bp->hwrm_cmd_resp_addr + len - 1;
    } else {
        /* Check if response len is updated */
-       resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
        for (i = 0; i < tmo_count; i++) {
            len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
                  HWRM_RESP_LEN_SFT;
@@ -3480,10 +3529,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
            return -1;
        }
 
-       /* Last word of resp contains valid bit */
-       valid = bp->hwrm_cmd_resp_addr + len - 4;
+       /* Last byte of resp contains valid bit */
+       valid = bp->hwrm_cmd_resp_addr + len - 1;
        for (i = 0; i < 5; i++) {
-           if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
+           /* make sure we read from updated DMA memory */
+           dma_rmb();
+           if (*valid)
                break;
            udelay(1);
        }
@@ -3496,6 +3547,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
        }
    }
 
+   /* Zero valid bit for compatibility.  Valid bit in an older spec
+    * may become a new field in a newer spec.  We must make sure that
+    * a new field not implemented by old spec will read zero.
+    */
+   *valid = 0;
    rc = le16_to_cpu(resp->error_code);
    if (rc && !silent)
        netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
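[Annotation, not part of the patch] The HWRM hunks above switch from polling a
32-bit word to polling the single valid byte that firmware DMA-writes last.
A kernel-context sketch of the idiom being adopted (function name is
hypothetical; dma_rmb() and udelay() are the real kernel helpers):

    /* Poll the last byte of the DMA'ed response.  dma_rmb() forces each
     * iteration to observe the device's latest writes before testing.
     */
    static bool example_poll_valid_byte(u8 *valid, int tries)
    {
        int i;

        for (i = 0; i < tries; i++) {
            /* make sure we read from updated DMA memory */
            dma_rmb();
            if (*valid)
                return true;
            udelay(1);
        }
        return false;
    }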
@@ -3577,9 +3633,13 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
                FUNC_DRV_RGTR_REQ_ENABLES_VER);
 
    req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
-   req.ver_maj = DRV_VER_MAJ;
-   req.ver_min = DRV_VER_MIN;
-   req.ver_upd = DRV_VER_UPD;
+   req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
+   req.ver_maj_8b = DRV_VER_MAJ;
+   req.ver_min_8b = DRV_VER_MIN;
+   req.ver_upd_8b = DRV_VER_UPD;
+   req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
+   req.ver_min = cpu_to_le16(DRV_VER_MIN);
+   req.ver_upd = cpu_to_le16(DRV_VER_UPD);
 
    if (BNXT_PF(bp)) {
        u32 data[8];
@@ -3847,6 +3907,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
    struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
    struct hwrm_vnic_tpa_cfg_input req = {0};
 
+   if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+       return 0;
+
    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
 
    if (tpa_flags) {
@@ -3995,6 +4058,13 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
    return rc;
 }
 
+static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
+{
+   if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
+       return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
+   return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
+}
+
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 {
    unsigned int ring = 0, grp_idx;
@@ -4050,8 +4120,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
    if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
        req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
    if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
-       req.flags |=
-           cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
+       req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
 
    return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
@@ -4132,9 +4201,13 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
    mutex_lock(&bp->hwrm_cmd_lock);
    rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
    if (!rc) {
-       if (resp->flags &
-           cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
+       u32 flags = le32_to_cpu(resp->flags);
+
+       if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)
            bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
+       if (flags &
+           VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
+           bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
    }
    mutex_unlock(&bp->hwrm_cmd_lock);
    return rc;
@@ -4201,12 +4274,12 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
 
 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
                    struct bnxt_ring_struct *ring,
-                   u32 ring_type, u32 map_index,
-                   u32 stats_ctx_id)
+                   u32 ring_type, u32 map_index)
 {
    int rc = 0, err = 0;
    struct hwrm_ring_alloc_input req = {0};
    struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+   struct bnxt_ring_grp_info *grp_info;
    u16 ring_id;
 
    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
@@ -4228,10 +4301,10 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
    case HWRM_RING_ALLOC_TX:
        req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
        /* Association of transmit ring with completion ring */
-       req.cmpl_ring_id =
-           cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
+       grp_info = &bp->grp_info[ring->grp_idx];
+       req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
        req.length = cpu_to_le32(bp->tx_ring_mask + 1);
-       req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
+       req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
        req.queue_id = cpu_to_le16(ring->queue_id);
        break;
    case HWRM_RING_ALLOC_RX:
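[Annotation, not part of the patch] bnxt_hwrm_func_drv_rgtr() now fills both
the legacy 8-bit version fields and the new 16-bit ones, with
FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE telling newer firmware to read the
wide fields. A sketch of the compatibility idea, using the field names from
the hunk above:

    /* Old firmware ignores the flag and reads the *_8b fields; new
     * firmware sees the flag and reads the __le16 fields, so a single
     * request works against either.
     */
    req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
    req.ver_maj_8b = DRV_VER_MAJ;           /* legacy 8-bit copy */
    req.ver_maj = cpu_to_le16(DRV_VER_MAJ); /* full 16-bit value */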
@@ -4318,10 +4391,11 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
        struct bnxt_napi *bnapi = bp->bnapi[i];
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+       u32 map_idx = ring->map_idx;
 
-       cpr->cp_doorbell = bp->bar1 + i * 0x80;
-       rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
-                         INVALID_STATS_CTX_ID);
+       cpr->cp_doorbell = bp->bar1 + map_idx * 0x80;
+       rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL,
+                         map_idx);
        if (rc)
            goto err_out;
        BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
@@ -4337,11 +4411,10 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
    for (i = 0; i < bp->tx_nr_rings; i++) {
        struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
        struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
-       u32 map_idx = txr->bnapi->index;
-       u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
+       u32 map_idx = i;
 
        rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
-                         map_idx, fw_stats_ctx);
+                         map_idx);
        if (rc)
            goto err_out;
        txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
@@ -4353,7 +4426,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
        u32 map_idx = rxr->bnapi->index;
 
        rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
-                         map_idx, INVALID_STATS_CTX_ID);
+                         map_idx);
        if (rc)
            goto err_out;
        rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
@@ -4366,13 +4439,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
            struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
            struct bnxt_ring_struct *ring =
                        &rxr->rx_agg_ring_struct;
-           u32 grp_idx = rxr->bnapi->index;
+           u32 grp_idx = ring->grp_idx;
            u32 map_idx = grp_idx + bp->rx_nr_rings;
 
            rc = hwrm_ring_alloc_send_msg(bp, ring,
                              HWRM_RING_ALLOC_AGG,
-                             map_idx,
-                             INVALID_STATS_CTX_ID);
+                             map_idx);
            if (rc)
                goto err_out;
 
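[Annotation, not part of the patch] With ring->grp_idx stored at allocation
time (see the bnxt_alloc_rx_rings()/bnxt_alloc_tx_rings() hunks earlier),
hwrm_ring_alloc_send_msg() can look up both the completion ring and the stats
context from the ring group itself, so the extra stats_ctx_id parameter goes
away. Sketch of the lookup, with field names as used in the driver:

    struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];

    req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
    req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);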
@@ -4558,18 +4630,17 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
    return rc;
 }
 
-static int
-bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
-              int ring_grps, int cp_rings, int vnics)
+static void
+__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
+                int tx_rings, int rx_rings, int ring_grps,
+                int cp_rings, int vnics)
 {
-   struct hwrm_func_cfg_input req = {0};
    u32 enables = 0;
-   int rc;
 
-   bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-   req.fid = cpu_to_le16(0xffff);
+   bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
+   req->fid = cpu_to_le16(0xffff);
    enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
-   req.num_tx_rings = cpu_to_le16(tx_rings);
+   req->num_tx_rings = cpu_to_le16(tx_rings);
    if (bp->flags & BNXT_FLAG_NEW_RM) {
        enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
        enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
@@ -4578,16 +4649,53 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
               FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
        enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
-       req.num_rx_rings = cpu_to_le16(rx_rings);
-       req.num_hw_ring_grps = cpu_to_le16(ring_grps);
-       req.num_cmpl_rings = cpu_to_le16(cp_rings);
-       req.num_stat_ctxs = req.num_cmpl_rings;
-       req.num_vnics = cpu_to_le16(vnics);
+       req->num_rx_rings = cpu_to_le16(rx_rings);
+       req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+       req->num_cmpl_rings = cpu_to_le16(cp_rings);
+       req->num_stat_ctxs = req->num_cmpl_rings;
+       req->num_vnics = cpu_to_le16(vnics);
    }
-   if (!enables)
+   req->enables = cpu_to_le32(enables);
+}
+
+static void
+__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
+                struct hwrm_func_vf_cfg_input *req, int tx_rings,
+                int rx_rings, int ring_grps, int cp_rings,
+                int vnics)
+{
+   u32 enables = 0;
+
+   bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
+   enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+   enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+   enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+                 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+   enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+   enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+   req->num_tx_rings = cpu_to_le16(tx_rings);
+   req->num_rx_rings = cpu_to_le16(rx_rings);
+   req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+   req->num_cmpl_rings = cpu_to_le16(cp_rings);
+   req->num_stat_ctxs = req->num_cmpl_rings;
+   req->num_vnics = cpu_to_le16(vnics);
+
+   req->enables = cpu_to_le32(enables);
+}
+
+static int
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+              int ring_grps, int cp_rings, int vnics)
+{
+   struct hwrm_func_cfg_input req = {0};
+   int rc;
+
+   __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+                    cp_rings, vnics);
+   if (!req.enables)
        return 0;
 
-   req.enables = cpu_to_le32(enables);
    rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
    if (rc)
        return -ENOMEM;
@@ -4604,7 +4712,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
               int ring_grps, int cp_rings, int vnics)
 {
    struct hwrm_func_vf_cfg_input req = {0};
-   u32 enables = 0;
    int rc;
 
    if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
@@ -4612,22 +4719,8 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
        return 0;
    }
 
-   bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
-   enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
-   enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
-   enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-                 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
-   enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
-   enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
-
-   req.num_tx_rings = cpu_to_le16(tx_rings);
-   req.num_rx_rings = cpu_to_le16(rx_rings);
-   req.num_hw_ring_grps = cpu_to_le16(ring_grps);
-   req.num_cmpl_rings = cpu_to_le16(cp_rings);
-   req.num_stat_ctxs = req.num_cmpl_rings;
-   req.num_vnics = cpu_to_le16(vnics);
-
-   req.enables = cpu_to_le32(enables);
+   __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+                    cp_rings, vnics);
    rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
    if (rc)
        return -ENOMEM;
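[Annotation, not part of the patch] Factoring the request setup into
__bnxt_hwrm_reserve_pf_rings()/__bnxt_hwrm_reserve_vf_rings() lets the same
code build the message for both the reserve path and, further down, the
"check" path that only tests a configuration. A sketch of the reuse on the
PF side, using names from the hunks above:

    struct hwrm_func_cfg_input req = {0};

    __bnxt_hwrm_reserve_pf_rings(bp, &req, tx, rx, grp, cp, vnics);
    if (!req.enables)       /* nothing to reserve */
        return 0;
    rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);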
@@ -4645,20 +4738,59 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
    return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
 }
 
+static int bnxt_cp_rings_in_use(struct bnxt *bp)
+{
+   int cp = bp->cp_nr_rings;
+   int ulp_msix, ulp_base;
+
+   ulp_msix = bnxt_get_ulp_msix_num(bp);
+   if (ulp_msix) {
+       ulp_base = bnxt_get_ulp_msix_base(bp);
+       cp += ulp_msix;
+       if ((ulp_base + ulp_msix) > cp)
+           cp = ulp_base + ulp_msix;
+   }
+   return cp;
+}
+
+static bool bnxt_need_reserve_rings(struct bnxt *bp)
+{
+   struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+   int cp = bnxt_cp_rings_in_use(bp);
+   int rx = bp->rx_nr_rings;
+   int vnic = 1, grp = rx;
+
+   if (bp->hwrm_spec_code < 0x10601)
+       return false;
+
+   if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+       return true;
+
+   if (bp->flags & BNXT_FLAG_RFS)
+       vnic = rx + 1;
+   if (bp->flags & BNXT_FLAG_AGG_RINGS)
+       rx <<= 1;
+   if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+       (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
+        hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
+       return true;
+   return false;
+}
+
 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
               bool shared);
 
 static int __bnxt_reserve_rings(struct bnxt *bp)
 {
    struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+   int cp = bnxt_cp_rings_in_use(bp);
    int tx = bp->tx_nr_rings;
    int rx = bp->rx_nr_rings;
-   int cp = bp->cp_nr_rings;
    int grp, rx_rings, rc;
    bool sh = false;
    int vnic = 1;
 
-   if (bp->hwrm_spec_code < 0x10601)
+   if (!bnxt_need_reserve_rings(bp))
        return 0;
 
    if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -4667,14 +4799,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
        vnic = rx + 1;
    if (bp->flags & BNXT_FLAG_AGG_RINGS)
        rx <<= 1;
-
    grp = bp->rx_nr_rings;
-   if (tx == hw_resc->resv_tx_rings &&
-       (!(bp->flags & BNXT_FLAG_NEW_RM) ||
-        (rx == hw_resc->resv_rx_rings &&
-         grp == hw_resc->resv_hw_ring_grps &&
-         cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
-       return 0;
 
    rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
    if (rc)
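[Annotation, not part of the patch] bnxt_cp_rings_in_use() counts the RDMA
(ULP) vectors on top of the L2 completion rings and also handles a ULP block
that starts beyond the L2 range. A worked example under assumed numbers:

    /* cp_nr_rings = 8, ulp_msix = 4, ulp_base = 8:
     *   cp = 8 + 4 = 12; ulp_base + ulp_msix = 12, not greater, so 12.
     * cp_nr_rings = 8, ulp_msix = 4, ulp_base = 16:
     *   cp = 12, but the ULP block ends at vector 20, so cp = 20.
     */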
@@ -4718,64 +4843,26 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
    return rc;
 }
 
-static bool bnxt_need_reserve_rings(struct bnxt *bp)
-{
-   struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-   int rx = bp->rx_nr_rings;
-   int vnic = 1;
-
-   if (bp->hwrm_spec_code < 0x10601)
-       return false;
-
-   if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
-       return true;
-
-   if (bp->flags & BNXT_FLAG_RFS)
-       vnic = rx + 1;
-   if (bp->flags & BNXT_FLAG_AGG_RINGS)
-       rx <<= 1;
-   if ((bp->flags & BNXT_FLAG_NEW_RM) &&
-       (hw_resc->resv_rx_rings != rx ||
-        hw_resc->resv_cp_rings != bp->cp_nr_rings ||
-        hw_resc->resv_vnics != vnic))
-       return true;
-   return false;
-}
-
 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
-                   int ring_grps, int cp_rings)
+                   int ring_grps, int cp_rings, int vnics)
 {
    struct hwrm_func_vf_cfg_input req = {0};
-   u32 flags, enables;
+   u32 flags;
    int rc;
 
    if (!(bp->flags & BNXT_FLAG_NEW_RM))
        return 0;
 
-   bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+   __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+                    cp_rings, vnics);
    flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
        FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
        FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
        FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
        FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
        FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
-   enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
-         FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
-         FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-         FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
-         FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
-         FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
    req.flags = cpu_to_le32(flags);
-   req.enables = cpu_to_le32(enables);
-   req.num_tx_rings = cpu_to_le16(tx_rings);
-   req.num_rx_rings = cpu_to_le16(rx_rings);
-   req.num_cmpl_rings = cpu_to_le16(cp_rings);
-   req.num_hw_ring_grps = cpu_to_le16(ring_grps);
-   req.num_stat_ctxs = cpu_to_le16(cp_rings);
-   req.num_vnics = cpu_to_le16(1);
-   if (bp->flags & BNXT_FLAG_RFS)
-       req.num_vnics = cpu_to_le16(rx_rings + 1);
    rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
    if (rc)
        return -ENOMEM;
@@ -4783,38 +4870,23 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 }
 
 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
-                   int ring_grps, int cp_rings)
+                   int ring_grps, int cp_rings, int vnics)
 {
    struct hwrm_func_cfg_input req = {0};
-   u32 flags, enables;
+   u32 flags;
    int rc;
 
-   bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-   req.fid = cpu_to_le16(0xffff);
+   __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+                    cp_rings, vnics);
    flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
-   enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS;
-   req.num_tx_rings = cpu_to_le16(tx_rings);
-   if (bp->flags & BNXT_FLAG_NEW_RM) {
+   if (bp->flags & BNXT_FLAG_NEW_RM)
        flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
             FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
             FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
             FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
             FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
-       enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
-              FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-              FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
-              FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
-              FUNC_CFG_REQ_ENABLES_NUM_VNICS;
-       req.num_rx_rings = cpu_to_le16(rx_rings);
-       req.num_cmpl_rings = cpu_to_le16(cp_rings);
-       req.num_hw_ring_grps = cpu_to_le16(ring_grps);
-       req.num_stat_ctxs = cpu_to_le16(cp_rings);
-       req.num_vnics = cpu_to_le16(1);
-       if (bp->flags & BNXT_FLAG_RFS)
-           req.num_vnics = cpu_to_le16(rx_rings + 1);
-   }
+
    req.flags = cpu_to_le32(flags);
-   req.enables = cpu_to_le32(enables);
    rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
    if (rc)
        return -ENOMEM;
@@ -4822,17 +4894,17 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 }
 
 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
-                int ring_grps, int cp_rings)
+                int ring_grps, int cp_rings, int vnics)
 {
    if (bp->hwrm_spec_code < 0x10801)
        return 0;
 
    if (BNXT_PF(bp))
        return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
-                       ring_grps, cp_rings);
+                       ring_grps, cp_rings, vnics);
 
    return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
-                   cp_rings);
+                   cp_rings, vnics);
 }
 
 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
@@ -5060,7 +5132,7 @@ func_qcfg_exit:
    return rc;
 }
 
-static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
 {
    struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
    struct hwrm_func_resource_qcaps_input req = {0};
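[Annotation, not part of the patch] The *_ASSETS_TEST flags make
HWRM_FUNC_CFG/HWRM_FUNC_VF_CFG validate the requested ring counts without
committing them, which is how bnxt_check_rings() can probe an ethtool channel
change before tearing anything down. Sketch of the PF-side probe, reusing the
shared builder from the earlier hunks:

    __bnxt_hwrm_reserve_pf_rings(bp, &req, tx, rx, grp, cp, vnics);
    req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
    /* firmware answers "would this fit?" without reserving anything */
    rc = hwrm_send_message_silent(bp, &req, sizeof(req),
                      HWRM_CMD_TIMEOUT);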
@@ -5077,6 +5149,10 @@ static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
        goto hwrm_func_resc_qcaps_exit;
    }
 
+   hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
+   if (!all)
+       goto hwrm_func_resc_qcaps_exit;
+
    hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
    hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
    hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
@@ -5183,7 +5259,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
    if (rc)
        return rc;
    if (bp->hwrm_spec_code >= 0x10803) {
-       rc = bnxt_hwrm_func_resc_qcaps(bp);
+       rc = bnxt_hwrm_func_resc_qcaps(bp, true);
        if (!rc)
            bp->flags |= BNXT_FLAG_NEW_RM;
    }
@@ -5331,6 +5407,21 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
    return rc;
 }
 
+static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
+{
+   struct hwrm_port_qstats_ext_input req = {0};
+   struct bnxt_pf_info *pf = &bp->pf;
+
+   if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
+       return 0;
+
+   bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
+   req.port_id = cpu_to_le16(pf->port_id);
+   req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
+   req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
+   return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
 {
    if (bp->vxlan_port_cnt) {
@@ -5423,10 +5514,9 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
    req.fid = cpu_to_le16(0xffff);
    req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
-   req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64;
+   req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
    if (size == 128)
-       req.cache_linesize =
-           FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128;
+       req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
 
    rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
    if (rc)
@@ -5745,6 +5835,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
    }
 
    for (i = 0; i < bp->cp_nr_rings; i++) {
+       int map_idx = bnxt_cp_num_to_irq_num(bp, i);
        char *attr;
 
        if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -5754,9 +5845,9 @@ static void bnxt_setup_msix(struct bnxt *bp)
        else
            attr = "tx";
 
-       snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
-            i);
-       bp->irq_tbl[i].handler = bnxt_msix;
+       snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
+            attr, i);
+       bp->irq_tbl[map_idx].handler = bnxt_msix;
    }
 }
 
@@ -5817,7 +5908,7 @@ void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
    bp->hw_resc.max_cp_rings = max;
 }
 
-static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
 {
    struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 
@@ -5829,12 +5920,44 @@ void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
    bp->hw_resc.max_irqs = max_irqs;
 }
 
+int bnxt_get_avail_msix(struct bnxt *bp, int num)
+{
+   int max_cp = bnxt_get_max_func_cp_rings(bp);
+   int max_irq = bnxt_get_max_func_irqs(bp);
+   int total_req = bp->cp_nr_rings + num;
+   int max_idx, avail_msix;
+
+   max_idx = min_t(int, bp->total_irqs, max_cp);
+   avail_msix = max_idx - bp->cp_nr_rings;
+   if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num)
+       return avail_msix;
+
+   if (max_irq < total_req) {
+       num = max_irq - bp->cp_nr_rings;
+       if (num <= 0)
+           return 0;
+   }
+   return num;
+}
+
+static int bnxt_get_num_msix(struct bnxt *bp)
+{
+   if (!(bp->flags & BNXT_FLAG_NEW_RM))
+       return bnxt_get_max_func_irqs(bp);
+
+   return bnxt_cp_rings_in_use(bp);
+}
+
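[Annotation, not part of the patch] bnxt_hwrm_port_qstats_ext() follows the
usual HWRM statistics pattern: the driver passes the DMA address and size of
a host buffer, and firmware writes the rx_port_stats_ext block into it on
each periodic poll. The key lines of the request setup, as in the hunk above:

    req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
    req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);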
 static int bnxt_init_msix(struct bnxt *bp)
 {
-   int i, total_vecs, rc = 0, min = 1;
+   int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
    struct msix_entry *msix_ent;
 
-   total_vecs = bnxt_get_max_func_irqs(bp);
+   total_vecs = bnxt_get_num_msix(bp);
+   max = bnxt_get_max_func_irqs(bp);
+   if (total_vecs > max)
+       total_vecs = max;
+
    msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
    if (!msix_ent)
        return -ENOMEM;
@@ -5848,7 +5971,8 @@ static int bnxt_init_msix(struct bnxt *bp)
        min = 2;
    total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
-   if (total_vecs < 0) {
+   ulp_msix = bnxt_get_ulp_msix_num(bp);
+   if (total_vecs < 0 || total_vecs < ulp_msix) {
        rc = -ENODEV;
        goto msix_setup_exit;
    }
@@ -5861,11 +5985,10 @@ static int bnxt_init_msix(struct bnxt *bp)
        bp->total_irqs = total_vecs;
        /* Trim rings based upon num of vectors allocated */
        rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
-                    total_vecs, min == 1);
+                    total_vecs - ulp_msix, min == 1);
        if (rc)
            goto msix_setup_exit;
 
-       bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
        bp->cp_nr_rings = (min == 1) ?
                  max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
                  bp->tx_nr_rings + bp->rx_nr_rings;
@@ -5897,7 +6020,6 @@ static int bnxt_init_inta(struct bnxt *bp)
    bp->rx_nr_rings = 1;
    bp->tx_nr_rings = 1;
    bp->cp_nr_rings = 1;
-   bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
    bp->flags |= BNXT_FLAG_SHARED_RINGS;
    bp->irq_tbl[0].vector = bp->pdev->irq;
    return 0;
@@ -5927,9 +6049,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
    bp->flags &= ~BNXT_FLAG_USING_MSIX;
 }
 
-static int bnxt_reserve_rings(struct bnxt *bp)
+int bnxt_reserve_rings(struct bnxt *bp)
 {
-   int orig_cp = bp->hw_resc.resv_cp_rings;
    int tcs = netdev_get_num_tc(bp->dev);
    int rc;
 
@@ -5941,9 +6062,12 @@ static int bnxt_reserve_rings(struct bnxt *bp)
        netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
        return rc;
    }
-   if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
+   if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+       (bnxt_get_num_msix(bp) != bp->total_irqs)) {
+       bnxt_ulp_irq_stop(bp);
        bnxt_clear_int_mode(bp);
        rc = bnxt_init_int_mode(bp);
+       bnxt_ulp_irq_restart(bp, rc);
        if (rc)
            return rc;
    }
@@ -5970,7 +6094,9 @@ static void bnxt_free_irq(struct bnxt *bp)
        return;
 
    for (i = 0; i < bp->cp_nr_rings; i++) {
-       irq = &bp->irq_tbl[i];
+       int map_idx = bnxt_cp_num_to_irq_num(bp, i);
+
+       irq = &bp->irq_tbl[map_idx];
        if (irq->requested) {
            if (irq->have_cpumask) {
                irq_set_affinity_hint(irq->vector, NULL);
@@ -5989,14 +6115,25 @@ static int bnxt_request_irq(struct bnxt *bp)
    int i, j, rc = 0;
    unsigned long flags = 0;
 #ifdef CONFIG_RFS_ACCEL
-   struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
+   struct cpu_rmap *rmap;
 #endif
 
+   rc = bnxt_setup_int_mode(bp);
+   if (rc) {
+       netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
+              rc);
+       return rc;
+   }
+#ifdef CONFIG_RFS_ACCEL
+   rmap = bp->dev->rx_cpu_rmap;
+#endif
    if (!(bp->flags & BNXT_FLAG_USING_MSIX))
        flags = IRQF_SHARED;
 
    for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
-       struct bnxt_irq *irq = &bp->irq_tbl[i];
+       int map_idx = bnxt_cp_num_to_irq_num(bp, i);
+       struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
+
 #ifdef CONFIG_RFS_ACCEL
        if (rmap && bp->bnapi[i]->rx_ring) {
            rc = irq_cpu_rmap_add(rmap, irq->vector);
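[Annotation, not part of the patch] With ULP (RDMA) vectors carved out of the
MSI-X table, L2 completion ring i no longer maps 1:1 to irq_tbl[i]; every
consumer above goes through bnxt_cp_num_to_irq_num(). A sketch of the mapping
established in bnxt_alloc_cp_rings() (function name here is hypothetical):

    static int example_map_idx(int i, int ulp_msix, int ulp_base_vec)
    {
        /* rings at or above the ULP base shift past the ULP block */
        if (ulp_msix && i >= ulp_base_vec)
            return i + ulp_msix;
        return i;
    }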
"bnxt_setup_int_mode err: %x\n", - rc); - return rc; - } } if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_USING_MSIX)) { @@ -7485,8 +7615,10 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); } - if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) + if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { bnxt_hwrm_port_qstats(bp); + bnxt_hwrm_port_qstats_ext(bp); + } if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { int rc; @@ -7531,7 +7663,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int max_rx, max_tx, tx_sets = 1; int tx_rings_needed; int rx_rings = rx; - int cp, rc; + int cp, vnics, rc; if (tcs) tx_sets = tcs; @@ -7547,10 +7679,15 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, if (max_tx < tx_rings_needed) return -ENOMEM; + vnics = 1; + if (bp->flags & BNXT_FLAG_RFS) + vnics += rx_rings; + if (bp->flags & BNXT_FLAG_AGG_RINGS) rx_rings <<= 1; cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; - return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp); + return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, + vnics); } static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) @@ -8195,6 +8332,7 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_set_vf_rate = bnxt_set_vf_bw, .ndo_set_vf_link_state = bnxt_set_vf_link_state, .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, + .ndo_set_vf_trust = bnxt_set_vf_trust, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = bnxt_poll_controller, @@ -8392,9 +8530,15 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) if (sh) bp->flags |= BNXT_FLAG_SHARED_RINGS; dflt_rings = netif_get_num_default_rss_queues(); - /* Reduce default rings to reduce memory usage on multi-port cards */ - if (bp->port_count > 1) - dflt_rings = min_t(int, dflt_rings, 4); + /* Reduce default rings on multi-port cards so that total default + * rings do not exceed CPU count. + */ + if (bp->port_count > 1) { + int max_rings = + max_t(int, num_online_cpus() / bp->port_count, 1); + + dflt_rings = min_t(int, dflt_rings, max_rings); + } rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); if (rc) return rc; @@ -8433,17 +8577,23 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp) int rc; ASSERT_RTNL(); - if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) - return 0; - bnxt_hwrm_func_qcaps(bp); - __bnxt_close_nic(bp, true, false); + + if (netif_running(bp->dev)) + __bnxt_close_nic(bp, true, false); + + bnxt_ulp_irq_stop(bp); bnxt_clear_int_mode(bp); rc = bnxt_init_int_mode(bp); - if (rc) - dev_close(bp->dev); - else - rc = bnxt_open_nic(bp, true, false); + bnxt_ulp_irq_restart(bp, rc); + + if (netif_running(bp->dev)) { + if (rc) + dev_close(bp->dev); + else + rc = bnxt_open_nic(bp, true, false); + } + return rc; } @@ -8664,6 +8814,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; + /* No TC has been set yet and rings may have been trimmed due to + * limited MSIX, so we re-initialize the TX rings per TC. + */ + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + bnxt_get_wol_settings(bp); if (bp->flags & BNXT_FLAG_WOL_CAP) device_set_wakeup_enable(&pdev->dev, bp->wol); |