Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c')
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 1444
1 file changed, 890 insertions(+), 554 deletions(-)
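Note on the iterator used throughout this patch: the open-coded "for (i = 0; i < pf->num_alloc_vfs; i++)" loops are converted to ice_for_each_vf(). The macro itself is defined in ice.h rather than in this file; based on its usage here it presumably expands to something like the sketch below (an assumption, not quoted from the tree):

/* Presumed shape of the ice_for_each_vf() iterator adopted by this
 * patch. The real definition lives in ice.h; this sketch only
 * illustrates the contract: visit each allocated VF index in order.
 */
#define ice_for_each_vf(pf, i) \
	for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)

Centralizing the loop bound in one place is also why the patch moves the "pf->num_alloc_vfs = num_alloc_vfs" assignment in ice_alloc_vfs() to before the default-profile loop: the iterator reads that field as its bound.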
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 5d24b539648f..262714d5f54a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -2,37 +2,36 @@ /* Copyright (c) 2018, Intel Corporation. */ #include "ice.h" +#include "ice_base.h" #include "ice_lib.h" /** - * ice_err_to_virt err - translate errors for VF return code - * @ice_err: error return code - */ -static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err) -{ - switch (ice_err) { - case ICE_SUCCESS: - return VIRTCHNL_STATUS_SUCCESS; - case ICE_ERR_BAD_PTR: - case ICE_ERR_INVAL_SIZE: - case ICE_ERR_DEVICE_NOT_SUPPORTED: - case ICE_ERR_PARAM: - case ICE_ERR_CFG: - return VIRTCHNL_STATUS_ERR_PARAM; - case ICE_ERR_NO_MEMORY: - return VIRTCHNL_STATUS_ERR_NO_MEMORY; - case ICE_ERR_NOT_READY: - case ICE_ERR_RESET_FAILED: - case ICE_ERR_FW_API_VER: - case ICE_ERR_AQ_ERROR: - case ICE_ERR_AQ_TIMEOUT: - case ICE_ERR_AQ_FULL: - case ICE_ERR_AQ_NO_WORK: - case ICE_ERR_AQ_EMPTY: - return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; - default: - return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + * ice_validate_vf_id - helper to check if VF ID is valid + * @pf: pointer to the PF structure + * @vf_id: the ID of the VF to check + */ +static int ice_validate_vf_id(struct ice_pf *pf, int vf_id) +{ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %d\n", vf_id); + return -EINVAL; + } + return 0; +} + +/** + * ice_check_vf_init - helper to check if VF init complete + * @pf: pointer to the PF structure + * @vf: the pointer to the VF to check + */ +static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf) +{ + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + dev_err(ice_pf_to_dev(pf), "VF ID: %d in reset. 
Try again.\n", + vf->vf_id); + return -EBUSY; } + return 0; } /** @@ -48,10 +47,11 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { struct ice_hw *hw = &pf->hw; - struct ice_vf *vf = pf->vf; int i; - for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { + ice_for_each_vf(pf, i) { + struct ice_vf *vf = &pf->vf[i]; + /* Not all vfs are enabled so skip the ones that are not */ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) @@ -91,26 +91,6 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe, } /** - * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status - * @vf: pointer to the VF structure - * @pfe: pointer to the virtchnl_pf_event to set link speed/status for - * @link_up: whether or not to set the link up/down - */ -static void -ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe, - bool link_up) -{ - u16 link_speed; - - if (link_up) - link_speed = ICE_AQ_LINK_SPEED_100GB; - else - link_speed = ICE_AQ_LINK_SPEED_UNKNOWN; - - ice_set_pfe_link(vf, pfe, link_speed, link_up); -} - -/** * ice_vc_notify_vf_link_state - Inform a VF of link status * @vf: pointer to the VF structure * @@ -129,11 +109,18 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf) pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; - if (vf->link_forced) - ice_set_pfe_link_forced(vf, &pfe, vf->link_up); - else - ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info & - ICE_AQ_LINK_UP); + /* Always report link is down if the VF queues aren't enabled */ + if (!vf->num_qs_ena) { + ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false); + } else if (vf->link_forced) { + u16 link_speed = vf->link_up ? 
+ ls->link_speed : ICE_AQ_LINK_SPEED_UNKNOWN; + + ice_set_pfe_link(vf, &pfe, link_speed, vf->link_up); + } else { + ice_set_pfe_link(vf, &pfe, ls->link_speed, + ls->link_info & ICE_AQ_LINK_UP); + } ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, @@ -181,12 +168,14 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) { struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + struct device *dev; int first, last, v; struct ice_hw *hw; hw = &pf->hw; vsi = pf->vsi[vf->lan_vsi_idx]; + dev = ice_pf_to_dev(pf); wr32(hw, VPINT_ALLOC(vf->vf_id), 0); wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); @@ -205,14 +194,12 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0); else - dev_err(&pf->pdev->dev, - "Scattered mode for VF Tx queues is not yet implemented\n"); + dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0); else - dev_err(&pf->pdev->dev, - "Scattered mode for VF Rx queues is not yet implemented\n"); + dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); } /** @@ -252,11 +239,41 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf) } /** + * ice_set_vf_state_qs_dis - Set VF queues state to disabled + * @vf: pointer to the VF structure + */ +void ice_set_vf_state_qs_dis(struct ice_vf *vf) +{ + /* Clear Rx/Tx enabled queues flag */ + bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF); + bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF); + vf->num_qs_ena = 0; + clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); +} + +/** + * ice_dis_vf_qs - Disable the VF queues + * @vf: pointer to the VF structure + */ +static void ice_dis_vf_qs(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = pf->vsi[vf->lan_vsi_idx]; + + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); + ice_vsi_stop_rx_rings(vsi); + ice_set_vf_state_qs_dis(vf); +} + +/** * ice_free_vfs - Free all VFs * @pf: pointer to the PF structure */ void ice_free_vfs(struct ice_pf *pf) { + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; int tmp, i; @@ -267,19 +284,9 @@ void ice_free_vfs(struct ice_pf *pf) usleep_range(1000, 2000); /* Avoid wait time by stopping all VFs at the same time */ - for (i = 0; i < pf->num_alloc_vfs; i++) { - struct ice_vsi *vsi; - - if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states)) - continue; - - vsi = pf->vsi[pf->vf[i].lan_vsi_idx]; - /* stop rings without wait time */ - ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, i); - ice_vsi_stop_rx_rings(vsi); - - clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); - } + ice_for_each_vf(pf, i) + if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states)) + ice_dis_vf_qs(&pf->vf[i]); /* Disable IOV before freeing resources. 
This lets any VF drivers * running in the host get themselves cleaned up before we yank @@ -288,31 +295,24 @@ void ice_free_vfs(struct ice_pf *pf) if (!pci_vfs_assigned(pf->pdev)) pci_disable_sriov(pf->pdev); else - dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); + dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); tmp = pf->num_alloc_vfs; pf->num_vf_qps = 0; pf->num_alloc_vfs = 0; for (i = 0; i < tmp; i++) { if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { - /* disable VF qp mappings */ + /* disable VF qp mappings and set VF disable state */ ice_dis_vf_mappings(&pf->vf[i]); - - /* Set this state so that assigned VF vectors can be - * reclaimed by PF for reuse in ice_vsi_release(). No - * need to clear this bit since pf->vf array is being - * freed anyways after this for loop - */ - set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states); + set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states); ice_free_vf_res(&pf->vf[i]); } } if (ice_sriov_free_msix_res(pf)) - dev_err(&pf->pdev->dev, - "Failed to free MSIX resources used by SR-IOV\n"); + dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); - devm_kfree(&pf->pdev->dev, pf->vf); + devm_kfree(dev, pf->vf); pf->vf = NULL; /* This check is for when the driver is unloaded while VFs are @@ -341,18 +341,21 @@ void ice_free_vfs(struct ice_pf *pf) * ice_trigger_vf_reset - Reset a VF on HW * @vf: pointer to the VF structure * @is_vflr: true if VFLR was issued, false if not + * @is_pfr: true if the reset was triggered due to a previous PFR * * Trigger hardware to start a reset for a particular VF. Expects the caller * to wait the proper amount of time to allow hardware to reset the VF before * it cleans up and restores VF functionality. */ -static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) +static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) { struct ice_pf *pf = vf->pf; u32 reg, reg_idx, bit_idx; + struct device *dev; struct ice_hw *hw; int vf_abs_id, i; + dev = ice_pf_to_dev(pf); hw = &pf->hw; vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; @@ -367,10 +370,13 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) */ clear_bit(ICE_VF_STATE_INIT, vf->vf_states); - /* Clear the VF's ARQLEN register. This is how the VF detects reset, - * since the VFGEN_RSTAT register doesn't stick at 0 after reset. + /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it + * in the case of VFR. If this is done for PFR, it can mess up VF + * resets because the VF driver may already have started cleanup + * by the time we get here. */ - wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0); + if (!is_pfr) + wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0); /* In the case of a VFLR, the HW has already reset the VF and we * just need to clean up, so don't hit the VFRTRIG register. 
@@ -389,12 +395,14 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) wr32(hw, PF_PCI_CIAA, VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S)); - for (i = 0; i < 100; i++) { + for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) { reg = rd32(hw, PF_PCI_CIAD); - if ((reg & VF_TRANS_PENDING_M) != 0) - dev_err(&pf->pdev->dev, - "VF %d PCI transactions stuck\n", vf->vf_id); - udelay(1); + /* no transactions pending so stop polling */ + if ((reg & VF_TRANS_PENDING_M) == 0) + break; + + dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id); + udelay(ICE_PCI_CIAD_WAIT_DELAY_US); } } @@ -435,13 +443,12 @@ static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt) */ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable) { - struct device *dev = &vsi->back->pdev->dev; struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; enum ice_status status; int ret = 0; - ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) return -ENOMEM; @@ -453,7 +460,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable) status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n", + dev_info(ice_pf_to_dev(vsi->back), "update VSI for port VLAN failed, err %d aq_err %d\n", status, hw->adminq.sq_last_status); ret = -EIO; goto out; @@ -461,7 +468,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable) vsi->info = ctxt->info; out: - devm_kfree(dev, ctxt); + kfree(ctxt); return ret; } @@ -481,19 +488,20 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id) } /** - * ice_calc_vf_first_vector_idx - Calculate absolute MSIX vector index in HW + * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space * @pf: pointer to PF structure * @vf: pointer to VF that the first MSIX vector index is being calculated for * - * This returns the first MSIX vector index in HW that is used by this VF and - * this will always be the OICR index in the AVF driver so any functionality + * This returns the first MSIX vector index in PF space that is used by this VF. + * This index is used when accessing PF relative registers such as + * GLINT_VECT2FUNC and GLINT_DYN_CTL. + * This will always be the OICR index in the AVF driver so any functionality * using vf->first_vector_idx for queue configuration will have to increment by * 1 to avoid meddling with the OICR index. 
*/ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) { - return pf->hw.func_caps.common_cap.msix_vector_first_id + - pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix; + return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix; } /** @@ -508,14 +516,16 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) LIST_HEAD(tmp_add_list); u8 broadcast[ETH_ALEN]; struct ice_vsi *vsi; + struct device *dev; int status = 0; + dev = ice_pf_to_dev(pf); /* first vector index is the VFs OICR index */ vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id); if (!vsi) { - dev_err(&pf->pdev->dev, "Failed to create VF VSI\n"); + dev_err(dev, "Failed to create VF VSI\n"); return -ENOMEM; } @@ -543,7 +553,9 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) status = ice_add_mac(&pf->hw, &tmp_add_list); if (status) - dev_err(&pf->pdev->dev, "could not add mac filters\n"); + dev_err(dev, "could not add mac filters error %d\n", status); + else + vf->num_mac = 1; /* Clear this bit after VF initialization since we shouldn't reclaim * and reassign interrupts for synchronous or asynchronous VFR events. @@ -551,9 +563,8 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) * expect vector assignment to be changed unless there is a request for * more vectors. */ - clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states); ice_alloc_vsi_res_exit: - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + ice_free_fltr_list(dev, &tmp_add_list); return status; } @@ -567,20 +578,21 @@ static int ice_alloc_vf_res(struct ice_vf *vf) int tx_rx_queue_left; int status; - /* setup VF VSI and necessary resources */ - status = ice_alloc_vsi_res(vf); - if (status) - goto ice_alloc_vf_res_exit; - /* Update number of VF queues, in case VF had requested for queue * changes */ - tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf), + ice_get_avail_rxq_count(pf)); tx_rx_queue_left += ICE_DFLT_QS_PER_VF; if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left && vf->num_req_qs != vf->num_vf_qs) vf->num_vf_qs = vf->num_req_qs; + /* setup VF VSI and necessary resources */ + status = ice_alloc_vsi_res(vf); + if (status) + goto ice_alloc_vf_res_exit; + if (vf->trusted) set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); else @@ -605,27 +617,32 @@ ice_alloc_vf_res_exit: */ static void ice_ena_vf_mappings(struct ice_vf *vf) { + int abs_vf_id, abs_first, abs_last; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + struct device *dev; int first, last, v; struct ice_hw *hw; - int abs_vf_id; u32 reg; + dev = ice_pf_to_dev(pf); hw = &pf->hw; vsi = pf->vsi[vf->lan_vsi_idx]; first = vf->first_vector_idx; last = (first + pf->num_vf_msix) - 1; + abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id; + abs_last = (abs_first + pf->num_vf_msix) - 1; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; /* VF Vector allocation */ - reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) | - ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) | + reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) | + ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M); wr32(hw, VPINT_ALLOC(vf->vf_id), reg); - reg = (((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) | - ((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) | + reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S) + & VPINT_ALLOC_PCI_FIRST_M) | + ((abs_last << VPINT_ALLOC_PCI_LAST_S) & 
VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M); wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg); /* map the interrupts to its functions */ @@ -656,8 +673,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) VPLAN_TX_QBASE_VFNUMQ_M)); wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg); } else { - dev_err(&pf->pdev->dev, - "Scattered mode for VF Tx queues is not yet implemented\n"); + dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); } /* set regardless of mapping mode */ @@ -675,8 +691,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) VPLAN_RX_QBASE_VFNUMQ_M)); wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg); } else { - dev_err(&pf->pdev->dev, - "Scattered mode for VF Rx queues is not yet implemented\n"); + dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); } } @@ -822,6 +837,7 @@ static int ice_check_avail_res(struct ice_pf *pf) { int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); u16 num_msix, num_txq, num_rxq, num_avail_msix; + struct device *dev = ice_pf_to_dev(pf); if (!pf->num_alloc_vfs || max_valid_res_idx < 0) return -EINVAL; @@ -854,8 +870,7 @@ static int ice_check_avail_res(struct ice_pf *pf) ICE_DFLT_INTR_PER_VF, ICE_MIN_INTR_PER_VF); } else { - dev_err(&pf->pdev->dev, - "Number of VFs %d exceeds max VF count %d\n", + dev_err(dev, "Number of VFs %d exceeds max VF count %d\n", pf->num_alloc_vfs, ICE_MAX_VF_COUNT); return -EIO; } @@ -870,11 +885,11 @@ static int ice_check_avail_res(struct ice_pf *pf) * at runtime through Virtchnl, that is the reason we start by reserving * few queues. */ - num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF, - ICE_MIN_QS_PER_VF); + num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf), + ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF); - num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF, - ICE_MIN_QS_PER_VF); + num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf), + ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF); if (!num_txq || !num_rxq) return -EIO; @@ -928,10 +943,17 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf) /* reallocate VF resources to finish resetting the VSI state */ if (!ice_alloc_vf_res(vf)) { + struct ice_vsi *vsi; + ice_ena_vf_mappings(vf); set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); clear_bit(ICE_VF_STATE_DIS, vf->vf_states); - vf->num_vlan = 0; + + vsi = pf->vsi[vf->lan_vsi_idx]; + if (ice_vsi_add_vlan(vsi, 0)) + dev_warn(ice_pf_to_dev(pf), + "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest", + vf->vf_id); } /* Tell the VF driver the reset is done. This needs to be done only @@ -960,7 +982,7 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, struct ice_hw *hw; hw = &pf->hw; - if (vf->num_vlan) { + if (vsi->num_vlan) { status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, rm_promisc); } else if (vf->port_vlan_id) { @@ -983,6 +1005,46 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, } /** + * ice_config_res_vfs - Finalize allocation of VFs resources in one go + * @pf: pointer to the PF structure + * + * This function is being called as last part of resetting all VFs, or when + * configuring VFs for the first time, where there is no resource to be freed + * Returns true if resources were properly allocated for all VFs, and false + * otherwise. 
+ */ +static bool ice_config_res_vfs(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + int v; + + if (ice_check_avail_res(pf)) { + dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n"); + return false; + } + + /* rearm global interrupts */ + if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state)) + ice_irq_dynamic_ena(hw, NULL, NULL); + + /* Finish resetting each VF and allocate resources */ + ice_for_each_vf(pf, v) { + struct ice_vf *vf = &pf->vf[v]; + + vf->num_vf_qs = pf->num_vf_qps; + dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id, + vf->num_vf_qs); + ice_cleanup_and_realloc_vf(vf); + } + + ice_flush(hw); + clear_bit(__ICE_VF_DIS, pf->state); + + return true; +} + +/** * ice_reset_all_vfs - reset all allocated VFs in one go * @pf: pointer to the PF structure * @is_vflr: true if VFLR was issued, false if not @@ -996,6 +1058,7 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, */ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) { + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; struct ice_vf *vf; int v, i; @@ -1009,19 +1072,18 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) return false; /* Begin reset on all VFs at once */ - for (v = 0; v < pf->num_alloc_vfs; v++) - ice_trigger_vf_reset(&pf->vf[v], is_vflr); + ice_for_each_vf(pf, v) + ice_trigger_vf_reset(&pf->vf[v], is_vflr, true); - for (v = 0; v < pf->num_alloc_vfs; v++) { + ice_for_each_vf(pf, v) { struct ice_vsi *vsi; vf = &pf->vf[v]; vsi = pf->vsi[vf->lan_vsi_idx]; - if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { - ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id); - ice_vsi_stop_rx_rings(vsi); - clear_bit(ICE_VF_STATE_ENA, vf->vf_states); - } + if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) + ice_dis_vf_qs(vf); + ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, + NULL, ICE_VF_RESET, vf->vf_id, NULL); } /* HW requires some time to make sure it can flush the FIFO for a VF @@ -1031,16 +1093,17 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) * finished resetting. */ for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { - usleep_range(10000, 20000); - /* Check each VF in sequence */ while (v < pf->num_alloc_vfs) { u32 reg; vf = &pf->vf[v]; reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); - if (!(reg & VPGEN_VFRSTAT_VFRD_M)) + if (!(reg & VPGEN_VFRSTAT_VFRD_M)) { + /* only delay if the check failed */ + usleep_range(10, 20); break; + } /* If the current VF has finished resetting, move on * to the next VF in sequence. @@ -1053,11 +1116,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) * time, but continue on with the operation. 
*/ if (v < pf->num_alloc_vfs) - dev_warn(&pf->pdev->dev, "VF reset check timeout\n"); - usleep_range(10000, 20000); + dev_warn(dev, "VF reset check timeout\n"); /* free VF resources to begin resetting the VSI state */ - for (v = 0; v < pf->num_alloc_vfs; v++) { + ice_for_each_vf(pf, v) { vf = &pf->vf[v]; ice_free_vf_res(vf); @@ -1071,30 +1133,31 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) } if (ice_sriov_free_msix_res(pf)) - dev_err(&pf->pdev->dev, - "Failed to free MSIX resources used by SR-IOV\n"); + dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); - if (ice_check_avail_res(pf)) { - dev_err(&pf->pdev->dev, - "Cannot allocate VF resources, try with fewer number of VFs\n"); + if (!ice_config_res_vfs(pf)) return false; - } - - /* Finish the reset on each VF */ - for (v = 0; v < pf->num_alloc_vfs; v++) { - vf = &pf->vf[v]; - vf->num_vf_qs = pf->num_vf_qps; - dev_dbg(&pf->pdev->dev, - "VF-id %d has %d queues configured\n", - vf->vf_id, vf->num_vf_qs); - ice_cleanup_and_realloc_vf(vf); - } + return true; +} - ice_flush(hw); - clear_bit(__ICE_VF_DIS, pf->state); +/** + * ice_is_vf_disabled + * @vf: pointer to the VF info + * + * Returns true if the PF or VF is disabled, false otherwise. + */ +static bool ice_is_vf_disabled(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; - return true; + /* If the PF has been disabled, there is no need resetting VF until + * PF is active again. Similarly, if the VF has been disabled, this + * means something else is resetting the VF, so we shouldn't continue. + * Otherwise, set disable VF state bit for actual reset, and continue. + */ + return (test_bit(__ICE_VF_DIS, pf->state) || + test_bit(ICE_VF_STATE_DIS, vf->vf_states)); } /** @@ -1108,33 +1171,35 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) { struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + struct device *dev; struct ice_hw *hw; bool rsd = false; u8 promisc_m; u32 reg; int i; - /* If the VFs have been disabled, this means something else is - * resetting the VF, so we shouldn't continue. - */ - if (test_and_set_bit(__ICE_VF_DIS, pf->state)) - return false; + dev = ice_pf_to_dev(pf); - ice_trigger_vf_reset(vf, is_vflr); + if (ice_is_vf_disabled(vf)) { + dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", + vf->vf_id); + return true; + } + + /* Set VF disable bit state here, before triggering reset */ + set_bit(ICE_VF_STATE_DIS, vf->vf_states); + ice_trigger_vf_reset(vf, is_vflr, false); vsi = pf->vsi[vf->lan_vsi_idx]; - if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { - ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id); - ice_vsi_stop_rx_rings(vsi); - clear_bit(ICE_VF_STATE_ENA, vf->vf_states); - } else { - /* Call Disable LAN Tx queue AQ call even when queues are not - * enabled. This is needed for successful completiom of VFR - */ - ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, - NULL, ICE_VF_RESET, vf->vf_id, NULL); - } + if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) + ice_dis_vf_qs(vf); + + /* Call Disable LAN Tx queue AQ whether or not queues are + * enabled. This is needed for successful completion of VFR. + */ + ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL, + NULL, ICE_VF_RESET, vf->vf_id, NULL); hw = &pf->hw; /* poll VPGEN_VFRSTAT reg to make sure @@ -1145,36 +1210,35 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) * poll the status register to make sure that the reset * completed successfully. 
*/ - usleep_range(10000, 20000); reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); if (reg & VPGEN_VFRSTAT_VFRD_M) { rsd = true; break; } + + /* only sleep if the reset is not done */ + usleep_range(10, 20); } /* Display a warning if VF didn't manage to reset in time, but need to * continue on with the operation. */ if (!rsd) - dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n", - vf->vf_id); - - usleep_range(10000, 20000); + dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id); /* disable promiscuous modes in case they were enabled * ignore any error if disabling process failed */ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) { - if (vf->port_vlan_id || vf->num_vlan) + if (vf->port_vlan_id || vsi->num_vlan) promisc_m = ICE_UCAST_VLAN_PROMISC_BITS; else promisc_m = ICE_UCAST_PROMISC_BITS; vsi = pf->vsi[vf->lan_vsi_idx]; if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true)) - dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n"); + dev_err(dev, "disabling promiscuous mode failed\n"); } /* free VF resources to begin resetting the VSI state */ @@ -1183,7 +1247,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) ice_cleanup_and_realloc_vf(vf); ice_flush(hw); - clear_bit(__ICE_VF_DIS, pf->state); return true; } @@ -1196,7 +1259,7 @@ void ice_vc_notify_link_state(struct ice_pf *pf) { int i; - for (i = 0; i < pf->num_alloc_vfs; i++) + ice_for_each_vf(pf, i) ice_vc_notify_vf_link_state(&pf->vf[i]); } @@ -1226,19 +1289,26 @@ void ice_vc_notify_reset(struct ice_pf *pf) static void ice_vc_notify_vf_reset(struct ice_vf *vf) { struct virtchnl_pf_event pfe; + struct ice_pf *pf; - /* validate the request */ - if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + if (!vf) return; - /* verify if the VF is in either init or active before proceeding */ - if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && - !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + pf = vf->pf; + if (ice_validate_vf_id(pf, vf->vf_id)) + return; + + /* Bail out if VF is in disabled state, neither initialized, nor active + * state - otherwise proceed with notifications + */ + if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && + !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) || + test_bit(ICE_VF_STATE_DIS, vf->vf_states)) return; pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; - ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, + ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL); } @@ -1250,6 +1320,7 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf) */ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) { + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; struct ice_vf *vfs; int i, ret; @@ -1257,7 +1328,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) /* Disable global interrupt 0 so we don't try to handle the VFLR. 
*/ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); - + set_bit(__ICE_OICR_INTR_DIS, pf->state); ice_flush(hw); ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); @@ -1266,16 +1337,16 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) goto err_unroll_intr; } /* allocate memory */ - vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs), - GFP_KERNEL); + vfs = devm_kcalloc(dev, num_alloc_vfs, sizeof(*vfs), GFP_KERNEL); if (!vfs) { ret = -ENOMEM; goto err_pci_disable_sriov; } pf->vf = vfs; + pf->num_alloc_vfs = num_alloc_vfs; /* apply default profile */ - for (i = 0; i < num_alloc_vfs; i++) { + ice_for_each_vf(pf, i) { vfs[i].pf = pf; vfs[i].vf_sw_id = pf->first_sw; vfs[i].vf_id = i; @@ -1283,23 +1354,19 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) /* assign default capabilities */ set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); vfs[i].spoofchk = true; - - /* Set this state so that PF driver does VF vector assignment */ - set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states); } - pf->num_alloc_vfs = num_alloc_vfs; - /* VF resources get allocated during reset */ - if (!ice_reset_all_vfs(pf, true)) { + /* VF resources get allocated with initialization */ + if (!ice_config_res_vfs(pf)) { ret = -EIO; goto err_unroll_sriov; } - goto err_unroll_intr; + return ret; err_unroll_sriov: pf->vf = NULL; - devm_kfree(&pf->pdev->dev, vfs); + devm_kfree(dev, vfs); vfs = NULL; pf->num_alloc_vfs = 0; err_pci_disable_sriov: @@ -1307,6 +1374,7 @@ err_pci_disable_sriov: err_unroll_intr: /* rearm interrupts here */ ice_irq_dynamic_ena(hw, NULL, NULL); + clear_bit(__ICE_OICR_INTR_DIS, pf->state); return ret; } @@ -1343,7 +1411,7 @@ static bool ice_pf_state_is_nominal(struct ice_pf *pf) static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) { int pre_existing_vfs = pci_num_vf(pf->pdev); - struct device *dev = &pf->pdev->dev; + struct device *dev = ice_pf_to_dev(pf); int err; if (!ice_pf_state_is_nominal(pf)) { @@ -1353,7 +1421,7 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { dev_err(dev, "This device is not capable of SR-IOV\n"); - return -ENODEV; + return -EOPNOTSUPP; } if (pre_existing_vfs && pre_existing_vfs != num_vfs) @@ -1388,6 +1456,12 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct ice_pf *pf = pci_get_drvdata(pdev); + struct device *dev = ice_pf_to_dev(pf); + + if (ice_is_safe_mode(pf)) { + dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n"); + return -EOPNOTSUPP; + } if (num_vfs) return ice_pci_sriov_ena(pf, num_vfs); @@ -1395,8 +1469,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!pci_vfs_assigned(pdev)) { ice_free_vfs(pf); } else { - dev_err(&pf->pdev->dev, - "can't free VFs because some are assigned to VMs.\n"); + dev_err(dev, "can't free VFs because some are assigned to VMs.\n"); return -EBUSY; } @@ -1420,7 +1493,7 @@ void ice_process_vflr_event(struct ice_pf *pf) !pf->num_alloc_vfs) return; - for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { + ice_for_each_vf(pf, vf_id) { struct ice_vf *vf = &pf->vf[vf_id]; u32 reg_idx, bit_idx; @@ -1435,12 +1508,10 @@ void ice_process_vflr_event(struct ice_pf *pf) } /** - * ice_vc_dis_vf - Disable a given VF via SW reset + * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF * @vf: pointer to the VF info - * - * Disable the VF through a SW reset */ -static void 
ice_vc_dis_vf(struct ice_vf *vf) +static void ice_vc_reset_vf(struct ice_vf *vf) { ice_vc_notify_vf_reset(vf); ice_reset_vf(vf, false); @@ -1461,24 +1532,27 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { enum ice_status aq_ret; + struct device *dev; struct ice_pf *pf; - /* validate the request */ - if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + if (!vf) return -EINVAL; pf = vf->pf; + if (ice_validate_vf_id(pf, vf->vf_id)) + return -EINVAL; + + dev = ice_pf_to_dev(pf); /* single place to detect unsuccessful return values */ if (v_retval) { vf->num_inval_msgs++; - dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", - vf->vf_id, v_opcode, v_retval); + dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id, + v_opcode, v_retval); if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) { - dev_err(&pf->pdev->dev, - "Number of invalid messages exceeded for VF %d\n", + dev_err(dev, "Number of invalid messages exceeded for VF %d\n", vf->vf_id); - dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); + dev_err(dev, "Use PF Control I/F to enable the VF\n"); set_bit(ICE_VF_STATE_DIS, vf->vf_states); return -EIO; } @@ -1490,10 +1564,9 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); - if (aq_ret) { - dev_info(&pf->pdev->dev, - "Unable to send the message to VF %d aq_err %d\n", - vf->vf_id, pf->hw.mailboxq.sq_last_status); + if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { + dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n", + vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status); return -EIO; } @@ -1539,14 +1612,14 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) int len = 0; int ret; - if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + if (ice_check_vf_init(pf, vf)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto err; } len = sizeof(struct virtchnl_vf_resource); - vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL); + vfres = kzalloc(len, GFP_KERNEL); if (!vfres) { v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; len = 0; @@ -1612,6 +1685,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) ether_addr_copy(vfres->vsi_res[0].default_mac_addr, vf->dflt_lan_addr.addr); + /* match guest capabilities */ + vf->driver_caps = vfres->vf_cap_flags; + set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); err: @@ -1619,7 +1695,7 @@ err: ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret, (u8 *)vfres, len); - devm_kfree(&pf->pdev->dev, vfres); + kfree(vfres); return ret; } @@ -1688,6 +1764,21 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) } /** + * ice_vc_isvalid_ring_len + * @ring_len: length of ring + * + * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE + * or zero + */ +static bool ice_vc_isvalid_ring_len(u16 ring_len) +{ + return ring_len == 0 || + (ring_len >= ICE_MIN_NUM_DESC && + ring_len <= ICE_MAX_NUM_DESC && + !(ring_len % ICE_REQ_DESC_MULTIPLE)); +} + +/** * ice_vc_config_rss_key * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1700,7 +1791,7 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi = NULL; + struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1712,18 +1803,18 @@ static 
int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) { + if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1747,7 +1838,7 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi = NULL; + struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1759,18 +1850,18 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) goto error_param; } - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) { + if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1783,6 +1874,87 @@ error_param: } /** + * ice_set_vf_spoofchk + * @netdev: network interface device structure + * @vf_id: VF identifier + * @ena: flag to enable or disable feature + * + * Enable or disable VF spoof checking + */ +int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + struct ice_vsi_ctx *ctx; + struct ice_vsi *vf_vsi; + enum ice_status status; + struct device *dev; + struct ice_vf *vf; + int ret = 0; + + dev = ice_pf_to_dev(pf); + if (ice_validate_vf_id(pf, vf_id)) + return -EINVAL; + + vf = &pf->vf[vf_id]; + + if (ice_check_vf_init(pf, vf)) + return -EBUSY; + + vf_vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vf_vsi) { + netdev_err(netdev, "VSI %d for VF %d is null\n", + vf->lan_vsi_idx, vf->vf_id); + return -EINVAL; + } + + if (vf_vsi->type != ICE_VSI_VF) { + netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n", + vf_vsi->type, vf_vsi->vsi_num, vf->vf_id); + return -ENODEV; + } + + if (ena == vf->spoofchk) { + dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF"); + return 0; + } + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->info.sec_flags = vf_vsi->info.sec_flags; + ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); + if (ena) { + ctx->info.sec_flags |= + ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF | + (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); + } else { + ctx->info.sec_flags &= + ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF | + (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)); + } + + status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL); + if (status) { + dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d", + ena ? 
"en" : "dis", vf->vf_id, vf_vsi->vsi_num, status); + ret = -EIO; + goto out; + } + + /* only update spoofchk state and VSI context on success */ + vf_vsi->info.sec_flags = ctx->info.sec_flags; + vf->spoofchk = ena; + +out: + kfree(ctx); + return ret; +} + +/** * ice_vc_get_stats_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1794,8 +1966,8 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; + struct ice_eth_stats stats = { 0 }; struct ice_pf *pf = vf->pf; - struct ice_eth_stats stats; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { @@ -1814,7 +1986,6 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - memset(&stats, 0, sizeof(struct ice_eth_stats)); ice_update_eth_stats(vsi); stats = vsi->eth_stats; @@ -1839,6 +2010,8 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) (struct virtchnl_queue_select *)msg; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + unsigned long q_map; + u16 vf_q_id; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1855,6 +2028,12 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } + if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF || + vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1865,12 +2044,47 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) * Tx queue group list was configured and the context bits were * programmed using ice_vsi_cfg_txqs */ - if (ice_vsi_start_rx_rings(vsi)) - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + q_map = vqs->rx_queues; + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if enabled */ + if (test_bit(vf_q_id, vf->rxq_ena)) + continue; + + if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) { + dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n", + vf_q_id, vsi->vsi_num); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + set_bit(vf_q_id, vf->rxq_ena); + vf->num_qs_ena++; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + q_map = vqs->tx_queues; + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if enabled */ + if (test_bit(vf_q_id, vf->txq_ena)) + continue; + + set_bit(vf_q_id, vf->txq_ena); + vf->num_qs_ena++; + } /* Set flag to indicate that queues are enabled */ if (v_ret == VIRTCHNL_STATUS_SUCCESS) - set_bit(ICE_VF_STATE_ENA, vf->vf_states); + set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); error_param: /* send the response to the VF */ @@ -1893,9 +2107,11 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) (struct virtchnl_queue_select *)msg; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + unsigned long q_map; + u16 vf_q_id; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) && - !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { + !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1910,29 +2126,79 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } + if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF || + 
vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - if (ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) { - dev_err(&vsi->back->pdev->dev, - "Failed to stop tx rings on VSI %d\n", - vsi->vsi_num); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (vqs->tx_queues) { + q_map = vqs->tx_queues; + + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + struct ice_ring *ring = vsi->tx_rings[vf_q_id]; + struct ice_txq_meta txq_meta = { 0 }; + + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if not enabled */ + if (!test_bit(vf_q_id, vf->txq_ena)) + continue; + + ice_fill_txq_meta(vsi, ring, &txq_meta); + + if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, + ring, &txq_meta)) { + dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n", + vf_q_id, vsi->vsi_num); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Clear enabled queues flag */ + clear_bit(vf_q_id, vf->txq_ena); + vf->num_qs_ena--; + } } - if (ice_vsi_stop_rx_rings(vsi)) { - dev_err(&vsi->back->pdev->dev, - "Failed to stop rx rings on VSI %d\n", - vsi->vsi_num); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (vqs->rx_queues) { + q_map = vqs->rx_queues; + + for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) { + if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Skip queue if not enabled */ + if (!test_bit(vf_q_id, vf->rxq_ena)) + continue; + + if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) { + dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n", + vf_q_id, vsi->vsi_num); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Clear enabled queues flag */ + clear_bit(vf_q_id, vf->rxq_ena); + vf->num_qs_ena--; + } } /* Clear enabled queues flag */ - if (v_ret == VIRTCHNL_STATUS_SUCCESS) - clear_bit(ICE_VF_STATE_ENA, vf->vf_states); + if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena) + clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); error_param: /* send the response to the VF */ @@ -1962,12 +2228,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) irqmap_info = (struct virtchnl_irq_map_info *)msg; num_q_vectors_mapped = irqmap_info->num_vectors; - vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; - } - /* Check to make sure number of VF vectors mapped is not greater than * number of VF vectors originally allocated, and check that * there is actually at least a single VF queue vector mapped @@ -1979,6 +2239,12 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) goto error_param; } + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + for (i = 0; i < num_q_vectors_mapped; i++) { struct ice_q_vector *q_vector; @@ -1986,9 +2252,11 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) vector_id = map->vector_id; vsi_id = map->vsi_id; - /* validate msg params */ - if (!(vector_id < pf->hw.func_caps.common_cap - .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id) || + /* vector_id is always 0-based for each VF, and can never be + * larger than or equal to the max allowed interrupts per VF + */ + if (!(vector_id < ICE_MAX_INTR_PER_VF) || + !ice_vc_isvalid_vsi_id(vf, vsi_id) || (!vector_id && 
(map->rxq_map || map->txq_map))) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -2056,6 +2324,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; + u16 num_rxq = 0, num_txq = 0; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int i; @@ -2071,13 +2340,15 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) } vsi = pf->vsi[vf->lan_vsi_idx]; - if (!vsi) + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; + } - if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) { - dev_err(&pf->pdev->dev, - "VF-%d requesting more than supported number of queues: %d\n", - vf->vf_id, qci->num_queue_pairs); + if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF || + qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { + dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n", + vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -2087,37 +2358,52 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) if (qpi->txq.vsi_id != qci->vsi_id || qpi->rxq.vsi_id != qci->vsi_id || qpi->rxq.queue_id != qpi->txq.queue_id || + qpi->txq.headwb_enabled || + !ice_vc_isvalid_ring_len(qpi->txq.ring_len) || + !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) || !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } /* copy Tx queue info from VF into VSI */ - vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; - vsi->tx_rings[i]->count = qpi->txq.ring_len; - /* copy Rx queue info from VF into VSI */ - vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; - vsi->rx_rings[i]->count = qpi->rxq.ring_len; - if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; + if (qpi->txq.ring_len > 0) { + num_txq++; + vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; + vsi->tx_rings[i]->count = qpi->txq.ring_len; } - vsi->rx_buf_len = qpi->rxq.databuffer_size; - if (qpi->rxq.max_pkt_size >= (16 * 1024) || - qpi->rxq.max_pkt_size < 64) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; + + /* copy Rx queue info from VF into VSI */ + if (qpi->rxq.ring_len > 0) { + num_rxq++; + vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; + vsi->rx_rings[i]->count = qpi->rxq.ring_len; + + if (qpi->rxq.databuffer_size != 0 && + (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || + qpi->rxq.databuffer_size < 1024)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi->rx_buf_len = qpi->rxq.databuffer_size; + vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len; + if (qpi->rxq.max_pkt_size >= (16 * 1024) || + qpi->rxq.max_pkt_size < 64) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } } + vsi->max_frame = qpi->rxq.max_pkt_size; } /* VF can request to configure less than allocated queues * or default allocated queues. 
So update the VSI with new number */ - vsi->num_txq = qci->num_queue_pairs; - vsi->num_rxq = qci->num_queue_pairs; + vsi->num_txq = num_txq; + vsi->num_rxq = num_rxq; /* All queues of VF VSI are in TC 0 */ - vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs; - vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs; + vsi->tc_cfg.tc_info[0].qcount_tx = num_txq; + vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq; if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi)) v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; @@ -2156,6 +2442,83 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf) } /** + * ice_vc_add_mac_addr - attempt to add the MAC address passed in + * @vf: pointer to the VF info + * @vsi: pointer to the VF's VSI + * @mac_addr: MAC address to add + */ +static int +ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + enum ice_status status; + + /* default unicast MAC already added */ + if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr)) + return 0; + + if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) { + dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); + return -EPERM; + } + + status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true); + if (status == ICE_ERR_ALREADY_EXISTS) { + dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr, + vf->vf_id); + return -EEXIST; + } else if (status) { + dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n", + mac_addr, vf->vf_id, status); + return -EIO; + } + + /* only set dflt_lan_addr once */ + if (is_zero_ether_addr(vf->dflt_lan_addr.addr) && + is_unicast_ether_addr(mac_addr)) + ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr); + + vf->num_mac++; + + return 0; +} + +/** + * ice_vc_del_mac_addr - attempt to delete the MAC address passed in + * @vf: pointer to the VF info + * @vsi: pointer to the VF's VSI + * @mac_addr: MAC address to delete + */ +static int +ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + enum ice_status status; + + if (!ice_can_vf_change_mac(vf) && + ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr)) + return 0; + + status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false); + if (status == ICE_ERR_DOES_NOT_EXIST) { + dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr, + vf->vf_id); + return -ENOENT; + } else if (status) { + dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n", + mac_addr, vf->vf_id, status); + return -EIO; + } + + if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr)) + eth_zero_addr(vf->dflt_lan_addr.addr); + + vf->num_mac--; + + return 0; +} + +/** * ice_vc_handle_mac_addr_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2166,20 +2529,23 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf) static int ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) { + int (*ice_vc_cfg_mac) + (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr); enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_ether_addr_list *al = (struct virtchnl_ether_addr_list *)msg; struct ice_pf *pf = vf->pf; enum virtchnl_ops vc_op; - LIST_HEAD(mac_list); struct ice_vsi *vsi; - int mac_count = 0; int i; - if (set) + if (set) { vc_op = VIRTCHNL_OP_ADD_ETH_ADDR; - else + ice_vc_cfg_mac = ice_vc_add_mac_addr; + } else { vc_op = VIRTCHNL_OP_DEL_ETH_ADDR; + ice_vc_cfg_mac = ice_vc_del_mac_addr; + } if 
(!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { @@ -2187,14 +2553,14 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) goto handle_mac_exit; } + /* If this VF is not privileged, then we can't add more than a + * limited number of addresses. Check to make sure that the + * additions do not push us over the limit. + */ if (set && !ice_is_vf_trusted(vf) && (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) { - dev_err(&pf->pdev->dev, - "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n", + dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n", vf->vf_id); - /* There is no need to let VF know about not being trusted - * to add more MAC addr, so we can just return success message. - */ v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto handle_mac_exit; } @@ -2206,72 +2572,23 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) } for (i = 0; i < al->num_elements; i++) { - u8 *maddr = al->list[i].addr; + u8 *mac_addr = al->list[i].addr; + int result; - if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) || - is_broadcast_ether_addr(maddr)) { - if (set) { - /* VF is trying to add filters that the PF - * already added. Just continue. - */ - dev_info(&pf->pdev->dev, - "MAC %pM already set for VF %d\n", - maddr, vf->vf_id); - continue; - } else { - /* VF can't remove dflt_lan_addr/bcast MAC */ - dev_err(&pf->pdev->dev, - "VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n", - maddr, vf->vf_id); - continue; - } - } - - /* check for the invalid cases and bail if necessary */ - if (is_zero_ether_addr(maddr)) { - dev_err(&pf->pdev->dev, - "invalid MAC %pM provided for VF %d\n", - maddr, vf->vf_id); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto handle_mac_exit; - } - - if (is_unicast_ether_addr(maddr) && - !ice_can_vf_change_mac(vf)) { - dev_err(&pf->pdev->dev, - "can't change unicast MAC for untrusted VF %d\n", - vf->vf_id); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto handle_mac_exit; - } + if (is_broadcast_ether_addr(mac_addr) || + is_zero_ether_addr(mac_addr)) + continue; - /* get here if maddr is multicast or if VF can change MAC */ - if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) { - v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + result = ice_vc_cfg_mac(vf, vsi, mac_addr); + if (result == -EEXIST || result == -ENOENT) { + continue; + } else if (result) { + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; goto handle_mac_exit; } - mac_count++; - } - - /* program the updated filter list */ - if (set) - v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list)); - else - v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list)); - - if (v_ret) { - dev_err(&pf->pdev->dev, - "can't update MAC filters for VF %d, error %d\n", - vf->vf_id, v_ret); - } else { - if (set) - vf->num_mac += mac_count; - else - vf->num_mac -= mac_count; } handle_mac_exit: - ice_free_fltr_list(&pf->pdev->dev, &mac_list); /* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0); } @@ -2315,41 +2632,41 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; - int req_queues = vfres->num_queue_pairs; + u16 req_queues = vfres->num_queue_pairs; struct ice_pf *pf = vf->pf; - 
int max_allowed_vf_queues; - int tx_rx_queue_left; - int cur_queues; + u16 max_allowed_vf_queues; + u16 tx_rx_queue_left; + struct device *dev; + u16 cur_queues; + dev = ice_pf_to_dev(pf); if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } cur_queues = vf->num_vf_qs; - tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf), + ice_get_avail_rxq_count(pf)); max_allowed_vf_queues = tx_rx_queue_left + cur_queues; - if (req_queues <= 0) { - dev_err(&pf->pdev->dev, - "VF %d tried to request %d queues. Ignoring.\n", - vf->vf_id, req_queues); + if (!req_queues) { + dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n", + vf->vf_id); } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) { - dev_err(&pf->pdev->dev, - "VF %d tried to request more than %d queues.\n", + dev_err(dev, "VF %d tried to request more than %d queues.\n", vf->vf_id, ICE_MAX_BASE_QS_PER_VF); vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF; - } else if (req_queues - cur_queues > tx_rx_queue_left) { - dev_warn(&pf->pdev->dev, - "VF %d requested %d more queues, but only %d left.\n", + } else if (req_queues > cur_queues && + req_queues - cur_queues > tx_rx_queue_left) { + dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n", vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); - vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues, + vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues, ICE_MAX_BASE_QS_PER_VF); } else { /* request is successful, then reset VF */ vf->num_req_qs = req_queues; - ice_vc_dis_vf(vf); - dev_info(&pf->pdev->dev, - "VF %d granted request of %d queues.\n", + ice_vc_reset_vf(vf); + dev_info(dev, "VF %d granted request of %u queues.\n", vf->vf_id, req_queues); return 0; } @@ -2375,39 +2692,34 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, __be16 vlan_proto) { u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S); - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_vsi *vsi; + struct device *dev; struct ice_vf *vf; int ret = 0; - /* validate the request */ - if (vf_id >= pf->num_alloc_vfs) { - dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id); + dev = ice_pf_to_dev(pf); + if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; - } if (vlan_id > ICE_MAX_VLANID || qos > 7) { - dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); + dev_err(dev, "Invalid VF Parameters\n"); return -EINVAL; } if (vlan_proto != htons(ETH_P_8021Q)) { - dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n"); + dev_err(dev, "VF VLAN protocol is not supported\n"); return -EPROTONOSUPPORT; } vf = &pf->vf[vf_id]; vsi = pf->vsi[vf->lan_vsi_idx]; - if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { - dev_err(&pf->pdev->dev, "VF %d in reset. 
@@ -2375,39 +2692,34 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
              __be16 vlan_proto)
 {
     u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
-    struct ice_netdev_priv *np = netdev_priv(netdev);
-    struct ice_pf *pf = np->vsi->back;
+    struct ice_pf *pf = ice_netdev_to_pf(netdev);
     struct ice_vsi *vsi;
+    struct device *dev;
     struct ice_vf *vf;
     int ret = 0;
 
-    /* validate the request */
-    if (vf_id >= pf->num_alloc_vfs) {
-        dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+    dev = ice_pf_to_dev(pf);
+    if (ice_validate_vf_id(pf, vf_id))
         return -EINVAL;
-    }
 
     if (vlan_id > ICE_MAX_VLANID || qos > 7) {
-        dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+        dev_err(dev, "Invalid VF Parameters\n");
         return -EINVAL;
     }
 
     if (vlan_proto != htons(ETH_P_8021Q)) {
-        dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+        dev_err(dev, "VF VLAN protocol is not supported\n");
         return -EPROTONOSUPPORT;
     }
 
     vf = &pf->vf[vf_id];
     vsi = pf->vsi[vf->lan_vsi_idx];
-    if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
-        dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+    if (ice_check_vf_init(pf, vf))
         return -EBUSY;
-    }
 
     if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
         /* duplicate request, so just return success */
-        dev_info(&pf->pdev->dev,
-             "Duplicate pvid %d request\n", vlanprio);
+        dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
         return ret;
     }
 
@@ -2426,7 +2738,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
     }
 
     if (vlan_id) {
-        dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+        dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
             vlan_id, qos, vf_id);
 
         /* add new VLAN filter for each MAC */
@@ -2445,6 +2757,17 @@ error_set_pvid:
 }
 
 /**
+ * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
+ */
+static bool ice_vf_vlan_offload_ena(u32 caps)
+{
+    return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
+}
+
+/**
  * ice_vc_process_vlan_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
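
The vlanprio value packed above combines the VLAN ID and the QoS priority into one 16-bit word for the PVID field. The sketch below shows the same packing using the standard 802.1Q TCI layout (PCP in bits 15:13, VLAN ID in bits 11:0); the driver's own ICE_VLAN_PRIORITY_S shift is defined in its headers and may place the priority differently:

#include <stdint.h>
#include <stdio.h>

#define VLAN_PCP_SHIFT 13	/* 802.1Q priority code point position */
#define VLAN_VID_MASK  0x0fff	/* 12-bit VLAN ID */

static uint16_t pack_vlan(uint16_t vid, uint8_t pcp)
{
	/* priority in the high bits, VLAN ID in the low 12 bits */
	return (uint16_t)((vid & VLAN_VID_MASK) | (pcp << VLAN_PCP_SHIFT));
}

int main(void)
{
	/* VLAN 100, priority 5 -> 0xa064 in the TCI layout */
	printf("0x%04x\n", pack_vlan(100, 5));
	return 0;
}
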
"en" : "dis", vsi->vsi_num); + if (vsi->info.pvid) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -2524,9 +2845,8 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) u16 vid = vfl->vlan_id[i]; if (!ice_is_vf_trusted(vf) && - vf->num_vlan >= ICE_MAX_VLAN_PER_VF) { - dev_info(&pf->pdev->dev, - "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n", + vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) { + dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n", vf->vf_id); /* There is no need to let VF know about being * not trusted, so we can just return success @@ -2535,19 +2855,26 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) goto error_param; } - if (ice_vsi_add_vlan(vsi, vid)) { + /* we add VLAN 0 by default for each VF so we can enable + * Tx VLAN anti-spoof without triggering MDD events so + * we don't need to add it again here + */ + if (!vid) + continue; + + status = ice_vsi_add_vlan(vsi, vid); + if (status) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vf->num_vlan++; + vsi->num_vlan++; /* Enable VLAN pruning when VLAN is added */ if (!vlan_promisc) { status = ice_cfg_vlan_pruning(vsi, true, false); if (status) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(&pf->pdev->dev, - "Enable VLAN pruning on VLAN ID: %d failed error-%d\n", + dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n", vid, status); goto error_param; } @@ -2560,8 +2887,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) promisc_m, vid); if (status) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(&pf->pdev->dev, - "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n", + dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n", vid, status); } } @@ -2576,21 +2902,30 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) */ int num_vf_vlan; - num_vf_vlan = vf->num_vlan; + num_vf_vlan = vsi->num_vlan; for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) { u16 vid = vfl->vlan_id[i]; + /* we add VLAN 0 by default for each VF so we can enable + * Tx VLAN anti-spoof without triggering MDD events so + * we don't want a VIRTCHNL request to remove it + */ + if (!vid) + continue; + /* Make sure ice_vsi_kill_vlan is successful before * updating VLAN information */ - if (ice_vsi_kill_vlan(vsi, vid)) { + status = ice_vsi_kill_vlan(vsi, vid); + if (status) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vf->num_vlan--; - /* Disable VLAN pruning when removing VLAN */ - ice_cfg_vlan_pruning(vsi, false, false); + vsi->num_vlan--; + /* Disable VLAN pruning when the last VLAN is removed */ + if (!vsi->num_vlan) + ice_cfg_vlan_pruning(vsi, false, false); /* Disable Unicast/Multicast VLAN promiscuous mode */ if (vlan_promisc) { @@ -2654,6 +2989,11 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) goto error_param; } + if (!ice_vf_vlan_offload_ena(vf->driver_caps)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; if (ice_vsi_manage_vlan_stripping(vsi, true)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2680,6 +3020,11 @@ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf) goto error_param; } + if (!ice_vf_vlan_offload_ena(vf->driver_caps)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2695,6 
@@ -2695,6 +3040,33 @@ error_param:
 }
 
 /**
+ * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
+ * @vf: VF to enable/disable VLAN stripping for on initialization
+ *
+ * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set enable VLAN stripping, else if
+ * the flag is cleared then we want to disable stripping. For example, the flag
+ * will be cleared when port VLANs are configured by the administrator before
+ * passing the VF to the guest or if the AVF driver doesn't support VLAN
+ * offloads.
+ */
+static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
+{
+    struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
+
+    if (!vsi)
+        return -EINVAL;
+
+    /* don't modify stripping if port VLAN is configured */
+    if (vsi->info.pvid)
+        return 0;
+
+    if (ice_vf_vlan_offload_ena(vf->driver_caps))
+        return ice_vsi_manage_vlan_stripping(vsi, true);
+    else
+        return ice_vsi_manage_vlan_stripping(vsi, false);
+}
+
+/**
  * ice_vc_process_vf_msg - Process request from VF
  * @pf: pointer to the PF structure
  * @event: pointer to the AQ event
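
The decision ice_vf_init_vlan_stripping() makes is a small tri-state: leave stripping untouched when a port VLAN owns the VSI, otherwise let the negotiated offload flag decide. A user-space sketch of that decision; the flag value and names are illustrative stand-ins for the virtchnl bit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VF_OFFLOAD_VLAN 0x00010000	/* stand-in for VIRTCHNL_VF_OFFLOAD_VLAN */

/* Returns 0 when stripping must not be touched (port VLAN configured),
 * 1 when the caller should apply *ena.
 */
static int init_stripping(uint32_t caps, uint16_t pvid, bool *ena)
{
	if (pvid)
		return 0;	/* administrator owns the VLAN config */
	*ena = !!(caps & VF_OFFLOAD_VLAN);
	return 1;
}

int main(void)
{
	bool ena;

	if (init_stripping(VF_OFFLOAD_VLAN, 0, &ena))
		printf("apply stripping: %s\n", ena ? "on" : "off");
	else
		printf("port VLAN present, leave stripping alone\n");
	return 0;
}
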
@@ -2709,9 +3081,11 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
     u16 msglen = event->msg_len;
     u8 *msg = event->msg_buf;
     struct ice_vf *vf = NULL;
+    struct device *dev;
     int err = 0;
 
-    if (vf_id >= pf->num_alloc_vfs) {
+    dev = ice_pf_to_dev(pf);
+    if (ice_validate_vf_id(pf, vf_id)) {
         err = -EINVAL;
         goto error_handler;
     }
@@ -2731,27 +3105,13 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
             err = -EPERM;
         else
             err = -EINVAL;
-        goto error_handler;
-    }
-
-    /* Perform additional checks specific to RSS and Virtchnl */
-    if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
-        struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
-
-        if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE)
-            err = -EINVAL;
-    } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
-        struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
-
-        if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE)
-            err = -EINVAL;
     }
 
 error_handler:
     if (err) {
         ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
                       NULL, 0);
-        dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
+        dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
             vf_id, v_opcode, msglen, err);
         return;
     }
@@ -2762,6 +3122,10 @@ error_handler:
         break;
     case VIRTCHNL_OP_GET_VF_RESOURCES:
         err = ice_vc_get_vf_res_msg(vf, msg);
+        if (ice_vf_init_vlan_stripping(vf))
+            dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
+                vf->vf_id);
+        ice_vc_notify_vf_link_state(vf);
         break;
     case VIRTCHNL_OP_RESET_VF:
         ice_vc_reset_vf_msg(vf);
@@ -2811,8 +3175,8 @@ error_handler:
         break;
     case VIRTCHNL_OP_UNKNOWN:
     default:
-        dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
-            v_opcode, vf_id);
+        dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
            vf_id);
         err = ice_vc_send_msg_to_vf(vf, v_opcode,
                         VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
                         NULL, 0);
@@ -2822,8 +3186,7 @@ error_handler:
         /* Helper function cares less about error return values here
         * as it is busy with pending work.
         */
-        dev_info(&pf->pdev->dev,
-             "PF failed to honor VF %d, opcode %d, error %d\n",
+        dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
             vf_id, v_opcode, err);
     }
 }
@@ -2839,24 +3202,18 @@ error_handler:
 int
 ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
 {
-    struct ice_netdev_priv *np = netdev_priv(netdev);
-    struct ice_vsi *vsi = np->vsi;
-    struct ice_pf *pf = vsi->back;
+    struct ice_pf *pf = ice_netdev_to_pf(netdev);
+    struct ice_vsi *vsi;
     struct ice_vf *vf;
 
-    /* validate the request */
-    if (vf_id >= pf->num_alloc_vfs) {
-        netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+    if (ice_validate_vf_id(pf, vf_id))
        return -EINVAL;
-    }
 
     vf = &pf->vf[vf_id];
     vsi = pf->vsi[vf->lan_vsi_idx];
-    if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
-        netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+    if (ice_check_vf_init(pf, vf))
        return -EBUSY;
-    }
 
     ivi->vf = vf_id;
     ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
@@ -2880,66 +3237,20 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
 }
 
 /**
- * ice_set_vf_spoofchk
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @ena: flag to enable or disable feature
+ * ice_wait_on_vf_reset
+ * @vf: The VF being reset
  *
- * Enable or disable VF spoof checking
+ * Poll to make sure a given VF is ready after reset
 */
-int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+static void ice_wait_on_vf_reset(struct ice_vf *vf)
 {
-    struct ice_netdev_priv *np = netdev_priv(netdev);
-    struct ice_vsi *vsi = np->vsi;
-    struct ice_pf *pf = vsi->back;
-    struct ice_vsi_ctx *ctx;
-    enum ice_status status;
-    struct ice_vf *vf;
-    int ret = 0;
-
-    /* validate the request */
-    if (vf_id >= pf->num_alloc_vfs) {
-        netdev_err(netdev, "invalid VF id: %d\n", vf_id);
-        return -EINVAL;
-    }
-
-    vf = &pf->vf[vf_id];
-    if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
-        netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
-        return -EBUSY;
-    }
-
-    if (ena == vf->spoofchk) {
-        dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
-            ena ? "ON" : "OFF");
-        return 0;
-    }
-
-    ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
-    if (!ctx)
-        return -ENOMEM;
-
-    ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
-
-    if (ena) {
-        ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
-        ctx->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
-    }
+    int i;
 
-    status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
-    if (status) {
-        dev_dbg(&pf->pdev->dev,
-            "Error %d, failed to update VSI* parameters\n", status);
-        ret = -EIO;
-        goto out;
+    for (i = 0; i < ICE_MAX_VF_RESET_WAIT; i++) {
+        if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+            break;
+        msleep(20);
     }
-
-    vf->spoofchk = ena;
-    vsi->info.sec_flags = ctx->info.sec_flags;
-    vsi->info.sw_flags2 = ctx->info.sw_flags2;
-out:
-    devm_kfree(&pf->pdev->dev, ctx);
-    return ret;
 }
 
 /**
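
ice_wait_on_vf_reset() is a bounded poll: check the readiness flag up to a fixed number of times with a short sleep in between, then give up and let the ice_check_vf_init() call that follows report -EBUSY. The same shape in portable user-space C; the stub flag and the bound are illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_WAIT_ITERS 10	/* stand-in for ICE_MAX_VF_RESET_WAIT */

static volatile bool vf_init_done;

static void wait_on_reset(void)
{
	int i;

	for (i = 0; i < MAX_WAIT_ITERS; i++) {
		if (vf_init_done)
			break;		/* VF came back: stop early */
		usleep(20000);		/* ~20 ms, mirroring msleep(20) */
	}
	/* on timeout we simply return; the caller re-checks the state */
}

int main(void)
{
	vf_init_done = true;	/* pretend the reset already finished */
	wait_on_reset();
	printf("proceed; worst case blocked ~%d ms\n", MAX_WAIT_ITERS * 20);
	return 0;
}
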
@@ -2952,23 +3263,25 @@ out:
  */
 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 {
-    struct ice_netdev_priv *np = netdev_priv(netdev);
-    struct ice_vsi *vsi = np->vsi;
-    struct ice_pf *pf = vsi->back;
+    struct ice_pf *pf = ice_netdev_to_pf(netdev);
     struct ice_vf *vf;
     int ret = 0;
 
-    /* validate the request */
-    if (vf_id >= pf->num_alloc_vfs) {
-        netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+    if (ice_validate_vf_id(pf, vf_id))
         return -EINVAL;
-    }
 
     vf = &pf->vf[vf_id];
-    if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
-        netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+    /* Don't set MAC on disabled VF */
+    if (ice_is_vf_disabled(vf))
+        return -EINVAL;
+
+    /* In case VF is in reset mode, wait until it is completed. Depending
+     * on factors like queue disabling routine, this could take ~250ms
+     */
+    ice_wait_on_vf_reset(vf);
+
+    if (ice_check_vf_init(pf, vf))
         return -EBUSY;
-    }
 
     if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
         netdev_err(netdev, "%pM not a valid unicast address\n", mac);
@@ -2982,11 +3295,10 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
     */
     ether_addr_copy(vf->dflt_lan_addr.addr, mac);
     vf->pf_set_mac = true;
-    netdev_info(netdev,
-            "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
+    netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
            vf_id, mac);
 
-    ice_vc_dis_vf(vf);
+    ice_vc_reset_vf(vf);
     return ret;
 }
 
@@ -3000,30 +3312,32 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 */
 int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
 {
-    struct ice_netdev_priv *np = netdev_priv(netdev);
-    struct ice_vsi *vsi = np->vsi;
-    struct ice_pf *pf = vsi->back;
+    struct ice_pf *pf = ice_netdev_to_pf(netdev);
     struct ice_vf *vf;
 
-    /* validate the request */
-    if (vf_id >= pf->num_alloc_vfs) {
-        dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+    if (ice_validate_vf_id(pf, vf_id))
         return -EINVAL;
-    }
 
     vf = &pf->vf[vf_id];
-    if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
-        dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+    /* Don't set Trusted Mode on disabled VF */
+    if (ice_is_vf_disabled(vf))
+        return -EINVAL;
+
+    /* In case VF is in reset mode, wait until it is completed. Depending
+     * on factors like queue disabling routine, this could take ~250ms
+     */
+    ice_wait_on_vf_reset(vf);
+
+    if (ice_check_vf_init(pf, vf))
         return -EBUSY;
-    }
 
     /* Check if already trusted */
     if (trusted == vf->trusted)
         return 0;
 
     vf->trusted = trusted;
-    ice_vc_dis_vf(vf);
-    dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+    ice_vc_reset_vf(vf);
+    dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
         vf_id, trusted ? "" : "un");
 
     return 0;
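
Both ice_set_vf_mac() and ice_set_vf_trust() now share the same guard ordering: reject invalid IDs and disabled VFs outright, ride out an in-flight reset with the bounded poll, and only then insist the VF is initialized. A compilable sketch of that ordering, with every predicate a hypothetical stub:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool id_valid(int id)        { return id >= 0 && id < 4; }
static bool vf_disabled(int id)     { (void)id; return false; }
static bool vf_initialized(int id)  { (void)id; return true; }
static void wait_for_reset(int id)  { (void)id; /* bounded poll, as above */ }

static int configure_vf(int id)
{
	if (!id_valid(id) || vf_disabled(id))
		return -EINVAL;		/* permanent conditions: fail fast */
	wait_for_reset(id);		/* transient condition: wait it out */
	if (!vf_initialized(id))
		return -EBUSY;		/* still resetting: caller retries */
	/* ... apply the setting, then reset the VF so it takes effect ... */
	return 0;
}

int main(void)
{
	printf("configure_vf(1) -> %d\n", configure_vf(1));
	return 0;
}
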
@@ -3039,34 +3353,19 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
 */
 int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
 {
-    struct ice_netdev_priv *np = netdev_priv(netdev);
-    struct ice_pf *pf = np->vsi->back;
-    struct virtchnl_pf_event pfe = { 0 };
-    struct ice_link_status *ls;
+    struct ice_pf *pf = ice_netdev_to_pf(netdev);
     struct ice_vf *vf;
-    struct ice_hw *hw;
 
-    if (vf_id >= pf->num_alloc_vfs) {
-        dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+    if (ice_validate_vf_id(pf, vf_id))
         return -EINVAL;
-    }
 
     vf = &pf->vf[vf_id];
-    hw = &pf->hw;
-    ls = &pf->hw.port_info->phy.link_info;
-
-    if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
-        dev_err(&pf->pdev->dev, "vf %d in reset. Try again.\n", vf_id);
+    if (ice_check_vf_init(pf, vf))
         return -EBUSY;
-    }
-
-    pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
-    pfe.severity = PF_EVENT_SEVERITY_INFO;
 
     switch (link_state) {
     case IFLA_VF_LINK_STATE_AUTO:
         vf->link_forced = false;
-        vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
         break;
     case IFLA_VF_LINK_STATE_ENABLE:
         vf->link_forced = true;
@@ -3080,15 +3379,52 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
         return -EINVAL;
     }
 
-    if (vf->link_forced)
-        ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
-    else
-        ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
+    ice_vc_notify_vf_link_state(vf);
 
-    /* Notify the VF of its new link state */
-    ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
-                  VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
-                  sizeof(pfe), NULL);
+    return 0;
+}
+
+/**
+ * ice_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-255)
+ * @vf_stats: pointer to the OS memory to be initialized
+ */
+int ice_get_vf_stats(struct net_device *netdev, int vf_id,
+             struct ifla_vf_stats *vf_stats)
+{
+    struct ice_pf *pf = ice_netdev_to_pf(netdev);
+    struct ice_eth_stats *stats;
+    struct ice_vsi *vsi;
+    struct ice_vf *vf;
+
+    if (ice_validate_vf_id(pf, vf_id))
+        return -EINVAL;
+
+    vf = &pf->vf[vf_id];
+
+    if (ice_check_vf_init(pf, vf))
+        return -EBUSY;
+
+    vsi = pf->vsi[vf->lan_vsi_idx];
+    if (!vsi)
+        return -EINVAL;
+
+    ice_update_eth_stats(vsi);
+    stats = &vsi->eth_stats;
+
+    memset(vf_stats, 0, sizeof(*vf_stats));
+
+    vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+        stats->rx_multicast;
+    vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+        stats->tx_multicast;
+    vf_stats->rx_bytes = stats->rx_bytes;
+    vf_stats->tx_bytes = stats->tx_bytes;
+    vf_stats->broadcast = stats->rx_broadcast;
+    vf_stats->multicast = stats->rx_multicast;
+    vf_stats->rx_dropped = stats->rx_discards;
+    vf_stats->tx_dropped = stats->tx_discards;
 
     return 0;
 }
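
The new ice_get_vf_stats() is a straight rollup: the netlink ifla_vf_stats packet counters are sums of the VSI's unicast/broadcast/multicast counters, and the rest are copies. The same mapping as a standalone sketch; the struct layouts are simplified stand-ins for the kernel ones:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct eth_stats {
	uint64_t rx_unicast, rx_multicast, rx_broadcast, rx_bytes, rx_discards;
	uint64_t tx_unicast, tx_multicast, tx_broadcast, tx_bytes, tx_discards;
};

struct vf_stats {
	uint64_t rx_packets, tx_packets, rx_bytes, tx_bytes;
	uint64_t broadcast, multicast, rx_dropped, tx_dropped;
};

static void fill_vf_stats(struct vf_stats *out, const struct eth_stats *in)
{
	memset(out, 0, sizeof(*out));
	/* packets = unicast + broadcast + multicast, per direction */
	out->rx_packets = in->rx_unicast + in->rx_broadcast + in->rx_multicast;
	out->tx_packets = in->tx_unicast + in->tx_broadcast + in->tx_multicast;
	out->rx_bytes   = in->rx_bytes;
	out->tx_bytes   = in->tx_bytes;
	out->broadcast  = in->rx_broadcast;
	out->multicast  = in->rx_multicast;
	out->rx_dropped = in->rx_discards;
	out->tx_dropped = in->tx_discards;
}

int main(void)
{
	struct eth_stats e = { 10, 2, 1, 1500, 0, 8, 0, 1, 900, 0 };
	struct vf_stats s;

	fill_vf_stats(&s, &e);
	printf("rx_packets=%llu tx_packets=%llu\n",
	       (unsigned long long)s.rx_packets,
	       (unsigned long long)s.tx_packets);
	return 0;
}
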