Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2133
1 file changed, 1633 insertions(+), 500 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 3fde5471e1c0..492bc9446463 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -35,6 +35,27 @@
#define BUF_RESERVE_PERCENT 90
#define HCLGE_RESET_MAX_FAIL_CNT 5
+#define HCLGE_RESET_SYNC_TIME 100
+#define HCLGE_PF_RESET_SYNC_TIME 20
+#define HCLGE_PF_RESET_SYNC_CNT 1500
+
+/* Get DFX BD number offset */
+#define HCLGE_DFX_BIOS_BD_OFFSET 1
+#define HCLGE_DFX_SSU_0_BD_OFFSET 2
+#define HCLGE_DFX_SSU_1_BD_OFFSET 3
+#define HCLGE_DFX_IGU_BD_OFFSET 4
+#define HCLGE_DFX_RPU_0_BD_OFFSET 5
+#define HCLGE_DFX_RPU_1_BD_OFFSET 6
+#define HCLGE_DFX_NCSI_BD_OFFSET 7
+#define HCLGE_DFX_RTC_BD_OFFSET 8
+#define HCLGE_DFX_PPP_BD_OFFSET 9
+#define HCLGE_DFX_RCB_BD_OFFSET 10
+#define HCLGE_DFX_TQP_BD_OFFSET 11
+#define HCLGE_DFX_SSU_2_BD_OFFSET 12
+
+#define HCLGE_LINK_STATUS_MS 10
+
+#define HCLGE_VF_VPORT_START_NUM 1
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
@@ -47,9 +68,12 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
unsigned long *addr);
+static int hclge_set_default_loopback(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
+static struct workqueue_struct *hclge_wq;
+
static const struct pci_device_id ae_algo_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
@@ -303,8 +327,7 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
{
.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
.ethter_type = cpu_to_le16(ETH_P_LLDP),
- .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
- .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
+ .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
.i_port_bitmap = 0x1,
},
};
@@ -317,11 +340,85 @@ static const u8 hclge_hash_key[] = {
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
+static const u32 hclge_dfx_bd_offset_list[] = {
+ HCLGE_DFX_BIOS_BD_OFFSET,
+ HCLGE_DFX_SSU_0_BD_OFFSET,
+ HCLGE_DFX_SSU_1_BD_OFFSET,
+ HCLGE_DFX_IGU_BD_OFFSET,
+ HCLGE_DFX_RPU_0_BD_OFFSET,
+ HCLGE_DFX_RPU_1_BD_OFFSET,
+ HCLGE_DFX_NCSI_BD_OFFSET,
+ HCLGE_DFX_RTC_BD_OFFSET,
+ HCLGE_DFX_PPP_BD_OFFSET,
+ HCLGE_DFX_RCB_BD_OFFSET,
+ HCLGE_DFX_TQP_BD_OFFSET,
+ HCLGE_DFX_SSU_2_BD_OFFSET
+};
+
+static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
+ HCLGE_OPC_DFX_BIOS_COMMON_REG,
+ HCLGE_OPC_DFX_SSU_REG_0,
+ HCLGE_OPC_DFX_SSU_REG_1,
+ HCLGE_OPC_DFX_IGU_EGU_REG,
+ HCLGE_OPC_DFX_RPU_REG_0,
+ HCLGE_OPC_DFX_RPU_REG_1,
+ HCLGE_OPC_DFX_NCSI_REG,
+ HCLGE_OPC_DFX_RTC_REG,
+ HCLGE_OPC_DFX_PPP_REG,
+ HCLGE_OPC_DFX_RCB_REG,
+ HCLGE_OPC_DFX_TQP_REG,
+ HCLGE_OPC_DFX_SSU_REG_2
+};
+
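
The two tables above are parallel arrays: entry i of hclge_dfx_bd_offset_list supplies the BD-count offset that goes with entry i of hclge_dfx_reg_opcode_list, so a DFX register dump can walk both with one index. A standalone sketch of that pairing; the values and names below are illustrative, not the driver's:

#include <stdio.h>

#define ARRAY_LEN(a) (sizeof(a) / sizeof((a)[0]))

/* made-up stand-ins for the two parallel tables above */
static const int bd_offset_list[] = { 1, 2, 3 };
static const int reg_opcode_list[] = { 0x32, 0x33, 0x34 };

int main(void)
{
	size_t i;

	/* one index pairs a BD offset with its query opcode */
	for (i = 0; i < ARRAY_LEN(bd_offset_list); i++)
		printf("offset %d -> opcode 0x%x\n",
		       bd_offset_list[i], reg_opcode_list[i]);
	return 0;
}
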
+static const struct key_info meta_data_key_info[] = {
+ { PACKET_TYPE_ID, 6},
+ { IP_FRAGEMENT, 1},
+ { ROCE_TYPE, 1},
+ { NEXT_KEY, 5},
+ { VLAN_NUMBER, 2},
+ { SRC_VPORT, 12},
+ { DST_VPORT, 12},
+ { TUNNEL_PACKET, 1},
+};
+
+static const struct key_info tuple_key_info[] = {
+ { OUTER_DST_MAC, 48},
+ { OUTER_SRC_MAC, 48},
+ { OUTER_VLAN_TAG_FST, 16},
+ { OUTER_VLAN_TAG_SEC, 16},
+ { OUTER_ETH_TYPE, 16},
+ { OUTER_L2_RSV, 16},
+ { OUTER_IP_TOS, 8},
+ { OUTER_IP_PROTO, 8},
+ { OUTER_SRC_IP, 32},
+ { OUTER_DST_IP, 32},
+ { OUTER_L3_RSV, 16},
+ { OUTER_SRC_PORT, 16},
+ { OUTER_DST_PORT, 16},
+ { OUTER_L4_RSV, 32},
+ { OUTER_TUN_VNI, 24},
+ { OUTER_TUN_FLOW_ID, 8},
+ { INNER_DST_MAC, 48},
+ { INNER_SRC_MAC, 48},
+ { INNER_VLAN_TAG_FST, 16},
+ { INNER_VLAN_TAG_SEC, 16},
+ { INNER_ETH_TYPE, 16},
+ { INNER_L2_RSV, 16},
+ { INNER_IP_TOS, 8},
+ { INNER_IP_PROTO, 8},
+ { INNER_SRC_IP, 32},
+ { INNER_DST_IP, 32},
+ { INNER_L3_RSV, 16},
+ { INNER_SRC_PORT, 16},
+ { INNER_DST_PORT, 16},
+ { INNER_L4_RSV, 32},
+};
+
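
Each entry in the key tables above pairs a tuple field with its width in bits; the widths of whichever fields are active add up to the flow director key length. A minimal sketch of that accounting, using a hypothetical subset of fields:

#include <stdio.h>

struct key_info {
	int field;	/* field id (stand-in for the driver's enum) */
	int bits;	/* field width in bits */
};

static const struct key_info tuple[] = {
	{ 0 /* src ip */,   32 },
	{ 1 /* dst ip */,   32 },
	{ 2 /* src port */, 16 },
	{ 3 /* dst port */, 16 },
};

int main(void)
{
	size_t i;
	int total = 0;

	for (i = 0; i < sizeof(tuple) / sizeof(tuple[0]); i++)
		total += tuple[i].bits;
	printf("active key width: %d bits\n", total);	/* 96 */
	return 0;
}
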
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
- u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
+ u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
__le64 *desc_data;
int i, k, n;
@@ -358,15 +455,19 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
- u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
+ u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc *desc;
__le64 *desc_data;
u16 i, k, n;
int ret;
- desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ /* This may be called inside atomic sections,
+ * so GFP_ATOMIC is more suitable here
+ */
+ desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
if (!desc)
return -ENOMEM;
+
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
if (ret) {
@@ -647,6 +748,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
count += 2;
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+
+ if (hdev->hw.mac.phydev) {
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
+ }
+
} else if (stringset == ETH_SS_STATS) {
count = ARRAY_SIZE(g_mac_stats_string) +
hclge_tqps_get_sset_count(handle, stringset);
@@ -697,19 +804,21 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
struct hclge_dev *hdev = vport->back;
u64 *p;
- p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
+ p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
p = hclge_tqps_get_stats(handle, p);
}
-static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
- u64 *rx_cnt)
+static void hclge_get_mac_stat(struct hnae3_handle *handle,
+ struct hns3_mac_stats *mac_stats)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
- *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
+ hclge_update_stats(handle, NULL);
+
+ mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
+ mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
@@ -753,9 +862,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
usleep_range(1000, 2000);
} while (timeout++ < HCLGE_QUERY_MAX_CNT);
- ret = hclge_parse_func_status(hdev, req);
-
- return ret;
+ return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
@@ -773,12 +880,12 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
}
req = (struct hclge_pf_res_cmd *)desc.data;
- hdev->num_tqps = __le16_to_cpu(req->tqp_num);
- hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
+ hdev->num_tqps = le16_to_cpu(req->tqp_num);
+ hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
if (req->tx_buf_size)
hdev->tx_buf_size =
- __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
+ le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
else
hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
@@ -786,7 +893,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
if (req->dv_buf_size)
hdev->dv_buf_size =
- __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
+ le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
else
hdev->dv_buf_size = HCLGE_DEFAULT_DV;
@@ -794,12 +901,15 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_base_msix_offset =
- hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+ hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
hdev->num_roce_msi =
- hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+ /* the NIC's MSI-X number is always equal to the RoCE's */
+ hdev->num_nic_msi = hdev->num_roce_msi;
+
/* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors.
*/
@@ -807,8 +917,17 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->roce_base_msix_offset;
} else {
hdev->num_msi =
- hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+ hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+
+ hdev->num_nic_msi = hdev->num_msi;
+ }
+
+ if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Just %u msi resources, not enough for pf(min:2).\n",
+ hdev->num_nic_msi);
+ return -EINVAL;
}
return 0;
@@ -1075,6 +1194,36 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
hclge_parse_backplane_link_mode(hdev, speed_ability);
}
+
+static u32 hclge_get_max_speed(u8 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ return HCLGE_MAC_SPEED_100G;
+
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ return HCLGE_MAC_SPEED_50G;
+
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ return HCLGE_MAC_SPEED_40G;
+
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ return HCLGE_MAC_SPEED_25G;
+
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ return HCLGE_MAC_SPEED_10G;
+
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ return HCLGE_MAC_SPEED_1G;
+
+ if (speed_ability & HCLGE_SUPPORT_100M_BIT)
+ return HCLGE_MAC_SPEED_100M;
+
+ if (speed_ability & HCLGE_SUPPORT_10M_BIT)
+ return HCLGE_MAC_SPEED_10M;
+
+ return HCLGE_MAC_SPEED_1G;
+}
+
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
struct hclge_cfg_param_cmd *req;
@@ -1182,11 +1331,7 @@ static int hclge_get_cap(struct hclge_dev *hdev)
}
/* get pf resource */
- ret = hclge_query_pf_resource(hdev);
- if (ret)
- dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
-
- return ret;
+ return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
@@ -1245,9 +1390,11 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_parse_link_mode(hdev, cfg.speed_ability);
+ hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
+
if ((hdev->tc_max > HNAE3_MAX_TC) ||
(hdev->tc_max < 1)) {
- dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
+ dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
hdev->tc_max);
hdev->tc_max = 1;
}
@@ -1270,6 +1417,12 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_init_kdump_kernel_config(hdev);
+ /* Set the init affinity based on pci func number */
+ i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
+ i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
+ cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
+ &hdev->affinity_mask);
+
return ret;
}
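
The affinity hunk above picks the PF's initial service CPU from its PCI function number, modulo the number of CPUs on the device's NUMA node, falling back to 0 when the node reports no local CPUs. A standalone sketch of just the index computation, with made-up values:

#include <stdio.h>

static int pick_affinity_slot(int pci_func, int node_cpu_count)
{
	/* fall back to slot 0 when the node reports no local CPUs */
	return node_cpu_count ? pci_func % node_cpu_count : 0;
}

int main(void)
{
	/* e.g. PCI function 5 on a node with 4 local CPUs -> slot 1 */
	printf("slot = %d\n", pick_affinity_slot(5, 4));
	return 0;
}
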
@@ -1394,6 +1547,10 @@ static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
kinfo->rss_size = min_t(u16, hdev->rss_size_max,
vport->alloc_tqps / hdev->tm_info.num_tc);
+ /* ensure one-to-one mapping between irq and queue by default */
+ kinfo->rss_size = min_t(u16, kinfo->rss_size,
+ (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
+
return 0;
}
@@ -1497,7 +1654,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
if (hdev->num_tqps < num_vport) {
- dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
+ dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
hdev->num_tqps, num_vport);
return -EINVAL;
}
@@ -1520,6 +1677,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
for (i = 0; i < num_vport; i++) {
vport->back = hdev;
vport->vport_id = i;
+ vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
vport->rxvlan_cfg.rx_vlan_offload_en = true;
@@ -2172,7 +2330,8 @@ static int hclge_init_msi(struct hclge_dev *hdev)
int vectors;
int i;
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+ hdev->num_msi,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (vectors < 0) {
dev_err(&pdev->dev,
@@ -2182,11 +2341,12 @@ static int hclge_init_msi(struct hclge_dev *hdev)
}
if (vectors < hdev->num_msi)
dev_warn(&hdev->pdev->dev,
- "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
hdev->num_msi, vectors);
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
+
hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = hdev->base_msi_vector +
hdev->roce_base_msix_offset;
@@ -2455,30 +2615,21 @@ static int hclge_mac_init(struct hclge_dev *hdev)
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
hdev->hw.mac.duplex);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Config mac speed dup fail ret=%d\n", ret);
+ if (ret)
return ret;
- }
if (hdev->hw.mac.support_autoneg) {
ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Config mac autoneg fail ret=%d\n", ret);
+ if (ret)
return ret;
- }
}
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Fec mode init fail, ret = %d\n", ret);
+ if (ret)
return ret;
- }
}
ret = hclge_set_mac_mtu(hdev, hdev->mps);
@@ -2487,6 +2638,10 @@ static int hclge_mac_init(struct hclge_dev *hdev)
return ret;
}
+ ret = hclge_set_default_loopback(hdev);
+ if (ret)
+ return ret;
+
ret = hclge_buffer_alloc(hdev);
if (ret)
dev_err(&hdev->pdev->dev,
@@ -2497,24 +2652,27 @@ static int hclge_mac_init(struct hclge_dev *hdev)
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
- if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->mbx_service_task);
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ hclge_wq, &hdev->service_task, 0);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->rst_service_task);
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ hclge_wq, &hdev->service_task, 0);
}
-static void hclge_task_schedule(struct hclge_dev *hdev)
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
- if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
- !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
- (void)schedule_work(&hdev->service_task);
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ hclge_wq, &hdev->service_task,
+ delay_time);
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
@@ -2573,6 +2731,10 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
if (!client)
return;
+
+ if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
+ return;
+
state = hclge_get_mac_phy_link(hdev);
if (state != hdev->hw.mac.link) {
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
@@ -2586,6 +2748,8 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
}
hdev->hw.mac.link = state;
}
+
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}
static void hclge_update_port_capability(struct hclge_mac *mac)
@@ -2602,7 +2766,7 @@ static void hclge_update_port_capability(struct hclge_mac *mac)
else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
mac->module_type = HNAE3_MODULE_TYPE_TP;
- if (mac->support_autoneg == true) {
+ if (mac->support_autoneg) {
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
linkmode_copy(mac->advertising, mac->supported);
} else {
@@ -2656,6 +2820,12 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
return ret;
}
+ /* In some cases, the MAC speed got from IMP may be 0; it shouldn't
+ * be set to mac->speed.
+ */
+ if (!le32_to_cpu(resp->speed))
+ return 0;
+
mac->speed = le32_to_cpu(resp->speed);
/* if resp->speed_ability is 0, it means it's an old version
* firmware, do not update these params
@@ -2729,23 +2899,63 @@ static int hclge_get_status(struct hnae3_handle *handle)
return hdev->hw.mac.link;
}
-static void hclge_service_timer(struct timer_list *t)
+static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
{
- struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
+ if (!pci_num_vf(hdev->pdev)) {
+ dev_err(&hdev->pdev->dev,
+ "SRIOV is disabled, can not get vport(%d) info.\n", vf);
+ return NULL;
+ }
- mod_timer(&hdev->service_timer, jiffies + HZ);
- hdev->hw_stats.stats_timer++;
- hdev->fd_arfs_expire_timer++;
- hclge_task_schedule(hdev);
+ if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
+ dev_err(&hdev->pdev->dev,
+ "vf id(%d) is out of range(0 <= vfid < %d)\n",
+ vf, pci_num_vf(hdev->pdev));
+ return NULL;
+ }
+
+ /* VFs start from 1 in vport */
+ vf += HCLGE_VF_VPORT_START_NUM;
+ return &hdev->vport[vf];
}
-static void hclge_service_complete(struct hclge_dev *hdev)
+static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
+ struct ifla_vf_info *ivf)
{
- WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
- /* Flush memory before next watchdog */
- smp_mb__before_atomic();
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+ ivf->vf = vf;
+ ivf->linkstate = vport->vf_info.link_state;
+ ivf->spoofchk = vport->vf_info.spoofchk;
+ ivf->trusted = vport->vf_info.trusted;
+ ivf->min_tx_rate = 0;
+ ivf->max_tx_rate = vport->vf_info.max_tx_rate;
+ ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
+ ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
+ ether_addr_copy(ivf->mac, vport->vf_info.mac);
+
+ return 0;
+}
+
+static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
+ int link_state)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ vport->vf_info.link_state = link_state;
+
+ return 0;
}
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
@@ -2763,9 +2973,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
* defer the processing of the mailbox events. Since we would not have
* cleared the RX CMDQ event this time, we would receive another
* interrupt from H/W just for the mailbox.
+ *
+ * check for vector0 reset event sources
*/
-
- /* check for vector0 reset event sources */
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
@@ -2786,8 +2996,6 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
/* check for vector0 msix event source */
if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
- dev_info(&hdev->pdev->dev, "received event 0x%x\n",
- msix_src_reg);
*clearval = msix_src_reg;
return HCLGE_VECTOR0_EVENT_ERR;
}
@@ -2882,10 +3090,15 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
break;
}
- /* clear the source of interrupt if it is not cause by reset */
+ hclge_clear_event_cause(hdev, event_cause, clearval);
+
+ /* Enable the interrupt if it is not caused by reset. When clearval
+ * equals 0, the interrupt status may have been cleared by hardware
+ * before the driver read the status register; in this case, the
+ * vector0 interrupt should also be enabled.
+ */
if (!clearval ||
event_cause == HCLGE_VECTOR0_EVENT_MBX) {
- hclge_clear_event_cause(hdev, event_cause, clearval);
hclge_enable_vector(&hdev->misc_vector, true);
}
@@ -2918,6 +3131,36 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
hdev->num_msi_used += 1;
}
+static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
+ affinity_notify);
+
+ cpumask_copy(&hdev->affinity_mask, mask);
+}
+
+static void hclge_irq_affinity_release(struct kref *ref)
+{
+}
+
+static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
+{
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq,
+ &hdev->affinity_mask);
+
+ hdev->affinity_notify.notify = hclge_irq_affinity_notify;
+ hdev->affinity_notify.release = hclge_irq_affinity_release;
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
+ &hdev->affinity_notify);
+}
+
+static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
+{
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
+}
+
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
int ret;
@@ -2925,8 +3168,10 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
hclge_get_misc_vector(hdev);
/* this would be explicitly freed in the end */
+ snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
+ HCLGE_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
- 0, "hclge_misc", hdev);
+ 0, hdev->misc_vector.name, hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -3000,7 +3245,8 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS 100
-#define HCLGE_RESET_WAIT_CNT 200
+#define HCLGE_RESET_WAIT_CNT 350
+
u32 val, reg, reg_bit;
u32 cnt = 0;
@@ -3017,8 +3263,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
reg = HCLGE_FUN_RST_ING;
reg_bit = HCLGE_FUN_RST_ING_B;
break;
- case HNAE3_FLR_RESET:
- break;
default:
dev_err(&hdev->pdev->dev,
"Wait for unsupported reset type: %d\n",
@@ -3026,20 +3270,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return -EINVAL;
}
- if (hdev->reset_type == HNAE3_FLR_RESET) {
- while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
- cnt++ < HCLGE_RESET_WAIT_CNT)
- msleep(HCLGE_RESET_WATI_MS);
-
- if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
- dev_err(&hdev->pdev->dev,
- "flr wait timeout: %d\n", cnt);
- return -EBUSY;
- }
-
- return 0;
- }
-
val = hclge_read_dev(&hdev->hw, reg);
while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
@@ -3083,7 +3313,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
if (ret) {
dev_err(&hdev->pdev->dev,
- "set vf(%d) rst failed %d!\n",
+ "set vf(%u) rst failed %d!\n",
vport->vport_id, ret);
return ret;
}
@@ -3098,13 +3328,92 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
ret = hclge_inform_reset_assert_to_vf(vport);
if (ret)
dev_warn(&hdev->pdev->dev,
- "inform reset to vf(%d) failed %d!\n",
+ "inform reset to vf(%u) failed %d!\n",
vport->vport_id, ret);
}
return 0;
}
+static void hclge_mailbox_service_task(struct hclge_dev *hdev)
+{
+ if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
+ test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
+ test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ hclge_mbx_handler(hdev);
+
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
+{
+ struct hclge_pf_rst_sync_cmd *req;
+ struct hclge_desc desc;
+ int cnt = 0;
+ int ret;
+
+ req = (struct hclge_pf_rst_sync_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
+
+ do {
+ /* vf need to down netdev by mbx during PF or FLR reset */
+ hclge_mailbox_service_task(hdev);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ /* to be compatible with old firmware, wait
+ * 100 ms for VF to stop IO
+ */
+ if (ret == -EOPNOTSUPP) {
+ msleep(HCLGE_RESET_SYNC_TIME);
+ return;
+ } else if (ret) {
+ dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
+ ret);
+ return;
+ } else if (req->all_vf_ready) {
+ return;
+ }
+ msleep(HCLGE_PF_RESET_SYNC_TIME);
+ hclge_cmd_reuse_desc(&desc, true);
+ } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
+
+ dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
+}
+
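
hclge_func_reset_sync_vf() above mixes a bounded polling loop with a legacy-firmware fallback: -EOPNOTSUPP means the ready-query command is unknown to the firmware, so a fixed 100 ms wait is used instead. A standalone sketch of that control flow; query_vf_ready() is a stand-in, not a driver function:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define SYNC_CNT 1500	/* maximum number of polls */
#define SYNC_MS  20	/* sleep between polls, in ms */

static int query_vf_ready(bool *ready)
{
	*ready = true;	/* stand-in for the firmware ready query */
	return 0;
}

static void reset_sync_vf(void)
{
	int cnt = 0;

	do {
		bool ready;
		int ret = query_vf_ready(&ready);

		if (ret == -EOPNOTSUPP) {
			usleep(100 * 1000);	/* old firmware: fixed wait */
			return;
		} else if (ret) {
			fprintf(stderr, "sync with VF fail %d\n", ret);
			return;
		} else if (ready) {
			return;
		}
		usleep(SYNC_MS * 1000);
	} while (cnt++ < SYNC_CNT);

	fprintf(stderr, "sync with VF timeout\n");
}

int main(void)
{
	reset_sync_vf();
	return 0;
}
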
+void hclge_report_hw_error(struct hclge_dev *hdev,
+ enum hnae3_hw_error_type type)
+{
+ struct hnae3_client *client = hdev->nic_client;
+ u16 i;
+
+ if (!client || !client->ops->process_hw_error ||
+ !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
+ return;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
+ client->ops->process_hw_error(&hdev->vport[i].nic, type);
+}
+
+static void hclge_handle_imp_error(struct hclge_dev *hdev)
+{
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
+ hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+ }
+
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
+ hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+ }
+}
+
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
struct hclge_desc desc;
@@ -3150,12 +3459,6 @@ static void hclge_do_reset(struct hclge_dev *hdev)
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
- case HNAE3_FLR_RESET:
- dev_info(&pdev->dev, "FLR requested\n");
- /* schedule again to check later */
- set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
- hclge_reset_task_schedule(hdev);
- break;
default:
dev_warn(&pdev->dev,
"Unsupported reset type: %d\n", hdev->reset_type);
@@ -3171,10 +3474,15 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
/* first, resolve any unknown reset type to the known type(s) */
if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
+ u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
+ HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
/* we will intentionally ignore any errors from this function
* as we will end up in *some* reset request in any case
*/
- hclge_handle_hw_msix_error(hdev, addr);
+ if (hclge_handle_hw_msix_error(hdev, addr))
+ dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
+ msix_sts_reg);
+
clear_bit(HNAE3_UNKNOWN_RESET, addr);
/* We deferred the clearing of the error event which caused the
* interrupt, since it was not possible to do that in
@@ -3229,40 +3537,53 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
if (!clearval)
return;
- hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
+ /* For revision 0x20, the reset interrupt source
+ * can only be cleared after the hardware reset is done
+ */
+ if (hdev->pdev->revision == 0x20)
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
+ clearval);
+
hclge_enable_vector(&hdev->misc_vector, true);
}
-static int hclge_reset_prepare_down(struct hclge_dev *hdev)
+static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
- int ret = 0;
+ u32 reg_val;
- switch (hdev->reset_type) {
- case HNAE3_FUNC_RESET:
- /* fall through */
- case HNAE3_FLR_RESET:
- ret = hclge_set_all_vf_rst(hdev, true);
- break;
- default:
- break;
- }
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ if (enable)
+ reg_val |= HCLGE_NIC_SW_RST_RDY;
+ else
+ reg_val &= ~HCLGE_NIC_SW_RST_RDY;
- return ret;
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
}
-static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
{
-#define HCLGE_RESET_SYNC_TIME 100
+ int ret;
+
+ ret = hclge_set_all_vf_rst(hdev, true);
+ if (ret)
+ return ret;
+ hclge_func_reset_sync_vf(hdev);
+
+ return 0;
+}
+
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
u32 reg_val;
int ret = 0;
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
- /* There is no mechanism for PF to know if VF has stopped IO
- * for now, just wait 100 ms for VF to stop IO
- */
- msleep(HCLGE_RESET_SYNC_TIME);
+ ret = hclge_func_reset_notify_vf(hdev);
+ if (ret)
+ return ret;
+
ret = hclge_func_reset_cmd(hdev, 0);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -3279,15 +3600,12 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
- /* There is no mechanism for PF to know if VF has stopped IO
- * for now, just wait 100 ms for VF to stop IO
- */
- msleep(HCLGE_RESET_SYNC_TIME);
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
- hdev->rst_stats.flr_rst_cnt++;
+ ret = hclge_func_reset_notify_vf(hdev);
+ if (ret)
+ return ret;
break;
case HNAE3_IMP_RESET:
+ hclge_handle_imp_error(hdev);
reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
@@ -3298,14 +3616,13 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
/* inform hardware that preparatory work is done */
msleep(HCLGE_RESET_SYNC_TIME);
- hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
- HCLGE_NIC_CMQ_ENABLE);
+ hclge_reset_handshake(hdev, true);
dev_info(&hdev->pdev->dev, "prepare wait ok\n");
return ret;
}
-static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
+static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5
@@ -3313,36 +3630,63 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
hdev->reset_pending);
return true;
- } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
- (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
- BIT(HCLGE_IMP_RESET_BIT))) {
+ } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
+ HCLGE_RESET_INT_M) {
dev_info(&hdev->pdev->dev,
- "reset failed because IMP Reset is pending\n");
+ "reset failed because new reset interrupt\n");
hclge_clear_reset_cause(hdev);
return false;
- } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
- hdev->reset_fail_cnt++;
- if (is_timeout) {
- set_bit(hdev->reset_type, &hdev->reset_pending);
- dev_info(&hdev->pdev->dev,
- "re-schedule to wait for hw reset done\n");
- return true;
- }
-
- dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
- hclge_clear_reset_cause(hdev);
- set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
- mod_timer(&hdev->reset_timer,
- jiffies + HCLGE_RESET_INTERVAL);
-
- return false;
+ } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
+ hdev->rst_stats.reset_fail_cnt++;
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ dev_info(&hdev->pdev->dev,
+ "re-schedule reset task(%u)\n",
+ hdev->rst_stats.reset_fail_cnt);
+ return true;
}
hclge_clear_reset_cause(hdev);
+
+ /* recover the handshake status when reset fail */
+ hclge_reset_handshake(hdev, true);
+
dev_err(&hdev->pdev->dev, "Reset fail!\n");
+
+ hclge_dbg_dump_rst_info(hdev);
+
+ set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
+
return false;
}
+static int hclge_set_rst_done(struct hclge_dev *hdev)
+{
+ struct hclge_pf_rst_done_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_pf_rst_done_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
+ req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ /* To be compatible with the old firmware, which does not support
+ * command HCLGE_OPC_PF_RST_DONE, just print a warning and
+ * return success
+ */
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "current firmware does not support command(0x%x)!\n",
+ HCLGE_OPC_PF_RST_DONE);
+ return 0;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
+ ret);
+ }
+
+ return ret;
+}
+
static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
int ret = 0;
@@ -3353,10 +3697,18 @@ static int hclge_reset_prepare_up(struct hclge_dev *hdev)
case HNAE3_FLR_RESET:
ret = hclge_set_all_vf_rst(hdev, false);
break;
+ case HNAE3_GLOBAL_RESET:
+ /* fall through */
+ case HNAE3_IMP_RESET:
+ ret = hclge_set_rst_done(hdev);
+ break;
default:
break;
}
+ /* clear up the handshake status after re-initialize done */
+ hclge_reset_handshake(hdev, false);
+
return ret;
}
@@ -3379,10 +3731,9 @@ static int hclge_reset_stack(struct hclge_dev *hdev)
return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}
-static void hclge_reset(struct hclge_dev *hdev)
+static int hclge_reset_prepare(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- bool is_timeout = false;
int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
@@ -3393,79 +3744,93 @@ static void hclge_reset(struct hclge_dev *hdev)
/* perform reset of the stack & ae device for a client */
ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
if (ret)
- goto err_reset;
-
- ret = hclge_reset_prepare_down(hdev);
- if (ret)
- goto err_reset;
+ return ret;
rtnl_lock();
ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- if (ret)
- goto err_reset_lock;
-
rtnl_unlock();
-
- ret = hclge_reset_prepare_wait(hdev);
if (ret)
- goto err_reset;
+ return ret;
- if (hclge_reset_wait(hdev)) {
- is_timeout = true;
- goto err_reset;
- }
+ return hclge_reset_prepare_wait(hdev);
+}
+
+static int hclge_reset_rebuild(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ enum hnae3_reset_type reset_level;
+ int ret;
hdev->rst_stats.hw_reset_done_cnt++;
ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
if (ret)
- goto err_reset;
+ return ret;
rtnl_lock();
-
ret = hclge_reset_stack(hdev);
+ rtnl_unlock();
if (ret)
- goto err_reset_lock;
+ return ret;
hclge_clear_reset_cause(hdev);
ret = hclge_reset_prepare_up(hdev);
if (ret)
- goto err_reset_lock;
+ return ret;
- rtnl_unlock();
ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
* times
*/
- if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
- goto err_reset;
+ if (ret &&
+ hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
+ return ret;
rtnl_lock();
-
ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
- if (ret)
- goto err_reset_lock;
-
rtnl_unlock();
+ if (ret)
+ return ret;
ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
if (ret)
- goto err_reset;
+ return ret;
hdev->last_reset_time = jiffies;
- hdev->reset_fail_cnt = 0;
+ hdev->rst_stats.reset_fail_cnt = 0;
hdev->rst_stats.reset_done_cnt++;
ae_dev->reset_type = HNAE3_NONE_RESET;
- del_timer(&hdev->reset_timer);
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
+
+ /* if default_reset_request has a higher-level reset request,
+ * it should be handled as soon as possible, since some errors
+ * need this kind of reset to fix.
+ */
+ reset_level = hclge_get_reset_level(ae_dev,
+ &hdev->default_reset_request);
+ if (reset_level != HNAE3_NONE_RESET)
+ set_bit(reset_level, &hdev->reset_request);
+
+ return 0;
+}
+
+static void hclge_reset(struct hclge_dev *hdev)
+{
+ if (hclge_reset_prepare(hdev))
+ goto err_reset;
+
+ if (hclge_reset_wait(hdev))
+ goto err_reset;
+
+ if (hclge_reset_rebuild(hdev))
+ goto err_reset;
return;
-err_reset_lock:
- rtnl_unlock();
err_reset:
- if (hclge_reset_err_handle(hdev, is_timeout))
+ if (hclge_reset_err_handle(hdev))
hclge_reset_task_schedule(hdev);
}
@@ -3493,16 +3858,18 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
handle = &hdev->vport[0].nic;
if (time_before(jiffies, (hdev->last_reset_time +
- HCLGE_RESET_INTERVAL)))
+ HCLGE_RESET_INTERVAL))) {
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
return;
- else if (hdev->default_reset_request)
+ } else if (hdev->default_reset_request) {
hdev->reset_level =
hclge_get_reset_level(ae_dev,
&hdev->default_reset_request);
- else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
+ } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
hdev->reset_level = HNAE3_FUNC_RESET;
+ }
- dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
+ dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
hdev->reset_level);
/* request reset & schedule reset task */
@@ -3525,6 +3892,12 @@ static void hclge_reset_timer(struct timer_list *t)
{
struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+ /* if default_reset_request has no value, it means that this reset
+ * request has already been handled, so just return here
+ */
+ if (!hdev->default_reset_request)
+ return;
+
dev_info(&hdev->pdev->dev,
"triggering reset in reset timer\n");
hclge_reset_event(hdev->pdev, NULL);
@@ -3556,34 +3929,18 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
hdev->reset_type = HNAE3_NONE_RESET;
}
-static void hclge_reset_service_task(struct work_struct *work)
+static void hclge_reset_service_task(struct hclge_dev *hdev)
{
- struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, rst_service_task);
-
- if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
return;
- clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
hclge_reset_subtask(hdev);
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
-}
-
-static void hclge_mailbox_service_task(struct work_struct *work)
-{
- struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, mbx_service_task);
-
- if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
- return;
-
- clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
-
- hclge_mbx_handler(hdev);
-
- clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
}
static void hclge_update_vport_alive(struct hclge_dev *hdev)
@@ -3603,25 +3960,62 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev)
}
}
-static void hclge_service_task(struct work_struct *work)
+static void hclge_periodic_service_task(struct hclge_dev *hdev)
{
- struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, service_task);
+ unsigned long delta = round_jiffies_relative(HZ);
- if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
- hclge_update_stats_for_all(hdev);
- hdev->hw_stats.stats_timer = 0;
+ /* Always handle the link updating to make sure link state is
+ * updated when it is triggered by mbx.
+ */
+ hclge_update_link_status(hdev);
+
+ if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+ delta = jiffies - hdev->last_serv_processed;
+
+ if (delta < round_jiffies_relative(HZ)) {
+ delta = round_jiffies_relative(HZ) - delta;
+ goto out;
+ }
}
- hclge_update_port_info(hdev);
- hclge_update_link_status(hdev);
+ hdev->serv_processed_cnt++;
hclge_update_vport_alive(hdev);
+
+ if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
+ hdev->last_serv_processed = jiffies;
+ goto out;
+ }
+
+ if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
+ hclge_update_stats_for_all(hdev);
+
+ hclge_update_port_info(hdev);
hclge_sync_vlan_filter(hdev);
- if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
+
+ if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
hclge_rfs_filter_expire(hdev);
- hdev->fd_arfs_expire_timer = 0;
- }
- hclge_service_complete(hdev);
+
+ hdev->last_serv_processed = jiffies;
+
+out:
+ hclge_task_schedule(hdev, delta);
+}
+
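
The time check above throttles the periodic pass to roughly once per second: when the last pass ran less than an interval ago, the task reschedules itself for only the remaining time. A minimal model of that delay computation, with plain integers standing in for jiffies:

#include <stdio.h>

/* how long to wait before the next periodic pass may run */
static unsigned long next_delay(unsigned long now, unsigned long last,
				unsigned long interval)
{
	unsigned long delta = now - last;

	return delta < interval ? interval - delta : 0;
}

int main(void)
{
	/* last pass ran 30 ticks ago, interval is 100 ticks -> wait 70 */
	printf("delay = %lu\n", next_delay(130, 100, 100));
	return 0;
}
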
+static void hclge_service_task(struct work_struct *work)
+{
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, service_task.work);
+
+ hclge_reset_service_task(hdev);
+ hclge_mailbox_service_task(hdev);
+ hclge_periodic_service_task(hdev);
+
+ /* Handle reset and mbx again in case the periodic task delays the
+ * handling by calling hclge_task_schedule() in
+ * hclge_periodic_service_task().
+ */
+ hclge_reset_service_task(hdev);
+ hclge_mailbox_service_task(hdev);
}
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
@@ -3644,6 +4038,7 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
int alloc = 0;
int i, j;
+ vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
vector_num = min(hdev->num_msi_left, vector_num);
for (j = 0; j < vector_num; j++) {
@@ -3691,7 +4086,7 @@ static int hclge_put_vector(struct hnae3_handle *handle, int vector)
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&hdev->pdev->dev,
- "Get vector index fail. vector_id =%d\n", vector_id);
+ "Get vector index fail. vector = %d\n", vector);
return vector_id;
}
@@ -4121,7 +4516,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
*/
if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
dev_err(&hdev->pdev->dev,
- "Configure rss tc size failed, invalid TC_SIZE = %d\n",
+ "Configure rss tc size failed, invalid TC_SIZE = %u\n",
rss_size);
return -EINVAL;
}
@@ -4197,8 +4592,8 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
- struct hclge_ctrl_vector_chain_cmd *req
- = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ struct hclge_ctrl_vector_chain_cmd *req =
+ (struct hclge_ctrl_vector_chain_cmd *)desc.data;
enum hclge_cmd_status status;
enum hclge_opcode_type op;
u16 tqp_type_and_id;
@@ -4266,7 +4661,7 @@ static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&hdev->pdev->dev,
- "Get vector index fail. vector_id =%d\n", vector_id);
+ "failed to get vector index. vector=%d\n", vector);
return vector_id;
}
@@ -4299,8 +4694,8 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
return ret;
}
-int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
- struct hclge_promisc_param *param)
+static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
+ struct hclge_promisc_param *param)
{
struct hclge_promisc_cfg_cmd *req;
struct hclge_desc desc;
@@ -4327,8 +4722,9 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
return ret;
}
-void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
- bool en_mc, bool en_bc, int vport_id)
+static void hclge_promisc_param_init(struct hclge_promisc_param *param,
+ bool en_uc, bool en_mc, bool en_bc,
+ int vport_id)
{
if (!param)
return;
@@ -4343,12 +4739,21 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
param->vf_id = vport_id;
}
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
+ bool en_mc_pmc, bool en_bc_pmc)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_promisc_param param;
+
+ hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
+ vport->vport_id);
+ return hclge_cmd_set_promisc_mode(hdev, &param);
+}
+
static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- struct hclge_promisc_param param;
bool en_bc_pmc = true;
/* For revision 0x20, if broadcast promisc enabled, vlan filter is
@@ -4358,9 +4763,8 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
if (handle->pdev->revision == 0x20)
en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
- hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
- vport->vport_id);
- return hclge_cmd_set_promisc_mode(hdev, &param);
+ return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
+ en_bc_pmc);
}
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
@@ -4462,7 +4866,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
break;
default:
dev_err(&hdev->pdev->dev,
- "Unsupported flow director mode %d\n",
+ "Unsupported flow director mode %u\n",
hdev->fd_cfg.fd_mode);
return -EOPNOTSUPP;
}
@@ -4792,7 +5196,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
true);
if (ret) {
dev_err(&hdev->pdev->dev,
- "fd key_y config fail, loc=%d, ret=%d\n",
+ "fd key_y config fail, loc=%u, ret=%d\n",
rule->queue_id, ret);
return ret;
}
@@ -4801,7 +5205,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
true);
if (ret)
dev_err(&hdev->pdev->dev,
- "fd key_x config fail, loc=%d, ret=%d\n",
+ "fd key_x config fail, loc=%u, ret=%d\n",
rule->queue_id, ret);
return ret;
}
@@ -5050,7 +5454,7 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
}
} else if (!is_add) {
dev_err(&hdev->pdev->dev,
- "delete fail, rule %d is inexistent\n",
+ "delete fail, rule %u is inexistent\n",
location);
return -EINVAL;
}
@@ -5290,7 +5694,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
- "Error: vf id (%d) > max vf num (%d)\n",
+ "Error: vf id (%u) > max vf num (%u)\n",
vf, hdev->num_req_vfs);
return -EINVAL;
}
@@ -5300,7 +5704,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
if (ring >= tqps) {
dev_err(&hdev->pdev->dev,
- "Error: queue id (%d) > max tqp num (%d)\n",
+ "Error: queue id (%u) > max tqp num (%u)\n",
ring, tqps - 1);
return -EINVAL;
}
@@ -5359,7 +5763,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
if (!hclge_fd_rule_exist(hdev, fs->location)) {
dev_err(&hdev->pdev->dev,
- "Delete fail, rule %d is inexistent\n", fs->location);
+ "Delete fail, rule %u is inexistent\n", fs->location);
return -ENOENT;
}
@@ -5436,7 +5840,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
if (ret) {
dev_warn(&hdev->pdev->dev,
- "Restore rule %d failed, remove it\n",
+ "Restore rule %u failed, remove it\n",
rule->location);
clear_bit(rule->location, hdev->fd_bmap);
hlist_del(&rule->rule_node);
@@ -5709,6 +6113,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
struct hclge_fd_rule_tuples *tuples)
{
+#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
+#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
+
tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
tuples->ip_proto = fkeys->basic.ip_proto;
tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
@@ -5717,12 +6124,12 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
} else {
- memcpy(tuples->src_ip,
- fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
- sizeof(tuples->src_ip));
- memcpy(tuples->dst_ip,
- fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
- sizeof(tuples->dst_ip));
+ int i;
+
+ for (i = 0; i < IPV6_SIZE; i++) {
+ tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
+ tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
+ }
}
}
@@ -5808,7 +6215,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
return -ENOSPC;
}
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
if (!rule) {
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -5921,7 +6328,7 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
bool clear;
hdev->fd_en = enable;
- clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE ? true : false;
+ clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
if (!enable)
hclge_del_all_fd_entries(handle, clear);
else
@@ -5959,6 +6366,101 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
"mac enable fail, ret =%d.\n", ret);
}
+static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
+ u8 switch_param, u8 param_mask)
+{
+ struct hclge_mac_vlan_switch_cmd *req;
+ struct hclge_desc desc;
+ u32 func_id;
+ int ret;
+
+ func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
+ req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
+
+ /* read current config parameter */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
+ true);
+ req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
+ req->func_id = cpu_to_le32(func_id);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "read mac vlan switch parameter fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ /* modify and write new config parameter */
+ hclge_cmd_reuse_desc(&desc, false);
+ req->switch_param = (req->switch_param & param_mask) | switch_param;
+ req->param_mask = param_mask;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "set mac vlan switch parameter fail, ret = %d\n", ret);
+ return ret;
+}
+
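
hclge_config_switch_param() above is a read-modify-write against firmware state: read the current switch parameter, combine it with the new bits under the mask, and write the result back together with the mask so the firmware only changes those bits. A standalone sketch of the bit manipulation only; the values are hypothetical:

#include <stdio.h>

static unsigned char update_switch_param(unsigned char cur,
					 unsigned char value,
					 unsigned char mask)
{
	/* same combination as the hunk: mask the current value,
	 * then OR in the new bits
	 */
	return (unsigned char)((cur & mask) | value);
}

int main(void)
{
	/* e.g. setting a hypothetical allow-loopback bit (bit 1) */
	printf("param = 0x%x\n", update_switch_param(0x04, 0x02, 0x02));
	return 0;
}
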
+static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
+ int link_ret)
+{
+#define HCLGE_PHY_LINK_STATUS_NUM 200
+
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int i = 0;
+ int ret;
+
+ do {
+ ret = phy_read_status(phydev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "phy update link status fail, ret = %d\n", ret);
+ return;
+ }
+
+ if (phydev->link == link_ret)
+ break;
+
+ msleep(HCLGE_LINK_STATUS_MS);
+ } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
+}
+
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
+{
+#define HCLGE_MAC_LINK_STATUS_NUM 100
+
+ int i = 0;
+ int ret;
+
+ do {
+ ret = hclge_get_mac_link_status(hdev);
+ if (ret < 0)
+ return ret;
+ else if (ret == link_ret)
+ return 0;
+
+ msleep(HCLGE_LINK_STATUS_MS);
+ } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
+ return -EBUSY;
+}
+
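
hclge_mac_link_status_wait() above is a bounded poll: re-read the MAC link state up to 100 times, 10 ms apart, and return -EBUSY if the expected state never appears. A standalone sketch with get_link() as a stand-in for the MAC query:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define POLL_NUM 100	/* maximum number of reads */
#define POLL_MS  10	/* sleep between reads, in ms */

static int get_link(void)
{
	return 1;	/* stand-in: pretend the link is up */
}

static int link_status_wait(int expected)
{
	int i = 0;

	do {
		int ret = get_link();

		if (ret < 0)
			return ret;	/* query itself failed */
		else if (ret == expected)
			return 0;	/* reached the desired state */

		usleep(POLL_MS * 1000);
	} while (++i < POLL_NUM);

	return -EBUSY;			/* timed out */
}

int main(void)
{
	printf("wait: %d\n", link_status_wait(1));
	return 0;
}
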
+static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
+ bool is_phy)
+{
+#define HCLGE_LINK_STATUS_DOWN 0
+#define HCLGE_LINK_STATUS_UP 1
+
+ int link_ret;
+
+ link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
+
+ if (is_phy)
+ hclge_phy_link_status_wait(hdev, link_ret);
+
+ return hclge_mac_link_status_wait(hdev, link_ret);
+}
+
static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
struct hclge_config_mac_mode_cmd *req;
@@ -5995,20 +6497,14 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
-static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
+static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS 10
#define HCLGE_SERDES_RETRY_NUM 100
-#define HCLGE_MAC_LINK_STATUS_MS 10
-#define HCLGE_MAC_LINK_STATUS_NUM 100
-#define HCLGE_MAC_LINK_STATUS_DOWN 0
-#define HCLGE_MAC_LINK_STATUS_UP 1
-
struct hclge_serdes_lb_cmd *req;
struct hclge_desc desc;
- int mac_link_ret = 0;
int ret, i = 0;
u8 loop_mode_b;
@@ -6031,10 +6527,8 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
if (en) {
req->enable = loop_mode_b;
req->mask = loop_mode_b;
- mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
} else {
req->mask = loop_mode_b;
- mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -6064,21 +6558,84 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
return -EIO;
}
+ return ret;
+}
+
+static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
+{
+ int ret;
+
+ ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
+ if (ret)
+ return ret;
hclge_cfg_mac_mode(hdev, en);
- i = 0;
- do {
- /* serdes Internal loopback, independent of the network cable.*/
- msleep(HCLGE_MAC_LINK_STATUS_MS);
- ret = hclge_get_mac_link_status(hdev);
- if (ret == mac_link_ret)
- return 0;
- } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
+ ret = hclge_mac_phy_link_status_wait(hdev, en, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "serdes loopback config mac mode timeout\n");
- dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
+ return ret;
+}
- return -EBUSY;
+static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ if (!phydev->suspended) {
+ ret = phy_suspend(phydev);
+ if (ret)
+ return ret;
+ }
+
+ ret = phy_resume(phydev);
+ if (ret)
+ return ret;
+
+ return phy_loopback(phydev, true);
+}
+
+static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_loopback(phydev, false);
+ if (ret)
+ return ret;
+
+ return phy_suspend(phydev);
+}
+
+static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int ret;
+
+ if (!phydev)
+ return -ENOTSUPP;
+
+ if (en)
+ ret = hclge_enable_phy_loopback(hdev, phydev);
+ else
+ ret = hclge_disable_phy_loopback(hdev, phydev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set phy loopback fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ hclge_cfg_mac_mode(hdev, en);
+
+ ret = hclge_mac_phy_link_status_wait(hdev, en, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "phy loopback config mac mode timeout\n");
+
+ return ret;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
@@ -6110,6 +6667,20 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
int i, ret;
+ /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
+ * default, SSU loopback is enabled, so if the SMAC and the DMAC are
+ * the same, the packets are looped back in the SSU. If SSU loopback
+ * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
+ */
+ if (hdev->pdev->revision >= 0x21) {
+ u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
+
+ ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
+ HCLGE_SWITCH_ALW_LPBK_MASK);
+ if (ret)
+ return ret;
+ }
+
switch (loop_mode) {
case HNAE3_LOOP_APP:
ret = hclge_set_app_loopback(hdev, en);
@@ -6118,6 +6689,9 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
case HNAE3_LOOP_PARALLEL_SERDES:
ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
break;
+ case HNAE3_LOOP_PHY:
+ ret = hclge_set_phy_loopback(hdev, en);
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
@@ -6138,6 +6712,22 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
return 0;
}
+static int hclge_set_default_loopback(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_set_app_loopback(hdev, false);
+ if (ret)
+ return ret;
+
+ ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
+ if (ret)
+ return ret;
+
+ return hclge_cfg_serdes_loopback(hdev, false,
+ HNAE3_LOOP_PARALLEL_SERDES);
+}
+
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -6154,17 +6744,33 @@ static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
}
}
+static void hclge_flush_link_update(struct hclge_dev *hdev)
+{
+#define HCLGE_FLUSH_LINK_TIMEOUT 100000
+
+ unsigned long last = hdev->serv_processed_cnt;
+ int i = 0;
+
+ while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
+ i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
+ last == hdev->serv_processed_cnt)
+ usleep_range(1, 1);
+}
+
static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
if (enable) {
- mod_timer(&hdev->service_timer, jiffies + HZ);
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
} else {
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+ /* Set the DOWN flag here to disable link updating */
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+ /* flush memory to make sure DOWN is seen by service task */
+ smp_mb__before_atomic();
+ hclge_flush_link_update(hdev);
}
}
@@ -6202,12 +6808,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
hdev->reset_type != HNAE3_FUNC_RESET) {
hclge_mac_stop_phy(hdev);
+ hclge_update_link_status(hdev);
return;
}
for (i = 0; i < handle->kinfo.num_tqps; i++)
hclge_reset_tqp(handle, i);
+ hclge_config_mac_tnl_int(hdev, false);
+
/* Mac disable */
hclge_cfg_mac_mode(hdev, false);
@@ -6249,63 +6858,63 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
enum hclge_mac_vlan_tbl_opcode op)
{
struct hclge_dev *hdev = vport->back;
- int return_status = -EIO;
if (cmdq_resp) {
dev_err(&hdev->pdev->dev,
- "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
+ "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
cmdq_resp);
return -EIO;
}
if (op == HCLGE_MAC_VLAN_ADD) {
if ((!resp_code) || (resp_code == 1)) {
- return_status = 0;
+ return 0;
} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
- return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for uc_overflow.\n");
+ return -ENOSPC;
} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
- return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for mc_overflow.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "add mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOSPC;
}
+
+ dev_err(&hdev->pdev->dev,
+ "add mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
} else if (op == HCLGE_MAC_VLAN_REMOVE) {
if (!resp_code) {
- return_status = 0;
+ return 0;
} else if (resp_code == 1) {
- return_status = -ENOENT;
dev_dbg(&hdev->pdev->dev,
"remove mac addr failed for miss.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "remove mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOENT;
}
+
+ dev_err(&hdev->pdev->dev,
+ "remove mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
} else if (op == HCLGE_MAC_VLAN_LKUP) {
if (!resp_code) {
- return_status = 0;
+ return 0;
} else if (resp_code == 1) {
- return_status = -ENOENT;
dev_dbg(&hdev->pdev->dev,
"lookup mac addr failed for miss.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "lookup mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOENT;
}
- } else {
- return_status = -EINVAL;
+
dev_err(&hdev->pdev->dev,
- "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
- op);
+ "lookup mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
}
- return return_status;
+ dev_err(&hdev->pdev->dev,
+ "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
+
+ return -EINVAL;
}
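The early-return rewrite above keeps the original resp_code mapping intact; summarized for reference (derived directly from the code):

    /*
     * op                     resp_code              return value
     * HCLGE_MAC_VLAN_ADD     0 or 1                 0
     *                        HCLGE_ADD_UC_OVERFLOW  -ENOSPC
     *                        HCLGE_ADD_MC_OVERFLOW  -ENOSPC
     *                        anything else          -EIO
     * HCLGE_MAC_VLAN_REMOVE  0                      0
     *   and _LKUP            1 (entry miss)         -ENOENT
     *                        anything else          -EIO
     * unknown op             -                      -EINVAL
     */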
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
@@ -6504,7 +7113,7 @@ static int hclge_init_umv_space(struct hclge_dev *hdev)
if (allocated_size < hdev->wanted_umv_size)
dev_warn(&hdev->pdev->dev,
- "Alloc umv space failed, want %d, get %d\n",
+ "Alloc umv space failed, want %u, get %u\n",
hdev->wanted_umv_size, allocated_size);
mutex_init(&hdev->umv_mutex);
@@ -6672,7 +7281,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
/* check if we just hit the duplicate */
if (!ret) {
- dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
+ dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
vport->vport_id, addr);
return 0;
}
@@ -6853,7 +7462,7 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
list_for_each_entry_safe(mac_cfg, tmp, list, node) {
- if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
if (uc_flag && mac_cfg->hd_tbl_status)
hclge_rm_uc_addr_common(vport, mac_addr);
@@ -6897,7 +7506,6 @@ void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
struct hclge_vport *vport;
int i;
- mutex_lock(&hdev->vport_cfg_mutex);
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
@@ -6910,7 +7518,6 @@ void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
kfree(mac);
}
}
- mutex_unlock(&hdev->vport_cfg_mutex);
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
@@ -6925,7 +7532,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
if (cmdq_resp) {
dev_err(&hdev->pdev->dev,
- "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
cmdq_resp);
return -EIO;
}
@@ -6947,7 +7554,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
break;
default:
dev_err(&hdev->pdev->dev,
- "add mac ethertype failed for undefined, code=%d.\n",
+ "add mac ethertype failed for undefined, code=%u.\n",
resp_code);
return_status = -EIO;
}
@@ -6955,6 +7562,67 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
return return_status;
}
+static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
+ u8 *mac_addr)
+{
+ struct hclge_mac_vlan_tbl_entry_cmd req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u16 egress_port = 0;
+ int i;
+
+ if (is_zero_ether_addr(mac_addr))
+ return false;
+
+ memset(&req, 0, sizeof(req));
+ hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+ HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
+ req.egress_port = cpu_to_le16(egress_port);
+ hclge_prepare_mac_addr(&req, mac_addr, false);
+
+ if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
+ return true;
+
+ vf_idx += HCLGE_VF_VPORT_START_NUM;
+ for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
+ if (i != vf_idx &&
+ ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
+ return true;
+
+ return false;
+}
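Note that the duplicate check above is deliberately two-level: first a hardware MAC-VLAN table lookup scoped to this vport's egress port (any result other than -ENOENT counts as a hit), then a scan over the soft vf_info.mac copies of every other VF vport, since a MAC assigned through set_vf_mac may not have been written to hardware yet.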
+
+static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
+ u8 *mac_addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
+ dev_info(&hdev->pdev->dev,
+ "Specified MAC(=%pM) is same as before, no change committed!\n",
+ mac_addr);
+ return 0;
+ }
+
+ if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
+ dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
+ mac_addr);
+ return -EEXIST;
+ }
+
+ ether_addr_copy(vport->vf_info.mac, mac_addr);
+ dev_info(&hdev->pdev->dev,
+ "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
+ vf, mac_addr);
+
+ return hclge_inform_reset_assert_to_vf(vport);
+}
+
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
@@ -7019,7 +7687,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
is_broadcast_ether_addr(new_addr) ||
is_multicast_ether_addr(new_addr)) {
dev_err(&hdev->pdev->dev,
- "Change uc mac err! invalid mac:%p.\n",
+ "Change uc mac err! invalid mac:%pM.\n",
new_addr);
return -EINVAL;
}
@@ -7124,10 +7792,10 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
- bool is_kill, u16 vlan, u8 qos,
+ bool is_kill, u16 vlan,
__be16 proto)
{
-#define HCLGE_MAX_VF_BYTES 16
+ struct hclge_vport *vport = &hdev->vport[vfid];
struct hclge_vlan_filter_vf_cfg_cmd *req0;
struct hclge_vlan_filter_vf_cfg_cmd *req1;
struct hclge_desc desc[2];
@@ -7136,10 +7804,18 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
int ret;
/* if vf vlan table is full, firmware will close vf vlan filter, it
- * is unable and unnecessary to add new vlan id to vf vlan filter
+ * is futile and unnecessary to add a new vlan id to the vf vlan filter.
+ * If spoof check is enabled and the vf vlan table is full, we should not
+ * add a new vlan, because tx packets with these vlan ids will be dropped.
*/
- if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+ if (vport->vf_info.spoofchk && vlan) {
+ dev_err(&hdev->pdev->dev,
+ "Can't add vlan due to spoof check is on and vf vlan table is full\n");
+ return -EPERM;
+ }
return 0;
+ }
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
@@ -7183,7 +7859,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
}
dev_err(&hdev->pdev->dev,
- "Add vf vlan filter fail, ret =%d.\n",
+ "Add vf vlan filter fail, ret =%u.\n",
req0->resp_code);
} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
@@ -7199,7 +7875,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
return 0;
dev_err(&hdev->pdev->dev,
- "Kill vf vlan filter fail, ret =%d.\n",
+ "Kill vf vlan filter fail, ret =%u.\n",
req0->resp_code);
}
@@ -7218,9 +7894,10 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
- vlan_offset_160 = vlan_id / 160;
- vlan_offset_byte = (vlan_id % 160) / 8;
- vlan_offset_byte_val = 1 << (vlan_id % 8);
+ vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
+ vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
+ HCLGE_VLAN_BYTE_SIZE;
+ vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
req->vlan_offset = vlan_offset_160;
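A worked example of the offset math above, using the values the replaced literals confirm (HCLGE_VLAN_ID_OFFSET_STEP == 160, HCLGE_VLAN_BYTE_SIZE == 8):

    /*
     * vlan_id = 1000:
     *   vlan_offset_160      = 1000 / 160       = 6
     *   vlan_offset_byte     = (1000 % 160) / 8 = 5
     *   vlan_offset_byte_val = 1 << (1000 % 8)  = 0x01
     *
     * i.e. vlan 1000 is bit 0 of byte 5 within the 7th 160-vlan block.
     */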
@@ -7235,7 +7912,7 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
- u16 vport_id, u16 vlan_id, u8 qos,
+ u16 vport_id, u16 vlan_id,
bool is_kill)
{
u16 vport_idx, vport_num = 0;
@@ -7245,10 +7922,10 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return 0;
ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
- 0, proto);
+ proto);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Set %d vport vlan filter config fail, ret =%d.\n",
+ "Set %u vport vlan filter config fail, ret =%d.\n",
vport_id, ret);
return ret;
}
@@ -7260,7 +7937,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_err(&hdev->pdev->dev,
- "Add port vlan failed, vport %d is already in vlan %d\n",
+ "Add port vlan failed, vport %u is already in vlan %u\n",
vport_id, vlan_id);
return -EINVAL;
}
@@ -7268,7 +7945,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (is_kill &&
!test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_err(&hdev->pdev->dev,
- "Delete port vlan failed, vport %d is not in vlan %d\n",
+ "Delete port vlan failed, vport %u is not in vlan %u\n",
vport_id, vlan_id);
return -EINVAL;
}
@@ -7289,6 +7966,7 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
struct hclge_vport_vtag_tx_cfg_cmd *req;
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
+ u16 bmap_index;
int status;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
@@ -7311,8 +7989,10 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
- req->vf_bitmap[req->vf_offset] =
- 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+ bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
+ HCLGE_VF_NUM_PER_BYTE;
+ req->vf_bitmap[bmap_index] =
+ 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
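The bmap_index change above fixes the indexing for vport ids beyond one command's worth of VFs; a worked case, assuming HCLGE_VF_NUM_PER_CMD is 64 and HCLGE_VF_NUM_PER_BYTE is 8 (an 8-byte bitmap per command, which is what the modulo structure implies):

    /*
     * vport_id = 70:
     *   vf_offset  = 70 / 64        = 1       (second command block)
     *   bmap_index = (70 % 64) / 8  = 0       (first byte of that block)
     *   bit        = 1U << (70 % 8) = 1 << 6
     *
     * the old code indexed vf_bitmap[vf_offset], which conflated the
     * command-block offset with the byte offset inside the bitmap.
     */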
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
@@ -7329,6 +8009,7 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
struct hclge_vport_vtag_rx_cfg_cmd *req;
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
+ u16 bmap_index;
int status;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
@@ -7344,8 +8025,10 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
vcfg->vlan2_vlan_prionly ? 1 : 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
- req->vf_bitmap[req->vf_offset] =
- 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+ bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
+ HCLGE_VF_NUM_PER_BYTE;
+ req->vf_bitmap[bmap_index] =
+ 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
@@ -7532,7 +8215,7 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
if (!vlan->hd_tbl_status) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id,
- vlan->vlan_id, 0, false);
+ vlan->vlan_id, false);
if (ret) {
dev_err(&hdev->pdev->dev,
"restore vport vlan list failed, ret=%d\n",
@@ -7558,7 +8241,7 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
- vlan_id, 0,
+ vlan_id,
true);
list_del(&vlan->node);
@@ -7578,7 +8261,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
- vlan->vlan_id, 0,
+ vlan->vlan_id,
true);
vlan->hd_tbl_status = false;
@@ -7595,7 +8278,6 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
struct hclge_vport *vport;
int i;
- mutex_lock(&hdev->vport_cfg_mutex);
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
@@ -7603,7 +8285,6 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
kfree(vlan);
}
}
- mutex_unlock(&hdev->vport_cfg_mutex);
}
static void hclge_restore_vlan_table(struct hnae3_handle *handle)
@@ -7611,36 +8292,35 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
- u16 vlan_proto, qos;
+ u16 vlan_proto;
u16 state, vlan_id;
int i;
- mutex_lock(&hdev->vport_cfg_mutex);
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- qos = vport->port_base_vlan_cfg.vlan_info.qos;
state = vport->port_base_vlan_cfg.state;
if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
- vport->vport_id, vlan_id, qos,
+ vport->vport_id, vlan_id,
false);
continue;
}
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
- if (vlan->hd_tbl_status)
- hclge_set_vlan_filter_hw(hdev,
- htons(ETH_P_8021Q),
- vport->vport_id,
- vlan->vlan_id, 0,
- false);
+ int ret;
+
+ if (!vlan->hd_tbl_status)
+ continue;
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret)
+ break;
}
}
-
- mutex_unlock(&hdev->vport_cfg_mutex);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
@@ -7675,12 +8355,12 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
htons(new_info->vlan_proto),
vport->vport_id,
new_info->vlan_tag,
- new_info->qos, false);
+ false);
}
ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
vport->vport_id, old_info->vlan_tag,
- old_info->qos, true);
+ true);
if (ret)
return ret;
@@ -7707,7 +8387,7 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
htons(vlan_info->vlan_proto),
vport->vport_id,
vlan_info->vlan_tag,
- vlan_info->qos, false);
+ false);
if (ret)
return ret;
@@ -7716,7 +8396,7 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
htons(old_vlan_info->vlan_proto),
vport->vport_id,
old_vlan_info->vlan_tag,
- old_vlan_info->qos, true);
+ true);
if (ret)
return ret;
@@ -7774,13 +8454,16 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
if (hdev->pdev->revision == 0x20)
return -EOPNOTSUPP;
+ vport = hclge_get_vf_vport(hdev, vfid);
+ if (!vport)
+ return -EINVAL;
+
/* qos is a 3-bit value, so it cannot be bigger than 7 */
- if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
+ if (vlan > VLAN_N_VID - 1 || qos > 7)
return -EINVAL;
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
- vport = &hdev->vport[vfid];
state = hclge_get_port_base_vlan_state(vport,
vport->port_base_vlan_cfg.state,
vlan);
@@ -7791,21 +8474,12 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
vlan_info.qos = qos;
vlan_info.vlan_proto = ntohs(proto);
- /* update port based VLAN for PF */
- if (!vfid) {
- hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
-
- return ret;
- }
-
if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
return hclge_update_port_base_vlan_cfg(vport, state,
&vlan_info);
} else {
ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
- (u8)vfid, state,
+ vport->vport_id, state,
vlan, qos,
ntohs(proto));
return ret;
@@ -7829,7 +8503,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
return -EBUSY;
}
- /* When port base vlan enabled, we use port base vlan as the vlan
+ /* when port base vlan enabled, we use port base vlan as the vlan
* filter entry. In this case, we don't update vlan filter table
* when user add new vlan or remove exist vlan, just update the vport
* vlan list. The vlan id in vlan list will be writen in vlan filter
@@ -7837,7 +8511,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
*/
if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
- vlan_id, 0, is_kill);
+ vlan_id, is_kill);
writen_to_tbl = true;
}
@@ -7848,7 +8522,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
} else if (is_kill) {
- /* When remove hw vlan filter failed, record the vlan id,
+ /* when remove hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistent
* with the stack
*/
@@ -7873,7 +8547,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
while (vlan_id != VLAN_N_VID) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id, vlan_id,
- 0, true);
+ true);
if (ret && ret != -EINVAL)
return;
@@ -7916,6 +8590,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
struct hclge_dev *hdev = vport->back;
int i, max_frm_size, ret;
+ /* HW supports 2-layer vlan */
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
max_frm_size > HCLGE_MAC_MAX_FRAME)
@@ -8044,11 +8719,12 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
}
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- /* Wait for tqp hw reset */
- msleep(20);
reset_status = hclge_get_reset_status(hdev, queue_gid);
if (reset_status)
break;
+
+ /* Wait for tqp hw reset */
+ usleep_range(1000, 1200);
}
if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
@@ -8082,11 +8758,12 @@ void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
}
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- /* Wait for tqp hw reset */
- msleep(20);
reset_status = hclge_get_reset_status(hdev, queue_gid);
if (reset_status)
break;
+
+ /* Wait for tqp hw reset */
+ usleep_range(1000, 1200);
}
if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
@@ -8122,28 +8799,15 @@ static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
int ret;
- if (rx_en && tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_FULL;
- else if (rx_en && !tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
- else if (!rx_en && tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
- else
- hdev->fc_mode_last_time = HCLGE_FC_NONE;
-
if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
return 0;
ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
- if (ret) {
- dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
- ret);
- return ret;
- }
-
- hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "configure pauseparam error, ret = %d.\n", ret);
- return 0;
+ return ret;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
@@ -8208,6 +8872,21 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
}
}
+static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
+ u32 rx_en, u32 tx_en)
+{
+ if (rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
+ else if (rx_en && !tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
+ else if (!rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
+ else
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+}
+
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
u32 rx_en, u32 tx_en)
{
@@ -8233,6 +8912,8 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+ hclge_record_user_pauseparam(hdev, rx_en, tx_en);
+
if (!auto_neg)
return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
@@ -8325,16 +9006,16 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "PF info begin:\n");
- dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
- dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
- dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
- dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
- dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
- dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
- dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
- dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
- dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
- dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
+ dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
+ dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
+ dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
+ dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
+ dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
+ dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
+ dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
@@ -8350,10 +9031,9 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hnae3_client *client = vport->nic.client;
struct hclge_dev *hdev = ae_dev->priv;
- int rst_cnt;
+ int rst_cnt = hdev->rst_stats.reset_cnt;
int ret;
- rst_cnt = hdev->rst_stats.reset_cnt;
ret = client->ops->init_instance(&vport->nic);
if (ret)
return ret;
@@ -8453,7 +9133,6 @@ static int hclge_init_client_instance(struct hnae3_client *client,
switch (client->type) {
case HNAE3_CLIENT_KNIC:
-
hdev->nic_client = client;
vport->nic.client = client;
ret = hclge_init_nic_client_instance(ae_dev, vport);
@@ -8481,7 +9160,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
}
}
- return ret;
+ return 0;
clear_nic:
hdev->nic_client = NULL;
@@ -8593,6 +9272,7 @@ static void hclge_state_init(struct hclge_dev *hdev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
@@ -8602,44 +9282,61 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
set_bit(HCLGE_STATE_REMOVING, &hdev->state);
- if (hdev->service_timer.function)
- del_timer_sync(&hdev->service_timer);
if (hdev->reset_timer.function)
del_timer_sync(&hdev->reset_timer);
- if (hdev->service_task.func)
- cancel_work_sync(&hdev->service_task);
- if (hdev->rst_service_task.func)
- cancel_work_sync(&hdev->rst_service_task);
- if (hdev->mbx_service_task.func)
- cancel_work_sync(&hdev->mbx_service_task);
+ if (hdev->service_task.work.func)
+ cancel_delayed_work_sync(&hdev->service_task);
}
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
-#define HCLGE_FLR_WAIT_MS 100
-#define HCLGE_FLR_WAIT_CNT 50
- struct hclge_dev *hdev = ae_dev->priv;
- int cnt = 0;
+#define HCLGE_FLR_RETRY_WAIT_MS 500
+#define HCLGE_FLR_RETRY_CNT 5
- clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
- clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
- set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
- hclge_reset_event(hdev->pdev, NULL);
+ struct hclge_dev *hdev = ae_dev->priv;
+ int retry_cnt = 0;
+ int ret;
- while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
- cnt++ < HCLGE_FLR_WAIT_CNT)
- msleep(HCLGE_FLR_WAIT_MS);
+retry:
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = HNAE3_FLR_RESET;
+ ret = hclge_reset_prepare(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
+ ret);
+ if (hdev->reset_pending ||
+ retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
+ dev_err(&hdev->pdev->dev,
+ "reset_pending:0x%lx, retry_cnt:%d\n",
+ hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGE_FLR_RETRY_WAIT_MS);
+ goto retry;
+ }
+ }
- if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
- dev_err(&hdev->pdev->dev,
- "flr wait down timeout: %d\n", cnt);
+ /* disable misc vector before FLR done */
+ hclge_enable_vector(&hdev->misc_vector, false);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ hdev->rst_stats.flr_rst_cnt++;
}
static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
+ int ret;
- set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+ hclge_enable_vector(&hdev->misc_vector, true);
+
+ ret = hclge_reset_rebuild(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
@@ -8654,7 +9351,7 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
if (ret)
dev_warn(&hdev->pdev->dev,
- "clear vf(%d) rst failed %d!\n",
+ "clear vf(%u) rst failed %d!\n",
vport->vport_id, ret);
}
}
@@ -8676,24 +9373,22 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->reset_type = HNAE3_NONE_RESET;
hdev->reset_level = HNAE3_FUNC_RESET;
ae_dev->priv = hdev;
+
+ /* HW supports 2-layer vlan */
hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
mutex_init(&hdev->vport_lock);
- mutex_init(&hdev->vport_cfg_mutex);
spin_lock_init(&hdev->fd_rule_lock);
+ sema_init(&hdev->reset_sem, 1);
ret = hclge_pci_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "PCI init failed\n");
+ if (ret)
goto out;
- }
/* Firmware command queue initialize */
ret = hclge_cmd_queue_init(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
+ if (ret)
goto err_pci_uninit;
- }
/* Firmware command initialize */
ret = hclge_cmd_init(hdev);
@@ -8701,11 +9396,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_cmd_uninit;
ret = hclge_get_cap(hdev);
- if (ret) {
- dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
- ret);
+ if (ret)
goto err_cmd_uninit;
- }
ret = hclge_configure(hdev);
if (ret) {
@@ -8720,12 +9412,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
ret = hclge_misc_irq_init(hdev);
- if (ret) {
- dev_err(&pdev->dev,
- "Misc IRQ(vector0) init error, ret = %d.\n",
- ret);
+ if (ret)
goto err_msi_uninit;
- }
ret = hclge_alloc_tqps(hdev);
if (ret) {
@@ -8734,31 +9422,22 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
ret = hclge_alloc_vport(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
+ if (ret)
goto err_msi_irq_uninit;
- }
ret = hclge_map_tqp(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
+ if (ret)
goto err_msi_irq_uninit;
- }
if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
ret = hclge_mac_mdio_config(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mdio config fail ret=%d\n", ret);
+ if (ret)
goto err_msi_irq_uninit;
- }
}
ret = hclge_init_umv_space(hdev);
- if (ret) {
- dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
+ if (ret)
goto err_mdiobus_unreg;
- }
ret = hclge_mac_init(hdev);
if (ret) {
@@ -8812,11 +9491,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_dcb_ops_set(hdev);
- timer_setup(&hdev->service_timer, hclge_service_timer, 0);
timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
- INIT_WORK(&hdev->service_task, hclge_service_task);
- INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
- INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
+ INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
+
+ /* Setup affinity after service timer setup because add_timer_on
+ * is called in affinity notify.
+ */
+ hclge_misc_affinity_setup(hdev);
hclge_clear_all_event_cause(hdev);
hclge_clear_resetting_state(hdev);
@@ -8842,7 +9523,11 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
- pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
+ dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
+ HCLGE_DRIVER_NAME);
+
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+
return 0;
err_mdiobus_unreg:
@@ -8865,7 +9550,220 @@ out:
static void hclge_stats_clear(struct hclge_dev *hdev)
{
- memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
+ memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
+}
+
+static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
+{
+ return hclge_config_switch_param(hdev, vf, enable,
+ HCLGE_SWITCH_ANTI_SPOOF_MASK);
+}
+
+static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
+{
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_NIC_INGRESS_B,
+ enable, vf);
+}
+
+static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
+{
+ int ret;
+
+ ret = hclge_set_mac_spoofchk(hdev, vf, enable);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Set vf %d mac spoof check %s failed, ret=%d\n",
+ vf, enable ? "on" : "off", ret);
+ return ret;
+ }
+
+ ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Set vf %d vlan spoof check %s failed, ret=%d\n",
+ vf, enable ? "on" : "off", ret);
+
+ return ret;
+}
+
+static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
+ bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 new_spoofchk = enable ? 1 : 0;
+ int ret;
+
+ if (hdev->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (vport->vf_info.spoofchk == new_spoofchk)
+ return 0;
+
+ if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
+ dev_warn(&hdev->pdev->dev,
+ "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
+ vf);
+ else if (enable && hclge_is_umv_space_full(vport))
+ dev_warn(&hdev->pdev->dev,
+ "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
+ vf);
+
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
+ if (ret)
+ return ret;
+
+ vport->vf_info.spoofchk = new_spoofchk;
+ return 0;
+}
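For context: assuming the standard ndo_set_vf_spoofchk plumbing above this layer, an administrator command such as `ip link set <pf> vf 1 spoofchk on` is what ultimately lands here, and it programs two things at once, the MAC anti-spoof switch parameter and the VF's VLAN ingress filter.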
+
+static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ int i;
+
+ if (hdev->pdev->revision == 0x20)
+ return 0;
+
+ /* resume the vf spoof check state after reset */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
+ vport->vf_info.spoofchk);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 new_trusted = enable ? 1 : 0;
+ bool en_bc_pmc;
+ int ret;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (vport->vf_info.trusted == new_trusted)
+ return 0;
+
+ /* Disable promisc mode for VF if it is not trusted any more. */
+ if (!enable && vport->vf_info.promisc_enable) {
+ en_bc_pmc = hdev->pdev->revision != 0x20;
+ ret = hclge_set_vport_promisc_mode(vport, false, false,
+ en_bc_pmc);
+ if (ret)
+ return ret;
+ vport->vf_info.promisc_enable = 0;
+ hclge_inform_vf_promisc_info(vport);
+ }
+
+ vport->vf_info.trusted = new_trusted;
+
+ return 0;
+}
+
+static void hclge_reset_vf_rate(struct hclge_dev *hdev)
+{
+ int ret;
+ int vf;
+
+ /* reset vf rate to default value */
+ for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
+ struct hclge_vport *vport = &hdev->vport[vf];
+
+ vport->vf_info.max_tx_rate = 0;
+ ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "vf%d failed to reset to default, ret=%d\n",
+ vf - HCLGE_VF_VPORT_START_NUM, ret);
+ }
+}
+
+static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ if (min_tx_rate != 0 ||
+ max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
+ dev_err(&hdev->pdev->dev,
+ "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
+ min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
+ int min_tx_rate, int max_tx_rate, bool force)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
+ if (ret)
+ return ret;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
+ return 0;
+
+ ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
+ if (ret)
+ return ret;
+
+ vport->vf_info.max_tx_rate = max_tx_rate;
+
+ return 0;
+}
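hclge_vf_rate_param_check() rejects any nonzero min_tx_rate, so only a ceiling can be configured here; assuming the usual iproute2 plumbing for ndo_set_vf_rate, `ip link set <pf> vf 2 max_tx_rate 1000` would succeed while any min_tx_rate request fails with -EINVAL. The force flag exists so the post-reset resume path can reprogram an unchanged value unconditionally.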
+
+static int hclge_resume_vf_rate(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport->nic;
+ struct hclge_vport *vport;
+ int ret;
+ int vf;
+
+ /* resume the vf max_tx_rate after reset */
+ for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ /* zero means max rate; after reset, the firmware has already set it
+ * to max rate, so just continue.
+ */
+ if (!vport->vf_info.max_tx_rate)
+ continue;
+
+ ret = hclge_set_vf_rate(handle, vf, 0,
+ vport->vf_info.max_tx_rate, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "vf%d failed to resume tx_rate:%u, ret=%d\n",
+ vf, vport->vf_info.max_tx_rate, ret);
+ return ret;
+ }
+ }
+
+ return 0;
}
static void hclge_reset_vport_state(struct hclge_dev *hdev)
@@ -8939,12 +9837,22 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = init_mgr_tbl(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to reinit manager table, ret = %d\n", ret);
+ return ret;
+ }
+
ret = hclge_init_fd_config(hdev);
if (ret) {
dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
return ret;
}
+ /* Log and clear the hw errors that have already occurred */
+ hclge_handle_all_hns_hw_errors(ae_dev);
+
/* Re-enable the hw error interrupts because
* the interrupts get disabled on global reset.
*/
@@ -8967,6 +9875,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
}
hclge_reset_vport_state(hdev);
+ ret = hclge_reset_vport_spoofchk(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_resume_vf_rate(hdev);
+ if (ret)
+ return ret;
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -8979,6 +9894,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
struct hclge_mac *mac = &hdev->hw.mac;
+ hclge_reset_vf_rate(hdev);
+ hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
if (mac->phydev)
@@ -9001,7 +9918,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
mutex_destroy(&hdev->vport_lock);
hclge_uninit_vport_mac_table(hdev);
hclge_uninit_vport_vlan_table(hdev);
- mutex_destroy(&hdev->vport_cfg_mutex);
ae_dev->priv = NULL;
}
@@ -9042,8 +9958,8 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
struct hclge_dev *hdev = vport->back;
u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
- int cur_rss_size = kinfo->rss_size;
- int cur_tqps = kinfo->num_tqps;
+ u16 cur_rss_size = kinfo->rss_size;
+ u16 cur_tqps = kinfo->num_tqps;
u16 tc_valid[HCLGE_MAX_TC_NUM];
u16 roundup_size;
u32 *rss_indir;
@@ -9097,7 +10013,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
out:
if (!ret)
dev_info(&hdev->pdev->dev,
- "Channels changed, rss_size from %d to %d, tqps from %d to %d",
+ "Channels changed, rss_size from %u to %u, tqps from %u to %u",
cur_rss_size, kinfo->rss_size,
cur_tqps, kinfo->rss_size * kinfo->num_tc);
@@ -9238,106 +10154,310 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
}
#define MAX_SEPARATE_NUM 4
-#define SEPARATOR_VALUE 0xFFFFFFFF
+#define SEPARATOR_VALUE 0xFDFCFBFA
#define REG_NUM_PER_LINE 4
#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
+#define REG_SEPARATOR_LINE 1
+#define REG_NUM_REMAIN_MASK 3
+#define BD_LIST_MAX_NUM 30
-static int hclge_get_regs_len(struct hnae3_handle *handle)
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
- int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u32 regs_num_32_bit, regs_num_64_bit;
+ /* prepare 4 commands to query DFX BD number */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
+
+ return hclge_cmd_send(&hdev->hw, desc, 4);
+}
+
+static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
+ int *bd_num_list,
+ u32 type_num)
+{
+ u32 entries_per_desc, desc_index, index, offset, i;
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
int ret;
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return -EOPNOTSUPP;
+ "Get dfx bd num fail, status is %d.\n", ret);
+ return ret;
}
- cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
- common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
- ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
- tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
+ for (i = 0; i < type_num; i++) {
+ offset = hclge_dfx_bd_offset_list[i];
+ index = offset % entries_per_desc;
+ desc_index = offset / entries_per_desc;
+ bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
+ }
- return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
- tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
- regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+ return ret;
}
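A worked view of the offset-to-descriptor mapping above, assuming struct hclge_desc carries six u32 data words (entries_per_desc == 6), which is why four query descriptors comfortably cover offsets 1..12:

    /*
     * offset  1 (BIOS)  -> desc_index 0, index 1 -> desc[0].data[1]
     * offset  6 (RPU_1) -> desc_index 1, index 0 -> desc[1].data[0]
     * offset 12 (SSU_2) -> desc_index 2, index 0 -> desc[2].data[0]
     */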
-static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
- void *data)
+static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc_src, int bd_num,
+ enum hclge_opcode_type cmd)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u32 regs_num_32_bit, regs_num_64_bit;
- int i, j, reg_um, separator_num;
+ struct hclge_desc *desc = desc_src;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ for (i = 0; i < bd_num - 1; i++) {
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc++;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ }
+
+ desc = desc_src;
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
+ cmd, ret);
+
+ return ret;
+}
+
+static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
+ void *data)
+{
+ int entries_per_desc, reg_num, separator_num, desc_index, index, i;
+ struct hclge_desc *desc = desc_src;
u32 *reg = data;
+
+ entries_per_desc = ARRAY_SIZE(desc->data);
+ reg_num = entries_per_desc * bd_num;
+ separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++) {
+ index = i % entries_per_desc;
+ desc_index = i / entries_per_desc;
+ *reg++ = le32_to_cpu(desc[desc_index].data[index]);
+ }
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ return reg_num + separator_num;
+}
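The separator arithmetic above always pads the dumped words out to a whole 4-word line; two worked cases (note that an exact multiple of four still gets a full separator line rather than none):

    /*
     * reg_num = 10: 10 & 3 = 2 -> separator_num = 2 -> 12 words = 3 lines
     * reg_num = 12: 12 & 3 = 0 -> separator_num = 4 -> 16 words = 4 lines
     */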
+
+static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int data_len_per_desc, data_len, bd_num, i;
+ int bd_num_list[BD_LIST_MAX_NUM];
int ret;
- *version = hdev->fw_version;
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ return ret;
+ }
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ data_len_per_desc = sizeof_field(struct hclge_desc, data);
+ *len = 0;
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ data_len = data_len_per_desc * bd_num;
+ *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
+ }
+
+ return ret;
+}
+
+static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int bd_num, bd_num_max, buf_len, i;
+ int bd_num_list[BD_LIST_MAX_NUM];
+ struct hclge_desc *desc_src;
+ u32 *reg = data;
+ int ret;
+
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return;
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ return ret;
+ }
+
+ bd_num_max = bd_num_list[0];
+ for (i = 1; i < dfx_reg_type_num; i++)
+ bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
+
+ buf_len = sizeof(*desc_src) * bd_num_max;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src)
+ return -ENOMEM;
+
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
+ hclge_dfx_reg_opcode_list[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg fail, status is %d.\n", ret);
+ break;
+ }
+
+ reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
}
+ kfree(desc_src);
+ return ret;
+}
+
+static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+ struct hnae3_knic_private_info *kinfo)
+{
+#define HCLGE_RING_REG_OFFSET 0x200
+#define HCLGE_RING_INT_REG_OFFSET 0x4
+
+ int i, j, reg_num, separator_num;
+ int data_num_sum;
+ u32 *reg = data;
+
/* fetching per-PF register values from PF PCIe register space */
- reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
+ data_num_sum = reg_num + separator_num;
- reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
+ data_num_sum += reg_num + separator_num;
- reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
for (j = 0; j < kinfo->num_tqps; j++) {
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw,
ring_reg_addr_list[i] +
- 0x200 * j);
+ HCLGE_RING_REG_OFFSET * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
+ data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
- reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
for (j = 0; j < hdev->num_msi_used - 1; j++) {
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw,
tqp_intr_reg_addr_list[i] +
- 4 * j);
+ HCLGE_RING_INT_REG_OFFSET * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
+ data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
+
+ return data_num_sum;
+}
+
+static int hclge_get_regs_len(struct hnae3_handle *handle)
+{
+ int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
+ int regs_lines_32_bit, regs_lines_64_bit;
+ int ret;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg len failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+
+ return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
+ tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
+ regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
+}
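As a cross-check against the padding rule in hclge_dfx_reg_fetch_data(): a 10-register address list is 40 bytes, so sizeof(list) / REG_LEN_PER_LINE + REG_SEPARATOR_LINE = 2 + 1 = 3 lines = 12 words, exactly the 10 data words plus 2 separators the fetch side emits; a 12-register list gives 3 + 1 = 4 lines = 16 words, matching 12 data words plus a full 4-word separator line.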
+
+static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ int i, reg_num, separator_num, ret;
+ u32 *reg = data;
+
+ *version = hdev->fw_version;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return;
+ }
+
+ reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
- /* fetching PF common registers values from firmware */
ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get 32 bit register failed, ret = %d.\n", ret);
return;
}
+ reg_num = regs_num_32_bit;
+ reg += reg_num;
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
- reg += regs_num_32_bit;
ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"Get 64 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+ reg_num = regs_num_64_bit * 2;
+ reg += reg_num;
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ ret = hclge_get_dfx_reg(hdev, reg);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Get dfx register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
@@ -9452,7 +10572,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_mtu = hclge_set_mtu,
.reset_queue = hclge_reset_tqp,
.get_stats = hclge_get_stats,
- .get_mac_pause_stats = hclge_get_mac_pause_stat,
+ .get_mac_stats = hclge_get_mac_stat,
.update_stats = hclge_update_stats,
.get_strings = hclge_get_strings,
.get_sset_count = hclge_get_sset_count,
@@ -9492,6 +10612,12 @@ static const struct hnae3_ae_ops hclge_ops = {
.mac_connect_phy = hclge_mac_connect_phy,
.mac_disconnect_phy = hclge_mac_disconnect_phy,
.restore_vlan_table = hclge_restore_vlan_table,
+ .get_vf_config = hclge_get_vf_config,
+ .set_vf_link_state = hclge_set_vf_link_state,
+ .set_vf_spoofchk = hclge_set_vf_spoofchk,
+ .set_vf_trust = hclge_set_vf_trust,
+ .set_vf_rate = hclge_set_vf_rate,
+ .set_vf_mac = hclge_set_vf_mac,
};
static struct hnae3_ae_algo ae_algo = {
@@ -9503,6 +10629,12 @@ static int hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
+ if (!hclge_wq) {
+ pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
+ return -ENOMEM;
+ }
+
hnae3_register_ae_algo(&ae_algo);
return 0;
@@ -9511,6 +10643,7 @@ static int hclge_init(void)
static void hclge_exit(void)
{
hnae3_unregister_ae_algo(&ae_algo);
+ destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);