Diffstat (limited to 'drivers/net/ethernet/qlogic')
52 files changed, 4674 insertions, 15488 deletions
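Among other things, the netxen hunks below adapt the driver to the tree-wide change that added a `txqueue` argument to the `ndo_tx_timeout` callback. A minimal sketch of the callback shape a driver now provides (the adapter struct and handler names here are illustrative, not the exact netxen code):

```c
/* Sketch only: the two-argument ndo_tx_timeout shape. The adapter type
 * and names are hypothetical; netxen's real handler schedules its own
 * tx_timeout_task in the same way.
 */
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct example_adapter {
	struct work_struct tx_timeout_task;
};

static void example_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct example_adapter *adapter = netdev_priv(netdev);

	/* txqueue identifies the stalled TX queue; defer the recovery
	 * work instead of resetting the NIC in this context.
	 */
	netdev_err(netdev, "TX queue %u timed out\n", txqueue);
	schedule_work(&adapter->tx_timeout_task);
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_tx_timeout = example_tx_timeout,
};
```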
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index a391cf6ee4b2..55a29ec76680 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -66,15 +66,6 @@ config QLCNIC_HWMON
 	  This data is available via the hwmon sysfs interface.
 
-config QLGE
-	tristate "QLogic QLGE 10Gb Ethernet Driver Support"
-	depends on PCI
-	---help---
-	  This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called qlge.
-
 config NETXEN_NIC
 	tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
 	depends on PCI
diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile
index 6cd2e333a5fc..1ae4a0743bd5 100644
--- a/drivers/net/ethernet/qlogic/Makefile
+++ b/drivers/net/ethernet/qlogic/Makefile
@@ -5,7 +5,6 @@
 obj-$(CONFIG_QLA3XXX) += qla3xxx.o
 obj-$(CONFIG_QLCNIC) += qlcnic/
-obj-$(CONFIG_QLGE) += qlge/
 obj-$(CONFIG_NETXEN_NIC) += netxen/
 obj-$(CONFIG_QED) += qed/
 obj-$(CONFIG_QEDE)+= qede/
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 58e2eaf77014..8067ea04d455 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -49,7 +49,7 @@ static int netxen_nic_open(struct net_device *netdev);
 static int netxen_nic_close(struct net_device *netdev);
 static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
 					 struct net_device *);
-static void netxen_tx_timeout(struct net_device *netdev);
+static void netxen_tx_timeout(struct net_device *netdev, unsigned int txqueue);
 static void netxen_tx_timeout_task(struct work_struct *work);
 static void netxen_fw_poll_work(struct work_struct *work);
 static void netxen_schedule_work(struct netxen_adapter *adapter,
@@ -1980,7 +1980,7 @@ netxen_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
 		  struct netxen_cmd_buffer *pbuf)
 {
 	struct netxen_skb_frag *nf;
-	struct skb_frag_struct *frag;
+	skb_frag_t *frag;
 	int i, nr_frags;
 	dma_addr_t map;
@@ -2043,7 +2043,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	struct pci_dev *pdev;
 	int i, k;
 	int delta = 0;
-	struct skb_frag_struct *frag;
+	skb_frag_t *frag;
 	u32 producer;
 	int frag_count;
@@ -2222,7 +2222,7 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
 	netxen_advert_link_change(adapter, linkup);
 }
 
-static void netxen_tx_timeout(struct net_device *netdev)
+static void netxen_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 {
 	struct netxen_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 89fe091c958d..fa41bf08a589 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -253,7 +253,8 @@ enum qed_resources {
 	QED_VLAN,
 	QED_RDMA_CNQ_RAM,
 	QED_ILT,
-	QED_LL2_QUEUE,
+	QED_LL2_RAM_QUEUE,
+	QED_LL2_CTX_QUEUE,
 	QED_CMDQS_CQS,
 	QED_RDMA_STATS_QUEUE,
 	QED_BDQ,
@@ -461,6 +462,8 @@ struct qed_fw_data {
 	const u8 *modes_tree_buf;
 	union init_op *init_ops;
 	const u32 *arr_data;
+	const u32 *fw_overlays;
+	u32 fw_overlays_len;
 	u32 init_ops_size;
 };
@@ -531,6 +534,23 @@ struct qed_nvm_image_info {
 	bool valid;
 };
 
+enum qed_hsi_def_type {
+	QED_HSI_DEF_MAX_NUM_VFS,
+	QED_HSI_DEF_MAX_NUM_L2_QUEUES,
+	QED_HSI_DEF_MAX_NUM_PORTS,
+	QED_HSI_DEF_MAX_SB_PER_PATH,
+	QED_HSI_DEF_MAX_NUM_PFS,
+	QED_HSI_DEF_MAX_NUM_VPORTS,
+	QED_HSI_DEF_NUM_ETH_RSS_ENGINE,
+	QED_HSI_DEF_MAX_QM_TX_QUEUES,
+	QED_HSI_DEF_NUM_PXP_ILT_RECORDS,
+	QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
+	QED_HSI_DEF_MAX_QM_GLOBAL_RLS,
+	QED_HSI_DEF_MAX_PBF_CMD_LINES,
+	QED_HSI_DEF_MAX_BTB_BLOCKS,
+	QED_NUM_HSI_DEFS
+};
+
 #define DRV_MODULE_VERSION		      \
 	__stringify(QED_MAJOR_VERSION) "."    \
 	__stringify(QED_MINOR_VERSION) "."    \
@@ -646,6 +666,7 @@ struct qed_hwfn {
 	struct dbg_tools_data		dbg_info;
 	void				*dbg_user_info;
+	struct virt_mem_desc		dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
 
 	/* PWM region specific data */
 	u16				wid_count;
@@ -668,6 +689,7 @@ struct qed_hwfn {
 	/* Nvm images number and attributes */
 	struct qed_nvm_image_info nvm_info;
+	struct phys_mem_desc *fw_overlay_mem;
 	struct qed_ptt *p_arfs_ptt;
 
 	struct qed_simd_fp_handler	simd_proto_handler[64];
@@ -796,8 +818,8 @@ struct qed_dev {
 	u8 cache_shift;
 
 	/* Init */
-	const struct iro *iro_arr;
-#define IRO (p_hwfn->cdev->iro_arr)
+	const u32 *iro_arr;
+#define IRO ((const struct iro *)p_hwfn->cdev->iro_arr)
 
 	/* HW functions */
 	u8 num_hwfns;
@@ -856,6 +878,8 @@ struct qed_dev {
 	struct qed_cb_ll2_info *ll2;
 	u8 ll2_mac_address[ETH_ALEN];
 #endif
+	struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
+	bool disable_ilt_dump;
 	DECLARE_HASHTABLE(connections, 10);
 	const struct firmware *firmware;
@@ -868,16 +892,35 @@ struct qed_dev {
 	bool iwarp_cmt;
 };
 
-#define NUM_OF_VFS(dev)		(QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
-						: MAX_NUM_VFS_K2)
-#define NUM_OF_L2_QUEUES(dev)	(QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
-						: MAX_NUM_L2_QUEUES_K2)
-#define NUM_OF_PORTS(dev)	(QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
-						: MAX_NUM_PORTS_K2)
-#define NUM_OF_SBS(dev)		(QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
-						: MAX_SB_PER_PATH_K2)
-#define NUM_OF_ENG_PFS(dev)	(QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
-						: MAX_NUM_PFS_K2)
+u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
+
+#define NUM_OF_VFS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VFS)
+#define NUM_OF_L2_QUEUES(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
+#define NUM_OF_PORTS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PORTS)
+#define NUM_OF_SBS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_SB_PER_PATH)
+#define NUM_OF_ENG_PFS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PFS)
+#define NUM_OF_VPORTS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS)
+#define NUM_OF_RSS_ENGINES(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_ETH_RSS_ENGINE)
+#define NUM_OF_QM_TX_QUEUES(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_TX_QUEUES)
+#define NUM_OF_PXP_ILT_RECORDS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_PXP_ILT_RECORDS)
+#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
+#define NUM_OF_QM_GLOBAL_RLS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS)
+#define NUM_OF_PBF_CMD_LINES(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_PBF_CMD_LINES)
+#define NUM_OF_BTB_BLOCKS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
 
 /**
  * @brief qed_concrete_to_sw_fid - get the sw function id from
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 8e1bdf58b9e7..1a636bad717d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -50,12 +50,6 @@
 #include "qed_reg_addr.h"
 #include "qed_sriov.h"
 
-/* Max number of connection types in HW (DQ/CDU etc.) */
-#define MAX_CONN_TYPES		PROTOCOLID_COMMON
-#define NUM_TASK_TYPES		2
-#define NUM_TASK_PF_SEGMENTS	4
-#define NUM_TASK_VF_SEGMENTS	1
-
 /* QM constants */
 #define QM_PQ_ELEMENT_SIZE	4 /* in bytes */
@@ -123,126 +117,6 @@ struct src_ent {
 /* Alignment is inherent to the type1_task_context structure */
 #define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
 
-/* PF per protocl configuration object */
-#define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
-#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
-
-struct qed_tid_seg {
-	u32 count;
-	u8 type;
-	bool has_fl_mem;
-};
-
-struct qed_conn_type_cfg {
-	u32 cid_count;
-	u32 cids_per_vf;
-	struct qed_tid_seg tid_seg[TASK_SEGMENTS];
-};
-
-/* ILT Client configuration, Per connection type (protocol) resources. */
-#define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
-#define ILT_CLI_VF_BLOCKS	(1 + NUM_TASK_VF_SEGMENTS * 2)
-#define CDUC_BLK		(0)
-#define SRQ_BLK			(0)
-#define CDUT_SEG_BLK(n)		(1 + (u8)(n))
-#define CDUT_FL_SEG_BLK(n, X)	(1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
-
-enum ilt_clients {
-	ILT_CLI_CDUC,
-	ILT_CLI_CDUT,
-	ILT_CLI_QM,
-	ILT_CLI_TM,
-	ILT_CLI_SRC,
-	ILT_CLI_TSDM,
-	ILT_CLI_MAX
-};
-
-struct ilt_cfg_pair {
-	u32 reg;
-	u32 val;
-};
-
-struct qed_ilt_cli_blk {
-	u32 total_size; /* 0 means not active */
-	u32 real_size_in_page;
-	u32 start_line;
-	u32 dynamic_line_cnt;
-};
-
-struct qed_ilt_client_cfg {
-	bool active;
-
-	/* ILT boundaries */
-	struct ilt_cfg_pair first;
-	struct ilt_cfg_pair last;
-	struct ilt_cfg_pair p_size;
-
-	/* ILT client blocks for PF */
-	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
-	u32 pf_total_lines;
-
-	/* ILT client blocks for VFs */
-	struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
-	u32 vf_total_lines;
-};
-
-/* Per Path -
- *      ILT shadow table
- *      Protocol acquired CID lists
- *      PF start line in ILT
- */
-struct qed_dma_mem {
-	dma_addr_t p_phys;
-	void *p_virt;
-	size_t size;
-};
-
-struct qed_cid_acquired_map {
-	u32		start_cid;
-	u32		max_count;
-	unsigned long	*cid_map;
-};
-
-struct qed_cxt_mngr {
-	/* Per protocl configuration */
-	struct qed_conn_type_cfg	conn_cfg[MAX_CONN_TYPES];
-
-	/* computed ILT structure */
-	struct qed_ilt_client_cfg	clients[ILT_CLI_MAX];
-
-	/* Task type sizes */
-	u32 task_type_size[NUM_TASK_TYPES];
-
-	/* total number of VFs for this hwfn -
-	 * ALL VFs are symmetric in terms of HW resources
-	 */
-	u32				vf_count;
-
-	/* Acquired CIDs */
-	struct qed_cid_acquired_map	acquired[MAX_CONN_TYPES];
-
-	struct qed_cid_acquired_map
-	acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
-
-	/* ILT shadow table */
-	struct qed_dma_mem		*ilt_shadow;
-	u32				pf_start_line;
-
-	/* Mutex for a dynamic ILT allocation */
-	struct mutex mutex;
-
-	/* SRC T2 */
-	struct qed_dma_mem		*t2;
-	u32				t2_num_pages;
-	u64				first_free;
-	u64				last_free;
-
-	/* total number of SRQ's for this hwfn */
-	u32 srq_count;
-
-	/* Maximal number of L2 steering filters */
-	u32 arfs_count;
-};
 static bool src_proto(enum protocol_type type)
 {
 	return type == PROTOCOLID_ISCSI ||
@@ -880,30 +754,60 @@ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
 
 static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
 {
-	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct qed_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
 	u32 i;
 
-	if (!p_mngr->t2)
+	if (!p_t2 || !p_t2->dma_mem)
 		return;
 
-	for (i = 0; i < p_mngr->t2_num_pages; i++)
-		if (p_mngr->t2[i].p_virt)
+	for (i = 0; i < p_t2->num_pages; i++)
+		if (p_t2->dma_mem[i].virt_addr)
 			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
-					  p_mngr->t2[i].size,
-					  p_mngr->t2[i].p_virt,
-					  p_mngr->t2[i].p_phys);
+					  p_t2->dma_mem[i].size,
+					  p_t2->dma_mem[i].virt_addr,
+					  p_t2->dma_mem[i].phys_addr);
+
+	kfree(p_t2->dma_mem);
+	p_t2->dma_mem = NULL;
+}
+
+static int
+qed_cxt_t2_alloc_pages(struct qed_hwfn *p_hwfn,
+		       struct qed_src_t2 *p_t2, u32 total_size, u32 page_size)
+{
+	void **p_virt;
+	u32 size, i;
+
+	if (!p_t2 || !p_t2->dma_mem)
+		return -EINVAL;
 
-	kfree(p_mngr->t2);
-	p_mngr->t2 = NULL;
+	for (i = 0; i < p_t2->num_pages; i++) {
+		size = min_t(u32, total_size, page_size);
+		p_virt = &p_t2->dma_mem[i].virt_addr;
+
+		*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+					     size,
+					     &p_t2->dma_mem[i].phys_addr,
+					     GFP_KERNEL);
+		if (!p_t2->dma_mem[i].virt_addr)
+			return -ENOMEM;
+
+		memset(*p_virt, 0, size);
+		p_t2->dma_mem[i].size = size;
+		total_size -= size;
+	}
+
+	return 0;
 }
 
 static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	u32 conn_num, total_size, ent_per_page, psz, i;
+	struct phys_mem_desc *p_t2_last_page;
 	struct qed_ilt_client_cfg *p_src;
 	struct qed_src_iids src_iids;
-	struct qed_dma_mem *p_t2;
+	struct qed_src_t2 *p_t2;
 	int rc;
 
 	memset(&src_iids, 0, sizeof(src_iids));
@@ -921,49 +825,39 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
 	/* use the same page size as the SRC ILT client */
 	psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
-	p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+	p_t2 = &p_mngr->src_t2;
+	p_t2->num_pages = DIV_ROUND_UP(total_size, psz);
 
 	/* allocate t2 */
-	p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
-			     GFP_KERNEL);
-	if (!p_mngr->t2) {
+	p_t2->dma_mem = kcalloc(p_t2->num_pages, sizeof(struct phys_mem_desc),
+				GFP_KERNEL);
+	if (!p_t2->dma_mem) {
+		DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
 		rc = -ENOMEM;
 		goto t2_fail;
 	}
 
-	/* allocate t2 pages */
-	for (i = 0; i < p_mngr->t2_num_pages; i++) {
-		u32 size = min_t(u32, total_size, psz);
-		void **p_virt = &p_mngr->t2[i].p_virt;
-
-		*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
-					     &p_mngr->t2[i].p_phys,
-					     GFP_KERNEL);
-		if (!p_mngr->t2[i].p_virt) {
-			rc = -ENOMEM;
-			goto t2_fail;
-		}
-		p_mngr->t2[i].size = size;
-		total_size -= size;
-	}
+	rc = qed_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
+	if (rc)
+		goto t2_fail;
 
 	/* Set the t2 pointers */
 
 	/* entries per page - must be a power of two */
 	ent_per_page = psz / sizeof(struct src_ent);
 
-	p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
+	p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;
 
-	p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
-	p_mngr->last_free = (u64) p_t2->p_phys +
+	p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
+	p_t2->last_free = (u64)p_t2_last_page->phys_addr +
 	    ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
 
-	for (i = 0; i < p_mngr->t2_num_pages; i++) {
+	for (i = 0; i < p_t2->num_pages; i++) {
 		u32 ent_num = min_t(u32, ent_per_page, conn_num);
-		struct src_ent *entries = p_mngr->t2[i].p_virt;
-		u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
+		struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
+		u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
 		u32 j;
 
 		for (j = 0; j < ent_num - 1; j++) {
@@ -971,8 +865,8 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
 			entries[j].next = cpu_to_be64(val);
 		}
 
-		if (i < p_mngr->t2_num_pages - 1)
-			val = (u64) p_mngr->t2[i + 1].p_phys;
+		if (i < p_t2->num_pages - 1)
+			val = (u64)p_t2->dma_mem[i + 1].phys_addr;
 		else
 			val = 0;
 		entries[j].next = cpu_to_be64(val);
@@ -988,7 +882,7 @@ t2_fail:
 }
 
 #define for_each_ilt_valid_client(pos, clients)	\
-	for (pos = 0; pos < ILT_CLI_MAX; pos++)		\
+	for (pos = 0; pos < MAX_ILT_CLIENTS; pos++)	\
 		if (!clients[pos].active) {		\
 			continue;			\
 		} else					\
@@ -1014,13 +908,13 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
 	ilt_size = qed_cxt_ilt_shadow_size(p_cli);
 
 	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
-		struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+		struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];
 
-		if (p_dma->p_virt)
+		if (p_dma->virt_addr)
 			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
-					  p_dma->size, p_dma->p_virt,
-					  p_dma->p_phys);
-		p_dma->p_virt = NULL;
+					  p_dma->size, p_dma->virt_addr,
+					  p_dma->phys_addr);
+		p_dma->virt_addr = NULL;
 	}
 	kfree(p_mngr->ilt_shadow);
 }
@@ -1030,7 +924,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
 			     enum ilt_clients ilt_client,
 			     u32 start_line_offset)
 {
-	struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+	struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
 	u32 lines, line, sz_left, lines_to_skip = 0;
 
 	/* Special handling for RoCE that supports dynamic allocation */
@@ -1059,8 +953,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
 		if (!p_virt)
 			return -ENOMEM;
 
-		ilt_shadow[line].p_phys = p_phys;
-		ilt_shadow[line].p_virt = p_virt;
+		ilt_shadow[line].phys_addr = p_phys;
+		ilt_shadow[line].virt_addr = p_virt;
 		ilt_shadow[line].size = size;
 
 		DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -1083,7 +977,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
 	int rc;
 
 	size = qed_cxt_ilt_shadow_size(clients);
-	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
+	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct phys_mem_desc),
 				     GFP_KERNEL);
 	if (!p_mngr->ilt_shadow) {
 		rc = -ENOMEM;
@@ -1092,7 +986,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
 
 	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
 		   "Allocated 0x%x bytes for ilt shadow\n",
-		   (u32)(size * sizeof(struct qed_dma_mem)));
+		   (u32)(size * sizeof(struct phys_mem_desc)));
 
 	for_each_ilt_valid_client(i, clients) {
 		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
@@ -1238,15 +1132,20 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
 	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
 	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
 	/* default ILT page size for all clients is 64K */
-	for (i = 0; i < ILT_CLI_MAX; i++)
+	for (i = 0; i < MAX_ILT_CLIENTS; i++)
 		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
 
+	p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn);
+
 	/* Initialize task sizes */
 	p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
 	p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
 
-	if (p_hwfn->cdev->p_iov_info)
+	if (p_hwfn->cdev->p_iov_info) {
 		p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+		p_mngr->first_vf_in_pf =
+			p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+	}
 
 	/* Initialize the dynamic ILT allocation mutex */
 	mutex_init(&p_mngr->mutex);
@@ -1499,14 +1398,11 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
 {
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 	struct qed_qm_pf_rt_init_params params;
-	struct qed_mcp_link_state *p_link;
 	struct qed_qm_iids iids;
 
 	memset(&iids, 0, sizeof(iids));
 	qed_cxt_qm_iids(p_hwfn, &iids);
 
-	p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
-
 	memset(&params, 0, sizeof(params));
 	params.port_id = p_hwfn->port_id;
 	params.pf_id = p_hwfn->rel_pf_id;
@@ -1522,7 +1418,6 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
 	params.num_vports = qm_info->num_vports;
 	params.pf_wfq = qm_info->pf_wfq;
 	params.pf_rl = qm_info->pf_rl;
-	params.link_speed = p_link->speed;
 	params.pq_params = qm_info->qm_pq_params;
 	params.vport_params = qm_info->qm_vport_params;
@@ -1674,7 +1569,7 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
 {
 	struct qed_ilt_client_cfg *clients;
 	struct qed_cxt_mngr *p_mngr;
-	struct qed_dma_mem *p_shdw;
+	struct phys_mem_desc *p_shdw;
 	u32 line, rt_offst, i;
 
 	qed_ilt_bounds_init(p_hwfn);
@@ -1699,15 +1594,15 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
 			/** p_virt could be NULL incase of dynamic
 			 * allocation
 			 */
-			if (p_shdw[line].p_virt) {
+			if (p_shdw[line].virt_addr) {
 				SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
 				SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
-					  (p_shdw[line].p_phys >> 12));
+					  (p_shdw[line].phys_addr >> 12));
 
 				DP_VERBOSE(p_hwfn, QED_MSG_ILT,
 					   "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
 					   rt_offst, line, i,
-					   (u64)(p_shdw[line].p_phys >> 12));
+					   (u64)(p_shdw[line].phys_addr >> 12));
 			}
 
 			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
@@ -2050,10 +1945,10 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
 	line = p_info->iid / cxts_per_p;
 
 	/* Make sure context is allocated (dynamic allocation) */
-	if (!p_mngr->ilt_shadow[line].p_virt)
+	if (!p_mngr->ilt_shadow[line].virt_addr)
 		return -EINVAL;
 
-	p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
+	p_info->p_cxt = p_mngr->ilt_shadow[line].virt_addr +
 			p_info->iid % cxts_per_p * conn_cxt_size;
 
 	DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
@@ -2234,7 +2129,7 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
 	for (i = 0; i < total_lines; i++) {
 		shadow_line = i + p_fl_seg->start_line -
 			      p_hwfn->p_cxt_mngr->pf_start_line;
-		p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
+		p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr;
 	}
 	p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
 			p_fl_seg->real_size_in_page;
@@ -2296,7 +2191,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 
 	mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
 
-	if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+	if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
 		goto out0;
 
 	p_ptt = qed_ptt_acquire(p_hwfn);
@@ -2334,8 +2229,8 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 		}
 	}
 
-	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
-	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
+	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
 	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
 	    p_blk->real_size_in_page;
@@ -2345,9 +2240,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 	ilt_hw_entry = 0;
 	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
-	SET_FIELD(ilt_hw_entry,
-		  ILT_ENTRY_PHY_ADDR,
-		  (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
+	SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+		  (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr
+		   >> 12));
 
 	/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
 	qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
@@ -2434,16 +2329,16 @@ qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
 	}
 
 	for (i = shadow_start_line; i < shadow_end_line; i++) {
-		if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+		if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
 			continue;
 
 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 				  p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
-				  p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
-				  p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
+				  p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
+				  p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr);
 
-		p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
-		p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+		p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = NULL;
+		p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
 		p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
 
 		/* compute absolute offset */
@@ -2547,8 +2442,76 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
 	ilt_idx = tid / num_tids_per_block + p_seg->start_line -
 		  p_mngr->pf_start_line;
-	*pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
+	*pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr +
 		       (tid % num_tids_per_block) * tid_size;
 
 	return 0;
 }
+
+static u16 qed_blk_calculate_pages(struct qed_ilt_cli_blk *p_blk)
+{
+	if (p_blk->real_size_in_page == 0)
+		return 0;
+
+	return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
+}
+
+u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *p_cli;
+	struct qed_ilt_cli_blk *p_blk;
+	u16 i, pages = 0;
+
+	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+		p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+		pages += qed_blk_calculate_pages(p_blk);
+	}
+
+	return pages;
+}
+
+u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *p_cli;
+	struct qed_ilt_cli_blk *p_blk;
+	u16 i, pages = 0;
+
+	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+	for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
+		p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)];
+		pages += qed_blk_calculate_pages(p_blk);
+	}
+
+	return pages;
+}
+
+u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *p_cli;
+	struct qed_ilt_cli_blk *p_blk;
+	u16 i, pages = 0;
+
+	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+		pages += qed_blk_calculate_pages(p_blk);
+	}
+
+	return pages;
+}
+
+u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *p_cli;
+	struct qed_ilt_cli_blk *p_blk;
+	u16 pages = 0, i;
+
+	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+	for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
+		p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)];
+		pages += qed_blk_calculate_pages(p_blk);
+	}
+
+	return pages;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 758a8b4c0de8..c4e815f6cabd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -242,4 +242,134 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
 #define QED_CTX_FL_MEM 1
 int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
 			 u32 tid, u8 ctx_type, void **task_ctx);
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES		PROTOCOLID_COMMON
+#define NUM_TASK_TYPES		2
+#define NUM_TASK_PF_SEGMENTS	4
+#define NUM_TASK_VF_SEGMENTS	1
+
+/* PF per protocl configuration object */
+#define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+struct qed_tid_seg {
+	u32 count;
+	u8 type;
+	bool has_fl_mem;
+};
+
+struct qed_conn_type_cfg {
+	u32 cid_count;
+	u32 cids_per_vf;
+	struct qed_tid_seg tid_seg[TASK_SEGMENTS];
+};
+
+/* ILT Client configuration,
+ * Per connection type (protocol) resources (cids, tis, vf cids etc.)
+ * 1 - for connection context (CDUC) and for each task context we need two
+ * values, for regular task context and for force load memory
+ */
+#define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS	(1 + NUM_TASK_VF_SEGMENTS * 2)
+#define CDUC_BLK		(0)
+#define SRQ_BLK			(0)
+#define CDUT_SEG_BLK(n)		(1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X)	(1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
+
+struct ilt_cfg_pair {
+	u32 reg;
+	u32 val;
+};
+
+struct qed_ilt_cli_blk {
+	u32 total_size;		/* 0 means not active */
+	u32 real_size_in_page;
+	u32 start_line;
+	u32 dynamic_line_offset;
+	u32 dynamic_line_cnt;
+};
+
+struct qed_ilt_client_cfg {
+	bool active;
+
+	/* ILT boundaries */
+	struct ilt_cfg_pair first;
+	struct ilt_cfg_pair last;
+	struct ilt_cfg_pair p_size;
+
+	/* ILT client blocks for PF */
+	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+	u32 pf_total_lines;
+
+	/* ILT client blocks for VFs */
+	struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+	u32 vf_total_lines;
+};
+
+struct qed_cid_acquired_map {
+	u32 start_cid;
+	u32 max_count;
+	unsigned long *cid_map;
+};
+
+struct qed_src_t2 {
+	struct phys_mem_desc *dma_mem;
+	u32 num_pages;
+	u64 first_free;
+	u64 last_free;
+};
+
+struct qed_cxt_mngr {
+	/* Per protocl configuration */
+	struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+	/* computed ILT structure */
+	struct qed_ilt_client_cfg clients[MAX_ILT_CLIENTS];
+
+	/* Task type sizes */
+	u32 task_type_size[NUM_TASK_TYPES];
+
+	/* total number of VFs for this hwfn -
+	 * ALL VFs are symmetric in terms of HW resources
+	 */
+	u32 vf_count;
+	u32 first_vf_in_pf;
+
+	/* Acquired CIDs */
+	struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
+
+	struct qed_cid_acquired_map
+	acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
+
+	/* ILT shadow table */
+	struct phys_mem_desc *ilt_shadow;
+	u32 ilt_shadow_size;
+	u32 pf_start_line;
+
+	/* Mutex for a dynamic ILT allocation */
+	struct mutex mutex;
+
+	/* SRC T2 */
+	struct qed_src_t2 src_t2;
+	u32 t2_num_pages;
+	u64 first_free;
+	u64 last_free;
+
+	/* total number of SRQ's for this hwfn */
+	u32 srq_count;
+
+	/* Maximal number of L2 steering filters */
+	u32 arfs_count;
+
+	u8 task_type_id;
+	u16 task_ctx_size;
+	u16 conn_ctx_size;
+};
+
+u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);
+
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 5ea6c4fc6050..f4eebaabb6d0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -7,6 +7,7 @@
 #include <linux/vmalloc.h>
 #include <linux/crc32.h>
 #include "qed.h"
+#include "qed_cxt.h"
 #include "qed_hsi.h"
 #include "qed_hw.h"
 #include "qed_mcp.h"
@@ -22,27 +23,28 @@ enum mem_groups {
 	MEM_GROUP_BRB_RAM,
 	MEM_GROUP_BRB_MEM,
 	MEM_GROUP_PRS_MEM,
+	MEM_GROUP_SDM_MEM,
+	MEM_GROUP_PBUF,
 	MEM_GROUP_IOR,
+	MEM_GROUP_RAM,
 	MEM_GROUP_BTB_RAM,
+	MEM_GROUP_RDIF_CTX,
+	MEM_GROUP_TDIF_CTX,
+	MEM_GROUP_CFC_MEM,
 	MEM_GROUP_CONN_CFC_MEM,
-	MEM_GROUP_TASK_CFC_MEM,
 	MEM_GROUP_CAU_PI,
 	MEM_GROUP_CAU_MEM,
+	MEM_GROUP_CAU_MEM_EXT,
 	MEM_GROUP_PXP_ILT,
-	MEM_GROUP_TM_MEM,
-	MEM_GROUP_SDM_MEM,
-	MEM_GROUP_PBUF,
-	MEM_GROUP_RAM,
 	MEM_GROUP_MULD_MEM,
 	MEM_GROUP_BTB_MEM,
-	MEM_GROUP_RDIF_CTX,
-	MEM_GROUP_TDIF_CTX,
-	MEM_GROUP_CFC_MEM,
 	MEM_GROUP_IGU_MEM,
 	MEM_GROUP_IGU_MSIX,
 	MEM_GROUP_CAU_SB,
 	MEM_GROUP_BMB_RAM,
 	MEM_GROUP_BMB_MEM,
+	MEM_GROUP_TM_MEM,
+	MEM_GROUP_TASK_CFC_MEM,
 	MEM_GROUPS_NUM
 };
@@ -56,27 +58,28 @@ static const char * const s_mem_group_names[] = {
 	"BRB_RAM",
 	"BRB_MEM",
 	"PRS_MEM",
+	"SDM_MEM",
+	"PBUF",
 	"IOR",
+	"RAM",
 	"BTB_RAM",
+	"RDIF_CTX",
+	"TDIF_CTX",
+	"CFC_MEM",
 	"CONN_CFC_MEM",
-	"TASK_CFC_MEM",
 	"CAU_PI",
 	"CAU_MEM",
+	"CAU_MEM_EXT",
 	"PXP_ILT",
-	"TM_MEM",
-	"SDM_MEM",
-	"PBUF",
-	"RAM",
 	"MULD_MEM",
 	"BTB_MEM",
-	"RDIF_CTX",
-	"TDIF_CTX",
-	"CFC_MEM",
 	"IGU_MEM",
 	"IGU_MSIX",
 	"CAU_SB",
 	"BMB_RAM",
 	"BMB_MEM",
+	"TM_MEM",
+	"TASK_CFC_MEM",
 };
 
 /* Idle check conditions */
@@ -170,35 +173,66 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
 	cond13,
 };
 
+#define NUM_PHYS_BLOCKS 84
+
+#define NUM_DBG_RESET_REGS 8
+
 /******************************* Data Types **********************************/
 
-enum platform_ids {
-	PLATFORM_ASIC,
+enum hw_types {
+	HW_TYPE_ASIC,
 	PLATFORM_RESERVED,
 	PLATFORM_RESERVED2,
 	PLATFORM_RESERVED3,
-	MAX_PLATFORM_IDS
+	PLATFORM_RESERVED4,
+	MAX_HW_TYPES
+};
+
+/* CM context types */
+enum cm_ctx_types {
+	CM_CTX_CONN_AG,
+	CM_CTX_CONN_ST,
+	CM_CTX_TASK_AG,
+	CM_CTX_TASK_ST,
+	NUM_CM_CTX_TYPES
+};
+
+/* Debug bus frame modes */
+enum dbg_bus_frame_modes {
+	DBG_BUS_FRAME_MODE_4ST = 0,	/* 4 Storm dwords (no HW) */
+	DBG_BUS_FRAME_MODE_2ST_2HW = 1,	/* 2 Storm dwords, 2 HW dwords */
+	DBG_BUS_FRAME_MODE_1ST_3HW = 2,	/* 1 Storm dwords, 3 HW dwords */
+	DBG_BUS_FRAME_MODE_4HW = 3,	/* 4 HW dwords (no Storms) */
+	DBG_BUS_FRAME_MODE_8HW = 4,	/* 8 HW dwords (no Storms) */
+	DBG_BUS_NUM_FRAME_MODES
 };
 
 /* Chip constant definitions */
 struct chip_defs {
 	const char *name;
+	u32 num_ilt_pages;
 };
 
-/* Platform constant definitions */
-struct platform_defs {
+/* HW type constant definitions */
+struct hw_type_defs {
 	const char *name;
 	u32 delay_factor;
 	u32 dmae_thresh;
 	u32 log_thresh;
 };
 
+/* RBC reset definitions */
+struct rbc_reset_defs {
+	u32 reset_reg_addr;
+	u32 reset_val[MAX_CHIP_IDS];
+};
+
 /* Storm constant definitions.
  * Addresses are in bytes, sizes are in quad-regs.
  */
 struct storm_defs {
 	char letter;
-	enum block_id block_id;
+	enum block_id sem_block_id;
 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
 	bool has_vfc;
 	u32 sem_fast_mem_addr;
@@ -207,47 +241,26 @@ struct storm_defs {
 	u32 sem_slow_mode_addr;
 	u32 sem_slow_mode1_conf_addr;
 	u32 sem_sync_dbg_empty_addr;
-	u32 sem_slow_dbg_empty_addr;
+	u32 sem_gpre_vect_addr;
 	u32 cm_ctx_wr_addr;
-	u32 cm_conn_ag_ctx_lid_size;
-	u32 cm_conn_ag_ctx_rd_addr;
-	u32 cm_conn_st_ctx_lid_size;
-	u32 cm_conn_st_ctx_rd_addr;
-	u32 cm_task_ag_ctx_lid_size;
-	u32 cm_task_ag_ctx_rd_addr;
-	u32 cm_task_st_ctx_lid_size;
-	u32 cm_task_st_ctx_rd_addr;
+	u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
+	u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
 };
 
-/* Block constant definitions */
-struct block_defs {
-	const char *name;
-	bool exists[MAX_CHIP_IDS];
-	bool associated_to_storm;
-
-	/* Valid only if associated_to_storm is true */
-	u32 storm_id;
-	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
-	u32 dbg_select_addr;
-	u32 dbg_enable_addr;
-	u32 dbg_shift_addr;
-	u32 dbg_force_valid_addr;
-	u32 dbg_force_frame_addr;
-	bool has_reset_bit;
-
-	/* If true, block is taken out of reset before dump */
-	bool unreset;
-	enum dbg_reset_regs reset_reg;
-
-	/* Bit offset in reset register */
-	u8 reset_bit_offset;
+/* Debug Bus Constraint operation constant definitions */
+struct dbg_bus_constraint_op_defs {
+	u8 hw_op_val;
+	bool is_cyclic;
 };
 
-/* Reset register definitions */
-struct reset_reg_defs {
-	u32 addr;
+/* Storm Mode definitions */
+struct storm_mode_defs {
+	const char *name;
+	bool is_fast_dbg;
+	u8 id_in_hw;
+	u32 src_disable_reg_addr;
+	u32 src_enable_val;
 	bool exists[MAX_CHIP_IDS];
-	u32 unreset_val[MAX_CHIP_IDS];
 };
 
 struct grc_param_defs {
@@ -257,7 +270,7 @@ struct grc_param_defs {
 	bool is_preset;
 	bool is_persistent;
 	u32 exclude_all_preset_val;
-	u32 crash_preset_val;
+	u32 crash_preset_val[MAX_CHIP_IDS];
 };
 
 /* Address is in 128b units. Width is in bits. */
@@ -314,15 +327,7 @@ struct split_type_defs {
 
 /******************************** Constants **********************************/
 
-#define MAX_LCIDS			320
-#define MAX_LTIDS			320
-
-#define NUM_IOR_SETS			2
-#define IORS_PER_SET			176
-#define IOR_SET_OFFSET(set_id)		((set_id) * 256)
-
 #define BYTES_IN_DWORD			sizeof(u32)
-
 /* In the macros below, size and offset are specified in bits */
 #define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
 #define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
@@ -348,20 +353,17 @@ struct split_type_defs {
 			qed_wr(dev, ptt, addr, (arr)[i]); \
 	} while (0)
 
-#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
-	do { \
-		for (i = 0; i < (arr_size); i++) \
-			(arr)[i] = qed_rd(dev, ptt, addr); \
-	} while (0)
-
 #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
 #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
 
-/* Extra lines include a signature line + optional latency events line */
-#define NUM_EXTRA_DBG_LINES(block_desc) \
-	(1 + ((block_desc)->has_latency_events ? 1 : 0))
-#define NUM_DBG_LINES(block_desc) \
-	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
+/* extra lines include a signature line + optional latency events line */
+#define NUM_EXTRA_DBG_LINES(block) \
+	(GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
+#define NUM_DBG_LINES(block) \
+	((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))
+
+#define USE_DMAE			true
+#define PROTECT_WIDE_BUS		true
 
 #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
 #define RAM_LINES_TO_BYTES(lines) \
@@ -380,6 +382,9 @@ struct split_type_defs {
 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
 
+#define PAGE_MEM_DESC_SIZE_DWORDS \
+	BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))
+
 #define IDLE_CHK_MAX_ENTRIES_SIZE	32
 
 /* The sizes and offsets below are specified in bits */
@@ -425,7 +430,9 @@ struct split_type_defs {
 
 #define STATIC_DEBUG_LINE_DWORDS	9
 
-#define NUM_COMMON_GLOBAL_PARAMS	8
+#define NUM_COMMON_GLOBAL_PARAMS	9
+
+#define MAX_RECURSION_DEPTH		10
 
 #define FW_IMG_MAIN			1
@@ -449,1054 +456,121 @@ struct split_type_defs {
 	(MCP_REG_SCRATCH + \
 	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
 
+#define MAX_SW_PLTAFORM_STR_SIZE	64
+
 #define EMPTY_FW_VERSION_STR		"???_???_???_???"
 #define EMPTY_FW_IMAGE_STR		"???????????????"
 
 /***************************** Constant Arrays *******************************/
 
-struct dbg_array {
-	const u32 *ptr;
-	u32 size_in_dwords;
-};
-
-/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
-
 /* Chip constant definitions array */
 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
-	{"bb"},
-	{"ah"},
-	{"reserved"},
+	{"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
+	{"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
 };
 
 /* Storm constant definitions array */
 static struct storm_defs s_storm_defs[] = {
 	/* Tstorm */
 	{'T', BLOCK_TSEM,
-	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
-	  DBG_BUS_CLIENT_RBCT}, true,
-	 TSEM_REG_FAST_MEMORY,
-	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
-	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
-	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
-	 TCM_REG_CTX_RBC_ACCS,
-	 4, TCM_REG_AGG_CON_CTX,
-	 16, TCM_REG_SM_CON_CTX,
-	 2, TCM_REG_AGG_TASK_CTX,
-	 4, TCM_REG_SM_TASK_CTX},
+	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	 true,
+	 TSEM_REG_FAST_MEMORY,
+	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
+	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
+	 TCM_REG_CTX_RBC_ACCS,
+	 {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
+	  TCM_REG_SM_TASK_CTX},
+	 {{4, 16, 2, 4}, {4, 16, 2, 4}}	/* {bb} {k2} */
+	},
 
 	/* Mstorm */
 	{'M', BLOCK_MSEM,
-	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
-	  DBG_BUS_CLIENT_RBCM}, false,
-	 MSEM_REG_FAST_MEMORY,
-	 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
-	 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
-	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
-	 MCM_REG_CTX_RBC_ACCS,
-	 1, MCM_REG_AGG_CON_CTX,
-	 10, MCM_REG_SM_CON_CTX,
-	 2, MCM_REG_AGG_TASK_CTX,
-	 7, MCM_REG_SM_TASK_CTX},
+	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	 false,
+	 MSEM_REG_FAST_MEMORY,
+	 MSEM_REG_DBG_FRAME_MODE_BB_K2,
+	 MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+	 MSEM_REG_SLOW_DBG_MODE_BB_K2,
+	 MSEM_REG_DBG_MODE1_CFG_BB_K2,
+	 MSEM_REG_SYNC_DBG_EMPTY,
+	 MSEM_REG_DBG_GPRE_VECT,
+	 MCM_REG_CTX_RBC_ACCS,
+	 {MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
+	  MCM_REG_SM_TASK_CTX },
+	 {{1, 10, 2, 7}, {1, 10, 2, 7}}	/* {bb} {k2}*/
+	},
 
 	/* Ustorm */
 	{'U', BLOCK_USEM,
-	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
-	  DBG_BUS_CLIENT_RBCU}, false,
-	 USEM_REG_FAST_MEMORY,
-	 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
-	 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
-	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
-	 UCM_REG_CTX_RBC_ACCS,
-	 2, UCM_REG_AGG_CON_CTX,
-	 13, UCM_REG_SM_CON_CTX,
-	 3, UCM_REG_AGG_TASK_CTX,
-	 3, UCM_REG_SM_TASK_CTX},
+	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	 false,
+	 USEM_REG_FAST_MEMORY,
+	 USEM_REG_DBG_FRAME_MODE_BB_K2,
+	 USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+	 USEM_REG_SLOW_DBG_MODE_BB_K2,
+	 USEM_REG_DBG_MODE1_CFG_BB_K2,
+	 USEM_REG_SYNC_DBG_EMPTY,
+	 USEM_REG_DBG_GPRE_VECT,
+	 UCM_REG_CTX_RBC_ACCS,
+	 {UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
+	  UCM_REG_SM_TASK_CTX},
+	 {{2, 13, 3, 3}, {2, 13, 3, 3}}	/* {bb} {k2} */
+	},
 
 	/* Xstorm */
 	{'X', BLOCK_XSEM,
-	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
-	  DBG_BUS_CLIENT_RBCX}, false,
-	 XSEM_REG_FAST_MEMORY,
-	 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
-	 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
-	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
-	 XCM_REG_CTX_RBC_ACCS,
-	 9, XCM_REG_AGG_CON_CTX,
-	 15, XCM_REG_SM_CON_CTX,
-	 0, 0,
-	 0, 0},
+	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	 false,
+	 XSEM_REG_FAST_MEMORY,
+	 XSEM_REG_DBG_FRAME_MODE_BB_K2,
+	 XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+	 XSEM_REG_SLOW_DBG_MODE_BB_K2,
+	 XSEM_REG_DBG_MODE1_CFG_BB_K2,
+	 XSEM_REG_SYNC_DBG_EMPTY,
+	 XSEM_REG_DBG_GPRE_VECT,
+	 XCM_REG_CTX_RBC_ACCS,
+	 {XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
+	 {{9, 15, 0, 0}, {9, 15, 0, 0}}	/* {bb} {k2} */
+	},
 
	/* Ystorm */
 	{'Y', BLOCK_YSEM,
-	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
-	  DBG_BUS_CLIENT_RBCY}, false,
-	 YSEM_REG_FAST_MEMORY,
-	 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
-	 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
-	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
-	 YCM_REG_CTX_RBC_ACCS,
-	 2, YCM_REG_AGG_CON_CTX,
-	 3, YCM_REG_SM_CON_CTX,
-	 2, YCM_REG_AGG_TASK_CTX,
-	 12, YCM_REG_SM_TASK_CTX},
+	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	 false,
+	 YSEM_REG_FAST_MEMORY,
+	 YSEM_REG_DBG_FRAME_MODE_BB_K2,
+	 YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+	 YSEM_REG_SLOW_DBG_MODE_BB_K2,
+	 YSEM_REG_DBG_MODE1_CFG_BB_K2,
+	 YSEM_REG_SYNC_DBG_EMPTY,
+	 YSEM_REG_DBG_GPRE_VECT,
+	 YCM_REG_CTX_RBC_ACCS,
+	 {YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
+	  YCM_REG_SM_TASK_CTX},
+	 {{2, 3, 2, 12}, {2, 3, 2, 12}}	/* {bb} {k2} */
+	},
 
 	/* Pstorm */
 	{'P', BLOCK_PSEM,
-	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
-	  DBG_BUS_CLIENT_RBCS}, true,
-	 PSEM_REG_FAST_MEMORY,
-	 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
-	 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
-	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
-	 PCM_REG_CTX_RBC_ACCS,
-	 0, 0,
-	 10, PCM_REG_SM_CON_CTX,
-	 0, 0,
-	 0, 0}
 };
 
-/* Block definitions array */
-
-static struct block_defs block_grc_defs = {
-	"grc",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
-	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
-	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
-	GRC_REG_DBG_FORCE_FRAME,
-	true, false, DBG_RESET_REG_MISC_PL_UA, 1
-};
-
-static struct block_defs block_miscs_defs = {
-	"miscs", {true, true, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_misc_defs = {
-	"misc", {true, true, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_dbu_defs = {
-	"dbu", {true, true, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_pglue_b_defs = {
-	"pglue_b",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
-	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
-	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
-	PGLUE_B_REG_DBG_FORCE_FRAME,
-	true, false, DBG_RESET_REG_MISCS_PL_HV, 1
-};
-
-static struct block_defs block_cnig_defs = {
-	"cnig",
-	{true, true, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
-	 DBG_BUS_CLIENT_RBCW},
-	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
-	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
-	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
-	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
-};
-
-static struct block_defs block_cpmu_defs = {
-	"cpmu", {true, true, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
-};
-
-static struct block_defs block_ncsi_defs = {
-	"ncsi",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
-	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
-	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
-	NCSI_REG_DBG_FORCE_FRAME,
-	true, false, DBG_RESET_REG_MISCS_PL_HV, 5
-};
-
-static struct block_defs block_opte_defs = {
-	"opte", {true, true, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
-};
-
-static struct block_defs block_bmb_defs = {
-	"bmb",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
-	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
-	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
-	BMB_REG_DBG_FORCE_FRAME,
-	true, false, DBG_RESET_REG_MISCS_PL_UA, 7
-};
-
-static struct block_defs block_pcie_defs = {
-	"pcie",
-	{true, true, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
-	 DBG_BUS_CLIENT_RBCH},
-	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
-	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
-	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
-	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
-	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_mcp_defs = {
-	"mcp", {true, true, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_mcp2_defs = {
-	"mcp2",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
-	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
-	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
-	MCP2_REG_DBG_FORCE_FRAME,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_pswhst_defs = {
-	"pswhst",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
-	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
-	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
-	PSWHST_REG_DBG_FORCE_FRAME,
-	true, false, DBG_RESET_REG_MISC_PL_HV, 0
-};
-
-static struct block_defs block_pswhst2_defs = {
-	"pswhst2",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
-	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
-	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
-	PSWHST2_REG_DBG_FORCE_FRAME,
-	true, false, DBG_RESET_REG_MISC_PL_HV, 0
-};
-
-static struct block_defs block_pswrd_defs = {
-	"pswrd",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
-	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
-	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
-	PSWRD_REG_DBG_FORCE_FRAME,
-	true, false, DBG_RESET_REG_MISC_PL_HV, 2
-};
-
-static struct block_defs block_pswrd2_defs = {
-	"pswrd2",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
-	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
-	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
-	PSWRD2_REG_DBG_FORCE_FRAME,
-	true, false, DBG_RESET_REG_MISC_PL_HV, 2
-};
-
-static struct block_defs block_pswwr_defs = {
-	"pswwr",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
-	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
-	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
-	PSWWR_REG_DBG_FORCE_FRAME,
-	true, false, DBG_RESET_REG_MISC_PL_HV, 3
-};
-
-static struct block_defs block_pswwr2_defs = {
-	"pswwr2", {true, true, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	true, false, DBG_RESET_REG_MISC_PL_HV, 3
-};
-
-static struct block_defs block_pswrq_defs = {
-	"pswrq",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
-	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
-	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
-	PSWRQ_REG_DBG_FORCE_FRAME,
-	true, false, DBG_RESET_REG_MISC_PL_HV, 1
-};
-
-static struct block_defs block_pswrq2_defs = {
-	"pswrq2",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
-	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
-	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
-	PSWRQ2_REG_DBG_FORCE_FRAME,
-	true, false, DBG_RESET_REG_MISC_PL_HV, 1
-};
-
-static struct block_defs block_pglcs_defs = {
-	"pglcs",
-	{true, true, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
-	 DBG_BUS_CLIENT_RBCH},
-	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
-	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
-	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
-	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
-};
-
-static struct block_defs block_ptu_defs = {
-	"ptu",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
-	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
-	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
-	PTU_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
-};
-
-static struct block_defs block_dmae_defs = {
-	"dmae",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
-	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
-	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
-	DMAE_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
-};
-
-static struct block_defs block_tcm_defs = {
-	"tcm",
-	{true, true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
-	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
-	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
-	TCM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
-};
-
-static struct block_defs block_mcm_defs = {
-	"mcm",
-	{true, true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
-	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
-	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
-	MCM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
-};
-
-static struct block_defs block_ucm_defs = {
-	"ucm",
-	{true, true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
-	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
-	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
-	UCM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
-};
-
-static struct block_defs block_xcm_defs = {
-	"xcm",
-	{true, true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
-	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
-	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
-	XCM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
-};
-
-static struct block_defs block_ycm_defs = {
-	"ycm",
-	{true, true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
-	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
-	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
-	YCM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
-};
-
-static struct block_defs block_pcm_defs = {
-	"pcm",
-	{true, true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
-	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
-	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
-	PCM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
-};
-
-static struct block_defs block_qm_defs = {
-	"qm",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
-	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
-	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
-	QM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
-};
-
-static struct block_defs block_tm_defs = {
-	"tm",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
-	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
-	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
-	TM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
-};
-
-static struct block_defs block_dorq_defs = {
-	"dorq",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
-	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
-	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
-	DORQ_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
-};
-
-static struct block_defs block_brb_defs = {
-	"brb",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
-	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
-	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
-	BRB_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
-};
-
-static struct block_defs block_src_defs = {
-	"src",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
-	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
-	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
-	SRC_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
-};
-
-static struct block_defs block_prs_defs = {
-	"prs",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
-	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
-	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
-	PRS_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
-};
-
-static struct block_defs block_tsdm_defs = {
-	"tsdm",
-	{true, true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
-	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
-	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
-	TSDM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
-};
-
-static struct block_defs block_msdm_defs = {
-	"msdm",
-	{true, true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
-	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
-	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
-	MSDM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
-};
-
-static struct block_defs block_usdm_defs = {
-	"usdm",
-	{true, true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
-	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
-	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
-	USDM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
-};
-
-static struct block_defs block_xsdm_defs = {
-	"xsdm",
-	{true, true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
-	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
-	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
-	XSDM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
-};
-
-static struct block_defs block_ysdm_defs = {
-	"ysdm",
-	{true, true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
-	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
-	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
-	YSDM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
-};
-
-static struct block_defs block_psdm_defs = {
-	"psdm",
-	{true, true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
-	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
-	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
-	PSDM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
-};
-
-static struct block_defs block_tsem_defs = {
-	"tsem",
-	{true, true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
-	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
-	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
-	TSEM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
-};
-
-static struct block_defs block_msem_defs = {
-	"msem",
-	{true, true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
-	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
-	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
-	MSEM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
-};
-
-static struct block_defs block_usem_defs = {
-	"usem",
-	{true, true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
-	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
-	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
-	USEM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
-};
-
-static struct block_defs block_xsem_defs = {
-	"xsem",
-	{true, true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
-	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
-	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
-	XSEM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
-};
-
-static struct block_defs block_ysem_defs = {
-	"ysem",
-	{true, true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
-	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
-	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
-	YSEM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
-};
-
-static struct block_defs block_psem_defs = {
-	"psem",
-	{true, true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
-	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
-	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
-	PSEM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
-};
-
-static struct block_defs block_rss_defs = {
-	"rss",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
-	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
-	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
-	RSS_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
-};
-
-static struct block_defs block_tmld_defs = {
-	"tmld",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
-	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
-	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
-	TMLD_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
-};
-
-static struct block_defs block_muld_defs = {
-	"muld",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
-	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
-	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
-	MULD_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
-};
-
-static struct block_defs block_yuld_defs = {
-	"yuld",
-	{true, true, false}, false, 0,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
-	 MAX_DBG_BUS_CLIENTS},
-	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
-	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
-	YULD_REG_DBG_FORCE_FRAME_BB_K2,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
-	15
-};
-
-static struct block_defs block_xyld_defs = {
-	"xyld",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
-	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
-	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
-	XYLD_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
-};
-
-static struct block_defs block_ptld_defs = {
-	"ptld",
-	{false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
-	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
-	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
-	PTLD_REG_DBG_FORCE_FRAME_E5,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
-	28
-};
-
-static struct block_defs block_ypld_defs = {
-	"ypld",
-	{false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
-	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
-	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
-	YPLD_REG_DBG_FORCE_FRAME_E5,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
-	27
-};
-
-static struct block_defs block_prm_defs = {
-	"prm",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
-	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
-	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
-	PRM_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
-};
-
-static struct block_defs block_pbf_pb1_defs = {
-	"pbf_pb1",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
-	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
-	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
-	PBF_PB1_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
-	11
-};
-
-static struct block_defs block_pbf_pb2_defs = {
-	"pbf_pb2",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
-	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
-	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
-	PBF_PB2_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
-	12
-};
-
-static struct block_defs block_rpb_defs = {
-	"rpb",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
-	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
-	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
-	RPB_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
-};
-
-static struct block_defs block_btb_defs = {
-	"btb",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
-	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
-	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
-	BTB_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
-};
-
-static struct block_defs block_pbf_defs = {
-	"pbf",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
-	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
-	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
-	PBF_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
-};
-
-static struct block_defs block_rdif_defs = {
-	"rdif",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
-	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
-	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
-	RDIF_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
-};
-
-static struct block_defs block_tdif_defs = {
-	"tdif",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
-	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
-	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
-	TDIF_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
-};
-
-static struct block_defs block_cdu_defs = {
-	"cdu",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
-	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
-	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
-	CDU_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
-};
-
-static struct block_defs block_ccfc_defs = {
-	"ccfc",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
-	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
-	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
-	CCFC_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
-};
-
-static struct block_defs block_tcfc_defs = {
-	"tcfc",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
-	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
-	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
-	TCFC_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
-};
-
-static struct block_defs block_igu_defs = {
-	"igu",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
-	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
-	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
-	IGU_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
-};
-
-static struct block_defs block_cau_defs = {
-	"cau",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
-	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
-	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
-	CAU_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
-};
-
-static struct block_defs block_rgfs_defs = {
-	"rgfs", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
-};
-
-static struct block_defs block_rgsrc_defs = {
-	"rgsrc",
-	{false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
-	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
-	RGSRC_REG_DBG_FORCE_FRAME_E5,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
-	30
-};
-
-static struct block_defs block_tgfs_defs = {
-	"tgfs", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
-};
-
-static struct block_defs block_tgsrc_defs = {
-	"tgsrc",
-	{false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
-	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
-	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
-	TGSRC_REG_DBG_FORCE_FRAME_E5,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
-	31
-};
-
-static struct block_defs block_umac_defs = {
-	"umac",
-	{true, true, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
-	 DBG_BUS_CLIENT_RBCZ},
-	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
-	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
-	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
-	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
-};
-
-static struct block_defs block_xmac_defs = {
-	"xmac", {true, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_dbg_defs = {
-	"dbg", {true, true, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
-};
-
-static struct block_defs block_nig_defs = {
-	"nig",
-	{true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
-	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
-	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
-	NIG_REG_DBG_FORCE_FRAME,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
-};
-
-static struct block_defs block_wol_defs = {
-	"wol",
-	{false, true, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
-	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
-	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
-	WOL_REG_DBG_FORCE_FRAME_K2_E5,
-	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
-};
-
-static struct block_defs block_bmbn_defs = {
-	"bmbn",
-	{false, true, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
-	 DBG_BUS_CLIENT_RBCB},
-	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
-	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
-	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ipc_defs = {
-	"ipc", {true, true, true},
false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - true, false, DBG_RESET_REG_MISCS_PL_UA, 8 -}; - -static struct block_defs block_nwm_defs = { - "nwm", - {false, true, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW}, - NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5, - NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5, - NWM_REG_DBG_FORCE_FRAME_K2_E5, - true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0 -}; - -static struct block_defs block_nws_defs = { - "nws", - {false, true, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW}, - NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5, - NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5, - NWS_REG_DBG_FORCE_FRAME_K2_E5, - true, false, DBG_RESET_REG_MISCS_PL_HV, 12 -}; - -static struct block_defs block_ms_defs = { - "ms", - {false, true, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, - MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5, - MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5, - MS_REG_DBG_FORCE_FRAME_K2_E5, - true, false, DBG_RESET_REG_MISCS_PL_HV, 13 -}; - -static struct block_defs block_phy_pcie_defs = { - "phy_pcie", - {false, true, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, - DBG_BUS_CLIENT_RBCH}, - PCIE_REG_DBG_COMMON_SELECT_K2_E5, - PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5, - PCIE_REG_DBG_COMMON_SHIFT_K2_E5, - PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5, - PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5, - false, false, MAX_DBG_RESET_REGS, 0 -}; - -static struct block_defs block_led_defs = { - "led", {false, true, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - true, false, DBG_RESET_REG_MISCS_PL_HV, 14 -}; - -static struct block_defs block_avs_wrap_defs = { - "avs_wrap", {false, true, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - true, false, DBG_RESET_REG_MISCS_PL_UA, 11 -}; - -static struct block_defs block_pxpreqbus_defs = { - "pxpreqbus", {false, false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - false, false, MAX_DBG_RESET_REGS, 0 -}; - -static struct block_defs block_misc_aeu_defs = { - "misc_aeu", {true, true, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - false, false, MAX_DBG_RESET_REGS, 0 -}; - -static struct block_defs block_bar0_map_defs = { - "bar0_map", {true, true, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - false, false, MAX_DBG_RESET_REGS, 0 -}; - -static struct block_defs *s_block_defs[MAX_BLOCK_ID] = { - &block_grc_defs, - &block_miscs_defs, - &block_misc_defs, - &block_dbu_defs, - &block_pglue_b_defs, - &block_cnig_defs, - &block_cpmu_defs, - &block_ncsi_defs, - &block_opte_defs, - &block_bmb_defs, - &block_pcie_defs, - &block_mcp_defs, - &block_mcp2_defs, - &block_pswhst_defs, - &block_pswhst2_defs, - &block_pswrd_defs, - &block_pswrd2_defs, - &block_pswwr_defs, - &block_pswwr2_defs, - &block_pswrq_defs, - &block_pswrq2_defs, - &block_pglcs_defs, - &block_dmae_defs, - &block_ptu_defs, - &block_tcm_defs, - &block_mcm_defs, - &block_ucm_defs, - &block_xcm_defs, - &block_ycm_defs, - &block_pcm_defs, - &block_qm_defs, - &block_tm_defs, - &block_dorq_defs, - &block_brb_defs, - &block_src_defs, - &block_prs_defs, - &block_tsdm_defs, - &block_msdm_defs, - 
&block_usdm_defs, - &block_xsdm_defs, - &block_ysdm_defs, - &block_psdm_defs, - &block_tsem_defs, - &block_msem_defs, - &block_usem_defs, - &block_xsem_defs, - &block_ysem_defs, - &block_psem_defs, - &block_rss_defs, - &block_tmld_defs, - &block_muld_defs, - &block_yuld_defs, - &block_xyld_defs, - &block_ptld_defs, - &block_ypld_defs, - &block_prm_defs, - &block_pbf_pb1_defs, - &block_pbf_pb2_defs, - &block_rpb_defs, - &block_btb_defs, - &block_pbf_defs, - &block_rdif_defs, - &block_tdif_defs, - &block_cdu_defs, - &block_ccfc_defs, - &block_tcfc_defs, - &block_igu_defs, - &block_cau_defs, - &block_rgfs_defs, - &block_rgsrc_defs, - &block_tgfs_defs, - &block_tgsrc_defs, - &block_umac_defs, - &block_xmac_defs, - &block_dbg_defs, - &block_nig_defs, - &block_wol_defs, - &block_bmbn_defs, - &block_ipc_defs, - &block_nwm_defs, - &block_nws_defs, - &block_ms_defs, - &block_phy_pcie_defs, - &block_led_defs, - &block_avs_wrap_defs, - &block_pxpreqbus_defs, - &block_misc_aeu_defs, - &block_bar0_map_defs, -}; - -static struct platform_defs s_platform_defs[] = { + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + true, + PSEM_REG_FAST_MEMORY, + PSEM_REG_DBG_FRAME_MODE_BB_K2, + PSEM_REG_SLOW_DBG_ACTIVE_BB_K2, + PSEM_REG_SLOW_DBG_MODE_BB_K2, + PSEM_REG_DBG_MODE1_CFG_BB_K2, + PSEM_REG_SYNC_DBG_EMPTY, + PSEM_REG_DBG_GPRE_VECT, + PCM_REG_CTX_RBC_ACCS, + {0, PCM_REG_SM_CON_CTX, 0, 0}, + {{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */ + }, +}; + +static struct hw_type_defs s_hw_type_defs[] = { + /* HW_TYPE_ASIC */ {"asic", 1, 256, 32768}, {"reserved", 0, 0, 0}, {"reserved2", 0, 0, 0}, @@ -1505,146 +579,159 @@ static struct platform_defs s_platform_defs[] = { static struct grc_param_defs s_grc_param_defs[] = { /* DBG_GRC_PARAM_DUMP_TSTORM */ - {{1, 1, 1}, 0, 1, false, false, 1, 1}, + {{1, 1}, 0, 1, false, false, 1, {1, 1}}, /* DBG_GRC_PARAM_DUMP_MSTORM */ - {{1, 1, 1}, 0, 1, false, false, 1, 1}, + {{1, 1}, 0, 1, false, false, 1, {1, 1}}, /* DBG_GRC_PARAM_DUMP_USTORM */ - {{1, 1, 1}, 0, 1, false, false, 1, 1}, + {{1, 1}, 0, 1, false, false, 1, {1, 1}}, /* DBG_GRC_PARAM_DUMP_XSTORM */ - {{1, 1, 1}, 0, 1, false, false, 1, 1}, + {{1, 1}, 0, 1, false, false, 1, {1, 1}}, /* DBG_GRC_PARAM_DUMP_YSTORM */ - {{1, 1, 1}, 0, 1, false, false, 1, 1}, + {{1, 1}, 0, 1, false, false, 1, {1, 1}}, /* DBG_GRC_PARAM_DUMP_PSTORM */ - {{1, 1, 1}, 0, 1, false, false, 1, 1}, + {{1, 1}, 0, 1, false, false, 1, {1, 1}}, /* DBG_GRC_PARAM_DUMP_REGS */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_RAM */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_PBUF */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_IOR */ - {{0, 0, 0}, 0, 1, false, false, 0, 1}, + {{0, 0}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_VFC */ - {{0, 0, 0}, 0, 1, false, false, 0, 1}, + {{0, 0}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_CM_CTX */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_ILT */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_RSS */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_CAU */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_QM */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* 
DBG_GRC_PARAM_DUMP_MCP */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, - /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */ - {{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1}, + /* DBG_GRC_PARAM_DUMP_DORQ */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_CFC */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_IGU */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_BRB */ - {{0, 0, 0}, 0, 1, false, false, 0, 1}, + {{0, 0}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_BTB */ - {{0, 0, 0}, 0, 1, false, false, 0, 1}, + {{0, 0}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_BMB */ - {{0, 0, 0}, 0, 1, false, false, 0, 0}, + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, - /* DBG_GRC_PARAM_DUMP_NIG */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + /* DBG_GRC_PARAM_RESERVED1 */ + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_DUMP_MULD */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_PRS */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_DMAE */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_TM */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_SDM */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_DIF */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_STATIC */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_UNSTALL */ - {{0, 0, 0}, 0, 1, false, false, 0, 0}, + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, - /* DBG_GRC_PARAM_NUM_LCIDS */ - {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false, - MAX_LCIDS, MAX_LCIDS}, + /* DBG_GRC_PARAM_RESERVED2 */ + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, - /* DBG_GRC_PARAM_NUM_LTIDS */ - {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false, - MAX_LTIDS, MAX_LTIDS}, + /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */ + {{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}}, /* DBG_GRC_PARAM_EXCLUDE_ALL */ - {{0, 0, 0}, 0, 1, true, false, 0, 0}, + {{0, 0}, 0, 1, true, false, 0, {0, 0}}, /* DBG_GRC_PARAM_CRASH */ - {{0, 0, 0}, 0, 1, true, false, 0, 0}, + {{0, 0}, 0, 1, true, false, 0, {0, 0}}, /* DBG_GRC_PARAM_PARITY_SAFE */ - {{0, 0, 0}, 0, 1, false, false, 1, 0}, + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_DUMP_CM */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_PHY */ - {{1, 1, 1}, 0, 1, false, false, 0, 1}, + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_NO_MCP */ - {{0, 0, 0}, 0, 1, false, false, 0, 0}, + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_NO_FW_VER */ - {{0, 0, 0}, 0, 1, false, false, 0, 0} + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_RESERVED3 */ + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */ + {{0, 1}, 0, 1, false, false, 0, {0, 1}}, + + /* DBG_GRC_PARAM_DUMP_ILT_CDUC */ + {{1, 1}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_DUMP_ILT_CDUT */ + {{1, 1}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_DUMP_CAU_EXT */ + {{0, 0}, 0, 1, false, false, 0, {1, 1}} }; static struct rss_mem_defs s_rss_mem_defs[] = { - { 
"rss_mem_cid", "rss_cid", 0, 32, - {256, 320, 512} }, + {"rss_mem_cid", "rss_cid", 0, 32, + {256, 320}}, - { "rss_mem_key_msb", "rss_key", 1024, 256, - {128, 208, 257} }, + {"rss_mem_key_msb", "rss_key", 1024, 256, + {128, 208}}, - { "rss_mem_key_lsb", "rss_key", 2048, 64, - {128, 208, 257} }, + {"rss_mem_key_lsb", "rss_key", 2048, 64, + {128, 208}}, - { "rss_mem_info", "rss_info", 3072, 16, - {128, 208, 256} }, + {"rss_mem_info", "rss_info", 3072, 16, + {128, 208}}, - { "rss_mem_ind", "rss_ind", 4096, 16, - {16384, 26624, 32768} } + {"rss_mem_ind", "rss_ind", 4096, 16, + {16384, 26624}} }; static struct vfc_ram_defs s_vfc_ram_defs[] = { @@ -1655,54 +742,31 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = { }; static struct big_ram_defs s_big_ram_defs[] = { - { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB, - BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA, - MISC_REG_BLOCK_256B_EN, {0, 0, 0}, - {153600, 180224, 282624} }, - - { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB, - BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA, - MISC_REG_BLOCK_256B_EN, {0, 1, 1}, - {92160, 117760, 168960} }, - - { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB, - BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA, - MISCS_REG_BLOCK_256B_EN, {0, 0, 0}, - {36864, 36864, 36864} } -}; - -static struct reset_reg_defs s_reset_regs_defs[] = { - /* DBG_RESET_REG_MISCS_PL_UA */ - { MISCS_REG_RESET_PL_UA, - {true, true, true}, {0x0, 0x0, 0x0} }, + {"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB, + BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA, + MISC_REG_BLOCK_256B_EN, {0, 0}, + {153600, 180224}}, - /* DBG_RESET_REG_MISCS_PL_HV */ - { MISCS_REG_RESET_PL_HV, - {true, true, true}, {0x0, 0x400, 0x600} }, + {"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB, + BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA, + MISC_REG_BLOCK_256B_EN, {0, 1}, + {92160, 117760}}, - /* DBG_RESET_REG_MISCS_PL_HV_2 */ - { MISCS_REG_RESET_PL_HV_2_K2_E5, - {false, true, true}, {0x0, 0x0, 0x0} }, - - /* DBG_RESET_REG_MISC_PL_UA */ - { MISC_REG_RESET_PL_UA, - {true, true, true}, {0x0, 0x0, 0x0} }, - - /* DBG_RESET_REG_MISC_PL_HV */ - { MISC_REG_RESET_PL_HV, - {true, true, true}, {0x0, 0x0, 0x0} }, - - /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */ - { MISC_REG_RESET_PL_PDA_VMAIN_1, - {true, true, true}, {0x4404040, 0x4404040, 0x404040} }, - - /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */ - { MISC_REG_RESET_PL_PDA_VMAIN_2, - {true, true, true}, {0x7, 0x7c00007, 0x5c08007} }, + {"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB, + BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA, + MISCS_REG_BLOCK_256B_EN, {0, 0}, + {36864, 36864}} +}; - /* DBG_RESET_REG_MISC_PL_PDA_VAUX */ - { MISC_REG_RESET_PL_PDA_VAUX, - {true, true, true}, {0x2, 0x2, 0x2} }, +static struct rbc_reset_defs s_rbc_reset_defs[] = { + {MISCS_REG_RESET_PL_HV, + {0x0, 0x400}}, + {MISC_REG_RESET_PL_PDA_VMAIN_1, + {0x4404040, 0x4404040}}, + {MISC_REG_RESET_PL_PDA_VMAIN_2, + {0x7, 0x7c00007}}, + {MISC_REG_RESET_PL_PDA_VAUX, + {0x2, 0x2}}, }; static struct phy_defs s_phy_defs[] = { @@ -1756,6 +820,15 @@ static u32 qed_read_unaligned_dword(u8 *buf) return dword; } +/* Sets the value of the specified GRC param */ +static void qed_grc_set_param(struct qed_hwfn *p_hwfn, + enum dbg_grc_params grc_param, u32 val) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + + dev_data->grc.param_val[grc_param] = val; +} + /* Returns the value of the specified GRC param */ static u32 
qed_grc_get_param(struct qed_hwfn *p_hwfn, enum dbg_grc_params grc_param) @@ -1776,9 +849,19 @@ static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn) } } +/* Sets pointer and size for the specified binary buffer type */ +static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn, + enum bin_dbg_buffer_type buf_type, + const u32 *ptr, u32 size) +{ + struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type]; + + buf->ptr = (void *)ptr; + buf->size = size; +} + /* Initializes debug data for the specified device */ -static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u8 num_pfs = 0, max_pfs_per_port = 0; @@ -1803,26 +886,25 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn, return DBG_STATUS_UNKNOWN_CHIP; } - /* Set platofrm */ - dev_data->platform_id = PLATFORM_ASIC; + /* Set HW type */ + dev_data->hw_type = HW_TYPE_ASIC; dev_data->mode_enable[MODE_ASIC] = 1; /* Set port mode */ - switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) { - case 0: + switch (p_hwfn->cdev->num_ports_in_engine) { + case 1: dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1; break; - case 1: + case 2: dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1; break; - case 2: + case 4: dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1; break; } /* Set 100G mode */ - if (dev_data->chip_id == CHIP_BB && - qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2) + if (QED_IS_CMT(p_hwfn->cdev)) dev_data->mode_enable[MODE_100G] = 1; /* Set number of ports */ @@ -1848,14 +930,36 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn, return DBG_STATUS_OK; } -static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn, - enum block_id block_id) +static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn, + enum block_id block_id) +{ + const struct dbg_block *dbg_block; + + dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr; + return dbg_block + block_id; +} + +static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn + *p_hwfn, + enum block_id + block_id) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + + return (const struct dbg_block_chip *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr + + block_id * MAX_CHIP_IDS + dev_data->chip_id; +} + +static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn + *p_hwfn, + u8 reset_reg_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - return (struct dbg_bus_block *)&dbg_bus_blocks[block_id * - MAX_CHIP_IDS + - dev_data->chip_id]; + return (const struct dbg_reset_reg *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr + + reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id; } /* Reads the FW info structure for the specified Storm from the chip, @@ -1876,8 +980,9 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, * The address is located in the last line of the Storm RAM. 
*/ addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + - DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) - - sizeof(fw_info_location); + DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) - + sizeof(fw_info_location); + dest = (u32 *)&fw_info_location; for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location)); @@ -2072,6 +1177,29 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn, return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str); } +/* Reads the chip revision from the chip and writes it as a param to the + * specified buffer. Returns the dumped size in dwords. + */ +static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + char param_str[3] = "??"; + + if (dev_data->hw_type == HW_TYPE_ASIC) { + u32 chip_rev, chip_metal; + + chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); + chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); + + param_str[0] = 'a' + (u8)chip_rev; + param_str[1] = '0' + (u8)chip_metal; + } + + return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str); +} + /* Writes a section header to the specified buffer. * Returns the dumped size in dwords. */ @@ -2095,7 +1223,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, u8 num_params; /* Dump global params section header */ - num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params; + num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params + + (dev_data->chip_id == CHIP_BB ? 1 : 0); offset += qed_dump_section_hdr(dump_buf + offset, dump, "global_params", num_params); @@ -2103,6 +1232,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump); offset += qed_dump_mfw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump); + offset += qed_dump_chip_revision_param(p_hwfn, + p_ptt, dump_buf + offset, dump); offset += qed_dump_num_param(dump_buf + offset, dump, "tools-version", TOOLS_VERSION); offset += qed_dump_str_param(dump_buf + offset, @@ -2112,11 +1243,12 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, offset += qed_dump_str_param(dump_buf + offset, dump, "platform", - s_platform_defs[dev_data->platform_id]. 
- name); - offset += - qed_dump_num_param(dump_buf + offset, dump, "pci-func", - p_hwfn->abs_pf_id); + s_hw_type_defs[dev_data->hw_type].name); + offset += qed_dump_num_param(dump_buf + offset, + dump, "pci-func", p_hwfn->abs_pf_id); + if (dev_data->chip_id == CHIP_BB) + offset += qed_dump_num_param(dump_buf + offset, + dump, "path", QED_PATH_ID(p_hwfn)); return offset; } @@ -2147,24 +1279,87 @@ static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 reg_val[MAX_DBG_RESET_REGS] = { 0 }; - u32 i; + u32 reg_val[NUM_DBG_RESET_REGS] = { 0 }; + u8 rst_reg_id; + u32 blk_id; /* Read reset registers */ - for (i = 0; i < MAX_DBG_RESET_REGS; i++) - if (s_reset_regs_defs[i].exists[dev_data->chip_id]) - reg_val[i] = qed_rd(p_hwfn, - p_ptt, s_reset_regs_defs[i].addr); + for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) { + const struct dbg_reset_reg *rst_reg; + bool rst_reg_removed; + u32 rst_reg_addr; + + rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id); + rst_reg_removed = GET_FIELD(rst_reg->data, + DBG_RESET_REG_IS_REMOVED); + rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data, + DBG_RESET_REG_ADDR)); + + if (!rst_reg_removed) + reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt, + rst_reg_addr); + } /* Check if blocks are in reset */ - for (i = 0; i < MAX_BLOCK_ID; i++) { - struct block_defs *block = s_block_defs[i]; + for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) { + const struct dbg_block_chip *blk; + bool has_rst_reg; + bool is_removed; + + blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id); + is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED); + has_rst_reg = GET_FIELD(blk->flags, + DBG_BLOCK_CHIP_HAS_RESET_REG); + + if (!is_removed && has_rst_reg) + dev_data->block_in_reset[blk_id] = + !(reg_val[blk->reset_reg_id] & + BIT(blk->reset_reg_bit_offset)); + } +} + +/* is_mode_match recursive function */ +static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn, + u16 *modes_buf_offset, u8 rec_depth) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 *dbg_array; + bool arg1, arg2; + u8 tree_val; - dev_data->block_in_reset[i] = block->has_reset_bit && - !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset)); + if (rec_depth > MAX_RECURSION_DEPTH) { + DP_NOTICE(p_hwfn, + "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n"); + return false; + } + + /* Get next element from modes tree buffer */ + dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr; + tree_val = dbg_array[(*modes_buf_offset)++]; + + switch (tree_val) { + case INIT_MODE_OP_NOT: + return !qed_is_mode_match_rec(p_hwfn, + modes_buf_offset, rec_depth + 1); + case INIT_MODE_OP_OR: + case INIT_MODE_OP_AND: + arg1 = qed_is_mode_match_rec(p_hwfn, + modes_buf_offset, rec_depth + 1); + arg2 = qed_is_mode_match_rec(p_hwfn, + modes_buf_offset, rec_depth + 1); + return (tree_val == INIT_MODE_OP_OR) ? 
(arg1 || + arg2) : (arg1 && arg2); + default: + return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0; } } +/* Returns true if the mode (specified using modes_buf_offset) is enabled */ +static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset) +{ + return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0); +} + /* Enable / disable the Debug block */ static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool enable) @@ -2176,23 +1371,21 @@ static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn, static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val; - struct block_defs *dbg_block = s_block_defs[BLOCK_DBG]; + u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val; + const struct dbg_reset_reg *reset_reg; + const struct dbg_block_chip *block; - dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr; - old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr); - new_reset_reg_val = - old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset); + block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG); + reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id); + reset_reg_addr = + DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR)); - qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val); - qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val); -} + old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr); + new_reset_reg_val = + old_reset_reg_val & ~BIT(block->reset_reg_bit_offset); -static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - enum dbg_bus_frame_modes mode) -{ - qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode); + qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val); + qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val); } /* Enable / disable Debug Bus clients according to the specified mask @@ -2204,28 +1397,65 @@ static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask); } -static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset) +static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum block_id block_id, + u8 line_id, + u8 enable_mask, + u8 right_shift, + u8 force_valid_mask, u8 force_frame_mask) +{ + const struct dbg_block_chip *block = + qed_get_dbg_block_per_chip(p_hwfn, block_id); + + qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr), + line_id); + qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr), + enable_mask); + qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr), + right_shift); + qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr), + force_valid_mask); + qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr), + force_frame_mask); +} + +/* Disable debug bus in all blocks */ +static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - bool arg1, arg2; - const u32 *ptr; - u8 tree_val; + u32 block_id; - /* Get next element from modes tree buffer */ - ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr; - tree_val = ((u8 *)ptr)[(*modes_buf_offset)++]; + /* Disable all blocks */ + for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { + const struct dbg_block_chip *block_per_chip = + qed_get_dbg_block_per_chip(p_hwfn, + (enum block_id)block_id); - switch 
(tree_val) { - case INIT_MODE_OP_NOT: - return !qed_is_mode_match(p_hwfn, modes_buf_offset); - case INIT_MODE_OP_OR: - case INIT_MODE_OP_AND: - arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset); - arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset); - return (tree_val == INIT_MODE_OP_OR) ? (arg1 || - arg2) : (arg1 && arg2); - default: - return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0; + if (GET_FIELD(block_per_chip->flags, + DBG_BLOCK_CHIP_IS_REMOVED) || + dev_data->block_in_reset[block_id]) + continue; + + /* Disable debug bus */ + if (GET_FIELD(block_per_chip->flags, + DBG_BLOCK_CHIP_HAS_DBG_BUS)) { + u32 dbg_en_addr = + block_per_chip->dbg_dword_enable_reg_addr; + u16 modes_buf_offset = + GET_FIELD(block_per_chip->dbg_bus_mode.data, + DBG_MODE_HDR_MODES_BUF_OFFSET); + bool eval_mode = + GET_FIELD(block_per_chip->dbg_bus_mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; + + if (!eval_mode || + qed_is_mode_match(p_hwfn, &modes_buf_offset)) + qed_wr(p_hwfn, p_ptt, + DWORDS_TO_BYTES(dbg_en_addr), + 0); + } } } @@ -2238,6 +1468,20 @@ static bool qed_grc_is_included(struct qed_hwfn *p_hwfn, return qed_grc_get_param(p_hwfn, grc_param) > 0; } +/* Returns the storm_id that matches the specified Storm letter, + * or MAX_DBG_STORMS if invalid storm letter. + */ +static enum dbg_storms qed_get_id_from_letter(char storm_letter) +{ + u8 storm_id; + + for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) + if (s_storm_defs[storm_id].letter == storm_letter) + return (enum dbg_storms)storm_id; + + return MAX_DBG_STORMS; +} + /* Returns true of the specified Storm should be included in the dump, false * otherwise. */ @@ -2253,14 +1497,20 @@ static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn, static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn, enum block_id block_id, u8 mem_group_id) { - struct block_defs *block = s_block_defs[block_id]; + const struct dbg_block *block; u8 i; - /* Check Storm match */ - if (block->associated_to_storm && - !qed_grc_is_storm_included(p_hwfn, - (enum dbg_storms)block->storm_id)) - return false; + block = get_dbg_block(p_hwfn, block_id); + + /* If the block is associated with a Storm, check Storm match */ + if (block->associated_storm_letter) { + enum dbg_storms associated_storm_id = + qed_get_id_from_letter(block->associated_storm_letter); + + if (associated_storm_id == MAX_DBG_STORMS || + !qed_grc_is_storm_included(p_hwfn, associated_storm_id)) + return false; + } for (i = 0; i < NUM_BIG_RAM_TYPES; i++) { struct big_ram_defs *big_ram = &s_big_ram_defs[i]; @@ -2282,6 +1532,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn, case MEM_GROUP_CAU_SB: case MEM_GROUP_CAU_PI: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU); + case MEM_GROUP_CAU_MEM_EXT: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT); case MEM_GROUP_QM_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM); case MEM_GROUP_CFC_MEM: @@ -2289,6 +1541,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn, case MEM_GROUP_TASK_CFC_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) || qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX); + case MEM_GROUP_DORQ_MEM: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ); case MEM_GROUP_IGU_MEM: case MEM_GROUP_IGU_MSIX: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU); @@ -2334,64 +1588,104 @@ static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn, msleep(STALL_DELAY_MS); } -/* Takes all blocks out of reset */ +/* Takes all blocks out of reset. 
If rbc_only is true, only RBC clients are + * taken out of reset. + */ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) + struct qed_ptt *p_ptt, bool rbc_only) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 reg_val[MAX_DBG_RESET_REGS] = { 0 }; - u32 block_id, i; + u8 chip_id = dev_data->chip_id; + u32 i; - /* Fill reset regs values */ - for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { - struct block_defs *block = s_block_defs[block_id]; + /* Take RBCs out of reset */ + for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++) + if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id]) + qed_wr(p_hwfn, + p_ptt, + s_rbc_reset_defs[i].reset_reg_addr + + RESET_REG_UNRESET_OFFSET, + s_rbc_reset_defs[i].reset_val[chip_id]); - if (block->exists[dev_data->chip_id] && block->has_reset_bit && - block->unreset) - reg_val[block->reset_reg] |= - BIT(block->reset_bit_offset); - } + if (!rbc_only) { + u32 reg_val[NUM_DBG_RESET_REGS] = { 0 }; + u8 reset_reg_id; + u32 block_id; - /* Write reset registers */ - for (i = 0; i < MAX_DBG_RESET_REGS; i++) { - if (!s_reset_regs_defs[i].exists[dev_data->chip_id]) - continue; + /* Fill reset regs values */ + for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) { + bool is_removed, has_reset_reg, unreset_before_dump; + const struct dbg_block_chip *block; + + block = qed_get_dbg_block_per_chip(p_hwfn, + (enum block_id) + block_id); + is_removed = + GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED); + has_reset_reg = + GET_FIELD(block->flags, + DBG_BLOCK_CHIP_HAS_RESET_REG); + unreset_before_dump = + GET_FIELD(block->flags, + DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP); + + if (!is_removed && has_reset_reg && unreset_before_dump) + reg_val[block->reset_reg_id] |= + BIT(block->reset_reg_bit_offset); + } - reg_val[i] |= - s_reset_regs_defs[i].unreset_val[dev_data->chip_id]; + /* Write reset registers */ + for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS; + reset_reg_id++) { + const struct dbg_reset_reg *reset_reg; + u32 reset_reg_addr; - if (reg_val[i]) - qed_wr(p_hwfn, - p_ptt, - s_reset_regs_defs[i].addr + - RESET_REG_UNRESET_OFFSET, reg_val[i]); + reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id); + + if (GET_FIELD + (reset_reg->data, DBG_RESET_REG_IS_REMOVED)) + continue; + + if (reg_val[reset_reg_id]) { + reset_reg_addr = + GET_FIELD(reset_reg->data, + DBG_RESET_REG_ADDR); + qed_wr(p_hwfn, + p_ptt, + DWORDS_TO_BYTES(reset_reg_addr) + + RESET_REG_UNRESET_OFFSET, + reg_val[reset_reg_id]); + } + } } } /* Returns the attention block data of the specified block */ static const struct dbg_attn_block_type_data * -qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type) +qed_get_block_attn_data(struct qed_hwfn *p_hwfn, + enum block_id block_id, enum dbg_attn_type attn_type) { const struct dbg_attn_block *base_attn_block_arr = - (const struct dbg_attn_block *) - s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr; + (const struct dbg_attn_block *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr; return &base_attn_block_arr[block_id].per_type_data[attn_type]; } /* Returns the attention registers of the specified block */ static const struct dbg_attn_reg * -qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type, +qed_get_block_attn_regs(struct qed_hwfn *p_hwfn, + enum block_id block_id, enum dbg_attn_type attn_type, u8 *num_attn_regs) { const struct dbg_attn_block_type_data *block_type_data = - qed_get_block_attn_data(block_id, attn_type); + qed_get_block_attn_data(p_hwfn, 
block_id, attn_type); *num_attn_regs = block_type_data->num_regs; - return &((const struct dbg_attn_reg *) - s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data-> - regs_offset]; + return (const struct dbg_attn_reg *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr + + block_type_data->regs_offset; } /* For each block, clear the status of all parities */ @@ -2403,11 +1697,12 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, u8 reg_idx, num_attn_regs; u32 block_id; - for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { + for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) { if (dev_data->block_in_reset[block_id]) continue; - attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id, + attn_reg_arr = qed_get_block_attn_regs(p_hwfn, + (enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs); @@ -2435,22 +1730,20 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, } /* Dumps GRC registers section header. Returns the dumped size in dwords. - * The following parameters are dumped: + * the following parameters are dumped: * - count: no. of dumped entries * - split_type: split type * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE) - * - param_name: user parameter value (dumped only if param_name != NULL - * and param_val != NULL). + * - reg_type_name: register type name (dumped only if reg_type_name != NULL) */ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, bool dump, u32 num_reg_entries, enum init_split_types split_type, - u8 split_id, - const char *param_name, const char *param_val) + u8 split_id, const char *reg_type_name) { u8 num_params = 2 + - (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0); + (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0); u32 offset = 0; offset += qed_dump_section_hdr(dump_buf + offset, @@ -2463,9 +1756,9 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, if (split_type != SPLIT_TYPE_NONE) offset += qed_dump_num_param(dump_buf + offset, dump, "id", split_id); - if (param_name && param_val) + if (reg_type_name) offset += qed_dump_str_param(dump_buf + offset, - dump, param_name, param_val); + dump, "type", reg_type_name); return offset; } @@ -2495,21 +1788,12 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0; + bool read_using_dmae = false; + u32 thresh; if (!dump) return len; - /* Print log if needed */ - dev_data->num_regs_read += len; - if (dev_data->num_regs_read >= - s_platform_defs[dev_data->platform_id].log_thresh) { - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, - "Dumping %d registers...\n", - dev_data->num_regs_read); - dev_data->num_regs_read = 0; - } - switch (split_type) { case SPLIT_TYPE_PORT: port_id = split_id; @@ -2530,38 +1814,77 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, } /* Try reading using DMAE */ - if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE && - (len >= s_platform_defs[dev_data->platform_id].dmae_thresh || - wide_bus)) { - if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr), - (u64)(uintptr_t)(dump_buf), len, NULL)) - return len; - dev_data->use_dmae = 0; - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, - "Failed reading from chip using DMAE, using GRC instead\n"); + if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF && + (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh || + (PROTECT_WIDE_BUS && wide_bus))) { + struct qed_dmae_params dmae_params; + + /* Set DMAE params */ + memset(&dmae_params, 0, 
sizeof(dmae_params)); + SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1); + switch (split_type) { + case SPLIT_TYPE_PORT: + SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID, + 1); + dmae_params.port_id = port_id; + break; + case SPLIT_TYPE_PF: + SET_FIELD(dmae_params.flags, + QED_DMAE_PARAMS_SRC_PF_VALID, 1); + dmae_params.src_pfid = pf_id; + break; + case SPLIT_TYPE_PORT_PF: + SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID, + 1); + SET_FIELD(dmae_params.flags, + QED_DMAE_PARAMS_SRC_PF_VALID, 1); + dmae_params.port_id = port_id; + dmae_params.src_pfid = pf_id; + break; + default: + break; + } + + /* Execute DMAE command */ + read_using_dmae = !qed_dmae_grc2host(p_hwfn, + p_ptt, + DWORDS_TO_BYTES(addr), + (u64)(uintptr_t)(dump_buf), + len, &dmae_params); + if (!read_using_dmae) { + dev_data->use_dmae = 0; + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "Failed reading from chip using DMAE, using GRC instead\n"); + } } + if (read_using_dmae) + goto print_log; + /* If not read using DMAE, read using GRC */ /* Set pretend */ - if (split_type != dev_data->pretend.split_type || split_id != - dev_data->pretend.split_id) { + if (split_type != dev_data->pretend.split_type || + split_id != dev_data->pretend.split_id) { switch (split_type) { case SPLIT_TYPE_PORT: qed_port_pretend(p_hwfn, p_ptt, port_id); break; case SPLIT_TYPE_PF: - fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; + fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, + pf_id); qed_fid_pretend(p_hwfn, p_ptt, fid); break; case SPLIT_TYPE_PORT_PF: - fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; + fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, + pf_id); qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid); break; case SPLIT_TYPE_VF: - fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) | - (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT); + fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1) + | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID, + vf_id); qed_fid_pretend(p_hwfn, p_ptt, fid); break; default: @@ -2575,6 +1898,16 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, /* Read registers using GRC */ qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len); +print_log: + /* Print log */ + dev_data->num_regs_read += len; + thresh = s_hw_type_defs[dev_data->hw_type].log_thresh; + if ((dev_data->num_regs_read / thresh) > + ((dev_data->num_regs_read - len) / thresh)) + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "Dumped %d registers...\n", dev_data->num_regs_read); + return len; } @@ -2659,7 +1992,7 @@ static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn, /* Dumps GRC registers entries. Returns the dumped size in dwords. 
*/ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct dbg_array input_regs_arr, + struct virt_mem_desc input_regs_arr, u32 *dump_buf, bool dump, enum init_split_types split_type, @@ -2672,10 +2005,10 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, *num_dumped_reg_entries = 0; - while (input_offset < input_regs_arr.size_in_dwords) { + while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) { const struct dbg_dump_cond_hdr *cond_hdr = (const struct dbg_dump_cond_hdr *) - &input_regs_arr.ptr[input_offset++]; + input_regs_arr.ptr + input_offset++; u16 modes_buf_offset; bool eval_mode; @@ -2698,7 +2031,7 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, for (i = 0; i < cond_hdr->data_size; i++, input_offset++) { const struct dbg_dump_reg *reg = (const struct dbg_dump_reg *) - &input_regs_arr.ptr[input_offset]; + input_regs_arr.ptr + input_offset; u32 addr, len; bool wide_bus; @@ -2723,14 +2056,12 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, /* Dumps GRC registers entries. Returns the dumped size in dwords. */ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct dbg_array input_regs_arr, + struct virt_mem_desc input_regs_arr, u32 *dump_buf, bool dump, bool block_enable[MAX_BLOCK_ID], enum init_split_types split_type, - u8 split_id, - const char *param_name, - const char *param_val) + u8 split_id, const char *reg_type_name) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; enum init_split_types hdr_split_type = split_type; @@ -2748,7 +2079,7 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, false, 0, hdr_split_type, - hdr_split_id, param_name, param_val); + hdr_split_id, reg_type_name); /* Dump registers */ offset += qed_grc_dump_regs_entries(p_hwfn, @@ -2767,7 +2098,7 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, dump, num_dumped_reg_entries, hdr_split_type, - hdr_split_id, param_name, param_val); + hdr_split_id, reg_type_name); return num_dumped_reg_entries > 0 ? 
offset : 0;
 }
@@ -2780,32 +2111,33 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
                                  u32 *dump_buf,
                                  bool dump,
                                  bool block_enable[MAX_BLOCK_ID],
-                                 const char *param_name, const char *param_val)
+                                 const char *reg_type_name)
 {
+       struct virt_mem_desc *dbg_buf =
+               &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
        struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
        u32 offset = 0, input_offset = 0;
-       u16 fid;
-       while (input_offset <
-              s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
+
+       while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
                const struct dbg_dump_split_hdr *split_hdr;
-               struct dbg_array curr_input_regs_arr;
+               struct virt_mem_desc curr_input_regs_arr;
                enum init_split_types split_type;
                u16 split_count = 0;
                u32 split_data_size;
                u8 split_id;
 
                split_hdr =
-                   (const struct dbg_dump_split_hdr *)
-                   &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
+                   (const struct dbg_dump_split_hdr *)
+                   dbg_buf->ptr + input_offset++;
                split_type =
-                   GET_FIELD(split_hdr->hdr,
-                             DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
-               split_data_size =
-                   GET_FIELD(split_hdr->hdr,
-                             DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+                   GET_FIELD(split_hdr->hdr,
+                             DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+               split_data_size = GET_FIELD(split_hdr->hdr,
+                                           DBG_DUMP_SPLIT_HDR_DATA_SIZE);
                curr_input_regs_arr.ptr =
-                   &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
-               curr_input_regs_arr.size_in_dwords = split_data_size;
+                   (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
+                   input_offset;
+               curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);
 
                switch (split_type) {
                case SPLIT_TYPE_NONE:
@@ -2833,16 +2165,16 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
                                                   dump,
                                                   block_enable,
                                                   split_type,
                                                   split_id,
-                                                  param_name,
-                                                  param_val);
+                                                  reg_type_name);
 
                input_offset += split_data_size;
        }
 
        /* Cancel pretends (pretend to original PF) */
        if (dump) {
-               fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
-               qed_fid_pretend(p_hwfn, p_ptt, fid);
+               qed_fid_pretend(p_hwfn, p_ptt,
+                               FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+                                           p_hwfn->rel_pf_id));
                dev_data->pretend.split_type = SPLIT_TYPE_NONE;
                dev_data->pretend.split_id = 0;
        }
@@ -2855,26 +2187,32 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   u32 *dump_buf, bool dump)
 {
-       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-       u32 i, offset = 0, num_regs = 0;
+       u32 offset = 0, num_regs = 0;
+       u8 reset_reg_id;
 
        /* Calculate header size */
        offset += qed_grc_dump_regs_hdr(dump_buf,
-                                       false, 0,
-                                       SPLIT_TYPE_NONE, 0, NULL, NULL);
+                                       false,
+                                       0, SPLIT_TYPE_NONE, 0, "RESET_REGS");
 
        /* Write reset registers */
-       for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
-               if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
+       for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
+            reset_reg_id++) {
+               const struct dbg_reset_reg *reset_reg;
+               u32 reset_reg_addr;
+
+               reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
+
+               if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
                        continue;
 
+               reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
                offset += qed_grc_dump_reg_entry(p_hwfn,
                                                 p_ptt,
                                                 dump_buf + offset,
                                                 dump,
-                                                BYTES_TO_DWORDS
-                                                (s_reset_regs_defs[i].addr), 1,
-                                                false, SPLIT_TYPE_NONE, 0);
+                                                reset_reg_addr,
+                                                1, false, SPLIT_TYPE_NONE, 0);
                num_regs++;
        }
 
@@ -2882,7 +2220,7 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
        if (dump)
                qed_grc_dump_regs_hdr(dump_buf,
                                      true, num_regs, SPLIT_TYPE_NONE,
-                                     0, NULL, NULL);
+                                     0, "RESET_REGS");
 
        return offset;
 }
@@ -2895,21 +2233,23 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
                                      u32 *dump_buf, bool dump)
 {
        struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-       u32 block_id, offset = 0, num_reg_entries = 0;
+       u32 block_id, offset = 0, stall_regs_offset;
        const struct dbg_attn_reg *attn_reg_arr;
        u8 storm_id, reg_idx, num_attn_regs;
+       u32 num_reg_entries = 0;
 
-       /* Calculate header size */
+       /* Write empty header for attention registers */
        offset += qed_grc_dump_regs_hdr(dump_buf,
-                                       false, 0, SPLIT_TYPE_NONE,
-                                       0, NULL, NULL);
+                                       false,
+                                       0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");
 
        /* Write parity registers */
-       for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+       for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
                if (dev_data->block_in_reset[block_id] && dump)
                        continue;
 
-               attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+               attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
+                                                      (enum block_id)block_id,
                                                       ATTN_TYPE_PARITY,
                                                       &num_attn_regs);
@@ -2952,16 +2292,29 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
                }
        }
 
+       /* Overwrite header for attention registers */
+       if (dump)
+               qed_grc_dump_regs_hdr(dump_buf,
+                                     true,
+                                     num_reg_entries,
+                                     SPLIT_TYPE_NONE, 0, "ATTN_REGS");
+
+       /* Write empty header for stall registers */
+       stall_regs_offset = offset;
+       offset += qed_grc_dump_regs_hdr(dump_buf,
+                                       false, 0, SPLIT_TYPE_NONE, 0, "REGS");
+
        /* Write Storm stall status registers */
-       for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+       for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
+            storm_id++) {
                struct storm_defs *storm = &s_storm_defs[storm_id];
                u32 addr;
 
-               if (dev_data->block_in_reset[storm->block_id] && dump)
+               if (dev_data->block_in_reset[storm->sem_block_id] && dump)
                        continue;
 
                addr =
-                   BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
+                   BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
                                    SEM_FAST_REG_STALLED);
                offset += qed_grc_dump_reg_entry(p_hwfn,
                                                 p_ptt,
@@ -2973,12 +2326,12 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
                num_reg_entries++;
        }
 
-       /* Write header */
+       /* Overwrite header for stall registers */
        if (dump)
-               qed_grc_dump_regs_hdr(dump_buf,
+               qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
                                      true,
-                                     num_reg_entries, SPLIT_TYPE_NONE,
-                                     0, NULL, NULL);
+                                     num_reg_entries,
+                                     SPLIT_TYPE_NONE, 0, "REGS");
 
        return offset;
 }
@@ -2991,8 +2344,7 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
        u32 offset = 0, addr;
 
        offset += qed_grc_dump_regs_hdr(dump_buf,
-                                       dump, 2, SPLIT_TYPE_NONE, 0,
-                                       NULL, NULL);
+                                       dump, 2, SPLIT_TYPE_NONE, 0, "REGS");
 
        /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
        * skipped).
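
The qed_grc_dump_modified_regs() hunks above restructure the function around a reserve-then-overwrite pattern: each register group first gets its header written with a false dump flag, which only advances the offset, the entries are then dumped while being counted (blocks in reset are skipped), and the header is finally rewritten in place with the real count, using the saved stall_regs_offset for the second group. A minimal standalone sketch of that pattern, using hypothetical sketch_* names rather than code from this patch:

    #include <stdio.h>

    /* Stand-ins for the qed dump helpers: with dump == 0 they only report
     * their size in dwords, with dump == 1 they also write to the buffer.
     */
    static unsigned int sketch_dump_hdr(unsigned int *buf, int dump,
                                        unsigned int num_entries)
    {
            if (dump)
                    buf[0] = num_entries;
            return 1;
    }

    static unsigned int sketch_dump_entry(unsigned int *buf, int dump,
                                          unsigned int val)
    {
            if (dump)
                    buf[0] = val;
            return 1;
    }

    /* Reserve a header, dump entries while counting them, then overwrite
     * the header in place once the final count is known.
     */
    static unsigned int sketch_dump_group(unsigned int *dump_buf, int dump,
                                          const unsigned int *vals,
                                          unsigned int n)
    {
            unsigned int offset = 0, num_entries = 0, i;
            unsigned int hdr_offset = offset;

            /* Placeholder header, always written with dump == 0 */
            offset += sketch_dump_hdr(dump_buf + offset, 0, 0);

            for (i = 0; i < n; i++) {
                    if (!vals[i])
                            continue; /* stands in for "block is in reset" */
                    offset += sketch_dump_entry(dump_buf + offset, dump,
                                                vals[i]);
                    num_entries++;
            }

            /* Overwrite the placeholder with the real entry count */
            if (dump)
                    sketch_dump_hdr(dump_buf + hdr_offset, 1, num_entries);

            return offset; /* dumped size in dwords */
    }

    int main(void)
    {
            const unsigned int vals[] = { 7, 0, 9, 3 };
            unsigned int buf[16] = { 0 };
            unsigned int sz;

            sz = sketch_dump_group(buf, 0, vals, 4); /* sizing pass */
            sketch_dump_group(buf, 1, vals, 4);      /* dump pass */
            printf("%u dwords, %u entries\n", sz, buf[0]);
            return 0;
    }

The same convention runs through the rest of the file: every dump helper returns its size in dwords, so a caller can make a sizing pass with dump == false to allocate the buffer and a second pass to fill it.
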
@@ -3040,8 +2392,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, u32 len, u32 bit_width, bool packed, - const char *mem_group, - bool is_storm, char storm_letter) + const char *mem_group, char storm_letter) { u8 num_params = 3; u32 offset = 0; @@ -3062,7 +2413,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, if (name) { /* Dump name */ - if (is_storm) { + if (storm_letter) { strcpy(buf, "?STORM_"); buf[0] = storm_letter; strcpy(buf + strlen(buf), name); @@ -3094,7 +2445,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, dump, "packed", 1); /* Dump reg type */ - if (is_storm) { + if (storm_letter) { strcpy(buf, "?STORM_"); buf[0] = storm_letter; strcpy(buf + strlen(buf), mem_group); @@ -3121,8 +2472,7 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, bool wide_bus, u32 bit_width, bool packed, - const char *mem_group, - bool is_storm, char storm_letter) + const char *mem_group, char storm_letter) { u32 offset = 0; @@ -3133,8 +2483,7 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, addr, len, bit_width, - packed, - mem_group, is_storm, storm_letter); + packed, mem_group, storm_letter); offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, @@ -3147,20 +2496,21 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, /* Dumps GRC memories entries. Returns the dumped size in dwords. */ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct dbg_array input_mems_arr, + struct virt_mem_desc input_mems_arr, u32 *dump_buf, bool dump) { u32 i, offset = 0, input_offset = 0; bool mode_match = true; - while (input_offset < input_mems_arr.size_in_dwords) { + while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) { const struct dbg_dump_cond_hdr *cond_hdr; u16 modes_buf_offset; u32 num_entries; bool eval_mode; - cond_hdr = (const struct dbg_dump_cond_hdr *) - &input_mems_arr.ptr[input_offset++]; + cond_hdr = + (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr + + input_offset++; num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS; /* Check required mode */ @@ -3182,24 +2532,25 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn, for (i = 0; i < num_entries; i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) { const struct dbg_dump_mem *mem = - (const struct dbg_dump_mem *) - &input_mems_arr.ptr[input_offset]; - u8 mem_group_id = GET_FIELD(mem->dword0, - DBG_DUMP_MEM_MEM_GROUP_ID); - bool is_storm = false, mem_wide_bus; - enum dbg_grc_params grc_param; - char storm_letter = 'a'; - enum block_id block_id; + (const struct dbg_dump_mem *)((u32 *) + input_mems_arr.ptr + + input_offset); + const struct dbg_block *block; + char storm_letter = 0; u32 mem_addr, mem_len; + bool mem_wide_bus; + u8 mem_group_id; + mem_group_id = GET_FIELD(mem->dword0, + DBG_DUMP_MEM_MEM_GROUP_ID); if (mem_group_id >= MEM_GROUPS_NUM) { DP_NOTICE(p_hwfn, "Invalid mem_group_id\n"); return 0; } - block_id = (enum block_id)cond_hdr->block_id; if (!qed_grc_is_mem_included(p_hwfn, - block_id, + (enum block_id) + cond_hdr->block_id, mem_group_id)) continue; @@ -3208,42 +2559,14 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn, mem_wide_bus = GET_FIELD(mem->dword1, DBG_DUMP_MEM_WIDE_BUS); - /* Update memory length for CCFC/TCFC memories - * according to number of LCIDs/LTIDs. 
- */ - if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) { - if (mem_len % MAX_LCIDS) { - DP_NOTICE(p_hwfn, - "Invalid CCFC connection memory size\n"); - return 0; - } - - grc_param = DBG_GRC_PARAM_NUM_LCIDS; - mem_len = qed_grc_get_param(p_hwfn, grc_param) * - (mem_len / MAX_LCIDS); - } else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) { - if (mem_len % MAX_LTIDS) { - DP_NOTICE(p_hwfn, - "Invalid TCFC task memory size\n"); - return 0; - } - - grc_param = DBG_GRC_PARAM_NUM_LTIDS; - mem_len = qed_grc_get_param(p_hwfn, grc_param) * - (mem_len / MAX_LTIDS); - } + block = get_dbg_block(p_hwfn, + cond_hdr->block_id); - /* If memory is associated with Storm, update Storm - * details. + /* If memory is associated with Storm, + * update storm details */ - if (s_block_defs - [cond_hdr->block_id]->associated_to_storm) { - is_storm = true; - storm_letter = - s_storm_defs[s_block_defs - [cond_hdr->block_id]-> - storm_id].letter; - } + if (block->associated_storm_letter) + storm_letter = block->associated_storm_letter; /* Dump memory */ offset += qed_grc_dump_mem(p_hwfn, @@ -3257,7 +2580,6 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn, 0, false, s_mem_group_names[mem_group_id], - is_storm, storm_letter); } } @@ -3272,26 +2594,25 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { + struct virt_mem_desc *dbg_buf = + &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM]; u32 offset = 0, input_offset = 0; - while (input_offset < - s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) { + while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) { const struct dbg_dump_split_hdr *split_hdr; - struct dbg_array curr_input_mems_arr; + struct virt_mem_desc curr_input_mems_arr; enum init_split_types split_type; u32 split_data_size; - split_hdr = (const struct dbg_dump_split_hdr *) - &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++]; - split_type = - GET_FIELD(split_hdr->hdr, - DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); - split_data_size = - GET_FIELD(split_hdr->hdr, - DBG_DUMP_SPLIT_HDR_DATA_SIZE); - curr_input_mems_arr.ptr = - &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset]; - curr_input_mems_arr.size_in_dwords = split_data_size; + split_hdr = + (const struct dbg_dump_split_hdr *)dbg_buf->ptr + + input_offset++; + split_type = GET_FIELD(split_hdr->hdr, + DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); + split_data_size = GET_FIELD(split_hdr->hdr, + DBG_DUMP_SPLIT_HDR_DATA_SIZE); + curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset; + curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size); if (split_type == SPLIT_TYPE_NONE) offset += qed_grc_dump_mem_entries(p_hwfn, @@ -3319,17 +2640,19 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, bool dump, const char *name, u32 num_lids, - u32 lid_size, - u32 rd_reg_addr, - u8 storm_id) + enum cm_ctx_types ctx_type, u8 storm_id) { + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; struct storm_defs *storm = &s_storm_defs[storm_id]; - u32 i, lid, total_size, offset = 0; + u32 i, lid, lid_size, total_size; + u32 rd_reg_addr, offset = 0; + + /* Convert quad-regs to dwords */ + lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4; if (!lid_size) return 0; - lid_size *= BYTES_IN_DWORD; total_size = num_lids * lid_size; offset += qed_grc_dump_mem_hdr(p_hwfn, @@ -3339,18 +2662,26 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, 0, total_size, lid_size * 32, - false, name, true, storm->letter); + false, name, storm->letter); if (!dump) return offset + total_size; + rd_reg_addr = 
BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]); + /* Dump context data */ for (lid = 0; lid < num_lids; lid++) { - for (i = 0; i < lid_size; i++, offset++) { + for (i = 0; i < lid_size; i++) { qed_wr(p_hwfn, p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid); - *(dump_buf + offset) = qed_rd(p_hwfn, - p_ptt, rd_reg_addr); + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + rd_reg_addr, + 1, + false, + SPLIT_TYPE_NONE, 0); } } @@ -3361,115 +2692,126 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { - enum dbg_grc_params grc_param; u32 offset = 0; u8 storm_id; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { - struct storm_defs *storm = &s_storm_defs[storm_id]; - if (!qed_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id)) continue; /* Dump Conn AG context size */ - grc_param = DBG_GRC_PARAM_NUM_LCIDS; - offset += - qed_grc_dump_ctx_data(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - "CONN_AG_CTX", - qed_grc_get_param(p_hwfn, - grc_param), - storm->cm_conn_ag_ctx_lid_size, - storm->cm_conn_ag_ctx_rd_addr, - storm_id); + offset += qed_grc_dump_ctx_data(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + "CONN_AG_CTX", + NUM_OF_LCIDS, + CM_CTX_CONN_AG, storm_id); /* Dump Conn ST context size */ - grc_param = DBG_GRC_PARAM_NUM_LCIDS; - offset += - qed_grc_dump_ctx_data(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - "CONN_ST_CTX", - qed_grc_get_param(p_hwfn, - grc_param), - storm->cm_conn_st_ctx_lid_size, - storm->cm_conn_st_ctx_rd_addr, - storm_id); + offset += qed_grc_dump_ctx_data(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + "CONN_ST_CTX", + NUM_OF_LCIDS, + CM_CTX_CONN_ST, storm_id); /* Dump Task AG context size */ - grc_param = DBG_GRC_PARAM_NUM_LTIDS; - offset += - qed_grc_dump_ctx_data(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - "TASK_AG_CTX", - qed_grc_get_param(p_hwfn, - grc_param), - storm->cm_task_ag_ctx_lid_size, - storm->cm_task_ag_ctx_rd_addr, - storm_id); + offset += qed_grc_dump_ctx_data(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + "TASK_AG_CTX", + NUM_OF_LTIDS, + CM_CTX_TASK_AG, storm_id); /* Dump Task ST context size */ - grc_param = DBG_GRC_PARAM_NUM_LTIDS; - offset += - qed_grc_dump_ctx_data(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - "TASK_ST_CTX", - qed_grc_get_param(p_hwfn, - grc_param), - storm->cm_task_st_ctx_lid_size, - storm->cm_task_st_ctx_rd_addr, - storm_id); + offset += qed_grc_dump_ctx_data(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + "TASK_ST_CTX", + NUM_OF_LTIDS, + CM_CTX_TASK_ST, storm_id); } return offset; } -/* Dumps GRC IORs data. Returns the dumped size in dwords. */ -static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) -{ - char buf[10] = "IOR_SET_?"; - u32 addr, offset = 0; - u8 storm_id, set_id; +#define VFC_STATUS_RESP_READY_BIT 0 +#define VFC_STATUS_BUSY_BIT 1 +#define VFC_STATUS_SENDING_CMD_BIT 2 - for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { - struct storm_defs *storm = &s_storm_defs[storm_id]; +#define VFC_POLLING_DELAY_MS 1 +#define VFC_POLLING_COUNT 20 - if (!qed_grc_is_storm_included(p_hwfn, - (enum dbg_storms)storm_id)) - continue; +/* Reads data from VFC. Returns the number of dwords read (0 on error). + * Sizes are specified in dwords. 
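+ * The handshake below mirrors the hardware flow: the command and address
+ * are written to the Storm's SEM_FAST VFC registers, and each response
+ * dword is read only after polling SEM_FAST_REG_VFC_STATUS for the
+ * RESP_READY bit, giving up after VFC_POLLING_COUNT polls. Typical
+ * callers are qed_grc_dump_vfc_cam() and qed_grc_dump_vfc_ram() below.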
+ */ +static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct storm_defs *storm, + u32 *cmd_data, + u32 cmd_size, + u32 *addr_data, + u32 addr_size, + u32 resp_size, u32 *dump_buf) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 vfc_status, polling_ms, polling_count = 0, i; + u32 reg_addr, sem_base; + bool is_ready = false; + + sem_base = storm->sem_fast_mem_addr; + polling_ms = VFC_POLLING_DELAY_MS * + s_hw_type_defs[dev_data->hw_type].delay_factor; + + /* Write VFC command */ + ARR_REG_WR(p_hwfn, + p_ptt, + sem_base + SEM_FAST_REG_VFC_DATA_WR, + cmd_data, cmd_size); + + /* Write VFC address */ + ARR_REG_WR(p_hwfn, + p_ptt, + sem_base + SEM_FAST_REG_VFC_ADDR, + addr_data, addr_size); + + /* Read response */ + for (i = 0; i < resp_size; i++) { + /* Poll until ready */ + do { + reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS; + qed_grc_dump_addr_range(p_hwfn, + p_ptt, + &vfc_status, + true, + BYTES_TO_DWORDS(reg_addr), + 1, + false, SPLIT_TYPE_NONE, 0); + is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT); + + if (!is_ready) { + if (polling_count++ == VFC_POLLING_COUNT) + return 0; - for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) { - addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + - SEM_FAST_REG_STORM_REG_FILE) + - IOR_SET_OFFSET(set_id); - if (strlen(buf) > 0) - buf[strlen(buf) - 1] = '0' + set_id; - offset += qed_grc_dump_mem(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - buf, - addr, - IORS_PER_SET, - false, - 32, - false, - "ior", - true, - storm->letter); - } + msleep(polling_ms); + } + } while (!is_ready); + + reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD; + qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + i, + true, + BYTES_TO_DWORDS(reg_addr), + 1, false, SPLIT_TYPE_NONE, 0); } - return offset; + return resp_size; } /* Dump VFC CAM. Returns the dumped size in dwords. 
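 * For each of the VFC_CAM_NUM_ROWS rows, a VFC_OPCODE_CAM_RD command is
 * issued through qed_grc_dump_read_from_vfc(), which appends
 * VFC_CAM_RESP_DWORDS of response data to the dump buffer.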
*/ @@ -3481,7 +2823,7 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn, struct storm_defs *storm = &s_storm_defs[storm_id]; u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 }; u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 }; - u32 row, i, offset = 0; + u32 row, offset = 0; offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, @@ -3490,7 +2832,7 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn, 0, total_size, 256, - false, "vfc_cam", true, storm->letter); + false, "vfc_cam", storm->letter); if (!dump) return offset + total_size; @@ -3498,26 +2840,18 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn, /* Prepare CAM address */ SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD); - for (row = 0; row < VFC_CAM_NUM_ROWS; - row++, offset += VFC_CAM_RESP_DWORDS) { - /* Write VFC CAM command */ + /* Read VFC CAM data */ + for (row = 0; row < VFC_CAM_NUM_ROWS; row++) { SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row); - ARR_REG_WR(p_hwfn, - p_ptt, - storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, - cam_cmd, VFC_CAM_CMD_DWORDS); - - /* Write VFC CAM address */ - ARR_REG_WR(p_hwfn, - p_ptt, - storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, - cam_addr, VFC_CAM_ADDR_DWORDS); - - /* Read VFC CAM read response */ - ARR_REG_RD(p_hwfn, - p_ptt, - storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, - dump_buf + offset, VFC_CAM_RESP_DWORDS); + offset += qed_grc_dump_read_from_vfc(p_hwfn, + p_ptt, + storm, + cam_cmd, + VFC_CAM_CMD_DWORDS, + cam_addr, + VFC_CAM_ADDR_DWORDS, + VFC_CAM_RESP_DWORDS, + dump_buf + offset); } return offset; @@ -3534,7 +2868,7 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn, struct storm_defs *storm = &s_storm_defs[storm_id]; u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 }; u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 }; - u32 row, i, offset = 0; + u32 row, offset = 0; offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, @@ -3545,35 +2879,27 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn, 256, false, ram_defs->type_name, - true, storm->letter); - - /* Prepare RAM address */ - SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD); + storm->letter); if (!dump) return offset + total_size; + /* Prepare RAM address */ + SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD); + + /* Read VFC RAM data */ for (row = ram_defs->base_row; - row < ram_defs->base_row + ram_defs->num_rows; - row++, offset += VFC_RAM_RESP_DWORDS) { - /* Write VFC RAM command */ - ARR_REG_WR(p_hwfn, - p_ptt, - storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, - ram_cmd, VFC_RAM_CMD_DWORDS); - - /* Write VFC RAM address */ + row < ram_defs->base_row + ram_defs->num_rows; row++) { SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row); - ARR_REG_WR(p_hwfn, - p_ptt, - storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, - ram_addr, VFC_RAM_ADDR_DWORDS); - - /* Read VFC RAM read response */ - ARR_REG_RD(p_hwfn, - p_ptt, - storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, - dump_buf + offset, VFC_RAM_RESP_DWORDS); + offset += qed_grc_dump_read_from_vfc(p_hwfn, + p_ptt, + storm, + ram_cmd, + VFC_RAM_CMD_DWORDS, + ram_addr, + VFC_RAM_ADDR_DWORDS, + VFC_RAM_RESP_DWORDS, + dump_buf + offset); } return offset; @@ -3583,16 +2909,13 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn, static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { - struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u8 storm_id, i; u32 offset = 0; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { if (!qed_grc_is_storm_included(p_hwfn, 
(enum dbg_storms)storm_id) || - !s_storm_defs[storm_id].has_vfc || - (storm_id == DBG_PSTORM_ID && dev_data->platform_id != - PLATFORM_ASIC)) + !s_storm_defs[storm_id].has_vfc) continue; /* Read CAM */ @@ -3642,7 +2965,7 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, total_dwords, rss_defs->entry_width, packed, - rss_defs->type_name, false, 0); + rss_defs->type_name, 0); /* Dump RSS data */ if (!dump) { @@ -3702,7 +3025,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, 0, ram_size, block_size * 8, - false, type_name, false, 0); + false, type_name, 0); /* Read and dump Big RAM data */ if (!dump) @@ -3728,6 +3051,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, return offset; } +/* Dumps MCP scratchpad. Returns the dumped size in dwords. */ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { @@ -3749,8 +3073,8 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, dump, NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH), - MCP_REG_SCRATCH_SIZE_BB_K2, - false, 0, false, "MCP", false, 0); + MCP_REG_SCRATCH_SIZE, + false, 0, false, "MCP", 0); /* Dump MCP cpu_reg_file */ offset += qed_grc_dump_mem(p_hwfn, @@ -3760,19 +3084,19 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, NULL, BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE), MCP_REG_CPU_REG_FILE_SIZE, - false, 0, false, "MCP", false, 0); + false, 0, false, "MCP", 0); /* Dump MCP registers */ block_enable[BLOCK_MCP] = true; offset += qed_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, - dump, block_enable, "block", "MCP"); + dump, block_enable, "MCP"); /* Dump required non-MCP registers */ offset += qed_grc_dump_regs_hdr(dump_buf + offset, dump, 1, SPLIT_TYPE_NONE, 0, - "block", "MCP"); + "MCP"); addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR); offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt, @@ -3789,7 +3113,9 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, return offset; } -/* Dumps the tbus indirect memory for all PHYs. */ +/* Dumps the tbus indirect memory for all PHYs. + * Returns the dumped size in dwords. + */ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { @@ -3823,7 +3149,7 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, mem_name, 0, PHY_DUMP_SIZE_DWORDS, - 16, true, mem_name, false, 0); + 16, true, mem_name, 0); if (!dump) { offset += PHY_DUMP_SIZE_DWORDS; @@ -3854,21 +3180,58 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, return offset; } -static void qed_config_dbg_line(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - enum block_id block_id, - u8 line_id, - u8 enable_mask, - u8 right_shift, - u8 force_valid_mask, u8 force_frame_mask) +static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 image_type, + u32 *nvram_offset_bytes, + u32 *nvram_size_bytes); + +static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 nvram_offset_bytes, + u32 nvram_size_bytes, u32 *ret_buf); + +/* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. 
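+ * The image is located with qed_find_nvram_image(NVM_TYPE_HW_DUMP_OUT)
+ * and copied with qed_nvram_read(). If the image is absent the section
+ * is skipped; a failed NVRAM read is reported via DP_NOTICE. Both
+ * failure cases return 0.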
*/ +static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump) { - struct block_defs *block = s_block_defs[block_id]; + u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0; + u32 hw_dump_size_dwords = 0, offset = 0; + enum dbg_status status; + + /* Read HW dump image from NVRAM */ + status = qed_find_nvram_image(p_hwfn, + p_ptt, + NVM_TYPE_HW_DUMP_OUT, + &hw_dump_offset_bytes, + &hw_dump_size_bytes); + if (status != DBG_STATUS_OK) + return 0; + + hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes); + + /* Dump HW dump image section */ + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "mcp_hw_dump", 1); + offset += qed_dump_num_param(dump_buf + offset, + dump, "size", hw_dump_size_dwords); + + /* Read MCP HW dump image into dump buffer */ + if (dump && hw_dump_size_dwords) { + status = qed_nvram_read(p_hwfn, + p_ptt, + hw_dump_offset_bytes, + hw_dump_size_bytes, dump_buf + offset); + if (status != DBG_STATUS_OK) { + DP_NOTICE(p_hwfn, + "Failed to read MCP HW Dump image from NVRAM\n"); + return 0; + } + } + offset += hw_dump_size_dwords; - qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id); - qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask); - qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift); - qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask); - qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask); + return offset; } /* Dumps Static Debug data. Returns the dumped size in dwords. */ @@ -3877,26 +3240,19 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 block_id, line_id, offset = 0; + u32 block_id, line_id, offset = 0, addr, len; /* Don't dump static debug if a debug bus recording is in progress */ if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON)) return 0; if (dump) { - /* Disable all blocks debug output */ - for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { - struct block_defs *block = s_block_defs[block_id]; - - if (block->dbg_client_id[dev_data->chip_id] != - MAX_DBG_BUS_CLIENTS) - qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, - 0); - } + /* Disable debug bus in all blocks */ + qed_bus_disable_blocks(p_hwfn, p_ptt); qed_bus_reset_dbg_block(p_hwfn, p_ptt); - qed_bus_set_framing_mode(p_hwfn, - p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST); + qed_wr(p_hwfn, + p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW); qed_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF); qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1); @@ -3905,28 +3261,48 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, /* Dump all static debug lines for each relevant block */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { - struct block_defs *block = s_block_defs[block_id]; - struct dbg_bus_block *block_desc; - u32 block_dwords, addr, len; - u8 dbg_client_id; + const struct dbg_block_chip *block_per_chip; + const struct dbg_block *block; + bool is_removed, has_dbg_bus; + u16 modes_buf_offset; + u32 block_dwords; + + block_per_chip = + qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id); + is_removed = GET_FIELD(block_per_chip->flags, + DBG_BLOCK_CHIP_IS_REMOVED); + has_dbg_bus = GET_FIELD(block_per_chip->flags, + DBG_BLOCK_CHIP_HAS_DBG_BUS); - if (block->dbg_client_id[dev_data->chip_id] == - MAX_DBG_BUS_CLIENTS) + /* read+clear for NWS parity is not working, skip NWS block */ + if (block_id == BLOCK_NWS) continue; - block_desc = 
get_dbg_bus_block_desc(p_hwfn, - (enum block_id)block_id); - block_dwords = NUM_DBG_LINES(block_desc) * + if (!is_removed && has_dbg_bus && + GET_FIELD(block_per_chip->dbg_bus_mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0) { + modes_buf_offset = + GET_FIELD(block_per_chip->dbg_bus_mode.data, + DBG_MODE_HDR_MODES_BUF_OFFSET); + if (!qed_is_mode_match(p_hwfn, &modes_buf_offset)) + has_dbg_bus = false; + } + + if (is_removed || !has_dbg_bus) + continue; + + block_dwords = NUM_DBG_LINES(block_per_chip) * STATIC_DEBUG_LINE_DWORDS; /* Dump static section params */ + block = get_dbg_block(p_hwfn, (enum block_id)block_id); offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, block->name, 0, block_dwords, - 32, false, "STATIC", false, 0); + 32, false, "STATIC", 0); if (!dump) { offset += block_dwords; @@ -3942,20 +3318,19 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, } /* Enable block's client */ - dbg_client_id = block->dbg_client_id[dev_data->chip_id]; qed_bus_enable_clients(p_hwfn, p_ptt, - BIT(dbg_client_id)); + BIT(block_per_chip->dbg_client_id)); addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA); len = STATIC_DEBUG_LINE_DWORDS; - for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc); + for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip); line_id++) { /* Configure debug line ID */ - qed_config_dbg_line(p_hwfn, - p_ptt, - (enum block_id)block_id, - (u8)line_id, 0xf, 0, 0, 0); + qed_bus_config_dbg_line(p_hwfn, + p_ptt, + (enum block_id)block_id, + (u8)line_id, 0xf, 0, 0, 0); /* Read debug line info */ offset += qed_grc_dump_addr_range(p_hwfn, @@ -3970,7 +3345,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, /* Disable block's client and debug output */ qed_bus_enable_clients(p_hwfn, p_ptt, 0); - qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0); + qed_bus_config_dbg_line(p_hwfn, p_ptt, + (enum block_id)block_id, 0, 0, 0, 0, 0); } if (dump) { @@ -3990,8 +3366,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, bool dump, u32 *num_dumped_dwords) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 dwords_read, offset = 0; bool parities_masked = false; - u32 offset = 0; u8 i; *num_dumped_dwords = 0; @@ -4010,13 +3386,11 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_num_param(dump_buf + offset, dump, "num-lcids", - qed_grc_get_param(p_hwfn, - DBG_GRC_PARAM_NUM_LCIDS)); + NUM_OF_LCIDS); offset += qed_dump_num_param(dump_buf + offset, dump, "num-ltids", - qed_grc_get_param(p_hwfn, - DBG_GRC_PARAM_NUM_LTIDS)); + NUM_OF_LTIDS); offset += qed_dump_num_param(dump_buf + offset, dump, "num-ports", dev_data->num_ports); @@ -4028,7 +3402,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, /* Take all blocks out of reset (using reset registers) */ if (dump) { - qed_grc_unreset_blocks(p_hwfn, p_ptt); + qed_grc_unreset_blocks(p_hwfn, p_ptt, false); qed_update_blocks_reset_state(p_hwfn, p_ptt); } @@ -4071,7 +3445,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, - block_enable, NULL, NULL); + block_enable, NULL); /* Dump special registers */ offset += qed_grc_dump_special_regs(p_hwfn, @@ -4105,23 +3479,29 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, i); - /* Dump IORs */ - if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR)) - offset += qed_grc_dump_iors(p_hwfn, - p_ptt, dump_buf + offset, dump); - /* Dump VFC */ - if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) - offset += 
qed_grc_dump_vfc(p_hwfn, - p_ptt, dump_buf + offset, dump); + if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) { + dwords_read = qed_grc_dump_vfc(p_hwfn, + p_ptt, dump_buf + offset, dump); + offset += dwords_read; + if (!dwords_read) + return DBG_STATUS_VFC_READ_ERROR; + } /* Dump PHY tbus */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id == - CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC) + CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC) offset += qed_grc_dump_phy(p_hwfn, p_ptt, dump_buf + offset, dump); + /* Dump MCP HW Dump */ + if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) && + !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP) && 1) + offset += qed_grc_dump_mcp_hw_dump(p_hwfn, + p_ptt, + dump_buf + offset, dump); + /* Dump static debug data (only if not during debug bus recording) */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && @@ -4172,8 +3552,9 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, u8 reg_id; hdr = (struct dbg_idle_chk_result_hdr *)dump_buf; - regs = &((const union dbg_idle_chk_reg *) - s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset]; + regs = (const union dbg_idle_chk_reg *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr + + rule->reg_offset; cond_regs = ®s[0].cond_reg; info_regs = ®s[rule->num_cond_regs].info_reg; @@ -4193,8 +3574,8 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id]; struct dbg_idle_chk_result_reg_hdr *reg_hdr; - reg_hdr = (struct dbg_idle_chk_result_reg_hdr *) - (dump_buf + offset); + reg_hdr = + (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset); /* Write register header */ if (!dump) { @@ -4311,12 +3692,13 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, const u32 *imm_values; rule = &input_rules[i]; - regs = &((const union dbg_idle_chk_reg *) - s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr) - [rule->reg_offset]; + regs = (const union dbg_idle_chk_reg *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr + + rule->reg_offset; cond_regs = ®s[0].cond_reg; - imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr - [rule->imm_offset]; + imm_values = + (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr + + rule->imm_offset; /* Check if all condition register blocks are out of reset, and * find maximal number of entries (all condition registers that @@ -4434,10 +3816,12 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { - u32 num_failing_rules_offset, offset = 0, input_offset = 0; - u32 num_failing_rules = 0; + struct virt_mem_desc *dbg_buf = + &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES]; + u32 num_failing_rules_offset, offset = 0, + input_offset = 0, num_failing_rules = 0; - /* Dump global params */ + /* Dump global params - 1 must match below amount of params */ offset += qed_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1); @@ -4449,12 +3833,10 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, num_failing_rules_offset = offset; offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0); - while (input_offset < - s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) { + while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) { const struct dbg_idle_chk_cond_hdr *cond_hdr = - (const struct dbg_idle_chk_cond_hdr *) - 
&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr - [input_offset++]; + (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr + + input_offset++; bool eval_mode, mode_match = true; u32 curr_failing_rules; u16 modes_buf_offset; @@ -4471,16 +3853,21 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, } if (mode_match) { + const struct dbg_idle_chk_rule *rule = + (const struct dbg_idle_chk_rule *)((u32 *) + dbg_buf->ptr + + input_offset); + u32 num_input_rules = + cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS; offset += qed_idle_chk_dump_rule_entries(p_hwfn, - p_ptt, - dump_buf + offset, - dump, - (const struct dbg_idle_chk_rule *) - &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES]. - ptr[input_offset], - cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS, - &curr_failing_rules); + p_ptt, + dump_buf + + offset, + dump, + rule, + num_input_rules, + &curr_failing_rules); num_failing_rules += curr_failing_rules; } @@ -4547,7 +3934,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, { u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy; s32 bytes_left = nvram_size_bytes; - u32 read_offset = 0; + u32 read_offset = 0, param = 0; DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, @@ -4560,14 +3947,14 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left; /* Call NVRAM read command */ + SET_MFW_FIELD(param, + DRV_MB_PARAM_NVM_OFFSET, + nvram_offset_bytes + read_offset); + SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy); if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, - DRV_MSG_CODE_NVM_READ_NVRAM, - (nvram_offset_bytes + - read_offset) | - (bytes_to_copy << - DRV_MB_PARAM_NVM_LEN_OFFSET), - &ret_mcp_resp, &ret_mcp_param, - &ret_read_size, + DRV_MSG_CODE_NVM_READ_NVRAM, param, + &ret_mcp_resp, + &ret_mcp_param, &ret_read_size, (u32 *)((u8 *)ret_buf + read_offset))) return DBG_STATUS_NVRAM_READ_FAILED; @@ -4705,12 +4092,12 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0; u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0; enum dbg_status status; - bool mcp_access; int halted = 0; + bool use_mfw; *num_dumped_dwords = 0; - mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP); + use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP); /* Get trace data info */ status = qed_mcp_trace_get_data_info(p_hwfn, @@ -4731,7 +4118,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, * consistent. if halt fails, MCP trace is taken anyway, with a small * risk that it may be corrupt. 
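 * When MCP access is disabled (DBG_GRC_PARAM_NO_MCP), use_mfw is false:
 * the halt is skipped and the meta data is never fetched, so the dump is
 * returned with DBG_STATUS_NVRAM_GET_IMAGE_FAILED to flag the missing
 * meta section.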
*/ - if (dump && mcp_access) { + if (dump && use_mfw) { halted = !qed_mcp_halt(p_hwfn, p_ptt); if (!halted) DP_NOTICE(p_hwfn, "MCP halt failed!\n"); @@ -4771,17 +4158,15 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, */ trace_meta_size_bytes = qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE); - if ((!trace_meta_size_bytes || dump) && mcp_access) { + if ((!trace_meta_size_bytes || dump) && use_mfw) status = qed_mcp_trace_get_meta_info(p_hwfn, p_ptt, trace_data_size_bytes, &running_bundle_id, &trace_meta_offset_bytes, &trace_meta_size_bytes); - if (status == DBG_STATUS_OK) - trace_meta_size_dwords = - BYTES_TO_DWORDS(trace_meta_size_bytes); - } + if (status == DBG_STATUS_OK) + trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes); /* Dump trace meta size param */ offset += qed_dump_num_param(dump_buf + offset, @@ -4805,7 +4190,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* If no mcp access, indicate that the dump doesn't contain the meta * data from NVRAM. */ - return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED; + return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED; } /* Dump GRC FIFO */ @@ -4983,16 +4368,18 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, override_window_dwords = qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * PROTECTION_OVERRIDE_ELEMENT_DWORDS; - addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW); - offset += qed_grc_dump_addr_range(p_hwfn, - p_ptt, - dump_buf + offset, - true, - addr, - override_window_dwords, - true, SPLIT_TYPE_NONE, 0); - qed_dump_num_param(dump_buf + size_param_offset, dump, "size", - override_window_dwords); + if (override_window_dwords) { + addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW); + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + true, + addr, + override_window_dwords, + true, SPLIT_TYPE_NONE, 0); + qed_dump_num_param(dump_buf + size_param_offset, dump, "size", + override_window_dwords); + } out: /* Dump last section */ offset += qed_dump_last_section(dump_buf, offset, dump); @@ -5028,7 +4415,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, struct storm_defs *storm = &s_storm_defs[storm_id]; u32 last_list_idx, addr; - if (dev_data->block_in_reset[storm->block_id]) + if (dev_data->block_in_reset[storm->sem_block_id]) continue; /* Read FW info for the current Storm */ @@ -5079,20 +4466,362 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, return offset; } +/* Dumps the specified ILT pages to the specified buffer. + * Returns the dumped size in dwords. 
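+ * When dump_page_ids is set, a single dword (the page ID) is written
+ * per page; otherwise the page contents are copied with memcpy(). Pages
+ * with no virtual address are skipped in both modes.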
+ */ +static u32 qed_ilt_dump_pages_range(u32 *dump_buf, + bool dump, + u32 start_page_id, + u32 num_pages, + struct phys_mem_desc *ilt_pages, + bool dump_page_ids) +{ + u32 page_id, end_page_id, offset = 0; + + if (num_pages == 0) + return offset; + + end_page_id = start_page_id + num_pages - 1; + + for (page_id = start_page_id; page_id <= end_page_id; page_id++) { + struct phys_mem_desc *mem_desc = &ilt_pages[page_id]; + + /** + * + * if (page_id >= ->p_cxt_mngr->ilt_shadow_size) + * break; + */ + + if (!ilt_pages[page_id].virt_addr) + continue; + + if (dump_page_ids) { + /* Copy page ID to dump buffer */ + if (dump) + *(dump_buf + offset) = page_id; + offset++; + } else { + /* Copy page memory to dump buffer */ + if (dump) + memcpy(dump_buf + offset, + mem_desc->virt_addr, mem_desc->size); + offset += BYTES_TO_DWORDS(mem_desc->size); + } + } + + return offset; +} + +/* Dumps a section containing the dumped ILT pages. + * Returns the dumped size in dwords. + */ +static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + bool dump, + u32 valid_conn_pf_pages, + u32 valid_conn_vf_pages, + struct phys_mem_desc *ilt_pages, + bool dump_page_ids) +{ + struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; + u32 pf_start_line, start_page_id, offset = 0; + u32 cdut_pf_init_pages, cdut_vf_init_pages; + u32 cdut_pf_work_pages, cdut_vf_work_pages; + u32 base_data_offset, size_param_offset; + u32 cdut_pf_pages, cdut_vf_pages; + const char *section_name; + u8 i; + + section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem"; + cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn); + cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn); + cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn); + cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn); + cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages; + cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages; + pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line; + + offset += + qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1); + + /* Dump size parameter (0 for now, overwritten with real size later) */ + size_param_offset = offset; + offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0); + base_data_offset = offset; + + /* CDUC pages are ordered as follows: + * - PF pages - valid section (included in PF connection type mapping) + * - PF pages - invalid section (not dumped) + * - For each VF in the PF: + * - VF pages - valid section (included in VF connection type mapping) + * - VF pages - invalid section (not dumped) + */ + if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) { + /* Dump connection PF pages */ + start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line; + offset += qed_ilt_dump_pages_range(dump_buf + offset, + dump, + start_page_id, + valid_conn_pf_pages, + ilt_pages, dump_page_ids); + + /* Dump connection VF pages */ + start_page_id += clients[ILT_CLI_CDUC].pf_total_lines; + for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count; + i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines) + offset += qed_ilt_dump_pages_range(dump_buf + offset, + dump, + start_page_id, + valid_conn_vf_pages, + ilt_pages, + dump_page_ids); + } + + /* CDUT pages are ordered as follows: + * - PF init pages (not dumped) + * - PF work pages + * - For each VF in the PF: + * - VF init pages (not dumped) + * - VF work pages + */ + if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) { + /* Dump task PF pages */ + start_page_id = 
clients[ILT_CLI_CDUT].first.val + + cdut_pf_init_pages - pf_start_line; + offset += qed_ilt_dump_pages_range(dump_buf + offset, + dump, + start_page_id, + cdut_pf_work_pages, + ilt_pages, dump_page_ids); + + /* Dump task VF pages */ + start_page_id = clients[ILT_CLI_CDUT].first.val + + cdut_pf_pages + cdut_vf_init_pages - pf_start_line; + for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count; + i++, start_page_id += cdut_vf_pages) + offset += qed_ilt_dump_pages_range(dump_buf + offset, + dump, + start_page_id, + cdut_vf_work_pages, + ilt_pages, + dump_page_ids); + } + + /* Overwrite size param */ + if (dump) + qed_dump_num_param(dump_buf + size_param_offset, + dump, "size", offset - base_data_offset); + + return offset; +} + +/* Performs ILT Dump to the specified buffer. + * Returns the dumped size in dwords. + */ +static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) +{ + struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; + u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0; + u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages; + u32 num_cids_per_page, conn_ctx_size; + u32 cduc_page_size, cdut_page_size; + struct phys_mem_desc *ilt_pages; + u8 conn_type; + + cduc_page_size = 1 << + (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); + cdut_page_size = 1 << + (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); + conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size; + num_cids_per_page = (int)(cduc_page_size / conn_ctx_size); + ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow; + + /* Dump global params - 22 must match number of params below */ + offset += qed_dump_common_global_params(p_hwfn, p_ptt, + dump_buf + offset, dump, 22); + offset += qed_dump_str_param(dump_buf + offset, + dump, "dump-type", "ilt-dump"); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cduc-page-size", cduc_page_size); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cduc-first-page-id", + clients[ILT_CLI_CDUC].first.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cduc-last-page-id", + clients[ILT_CLI_CDUC].last.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cduc-num-pf-pages", + clients + [ILT_CLI_CDUC].pf_total_lines); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cduc-num-vf-pages", + clients + [ILT_CLI_CDUC].vf_total_lines); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "max-conn-ctx-size", + conn_ctx_size); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cdut-page-size", cdut_page_size); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cdut-first-page-id", + clients[ILT_CLI_CDUT].first.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cdut-last-page-id", + clients[ILT_CLI_CDUT].last.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cdut-num-pf-init-pages", + qed_get_cdut_num_pf_init_pages(p_hwfn)); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cdut-num-vf-init-pages", + qed_get_cdut_num_vf_init_pages(p_hwfn)); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cdut-num-pf-work-pages", + qed_get_cdut_num_pf_work_pages(p_hwfn)); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cdut-num-vf-work-pages", + qed_get_cdut_num_vf_work_pages(p_hwfn)); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "max-task-ctx-size", + p_hwfn->p_cxt_mngr->task_ctx_size); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "task-type-id", + 
p_hwfn->p_cxt_mngr->task_type_id); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "first-vf-id-in-pf", + p_hwfn->p_cxt_mngr->first_vf_in_pf); + offset += /* 18 */ qed_dump_num_param(dump_buf + offset, + dump, + "num-vfs-in-pf", + p_hwfn->p_cxt_mngr->vf_count); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "ptr-size-bytes", sizeof(void *)); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "pf-start-line", + p_hwfn->p_cxt_mngr->pf_start_line); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "page-mem-desc-size-dwords", + PAGE_MEM_DESC_SIZE_DWORDS); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "ilt-shadow-size", + p_hwfn->p_cxt_mngr->ilt_shadow_size); + /* Additional/Less parameters require matching of number in call to + * dump_common_global_params() + */ + + /* Dump section containing number of PF CIDs per connection type */ + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "num_pf_cids_per_conn_type", 1); + offset += qed_dump_num_param(dump_buf + offset, + dump, "size", NUM_OF_CONNECTION_TYPES_E4); + for (conn_type = 0, valid_conn_pf_cids = 0; + conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) { + u32 num_pf_cids = + p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count; + + if (dump) + *(dump_buf + offset) = num_pf_cids; + valid_conn_pf_cids += num_pf_cids; + } + + /* Dump section containing number of VF CIDs per connection type */ + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "num_vf_cids_per_conn_type", 1); + offset += qed_dump_num_param(dump_buf + offset, + dump, "size", NUM_OF_CONNECTION_TYPES_E4); + for (conn_type = 0, valid_conn_vf_cids = 0; + conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) { + u32 num_vf_cids = + p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf; + + if (dump) + *(dump_buf + offset) = num_vf_cids; + valid_conn_vf_cids += num_vf_cids; + } + + /* Dump section containing physical memory descs for each ILT page */ + num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size; + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "ilt_page_desc", 1); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "size", + num_pages * PAGE_MEM_DESC_SIZE_DWORDS); + + /* Copy memory descriptors to dump buffer */ + if (dump) { + u32 page_id; + + for (page_id = 0; page_id < num_pages; + page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) + memcpy(dump_buf + offset, + &ilt_pages[page_id], + DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS)); + } else { + offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS; + } + + valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids, + num_cids_per_page); + valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids, + num_cids_per_page); + + /* Dump ILT pages IDs */ + offset += qed_ilt_dump_pages_section(p_hwfn, + dump_buf + offset, + dump, + valid_conn_pf_pages, + valid_conn_vf_pages, + ilt_pages, true); + + /* Dump ILT pages memory */ + offset += qed_ilt_dump_pages_section(p_hwfn, + dump_buf + offset, + dump, + valid_conn_pf_pages, + valid_conn_vf_pages, + ilt_pages, false); + + /* Dump last section */ + offset += qed_dump_last_section(dump_buf, offset, dump); + + return offset; +} + /***************************** Public Functions *******************************/ -enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr) +enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, + const u8 * const bin_ptr) { - struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr; + struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr; 
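+ /* bin_ptr begins with an array of bin_buffer_hdr entries, one per
+ * BIN_BUF_DBG_* buffer type, each giving the offset and length of that
+ * buffer inside the binary blob.
+ */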
u8 buf_id; - /* convert binary data to debug arrays */ - for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) { - s_dbg_arrays[buf_id].ptr = - (u32 *)(bin_ptr + buf_array[buf_id].offset); - s_dbg_arrays[buf_id].size_in_dwords = - BYTES_TO_DWORDS(buf_array[buf_id].length); - } + /* Convert binary data to debug arrays */ + for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) + qed_set_dbg_bin_buf(p_hwfn, + buf_id, + (u32 *)(bin_ptr + buf_hdrs[buf_id].offset), + buf_hdrs[buf_id].length); return DBG_STATUS_OK; } @@ -5107,7 +4836,7 @@ bool qed_read_fw_info(struct qed_hwfn *p_hwfn, struct storm_defs *storm = &s_storm_defs[storm_id]; /* Skip Storm if it's in reset */ - if (dev_data->block_in_reset[storm->block_id]) + if (dev_data->block_in_reset[storm->sem_block_id]) continue; /* Read FW info for the current Storm */ @@ -5119,6 +4848,69 @@ bool qed_read_fw_info(struct qed_hwfn *p_hwfn, return false; } +enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, + enum dbg_grc_params grc_param, u32 val) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + enum dbg_status status; + int i; + + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val); + + status = qed_dbg_dev_init(p_hwfn); + if (status != DBG_STATUS_OK) + return status; + + /* Initializes the GRC parameters (if not initialized). Needed in order + * to set the default parameter values for the first time. + */ + qed_dbg_grc_init_params(p_hwfn); + + if (grc_param >= MAX_DBG_GRC_PARAMS) + return DBG_STATUS_INVALID_ARGS; + if (val < s_grc_param_defs[grc_param].min || + val > s_grc_param_defs[grc_param].max) + return DBG_STATUS_INVALID_ARGS; + + if (s_grc_param_defs[grc_param].is_preset) { + /* Preset param */ + + /* Disabling a preset is not allowed. Call + * dbg_grc_set_params_default instead. 
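+ * A preset is enabled by setting it to a non-zero value, e.g.:
+ *     qed_dbg_grc_config(p_hwfn, DBG_GRC_PARAM_CRASH, 1);
+ * which overwrites every non-persistent parameter with its
+ * crash_preset_val for the current chip.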
+ */ + if (!val) + return DBG_STATUS_INVALID_ARGS; + + /* Update all params with the preset values */ + for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) { + struct grc_param_defs *defs = &s_grc_param_defs[i]; + u32 preset_val; + /* Skip persistent params */ + if (defs->is_persistent) + continue; + + /* Find preset value */ + if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL) + preset_val = + defs->exclude_all_preset_val; + else if (grc_param == DBG_GRC_PARAM_CRASH) + preset_val = + defs->crash_preset_val[dev_data->chip_id]; + else + return DBG_STATUS_INVALID_ARGS; + + qed_grc_set_param(p_hwfn, i, preset_val); + } + } else { + /* Regular param - set its value */ + qed_grc_set_param(p_hwfn, grc_param, val); + } + + return DBG_STATUS_OK; +} + /* Assign default GRC param values */ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn) { @@ -5135,18 +4927,18 @@ enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { - enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); + enum dbg_status status = qed_dbg_dev_init(p_hwfn); *buf_size = 0; if (status != DBG_STATUS_OK) return status; - if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || - !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || - !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr || - !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || - !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr) + if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size); @@ -5186,20 +4978,19 @@ enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, u32 *buf_size) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - struct idle_chk_data *idle_chk; + struct idle_chk_data *idle_chk = &dev_data->idle_chk; enum dbg_status status; - idle_chk = &dev_data->idle_chk; *buf_size = 0; - status = qed_dbg_dev_init(p_hwfn, p_ptt); + status = qed_dbg_dev_init(p_hwfn); if (status != DBG_STATUS_OK) return status; - if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || - !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr || - !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || - !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr) + if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; if (!idle_chk->buf_size_set) { @@ -5234,6 +5025,7 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, return DBG_STATUS_DUMP_BUF_TOO_SMALL; /* Update reset state */ + qed_grc_unreset_blocks(p_hwfn, p_ptt, true); qed_update_blocks_reset_state(p_hwfn, p_ptt); /* Idle Check Dump */ @@ -5249,7 +5041,7 @@ enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { - enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); + enum dbg_status status = qed_dbg_dev_init(p_hwfn); *buf_size = 0; @@ -5296,7 +5088,7 @@ enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { - enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); + enum dbg_status status = qed_dbg_dev_init(p_hwfn); *buf_size = 0; @@ -5342,7 +5134,7 @@ enum dbg_status 
qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { - enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); + enum dbg_status status = qed_dbg_dev_init(p_hwfn); *buf_size = 0; @@ -5388,7 +5180,7 @@ qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { - enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); + enum dbg_status status = qed_dbg_dev_init(p_hwfn); *buf_size = 0; @@ -5438,7 +5230,7 @@ enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { - enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); + enum dbg_status status = qed_dbg_dev_init(p_hwfn); *buf_size = 0; @@ -5482,6 +5274,50 @@ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, return DBG_STATUS_OK; } +static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size) +{ + enum dbg_status status = qed_dbg_dev_init(p_hwfn); + + *buf_size = 0; + + if (status != DBG_STATUS_OK) + return status; + + *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false); + + return DBG_STATUS_OK; +} + +static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords) +{ + u32 needed_buf_size_in_dwords; + enum dbg_status status; + + *num_dumped_dwords = 0; + + status = qed_dbg_ilt_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); + if (status != DBG_STATUS_OK) + return status; + + if (buf_size_in_dwords < needed_buf_size_in_dwords) + return DBG_STATUS_DUMP_BUF_TOO_SMALL; + + *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true); + + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + + return DBG_STATUS_OK; +} + enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum block_id block_id, @@ -5489,19 +5325,20 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, bool clear_status, struct dbg_attn_block_result *results) { - enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); + enum dbg_status status = qed_dbg_dev_init(p_hwfn); u8 reg_idx, num_attn_regs, num_result_regs = 0; const struct dbg_attn_reg *attn_reg_arr; if (status != DBG_STATUS_OK) return status; - if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || - !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || - !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr) + if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || - !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; - attn_reg_arr = qed_get_block_attn_regs(block_id, + attn_reg_arr = qed_get_block_attn_regs(p_hwfn, + block_id, attn_type, &num_attn_regs); for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) { @@ -5546,7 +5383,7 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, results->block_id = (u8)block_id; results->names_offset = - qed_get_block_attn_data(block_id, attn_type)->names_offset; + qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset; SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type); SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs); @@ -5556,11 +5393,6 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, /******************************* Data Types **********************************/ -struct block_info { - const char *name; - enum block_id id; -}; -
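The reg_fifo_element below packs the address, access type, PF/VF IDs, port, privilege, protection, master and error code of one GRC access into a single 64-bit value, and the parser extracts each field with shift-and-mask GET_FIELD() macros. A self-contained sketch of that extraction follows; the field offsets here are invented for illustration and do not match the driver's real layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only - not the driver's actual offsets */
#define ELEM_ADDRESS_SHIFT	0
#define ELEM_ADDRESS_MASK	0x7fffffULL
#define ELEM_ERROR_SHIFT	59
#define ELEM_ERROR_MASK		0x1fULL

#define GET_ELEM_FIELD(data, name) \
	(((data) >> ELEM_##name##_SHIFT) & ELEM_##name##_MASK)

int main(void)
{
	/* error code 17, the new "RSL error" entry in s_reg_fifo_errors */
	uint64_t data = ((uint64_t)17 << ELEM_ERROR_SHIFT) | 0x123456;

	printf("address: 0x%llx, error: %llu\n",
	       (unsigned long long)GET_ELEM_FIELD(data, ADDRESS),
	       (unsigned long long)GET_ELEM_FIELD(data, ERROR));

	return 0;
}

This is also why the parsing change further down replaces the old bit-by-bit error loop: the extracted error value is now treated as a code and matched against the s_reg_fifo_errors[] table to find a single message.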
/* REG fifo element */ struct reg_fifo_element { u64 data; @@ -5584,6 +5416,12 @@ struct reg_fifo_element { #define REG_FIFO_ELEMENT_ERROR_MASK 0x1f }; +/* REG fifo error element */ +struct reg_fifo_err { + u32 err_code; + const char *err_msg; +}; + /* IGU fifo element */ struct igu_fifo_element { u32 dword0; @@ -5683,20 +5521,6 @@ struct igu_fifo_addr_data { enum igu_fifo_addr_types type; }; -struct mcp_trace_meta { - u32 modules_num; - char **modules; - u32 formats_num; - struct mcp_trace_format *formats; - bool is_allocated; -}; - -/* Debug Tools user data */ -struct dbg_tools_user_data { - struct mcp_trace_meta mcp_trace_meta; - const u32 *mcp_trace_user_meta_buf; -}; - /******************************** Constants **********************************/ #define MAX_MSG_LEN 1024 @@ -5704,7 +5528,7 @@ struct dbg_tools_user_data { #define MCP_TRACE_MAX_MODULE_LEN 8 #define MCP_TRACE_FORMAT_MAX_PARAMS 3 #define MCP_TRACE_FORMAT_PARAM_WIDTH \ - (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT) + (MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET) #define REG_FIFO_ELEMENT_ADDR_FACTOR 4 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127 @@ -5713,107 +5537,6 @@ struct dbg_tools_user_data { /***************************** Constant Arrays *******************************/ -struct user_dbg_array { - const u32 *ptr; - u32 size_in_dwords; -}; - -/* Debug arrays */ -static struct user_dbg_array -s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} }; - -/* Block names array */ -static struct block_info s_block_info_arr[] = { - {"grc", BLOCK_GRC}, - {"miscs", BLOCK_MISCS}, - {"misc", BLOCK_MISC}, - {"dbu", BLOCK_DBU}, - {"pglue_b", BLOCK_PGLUE_B}, - {"cnig", BLOCK_CNIG}, - {"cpmu", BLOCK_CPMU}, - {"ncsi", BLOCK_NCSI}, - {"opte", BLOCK_OPTE}, - {"bmb", BLOCK_BMB}, - {"pcie", BLOCK_PCIE}, - {"mcp", BLOCK_MCP}, - {"mcp2", BLOCK_MCP2}, - {"pswhst", BLOCK_PSWHST}, - {"pswhst2", BLOCK_PSWHST2}, - {"pswrd", BLOCK_PSWRD}, - {"pswrd2", BLOCK_PSWRD2}, - {"pswwr", BLOCK_PSWWR}, - {"pswwr2", BLOCK_PSWWR2}, - {"pswrq", BLOCK_PSWRQ}, - {"pswrq2", BLOCK_PSWRQ2}, - {"pglcs", BLOCK_PGLCS}, - {"ptu", BLOCK_PTU}, - {"dmae", BLOCK_DMAE}, - {"tcm", BLOCK_TCM}, - {"mcm", BLOCK_MCM}, - {"ucm", BLOCK_UCM}, - {"xcm", BLOCK_XCM}, - {"ycm", BLOCK_YCM}, - {"pcm", BLOCK_PCM}, - {"qm", BLOCK_QM}, - {"tm", BLOCK_TM}, - {"dorq", BLOCK_DORQ}, - {"brb", BLOCK_BRB}, - {"src", BLOCK_SRC}, - {"prs", BLOCK_PRS}, - {"tsdm", BLOCK_TSDM}, - {"msdm", BLOCK_MSDM}, - {"usdm", BLOCK_USDM}, - {"xsdm", BLOCK_XSDM}, - {"ysdm", BLOCK_YSDM}, - {"psdm", BLOCK_PSDM}, - {"tsem", BLOCK_TSEM}, - {"msem", BLOCK_MSEM}, - {"usem", BLOCK_USEM}, - {"xsem", BLOCK_XSEM}, - {"ysem", BLOCK_YSEM}, - {"psem", BLOCK_PSEM}, - {"rss", BLOCK_RSS}, - {"tmld", BLOCK_TMLD}, - {"muld", BLOCK_MULD}, - {"yuld", BLOCK_YULD}, - {"xyld", BLOCK_XYLD}, - {"ptld", BLOCK_PTLD}, - {"ypld", BLOCK_YPLD}, - {"prm", BLOCK_PRM}, - {"pbf_pb1", BLOCK_PBF_PB1}, - {"pbf_pb2", BLOCK_PBF_PB2}, - {"rpb", BLOCK_RPB}, - {"btb", BLOCK_BTB}, - {"pbf", BLOCK_PBF}, - {"rdif", BLOCK_RDIF}, - {"tdif", BLOCK_TDIF}, - {"cdu", BLOCK_CDU}, - {"ccfc", BLOCK_CCFC}, - {"tcfc", BLOCK_TCFC}, - {"igu", BLOCK_IGU}, - {"cau", BLOCK_CAU}, - {"rgfs", BLOCK_RGFS}, - {"rgsrc", BLOCK_RGSRC}, - {"tgfs", BLOCK_TGFS}, - {"tgsrc", BLOCK_TGSRC}, - {"umac", BLOCK_UMAC}, - {"xmac", BLOCK_XMAC}, - {"dbg", BLOCK_DBG}, - {"nig", BLOCK_NIG}, - {"wol", BLOCK_WOL}, - {"bmbn", BLOCK_BMBN}, - {"ipc", BLOCK_IPC}, - {"nwm", BLOCK_NWM}, - {"nws", BLOCK_NWS}, - {"ms", BLOCK_MS}, - {"phy_pcie", BLOCK_PHY_PCIE}, - {"led", 
BLOCK_LED}, - {"avs_wrap", BLOCK_AVS_WRAP}, - {"pxpreqbus", BLOCK_PXPREQBUS}, - {"misc_aeu", BLOCK_MISC_AEU}, - {"bar0_map", BLOCK_BAR0_MAP} -}; - /* Status string array */ static const char * const s_status_str[] = { /* DBG_STATUS_OK */ @@ -5843,14 +5566,12 @@ static const char * const s_status_str[] = { /* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */ "A PCI buffer wasn't allocated", - /* DBG_STATUS_TOO_MANY_INPUTS */ - "Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true", + /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */ + "The filter/trigger constraint dword offsets are not enabled for recording", - /* DBG_STATUS_INPUT_OVERLAP */ - "Overlapping debug bus inputs", - /* DBG_STATUS_HW_ONLY_RECORDING */ - "Cannot record Storm data since the entire recording cycle is used by HW", + /* DBG_STATUS_VFC_READ_ERROR */ + "Error reading from VFC", /* DBG_STATUS_STORM_ALREADY_ENABLED */ "The Storm was already enabled", @@ -5867,8 +5588,8 @@ static const char * const s_status_str[] = { /* DBG_STATUS_NO_INPUT_ENABLED */ "No input was enabled for recording", - /* DBG_STATUS_NO_FILTER_TRIGGER_64B */ - "Filters and triggers are not allowed when recording in 64b units", + /* DBG_STATUS_NO_FILTER_TRIGGER_256B */ + "Filters and triggers are not allowed in E4 256-bit mode", /* DBG_STATUS_FILTER_ALREADY_ENABLED */ "The filter was already enabled", @@ -5942,8 +5663,8 @@ static const char * const s_status_str[] = { /* DBG_STATUS_MCP_COULD_NOT_RESUME */ "Failed to resume MCP after halt", - /* DBG_STATUS_RESERVED2 */ - "Reserved debug status - shouldn't be returned", + /* DBG_STATUS_RESERVED0 */ + "", /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */ "Failed to empty SEMI sync FIFO", @@ -5966,17 +5687,32 @@ static const char * const s_status_str[] = { /* DBG_STATUS_DBG_ARRAY_NOT_SET */ "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)", - /* DBG_STATUS_FILTER_BUG */ - "Debug Bus filtering requires the -unifyInputs option (due to a HW bug)", + /* DBG_STATUS_RESERVED1 */ + "", /* DBG_STATUS_NON_MATCHING_LINES */ - "Non-matching debug lines - all lines must be of the same type (either 128b or 256b)", + "Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)", - /* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */ - "The selected trigger dword offset wasn't enabled in the recorded HW block", + /* DBG_STATUS_INSUFFICIENT_HW_IDS */ + "Insufficient HW IDs. 
Try to record less Storms/blocks", /* DBG_STATUS_DBG_BUS_IN_USE */ - "The debug bus is in use" + "The debug bus is in use", + + /* DBG_STATUS_INVALID_STORM_DBG_MODE */ + "The storm debug mode is not supported in the current chip", + + /* DBG_STATUS_OTHER_ENGINE_BB_ONLY */ + "Other engine is supported only in BB", + + /* DBG_STATUS_FILTER_SINGLE_HW_ID */ + "The configured filter mode requires a single Storm/block input", + + /* DBG_STATUS_TRIGGER_SINGLE_HW_ID */ + "The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input", + + /* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */ + "When triggering on Storm data, the Storm to trigger on must be specified" }; /* Idle check severity names array */ @@ -6032,7 +5768,7 @@ static const char * const s_master_strs[] = { "xsdm", "dbu", "dmae", - "???", + "jdap", "???", "???", "???", @@ -6040,12 +5776,13 @@ static const char * const s_master_strs[] = { }; /* REG FIFO error messages array */ -static const char * const s_reg_fifo_error_strs[] = { - "grc timeout", - "address doesn't belong to any block", - "reserved address in block or write to read-only address", - "privilege/protection mismatch", - "path isolation error" +static struct reg_fifo_err s_reg_fifo_errors[] = { + {1, "grc timeout"}, + {2, "address doesn't belong to any block"}, + {4, "reserved address in block or write to read-only address"}, + {8, "privilege/protection mismatch"}, + {16, "path isolation error"}, + {17, "RSL error"} }; /* IGU FIFO sources array */ @@ -6285,8 +6022,21 @@ static u32 qed_print_section_params(u32 *dump_buf, return dump_offset; } -static struct dbg_tools_user_data * -qed_dbg_get_user_data(struct qed_hwfn *p_hwfn) +/* Returns the block name that matches the specified block ID, + * or NULL if not found. + */ +static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn, + enum block_id block_id) +{ + const struct dbg_block_user *block = + (const struct dbg_block_user *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id; + + return (const char *)block->name; +} + +static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn + *p_hwfn) { return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info; } @@ -6294,7 +6044,8 @@ qed_dbg_get_user_data(struct qed_hwfn *p_hwfn) /* Parses the idle check rules and returns the number of characters printed. * In case of parsing error, returns 0. */ -static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf, +static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, + u32 *dump_buf, u32 *dump_buf_end, u32 num_rules, bool print_fw_idle_chk, @@ -6322,19 +6073,18 @@ static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf, hdr = (struct dbg_idle_chk_result_hdr *)dump_buf; rule_parsing_data = - (const struct dbg_idle_chk_rule_parsing_data *) - &s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA]. 
- ptr[hdr->rule_id]; + (const struct dbg_idle_chk_rule_parsing_data *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr + + hdr->rule_id; parsing_str_offset = - GET_FIELD(rule_parsing_data->data, - DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET); + GET_FIELD(rule_parsing_data->data, + DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET); has_fw_msg = - GET_FIELD(rule_parsing_data->data, - DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0; - parsing_str = - &((const char *) - s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr) - [parsing_str_offset]; + GET_FIELD(rule_parsing_data->data, + DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0; + parsing_str = (const char *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr + + parsing_str_offset; lsi_msg = parsing_str; curr_reg_id = 0; @@ -6438,7 +6188,8 @@ static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf, * parsed_results_bytes. * The parsing status is returned. */ -static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf, +static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, + u32 *dump_buf, u32 num_dumped_dwords, char *results_buf, u32 *parsed_results_bytes, @@ -6456,8 +6207,8 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf, *num_errors = 0; *num_warnings = 0; - if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr || - !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr) + if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; /* Read global_params section */ @@ -6490,7 +6241,8 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf, results_offset), "FW_IDLE_CHECK:\n"); rules_print_size = - qed_parse_idle_chk_dump_rules(dump_buf, + qed_parse_idle_chk_dump_rules(p_hwfn, + dump_buf, dump_buf_end, num_rules, true, @@ -6510,7 +6262,8 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf, results_offset), "\nLSI_IDLE_CHECK:\n"); rules_print_size = - qed_parse_idle_chk_dump_rules(dump_buf, + qed_parse_idle_chk_dump_rules(p_hwfn, + dump_buf, dump_buf_end, num_rules, false, @@ -6622,9 +6375,8 @@ qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn, format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes, &offset); - format_len = - (format_ptr->data & - MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT; + format_len = GET_MFW_FIELD(format_ptr->data, + MCP_TRACE_FORMAT_LEN); format_ptr->format_str = kzalloc(format_len, GFP_KERNEL); if (!format_ptr->format_str) { /* Update number of modules to be released */ @@ -6647,7 +6399,7 @@ qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn, * trace_buf - MCP trace cyclic buffer * trace_buf_size - MCP trace cyclic buffer size in bytes * data_offset - offset in bytes of the data to parse in the MCP trace cyclic - * buffer. + * buffer. * data_size - size in bytes of data to parse. * parsed_buf - destination buffer for parsed data. * parsed_results_bytes - size of parsed data in bytes. 
@@ -6692,9 +6444,8 @@ static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn, /* Skip message if its index doesn't exist in the meta data */ if (format_idx >= meta->formats_num) { - u8 format_size = - (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >> - MFW_TRACE_PRM_SIZE_SHIFT); + u8 format_size = (u8)GET_MFW_FIELD(header, + MFW_TRACE_PRM_SIZE); if (data_size < format_size) return DBG_STATUS_MCP_TRACE_BAD_DATA; @@ -6709,11 +6460,10 @@ static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn, format_ptr = &meta->formats[format_idx]; for (i = 0, - param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, - param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT; + param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift = + MCP_TRACE_FORMAT_P1_SIZE_OFFSET; i < MCP_TRACE_FORMAT_MAX_PARAMS; - i++, - param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH, + i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH, param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) { /* Extract param size (0..3) */ u8 param_size = (u8)((format_ptr->data & param_mask) >> @@ -6741,12 +6491,10 @@ static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn, data_size -= param_size; } - format_level = (u8)((format_ptr->data & - MCP_TRACE_FORMAT_LEVEL_MASK) >> - MCP_TRACE_FORMAT_LEVEL_SHIFT); - format_module = (u8)((format_ptr->data & - MCP_TRACE_FORMAT_MODULE_MASK) >> - MCP_TRACE_FORMAT_MODULE_SHIFT); + format_level = (u8)GET_MFW_FIELD(format_ptr->data, + MCP_TRACE_FORMAT_LEVEL); + format_module = (u8)GET_MFW_FIELD(format_ptr->data, + MCP_TRACE_FORMAT_MODULE); if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) return DBG_STATUS_MCP_TRACE_BAD_DATA; @@ -6888,7 +6636,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf, const char *section_name, *param_name, *param_str_val; u32 param_num_val, num_section_params, num_elements; struct reg_fifo_element *elements; - u8 i, j, err_val, vf_val; + u8 i, j, err_code, vf_val; u32 results_offset = 0; char vf_str[4]; @@ -6919,7 +6667,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf, /* Decode elements */ for (i = 0; i < num_elements; i++) { - bool err_printed = false; + const char *err_msg = NULL; /* Discover if element belongs to a VF or a PF */ vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF); @@ -6928,11 +6676,17 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf, else sprintf(vf_str, "%d", vf_val); + /* Find error message */ + err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR); + for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++) + if (err_code == s_reg_fifo_errors[j].err_code) + err_msg = s_reg_fifo_errors[j].err_msg; + /* Add parsed element to parsed buffer */ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), - "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ", + "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n", elements[i].data, (u32)GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ADDRESS) * @@ -6949,30 +6703,8 @@ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf, s_protection_strs[GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_PROTECTION)], s_master_strs[GET_FIELD(elements[i].data, - REG_FIFO_ELEMENT_MASTER)]); - - /* Print errors */ - for (j = 0, - err_val = GET_FIELD(elements[i].data, - REG_FIFO_ELEMENT_ERROR); - j < ARRAY_SIZE(s_reg_fifo_error_strs); - j++, err_val >>= 1) { - if (err_val & 0x1) { - if 
(err_printed) - results_offset += - sprintf(qed_get_buf_ptr - (results_buf, - results_offset), ", "); - results_offset += - sprintf(qed_get_buf_ptr - (results_buf, results_offset), "%s", - s_reg_fifo_error_strs[j]); - err_printed = true; - } - } - - results_offset += - sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n"); + REG_FIFO_ELEMENT_MASTER)], + err_msg ? err_msg : "unknown error code"); } results_offset += sprintf(qed_get_buf_ptr(results_buf, @@ -7326,27 +7058,28 @@ static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf, /***************************** Public Functions *******************************/ -enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr) +enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn, + const u8 * const bin_ptr) { - struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr; + struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr; u8 buf_id; /* Convert binary data to debug arrays */ - for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) { - s_user_dbg_arrays[buf_id].ptr = - (u32 *)(bin_ptr + buf_array[buf_id].offset); - s_user_dbg_arrays[buf_id].size_in_dwords = - BYTES_TO_DWORDS(buf_array[buf_id].length); - } + for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) + qed_set_dbg_bin_buf(p_hwfn, + (enum bin_dbg_buffer_type)buf_id, + (u32 *)(bin_ptr + buf_hdrs[buf_id].offset), + buf_hdrs[buf_id].length); return DBG_STATUS_OK; } -enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn) +enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn, + void **user_data_ptr) { - p_hwfn->dbg_user_info = kzalloc(sizeof(struct dbg_tools_user_data), - GFP_KERNEL); - if (!p_hwfn->dbg_user_info) + *user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data), + GFP_KERNEL); + if (!(*user_data_ptr)) return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; return DBG_STATUS_OK; @@ -7365,7 +7098,8 @@ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, { u32 num_errors, num_warnings; - return qed_parse_idle_chk_dump(dump_buf, + return qed_parse_idle_chk_dump(p_hwfn, + dump_buf, num_dumped_dwords, NULL, results_buf_size, @@ -7381,7 +7115,8 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, { u32 parsed_buf_size; - return qed_parse_idle_chk_dump(dump_buf, + return qed_parse_idle_chk_dump(p_hwfn, + dump_buf, num_dumped_dwords, results_buf, &parsed_buf_size, @@ -7552,25 +7287,28 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results) { - struct user_dbg_array *block_attn, *pstrings; const u32 *block_attn_name_offsets; - enum dbg_attn_type attn_type; + const char *attn_name_base; const char *block_name; + enum dbg_attn_type attn_type; u8 num_regs, i, j; num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS); - attn_type = (enum dbg_attn_type) - GET_FIELD(results->data, - DBG_ATTN_BLOCK_RESULT_ATTN_TYPE); - block_name = s_block_info_arr[results->block_id].name; - - if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr || - !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr || - !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr) + attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE); + block_name = qed_dbg_get_block_name(p_hwfn, results->block_id); + if (!block_name) + return DBG_STATUS_INVALID_ARGS; + + if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr || + 
!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; - block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS]; - block_attn_name_offsets = &block_attn->ptr[results->names_offset]; + block_attn_name_offsets = + (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr + + results->names_offset; + + attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr; /* Go over registers with a non-zero attention status */ for (i = 0; i < num_regs; i++) { @@ -7581,18 +7319,17 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, reg_result = &results->reg_results[i]; num_reg_attn = GET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN); - block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES]; - bit_mapping = &((struct dbg_attn_bit_mapping *) - block_attn->ptr)[reg_result->block_attn_offset]; - - pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS]; + bit_mapping = (struct dbg_attn_bit_mapping *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr + + reg_result->block_attn_offset; /* Go over attention status bits */ - for (j = 0; j < num_reg_attn; j++) { + for (j = 0; j < num_reg_attn; j++, bit_idx++) { u16 attn_idx_val = GET_FIELD(bit_mapping[j].data, DBG_ATTN_BIT_MAPPING_VAL); const char *attn_name, *attn_type_str, *masked_str; - u32 attn_name_offset, sts_addr; + u32 attn_name_offset; + u32 sts_addr; /* Check if bit mask should be advanced (due to unused * bits). @@ -7604,18 +7341,19 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, } /* Check current bit index */ - if (!(reg_result->sts_val & BIT(bit_idx))) { - bit_idx++; + if (!(reg_result->sts_val & BIT(bit_idx))) continue; - } - /* Find attention name */ + /* An attention bit with value=1 was found + * Find attention name + */ attn_name_offset = block_attn_name_offsets[attn_idx_val]; - attn_name = &((const char *) - pstrings->ptr)[attn_name_offset]; - attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ? - "Interrupt" : "Parity"; + attn_name = attn_name_base + attn_name_offset; + attn_type_str = + (attn_type == + ATTN_TYPE_INTERRUPT ? "Interrupt" : + "Parity"); masked_str = reg_result->mask_val & BIT(bit_idx) ? " [masked]" : ""; sts_addr = GET_FIELD(reg_result->data, @@ -7623,15 +7361,15 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "%s (%s) : %s [address 0x%08x, bit %d]%s\n", block_name, attn_type_str, attn_name, - sts_addr, bit_idx, masked_str); - - bit_idx++; + sts_addr * 4, bit_idx, masked_str); } } return DBG_STATUS_OK; } +static DEFINE_MUTEX(qed_dbg_lock); + /* Wrapper for unifying the idle_chk and mcp_trace api */ static enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, @@ -7691,7 +7429,10 @@ static struct { qed_dbg_fw_asserts_get_dump_buf_size, qed_dbg_fw_asserts_dump, qed_print_fw_asserts_results, - qed_get_fw_asserts_results_buf_size},}; + qed_get_fw_asserts_results_buf_size}, { + "ilt", + qed_dbg_ilt_get_dump_buf_size, + qed_dbg_ilt_dump, NULL, NULL},}; static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size) { @@ -7774,6 +7515,8 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, return rc; } +#define MAX_DBG_FEATURE_SIZE_DWORDS 0x3FFFFFFF + /* Generic function for performing the dump of a debug feature. 
*/ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -7803,6 +7546,17 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, &buf_size_dwords); if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED) return rc; + + if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) { + feature->buf_size = 0; + DP_NOTICE(p_hwfn->cdev, + "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n", + qed_features_lookup[feature_idx].name, + buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS); + + return DBG_STATUS_OK; + } + feature->buf_size = buf_size_dwords * sizeof(u32); feature->dump_buf = vmalloc(feature->buf_size); if (!feature->dump_buf) @@ -7949,6 +7703,16 @@ int qed_dbg_fw_asserts_size(struct qed_dev *cdev) return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS); } +int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) +{ + return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes); +} + +int qed_dbg_ilt_size(struct qed_dev *cdev) +{ + return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT); +} + int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) { @@ -7965,9 +7729,17 @@ int qed_dbg_mcp_trace_size(struct qed_dev *cdev) * feature buffer. */ #define REGDUMP_HEADER_SIZE sizeof(u32) +#define REGDUMP_HEADER_SIZE_SHIFT 0 +#define REGDUMP_HEADER_SIZE_MASK 0xffffff #define REGDUMP_HEADER_FEATURE_SHIFT 24 -#define REGDUMP_HEADER_ENGINE_SHIFT 31 +#define REGDUMP_HEADER_FEATURE_MASK 0x3f #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30 +#define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1 +#define REGDUMP_HEADER_ENGINE_SHIFT 31 +#define REGDUMP_HEADER_ENGINE_MASK 0x1 +#define REGDUMP_MAX_SIZE 0x1000000 +#define ILT_DUMP_MAX_SIZE (1024 * 1024 * 15) + enum debug_print_features { OLD_MODE = 0, IDLE_CHK = 1, @@ -7981,28 +7753,47 @@ enum debug_print_features { NVM_CFG1 = 9, DEFAULT_CFG = 10, NVM_META = 11, + MDUMP = 12, + ILT_DUMP = 13, }; -static u32 qed_calc_regdump_header(enum debug_print_features feature, +static u32 qed_calc_regdump_header(struct qed_dev *cdev, + enum debug_print_features feature, int engine, u32 feature_size, u8 omit_engine) { - /* Insert the engine, feature and mode inside the header and combine it - * with feature size. 
- */ - return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) | - (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) | - (engine << REGDUMP_HEADER_ENGINE_SHIFT); + u32 res = 0; + + SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size); + if (res != feature_size) + DP_NOTICE(cdev, + "Feature %d is too large (size 0x%x) and will corrupt the dump\n", + feature, feature_size); + + SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature); + SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine); + SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine); + + return res; } int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) { u8 cur_engine, omit_engine = 0, org_engine; + struct qed_hwfn *p_hwfn = + &cdev->hwfns[cdev->dbg_params.engine_for_debug]; + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + int grc_params[MAX_DBG_GRC_PARAMS], i; u32 offset = 0, feature_size; int rc; - if (cdev->num_hwfns == 1) + for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) + grc_params[i] = dev_data->grc.param_val[i]; + + if (!QED_IS_CMT(cdev)) omit_engine = 1; + mutex_lock(&qed_dbg_lock); + org_engine = qed_get_debug_engine(cdev); for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { /* Collect idle_chks and grcDump for each hw function */ @@ -8015,7 +7806,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(IDLE_CHK, cur_engine, + qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine, feature_size, omit_engine); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { @@ -8027,7 +7818,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(IDLE_CHK, cur_engine, + qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine, feature_size, omit_engine); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { @@ -8039,7 +7830,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(REG_FIFO, cur_engine, + qed_calc_regdump_header(cdev, REG_FIFO, cur_engine, feature_size, omit_engine); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { @@ -8051,7 +7842,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(IGU_FIFO, cur_engine, + qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine, feature_size, omit_engine); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { @@ -8064,7 +7855,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(PROTECTION_OVERRIDE, + qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE, cur_engine, feature_size, omit_engine); offset += (feature_size + REGDUMP_HEADER_SIZE); @@ -8079,22 +7870,45 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(FW_ASSERTS, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, FW_ASSERTS, + cur_engine, feature_size, + omit_engine); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_fw_asserts failed. 
rc = %d\n", rc); } + feature_size = qed_dbg_ilt_size(cdev); + if (!cdev->disable_ilt_dump && + feature_size < ILT_DUMP_MAX_SIZE) { + rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, ILT_DUMP, + cur_engine, + feature_size, + omit_engine); + offset += feature_size + REGDUMP_HEADER_SIZE; + } else { + DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n", + rc); + } + } + /* GRC dump - must be last because when mcp stuck it will * clutter idle_chk, reg_fifo, ... */ + for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) + dev_data->grc.param_val[i] = grc_params[i]; + rc = qed_dbg_grc(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(GRC_DUMP, cur_engine, + qed_calc_regdump_header(cdev, GRC_DUMP, + cur_engine, feature_size, omit_engine); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { @@ -8103,12 +7917,13 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) } qed_set_debug_engine(cdev, org_engine); + /* mcp_trace */ rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(MCP_TRACE, cur_engine, + qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine, feature_size, omit_engine); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { @@ -8117,11 +7932,12 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) /* nvm cfg1 */ rc = qed_dbg_nvm_image(cdev, - (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, - &feature_size, QED_NVM_IMAGE_NVM_CFG1); + (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size, + QED_NVM_IMAGE_NVM_CFG1); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(NVM_CFG1, cur_engine, + qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine, feature_size, omit_engine); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { @@ -8136,7 +7952,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) &feature_size, QED_NVM_IMAGE_DEFAULT_CFG); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(DEFAULT_CFG, cur_engine, + qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine, feature_size, omit_engine); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { @@ -8152,8 +7968,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) &feature_size, QED_NVM_IMAGE_NVM_META); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(NVM_META, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, NVM_META, cur_engine, + feature_size, omit_engine); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, @@ -8161,6 +7977,23 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc); } + /* nvm mdump */ + rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size, + QED_NVM_IMAGE_MDUMP); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, MDUMP, cur_engine, + feature_size, omit_engine); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else if (rc != -ENOENT) { + DP_ERR(cdev, + "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", + QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc); + } + + mutex_unlock(&qed_dbg_lock); + return 0; } @@ -8168,9 +8001,10 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn 
= &cdev->hwfns[cdev->dbg_params.engine_for_debug]; - u32 regs_len = 0, image_len = 0; + u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0; u8 cur_engine, org_engine; + cdev->disable_ilt_dump = false; org_engine = qed_get_debug_engine(cdev); for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { /* Engine specific */ @@ -8185,6 +8019,12 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) REGDUMP_HEADER_SIZE + qed_dbg_protection_override_size(cdev) + REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev); + + ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev); + if (ilt_len < ILT_DUMP_MAX_SIZE) { + total_ilt_len += ilt_len; + regs_len += ilt_len; + } } qed_set_debug_engine(cdev, org_engine); @@ -8200,6 +8040,17 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len); if (image_len) regs_len += REGDUMP_HEADER_SIZE + image_len; + qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len); + if (image_len) + regs_len += REGDUMP_HEADER_SIZE + image_len; + + if (regs_len > REGDUMP_MAX_SIZE) { + DP_VERBOSE(cdev, QED_MSG_DEBUG, + "Dump exceeds max size 0x%x, disable ILT dump\n", + REGDUMP_MAX_SIZE); + cdev->disable_ilt_dump = true; + regs_len -= total_ilt_len; + } return regs_len; } @@ -8245,9 +8096,8 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature) { struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->dbg_params.engine_for_debug]; + struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature]; struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); - struct qed_dbg_feature *qed_feature = - &cdev->dbg_params.features[feature]; u32 buf_size_dwords; enum dbg_status rc; @@ -8259,6 +8109,10 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature) if (rc != DBG_STATUS_OK) buf_size_dwords = 0; + /* Feature will not be dumped if it exceeds maximum size */ + if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) + buf_size_dwords = 0; + qed_ptt_release(p_hwfn, p_ptt); qed_feature->buf_size = buf_size_dwords * sizeof(u32); return qed_feature->buf_size; @@ -8278,14 +8132,21 @@ void qed_set_debug_engine(struct qed_dev *cdev, int engine_number) void qed_dbg_pf_init(struct qed_dev *cdev) { - const u8 *dbg_values; + const u8 *dbg_values = NULL; + int i; /* Debug values are after init values. * The offset is the first dword of the file. */ dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data; - qed_dbg_set_bin_ptr((u8 *)dbg_values); - qed_dbg_user_set_bin_ptr((u8 *)dbg_values); + + for_each_hwfn(cdev, i) { + qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values); + qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values); + } + + /* Set the hwfn to be 0 as default */ + cdev->dbg_params.engine_for_debug = 0; } void qed_dbg_pf_exit(struct qed_dev *cdev) @@ -8293,11 +8154,11 @@ void qed_dbg_pf_exit(struct qed_dev *cdev) struct qed_dbg_feature *feature = NULL; enum qed_dbg_features feature_idx; - /* Debug features' buffers may be allocated if debug feature was used - * but dump wasn't called. 
+ /* debug features' buffers may be allocated if debug feature was used + * but dump wasn't called */ for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) { - feature = &cdev->dbg_params.features[feature_idx]; + feature = &cdev->dbg_features[feature_idx]; if (feature->dump_buf) { vfree(feature->dump_buf); feature->dump_buf = NULL; diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h index e47e0e8d75b0..edf99d296bd1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.h +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h @@ -14,11 +14,13 @@ enum qed_dbg_features { DBG_FEATURE_IGU_FIFO, DBG_FEATURE_PROTECTION_OVERRIDE, DBG_FEATURE_FW_ASSERTS, + DBG_FEATURE_ILT, DBG_FEATURE_NUM }; /* Forward Declaration */ struct qed_dev; +struct qed_hwfn; int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes); int qed_dbg_grc_size(struct qed_dev *cdev); @@ -37,6 +39,8 @@ int qed_dbg_protection_override_size(struct qed_dev *cdev); int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes); int qed_dbg_fw_asserts_size(struct qed_dev *cdev); +int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes); +int qed_dbg_ilt_size(struct qed_dev *cdev); int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes); int qed_dbg_mcp_trace_size(struct qed_dev *cdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index a1ebc2b1ca0b..03bdd2e26329 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -907,7 +907,7 @@ qed_llh_access_filter(struct qed_hwfn *p_hwfn, /* Filter value */ addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4; - params.flags = QED_DMAE_FLAG_PF_DST; + SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_PF_VALID, 0x1); params.dst_pfid = pfid; rc = qed_dmae_host2grc(p_hwfn, p_ptt, @@ -1412,6 +1412,7 @@ void qed_resc_free(struct qed_dev *cdev) qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); qed_dbg_user_data_free(p_hwfn); + qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem); /* Destroy doorbell recovery mechanism */ qed_db_recovery_teardown(p_hwfn); @@ -1571,7 +1572,7 @@ static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn) /* all vports participate in weighted fair queueing */ for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++) - qm_info->qm_vport_params[i].vport_wfq = 1; + qm_info->qm_vport_params[i].wfq = 1; } /* initialize qm port params */ @@ -1579,6 +1580,7 @@ static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn) { /* Initialize qm port parameters */ u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine; + struct qed_dev *cdev = p_hwfn->cdev; /* indicate how ooo and high pri traffic is dealt with */ active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? 
@@ -1588,11 +1590,13 @@ static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn) for (i = 0; i < num_ports; i++) { struct init_qm_port_params *p_qm_port = &p_hwfn->qm_info.qm_port_params[i]; + u16 pbf_max_cmd_lines; p_qm_port->active = 1; p_qm_port->active_phys_tcs = active_phys_tcs; - p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports; - p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; + pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(cdev); + p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports; + p_qm_port->num_btb_blocks = NUM_OF_BTB_BLOCKS(cdev) / num_ports; } } @@ -2034,9 +2038,8 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn) vport = &(qm_info->qm_vport_params[i]); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, - "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ", - qm_info->start_vport + i, - vport->vport_rl, vport->vport_wfq); + "vport idx %d, wfq %d, first_tx_pq_id [ ", + qm_info->start_vport + i, vport->wfq); for (tc = 0; tc < NUM_OF_TCS; tc++) DP_VERBOSE(p_hwfn, NETIF_MSG_HW, @@ -2049,11 +2052,11 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn) pq = &(qm_info->qm_pq_params[i]); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, - "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", + "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d rl_id %d\n", qm_info->start_pq + i, pq->port_id, pq->vport_id, - pq->tc_id, pq->wrr_group, pq->rl_valid); + pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id); } } @@ -2103,9 +2106,6 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) if (!b_rc) return -EINVAL; - /* clear the QM_PF runtime phase leftovers from previous init */ - qed_init_clear_rt_data(p_hwfn); - /* prepare QM portion of runtime array */ qed_qm_init_pf(p_hwfn, p_ptt, false); @@ -2346,7 +2346,7 @@ int qed_resc_alloc(struct qed_dev *cdev) if (rc) goto alloc_err; - rc = qed_dbg_alloc_user_data(p_hwfn); + rc = qed_dbg_alloc_user_data(p_hwfn, &p_hwfn->dbg_user_info); if (rc) goto alloc_err; } @@ -2623,7 +2623,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; params.pf_rl_en = qm_info->pf_rl_en; params.pf_wfq_en = qm_info->pf_wfq_en; - params.vport_rl_en = qm_info->vport_rl_en; + params.global_rl_en = qm_info->vport_rl_en; params.vport_wfq_en = qm_info->vport_wfq_en; params.port_params = qm_info->qm_port_params; @@ -2891,6 +2891,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, if (rc) return rc; + qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem); + /* Pure runtime initializations - directly to the HW */ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); @@ -3000,8 +3002,10 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) u32 load_code, resp, param, drv_mb_param; bool b_default_mtu = true; struct qed_hwfn *p_hwfn; - int rc = 0, i; + const u32 *fw_overlays; + u32 fw_overlays_len; u16 ether_type; + int rc = 0, i; if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); @@ -3102,6 +3106,18 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) */ qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); + fw_overlays = cdev->fw_data->fw_overlays; + fw_overlays_len = cdev->fw_data->fw_overlays_len; + p_hwfn->fw_overlay_mem = + qed_fw_overlay_mem_alloc(p_hwfn, fw_overlays, + fw_overlays_len); + if (!p_hwfn->fw_overlay_mem) { + DP_NOTICE(p_hwfn, + "Failed to allocate fw overlay memory\n"); + rc = -ENOMEM; + 
goto load_err; + } + switch (load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, @@ -3566,8 +3582,10 @@ const char *qed_hw_get_resc_name(enum qed_resources res_id) return "RDMA_CNQ_RAM"; case QED_ILT: return "ILT"; - case QED_LL2_QUEUE: - return "LL2_QUEUE"; + case QED_LL2_RAM_QUEUE: + return "LL2_RAM_QUEUE"; + case QED_LL2_CTX_QUEUE: + return "LL2_CTX_QUEUE"; case QED_CMDQS_CQS: return "CMDQS_CQS"; case QED_RDMA_STATS_QUEUE: @@ -3606,18 +3624,46 @@ __qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, return 0; } +static u32 qed_hsi_def_val[][MAX_CHIP_IDS] = { + {MAX_NUM_VFS_BB, MAX_NUM_VFS_K2}, + {MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2}, + {MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2}, + {MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2,}, + {MAX_NUM_PFS_BB, MAX_NUM_PFS_K2}, + {MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2}, + {ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2}, + {MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2}, + {PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2}, + {RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2}, + {MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS}, + {PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES}, + {BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2}, +}; + +u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type) +{ + enum chip_ids chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2; + + if (type >= QED_NUM_HSI_DEFS) { + DP_ERR(cdev, "Unexpected HSI definition type [%d]\n", type); + return 0; + } + + return qed_hsi_def_val[type][chip_id]; +} static int qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - bool b_ah = QED_IS_AH(p_hwfn->cdev); u32 resc_max_val, mcp_resp; u8 res_id; int rc; - for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { switch (res_id) { - case QED_LL2_QUEUE: - resc_max_val = MAX_NUM_LL2_RX_QUEUES; + case QED_LL2_RAM_QUEUE: + resc_max_val = MAX_NUM_LL2_RX_RAM_QUEUES; + break; + case QED_LL2_CTX_QUEUE: + resc_max_val = MAX_NUM_LL2_RX_CTX_QUEUES; break; case QED_RDMA_CNQ_RAM: /* No need for a case for QED_CMDQS_CQS since @@ -3626,8 +3672,8 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) resc_max_val = NUM_OF_GLOBAL_QUEUES; break; case QED_RDMA_STATS_QUEUE: - resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 - : RDMA_NUM_STATISTIC_COUNTERS_BB; + resc_max_val = + NUM_OF_RDMA_STATISTIC_COUNTERS(p_hwfn->cdev); break; case QED_BDQ: resc_max_val = BDQ_NUM_RESOURCES; @@ -3660,28 +3706,24 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, u32 *p_resc_num, u32 *p_resc_start) { u8 num_funcs = p_hwfn->num_funcs_on_engine; - bool b_ah = QED_IS_AH(p_hwfn->cdev); + struct qed_dev *cdev = p_hwfn->cdev; switch (res_id) { case QED_L2_QUEUE: - *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 : - MAX_NUM_L2_QUEUES_BB) / num_funcs; + *p_resc_num = NUM_OF_L2_QUEUES(cdev) / num_funcs; break; case QED_VPORT: - *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 : - MAX_NUM_VPORTS_BB) / num_funcs; + *p_resc_num = NUM_OF_VPORTS(cdev) / num_funcs; break; case QED_RSS_ENG: - *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 : - ETH_RSS_ENGINE_NUM_BB) / num_funcs; + *p_resc_num = NUM_OF_RSS_ENGINES(cdev) / num_funcs; break; case QED_PQ: - *p_resc_num = (b_ah ? 
MAX_QM_TX_QUEUES_K2 : - MAX_QM_TX_QUEUES_BB) / num_funcs; + *p_resc_num = NUM_OF_QM_TX_QUEUES(cdev) / num_funcs; *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ break; case QED_RL: - *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs; + *p_resc_num = NUM_OF_QM_GLOBAL_RLS(cdev) / num_funcs; break; case QED_MAC: case QED_VLAN: @@ -3689,11 +3731,13 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; break; case QED_ILT: - *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 : - PXP_NUM_ILT_RECORDS_BB) / num_funcs; + *p_resc_num = NUM_OF_PXP_ILT_RECORDS(cdev) / num_funcs; + break; + case QED_LL2_RAM_QUEUE: + *p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs; break; - case QED_LL2_QUEUE: - *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; + case QED_LL2_CTX_QUEUE: + *p_resc_num = MAX_NUM_LL2_RX_CTX_QUEUES / num_funcs; break; case QED_RDMA_CNQ_RAM: case QED_CMDQS_CQS: @@ -3701,8 +3745,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs; break; case QED_RDMA_STATS_QUEUE: - *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 : - RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs; + *p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(cdev) / num_funcs; break; case QED_BDQ: if (p_hwfn->hw_info.personality != QED_PCI_ISCSI && @@ -5087,11 +5130,11 @@ static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn, for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; - vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) / + vport_params[i].wfq = (wfq_speed * QED_WFQ_UNIT) / min_pf_rate; qed_init_vport_wfq(p_hwfn, p_ptt, vport_params[i].first_tx_pq_id, - vport_params[i].vport_wfq); + vport_params[i].wfq); } } @@ -5102,7 +5145,7 @@ static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn, int i; for (i = 0; i < p_hwfn->qm_info.num_vports; i++) - p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1; + p_hwfn->qm_info.qm_vport_params[i].wfq = 1; } static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn, @@ -5118,7 +5161,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn, qed_init_wfq_default_param(p_hwfn, min_pf_rate); qed_init_vport_wfq(p_hwfn, p_ptt, vport_params[i].first_tx_pq_id, - vport_params[i].vport_wfq); + vport_params[i].wfq); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 47376d4d071f..eb4808b3bf67 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -230,30 +230,6 @@ enum qed_dmae_address_type_t { QED_DMAE_ADDRESS_GRC }; -/* value of flags If QED_DMAE_FLAG_RW_REPL_SRC flag is set and the - * source is a block of length DMAE_MAX_RW_SIZE and the - * destination is larger, the source block will be duplicated as - * many times as required to fill the destination block. 
This is - * used mostly to write a zeroed buffer to destination address - * using DMA - */ -#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001 -#define QED_DMAE_FLAG_VF_SRC 0x00000002 -#define QED_DMAE_FLAG_VF_DST 0x00000004 -#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008 -#define QED_DMAE_FLAG_PORT 0x00000010 -#define QED_DMAE_FLAG_PF_SRC 0x00000020 -#define QED_DMAE_FLAG_PF_DST 0x00000040 - -struct qed_dmae_params { - u32 flags; /* consists of QED_DMAE_FLAG_* values */ - u8 src_vfid; - u8 dst_vfid; - u8 port_id; - u8 src_pfid; - u8 dst_pfid; -}; - /** * @brief qed_dmae_host2grc - copy data from source addr to * dmae registers using the given ptt diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index de31a382f58e..4c7fa391fd33 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -167,6 +167,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, goto err; } p_cxt = cxt_info.p_cxt; + memset(p_cxt, 0, sizeof(*p_cxt)); + SET_FIELD(p_cxt->tstorm_ag_context.flags3, E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index e054f6c69e3a..4597015b8bff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -98,6 +98,7 @@ enum core_event_opcode { CORE_EVENT_RX_QUEUE_STOP, CORE_EVENT_RX_QUEUE_FLUSH, CORE_EVENT_TX_QUEUE_UPDATE, + CORE_EVENT_QUEUE_STATS_QUERY, MAX_CORE_EVENT_OPCODE }; @@ -116,7 +117,7 @@ struct core_ll2_port_stats { struct regpair gsi_crcchksm_error; }; -/* Ethernet TX Per Queue Stats */ +/* LL2 TX Per Queue Stats */ struct core_ll2_pstorm_per_queue_stat { struct regpair sent_ucast_bytes; struct regpair sent_mcast_bytes; @@ -124,13 +125,13 @@ struct core_ll2_pstorm_per_queue_stat { struct regpair sent_ucast_pkts; struct regpair sent_mcast_pkts; struct regpair sent_bcast_pkts; + struct regpair error_drop_pkts; }; /* Light-L2 RX Producers in Tstorm RAM */ struct core_ll2_rx_prod { __le16 bd_prod; __le16 cqe_prod; - __le32 reserved; }; struct core_ll2_tstorm_per_queue_stat { @@ -147,6 +148,18 @@ struct core_ll2_ustorm_per_queue_stat { struct regpair rcv_bcast_pkts; }; +/* Structure for doorbell data, in PWM mode, for RX producers update. 
*/ +struct core_pwm_prod_update_data { + __le16 icid; /* internal CID */ + u8 reserved0; + u8 params; +#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_MASK 0x3 +#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_SHIFT 0 +#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_MASK 0x3F /* Set 0 */ +#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_SHIFT 2 + struct core_ll2_rx_prod prod; /* Producers */ +}; + /* Core Ramrod Command IDs (light L2) */ enum core_ramrod_cmd_id { CORE_RAMROD_UNUSED, @@ -156,6 +169,7 @@ enum core_ramrod_cmd_id { CORE_RAMROD_TX_QUEUE_STOP, CORE_RAMROD_RX_QUEUE_FLUSH, CORE_RAMROD_TX_QUEUE_UPDATE, + CORE_RAMROD_QUEUE_STATS_QUERY, MAX_CORE_RAMROD_CMD_ID }; @@ -236,7 +250,8 @@ struct core_rx_gsi_offload_cqe { __le16 src_mac_addrlo; __le16 qp_id; __le32 src_qp; - __le32 reserved[3]; + struct core_rx_cqe_opaque_data opaque_data; + __le32 reserved; }; /* Core RX CQE for Light L2 */ @@ -274,8 +289,11 @@ struct core_rx_start_ramrod_data { u8 mf_si_mcast_accept_all; struct core_rx_action_on_error action_on_error; u8 gsi_offload_flag; + u8 vport_id_valid; + u8 vport_id; + u8 zero_prod_flg; u8 wipe_inner_vlan_pri_en; - u8 reserved[5]; + u8 reserved[2]; }; /* Ramrod data for rx queue stop ramrod */ @@ -352,8 +370,11 @@ struct core_tx_start_ramrod_data { __le16 pbl_size; __le16 qm_pq_id; u8 gsi_offload_flag; + u8 ctx_stats_en; + u8 vport_id_valid; u8 vport_id; - u8 resrved[2]; + u8 enforce_security_flag; + u8 reserved[7]; }; /* Ramrod data for tx queue stop ramrod */ @@ -385,7 +406,7 @@ struct ystorm_core_conn_st_ctx { /* The core storm context for the Pstorm */ struct pstorm_core_conn_st_ctx { - __le32 reserved[4]; + __le32 reserved[20]; }; /* Core Slowpath Connection storm context of Xstorm */ @@ -761,7 +782,7 @@ struct e4_tstorm_core_conn_ag_ctx { __le16 word1; __le16 word2; __le16 word3; - __le32 reg9; + __le32 ll2_rx_prod; __le32 reg10; }; @@ -836,11 +857,16 @@ struct e4_ustorm_core_conn_ag_ctx { /* The core storm context for the Mstorm */ struct mstorm_core_conn_st_ctx { - __le32 reserved[24]; + __le32 reserved[40]; }; /* The core storm context for the Ustorm */ struct ustorm_core_conn_st_ctx { + __le32 reserved[20]; +}; + +/* The core storm context for the Tstorm */ +struct tstorm_core_conn_st_ctx { __le32 reserved[4]; }; @@ -857,6 +883,8 @@ struct e4_core_conn_context { struct mstorm_core_conn_st_ctx mstorm_st_context; struct ustorm_core_conn_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2]; + struct tstorm_core_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; }; struct eth_mstorm_per_pf_stat { @@ -888,12 +916,21 @@ struct eth_pstorm_per_pf_stat { struct regpair sent_gre_bytes; struct regpair sent_vxlan_bytes; struct regpair sent_geneve_bytes; + struct regpair sent_mpls_bytes; + struct regpair sent_gre_mpls_bytes; + struct regpair sent_udp_mpls_bytes; struct regpair sent_gre_pkts; struct regpair sent_vxlan_pkts; struct regpair sent_geneve_pkts; + struct regpair sent_mpls_pkts; + struct regpair sent_gre_mpls_pkts; + struct regpair sent_udp_mpls_pkts; struct regpair gre_drop_pkts; struct regpair vxlan_drop_pkts; struct regpair geneve_drop_pkts; + struct regpair mpls_drop_pkts; + struct regpair gre_mpls_drop_pkts; + struct regpair udp_mpls_drop_pkts; }; /* Ethernet TX Per Queue Stats */ @@ -983,7 +1020,8 @@ union event_ring_data { struct event_ring_entry { u8 protocol_id; u8 opcode; - __le16 reserved0; + u8 reserved0; + u8 vf_id; __le16 echo; u8 fw_return_code; u8 flags; @@ -1061,7 +1099,20 @@ enum malicious_vf_error_id { ETH_CONTROL_PACKET_VIOLATION, 
ETH_ANTI_SPOOFING_ERR, ETH_PACKET_SIZE_TOO_LARGE, - MAX_MALICIOUS_VF_ERROR_ID + CORE_ILLEGAL_VLAN_MODE, + CORE_ILLEGAL_NBDS, + CORE_FIRST_BD_WO_SOP, + CORE_INSUFFICIENT_BDS, + CORE_PACKET_TOO_SMALL, + CORE_ILLEGAL_INBAND_TAGS, + CORE_VLAN_INSERT_AND_INBAND_VLAN, + CORE_MTU_VIOLATION, + CORE_CONTROL_PACKET_VIOLATION, + CORE_ANTI_SPOOFING_ERR, + CORE_PACKET_SIZE_TOO_LARGE, + CORE_ILLEGAL_BD_FLAGS, + CORE_GSI_PACKET_VIOLATION, + MAX_MALICIOUS_VF_ERROR_ID, }; /* Mstorm non-triggering VF zone */ @@ -1367,6 +1418,16 @@ enum vf_zone_size_mode { MAX_VF_ZONE_SIZE_MODE }; +/* Xstorm non-triggering VF zone */ +struct xstorm_non_trigger_vf_zone { + struct regpair non_edpm_ack_pkts; +}; + +/* Tstorm VF zone */ +struct xstorm_vf_zone { + struct xstorm_non_trigger_vf_zone non_trigger; +}; + /* Attentions status block */ struct atten_status_block { __le32 atten_bits; @@ -1435,7 +1496,11 @@ struct dmae_cmd { __le16 crc16; __le16 crc16_c; __le16 crc10; - __le16 reserved; + __le16 error_bit_reserved; +#define DMAE_CMD_ERROR_BIT_MASK 0x1 +#define DMAE_CMD_ERROR_BIT_SHIFT 0 +#define DMAE_CMD_RESERVED_MASK 0x7FFF +#define DMAE_CMD_RESERVED_SHIFT 1 __le16 xsum16; __le16 xsum8; }; @@ -1566,6 +1631,41 @@ struct e4_ystorm_core_conn_ag_ctx { __le32 reg3; }; +/* DMAE parameters */ +struct qed_dmae_params { + u32 flags; +/* If QED_DMAE_PARAMS_RW_REPL_SRC flag is set and the + * source is a block of length DMAE_MAX_RW_SIZE and the + * destination is larger, the source block will be duplicated as + * many times as required to fill the destination block. This is + * used mostly to write a zeroed buffer to destination address + * using DMA + */ +#define QED_DMAE_PARAMS_RW_REPL_SRC_MASK 0x1 +#define QED_DMAE_PARAMS_RW_REPL_SRC_SHIFT 0 +#define QED_DMAE_PARAMS_SRC_VF_VALID_MASK 0x1 +#define QED_DMAE_PARAMS_SRC_VF_VALID_SHIFT 1 +#define QED_DMAE_PARAMS_DST_VF_VALID_MASK 0x1 +#define QED_DMAE_PARAMS_DST_VF_VALID_SHIFT 2 +#define QED_DMAE_PARAMS_COMPLETION_DST_MASK 0x1 +#define QED_DMAE_PARAMS_COMPLETION_DST_SHIFT 3 +#define QED_DMAE_PARAMS_PORT_VALID_MASK 0x1 +#define QED_DMAE_PARAMS_PORT_VALID_SHIFT 4 +#define QED_DMAE_PARAMS_SRC_PF_VALID_MASK 0x1 +#define QED_DMAE_PARAMS_SRC_PF_VALID_SHIFT 5 +#define QED_DMAE_PARAMS_DST_PF_VALID_MASK 0x1 +#define QED_DMAE_PARAMS_DST_PF_VALID_SHIFT 6 +#define QED_DMAE_PARAMS_RESERVED_MASK 0x1FFFFFF +#define QED_DMAE_PARAMS_RESERVED_SHIFT 7 + u8 src_vfid; + u8 dst_vfid; + u8 port_id; + u8 src_pfid; + u8 dst_pfid; + u8 reserved1; + __le16 reserved2; +}; + /* IGU cleanup command */ struct igu_cleanup { __le32 sb_id_and_flags; @@ -1743,102 +1843,23 @@ struct sdm_op_gen { #define SDM_OP_GEN_RESERVED_SHIFT 20 }; +/* Physical memory descriptor */ +struct phys_mem_desc { + dma_addr_t phys_addr; + void *virt_addr; + u32 size; /* In bytes */ +}; + +/* Virtual memory descriptor */ +struct virt_mem_desc { + void *ptr; + u32 size; /* In bytes */ +}; + /****************************************/ /* Debug Tools HSI constants and macros */ /****************************************/ -enum block_addr { - GRCBASE_GRC = 0x50000, - GRCBASE_MISCS = 0x9000, - GRCBASE_MISC = 0x8000, - GRCBASE_DBU = 0xa000, - GRCBASE_PGLUE_B = 0x2a8000, - GRCBASE_CNIG = 0x218000, - GRCBASE_CPMU = 0x30000, - GRCBASE_NCSI = 0x40000, - GRCBASE_OPTE = 0x53000, - GRCBASE_BMB = 0x540000, - GRCBASE_PCIE = 0x54000, - GRCBASE_MCP = 0xe00000, - GRCBASE_MCP2 = 0x52000, - GRCBASE_PSWHST = 0x2a0000, - GRCBASE_PSWHST2 = 0x29e000, - GRCBASE_PSWRD = 0x29c000, - GRCBASE_PSWRD2 = 0x29d000, - GRCBASE_PSWWR = 0x29a000, - GRCBASE_PSWWR2 = 0x29b000, - 
GRCBASE_PSWRQ = 0x280000, - GRCBASE_PSWRQ2 = 0x240000, - GRCBASE_PGLCS = 0x0, - GRCBASE_DMAE = 0xc000, - GRCBASE_PTU = 0x560000, - GRCBASE_TCM = 0x1180000, - GRCBASE_MCM = 0x1200000, - GRCBASE_UCM = 0x1280000, - GRCBASE_XCM = 0x1000000, - GRCBASE_YCM = 0x1080000, - GRCBASE_PCM = 0x1100000, - GRCBASE_QM = 0x2f0000, - GRCBASE_TM = 0x2c0000, - GRCBASE_DORQ = 0x100000, - GRCBASE_BRB = 0x340000, - GRCBASE_SRC = 0x238000, - GRCBASE_PRS = 0x1f0000, - GRCBASE_TSDM = 0xfb0000, - GRCBASE_MSDM = 0xfc0000, - GRCBASE_USDM = 0xfd0000, - GRCBASE_XSDM = 0xf80000, - GRCBASE_YSDM = 0xf90000, - GRCBASE_PSDM = 0xfa0000, - GRCBASE_TSEM = 0x1700000, - GRCBASE_MSEM = 0x1800000, - GRCBASE_USEM = 0x1900000, - GRCBASE_XSEM = 0x1400000, - GRCBASE_YSEM = 0x1500000, - GRCBASE_PSEM = 0x1600000, - GRCBASE_RSS = 0x238800, - GRCBASE_TMLD = 0x4d0000, - GRCBASE_MULD = 0x4e0000, - GRCBASE_YULD = 0x4c8000, - GRCBASE_XYLD = 0x4c0000, - GRCBASE_PTLD = 0x5a0000, - GRCBASE_YPLD = 0x5c0000, - GRCBASE_PRM = 0x230000, - GRCBASE_PBF_PB1 = 0xda0000, - GRCBASE_PBF_PB2 = 0xda4000, - GRCBASE_RPB = 0x23c000, - GRCBASE_BTB = 0xdb0000, - GRCBASE_PBF = 0xd80000, - GRCBASE_RDIF = 0x300000, - GRCBASE_TDIF = 0x310000, - GRCBASE_CDU = 0x580000, - GRCBASE_CCFC = 0x2e0000, - GRCBASE_TCFC = 0x2d0000, - GRCBASE_IGU = 0x180000, - GRCBASE_CAU = 0x1c0000, - GRCBASE_RGFS = 0xf00000, - GRCBASE_RGSRC = 0x320000, - GRCBASE_TGFS = 0xd00000, - GRCBASE_TGSRC = 0x322000, - GRCBASE_UMAC = 0x51000, - GRCBASE_XMAC = 0x210000, - GRCBASE_DBG = 0x10000, - GRCBASE_NIG = 0x500000, - GRCBASE_WOL = 0x600000, - GRCBASE_BMBN = 0x610000, - GRCBASE_IPC = 0x20000, - GRCBASE_NWM = 0x800000, - GRCBASE_NWS = 0x700000, - GRCBASE_MS = 0x6a0000, - GRCBASE_PHY_PCIE = 0x620000, - GRCBASE_LED = 0x6b8000, - GRCBASE_AVS_WRAP = 0x6b0000, - GRCBASE_PXPREQBUS = 0x56000, - GRCBASE_MISC_AEU = 0x8000, - GRCBASE_BAR0_MAP = 0x1c00000, - MAX_BLOCK_ADDR -}; - enum block_id { BLOCK_GRC, BLOCK_MISCS, @@ -1893,8 +1914,6 @@ enum block_id { BLOCK_MULD, BLOCK_YULD, BLOCK_XYLD, - BLOCK_PTLD, - BLOCK_YPLD, BLOCK_PRM, BLOCK_PBF_PB1, BLOCK_PBF_PB2, @@ -1908,12 +1927,9 @@ enum block_id { BLOCK_TCFC, BLOCK_IGU, BLOCK_CAU, - BLOCK_RGFS, - BLOCK_RGSRC, - BLOCK_TGFS, - BLOCK_TGSRC, BLOCK_UMAC, BLOCK_XMAC, + BLOCK_MSTAT, BLOCK_DBG, BLOCK_NIG, BLOCK_WOL, @@ -1926,8 +1942,17 @@ enum block_id { BLOCK_LED, BLOCK_AVS_WRAP, BLOCK_PXPREQBUS, - BLOCK_MISC_AEU, BLOCK_BAR0_MAP, + BLOCK_MCP_FIO, + BLOCK_LAST_INIT, + BLOCK_PRS_FC, + BLOCK_PBF_FC, + BLOCK_NIG_LB_FC, + BLOCK_NIG_LB_FC_PLLH, + BLOCK_NIG_TX_FC_PLLH, + BLOCK_NIG_TX_FC, + BLOCK_NIG_RX_FC_PLLH, + BLOCK_NIG_RX_FC, MAX_BLOCK_ID }; @@ -1944,10 +1969,13 @@ enum bin_dbg_buffer_type { BIN_BUF_DBG_ATTN_REGS, BIN_BUF_DBG_ATTN_INDEXES, BIN_BUF_DBG_ATTN_NAME_OFFSETS, - BIN_BUF_DBG_BUS_BLOCKS, + BIN_BUF_DBG_BLOCKS, + BIN_BUF_DBG_BLOCKS_CHIP_DATA, BIN_BUF_DBG_BUS_LINES, - BIN_BUF_DBG_BUS_BLOCKS_USER_DATA, + BIN_BUF_DBG_BLOCKS_USER_DATA, + BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA, BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS, + BIN_BUF_DBG_RESET_REGS, BIN_BUF_DBG_PARSING_STRINGS, MAX_BIN_DBG_BUFFER_TYPE }; @@ -2031,20 +2059,54 @@ enum dbg_attn_type { MAX_DBG_ATTN_TYPE }; -/* Debug Bus block data */ -struct dbg_bus_block { - u8 num_of_lines; - u8 has_latency_events; - u16 lines_offset; +/* Block debug data */ +struct dbg_block { + u8 name[15]; + u8 associated_storm_letter; }; -/* Debug Bus block user data */ -struct dbg_bus_block_user_data { - u8 num_of_lines; +/* Chip-specific block debug data */ +struct dbg_block_chip { + u8 flags; +#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1 +#define 
DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0 +#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1 +#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1 +#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2 +#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3 +#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4 +#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7 +#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5 + u8 dbg_client_id; + u8 reset_reg_id; + u8 reset_reg_bit_offset; + struct dbg_mode_hdr dbg_bus_mode; + u16 reserved1; + u8 reserved2; + u8 num_of_dbg_bus_lines; + u16 dbg_bus_lines_offset; + u32 dbg_select_reg_addr; + u32 dbg_dword_enable_reg_addr; + u32 dbg_shift_reg_addr; + u32 dbg_force_valid_reg_addr; + u32 dbg_force_frame_reg_addr; +}; + +/* Chip-specific block user debug data */ +struct dbg_block_chip_user { + u8 num_of_dbg_bus_lines; u8 has_latency_events; u16 names_offset; }; +/* Block user debug data */ +struct dbg_block_user { + u8 name[16]; +}; + /* Block Debug line data */ struct dbg_bus_line { u8 data; @@ -2197,22 +2259,33 @@ enum dbg_idle_chk_severity_types { MAX_DBG_IDLE_CHK_SEVERITY_TYPES }; +/* Reset register */ +struct dbg_reset_reg { + u32 data; +#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF +#define DBG_RESET_REG_ADDR_SHIFT 0 +#define DBG_RESET_REG_IS_REMOVED_MASK 0x1 +#define DBG_RESET_REG_IS_REMOVED_SHIFT 24 +#define DBG_RESET_REG_RESERVED_MASK 0x7F +#define DBG_RESET_REG_RESERVED_SHIFT 25 +}; + /* Debug Bus block data */ struct dbg_bus_block_data { - u16 data; -#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF -#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0 -#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF -#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4 -#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF -#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8 -#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF -#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12 + u8 enable_mask; + u8 right_shift; + u8 force_valid_mask; + u8 force_frame_mask; + u8 dword_mask; u8 line_num; u8 hw_id; + u8 flags; +#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1 +#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0 +#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F +#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1 }; -/* Debug Bus Clients */ enum dbg_bus_clients { DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCP, @@ -2253,11 +2326,10 @@ enum dbg_bus_constraint_ops { /* Debug Bus trigger state data */ struct dbg_bus_trigger_state_data { - u8 data; -#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF -#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0 -#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF -#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4 + u8 msg_len; + u8 constraint_dword_mask; + u8 storm_id; + u8 reserved; }; /* Debug Bus memory address */ @@ -2307,8 +2379,7 @@ struct dbg_bus_storm_data { struct dbg_bus_data { u32 app_version; u8 state; - u8 hw_dwords; - u16 hw_id_mask; + u8 mode_256b_en; u8 num_enabled_blocks; u8 num_enabled_storms; u8 target; @@ -2319,67 +2390,21 @@ struct dbg_bus_data { u8 adding_filter; u8 filter_pre_trigger; u8 filter_post_trigger; - u16 reserved; u8 trigger_en; - struct dbg_bus_trigger_state_data trigger_states[3]; + u8 filter_constraint_dword_mask; u8 next_trigger_state; u8 next_constraint_id; - u8 unify_inputs; + struct dbg_bus_trigger_state_data trigger_states[3]; + u8 filter_msg_len; u8 
rcv_from_other_engine; + u8 blocks_dword_mask; + u8 blocks_dword_overlap; + u32 hw_id_mask; struct dbg_bus_pci_buf_data pci_buf; - struct dbg_bus_block_data blocks[88]; + struct dbg_bus_block_data blocks[132]; struct dbg_bus_storm_data storms[6]; }; -/* Debug bus filter types */ -enum dbg_bus_filter_types { - DBG_BUS_FILTER_TYPE_OFF, - DBG_BUS_FILTER_TYPE_PRE, - DBG_BUS_FILTER_TYPE_POST, - DBG_BUS_FILTER_TYPE_ON, - MAX_DBG_BUS_FILTER_TYPES -}; - -/* Debug bus frame modes */ -enum dbg_bus_frame_modes { - DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */ - DBG_BUS_FRAME_MODE_4HW_0ST = 3, /* 4 HW dwords, 0 Storm dwords */ - DBG_BUS_FRAME_MODE_8HW_0ST = 4, /* 8 HW dwords, 0 Storm dwords */ - MAX_DBG_BUS_FRAME_MODES -}; - -/* Debug bus other engine mode */ -enum dbg_bus_other_engine_modes { - DBG_BUS_OTHER_ENGINE_MODE_NONE, - DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX, - DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX, - DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX, - DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX, - MAX_DBG_BUS_OTHER_ENGINE_MODES -}; - -/* Debug bus post-trigger recording types */ -enum dbg_bus_post_trigger_types { - DBG_BUS_POST_TRIGGER_RECORD, - DBG_BUS_POST_TRIGGER_DROP, - MAX_DBG_BUS_POST_TRIGGER_TYPES -}; - -/* Debug bus pre-trigger recording types */ -enum dbg_bus_pre_trigger_types { - DBG_BUS_PRE_TRIGGER_START_FROM_ZERO, - DBG_BUS_PRE_TRIGGER_NUM_CHUNKS, - DBG_BUS_PRE_TRIGGER_DROP, - MAX_DBG_BUS_PRE_TRIGGER_TYPES -}; - -/* Debug bus SEMI frame modes */ -enum dbg_bus_semi_frame_modes { - DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0, - DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3, - MAX_DBG_BUS_SEMI_FRAME_MODES -}; - /* Debug bus states */ enum dbg_bus_states { DBG_BUS_STATE_IDLE, @@ -2397,7 +2422,9 @@ enum dbg_bus_storm_modes { DBG_BUS_STORM_MODE_DRA_W, DBG_BUS_STORM_MODE_LD_ST_ADDR, DBG_BUS_STORM_MODE_DRA_FSM, + DBG_BUS_STORM_MODE_FAST_DBGMUX, DBG_BUS_STORM_MODE_RH, + DBG_BUS_STORM_MODE_RH_WITH_STORE, DBG_BUS_STORM_MODE_FOC, DBG_BUS_STORM_MODE_EXT_STORE, MAX_DBG_BUS_STORM_MODES @@ -2438,13 +2465,13 @@ enum dbg_grc_params { DBG_GRC_PARAM_DUMP_CAU, DBG_GRC_PARAM_DUMP_QM, DBG_GRC_PARAM_DUMP_MCP, - DBG_GRC_PARAM_MCP_TRACE_META_SIZE, + DBG_GRC_PARAM_DUMP_DORQ, DBG_GRC_PARAM_DUMP_CFC, DBG_GRC_PARAM_DUMP_IGU, DBG_GRC_PARAM_DUMP_BRB, DBG_GRC_PARAM_DUMP_BTB, DBG_GRC_PARAM_DUMP_BMB, - DBG_GRC_PARAM_DUMP_NIG, + DBG_GRC_PARAM_RESERVD1, DBG_GRC_PARAM_DUMP_MULD, DBG_GRC_PARAM_DUMP_PRS, DBG_GRC_PARAM_DUMP_DMAE, @@ -2453,8 +2480,8 @@ enum dbg_grc_params { DBG_GRC_PARAM_DUMP_DIF, DBG_GRC_PARAM_DUMP_STATIC, DBG_GRC_PARAM_UNSTALL, - DBG_GRC_PARAM_NUM_LCIDS, - DBG_GRC_PARAM_NUM_LTIDS, + DBG_GRC_PARAM_RESERVED2, + DBG_GRC_PARAM_MCP_TRACE_META_SIZE, DBG_GRC_PARAM_EXCLUDE_ALL, DBG_GRC_PARAM_CRASH, DBG_GRC_PARAM_PARITY_SAFE, @@ -2462,22 +2489,14 @@ enum dbg_grc_params { DBG_GRC_PARAM_DUMP_PHY, DBG_GRC_PARAM_NO_MCP, DBG_GRC_PARAM_NO_FW_VER, + DBG_GRC_PARAM_RESERVED3, + DBG_GRC_PARAM_DUMP_MCP_HW_DUMP, + DBG_GRC_PARAM_DUMP_ILT_CDUC, + DBG_GRC_PARAM_DUMP_ILT_CDUT, + DBG_GRC_PARAM_DUMP_CAU_EXT, MAX_DBG_GRC_PARAMS }; -/* Debug reset registers */ -enum dbg_reset_regs { - DBG_RESET_REG_MISCS_PL_UA, - DBG_RESET_REG_MISCS_PL_HV, - DBG_RESET_REG_MISCS_PL_HV_2, - DBG_RESET_REG_MISC_PL_UA, - DBG_RESET_REG_MISC_PL_HV, - DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, - DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, - DBG_RESET_REG_MISC_PL_PDA_VAUX, - MAX_DBG_RESET_REGS -}; - /* Debug status codes */ enum dbg_status { DBG_STATUS_OK, @@ -2489,15 +2508,15 @@ enum dbg_status { DBG_STATUS_INVALID_PCI_BUF_SIZE, DBG_STATUS_PCI_BUF_ALLOC_FAILED, 
DBG_STATUS_PCI_BUF_NOT_ALLOCATED, - DBG_STATUS_TOO_MANY_INPUTS, - DBG_STATUS_INPUT_OVERLAP, - DBG_STATUS_HW_ONLY_RECORDING, + DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS, + DBG_STATUS_NO_MATCHING_FRAMING_MODE, + DBG_STATUS_VFC_READ_ERROR, DBG_STATUS_STORM_ALREADY_ENABLED, DBG_STATUS_STORM_NOT_ENABLED, DBG_STATUS_BLOCK_ALREADY_ENABLED, DBG_STATUS_BLOCK_NOT_ENABLED, DBG_STATUS_NO_INPUT_ENABLED, - DBG_STATUS_NO_FILTER_TRIGGER_64B, + DBG_STATUS_NO_FILTER_TRIGGER_256B, DBG_STATUS_FILTER_ALREADY_ENABLED, DBG_STATUS_TRIGGER_ALREADY_ENABLED, DBG_STATUS_TRIGGER_NOT_ENABLED, @@ -2522,7 +2541,7 @@ enum dbg_status { DBG_STATUS_MCP_TRACE_NO_META, DBG_STATUS_MCP_COULD_NOT_HALT, DBG_STATUS_MCP_COULD_NOT_RESUME, - DBG_STATUS_RESERVED2, + DBG_STATUS_RESERVED0, DBG_STATUS_SEMI_FIFO_NOT_EMPTY, DBG_STATUS_IGU_FIFO_BAD_DATA, DBG_STATUS_MCP_COULD_NOT_MASK_PRTY, @@ -2530,10 +2549,15 @@ enum dbg_status { DBG_STATUS_REG_FIFO_BAD_DATA, DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, DBG_STATUS_DBG_ARRAY_NOT_SET, - DBG_STATUS_FILTER_BUG, + DBG_STATUS_RESERVED1, DBG_STATUS_NON_MATCHING_LINES, - DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET, + DBG_STATUS_INSUFFICIENT_HW_IDS, DBG_STATUS_DBG_BUS_IN_USE, + DBG_STATUS_INVALID_STORM_DBG_MODE, + DBG_STATUS_OTHER_ENGINE_BB_ONLY, + DBG_STATUS_FILTER_SINGLE_HW_ID, + DBG_STATUS_TRIGGER_SINGLE_HW_ID, + DBG_STATUS_MISSING_TRIGGER_STATE_STORM, MAX_DBG_STATUS }; @@ -2569,9 +2593,9 @@ struct dbg_tools_data { struct dbg_bus_data bus; struct idle_chk_data idle_chk; u8 mode_enable[40]; - u8 block_in_reset[88]; + u8 block_in_reset[132]; u8 chip_id; - u8 platform_id; + u8 hw_type; u8 num_ports; u8 num_pfs_per_port; u8 num_vfs; @@ -2582,6 +2606,19 @@ struct dbg_tools_data { u32 num_regs_read; }; +/* ILT Clients */ +enum ilt_clients { + ILT_CLI_CDUC, + ILT_CLI_CDUT, + ILT_CLI_QM, + ILT_CLI_TM, + ILT_CLI_SRC, + ILT_CLI_TSDM, + ILT_CLI_RGFS, + ILT_CLI_TGFS, + MAX_ILT_CLIENTS +}; + /********************************/ /* HSI Init Functions constants */ /********************************/ @@ -2630,13 +2667,18 @@ struct init_nig_pri_tc_map_req { struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES]; }; +/* QM per global RL init parameters */ +struct init_qm_global_rl_params { + u32 rate_limit; +}; + /* QM per-port init parameters */ struct init_qm_port_params { - u8 active; - u8 active_phys_tcs; + u16 active_phys_tcs; u16 num_pbf_cmd_lines; u16 num_btb_blocks; - u16 reserved; + u8 active; + u8 reserved; }; /* QM per-PQ init parameters */ @@ -2645,15 +2687,14 @@ struct init_qm_pq_params { u8 tc_id; u8 wrr_group; u8 rl_valid; + u16 rl_id; u8 port_id; - u8 reserved0; - u16 reserved1; + u8 reserved; }; /* QM per-vport init parameters */ struct init_qm_vport_params { - u32 vport_rl; - u16 vport_wfq; + u16 wfq; u16 first_tx_pq_id[NUM_OF_TCS]; }; @@ -2673,13 +2714,12 @@ struct init_qm_vport_params { enum chip_ids { CHIP_BB, CHIP_K2, - CHIP_RESERVED, MAX_CHIP_IDS }; struct fw_asserts_ram_section { - u16 section_ram_line_offset; - u16 section_ram_line_size; + __le16 section_ram_line_offset; + __le16 section_ram_line_size; u8 list_dword_offset; u8 list_element_dword_size; u8 list_num_elements; @@ -2729,6 +2769,7 @@ enum init_modes { MODE_PORTS_PER_ENG_4, MODE_100G, MODE_RESERVED6, + MODE_RESERVED7, MAX_INIT_MODES }; @@ -2763,9 +2804,19 @@ enum bin_init_buffer_type { BIN_BUF_INIT_VAL, BIN_BUF_INIT_MODE_TREE, BIN_BUF_INIT_IRO, + BIN_BUF_INIT_OVERLAYS, MAX_BIN_INIT_BUFFER_TYPE }; +/* FW overlay buffer header */ +struct fw_overlay_buf_hdr { + u32 data; +#define FW_OVERLAY_BUF_HDR_STORM_ID_MASK 0xFF +#define 
FW_OVERLAY_BUF_HDR_STORM_ID_SHIFT 0 +#define FW_OVERLAY_BUF_HDR_BUF_SIZE_MASK 0xFFFFFF +#define FW_OVERLAY_BUF_HDR_BUF_SIZE_SHIFT 8 +}; + /* init array header: raw */ struct init_array_raw_hdr { u32 data; @@ -2859,10 +2910,8 @@ struct init_if_phase_op { u32 op_data; #define INIT_IF_PHASE_OP_OP_MASK 0xF #define INIT_IF_PHASE_OP_OP_SHIFT 0 -#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1 -#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4 -#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF -#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5 +#define INIT_IF_PHASE_OP_RESERVED1_MASK 0xFFF +#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 4 #define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF #define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 u32 phase_data; @@ -2991,9 +3040,11 @@ struct iro { * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug * arrays. * + * @param p_hwfn - HW device data * @param bin_ptr - a pointer to the binary data with debug arrays. */ -enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr); +enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, + const u8 * const bin_ptr); /** * @brief qed_read_regs - Reads registers into a buffer (using GRC). @@ -3024,6 +3075,20 @@ void qed_read_regs(struct qed_hwfn *p_hwfn, */ bool qed_read_fw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct fw_info *fw_info); +/** + * @brief qed_dbg_grc_config - Sets the value of a GRC parameter. + * + * @param p_hwfn - HW device data + * @param grc_param - GRC parameter + * @param val - Value to set. + * + * @return error if one of the following holds: + * - the version wasn't set + * - grc_param is invalid + * - val is outside the allowed boundaries + */ +enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, + enum dbg_grc_params grc_param, u32 val); /** * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their @@ -3343,20 +3408,36 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, struct mcp_trace_format { u32 data; #define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff -#define MCP_TRACE_FORMAT_MODULE_SHIFT 0 +#define MCP_TRACE_FORMAT_MODULE_OFFSET 0 #define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 -#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16 +#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16 #define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 -#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18 +#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18 #define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 -#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20 +#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20 #define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 -#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22 +#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22 #define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 -#define MCP_TRACE_FORMAT_LEN_SHIFT 24 +#define MCP_TRACE_FORMAT_LEN_OFFSET 24 + char *format_str; }; +/* MCP Trace Meta data structure */ +struct mcp_trace_meta { + u32 modules_num; + char **modules; + u32 formats_num; + struct mcp_trace_format *formats; + bool is_allocated; +}; + +/* Debug Tools user data */ +struct dbg_tools_user_data { + struct mcp_trace_meta mcp_trace_meta; + const u32 *mcp_trace_user_meta_buf; +}; + /******************************** Constants **********************************/ #define MAX_NAME_LEN 16 @@ -3367,16 +3448,20 @@ struct mcp_trace_format { * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with * debug arrays. * + * @param p_hwfn - HW device data * @param bin_ptr - a pointer to the binary data with debug arrays. 
*/ -enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr); +enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn, + const u8 * const bin_ptr); /** * @brief qed_dbg_alloc_user_data - Allocates user debug data. * * @param p_hwfn - HW device data + * @param user_data_ptr - OUT: a pointer to the allocated memory. */ -enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn); +enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn, + void **user_data_ptr); /** * @brief qed_dbg_get_status_str - Returns a string for the specified status. @@ -3649,271 +3734,6 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results); -/* Debug Bus blocks */ -static const u32 dbg_bus_blocks[] = { - 0x0000000f, /* grc, bb, 15 lines */ - 0x0000000f, /* grc, k2, 15 lines */ - 0x00000000, - 0x00000000, /* miscs, bb, 0 lines */ - 0x00000000, /* miscs, k2, 0 lines */ - 0x00000000, - 0x00000000, /* misc, bb, 0 lines */ - 0x00000000, /* misc, k2, 0 lines */ - 0x00000000, - 0x00000000, /* dbu, bb, 0 lines */ - 0x00000000, /* dbu, k2, 0 lines */ - 0x00000000, - 0x000f0127, /* pglue_b, bb, 39 lines */ - 0x0036012a, /* pglue_b, k2, 42 lines */ - 0x00000000, - 0x00000000, /* cnig, bb, 0 lines */ - 0x00120102, /* cnig, k2, 2 lines */ - 0x00000000, - 0x00000000, /* cpmu, bb, 0 lines */ - 0x00000000, /* cpmu, k2, 0 lines */ - 0x00000000, - 0x00000001, /* ncsi, bb, 1 lines */ - 0x00000001, /* ncsi, k2, 1 lines */ - 0x00000000, - 0x00000000, /* opte, bb, 0 lines */ - 0x00000000, /* opte, k2, 0 lines */ - 0x00000000, - 0x00600085, /* bmb, bb, 133 lines */ - 0x00600085, /* bmb, k2, 133 lines */ - 0x00000000, - 0x00000000, /* pcie, bb, 0 lines */ - 0x00e50033, /* pcie, k2, 51 lines */ - 0x00000000, - 0x00000000, /* mcp, bb, 0 lines */ - 0x00000000, /* mcp, k2, 0 lines */ - 0x00000000, - 0x01180009, /* mcp2, bb, 9 lines */ - 0x01180009, /* mcp2, k2, 9 lines */ - 0x00000000, - 0x01210104, /* pswhst, bb, 4 lines */ - 0x01210104, /* pswhst, k2, 4 lines */ - 0x00000000, - 0x01250103, /* pswhst2, bb, 3 lines */ - 0x01250103, /* pswhst2, k2, 3 lines */ - 0x00000000, - 0x00340101, /* pswrd, bb, 1 lines */ - 0x00340101, /* pswrd, k2, 1 lines */ - 0x00000000, - 0x01280119, /* pswrd2, bb, 25 lines */ - 0x01280119, /* pswrd2, k2, 25 lines */ - 0x00000000, - 0x01410109, /* pswwr, bb, 9 lines */ - 0x01410109, /* pswwr, k2, 9 lines */ - 0x00000000, - 0x00000000, /* pswwr2, bb, 0 lines */ - 0x00000000, /* pswwr2, k2, 0 lines */ - 0x00000000, - 0x001c0001, /* pswrq, bb, 1 lines */ - 0x001c0001, /* pswrq, k2, 1 lines */ - 0x00000000, - 0x014a0015, /* pswrq2, bb, 21 lines */ - 0x014a0015, /* pswrq2, k2, 21 lines */ - 0x00000000, - 0x00000000, /* pglcs, bb, 0 lines */ - 0x00120006, /* pglcs, k2, 6 lines */ - 0x00000000, - 0x00100001, /* dmae, bb, 1 lines */ - 0x00100001, /* dmae, k2, 1 lines */ - 0x00000000, - 0x015f0105, /* ptu, bb, 5 lines */ - 0x015f0105, /* ptu, k2, 5 lines */ - 0x00000000, - 0x01640120, /* tcm, bb, 32 lines */ - 0x01640120, /* tcm, k2, 32 lines */ - 0x00000000, - 0x01640120, /* mcm, bb, 32 lines */ - 0x01640120, /* mcm, k2, 32 lines */ - 0x00000000, - 0x01640120, /* ucm, bb, 32 lines */ - 0x01640120, /* ucm, k2, 32 lines */ - 0x00000000, - 0x01640120, /* xcm, bb, 32 lines */ - 0x01640120, /* xcm, k2, 32 lines */ - 0x00000000, - 0x01640120, /* ycm, bb, 32 lines */ - 0x01640120, /* ycm, k2, 32 lines */ - 0x00000000, - 0x01640120, /* pcm, bb, 32 lines */ - 0x01640120, /* pcm, k2, 32 
lines */ - 0x00000000, - 0x01840062, /* qm, bb, 98 lines */ - 0x01840062, /* qm, k2, 98 lines */ - 0x00000000, - 0x01e60021, /* tm, bb, 33 lines */ - 0x01e60021, /* tm, k2, 33 lines */ - 0x00000000, - 0x02070107, /* dorq, bb, 7 lines */ - 0x02070107, /* dorq, k2, 7 lines */ - 0x00000000, - 0x00600185, /* brb, bb, 133 lines */ - 0x00600185, /* brb, k2, 133 lines */ - 0x00000000, - 0x020e0019, /* src, bb, 25 lines */ - 0x020c001a, /* src, k2, 26 lines */ - 0x00000000, - 0x02270104, /* prs, bb, 4 lines */ - 0x02270104, /* prs, k2, 4 lines */ - 0x00000000, - 0x022b0133, /* tsdm, bb, 51 lines */ - 0x022b0133, /* tsdm, k2, 51 lines */ - 0x00000000, - 0x022b0133, /* msdm, bb, 51 lines */ - 0x022b0133, /* msdm, k2, 51 lines */ - 0x00000000, - 0x022b0133, /* usdm, bb, 51 lines */ - 0x022b0133, /* usdm, k2, 51 lines */ - 0x00000000, - 0x022b0133, /* xsdm, bb, 51 lines */ - 0x022b0133, /* xsdm, k2, 51 lines */ - 0x00000000, - 0x022b0133, /* ysdm, bb, 51 lines */ - 0x022b0133, /* ysdm, k2, 51 lines */ - 0x00000000, - 0x022b0133, /* psdm, bb, 51 lines */ - 0x022b0133, /* psdm, k2, 51 lines */ - 0x00000000, - 0x025e010c, /* tsem, bb, 12 lines */ - 0x025e010c, /* tsem, k2, 12 lines */ - 0x00000000, - 0x025e010c, /* msem, bb, 12 lines */ - 0x025e010c, /* msem, k2, 12 lines */ - 0x00000000, - 0x025e010c, /* usem, bb, 12 lines */ - 0x025e010c, /* usem, k2, 12 lines */ - 0x00000000, - 0x025e010c, /* xsem, bb, 12 lines */ - 0x025e010c, /* xsem, k2, 12 lines */ - 0x00000000, - 0x025e010c, /* ysem, bb, 12 lines */ - 0x025e010c, /* ysem, k2, 12 lines */ - 0x00000000, - 0x025e010c, /* psem, bb, 12 lines */ - 0x025e010c, /* psem, k2, 12 lines */ - 0x00000000, - 0x026a000d, /* rss, bb, 13 lines */ - 0x026a000d, /* rss, k2, 13 lines */ - 0x00000000, - 0x02770106, /* tmld, bb, 6 lines */ - 0x02770106, /* tmld, k2, 6 lines */ - 0x00000000, - 0x027d0106, /* muld, bb, 6 lines */ - 0x027d0106, /* muld, k2, 6 lines */ - 0x00000000, - 0x02770005, /* yuld, bb, 5 lines */ - 0x02770005, /* yuld, k2, 5 lines */ - 0x00000000, - 0x02830107, /* xyld, bb, 7 lines */ - 0x027d0107, /* xyld, k2, 7 lines */ - 0x00000000, - 0x00000000, /* ptld, bb, 0 lines */ - 0x00000000, /* ptld, k2, 0 lines */ - 0x00000000, - 0x00000000, /* ypld, bb, 0 lines */ - 0x00000000, /* ypld, k2, 0 lines */ - 0x00000000, - 0x028a010e, /* prm, bb, 14 lines */ - 0x02980110, /* prm, k2, 16 lines */ - 0x00000000, - 0x02a8000d, /* pbf_pb1, bb, 13 lines */ - 0x02a8000d, /* pbf_pb1, k2, 13 lines */ - 0x00000000, - 0x02a8000d, /* pbf_pb2, bb, 13 lines */ - 0x02a8000d, /* pbf_pb2, k2, 13 lines */ - 0x00000000, - 0x02a8000d, /* rpb, bb, 13 lines */ - 0x02a8000d, /* rpb, k2, 13 lines */ - 0x00000000, - 0x00600185, /* btb, bb, 133 lines */ - 0x00600185, /* btb, k2, 133 lines */ - 0x00000000, - 0x02b50117, /* pbf, bb, 23 lines */ - 0x02b50117, /* pbf, k2, 23 lines */ - 0x00000000, - 0x02cc0006, /* rdif, bb, 6 lines */ - 0x02cc0006, /* rdif, k2, 6 lines */ - 0x00000000, - 0x02d20006, /* tdif, bb, 6 lines */ - 0x02d20006, /* tdif, k2, 6 lines */ - 0x00000000, - 0x02d80003, /* cdu, bb, 3 lines */ - 0x02db000e, /* cdu, k2, 14 lines */ - 0x00000000, - 0x02e9010d, /* ccfc, bb, 13 lines */ - 0x02f60117, /* ccfc, k2, 23 lines */ - 0x00000000, - 0x02e9010d, /* tcfc, bb, 13 lines */ - 0x02f60117, /* tcfc, k2, 23 lines */ - 0x00000000, - 0x030d0133, /* igu, bb, 51 lines */ - 0x030d0133, /* igu, k2, 51 lines */ - 0x00000000, - 0x03400106, /* cau, bb, 6 lines */ - 0x03400106, /* cau, k2, 6 lines */ - 0x00000000, - 0x00000000, /* rgfs, bb, 0 lines */ - 0x00000000, /* rgfs, k2, 0 lines 
*/ - 0x00000000, - 0x00000000, /* rgsrc, bb, 0 lines */ - 0x00000000, /* rgsrc, k2, 0 lines */ - 0x00000000, - 0x00000000, /* tgfs, bb, 0 lines */ - 0x00000000, /* tgfs, k2, 0 lines */ - 0x00000000, - 0x00000000, /* tgsrc, bb, 0 lines */ - 0x00000000, /* tgsrc, k2, 0 lines */ - 0x00000000, - 0x00000000, /* umac, bb, 0 lines */ - 0x00120006, /* umac, k2, 6 lines */ - 0x00000000, - 0x00000000, /* xmac, bb, 0 lines */ - 0x00000000, /* xmac, k2, 0 lines */ - 0x00000000, - 0x00000000, /* dbg, bb, 0 lines */ - 0x00000000, /* dbg, k2, 0 lines */ - 0x00000000, - 0x0346012b, /* nig, bb, 43 lines */ - 0x0346011d, /* nig, k2, 29 lines */ - 0x00000000, - 0x00000000, /* wol, bb, 0 lines */ - 0x001c0002, /* wol, k2, 2 lines */ - 0x00000000, - 0x00000000, /* bmbn, bb, 0 lines */ - 0x00210008, /* bmbn, k2, 8 lines */ - 0x00000000, - 0x00000000, /* ipc, bb, 0 lines */ - 0x00000000, /* ipc, k2, 0 lines */ - 0x00000000, - 0x00000000, /* nwm, bb, 0 lines */ - 0x0371000b, /* nwm, k2, 11 lines */ - 0x00000000, - 0x00000000, /* nws, bb, 0 lines */ - 0x037c0009, /* nws, k2, 9 lines */ - 0x00000000, - 0x00000000, /* ms, bb, 0 lines */ - 0x00120004, /* ms, k2, 4 lines */ - 0x00000000, - 0x00000000, /* phy_pcie, bb, 0 lines */ - 0x00e5001a, /* phy_pcie, k2, 26 lines */ - 0x00000000, - 0x00000000, /* led, bb, 0 lines */ - 0x00000000, /* led, k2, 0 lines */ - 0x00000000, - 0x00000000, /* avs_wrap, bb, 0 lines */ - 0x00000000, /* avs_wrap, k2, 0 lines */ - 0x00000000, - 0x00000000, /* bar0_map, bb, 0 lines */ - 0x00000000, /* bar0_map, k2, 0 lines */ - 0x00000000, - 0x00000000, /* bar0_map, bb, 0 lines */ - 0x00000000, /* bar0_map, k2, 0 lines */ - 0x00000000, -}; - /* Win 2 */ #define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL @@ -3927,22 +3747,28 @@ static const u32 dbg_bus_blocks[] = { #define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL /* Win 6 */ -#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL +#define GTT_BAR0_MAP_REG_MSDM_RAM_2048 0x013000UL /* Win 7 */ -#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL +#define GTT_BAR0_MAP_REG_USDM_RAM 0x014000UL /* Win 8 */ -#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL +#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x015000UL /* Win 9 */ -#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL +#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x016000UL /* Win 10 */ -#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL +#define GTT_BAR0_MAP_REG_XSDM_RAM 0x017000UL /* Win 11 */ -#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL +#define GTT_BAR0_MAP_REG_XSDM_RAM_1024 0x018000UL + +/* Win 12 */ +#define GTT_BAR0_MAP_REG_YSDM_RAM 0x019000UL + +/* Win 13 */ +#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL /** * @brief qed_qm_pf_mem_size - prepare QM ILT sizes @@ -3967,7 +3793,7 @@ struct qed_qm_common_rt_init_params { u8 max_phys_tcs_per_port; bool pf_rl_en; bool pf_wfq_en; - bool vport_rl_en; + bool global_rl_en; bool vport_wfq_en; struct init_qm_port_params *port_params; }; @@ -3986,11 +3812,10 @@ struct qed_qm_pf_rt_init_params { u16 start_pq; u16 num_pf_pqs; u16 num_vf_pqs; - u8 start_vport; - u8 num_vports; + u16 start_vport; + u16 num_vports; u16 pf_wfq; u32 pf_rl; - u32 link_speed; struct init_qm_pq_params *pq_params; struct init_qm_vport_params *vport_params; }; @@ -4039,22 +3864,22 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn, */ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq); + u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq); /** - * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT + * @brief qed_init_global_rl - Initializes 
the rate limit of the specified + * rate limiter * * @param p_hwfn * @param p_ptt - ptt window used for writing the registers - * @param vport_id - VPORT ID - * @param vport_rl - rate limit in Mb/sec units - * @param link_speed - link speed in Mbps. + * @param rl_id - RL ID + * @param rate_limit - rate limit in Mb/sec units * * @return 0 on success, -1 on error. */ -int qed_init_vport_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 vport_id, u32 vport_rl, u32 link_speed); +int qed_init_global_rl(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 rl_id, u32 rate_limit); /** * @brief qed_send_qm_stop_cmd Sends a stop command to the QM @@ -4142,7 +3967,7 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id); /** * @brief qed_gft_config - Enable and configure HW for GFT * - * @param p_hwfn + * @param p_hwfn - HW device data * @param p_ptt - ptt window used for writing the registers. * @param pf_id - pf on which to enable GFT. * @param tcp - set profile tcp packets. @@ -4227,6 +4052,42 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 assert_level[NUM_STORMS]); +/** + * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory. + * + * @param p_hwfn - HW device data + * @param fw_overlay_in_buf - the input FW overlay buffer. + * @param buf_size - the size of the input FW overlay buffer in bytes. + * must be aligned to dwords. + * @param fw_overlay_out_mem - OUT: a pointer to the allocated overlays memory. + * + * @return a pointer to the allocated overlays memory, + * or NULL in case of failures. + */ +struct phys_mem_desc * +qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, + const u32 * const fw_overlay_in_buf, + u32 buf_size_in_bytes); + +/** + * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM. + * + * @param p_hwfn - HW device data. + * @param p_ptt - ptt window used for writing the registers. + * @param fw_overlay_mem - the allocated FW overlay memory. + */ +void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct phys_mem_desc *fw_overlay_mem); + +/** + * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory. + * + * @param p_hwfn - HW device data. + * @param fw_overlay_mem - the allocated FW overlay memory to free. + */ +void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, + struct phys_mem_desc *fw_overlay_mem); /* Ystorm flow control mode. 
Use enum fw_flow_ctrl_mode */ #define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) @@ -4267,851 +4128,807 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, (IRO[7].base + ((queue_zone_id) * IRO[7].m1)) #define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size) +/* Xstorm common PQ info */ +#define XSTORM_PQ_INFO_OFFSET(pq_id) \ + (IRO[8].base + ((pq_id) * IRO[8].m1)) +#define XSTORM_PQ_INFO_SIZE (IRO[8].size) + /* Xstorm Integration Test Data */ -#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base) -#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size) +#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) +#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) /* Ystorm Integration Test Data */ -#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) -#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) +#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) +#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) /* Pstorm Integration Test Data */ -#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) -#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) +#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) +#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) /* Tstorm Integration Test Data */ -#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) -#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) +#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base) +#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size) /* Mstorm Integration Test Data */ -#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base) -#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size) +#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base) +#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size) /* Ustorm Integration Test Data */ -#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base) -#define USTORM_INTEG_TEST_DATA_SIZE (IRO[13].size) +#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base) +#define USTORM_INTEG_TEST_DATA_SIZE (IRO[14].size) + +/* Xstorm overlay buffer host address */ +#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[15].base) +#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[15].size) + +/* Ystorm overlay buffer host address */ +#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[16].base) +#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[16].size) + +/* Pstorm overlay buffer host address */ +#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base) +#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size) + +/* Tstorm overlay buffer host address */ +#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base) +#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size) + +/* Mstorm overlay buffer host address */ +#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[19].base) +#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size) + +/* Ustorm overlay buffer host address */ +#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base) +#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[20].size) /* Tstorm producers */ #define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \ - (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1)) -#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size) + (IRO[21].base + ((core_rx_queue_id) * IRO[21].m1)) +#define TSTORM_LL2_RX_PRODS_SIZE (IRO[21].size) /* Tstorm LightL2 queue statistics */ #define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ - (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1)) -#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size) + (IRO[22].base + ((core_rx_queue_id) * IRO[22].m1)) +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[22].size) /* Ustorm LiteL2 queue statistics */ #define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ - (IRO[16].base + 
((core_rx_queue_id) * IRO[16].m1)) -#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size) + (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1)) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[23].size) /* Pstorm LiteL2 queue statistics */ #define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ - (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1)) -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size) + (IRO[24].base + ((core_tx_stats_id) * IRO[24].m1)) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size) /* Mstorm queue statistics */ #define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[18].base + ((stat_counter_id) * IRO[18].m1)) -#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size) + (IRO[25].base + ((stat_counter_id) * IRO[25].m1)) +#define MSTORM_QUEUE_STAT_SIZE (IRO[25].size) -/* Mstorm ETH PF queues producers */ -#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \ - (IRO[19].base + ((queue_id) * IRO[19].m1)) -#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size) +/* TPA agregation timeout in us resolution (on ASIC) */ +#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[26].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[26].size) /* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size - * mode. + * mode */ #define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \ - (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2)) -#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size) + (IRO[27].base + ((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2)) +#define MSTORM_ETH_VF_PRODS_SIZE (IRO[27].size) -/* TPA agregation timeout in us resolution (on ASIC) */ -#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base) -#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size) +/* Mstorm ETH PF queues producers */ +#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \ + (IRO[28].base + ((queue_id) * IRO[28].m1)) +#define MSTORM_ETH_PF_PRODS_SIZE (IRO[28].size) /* Mstorm pf statistics */ #define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[22].base + ((pf_id) * IRO[22].m1)) -#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size) + (IRO[29].base + ((pf_id) * IRO[29].m1)) +#define MSTORM_ETH_PF_STAT_SIZE (IRO[29].size) /* Ustorm queue statistics */ #define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[23].base + ((stat_counter_id) * IRO[23].m1)) -#define USTORM_QUEUE_STAT_SIZE (IRO[23].size) + (IRO[30].base + ((stat_counter_id) * IRO[30].m1)) +#define USTORM_QUEUE_STAT_SIZE (IRO[30].size) /* Ustorm pf statistics */ -#define USTORM_ETH_PF_STAT_OFFSET(pf_id)\ - (IRO[24].base + ((pf_id) * IRO[24].m1)) -#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size) +#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[31].base + ((pf_id) * IRO[31].m1)) +#define USTORM_ETH_PF_STAT_SIZE (IRO[31].size) /* Pstorm queue statistics */ -#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[25].base + ((stat_counter_id) * IRO[25].m1)) -#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size) +#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[32].base + ((stat_counter_id) * IRO[32].m1)) +#define PSTORM_QUEUE_STAT_SIZE (IRO[32].size) /* Pstorm pf statistics */ #define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[26].base + ((pf_id) * IRO[26].m1)) -#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size) + (IRO[33].base + ((pf_id) * IRO[33].m1)) +#define PSTORM_ETH_PF_STAT_SIZE (IRO[33].size) /* Control frame's EthType configuration for TX control frame security */ -#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \ - (IRO[27].base + ((eth_type_id) * IRO[27].m1)) -#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size) +#define 
PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \ + (IRO[34].base + ((eth_type_id) * IRO[34].m1)) +#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[34].size) /* Tstorm last parser message */ -#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base) -#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size) +#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[35].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[35].size) /* Tstorm Eth limit Rx rate */ -#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ - (IRO[29].base + ((pf_id) * IRO[29].m1)) -#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size) +#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ + (IRO[36].base + ((pf_id) * IRO[36].m1)) +#define ETH_RX_RATE_LIMIT_SIZE (IRO[36].size) /* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. - * Use eth_tstorm_rss_update_data for update. + * Use eth_tstorm_rss_update_data for update */ #define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \ - (IRO[30].base + ((pf_id) * IRO[30].m1)) -#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size) + (IRO[37].base + ((pf_id) * IRO[37].m1)) +#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[37].size) /* Xstorm queue zone */ #define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ - (IRO[31].base + ((queue_id) * IRO[31].m1)) -#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size) + (IRO[38].base + ((queue_id) * IRO[38].m1)) +#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[38].size) /* Ystorm cqe producer */ #define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[32].base + ((rss_id) * IRO[32].m1)) -#define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size) + (IRO[39].base + ((rss_id) * IRO[39].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[39].size) /* Ustorm cqe producer */ #define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[33].base + ((rss_id) * IRO[33].m1)) -#define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size) + (IRO[40].base + ((rss_id) * IRO[40].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[40].size) /* Ustorm grq producer */ #define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ - (IRO[34].base + ((pf_id) * IRO[34].m1)) -#define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size) + (IRO[41].base + ((pf_id) * IRO[41].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[41].size) /* Tstorm cmdq-cons of given command queue-id */ #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ - (IRO[35].base + ((cmdq_queue_id) * IRO[35].m1)) -#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size) + (IRO[42].base + ((cmdq_queue_id) * IRO[42].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[42].size) /* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, - * BDqueue-id. 
+ * BDqueue-id */ -#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) -#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) +#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \ + (IRO[43].base + ((storage_func_id) * IRO[43].m1) + \ + ((bdq_id) * IRO[43].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[43].size) /* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ -#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[37].base + ((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2)) -#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size) +#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \ + (IRO[44].base + ((storage_func_id) * IRO[44].m1) + \ + ((bdq_id) * IRO[44].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[44].size) /* Tstorm iSCSI RX stats */ -#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[38].base + ((pf_id) * IRO[38].m1)) -#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) +#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[45].base + ((storage_func_id) * IRO[45].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[45].size) /* Mstorm iSCSI RX stats */ -#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[39].base + ((pf_id) * IRO[39].m1)) -#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) +#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[46].base + ((storage_func_id) * IRO[46].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[46].size) /* Ustorm iSCSI RX stats */ -#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[40].base + ((pf_id) * IRO[40].m1)) -#define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size) +#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[47].base + ((storage_func_id) * IRO[47].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[47].size) /* Xstorm iSCSI TX stats */ -#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[41].base + ((pf_id) * IRO[41].m1)) -#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) +#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[48].base + ((storage_func_id) * IRO[48].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[48].size) /* Ystorm iSCSI TX stats */ -#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[42].base + ((pf_id) * IRO[42].m1)) -#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) +#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[49].base + ((storage_func_id) * IRO[49].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[49].size) /* Pstorm iSCSI TX stats */ -#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[43].base + ((pf_id) * IRO[43].m1)) -#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size) +#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[50].base + ((storage_func_id) * IRO[50].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[50].size) /* Tstorm FCoE RX stats */ #define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ - (IRO[44].base + ((pf_id) * IRO[44].m1)) -#define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size) + (IRO[51].base + ((pf_id) * IRO[51].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[51].size) /* Pstorm FCoE TX stats */ #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ - (IRO[45].base + ((pf_id) * IRO[45].m1)) -#define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size) + (IRO[52].base + ((pf_id) * IRO[52].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[52].size) /* Pstorm RDMA queue statistics */ #define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) -#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) + 
(IRO[53].base + ((rdma_stat_counter_id) * IRO[53].m1)) +#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[53].size) /* Tstorm RDMA queue statistics */ #define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[47].base + ((rdma_stat_counter_id) * IRO[47].m1)) -#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size) + (IRO[54].base + ((rdma_stat_counter_id) * IRO[54].m1)) +#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[54].size) /* Xstorm error level for assert */ #define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[48].base + ((pf_id) * IRO[48].m1)) -#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) + (IRO[55].base + ((pf_id) * IRO[55].m1)) +#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[55].size) /* Ystorm error level for assert */ #define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[49].base + ((pf_id) * IRO[49].m1)) -#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) + (IRO[56].base + ((pf_id) * IRO[56].m1)) +#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[56].size) /* Pstorm error level for assert */ #define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[50].base + ((pf_id) * IRO[50].m1)) -#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) + (IRO[57].base + ((pf_id) * IRO[57].m1)) +#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[57].size) /* Tstorm error level for assert */ #define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[51].base + ((pf_id) * IRO[51].m1)) -#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) + (IRO[58].base + ((pf_id) * IRO[58].m1)) +#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[58].size) /* Mstorm error level for assert */ #define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[52].base + ((pf_id) * IRO[52].m1)) -#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) + (IRO[59].base + ((pf_id) * IRO[59].m1)) +#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[59].size) /* Ustorm error level for assert */ #define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[53].base + ((pf_id) * IRO[53].m1)) -#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size) + (IRO[60].base + ((pf_id) * IRO[60].m1)) +#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[60].size) /* Xstorm iWARP rxmit stats */ #define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ - (IRO[54].base + ((pf_id) * IRO[54].m1)) -#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size) + (IRO[61].base + ((pf_id) * IRO[61].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[61].size) /* Tstorm RoCE Event Statistics */ -#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ - (IRO[55].base + ((roce_pf_id) * IRO[55].m1)) -#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size) +#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ + (IRO[62].base + ((roce_pf_id) * IRO[62].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[62].size) /* DCQCN Received Statistics */ -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ - (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id)\ + (IRO[63].base + ((roce_pf_id) * IRO[63].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[63].size) /* RoCE Error Statistics */ -#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ - (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) -#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size) +#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ + (IRO[64].base + ((roce_pf_id) * IRO[64].m1)) +#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[64].size) /* DCQCN Sent Statistics */ -#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ - (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) -#define 
PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size) +#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ + (IRO[65].base + ((roce_pf_id) * IRO[65].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[65].size) /* RoCE CQEs Statistics */ -#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ - (IRO[59].base + ((roce_pf_id) * IRO[59].m1)) -#define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size) - -static const struct iro iro_arr[60] = { - {0x0, 0x0, 0x0, 0x0, 0x8}, - {0x4cb8, 0x88, 0x0, 0x0, 0x88}, - {0x6530, 0x20, 0x0, 0x0, 0x20}, - {0xb00, 0x8, 0x0, 0x0, 0x4}, - {0xa80, 0x8, 0x0, 0x0, 0x4}, - {0x0, 0x8, 0x0, 0x0, 0x2}, - {0x80, 0x8, 0x0, 0x0, 0x4}, - {0x84, 0x8, 0x0, 0x0, 0x2}, - {0x4c48, 0x0, 0x0, 0x0, 0x78}, - {0x3e38, 0x0, 0x0, 0x0, 0x78}, - {0x3ef8, 0x0, 0x0, 0x0, 0x78}, - {0x4c40, 0x0, 0x0, 0x0, 0x78}, - {0x4998, 0x0, 0x0, 0x0, 0x78}, - {0x7f50, 0x0, 0x0, 0x0, 0x78}, - {0xa28, 0x8, 0x0, 0x0, 0x8}, - {0x6210, 0x10, 0x0, 0x0, 0x10}, - {0xb820, 0x30, 0x0, 0x0, 0x30}, - {0xa990, 0x30, 0x0, 0x0, 0x30}, - {0x4b68, 0x80, 0x0, 0x0, 0x40}, - {0x1f8, 0x4, 0x0, 0x0, 0x4}, - {0x53a8, 0x80, 0x4, 0x0, 0x4}, - {0xc7d0, 0x0, 0x0, 0x0, 0x4}, - {0x4ba8, 0x80, 0x0, 0x0, 0x20}, - {0x8158, 0x40, 0x0, 0x0, 0x30}, - {0xe770, 0x60, 0x0, 0x0, 0x60}, - {0x4090, 0x80, 0x0, 0x0, 0x38}, - {0xfea8, 0x78, 0x0, 0x0, 0x78}, - {0x1f8, 0x4, 0x0, 0x0, 0x4}, - {0xaf20, 0x0, 0x0, 0x0, 0xf0}, - {0xb010, 0x8, 0x0, 0x0, 0x8}, - {0xc00, 0x8, 0x0, 0x0, 0x8}, - {0x1f8, 0x8, 0x0, 0x0, 0x8}, - {0xac0, 0x8, 0x0, 0x0, 0x8}, - {0x2578, 0x8, 0x0, 0x0, 0x8}, - {0x24f8, 0x8, 0x0, 0x0, 0x8}, - {0x0, 0x8, 0x0, 0x0, 0x8}, - {0x400, 0x18, 0x8, 0x0, 0x8}, - {0xb78, 0x18, 0x8, 0x0, 0x2}, - {0xd898, 0x50, 0x0, 0x0, 0x3c}, - {0x12908, 0x18, 0x0, 0x0, 0x10}, - {0x11aa8, 0x40, 0x0, 0x0, 0x18}, - {0xa588, 0x50, 0x0, 0x0, 0x20}, - {0x8f00, 0x40, 0x0, 0x0, 0x28}, - {0x10e30, 0x18, 0x0, 0x0, 0x10}, - {0xde48, 0x48, 0x0, 0x0, 0x38}, - {0x11298, 0x20, 0x0, 0x0, 0x20}, - {0x40c8, 0x80, 0x0, 0x0, 0x10}, - {0x5048, 0x10, 0x0, 0x0, 0x10}, - {0xc748, 0x8, 0x0, 0x0, 0x1}, - {0xa928, 0x8, 0x0, 0x0, 0x1}, - {0x11a30, 0x8, 0x0, 0x0, 0x1}, - {0xf030, 0x8, 0x0, 0x0, 0x1}, - {0x13028, 0x8, 0x0, 0x0, 0x1}, - {0x12c58, 0x8, 0x0, 0x0, 0x1}, - {0xc9b8, 0x30, 0x0, 0x0, 0x10}, - {0xed90, 0x28, 0x0, 0x0, 0x28}, - {0xad20, 0x18, 0x0, 0x0, 0x18}, - {0xaea0, 0x8, 0x0, 0x0, 0x8}, - {0x13c38, 0x8, 0x0, 0x0, 0x8}, - {0x13c50, 0x18, 0x0, 0x0, 0x18}, +#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ + (IRO[66].base + ((roce_pf_id) * IRO[66].m1)) +#define USTORM_ROCE_CQE_STATS_SIZE (IRO[66].size) + +/* IRO Array */ +static const u32 iro_arr[] = { + 0x00000000, 0x00000000, 0x00080000, + 0x00003288, 0x00000088, 0x00880000, + 0x000058e8, 0x00000020, 0x00200000, + 0x00000b00, 0x00000008, 0x00040000, + 0x00000a80, 0x00000008, 0x00040000, + 0x00000000, 0x00000008, 0x00020000, + 0x00000080, 0x00000008, 0x00040000, + 0x00000084, 0x00000008, 0x00020000, + 0x00005718, 0x00000004, 0x00040000, + 0x00004dd0, 0x00000000, 0x00780000, + 0x00003e40, 0x00000000, 0x00780000, + 0x00004480, 0x00000000, 0x00780000, + 0x00003210, 0x00000000, 0x00780000, + 0x00003b50, 0x00000000, 0x00780000, + 0x00007f58, 0x00000000, 0x00780000, + 0x00005f58, 0x00000000, 0x00080000, + 0x00007100, 0x00000000, 0x00080000, + 0x0000aea0, 0x00000000, 0x00080000, + 0x00004398, 0x00000000, 0x00080000, + 0x0000a5a0, 0x00000000, 0x00080000, + 0x0000bde8, 0x00000000, 0x00080000, + 0x00000020, 0x00000004, 0x00040000, + 0x000056c8, 0x00000010, 0x00100000, + 0x0000c210, 0x00000030, 0x00300000, + 0x0000b088, 0x00000038, 0x00380000, + 
0x00003d20, 0x00000080, 0x00400000, + 0x0000bf60, 0x00000000, 0x00040000, + 0x00004560, 0x00040080, 0x00040000, + 0x000001f8, 0x00000004, 0x00040000, + 0x00003d60, 0x00000080, 0x00200000, + 0x00008960, 0x00000040, 0x00300000, + 0x0000e840, 0x00000060, 0x00600000, + 0x00004618, 0x00000080, 0x00380000, + 0x00010738, 0x000000c0, 0x00c00000, + 0x000001f8, 0x00000002, 0x00020000, + 0x0000a2a0, 0x00000000, 0x01080000, + 0x0000a3a8, 0x00000008, 0x00080000, + 0x000001c0, 0x00000008, 0x00080000, + 0x000001f8, 0x00000008, 0x00080000, + 0x00000ac0, 0x00000008, 0x00080000, + 0x00002578, 0x00000008, 0x00080000, + 0x000024f8, 0x00000008, 0x00080000, + 0x00000280, 0x00000008, 0x00080000, + 0x00000680, 0x00080018, 0x00080000, + 0x00000b78, 0x00080018, 0x00020000, + 0x0000c640, 0x00000050, 0x003c0000, + 0x00012038, 0x00000018, 0x00100000, + 0x00011b00, 0x00000040, 0x00180000, + 0x000095d0, 0x00000050, 0x00200000, + 0x00008b10, 0x00000040, 0x00280000, + 0x00011640, 0x00000018, 0x00100000, + 0x0000c828, 0x00000048, 0x00380000, + 0x00011710, 0x00000020, 0x00200000, + 0x00004650, 0x00000080, 0x00100000, + 0x00003618, 0x00000010, 0x00100000, + 0x0000a968, 0x00000008, 0x00010000, + 0x000097a0, 0x00000008, 0x00010000, + 0x00011990, 0x00000008, 0x00010000, + 0x0000f018, 0x00000008, 0x00010000, + 0x00012628, 0x00000008, 0x00010000, + 0x00011da8, 0x00000008, 0x00010000, + 0x0000aa78, 0x00000030, 0x00100000, + 0x0000d768, 0x00000028, 0x00280000, + 0x00009a58, 0x00000018, 0x00180000, + 0x00009bd8, 0x00000008, 0x00080000, + 0x00013a18, 0x00000008, 0x00080000, + 0x000126e8, 0x00000018, 0x00180000, + 0x0000e608, 0x00500288, 0x00100000, + 0x00012970, 0x00000138, 0x00280000, }; /* Runtime array offsets */ -#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 -#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 -#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 -#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 -#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 -#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 -#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 -#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 -#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 -#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 -#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 -#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 -#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 -#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 -#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 -#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 -#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 -#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 -#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET 18 -#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET 19 -#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET 20 -#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET 21 -#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET 22 -#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET 23 -#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET 24 -#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET 25 -#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET 26 -#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET 27 -#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET 28 -#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET 29 -#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET 30 -#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET 31 -#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET 32 -#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET 33 -#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET 34 -#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET 35 -#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET 36 -#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET 37 -#define 
IGU_REG_PF_CONFIGURATION_RT_OFFSET 38 -#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 39 -#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 40 -#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 41 -#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 42 -#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 43 -#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 44 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 45 -#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024 -#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1069 -#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 1024 -#define CAU_REG_PI_MEMORY_RT_OFFSET 2093 -#define CAU_REG_PI_MEMORY_RT_SIZE 4416 -#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6509 -#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6510 -#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6511 -#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6512 -#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6513 -#define PRS_REG_SEARCH_TCP_RT_OFFSET 6514 -#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6515 -#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6516 -#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6517 -#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6518 -#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6519 -#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6520 -#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6521 -#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6522 -#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6523 -#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6524 -#define SRC_REG_FIRSTFREE_RT_OFFSET 6525 -#define SRC_REG_FIRSTFREE_RT_SIZE 2 -#define SRC_REG_LASTFREE_RT_OFFSET 6527 -#define SRC_REG_LASTFREE_RT_SIZE 2 -#define SRC_REG_COUNTFREE_RT_OFFSET 6529 -#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6530 -#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6531 -#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6532 -#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6533 -#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6534 -#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6535 -#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6536 -#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6537 -#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6538 -#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6539 -#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6540 -#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6541 -#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6542 -#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6543 -#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6544 -#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6545 -#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6546 -#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6547 -#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6548 -#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6549 -#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6550 -#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6551 -#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6552 -#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6553 -#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6554 -#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6555 -#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6556 -#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6557 -#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6558 -#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6559 -#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6560 -#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6561 -#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET 6562 -#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET 6563 -#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET 6564 -#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET 6565 -#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6566 -#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 26414 -#define PGLUE_REG_B_VF_BASE_RT_OFFSET 32980 
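(Editorial aside on the hunk a little further up: the struct-valued `iro_arr` is replaced by a flat u32 table, three dwords per IRO entry. Comparing rows whose m1/m2/size survive the regeneration — e.g. the TSTORM_SCSI_BDQ_EXT_PROD row, old `{0x400, 0x18, 0x8, 0x0, 0x8}` versus new `0x00000680, 0x00080018, 0x00080000` — suggests the packing dword0 = base, dword1 = (m2 << 16) | m1, dword2 = (size << 16) | m3. A minimal decode sketch under that assumption; the field layout is inferred from the table itself, not taken from driver code:)

```c
#include <stdint.h>
#include <stdio.h>

/* Inferred layout of one packed IRO entry (three dwords).
 * Assumption: dword0 = base, dword1 = m2:m1, dword2 = size:m3.
 */
struct iro_entry {
	uint32_t base;  /* RAM base offset */
	uint16_t m1;    /* stride for the first index */
	uint16_t m2;    /* stride for the second index */
	uint16_t m3;    /* stride for a third index (unused here) */
	uint16_t size;  /* element size in bytes */
};

static struct iro_entry iro_decode(const uint32_t *p)
{
	struct iro_entry e = {
		.base = p[0],
		.m1   = (uint16_t)(p[1] & 0xffff),
		.m2   = (uint16_t)(p[1] >> 16),
		.m3   = (uint16_t)(p[2] & 0xffff),
		.size = (uint16_t)(p[2] >> 16),
	};
	return e;
}

int main(void)
{
	/* Row 43 of the new table: TSTORM_SCSI_BDQ_EXT_PROD */
	const uint32_t row[3] = { 0x00000680, 0x00080018, 0x00080000 };
	struct iro_entry e = iro_decode(row);
	unsigned int storage_func_id = 2, bdq_id = 1;

	/* Mirrors TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) */
	uint32_t off = e.base + storage_func_id * e.m1 + bdq_id * e.m2;

	printf("base=0x%x m1=0x%x m2=0x%x size=0x%x -> off=0x%x\n",
	       e.base, e.m1, e.m2, e.size, off);
	return 0;
}
```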
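(A companion note on the many MASK/SHIFT pairs this header adds — `struct dbg_block_chip`, `struct dbg_reset_reg`, `struct fw_overlay_buf_hdr` above: each pair describes a bitfield inside one packed dword, read with a GET_FIELD-style helper. The helper below is a local sketch of that idiom, not quoted from the driver; the two field defines are copied from `struct fw_overlay_buf_hdr`:)

```c
#include <stdint.h>
#include <stdio.h>

/* Local GET_FIELD-style sketch: shift the dword down, then mask. */
#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & (name##_MASK))

/* From struct fw_overlay_buf_hdr above */
#define FW_OVERLAY_BUF_HDR_STORM_ID_MASK  0xFF
#define FW_OVERLAY_BUF_HDR_STORM_ID_SHIFT 0
#define FW_OVERLAY_BUF_HDR_BUF_SIZE_MASK  0xFFFFFF
#define FW_OVERLAY_BUF_HDR_BUF_SIZE_SHIFT 8

int main(void)
{
	uint32_t hdr = 0x00012003; /* example: storm 3, 0x120 dwords */

	printf("storm_id=%u buf_size=%u dwords\n",
	       (unsigned int)GET_FIELD(hdr, FW_OVERLAY_BUF_HDR_STORM_ID),
	       (unsigned int)GET_FIELD(hdr, FW_OVERLAY_BUF_HDR_BUF_SIZE));
	return 0;
}
```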
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 32981 -#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 32982 -#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 32983 -#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 32984 -#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 32985 -#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 32986 -#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 32987 -#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 32988 -#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 32989 -#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 32990 -#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 32991 -#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 32992 -#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 -#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 33408 -#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608 -#define QM_REG_MAXPQSIZE_0_RT_OFFSET 34016 -#define QM_REG_MAXPQSIZE_1_RT_OFFSET 34017 -#define QM_REG_MAXPQSIZE_2_RT_OFFSET 34018 -#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 34019 -#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 34020 -#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 34021 -#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 34022 -#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 34023 -#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 34024 -#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 34025 -#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 34026 -#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 34027 -#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 34028 -#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 34029 -#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 34030 -#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 34031 -#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 34032 -#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 34033 -#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 34034 -#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 34035 -#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 34036 -#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 34037 -#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 34038 -#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 34039 -#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 34040 -#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 34041 -#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 34042 -#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 34043 -#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 34044 -#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 34045 -#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 34046 -#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 34047 -#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 34048 -#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 34049 -#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 34050 -#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 34051 -#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 34052 -#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 34053 -#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 34054 -#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 34055 -#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 34056 -#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 34057 -#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 34058 -#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 34059 -#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 34060 -#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 34061 -#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 34062 -#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 34063 -#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 34064 -#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 34065 -#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 34066 -#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 34067 -#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 34068 -#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 34069 -#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 34070 -#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 34071 -#define 
QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 34072 -#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 34073 -#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 34074 -#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 34075 -#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 34076 -#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 34077 -#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 34078 -#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 34079 -#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 34080 -#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 34081 -#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082 -#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083 -#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 -#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211 -#define QM_REG_PTRTBLOTHER_RT_SIZE 256 -#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467 -#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468 -#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469 -#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470 -#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471 -#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472 -#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473 -#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474 -#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475 -#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476 -#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477 -#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478 -#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479 -#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480 -#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481 -#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482 -#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483 -#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484 -#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485 -#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486 -#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487 -#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488 -#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489 -#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490 -#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491 -#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492 -#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493 -#define QM_REG_PQTX2PF_0_RT_OFFSET 34494 -#define QM_REG_PQTX2PF_1_RT_OFFSET 34495 -#define QM_REG_PQTX2PF_2_RT_OFFSET 34496 -#define QM_REG_PQTX2PF_3_RT_OFFSET 34497 -#define QM_REG_PQTX2PF_4_RT_OFFSET 34498 -#define QM_REG_PQTX2PF_5_RT_OFFSET 34499 -#define QM_REG_PQTX2PF_6_RT_OFFSET 34500 -#define QM_REG_PQTX2PF_7_RT_OFFSET 34501 -#define QM_REG_PQTX2PF_8_RT_OFFSET 34502 -#define QM_REG_PQTX2PF_9_RT_OFFSET 34503 -#define QM_REG_PQTX2PF_10_RT_OFFSET 34504 -#define QM_REG_PQTX2PF_11_RT_OFFSET 34505 -#define QM_REG_PQTX2PF_12_RT_OFFSET 34506 -#define QM_REG_PQTX2PF_13_RT_OFFSET 34507 -#define QM_REG_PQTX2PF_14_RT_OFFSET 34508 -#define QM_REG_PQTX2PF_15_RT_OFFSET 34509 -#define QM_REG_PQTX2PF_16_RT_OFFSET 34510 -#define QM_REG_PQTX2PF_17_RT_OFFSET 34511 -#define QM_REG_PQTX2PF_18_RT_OFFSET 34512 -#define QM_REG_PQTX2PF_19_RT_OFFSET 34513 -#define QM_REG_PQTX2PF_20_RT_OFFSET 34514 -#define QM_REG_PQTX2PF_21_RT_OFFSET 34515 -#define QM_REG_PQTX2PF_22_RT_OFFSET 34516 -#define QM_REG_PQTX2PF_23_RT_OFFSET 34517 -#define QM_REG_PQTX2PF_24_RT_OFFSET 34518 -#define QM_REG_PQTX2PF_25_RT_OFFSET 34519 -#define QM_REG_PQTX2PF_26_RT_OFFSET 34520 -#define QM_REG_PQTX2PF_27_RT_OFFSET 34521 -#define QM_REG_PQTX2PF_28_RT_OFFSET 34522 -#define QM_REG_PQTX2PF_29_RT_OFFSET 34523 -#define QM_REG_PQTX2PF_30_RT_OFFSET 34524 -#define QM_REG_PQTX2PF_31_RT_OFFSET 34525 -#define QM_REG_PQTX2PF_32_RT_OFFSET 34526 -#define QM_REG_PQTX2PF_33_RT_OFFSET 34527 -#define QM_REG_PQTX2PF_34_RT_OFFSET 34528 
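(The QM_REG_PQTX2PF_0 through _63 run being renumbered here is a register family: slot N always sits at the _0 offset plus N, so code only ever needs the base define. A sketch of that indexing against a host-side shadow of the runtime array — the shadow array, its size, and the store helper are stand-ins for this example, using the pre-patch numbering shown above:)

```c
#include <assert.h>
#include <stdint.h>

#define QM_REG_PQTX2PF_0_RT_OFFSET 34494u /* pre-patch value, from above */
#define QM_NUM_PQTX2PF             64u    /* slots _0 .. _63 */

/* Stand-in shadow of the runtime init array; a real size would come
 * from the generated RUNTIME_ARRAY_SIZE, which this sketch does not use.
 */
static uint32_t rt_shadow[65536];

/* Map one tx PQ group to a PF by writing its consecutive RT slot. */
static void qm_store_pqtx2pf(unsigned int group, uint32_t pf_id)
{
	assert(group < QM_NUM_PQTX2PF);
	rt_shadow[QM_REG_PQTX2PF_0_RT_OFFSET + group] = pf_id;
}

int main(void)
{
	qm_store_pqtx2pf(5, 2); /* PQ group 5 belongs to PF 2 */
	return (int)rt_shadow[QM_REG_PQTX2PF_0_RT_OFFSET + 5] - 2; /* 0 */
}
```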
-#define QM_REG_PQTX2PF_35_RT_OFFSET 34529 -#define QM_REG_PQTX2PF_36_RT_OFFSET 34530 -#define QM_REG_PQTX2PF_37_RT_OFFSET 34531 -#define QM_REG_PQTX2PF_38_RT_OFFSET 34532 -#define QM_REG_PQTX2PF_39_RT_OFFSET 34533 -#define QM_REG_PQTX2PF_40_RT_OFFSET 34534 -#define QM_REG_PQTX2PF_41_RT_OFFSET 34535 -#define QM_REG_PQTX2PF_42_RT_OFFSET 34536 -#define QM_REG_PQTX2PF_43_RT_OFFSET 34537 -#define QM_REG_PQTX2PF_44_RT_OFFSET 34538 -#define QM_REG_PQTX2PF_45_RT_OFFSET 34539 -#define QM_REG_PQTX2PF_46_RT_OFFSET 34540 -#define QM_REG_PQTX2PF_47_RT_OFFSET 34541 -#define QM_REG_PQTX2PF_48_RT_OFFSET 34542 -#define QM_REG_PQTX2PF_49_RT_OFFSET 34543 -#define QM_REG_PQTX2PF_50_RT_OFFSET 34544 -#define QM_REG_PQTX2PF_51_RT_OFFSET 34545 -#define QM_REG_PQTX2PF_52_RT_OFFSET 34546 -#define QM_REG_PQTX2PF_53_RT_OFFSET 34547 -#define QM_REG_PQTX2PF_54_RT_OFFSET 34548 -#define QM_REG_PQTX2PF_55_RT_OFFSET 34549 -#define QM_REG_PQTX2PF_56_RT_OFFSET 34550 -#define QM_REG_PQTX2PF_57_RT_OFFSET 34551 -#define QM_REG_PQTX2PF_58_RT_OFFSET 34552 -#define QM_REG_PQTX2PF_59_RT_OFFSET 34553 -#define QM_REG_PQTX2PF_60_RT_OFFSET 34554 -#define QM_REG_PQTX2PF_61_RT_OFFSET 34555 -#define QM_REG_PQTX2PF_62_RT_OFFSET 34556 -#define QM_REG_PQTX2PF_63_RT_OFFSET 34557 -#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558 -#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559 -#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560 -#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561 -#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562 -#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563 -#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564 -#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565 -#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566 -#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567 -#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568 -#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569 -#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570 -#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571 -#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572 -#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573 -#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574 -#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575 -#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576 -#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577 -#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578 -#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579 -#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580 -#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581 -#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582 -#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583 -#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584 -#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585 -#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586 -#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 -#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842 -#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 -#define QM_REG_RLGLBLCRD_RT_OFFSET 35098 -#define QM_REG_RLGLBLCRD_RT_SIZE 256 -#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354 -#define QM_REG_RLPFPERIOD_RT_OFFSET 35355 -#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356 -#define QM_REG_RLPFINCVAL_RT_OFFSET 35357 -#define QM_REG_RLPFINCVAL_RT_SIZE 16 -#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373 -#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_RLPFCRD_RT_OFFSET 35389 -#define QM_REG_RLPFCRD_RT_SIZE 16 -#define QM_REG_RLPFENABLE_RT_OFFSET 35405 -#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406 -#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407 -#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 -#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423 -#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_WFQPFCRD_RT_OFFSET 35439 -#define QM_REG_WFQPFCRD_RT_SIZE 256 
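(Several of the removed entries carry an explicit _RT_SIZE — QM_REG_RLGLBLINCVAL_RT_SIZE 256, QM_REG_WFQPFCRD_RT_SIZE 256, and so on: they name whole blocks of consecutive runtime slots rather than single registers. A sketch of priming such a block, under the same stand-in shadow-array assumption as the previous example; init code would later flush the shadow to the chip in one pass:)

```c
#include <stdint.h>

#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586u /* pre-patch value, from above */
#define QM_REG_RLGLBLINCVAL_RT_SIZE   256u

static uint32_t rt_shadow[65536]; /* stand-in, as in the previous sketch */

/* Fill one sized runtime block with a single value. */
static void rt_fill_block(uint32_t rt_offset, uint32_t n, uint32_t val)
{
	uint32_t i;

	for (i = 0; i < n; i++)
		rt_shadow[rt_offset + i] = val;
}

int main(void)
{
	/* Give every global rate limiter the same credit increment. */
	rt_fill_block(QM_REG_RLGLBLINCVAL_RT_OFFSET,
		      QM_REG_RLGLBLINCVAL_RT_SIZE, 0x64);
	return 0;
}
```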
-#define QM_REG_WFQPFENABLE_RT_OFFSET 35695 -#define QM_REG_WFQVPENABLE_RT_OFFSET 35696 -#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697 -#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 -#define QM_REG_TXPQMAP_RT_OFFSET 36209 -#define QM_REG_TXPQMAP_RT_SIZE 512 -#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721 -#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 37233 -#define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 37745 -#define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_PTRTBLTX_RT_OFFSET 38257 -#define QM_REG_PTRTBLTX_RT_SIZE 1024 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281 -#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320 -#define QM_REG_VOQCRDLINE_RT_OFFSET 39601 -#define QM_REG_VOQCRDLINE_RT_SIZE 36 -#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637 -#define QM_REG_VOQINITCRDLINE_RT_SIZE 36 -#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674 -#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39786 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39794 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40818 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41330 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41842 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42354 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42866 -#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42898 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42899 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42900 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42901 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42902 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42903 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42904 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42905 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42906 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42907 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42908 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42909 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42910 -#define 
PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42911 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42912 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42913 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42914 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42915 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42916 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42917 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42918 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42919 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42920 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42921 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42922 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42923 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42924 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42925 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42926 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42927 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42928 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42929 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42930 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42931 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42932 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42933 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42934 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42935 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42936 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42937 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42938 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42939 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42940 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42941 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42942 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42943 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42944 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42945 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42946 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42947 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42948 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42949 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42950 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42951 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42952 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42953 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42954 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42955 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42956 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42957 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42958 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42959 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42960 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42961 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42962 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42963 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42964 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42965 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42966 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42967 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42968 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42969 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42970 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42971 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42972 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 
42973 -#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42974 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42975 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42976 -#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42977 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42978 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42979 -#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42980 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42981 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42982 -#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42983 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42984 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42985 -#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42986 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42987 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42988 -#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42989 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42990 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42991 -#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42992 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42993 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42994 -#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42995 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42996 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42997 -#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42998 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 42999 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43000 -#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43001 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43002 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43003 -#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43004 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43005 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43006 -#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43007 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43008 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43009 -#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43010 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43011 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43012 -#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43013 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43014 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43015 -#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43016 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43017 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43018 -#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43019 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43020 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43021 - -#define RUNTIME_ARRAY_SIZE 43022 - +#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 +#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 +#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 +#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 +#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 +#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 +#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 +#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 +#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 +#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 +#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 +#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 +#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 +#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 +#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 +#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 +#define DORQ_REG_VF_ICID_BIT_SHIFT_NORM_RT_OFFSET 16 
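/* Illustrative sketch (not part of the diff): the *_RT_OFFSET constants in
 * this table are indices into the driver's runtime-init array, not register
 * addresses. Values are staged with STORE_RT_REG(), visible in the
 * qed_init_fw_funcs.c hunks later in this diff, and flushed to the chip by
 * the init code, which is why the whole table can be renumbered as long as
 * driver and firmware headers are regenerated together. The wrapper below
 * is hypothetical; STORE_RT_REG() and struct qed_hwfn are the driver's own.
 */
static inline void example_store_pf_max_icid(struct qed_hwfn *p_hwfn,
					     u32 max_icid)
{
	/* Stage the value at RT-array index 0 for the next init run */
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, max_icid);
}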
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 17 +#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 18 +#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 19 +#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 20 +#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 21 +#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 22 +#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 23 +#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 24 +#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 25 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 26 +#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 +#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 762 +#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 +#define CAU_REG_PI_MEMORY_RT_OFFSET 1498 +#define CAU_REG_PI_MEMORY_RT_SIZE 4416 +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 5914 +#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 5915 +#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 5916 +#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 5917 +#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 5918 +#define PRS_REG_SEARCH_TCP_RT_OFFSET 5919 +#define PRS_REG_SEARCH_FCOE_RT_OFFSET 5920 +#define PRS_REG_SEARCH_ROCE_RT_OFFSET 5921 +#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 5922 +#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 5923 +#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 5924 +#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 5925 +#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 5926 +#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 5927 +#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 5928 +#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 5929 +#define SRC_REG_FIRSTFREE_RT_OFFSET 5930 +#define SRC_REG_FIRSTFREE_RT_SIZE 2 +#define SRC_REG_LASTFREE_RT_OFFSET 5932 +#define SRC_REG_LASTFREE_RT_SIZE 2 +#define SRC_REG_COUNTFREE_RT_OFFSET 5934 +#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 5935 +#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 5936 +#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 5937 +#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 5938 +#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 5939 +#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 5940 +#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 5941 +#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 5942 +#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 5943 +#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 5944 +#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 5945 +#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 5946 +#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 5947 +#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 5948 +#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 5949 +#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 5950 +#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 5951 +#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 5952 +#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 5953 +#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5954 +#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5955 +#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5956 +#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 5957 +#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 5958 +#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 5959 +#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 5960 +#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 5961 +#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 5962 +#define PSWRQ2_REG_VF_BASE_RT_OFFSET 5963 +#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 5964 +#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 5965 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 5966 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 5967 +#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 +#define PGLUE_REG_B_VF_BASE_RT_OFFSET 27967 +#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 27968 +#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 
27969 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 27970 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 27971 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 27972 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 27973 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 27974 +#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 27975 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 27976 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 27977 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 27978 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 27979 +#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 28395 +#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 +#define QM_REG_MAXPQSIZE_0_RT_OFFSET 28907 +#define QM_REG_MAXPQSIZE_1_RT_OFFSET 28908 +#define QM_REG_MAXPQSIZE_2_RT_OFFSET 28909 +#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 28910 +#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 28911 +#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 28912 +#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 28913 +#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 28914 +#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 28915 +#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 28916 +#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 28917 +#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 28918 +#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 28919 +#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 28920 +#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 28921 +#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 28922 +#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 28923 +#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 28924 +#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 28925 +#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 28926 +#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 28927 +#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 28928 +#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 28929 +#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 28930 +#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 28931 +#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 28932 +#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 28933 +#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 28934 +#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 28935 +#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 28936 +#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 28937 +#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 28938 +#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 28939 +#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 28940 +#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 28941 +#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 28942 +#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 28943 +#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 28944 +#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 28945 +#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 28946 +#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 28947 +#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 28948 +#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 28949 +#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 28950 +#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 28951 +#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 28952 +#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 28953 +#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 28954 +#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 28955 +#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 28956 +#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 28957 +#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 28958 +#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 28959 +#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 28960 +#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 28961 +#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 28962 +#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 28963 +#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 28964 +#define 
QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 28965 +#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 28966 +#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 28967 +#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 28968 +#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 28969 +#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 28970 +#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 28971 +#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 28972 +#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 28973 +#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 28974 +#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 +#define QM_REG_PTRTBLOTHER_RT_OFFSET 29102 +#define QM_REG_PTRTBLOTHER_RT_SIZE 256 +#define QM_REG_VOQCRDLINE_RT_OFFSET 29358 +#define QM_REG_VOQCRDLINE_RT_SIZE 20 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29378 +#define QM_REG_VOQINITCRDLINE_RT_SIZE 20 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29398 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29399 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29400 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29401 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29402 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29403 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29404 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29405 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29406 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29407 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29408 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29409 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29410 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29411 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29412 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29413 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29414 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29415 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29416 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29417 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29418 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29419 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29420 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29421 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29422 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29423 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29424 +#define QM_REG_PQTX2PF_0_RT_OFFSET 29425 +#define QM_REG_PQTX2PF_1_RT_OFFSET 29426 +#define QM_REG_PQTX2PF_2_RT_OFFSET 29427 +#define QM_REG_PQTX2PF_3_RT_OFFSET 29428 +#define QM_REG_PQTX2PF_4_RT_OFFSET 29429 +#define QM_REG_PQTX2PF_5_RT_OFFSET 29430 +#define QM_REG_PQTX2PF_6_RT_OFFSET 29431 +#define QM_REG_PQTX2PF_7_RT_OFFSET 29432 +#define QM_REG_PQTX2PF_8_RT_OFFSET 29433 +#define QM_REG_PQTX2PF_9_RT_OFFSET 29434 +#define QM_REG_PQTX2PF_10_RT_OFFSET 29435 +#define QM_REG_PQTX2PF_11_RT_OFFSET 29436 +#define QM_REG_PQTX2PF_12_RT_OFFSET 29437 +#define QM_REG_PQTX2PF_13_RT_OFFSET 29438 +#define QM_REG_PQTX2PF_14_RT_OFFSET 29439 +#define QM_REG_PQTX2PF_15_RT_OFFSET 29440 +#define QM_REG_PQTX2PF_16_RT_OFFSET 29441 +#define QM_REG_PQTX2PF_17_RT_OFFSET 29442 +#define QM_REG_PQTX2PF_18_RT_OFFSET 29443 +#define QM_REG_PQTX2PF_19_RT_OFFSET 29444 +#define QM_REG_PQTX2PF_20_RT_OFFSET 29445 +#define QM_REG_PQTX2PF_21_RT_OFFSET 29446 +#define QM_REG_PQTX2PF_22_RT_OFFSET 29447 +#define QM_REG_PQTX2PF_23_RT_OFFSET 29448 +#define QM_REG_PQTX2PF_24_RT_OFFSET 29449 +#define QM_REG_PQTX2PF_25_RT_OFFSET 29450 +#define QM_REG_PQTX2PF_26_RT_OFFSET 29451 +#define QM_REG_PQTX2PF_27_RT_OFFSET 29452 +#define QM_REG_PQTX2PF_28_RT_OFFSET 29453 +#define QM_REG_PQTX2PF_29_RT_OFFSET 29454 +#define QM_REG_PQTX2PF_30_RT_OFFSET 29455 +#define QM_REG_PQTX2PF_31_RT_OFFSET 29456 +#define QM_REG_PQTX2PF_32_RT_OFFSET 29457 +#define 
QM_REG_PQTX2PF_33_RT_OFFSET 29458 +#define QM_REG_PQTX2PF_34_RT_OFFSET 29459 +#define QM_REG_PQTX2PF_35_RT_OFFSET 29460 +#define QM_REG_PQTX2PF_36_RT_OFFSET 29461 +#define QM_REG_PQTX2PF_37_RT_OFFSET 29462 +#define QM_REG_PQTX2PF_38_RT_OFFSET 29463 +#define QM_REG_PQTX2PF_39_RT_OFFSET 29464 +#define QM_REG_PQTX2PF_40_RT_OFFSET 29465 +#define QM_REG_PQTX2PF_41_RT_OFFSET 29466 +#define QM_REG_PQTX2PF_42_RT_OFFSET 29467 +#define QM_REG_PQTX2PF_43_RT_OFFSET 29468 +#define QM_REG_PQTX2PF_44_RT_OFFSET 29469 +#define QM_REG_PQTX2PF_45_RT_OFFSET 29470 +#define QM_REG_PQTX2PF_46_RT_OFFSET 29471 +#define QM_REG_PQTX2PF_47_RT_OFFSET 29472 +#define QM_REG_PQTX2PF_48_RT_OFFSET 29473 +#define QM_REG_PQTX2PF_49_RT_OFFSET 29474 +#define QM_REG_PQTX2PF_50_RT_OFFSET 29475 +#define QM_REG_PQTX2PF_51_RT_OFFSET 29476 +#define QM_REG_PQTX2PF_52_RT_OFFSET 29477 +#define QM_REG_PQTX2PF_53_RT_OFFSET 29478 +#define QM_REG_PQTX2PF_54_RT_OFFSET 29479 +#define QM_REG_PQTX2PF_55_RT_OFFSET 29480 +#define QM_REG_PQTX2PF_56_RT_OFFSET 29481 +#define QM_REG_PQTX2PF_57_RT_OFFSET 29482 +#define QM_REG_PQTX2PF_58_RT_OFFSET 29483 +#define QM_REG_PQTX2PF_59_RT_OFFSET 29484 +#define QM_REG_PQTX2PF_60_RT_OFFSET 29485 +#define QM_REG_PQTX2PF_61_RT_OFFSET 29486 +#define QM_REG_PQTX2PF_62_RT_OFFSET 29487 +#define QM_REG_PQTX2PF_63_RT_OFFSET 29488 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29489 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29490 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29491 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29492 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29493 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29494 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29495 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29496 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29497 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29498 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29499 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29500 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29501 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29502 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29503 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29504 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29505 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29506 +#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29507 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29508 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29509 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29510 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29511 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29512 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29513 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29514 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29515 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29516 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29517 +#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 29773 +#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 +#define QM_REG_RLGLBLCRD_RT_OFFSET 30029 +#define QM_REG_RLGLBLCRD_RT_SIZE 256 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 30285 +#define QM_REG_RLPFPERIOD_RT_OFFSET 30286 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30287 +#define QM_REG_RLPFINCVAL_RT_OFFSET 30288 +#define QM_REG_RLPFINCVAL_RT_SIZE 16 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30304 +#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_RLPFCRD_RT_OFFSET 30320 +#define QM_REG_RLPFCRD_RT_SIZE 16 +#define QM_REG_RLPFENABLE_RT_OFFSET 30336 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30337 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30338 +#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30354 +#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 
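/* Illustrative sketch: entries that also carry a *_RT_SIZE, such as
 * QM_REG_WFQPFWEIGHT above (size 16, one element per PF), are arrays in the
 * runtime block, addressed as base offset plus index. The helper below is
 * hypothetical; the base-plus-pf_id indexing mirrors how
 * qed_init_fw_funcs.c programs these arrays.
 */
static void example_set_pf_wfq_weight(struct qed_hwfn *p_hwfn,
				      u8 pf_id, u32 inc_val)
{
	/* Element pf_id of the 16-entry per-PF WFQ weight array */
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
}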
+#define QM_REG_WFQPFCRD_RT_OFFSET 30370 +#define QM_REG_WFQPFCRD_RT_SIZE 160 +#define QM_REG_WFQPFENABLE_RT_OFFSET 30530 +#define QM_REG_WFQVPENABLE_RT_OFFSET 30531 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 30532 +#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 +#define QM_REG_TXPQMAP_RT_OFFSET 31044 +#define QM_REG_TXPQMAP_RT_SIZE 512 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556 +#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 +#define QM_REG_WFQVPCRD_RT_OFFSET 32068 +#define QM_REG_WFQVPCRD_RT_SIZE 512 +#define QM_REG_WFQVPMAP_RT_OFFSET 32580 +#define QM_REG_WFQVPMAP_RT_SIZE 512 +#define QM_REG_PTRTBLTX_RT_OFFSET 33092 +#define QM_REG_PTRTBLTX_RT_SIZE 1024 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34116 +#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34276 +#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34277 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34278 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34279 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34280 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34281 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34282 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34283 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34287 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34291 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34323 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34339 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34355 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34371 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34387 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34388 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34396 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34397 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34398 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34399 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34400 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34401 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34402 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34403 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34404 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34405 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34406 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34407 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34408 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34409 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34410 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34411 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34412 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34413 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34414 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34415 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34416 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34417 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34418 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34419 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34420 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34421 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34422 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34423 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 
34424 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34425 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34426 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34427 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34428 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34429 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34430 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34431 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34432 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34433 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34434 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34435 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34436 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34437 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34438 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34439 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34440 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34441 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34442 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34443 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34444 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34445 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34446 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34447 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34448 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34449 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34450 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34451 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34452 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34453 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34454 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34455 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34456 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34457 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34458 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34459 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34460 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34461 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34462 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34463 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34464 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34465 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34466 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34467 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34468 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34469 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34470 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34471 + +#define RUNTIME_ARRAY_SIZE 34472 /* Init Callbacks */ #define DMAE_READY_CB 0 @@ -5633,9 +5450,9 @@ struct e4_eth_conn_context { struct pstorm_eth_conn_st_ctx pstorm_st_context; struct xstorm_eth_conn_st_ctx xstorm_st_context; struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context; + struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context; struct ystorm_eth_conn_st_ctx ystorm_st_context; struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context; - struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context; struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context; struct ustorm_eth_conn_st_ctx ustorm_st_context; struct mstorm_eth_conn_st_ctx mstorm_st_context; @@ -5665,6 +5482,16 @@ enum eth_error_code { ETH_FILTERS_VNI_ADD_FAIL_FULL, ETH_FILTERS_VNI_ADD_FAIL_DUP, ETH_FILTERS_GFT_UPDATE_FAIL, + ETH_RX_QUEUE_FAIL_LOAD_VF_DATA, + ETH_FILTERS_GFS_ADD_FILTER_FAIL_MAX_HOPS, + 
ETH_FILTERS_GFS_ADD_FILTER_FAIL_NO_FREE_ENRTY, + ETH_FILTERS_GFS_ADD_FILTER_FAIL_ALREADY_EXISTS, + ETH_FILTERS_GFS_ADD_FILTER_FAIL_PCI_ERROR, + ETH_FILTERS_GFS_ADD_FINLER_FAIL_MAGIC_NUM_ERROR, + ETH_FILTERS_GFS_DEL_FILTER_FAIL_MAX_HOPS, + ETH_FILTERS_GFS_DEL_FILTER_FAIL_NO_MATCH_ENRTY, + ETH_FILTERS_GFS_DEL_FILTER_FAIL_PCI_ERROR, + ETH_FILTERS_GFS_DEL_FILTER_FAIL_MAGIC_NUM_ERROR, MAX_ETH_ERROR_CODE }; @@ -5688,6 +5515,11 @@ enum eth_event_opcode { ETH_EVENT_RX_CREATE_GFT_ACTION, ETH_EVENT_RX_GFT_UPDATE_FILTER, ETH_EVENT_TX_QUEUE_UPDATE, + ETH_EVENT_RGFS_ADD_FILTER, + ETH_EVENT_RGFS_DEL_FILTER, + ETH_EVENT_TGFS_ADD_FILTER, + ETH_EVENT_TGFS_DEL_FILTER, + ETH_EVENT_GFS_COUNTERS_REPORT_REQUEST, MAX_ETH_EVENT_OPCODE }; @@ -5780,18 +5612,31 @@ enum eth_ramrod_cmd_id { ETH_RAMROD_RX_CREATE_GFT_ACTION, ETH_RAMROD_GFT_UPDATE_FILTER, ETH_RAMROD_TX_QUEUE_UPDATE, + ETH_RAMROD_RGFS_FILTER_ADD, + ETH_RAMROD_RGFS_FILTER_DEL, + ETH_RAMROD_TGFS_FILTER_ADD, + ETH_RAMROD_TGFS_FILTER_DEL, + ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST, MAX_ETH_RAMROD_CMD_ID }; /* Return code from eth sp ramrods */ struct eth_return_code { u8 value; -#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F -#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0 -#define ETH_RETURN_CODE_RESERVED_MASK 0x3 -#define ETH_RETURN_CODE_RESERVED_SHIFT 5 -#define ETH_RETURN_CODE_RX_TX_MASK 0x1 -#define ETH_RETURN_CODE_RX_TX_SHIFT 7 +#define ETH_RETURN_CODE_ERR_CODE_MASK 0x3F +#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0 +#define ETH_RETURN_CODE_RESERVED_MASK 0x1 +#define ETH_RETURN_CODE_RESERVED_SHIFT 6 +#define ETH_RETURN_CODE_RX_TX_MASK 0x1 +#define ETH_RETURN_CODE_RX_TX_SHIFT 7 +}; + +/* tx destination enum */ +enum eth_tx_dst_mode_config_enum { + ETH_TX_DST_MODE_CONFIG_DISABLE, + ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_BD, + ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_VPORT, + MAX_ETH_TX_DST_MODE_CONFIG_ENUM }; /* What to do in case an error occurs */ @@ -5818,8 +5663,10 @@ struct eth_tx_err_vals { #define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5 #define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1 #define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6 -#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF -#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7 +#define ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS_SHIFT 7 +#define ETH_TX_ERR_VALS_RESERVED_MASK 0xFF +#define ETH_TX_ERR_VALS_RESERVED_SHIFT 8 }; /* vport rss configuration data */ @@ -5849,7 +5696,6 @@ struct eth_vport_rss_config { u8 tbl_size; __le32 reserved2[2]; __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]; - __le32 rss_key[ETH_RSS_KEY_SIZE_REGS]; __le32 reserved3[2]; }; @@ -6051,7 +5897,7 @@ struct rx_update_gft_filter_data { u8 inner_vlan_removal_en; }; -/* Ramrod data for rx queue start ramrod */ +/* Ramrod data for tx queue start ramrod */ struct tx_queue_start_ramrod_data { __le16 sb_id; u8 sb_index; @@ -6064,16 +5910,14 @@ struct tx_queue_start_ramrod_data { #define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0 #define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1 #define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1 -#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1 -#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2 #define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1 -#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3 +#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 2 #define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1 -#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4 +#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 3 
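/* Illustrative sketch: the paired _MASK/_SHIFT defines in these structures
 * follow qed's packed-bitfield convention and are accessed through the
 * GET_FIELD()/SET_FIELD() macros, the same macros the qed_hw.c hunk further
 * down converts qed_dmae_opcode() to. The decoder below is hypothetical;
 * the macro and the field name come from this header. Note that with this
 * change the extracted error code is 6 bits wide (mask 0x3F) instead of 5.
 */
static u8 example_eth_err_code(struct eth_return_code ret)
{
	/* GET_FIELD(v, NAME): ((v) >> NAME##_SHIFT) & NAME##_MASK */
	return GET_FIELD(ret.value, ETH_RETURN_CODE_ERR_CODE);
}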
#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1 -#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5 -#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3 -#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6 +#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 4 +#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x7 +#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 5 u8 pxp_st_hint; u8 pxp_tph_valid_bd; u8 pxp_tph_valid_pkt; @@ -6129,18 +5973,22 @@ struct vport_start_ramrod_data { __le16 default_vlan; u8 tx_switching_en; u8 anti_spoofing_en; - u8 default_vlan_en; - u8 handle_ptp_pkts; u8 silent_vlan_removal_en; u8 untagged; struct eth_tx_err_vals tx_err_behav; - u8 zero_placement_offset; u8 ctl_frame_mac_check_en; u8 ctl_frame_ethtype_check_en; + u8 reserved0; + u8 reserved1; + u8 tx_dst_port_mode_config; + u8 dst_vport_id; + u8 tx_dst_port_mode; + u8 dst_vport_id_valid; u8 wipe_inner_vlan_pri_en; + u8 reserved2[2]; struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg; }; @@ -6700,19 +6548,6 @@ struct e4_xstorm_eth_hw_conn_ag_ctx { __le16 conn_dpi; }; -/* GFT CAM line struct */ -struct gft_cam_line { - __le32 camline; -#define GFT_CAM_LINE_VALID_MASK 0x1 -#define GFT_CAM_LINE_VALID_SHIFT 0 -#define GFT_CAM_LINE_DATA_MASK 0x3FFF -#define GFT_CAM_LINE_DATA_SHIFT 1 -#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF -#define GFT_CAM_LINE_MASK_BITS_SHIFT 15 -#define GFT_CAM_LINE_RESERVED1_MASK 0x7 -#define GFT_CAM_LINE_RESERVED1_SHIFT 29 -}; - /* GFT CAM line struct with fields breakout */ struct gft_cam_line_mapped { __le32 camline; @@ -6742,10 +6577,6 @@ struct gft_cam_line_mapped { #define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29 }; -union gft_cam_line_union { - struct gft_cam_line cam_line; - struct gft_cam_line_mapped cam_line_mapped; -}; /* Used in gft_profile_key: Indication for ip version */ enum gft_profile_ip_version { @@ -7024,6 +6855,11 @@ struct mstorm_rdma_task_st_ctx { struct regpair temp[4]; }; +/* The roce task context of Ustorm */ +struct ustorm_rdma_task_st_ctx { + struct regpair temp[6]; +}; + struct e4_ustorm_rdma_task_ag_ctx { u8 reserved; u8 state; @@ -7033,8 +6869,8 @@ struct e4_ustorm_rdma_task_ag_ctx { #define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 #define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 #define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5 +#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 #define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 #define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6 u8 flags1; @@ -7064,29 +6900,29 @@ struct e4_ustorm_rdma_task_ag_ctx { #define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 #define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7 u8 flags3; -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0 
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 __le32 dif_err_intervals; __le32 dif_error_1st_interval; - __le32 sq_cons; - __le32 dif_runt_value; + __le32 dif_rxmit_cons; + __le32 dif_rxmit_prod; __le32 sge_index; - __le32 reg5; + __le32 sq_cons; u8 byte2; u8 byte3; - __le16 word1; - __le16 word2; + __le16 dif_write_cons; + __le16 dif_write_prod; __le16 word3; - __le32 reg6; - __le32 reg7; + __le32 dif_error_buffer_address_lo; + __le32 dif_error_buffer_address_hi; }; /* RDMA task context */ @@ -7097,6 +6933,8 @@ struct e4_rdma_task_context { struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context; struct mstorm_rdma_task_st_ctx mstorm_st_context; struct rdif_task_context rdif_context; + struct ustorm_rdma_task_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context; }; @@ -7132,7 +6970,12 @@ struct rdma_create_cq_ramrod_data { u8 pbl_log_page_size; u8 toggle_bit; __le16 int_timeout; - __le16 reserved1; + u8 vf_id; + u8 flags; +#define RDMA_CREATE_CQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1 +#define RDMA_CREATE_CQ_RAMROD_DATA_VF_ID_VALID_SHIFT 0 +#define RDMA_CREATE_CQ_RAMROD_DATA_RESERVED1_MASK 0x7F +#define RDMA_CREATE_CQ_RAMROD_DATA_RESERVED1_SHIFT 1 }; /* rdma deregister tid ramrod data */ @@ -7176,6 +7019,7 @@ enum rdma_fw_return_code { RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR, RDMA_RETURN_RESIZE_CQ_ERR, RDMA_RETURN_NIG_DRAIN_REQ, + RDMA_RETURN_GENERAL_ERR, MAX_RDMA_FW_RETURN_CODE }; @@ -7189,7 +7033,10 @@ struct rdma_init_func_hdr { u8 relaxed_ordering; __le16 first_reg_srq_id; __le32 reg_srq_base_addr; - __le32 reserved; + u8 searcher_mode; + u8 pvrdma_mode; + u8 max_num_ns_log; + u8 reserved; }; /* rdma function init ramrod data */ @@ -7279,16 +7126,20 @@ struct rdma_resize_cq_ramrod_data { #define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0 #define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1 #define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1 -#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F -#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2 +#define RDMA_RESIZE_CQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1 +#define RDMA_RESIZE_CQ_RAMROD_DATA_VF_ID_VALID_SHIFT 2 +#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x1F +#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 3 u8 pbl_log_page_size; __le16 pbl_num_pages; __le32 max_cqes; struct regpair pbl_addr; struct regpair output_params_addr; + u8 vf_id; + u8 reserved1[7]; }; -/* The rdma storm context of Mstorm */ +/* The rdma SRQ context */ struct rdma_srq_context { struct regpair temp[8]; }; @@ -7335,6 +7186,7 @@ enum rdma_tid_type { MAX_RDMA_TID_TYPE }; +/* The rdma XRC SRQ context */ struct rdma_xrc_srq_context { struct regpair temp[9]; }; @@ -7516,12 +7368,12 @@ struct e4_xstorm_roce_conn_ag_ctx { #define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2 #define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1 #define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT12_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 #define 
E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 #define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 6 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1 +#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6 #define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 #define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; @@ -7845,9 +7697,9 @@ struct mstorm_roce_conn_st_ctx { struct regpair temp[6]; }; -/* The roce storm context of Ystorm */ +/* The roce storm context of Ustorm */ struct ustorm_roce_conn_st_ctx { - struct regpair temp[12]; + struct regpair temp[14]; }; /* roce connection context */ @@ -7865,6 +7717,7 @@ struct e4_roce_conn_context { struct mstorm_roce_conn_st_ctx mstorm_st_context; struct regpair mstorm_st_padding[2]; struct ustorm_roce_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; }; /* roce cqes statistics */ @@ -7919,12 +7772,17 @@ struct roce_create_qp_req_ramrod_data { struct regpair qp_handle_for_cqe; struct regpair qp_handle_for_async; u8 stats_counter_id; - u8 reserved3[6]; + u8 vf_id; + u8 vport_id; u8 flags2; #define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_MASK 0x1 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x7F -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_SHIFT 1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x3F +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 2 + u8 name_space; + u8 reserved3[3]; __le16 regular_latency_phy_queue; __le16 dpi; }; @@ -7952,8 +7810,10 @@ struct roce_create_qp_resp_ramrod_data { #define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_MASK 0x1 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x7FFF -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 17 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_SHIFT 17 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x3FFF +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 18 __le16 xrc_domain; u8 max_ird; u8 traffic_class; @@ -7980,10 +7840,14 @@ struct roce_create_qp_resp_ramrod_data { struct regpair qp_handle_for_cqe; struct regpair qp_handle_for_async; __le16 low_latency_phy_queue; - u8 reserved2[2]; + u8 vf_id; + u8 vport_id; __le32 cq_cid; __le16 regular_latency_phy_queue; __le16 dpi; + __le32 src_qp_id; + u8 name_space; + u8 reserved3[3]; }; /* roce DCQCN received statistics */ @@ -8017,6 +7881,8 @@ struct roce_destroy_qp_resp_output_params { /* RoCE destroy qp responder ramrod data */ struct roce_destroy_qp_resp_ramrod_data { struct regpair output_params_addr; + __le32 src_qp_id; + __le32 reserved; }; /* roce error statistics */ @@ -8100,8 +7966,8 @@ struct roce_modify_qp_req_ramrod_data { #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 13 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 13 #define 
ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14 u8 fields; @@ -8147,8 +8013,8 @@ struct roce_modify_qp_resp_ramrod_data { #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 10 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 10 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11 u8 fields; @@ -8189,7 +8055,7 @@ struct roce_query_qp_req_ramrod_data { /* RoCE query qp responder output params */ struct roce_query_qp_resp_output_params { __le32 psn; - __le32 err_flag; + __le32 flags; #define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1 #define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0 #define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF @@ -8256,12 +8122,12 @@ struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { #define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2 #define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 #define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_SHIFT 5 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_MASK 0x1 -#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6 #define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1 #define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7 u8 flags2; @@ -8634,8 +8500,8 @@ struct e4_tstorm_roce_req_conn_ag_ctx { u8 flags5; #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 @@ -8648,13 +8514,13 @@ struct e4_tstorm_roce_req_conn_ag_ctx { #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 #define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 - __le32 reg0; + __le32 dif_rxmit_cnt; __le32 snd_nxt_psn; __le32 snd_max_psn; __le32 orq_prod; __le32 reg4; - __le32 reg5; - __le32 reg6; + __le32 dif_acked_cnt; + __le32 dif_cnt; __le32 reg7; __le32 reg8; u8 tx_cqe_error_type; @@ -8665,7 +8531,7 @@ struct e4_tstorm_roce_req_conn_ag_ctx { __le16 snd_sq_cons; __le16 conn_dpi; __le16 force_comp_cons; - __le32 reg9; + __le32 dif_rxmit_acked_cnt; __le32 reg10; }; @@ -8940,10 +8806,10 @@ struct e4_xstorm_roce_req_conn_ag_ctx { 
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2 #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1 #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1 #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6 #define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 @@ -9169,10 +9035,10 @@ struct e4_xstorm_roce_resp_conn_ag_ctx { #define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2 #define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1 #define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 #define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1 #define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6 #define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 @@ -9899,7 +9765,7 @@ struct mstorm_iwarp_conn_st_ctx { /* The iwarp storm context of Ustorm */ struct ustorm_iwarp_conn_st_ctx { - __le32 reserved[24]; + struct regpair reserved[14]; }; /* iwarp connection context */ @@ -9917,6 +9783,7 @@ struct e4_iwarp_conn_context { struct regpair tstorm_st_padding[2]; struct mstorm_iwarp_conn_st_ctx mstorm_st_context; struct ustorm_iwarp_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; }; /* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */ @@ -9969,7 +9836,8 @@ enum iwarp_eqe_async_opcode { struct iwarp_eqe_data_mpa_async_completion { __le16 ulp_data_len; - u8 reserved[6]; + u8 rtr_type_sent; + u8 reserved[5]; }; struct iwarp_eqe_data_tcp_async_completion { @@ -9994,7 +9862,7 @@ enum iwarp_eqe_sync_opcode { /* iWARP EQE completion status */ enum iwarp_fw_return_code { - IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5, + IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 6, IWARP_CONN_ERROR_TCP_CONNECTION_RST, IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT, IWARP_CONN_ERROR_MPA_ERROR_REJECT, @@ -10163,8 +10031,8 @@ struct iwarp_rxmit_stats_drv { * offload ramrod. 
*/ struct iwarp_tcp_offload_ramrod_data { - struct iwarp_offload_params iwarp; struct tcp_offload_params_opt2 tcp; + struct iwarp_offload_params iwarp; }; /* iWARP MPA negotiation types */ @@ -11456,8 +11324,8 @@ struct e4_tstorm_iscsi_conn_ag_ctx { u8 flags3; #define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 #define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2 #define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 #define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 #define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 @@ -11479,8 +11347,8 @@ struct e4_tstorm_iscsi_conn_ag_ctx { #define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4 #define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 #define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6 #define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 #define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; @@ -11712,7 +11580,7 @@ struct e4_ystorm_iscsi_conn_ag_ctx { /* The trace in the buffer */ #define MFW_TRACE_EVENTID_MASK 0x00ffff #define MFW_TRACE_PRM_SIZE_MASK 0x0f0000 -#define MFW_TRACE_PRM_SIZE_SHIFT 16 +#define MFW_TRACE_PRM_SIZE_OFFSET 16 #define MFW_TRACE_ENTRY_SIZE 3 struct mcp_trace { @@ -12470,6 +12338,11 @@ enum resource_id_enum { RESOURCE_LL2_QUEUE_E = 15, RESOURCE_RDMA_STATS_QUEUE_E = 16, RESOURCE_BDQ_E = 17, + RESOURCE_QCN_E = 18, + RESOURCE_LLH_FILTER_E = 19, + RESOURCE_VF_MAC_ADDR = 20, + RESOURCE_LL2_CQS_E = 21, + RESOURCE_VF_CNQS = 22, RESOURCE_MAX_NUM, RESOURCE_NUM_INVALID = 0xFFFFFFFF }; @@ -12580,6 +12453,8 @@ struct public_drv_mb { #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 #define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000 +#define DRV_MSG_CODE_GET_NVM_CFG_OPTION 0x003e0000 +#define DRV_MSG_CODE_SET_NVM_CFG_OPTION 0x003f0000 #define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 #define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 @@ -12658,7 +12533,10 @@ struct public_drv_mb { #define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 #define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3 +#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0 +#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF #define DRV_MB_PARAM_NVM_LEN_OFFSET 24 +#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF @@ -12748,6 +12626,21 @@ struct public_drv_mb { #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 #define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000FFFF +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18 +#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000 +#define 
DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000 + u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_UNSUPPORTED 0x00000000 @@ -13404,6 +13297,21 @@ enum nvm_image_type { NVM_TYPE_FCOE_CFG = 0x1f, NVM_TYPE_ETH_PHY_FW1 = 0x20, NVM_TYPE_ETH_PHY_FW2 = 0x21, + NVM_TYPE_BDN = 0x22, + NVM_TYPE_8485X_PHY_FW = 0x23, + NVM_TYPE_PUB_KEY = 0x24, + NVM_TYPE_RECOVERY = 0x25, + NVM_TYPE_PLDM = 0x26, + NVM_TYPE_UPK1 = 0x27, + NVM_TYPE_UPK2 = 0x28, + NVM_TYPE_MASTER_KC = 0x29, + NVM_TYPE_BACKUP_KC = 0x2a, + NVM_TYPE_HW_DUMP = 0x2b, + NVM_TYPE_HW_DUMP_OUT = 0x2c, + NVM_TYPE_BIN_NVM_META = 0x30, + NVM_TYPE_ROM_TEST = 0xf0, + NVM_TYPE_88X33X0_PHY_FW = 0x31, + NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32, NVM_TYPE_MAX, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index a4de9e3ef72c..4ab8cfaf63d1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -393,7 +393,7 @@ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid) /* DMAE */ #define QED_DMAE_FLAGS_IS_SET(params, flag) \ - ((params) != NULL && ((params)->flags & QED_DMAE_FLAG_##flag)) + ((params) != NULL && GET_FIELD((params)->flags, QED_DMAE_PARAMS_##flag)) static void qed_dmae_opcode(struct qed_hwfn *p_hwfn, const u8 is_src_type_grc, @@ -408,62 +408,55 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn, * 0- The source is the PCIe * 1- The source is the GRC. */ - opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC - : DMAE_CMD_SRC_MASK_PCIE) << - DMAE_CMD_SRC_SHIFT; - src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_SRC) ? - p_params->src_pfid : p_hwfn->rel_pf_id; - opcode |= ((src_pfid & DMAE_CMD_SRC_PF_ID_MASK) << - DMAE_CMD_SRC_PF_ID_SHIFT); + SET_FIELD(opcode, DMAE_CMD_SRC, + (is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie)); + src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ? + p_params->src_pfid : p_hwfn->rel_pf_id; + SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, src_pfid); /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */ - opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC - : DMAE_CMD_DST_MASK_PCIE) << - DMAE_CMD_DST_SHIFT; - dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_DST) ? - p_params->dst_pfid : p_hwfn->rel_pf_id; - opcode |= ((dst_pfid & DMAE_CMD_DST_PF_ID_MASK) << - DMAE_CMD_DST_PF_ID_SHIFT); + SET_FIELD(opcode, DMAE_CMD_DST, + (is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie)); + dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ? + p_params->dst_pfid : p_hwfn->rel_pf_id; + SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, dst_pfid); + /* Whether to write a completion word to the completion destination: * 0-Do not write a completion word * 1-Write the completion word */ - opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT); - opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK << - DMAE_CMD_SRC_ADDR_RESET_SHIFT); + SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN, 1); + SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1); if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST)) - opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT); + SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, 1); - opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT); + /* swapping mode 3 - big endian */ + SET_FIELD(opcode, DMAE_CMD_ENDIANITY_MODE, DMAE_CMD_ENDIANITY); - port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT)) ? 
- p_params->port_id : p_hwfn->port_id; - opcode |= (port_id << DMAE_CMD_PORT_ID_SHIFT); + port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ? + p_params->port_id : p_hwfn->port_id; + SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id); /* reset source address in next go */ - opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK << - DMAE_CMD_SRC_ADDR_RESET_SHIFT); + SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1); /* reset dest address in next go */ - opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK << - DMAE_CMD_DST_ADDR_RESET_SHIFT); + SET_FIELD(opcode, DMAE_CMD_DST_ADDR_RESET, 1); /* SRC/DST VFID: all 1's - pf, otherwise VF id */ - if (QED_DMAE_FLAGS_IS_SET(p_params, VF_SRC)) { - opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT; - opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT; + if (QED_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) { + SET_FIELD(opcode, DMAE_CMD_SRC_VF_ID_VALID, 1); + SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, p_params->src_vfid); } else { - opcode_b |= DMAE_CMD_SRC_VF_ID_MASK << - DMAE_CMD_SRC_VF_ID_SHIFT; + SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, 0xFF); } - - if (QED_DMAE_FLAGS_IS_SET(p_params, VF_DST)) { - opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT; - opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT; + if (QED_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) { + SET_FIELD(opcode, DMAE_CMD_DST_VF_ID_VALID, 1); + SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, p_params->dst_vfid); } else { - opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT; + SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, 0xFF); } p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode); diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index d6430dfebd83..2f1049b0b93a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -44,9 +44,9 @@ #define CDU_VALIDATION_DEFAULT_CFG 61 static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = { - {400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */ - {528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */ - {608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */ + {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */ + {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */ + {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */ }; static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { @@ -61,6 +61,9 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { 0x100) - 1 : 0) #define QM_INVALID_PQ_ID 0xffff +/* Max link speed (in Mbps) */ +#define QM_MAX_LINK_SPEED 100000 + /* Feature enable */ #define QM_BYPASS_EN 1 #define QM_BYTE_CRD_EN 1 @@ -128,8 +131,6 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { /* Pure LB CmdQ lines (+spare) */ #define PBF_CMDQ_PURE_LB_LINES 150 -#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8 - #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \ (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \ @@ -140,6 +141,9 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET)) +/* Returns the VOQ line credit for the specified number of PBF command lines. + * PBF lines are specified in 256b units. 
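+ * For example, with the PBF_CMDQ_PURE_LB_LINES value of 150 defined above,
+ * the macro below yields (150 - 4) * 2 = 292, OR'd with the sign bit.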
+ */
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
@@ -178,14 +182,14 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
cmd ## _ ## field, \
value)
-#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, rl_id, \
ext_voq, wrr) \
do { \
typeof(map) __map; \
memset(&__map, 0, sizeof(__map)); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
- rl_valid); \
+ rl_valid ? 1 : 0);\
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
vp_pq_id); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
@@ -200,9 +204,12 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
- ((rl_valid) << 22) | ((rl) << 24))
+ ((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
+ (((rl) >> 8) << 9))
+
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
- (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
+ XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
+ XSTORM_PQ_INFO_OFFSET(pq_id)
/******************** INTERNAL IMPLEMENTATION *********************/
@@ -228,9 +235,6 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
(u32)voq_bit_mask);
- if (num_ext_voqs >= 32)
- STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
- (u32)(voq_bit_mask >> 32));
/* Write RL period */
STORE_RT_REG(p_hwfn,
@@ -259,12 +263,12 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
QM_WFQ_UPPER_BOUND);
}
-/* Prepare VPORT RL enable/disable runtime init values */
-static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
+/* Prepare global RL enable/disable runtime init values */
+static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
- vport_rl_en ? 1 : 0);
- if (vport_rl_en) {
+ global_rl_en ? 1 : 0);
+ if (global_rl_en) {
/* Write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
@@ -331,8 +335,7 @@ static void qed_cmdq_lines_rt_init(
continue;
/* Find number of command queue lines to divide between the
- * active physical TCs. In E5, 1/8 of the lines are reserved.
- * the lines for pure LB TC are subtracted.
+ * active physical TCs.
*/
phys_lines = port_params[port_id].num_pbf_cmd_lines;
phys_lines -= PBF_CMDQ_PURE_LB_LINES;
@@ -361,11 +364,30 @@ static void qed_cmdq_lines_rt_init(
ext_voq = qed_get_ext_voq(p_hwfn, port_id,
PURE_LB_TC,
max_phys_tcs_per_port);
- qed_cmdq_lines_voq_rt_init(p_hwfn,
- ext_voq, PBF_CMDQ_PURE_LB_LINES);
+ qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
+ PBF_CMDQ_PURE_LB_LINES);
}
}
+/* Prepare runtime init values to allocate guaranteed BTB blocks for the
+ * specified port. The guaranteed BTB space is divided between the TCs as
+ * follows (shared space is currently not used):
+ * 1. Parameters:
+ * B - BTB blocks for this port
+ * C - Number of physical TCs for this port
+ * 2. Calculation:
+ * a. 38 blocks (9700B jumbo frame) are allocated for global per port
+ * headroom.
+ * b. B = B - 38 (remainder after global headroom allocation).
+ * c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ.
+ * d.
B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation). + * e. B/C blocks are allocated for each physical TC. + * Assumptions: + * - MTU is up to 9700 bytes (38 blocks) + * - All TCs are considered symmetrical (same rate and packet size) + * - No optimization for lossy TC (all are considered lossless). Shared space + * is not enabled and allocated for each TC. + */ static void qed_btb_blocks_rt_init( struct qed_hwfn *p_hwfn, u8 max_ports_per_engine, @@ -424,6 +446,34 @@ static void qed_btb_blocks_rt_init( } } +/* Prepare runtime init values for the specified RL. + * Set max link speed (100Gbps) per rate limiter. + * Return -1 on error. + */ +static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn) +{ + u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) | + (u32)QM_RL_CRD_REG_SIGN_BIT; + u32 inc_val; + u16 rl_id; + + /* Go over all global RLs */ + for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) { + inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED); + + STORE_RT_REG(p_hwfn, + QM_REG_RLGLBLCRD_RT_OFFSET + rl_id, + (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, + QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id, + upper_bound); + STORE_RT_REG(p_hwfn, + QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val); + } + + return 0; +} + /* Prepare Tx PQ mapping runtime init values for the specified PF */ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -460,18 +510,17 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, /* Go over all Tx PQs */ for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) { - u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id; - u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS; + u16 *p_first_tx_pq_id, vport_id_in_pf; struct qm_rf_pq_map_e4 tx_pq_map; - bool is_vf_pq, rl_valid; - u16 *p_first_tx_pq_id; + u8 tc_id = pq_params[i].tc_id; + bool is_vf_pq; + u8 ext_voq; ext_voq = qed_get_ext_voq(p_hwfn, pq_params[i].port_id, tc_id, p_params->max_phys_tcs_per_port); is_vf_pq = (i >= p_params->num_pf_pqs); - rl_valid = pq_params[i].rl_valid > 0; /* Update first Tx PQ of VPORT/TC */ vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport; @@ -492,21 +541,14 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, map_val); } - /* Check RL ID */ - if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) { - DP_NOTICE(p_hwfn, - "Invalid VPORT ID for rate limiter configuration\n"); - rl_valid = false; - } - /* Prepare PQ map entry */ QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, - rl_valid ? 1 : 0, *p_first_tx_pq_id, - rl_valid ? pq_params[i].vport_id : 0, + pq_params[i].rl_valid, + pq_params[i].rl_id, ext_voq, pq_params[i].wrr_group); /* Set PQ base address */ @@ -529,9 +571,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, p_params->pf_id, tc_id, pq_params[i].port_id, - rl_valid ? 1 : 0, - rl_valid ? - pq_params[i].vport_id : 0); + pq_params[i].rl_valid, + pq_params[i].rl_id); qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), pq_info); } @@ -669,19 +710,19 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl) * Return -1 on error. 
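
None of these *_rt_init helpers touch the chip directly: each one stages values into the runtime-init array through STORE_RT_REG, and qed_init_rt() (quoted further down in qed_init_ops.c) later flushes the staged segments to GRC and invalidates them. A minimal standalone sketch of the staging step, with simplified types and an illustrative array size (not the real generated size):

#include <stdbool.h>
#include <stdint.h>

#define RT_ARRAY_SIZE 4096	/* illustrative; the real size is generated */

struct rt_data {
	uint32_t init_val[RT_ARRAY_SIZE];
	bool b_valid[RT_ARRAY_SIZE];	/* staged since the last flush? */
};

/* Modeled on qed_init_store_rt_reg(): stage one value at an RT offset. */
static void store_rt_reg(struct rt_data *rt, uint32_t rt_offset, uint32_t val)
{
	rt->init_val[rt_offset] = val;
	rt->b_valid[rt_offset] = true;
}

int main(void)
{
	static struct rt_data rt;

	/* e.g. qed_global_rl_rt_init() stages credit/bound/increment per RL */
	store_rt_reg(&rt, 42, 0x80000000u);
	return rt.b_valid[42] ? 0 : 1;
}

The flush side walks b_valid[] and writes only the staged segments (by DMAE or plain GRC writes), clearing the valid flags as it goes.
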
*/ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, - u8 num_vports, + u16 num_vports, struct init_qm_vport_params *vport_params) { - u16 vport_pq_id; + u16 vport_pq_id, i; u32 inc_val; - u8 tc, i; + u8 tc; /* Go over all PF VPORTs */ for (i = 0; i < num_vports; i++) { - if (!vport_params[i].vport_wfq) + if (!vport_params[i].wfq) continue; - inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq); + inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq); if (inc_val > QM_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n"); @@ -706,48 +747,6 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, return 0; } -/* Prepare VPORT RL runtime init values for the specified VPORTs. - * Return -1 on error. - */ -static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, - u8 start_vport, - u8 num_vports, - u32 link_speed, - struct init_qm_vport_params *vport_params) -{ - u8 i, vport_id; - u32 inc_val; - - if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) { - DP_NOTICE(p_hwfn, - "Invalid VPORT ID for rate limiter configuration\n"); - return -1; - } - - /* Go over all PF VPORTs */ - for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) { - inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ? - vport_params[i].vport_rl : - link_speed); - if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) { - DP_NOTICE(p_hwfn, - "Invalid VPORT rate-limit configuration\n"); - return -1; - } - - STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id, - (u32)QM_RL_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, - QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id, - QM_VP_RL_UPPER_BOUND(link_speed) | - (u32)QM_RL_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id, - inc_val); - } - - return 0; -} - static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -799,23 +798,20 @@ u32 qed_qm_pf_mem_size(u32 num_pf_cids, int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, struct qed_qm_common_rt_init_params *p_params) { - /* Init AFullOprtnstcCrdMask */ - u32 mask = (QM_OPPOR_LINE_VOQ_DEF << - QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) | - (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) | - (p_params->pf_wfq_en << - QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) | - (p_params->vport_wfq_en << - QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) | - (p_params->pf_rl_en << - QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) | - (p_params->vport_rl_en << - QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) | - (QM_OPPOR_FW_STOP_DEF << - QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) | - (QM_OPPOR_PQ_EMPTY_DEF << - QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT); + u32 mask = 0; + /* Init AFullOprtnstcCrdMask */ + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ, + QM_OPPOR_LINE_VOQ_DEF); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL, + p_params->global_rl_en); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF); + SET_FIELD(mask, + QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF); STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask); /* Enable/disable PF RL */ @@ -824,8 +820,8 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, /* Enable/disable PF WFQ */ qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en); - /* Enable/disable VPORT RL */ - qed_enable_vport_rl(p_hwfn, 
p_params->vport_rl_en); + /* Enable/disable global RL */ + qed_enable_global_rl(p_hwfn, p_params->global_rl_en); /* Enable/disable VPORT WFQ */ qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en); @@ -842,6 +838,8 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, p_params->max_phys_tcs_per_port, p_params->port_params); + qed_global_rl_rt_init(p_hwfn); + return 0; } @@ -853,7 +851,9 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids + p_params->num_tids) * QM_OTHER_PQS_PER_PF; - u8 tc, i; + u16 i; + u8 tc; + /* Clear first Tx PQ ID array for each VPORT */ for (i = 0; i < p_params->num_vports; i++) @@ -878,16 +878,10 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl)) return -1; - /* Set VPORT WFQ */ + /* Init VPORT WFQ */ if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params)) return -1; - /* Set VPORT RL */ - if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport, - p_params->num_vports, p_params->link_speed, - vport_params)) - return -1; - return 0; } @@ -925,18 +919,19 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn, int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq) + u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq) { u16 vport_pq_id; u32 inc_val; u8 tc; - inc_val = QM_WFQ_INC_VAL(vport_wfq); + inc_val = QM_WFQ_INC_VAL(wfq); if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n"); + DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n"); return -1; } + /* A VPORT can have several VPORT PQ IDs for various TCs */ for (tc = 0; tc < NUM_OF_TCS; tc++) { vport_pq_id = first_tx_pq_id[tc]; if (vport_pq_id != QM_INVALID_PQ_ID) @@ -948,28 +943,20 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, return 0; } -int qed_init_vport_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 vport_id, u32 vport_rl, u32 link_speed) +int qed_init_global_rl(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit) { - u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS; + u32 inc_val; - if (vport_id >= max_qm_global_rls) { - DP_NOTICE(p_hwfn, - "Invalid VPORT ID for rate limiter configuration\n"); + inc_val = QM_RL_INC_VAL(rate_limit); + if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) { + DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n"); return -1; } - inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed); - if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) { - DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n"); - return -1; - } - - qed_wr(p_hwfn, - p_ptt, - QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); - qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val); + qed_wr(p_hwfn, p_ptt, + QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val); return 0; } @@ -1013,7 +1000,6 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, return true; } - #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \ do { \ typeof(var) *__p_var = &(var); \ @@ -1021,8 +1007,59 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, *__p_var = (*__p_var & ~BIT(__offset)) | \ ((enable) ? 
BIT(__offset) : 0); \
} while (0)
-#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
-#define PRS_ETH_OUTPUT_FORMAT -46832
+
+#define PRS_ETH_TUNN_OUTPUT_FORMAT 0xF4DAB910
+#define PRS_ETH_OUTPUT_FORMAT 0xFFFF4910
+
+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
+ do { \
+ u32 i; \
+ \
+ for (i = 0; i < (arr_size); i++) \
+ qed_wr(dev, ptt, \
+ ((addr) + (4 * i)), \
+ ((u32 *)&(arr))[i]); \
+ } while (0)
+
+/**
+ * @brief qed_dmae_to_grc - is an internal function - writes from host to
+ * wide-bus registers (split registers are not supported yet)
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param p_data - pointer to source data.
+ * @param addr - Destination register address.
+ * @param len_in_dwords - data length in DWORDS (u32)
+ */
+static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_data, u32 addr, u32 len_in_dwords)
+{
+ struct qed_dmae_params params = {};
+ int rc;
+
+ if (!p_data)
+ return -1;
+
+ /* Set DMAE params */
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
+
+ /* Execute DMAE command */
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)(p_data),
+ addr, len_in_dwords, &params);
+
+ /* If the write failed using DMAE, write using GRC */
+ if (rc) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Failed writing to chip using DMAE, using GRC instead\n");
+ /* write to registers using GRC */
+ ARR_REG_WR(p_hwfn, p_ptt, addr, p_data, len_in_dwords);
+ }
+
+ return len_in_dwords;
+}
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 dest_port)
@@ -1166,8 +1203,8 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
ip_geneve_enable ? 1 : 0);
}
-#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
-#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872
void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable)
@@ -1208,6 +1245,8 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
+ struct regpair ram_line = { };
+
/* Disable gft search for PF */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
@@ -1217,12 +1256,9 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
/* Zero ramline */
- qed_wr(p_hwfn,
- p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
- 0);
+ qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ sizeof(ram_line) / REG_SIZE);
}
void qed_gft_config(struct qed_hwfn *p_hwfn,
@@ -1232,7 +1268,8 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
bool udp, bool ipv4, bool ipv6, enum gft_profile_type profile_type)
{
- u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
+ u32 reg_val, cam_line, search_non_ip_as_gft;
+ struct regpair ram_line = { };
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn,
@@ -1298,35 +1335,33 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
/* Write line to RAM - compare to filter 4 tuple */
- ram_line_lo = 0;
- ram_line_hi = 0;
/* Search no IP as GFT */
search_non_ip_as_gft = 0;
/* Tunnel type */
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
- SET_FIELD(ram_line_lo,
GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) { - SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1); - SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1); - SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); - SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); - SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1); - SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1); + SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1); } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) { - SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); - SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); - SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1); + SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1); } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) { - SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1); - SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) { - SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1); - SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { - SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); + SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); /* Allow tunneled traffic without inner IP */ search_non_ip_as_gft = 1; @@ -1334,24 +1369,17 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft); - qed_wr(p_hwfn, - p_ptt, - PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, - ram_line_lo); - qed_wr(p_hwfn, - p_ptt, - PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE, - ram_line_hi); + qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, + sizeof(ram_line) / REG_SIZE); /* Set default profile so that no filter match will happen */ - qed_wr(p_hwfn, - p_ptt, - PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * - PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff); - qed_wr(p_hwfn, - p_ptt, - PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * - PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff); + ram_line.lo = 0xffffffff; + ram_line.hi = 0x3ff; + qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * + PRS_GFT_CAM_LINES_NO_MATCH, + sizeof(ram_line) / REG_SIZE); /* Enable gft search */ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1); @@ -1544,3 +1572,144 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]); } } + +#define PHYS_ADDR_DWORDS DIV_ROUND_UP(sizeof(dma_addr_t), 4) +#define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4) + +static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) +{ + switch (storm_id) { + case 0: + return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + 
TSTORM_OVERLAY_BUF_ADDR_OFFSET; + case 1: + return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + MSTORM_OVERLAY_BUF_ADDR_OFFSET; + case 2: + return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + USTORM_OVERLAY_BUF_ADDR_OFFSET; + case 3: + return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + XSTORM_OVERLAY_BUF_ADDR_OFFSET; + case 4: + return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + YSTORM_OVERLAY_BUF_ADDR_OFFSET; + case 5: + return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + PSTORM_OVERLAY_BUF_ADDR_OFFSET; + + default: + return 0; + } +} + +struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, + const u32 * const + fw_overlay_in_buf, + u32 buf_size_in_bytes) +{ + u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0; + struct phys_mem_desc *allocated_mem; + + if (!buf_size) + return NULL; + + allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc), + GFP_KERNEL); + if (!allocated_mem) + return NULL; + + memset(allocated_mem, 0, NUM_STORMS * sizeof(struct phys_mem_desc)); + + /* For each Storm, set physical address in RAM */ + while (buf_offset < buf_size) { + struct phys_mem_desc *storm_mem_desc; + struct fw_overlay_buf_hdr *hdr; + u32 storm_buf_size; + u8 storm_id; + + hdr = + (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset]; + storm_buf_size = GET_FIELD(hdr->data, + FW_OVERLAY_BUF_HDR_BUF_SIZE); + storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID); + storm_mem_desc = allocated_mem + storm_id; + storm_mem_desc->size = storm_buf_size * sizeof(u32); + + /* Allocate physical memory for Storm's overlays buffer */ + storm_mem_desc->virt_addr = + dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + storm_mem_desc->size, + &storm_mem_desc->phys_addr, GFP_KERNEL); + if (!storm_mem_desc->virt_addr) + break; + + /* Skip overlays buffer header */ + buf_offset += OVERLAY_HDR_SIZE_DWORDS; + + /* Copy Storm's overlays buffer to allocated memory */ + memcpy(storm_mem_desc->virt_addr, + &fw_overlay_in_buf[buf_offset], storm_mem_desc->size); + + /* Advance to next Storm */ + buf_offset += storm_buf_size; + } + + /* If memory allocation has failed, free all allocated memory */ + if (buf_offset < buf_size) { + qed_fw_overlay_mem_free(p_hwfn, allocated_mem); + return NULL; + } + + return allocated_mem; +} + +void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct phys_mem_desc *fw_overlay_mem) +{ + u8 storm_id; + + for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { + struct phys_mem_desc *storm_mem_desc = + (struct phys_mem_desc *)fw_overlay_mem + storm_id; + u32 ram_addr, i; + + /* Skip Storms with no FW overlays */ + if (!storm_mem_desc->virt_addr) + continue; + + /* Calculate overlay RAM GRC address of current PF */ + ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) + + sizeof(dma_addr_t) * p_hwfn->rel_pf_id; + + /* Write Storm's overlay physical address to RAM */ + for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32)) + qed_wr(p_hwfn, p_ptt, ram_addr, + ((u32 *)&storm_mem_desc->phys_addr)[i]); + } +} + +void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, + struct phys_mem_desc *fw_overlay_mem) +{ + u8 storm_id; + + if (!fw_overlay_mem) + return; + + for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { + struct phys_mem_desc *storm_mem_desc = + (struct phys_mem_desc *)fw_overlay_mem + storm_id; + + /* Free Storm's physical memory */ + if (storm_mem_desc->virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + storm_mem_desc->size, + storm_mem_desc->virt_addr, + 
storm_mem_desc->phys_addr);
}
+
+ /* Free allocated virtual memory */
+ kfree(fw_overlay_mem);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index a868d7f88601..5a6e4ac4fef4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -54,15 +54,15 @@ static u32 pxp_global_win[] = {
0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
- 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
- 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
- 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
- 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
- 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
- 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
- 0,
- 0,
- 0,
+ 0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */
+ 0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */
+ 0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */
+ 0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */
+ 0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */
+ 0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */
+ 0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */
+ 0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */
+ 0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */
0,
0,
0,
@@ -74,15 +74,6 @@ void qed_init_iro_array(struct qed_dev *cdev)
cdev->iro_arr = iro_arr;
}
-/* Runtime configuration helpers */
-void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
-{
- int i;
-
- for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
- p_hwfn->rt_data.b_valid[i] = false;
-}
-
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
p_hwfn->rt_data.init_val[rt_offset] = val;
@@ -106,7 +97,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
- u16 i, segment;
+ u16 i, j, segment;
int rc = 0;
/* Since not all RT entries are initialized, go over the RT and
*/
if (!b_must_dmae) {
qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
+ p_valid[i] = false;
continue;
}
@@ -135,6 +127,10 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ /* invalidate after writing */
+ for (j = i; j < i + segment; j++)
+ p_valid[j] = false;
+
/* Jump over the entire segment, including invalid entry */
i += segment;
}
@@ -215,7 +211,7 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
* 3. p_hwfn->temp_data,
* 4.
fill_count */
- params.flags = QED_DMAE_FLAG_RW_REPL_SRC;
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
return qed_dmae_host2grc(p_hwfn, p_ptt, (uintptr_t)(&zero_buffer[0]),
addr, fill_count, &params);
@@ -490,10 +486,10 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
+ bool b_dmae = (phase != PHASE_ENGINE);
struct qed_dev *cdev = p_hwfn->cdev;
u32 cmd_num, num_init_ops;
union init_op *init_ops;
- bool b_dmae = false;
int rc = 0;
num_init_ops = cdev->fw_data->init_ops_size;
@@ -522,7 +518,6 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_IF_PHASE:
cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
phase, phase_id);
- b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
/* qed_init_run is always invoked from
@@ -533,6 +528,9 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_CALLBACK:
rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ if (phase == PHASE_ENGINE &&
+ cmd->callback.callback_id == DMAE_READY_CB)
+ b_dmae = true;
break;
}
@@ -587,5 +585,10 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
+ offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
+ fw->fw_overlays = (u32 *)(data + offset);
+ len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
+ fw->fw_overlays_len = len;
+
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
index 555dd086796d..e9e8ade50ed3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -81,14 +81,6 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn);
void qed_init_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_init_clear_rt_data - Clears the runtime init array.
- *
- *
- * @param p_hwfn
- */
-void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
-
-/**
* @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
* * diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index d473b522afc5..9ad568d93ae6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -37,14 +37,14 @@ #include <linux/slab.h> #include "qed.h" -/* Fields of IGU PF CONFIGRATION REGISTER */ +/* Fields of IGU PF CONFIGURATION REGISTER */ #define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */ #define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */ #define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */ #define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */ #define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */ #define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */ -/* Fields of IGU VF CONFIGRATION REGISTER */ +/* Fields of IGU VF CONFIGURATION REGISTER */ #define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */ #define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */ #define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 5585c18053ec..7245a615517a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -204,17 +204,14 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, return -EINVAL; } - SET_FIELD(p_init->hdr.flags, - ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE); - p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC; - val = p_params->half_way_close_timeout; p_init->half_way_close_timeout = cpu_to_le16(val); p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring; p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring; p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring; - p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + - p_params->ll2_ooo_queue_id; + p_init->ll2_rx_queue_id = + p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + + p_params->ll2_ooo_queue_id; p_init->func_params.log_page_size = p_params->log_page_size; val = p_params->num_tasks; @@ -331,12 +328,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, p_conn->physical_q1 = cpu_to_le16(physical_q); p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q); - p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN; - SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE, - p_conn->layer_code); - p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); - p_ramrod->fw_cid = cpu_to_le32(p_conn->icid); DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr); @@ -492,12 +484,8 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn, return rc; p_ramrod = &p_ent->ramrod.iscsi_conn_update; - p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_UPDATE_CONN; - SET_FIELD(p_ramrod->hdr.flags, - ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code); p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); - p_ramrod->fw_cid = cpu_to_le32(p_conn->icid); p_ramrod->flags = p_conn->update_flag; p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size); dval = p_conn->max_recv_pdu_length; @@ -537,12 +525,8 @@ qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn, return rc; p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update; - p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_MAC_UPDATE; - SET_FIELD(p_ramrod->hdr.flags, - ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code); p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); - p_ramrod->fw_cid = cpu_to_le32(p_conn->icid); ucval = 
p_conn->remote_mac[1]; ((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval; ucval = p_conn->remote_mac[0]; @@ -583,12 +567,8 @@ static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn, return rc; p_ramrod = &p_ent->ramrod.iscsi_conn_terminate; - p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_TERMINATION_CONN; - SET_FIELD(p_ramrod->hdr.flags, - ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code); p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); - p_ramrod->fw_cid = cpu_to_le32(p_conn->icid); p_ramrod->abortive = p_conn->abortive_dsconnect; DMA_REGPAIR_LE(p_ramrod->query_params_addr, @@ -603,7 +583,6 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { - struct iscsi_slow_path_hdr *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; @@ -621,11 +600,6 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn, if (rc) return rc; - p_ramrod = &p_ent->ramrod.iscsi_empty; - p_ramrod->op_code = ISCSI_RAMROD_CMD_ID_CLEAR_SQ; - SET_FIELD(p_ramrod->flags, - ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code); - return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -633,7 +607,6 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { - struct iscsi_spe_func_dstry *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = 0; @@ -651,9 +624,6 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn, if (rc) return rc; - p_ramrod = &p_ent->ramrod.iscsi_destroy; - p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC; - rc = qed_spq_post(p_hwfn, p_ent, NULL); qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index f380fae8799d..d2fe61a5cf56 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -137,8 +137,8 @@ qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, struct iwarp_init_func_ramrod_data *p_ramrod) { p_ramrod->iwarp.ll2_ooo_q_index = - RESC_START(p_hwfn, QED_LL2_QUEUE) + - p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; + RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) + + p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT; @@ -382,7 +382,7 @@ qed_iwarp2roce_state(enum qed_iwarp_qp_state state) } } -const static char *iwarp_state_names[] = { +static const char * const iwarp_state_names[] = { "IDLE", "RTS", "TERMINATE", @@ -2651,6 +2651,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, memset(&data, 0, sizeof(data)); data.input.conn_type = QED_LL2_TYPE_IWARP; + /* SYN will use ctx based queues */ + data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX; data.input.mtu = params->max_mtu; data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; @@ -2683,6 +2685,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, /* Start OOO connection */ data.input.conn_type = QED_LL2_TYPE_OOO; + /* OOO/unaligned will use legacy ll2 queues (ram based) */ + data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY; data.input.mtu = params->max_mtu; n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) / diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 9f36e7948222..1a5fc2ae351c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1631,10 +1631,9 @@ static 
void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn, } } -static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats, - u16 statistics_bin) +static noinline_for_stack void +__qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) { struct eth_pstorm_per_queue_stat pstats; u32 pstats_addr = 0, pstats_len = 0; @@ -1661,10 +1660,9 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, HILO_64_REGPAIR(pstats.error_drop_pkts); } -static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats, - u16 statistics_bin) +static noinline_for_stack void +__qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) { struct tstorm_per_port_stat tstats; u32 tstats_addr, tstats_len; @@ -1709,10 +1707,9 @@ static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, } } -static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats, - u16 statistics_bin) +static noinline_for_stack +void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) { struct eth_ustorm_per_queue_stat ustats; u32 ustats_addr = 0, ustats_len = 0; @@ -1751,10 +1748,9 @@ static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, } } -static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats, - u16 statistics_bin) +static noinline_for_stack void +__qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) { struct eth_mstorm_per_queue_stat mstats; u32 mstats_addr = 0, mstats_len = 0; @@ -1780,9 +1776,9 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); } -static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_eth_stats *p_stats) +static noinline_for_stack void +__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats) { struct qed_eth_stats_common *p_common = &p_stats->common; struct port_stats port_stats; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 19a1a58d60f8..037e5978787e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -962,7 +962,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, return rc; p_ramrod = &p_ent->ramrod.core_rx_queue_start; - + memset(p_ramrod, 0, sizeof(*p_ramrod)); p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); p_ramrod->sb_index = p_rx->rx_sb_index; p_ramrod->complete_event_flg = 1; @@ -996,6 +996,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->action_on_error.error_type = action_on_error; p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; + p_ramrod->zero_prod_flg = 1; + return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -1317,6 +1319,25 @@ qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs) return 0; } +static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn, + struct qed_ll2_acquire_data *data, + u8 *start_idx, u8 *last_idx) +{ + /* LL2 queues handles will be split as follows: + * First will be the legacy queues, and then the ctx based. 
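
A standalone sketch of the handle-range split this comment describes, mirroring the QED_* constants quoted below from qed_ll2.h (legacy handles 0..2, a single ctx based handle at 3); the names here are local stand-ins, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define MAX_CONNS_PF		4	/* QED_MAX_NUM_OF_LL2_CONNS_PF */
#define MAX_LEGACY_CONNS_PF	3	/* QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF */
#define MAX_CTX_CONNS_PF	(MAX_CONNS_PF - MAX_LEGACY_CONNS_PF)

#define LEGACY_CONN_BASE_PF	0
#define CTX_CONN_BASE_PF	MAX_LEGACY_CONNS_PF

enum rx_type { RX_TYPE_LEGACY, RX_TYPE_CTX };

static void calc_allowed_conns(enum rx_type type, uint8_t *start,
			       uint8_t *last)
{
	if (type == RX_TYPE_LEGACY) {
		*start = LEGACY_CONN_BASE_PF;
		*last = *start + MAX_LEGACY_CONNS_PF;	/* handles 0..2 */
	} else {
		*start = CTX_CONN_BASE_PF;
		*last = *start + MAX_CTX_CONNS_PF;	/* handle 3 */
	}
}

int main(void)
{
	uint8_t s, l;

	calc_allowed_conns(RX_TYPE_CTX, &s, &l);
	printf("ctx handles: [%u, %u)\n", (unsigned)s, (unsigned)l);
	return 0;
}

qed_ll2_handle_to_queue_id() below then maps a ctx based handle past the MAX_NUM_LL2_RX_RAM_QUEUES boundary, which is how the firmware tells the two queue flavors apart.
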
+ */ + if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) { + *start_idx = QED_LL2_LEGACY_CONN_BASE_PF; + *last_idx = *start_idx + + QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF; + } else { + /* QED_LL2_RX_TYPE_CTX */ + *start_idx = QED_LL2_CTX_CONN_BASE_PF; + *last_idx = *start_idx + + QED_MAX_NUM_OF_CTX_LL2_CONNS_PF; + } +} + static enum core_error_handle qed_ll2_get_error_choice(enum qed_ll2_error_handle err) { @@ -1337,14 +1358,16 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) struct qed_hwfn *p_hwfn = cxt; qed_int_comp_cb_t comp_rx_cb, comp_tx_cb; struct qed_ll2_info *p_ll2_info = NULL; - u8 i, *p_tx_max; + u8 i, first_idx, last_idx, *p_tx_max; int rc; if (!data->p_connection_handle || !p_hwfn->p_ll2_info) return -EINVAL; + _qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx); + /* Find a free connection to be used */ - for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) { + for (i = first_idx; i < last_idx; i++) { mutex_lock(&p_hwfn->p_ll2_info[i].mutex); if (p_hwfn->p_ll2_info[i].b_active) { mutex_unlock(&p_hwfn->p_ll2_info[i].mutex); @@ -1448,6 +1471,7 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn, enum qed_ll2_error_handle error_input; enum core_error_handle error_mode; u8 action_on_error = 0; + int rc; if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) return 0; @@ -1461,7 +1485,18 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn, error_mode = qed_ll2_get_error_choice(error_input); SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode); - return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); + rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); + if (rc) + return rc; + + if (p_ll2_conn->rx_queue.ctx_based) { + rc = qed_db_recovery_add(p_hwfn->cdev, + p_ll2_conn->rx_queue.set_prod_addr, + &p_ll2_conn->rx_queue.db_data, + DB_REC_WIDTH_64B, DB_REC_KERNEL); + } + + return rc; } static void @@ -1475,13 +1510,41 @@ qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); } +static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn, + u8 handle, + u8 ll2_queue_type) +{ + u8 qid; + + if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY) + return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle; + + /* QED_LL2_RX_TYPE_CTX + * FW distinguishes between the legacy queues (ram based) and the + * ctx based queues by the queue_id. + * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy + * and the queue ids above that are ctx base. + */ + qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] + + MAX_NUM_LL2_RX_RAM_QUEUES; + + /* See comment on the acquire connection for how the ll2 + * queues handles are divided. 
+ */ + qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF); + + return qid; +} + int qed_ll2_establish_connection(void *cxt, u8 connection_handle) { - struct qed_hwfn *p_hwfn = cxt; - struct qed_ll2_info *p_ll2_conn; + struct e4_core_conn_context *p_cxt; struct qed_ll2_tx_packet *p_pkt; + struct qed_ll2_info *p_ll2_conn; + struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_rx_queue *p_rx; struct qed_ll2_tx_queue *p_tx; + struct qed_cxt_info cxt_info; struct qed_ptt *p_ptt; int rc = -EINVAL; u32 i, capacity; @@ -1539,13 +1602,46 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid); if (rc) goto out; + cxt_info.iid = p_ll2_conn->cid; + rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); + if (rc) { + DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n", + p_ll2_conn->cid); + goto out; + } + + p_cxt = cxt_info.p_cxt; + + memset(p_cxt, 0, sizeof(*p_cxt)); - qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle; + qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle, + p_ll2_conn->input.rx_conn_type); p_ll2_conn->queue_id = qid; p_ll2_conn->tx_stats_id = qid; - p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_TSDM_RAM + - TSTORM_LL2_RX_PRODS_OFFSET(qid); + + DP_VERBOSE(p_hwfn, QED_MSG_LL2, + "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n", + p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid); + + if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) { + p_rx->set_prod_addr = p_hwfn->regview + + GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid); + } else { + /* QED_LL2_RX_TYPE_CTX - using doorbell */ + p_rx->ctx_based = 1; + + p_rx->set_prod_addr = p_hwfn->doorbells + + p_hwfn->dpi_start_offset + + DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE); + + /* prepare db data */ + p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid); + SET_FIELD(p_rx->db_data.params, + CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET); + SET_FIELD(p_rx->db_data.params, + CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0); + } + p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells + qed_db_addr(p_ll2_conn->cid, DQ_DEMS_LEGACY); @@ -1556,7 +1652,6 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) DQ_XCM_CORE_TX_BD_PROD_CMD); p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; - rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn); if (rc) goto out; @@ -1590,7 +1685,7 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, struct qed_ll2_rx_packet *p_curp) { struct qed_ll2_rx_packet *p_posting_packet = NULL; - struct core_ll2_rx_prod rx_prod = { 0, 0, 0 }; + struct core_ll2_rx_prod rx_prod = { 0, 0 }; bool b_notify_fw = false; u16 bd_prod, cq_prod; @@ -1615,13 +1710,27 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain); cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain); - rx_prod.bd_prod = cpu_to_le16(bd_prod); - rx_prod.cqe_prod = cpu_to_le16(cq_prod); + if (p_rx->ctx_based) { + /* update producer by giving a doorbell */ + p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod); + p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod); + /* Make sure chain element is updated before ringing the + * doorbell + */ + dma_wmb(); + DIRECT_REG_WR64(p_rx->set_prod_addr, + *((u64 *)&p_rx->db_data)); + } else { + rx_prod.bd_prod = cpu_to_le16(bd_prod); + rx_prod.cqe_prod = cpu_to_le16(cq_prod); - /* Make sure chain element is updated before ringing the doorbell */ - dma_wmb(); + /* 
Make sure chain element is updated before ringing the + * doorbell + */ + dma_wmb(); - DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); + DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); + } } int qed_ll2_post_rx_buffer(void *cxt, @@ -1965,6 +2074,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { p_ll2_conn->rx_queue.b_cb_registered = false; smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ + + if (p_ll2_conn->rx_queue.ctx_based) + qed_db_recovery_del(p_hwfn->cdev, + p_ll2_conn->rx_queue.set_prod_addr, + &p_ll2_conn->rx_queue.db_data); + rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); if (rc) goto out; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 5f01fbd3c073..288642d526b7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -46,6 +46,18 @@ #include "qed_sp.h" #define QED_MAX_NUM_OF_LL2_CONNECTIONS (4) +/* LL2 queues handles will be split as follows: + * first will be legacy queues, and then the ctx based queues. + */ +#define QED_MAX_NUM_OF_LL2_CONNS_PF (4) +#define QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF (3) + +#define QED_MAX_NUM_OF_CTX_LL2_CONNS_PF \ + (QED_MAX_NUM_OF_LL2_CONNS_PF - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF) + +#define QED_LL2_LEGACY_CONN_BASE_PF 0 +#define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF + struct qed_ll2_rx_packet { struct list_head list_entry; @@ -79,6 +91,7 @@ struct qed_ll2_rx_queue { struct qed_chain rxq_chain; struct qed_chain rcq_chain; u8 rx_sb_index; + u8 ctx_based; bool b_cb_registered; __le16 *p_fw_cons; struct list_head active_descq; @@ -86,6 +99,7 @@ struct qed_ll2_rx_queue { struct list_head posting_descq; struct qed_ll2_rx_packet *descq_array; void __iomem *set_prod_addr; + struct core_pwm_prod_update_data db_data; }; struct qed_ll2_tx_queue { diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 829dd60ab937..2c189c637cca 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -67,6 +67,9 @@ #define QED_ROCE_QPS (8192) #define QED_ROCE_DPIS (8) #define QED_RDMA_SRQS QED_ROCE_QPS +#define QED_NVM_CFG_GET_FLAGS 0xA +#define QED_NVM_CFG_GET_PF_FLAGS 0x1A +#define QED_NVM_CFG_MAX_ATTRS 50 static char version[] = "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; @@ -1325,7 +1328,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, &drv_version); if (rc) { DP_NOTICE(cdev, "Failed sending drv version command\n"); - return rc; + goto err4; } } @@ -1333,6 +1336,8 @@ static int qed_slowpath_start(struct qed_dev *cdev, return 0; +err4: + qed_ll2_dealloc_if(cdev); err3: qed_hw_stop(cdev); err2: @@ -1688,6 +1693,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, switch (media_type) { case MEDIA_DA_TWINAX: + *if_capability |= QED_LM_FIBRE_BIT; if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) *if_capability |= QED_LM_20000baseKR2_Full_BIT; /* For DAC media multiple speed capabilities are supported*/ @@ -1707,6 +1713,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, *if_capability |= QED_LM_100000baseCR4_Full_BIT; break; case MEDIA_BASE_T: + *if_capability |= QED_LM_TP_BIT; if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { @@ -1718,6 +1725,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, } } if (board_cfg & 
NVM_CFG1_PORT_PORT_TYPE_MODULE) {
+ *if_capability |= QED_LM_FIBRE_BIT;
if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
*if_capability |= QED_LM_1000baseT_Full_BIT;
if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
@@ -1728,6 +1736,7 @@
case MEDIA_SFPP_10G_FIBER:
case MEDIA_XFP_FIBER:
case MEDIA_MODULE_FIBER:
+ *if_capability |= QED_LM_FIBRE_BIT;
if (capability &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
@@ -1770,6 +1779,7 @@
break;
case MEDIA_KR:
+ *if_capability |= QED_LM_Backplane_BIT;
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
*if_capability |= QED_LM_20000baseKR2_Full_BIT;
if (capability &
@@ -1821,7 +1831,6 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->link_up = true;
/* TODO - at the moment assume supported and advertised speed equal */
- if_link->supported_caps = QED_LM_FIBRE_BIT;
if (link_caps.default_speed_autoneg)
if_link->supported_caps |= QED_LM_Autoneg_BIT;
if (params.pause.autoneg ||
@@ -2227,6 +2236,135 @@ static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
return 0;
}
+/* Binary file format -
+ * /----------------------------------------------------------------------\
+ * 0B | 0x5 [command index] |
+ * 4B | Number of config attributes | Reserved |
+ * 4B | Config ID | Entity ID | Length |
+ * 4B | Value |
+ * | |
+ * \----------------------------------------------------------------------/
+ * There can be several cfg_id-entity_id-Length-Value sets as specified by
+ * 'Number of config attributes'.
+ *
+ * The API parses config attributes from the user provided buffer and flashes
+ * them to the respective NVM path using Management FW interface.
+ */
+static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ u8 entity_id, len, buf[32];
+ bool need_nvm_init = true;
+ struct qed_ptt *ptt;
+ u16 cfg_id, count;
+ int rc = 0, i;
+ u32 flags;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ /* NVM CFG ID attribute header */
+ *data += 4;
+ count = *((u16 *)*data);
+ *data += 4;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Read config ids: num_attrs = %0d\n", count);
+ /* NVM CFG ID attributes. Start loop index from 1 to avoid additional
+ * arithmetic operations in the implementation.
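
A standalone sketch of a parser for one cfg_id/entity_id/length/value record from the boxed format above. The struct name and the 32-byte value bound are illustrative (the driver uses a u8 buf[32]); like the driver's u16 cast, it assumes a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct nvm_cfg_attr {
	uint16_t cfg_id;
	uint8_t entity_id;
	uint8_t len;
	uint8_t value[32];
};

/* Parse one record, return the advanced cursor, as the driver's loop does. */
static const uint8_t *parse_attr(const uint8_t *p, struct nvm_cfg_attr *a)
{
	memcpy(&a->cfg_id, p, sizeof(a->cfg_id));	/* 2B config ID */
	p += sizeof(a->cfg_id);
	a->entity_id = *p++;				/* 1B entity ID */
	a->len = *p++;					/* 1B value length */
	if (a->len > sizeof(a->value))			/* sketch-only guard */
		a->len = sizeof(a->value);
	memcpy(a->value, p, a->len);
	return p + a->len;
}

int main(void)
{
	/* cfg_id=7, entity=0, len=2, value=0x1234 (sample record) */
	const uint8_t buf[] = { 0x07, 0x00, 0x00, 0x02, 0x34, 0x12 };
	struct nvm_cfg_attr a;

	parse_attr(buf, &a);
	printf("cfg_id=%u entity=%u len=%u\n",
	       (unsigned)a.cfg_id, (unsigned)a.entity_id, (unsigned)a.len);
	return 0;
}
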
+ */ + for (i = 1; i <= count; i++) { + cfg_id = *((u16 *)*data); + *data += 2; + entity_id = **data; + (*data)++; + len = **data; + (*data)++; + memcpy(buf, *data, len); + *data += len; + + flags = 0; + if (need_nvm_init) { + flags |= QED_NVM_CFG_OPTION_INIT; + need_nvm_init = false; + } + + /* Commit to flash and free the resources */ + if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) { + flags |= QED_NVM_CFG_OPTION_COMMIT | + QED_NVM_CFG_OPTION_FREE; + need_nvm_init = true; + } + + if (entity_id) + flags |= QED_NVM_CFG_OPTION_ENTITY_SEL; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "cfg_id = %d entity = %d len = %d\n", cfg_id, + entity_id, len); + rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags, + buf, len); + if (rc) { + DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id); + break; + } + } + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +#define QED_MAX_NVM_BUF_LEN 32 +static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + u8 buf[QED_MAX_NVM_BUF_LEN]; + struct qed_ptt *ptt; + u32 len; + int rc; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return QED_MAX_NVM_BUF_LEN; + + rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf, + &len); + if (rc || !len) { + DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); + len = QED_MAX_NVM_BUF_LEN; + } + + qed_ptt_release(hwfn, ptt); + + return len; +} + +static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data, + u32 cmd, u32 entity_id) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + u32 flags, len; + int rc = 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "Read config cmd = %d entity id %d\n", cmd, entity_id); + flags = entity_id ? 
+static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data, + u32 cmd, u32 entity_id) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + u32 flags, len; + int rc = 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "Read config cmd = %d entity id %d\n", cmd, entity_id); + flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS; + rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len); + if (rc) + DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + static int qed_nvm_flash(struct qed_dev *cdev, const char *name) { const struct firmware *image; @@ -2268,6 +2406,9 @@ static int qed_nvm_flash(struct qed_dev *cdev, const char *name) rc = qed_nvm_flash_image_access(cdev, &data, &check_resp); break; + case QED_NVM_FLASH_CMD_NVM_CFG_ID: + rc = qed_nvm_flash_cfg_write(cdev, &data); + break; default: DP_ERR(cdev, "Unknown command %08x\n", cmd_type); rc = -EINVAL; @@ -2483,6 +2624,26 @@ static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, return rc; } +static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int rc = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + rc = qed_dbg_grc_config(hwfn, cfg_id, val); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev) { return QED_AFFIN_HWFN_IDX(cdev); @@ -2536,6 +2697,9 @@ const struct qed_common_ops qed_common_ops_pass = { .db_recovery_del = &qed_db_recovery_del, .read_module_eeprom = &qed_read_module_eeprom, .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx, + .read_nvm_cfg = &qed_nvm_flash_cfg_read, + .read_nvm_cfg_len = &qed_nvm_flash_cfg_len, + .set_grc_config = &qed_set_grc_config, }; void qed_get_protocol_stats(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 758702c1ce9c..280527cc0578 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -48,6 +48,8 @@ #include "qed_reg_addr.h" #include "qed_sriov.h" +#define GRCBASE_MCP 0xe00000 + #define QED_MCP_RESP_ITER_US 10 #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ @@ -3165,6 +3167,9 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, case QED_NVM_IMAGE_FCOE_CFG: type = NVM_TYPE_FCOE_CFG; break; + case QED_NVM_IMAGE_MDUMP: + type = NVM_TYPE_MDUMP; + break; case QED_NVM_IMAGE_NVM_CFG1: type = NVM_TYPE_NVM_CFG1; break; @@ -3261,9 +3266,12 @@ static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id) case QED_ILT: mfw_res_id = RESOURCE_ILT_E; break; - case QED_LL2_QUEUE: + case QED_LL2_RAM_QUEUE: mfw_res_id = RESOURCE_LL2_QUEUE_E; break; + case QED_LL2_CTX_QUEUE: + mfw_res_id = RESOURCE_LL2_CQS_E; + break; case QED_RDMA_CNQ_RAM: case QED_CMDQS_CQS: /* CNQ/CMDQS are the same resource */ @@ -3750,3 +3758,64 @@ int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return 0; } +
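The two mailbox helpers that follow pack option_id, entity_id and the option flags into a single 32-bit mailbox parameter via QED_MFW_SET_FIELD. Below is a toy model of that mask-and-shift pattern, using made-up field layouts rather than the real DRV_MB_PARAM_NVM_CFG_* masks from the HSI headers.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout -- the real masks and shifts come from the
 * DRV_MB_PARAM_NVM_CFG_* definitions and differ from these values.
 */
#define OPTION_ID_MASK		0x0000ffffU
#define OPTION_ID_SHIFT		0
#define OPTION_INIT_MASK	0x00010000U
#define OPTION_INIT_SHIFT	16

/* Same pattern as QED_MFW_SET_FIELD: clear the field, then OR in the
 * shifted value, masked to the field width.
 */
#define MFW_SET_FIELD(param, name, val)					\
	((param) = ((param) & ~name##_MASK) |				\
		   (((uint32_t)(val) << name##_SHIFT) & name##_MASK))

int main(void)
{
	uint32_t mb_param = 0;

	MFW_SET_FIELD(mb_param, OPTION_ID, 0x1234);
	MFW_SET_FIELD(mb_param, OPTION_INIT, 1);
	printf("mb_param = 0x%08x\n", (unsigned)mb_param); /* 0x00011234 */
	return 0;
}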
+int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, + u32 *p_len) +{ + u32 mb_param = 0, resp, param; + int rc; + + QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id); + if (flags & QED_NVM_CFG_OPTION_INIT) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1); + if (flags & QED_NVM_CFG_OPTION_FREE) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1); + if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) { + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1); + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID, + entity_id); + } + + rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_GET_NVM_CFG_OPTION, + mb_param, &resp, &param, p_len, (u32 *)p_buf); + + return rc; +} + +int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, + u32 len) +{ + u32 mb_param = 0, resp, param; + + QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id); + if (flags & QED_NVM_CFG_OPTION_ALL) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1); + if (flags & QED_NVM_CFG_OPTION_INIT) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1); + if (flags & QED_NVM_CFG_OPTION_COMMIT) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1); + if (flags & QED_NVM_CFG_OPTION_FREE) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1); + if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) { + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1); + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID, + entity_id); + } + + return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_SET_NVM_CFG_OPTION, + mb_param, &resp, &param, len, (u32 *)p_buf); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index e4f8fe4bd062..9c4c2763de8d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -251,6 +251,12 @@ union qed_mfw_tlv_data { struct qed_mfw_tlv_iscsi iscsi; }; +#define QED_NVM_CFG_OPTION_ALL BIT(0) +#define QED_NVM_CFG_OPTION_INIT BIT(1) +#define QED_NVM_CFG_OPTION_COMMIT BIT(2) +#define QED_NVM_CFG_OPTION_FREE BIT(3) +#define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4) + /** * @brief - returns the link params of the hw function * * @param p_hwfn * @@ -1202,4 +1208,33 @@ int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); */ int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief Get NVM config attribute value. + * + * @param p_hwfn + * @param p_ptt + * @param option_id + * @param entity_id + * @param flags + * @param p_buf + * @param p_len + */ +int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, + u32 *p_len); + +/** + * @brief Set NVM config attribute value.
+ * + * @param p_hwfn + * @param p_ptt + * @param option_id + * @param entity_id + * @param flags + * @param p_buf + * @param len + */ +int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, + u32 len); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index 0dacf2c18c09..3e613058e225 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -44,8 +44,8 @@ /* Add/subtract the Adjustment_Value when making a Drift adjustment */ #define QED_DRIFT_CNTR_DIRECTION_SHIFT 31 #define QED_TIMESTAMP_MASK BIT(16) -/* Param mask for Hardware to detect/timestamp the unicast PTP packets */ -#define QED_PTP_UCAST_PARAM_MASK 0xF +/* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */ +#define QED_PTP_UCAST_PARAM_MASK 0x70F static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 158ac0738911..38b1f402f7ed 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -798,9 +798,8 @@ static int qed_rdma_add_user(void *rdma_cxt, /* Calculate the corresponding DPI address */ dpi_start_offset = p_hwfn->dpi_start_offset; - out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells + - dpi_start_offset + - ((out_params->dpi) * p_hwfn->dpi_size)); + out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset + + out_params->dpi * p_hwfn->dpi_size; out_params->dpi_phys_addr = p_hwfn->db_phys_addr + dpi_start_offset + diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 60f850c3bdd6..3dcb6ff58e73 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -178,6 +178,8 @@ 0x008c80UL #define MCP_REG_SCRATCH \ 0xe20000UL +#define MCP_REG_SCRATCH_SIZE \ + 57344 #define CNIG_REG_NW_PORT_MODE_BB \ 0x218200UL #define MISCS_REG_CHIP_NUM \ @@ -212,6 +214,8 @@ 0x580900UL #define DBG_REG_CLIENT_ENABLE \ 0x010004UL +#define DBG_REG_TIMESTAMP_VALID_EN \ + 0x010b58UL #define DMAE_REG_INIT \ 0x00c000UL #define DORQ_REG_IFEN \ @@ -350,6 +354,10 @@ 0x24000cUL #define PSWRQ2_REG_ILT_MEMORY \ 0x260000UL +#define PSWRQ2_REG_ILT_MEMORY_SIZE_BB \ + 15200 +#define PSWRQ2_REG_ILT_MEMORY_SIZE_K2 \ + 22000 #define PSWHST_REG_DISCARD_INTERNAL_WRITES \ 0x2a0040UL #define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \ @@ -1453,6 +1461,8 @@ 0x1401404UL #define XSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1401408UL +#define XSEM_REG_DBG_GPRE_VECT \ + 0x1401410UL #define XSEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1401420UL #define XSEM_REG_FAST_MEMORY \ @@ -1465,6 +1475,8 @@ 0x1501404UL #define YSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1501408UL +#define YSEM_REG_DBG_GPRE_VECT \ + 0x1501410UL #define YSEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1501420UL #define YSEM_REG_FAST_MEMORY \ @@ -1479,6 +1491,8 @@ 0x1601404UL #define PSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1601408UL +#define PSEM_REG_DBG_GPRE_VECT \ + 0x1601410UL #define PSEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1601420UL #define PSEM_REG_FAST_MEMORY \ @@ -1493,6 +1507,8 @@ 0x1701404UL #define TSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1701408UL +#define TSEM_REG_DBG_GPRE_VECT \ + 0x1701410UL #define TSEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1701420UL #define TSEM_REG_FAST_MEMORY \ @@ -1507,12 +1523,16 @@ 0x1801404UL #define MSEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1801408UL +#define 
MSEM_REG_DBG_GPRE_VECT \ + 0x1801410UL #define MSEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1801420UL #define MSEM_REG_FAST_MEMORY \ 0x1840000UL #define USEM_REG_SLOW_DBG_EMPTY_BB_K2 \ 0x1901140UL +#define SEM_FAST_REG_INT_RAM_SIZE \ + 20480 #define USEM_REG_SYNC_DBG_EMPTY \ 0x1901160UL #define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ @@ -1521,14 +1541,26 @@ 0x1901404UL #define USEM_REG_DBG_FRAME_MODE_BB_K2 \ 0x1901408UL +#define USEM_REG_DBG_GPRE_VECT \ + 0x1901410UL #define USEM_REG_DBG_MODE1_CFG_BB_K2 \ 0x1901420UL #define USEM_REG_FAST_MEMORY \ 0x1940000UL +#define SEM_FAST_REG_DBG_MODE23_SRC_DISABLE \ + 0x000748UL +#define SEM_FAST_REG_DBG_MODE4_SRC_DISABLE \ + 0x00074cUL +#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE \ + 0x000750UL +#define SEM_FAST_REG_DEBUG_ACTIVE \ + 0x000740UL #define SEM_FAST_REG_INT_RAM \ 0x020000UL #define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \ 20480 +#define SEM_FAST_REG_RECORD_FILTER_ENABLE \ + 0x000768UL #define GRC_REG_TRACE_FIFO_VALID_DATA \ 0x050064UL #define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW \ @@ -1583,14 +1615,20 @@ 0x181530UL #define DBG_REG_DBG_BLOCK_ON \ 0x010454UL +#define DBG_REG_FILTER_ENABLE \ + 0x0109d0UL #define DBG_REG_FRAMING_MODE \ 0x010058UL +#define DBG_REG_TRIGGER_ENABLE \ + 0x01054cUL #define SEM_FAST_REG_VFC_DATA_WR \ 0x000b40UL #define SEM_FAST_REG_VFC_ADDR \ 0x000b44UL #define SEM_FAST_REG_VFC_DATA_RD \ 0x000b48UL +#define SEM_FAST_REG_VFC_STATUS \ + 0x000b4cUL #define RSS_REG_RSS_RAM_DATA \ 0x238c20UL #define RSS_REG_RSS_RAM_DATA_SIZE \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index e49fada85410..37e70562a964 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -900,7 +900,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn, goto err_resp; out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn); - rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag), + rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags), ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG); dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 96ab77ae6af5..b7b4fbbbccfe 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -120,9 +120,7 @@ union ramrod_data { struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate; struct fcoe_stat_ramrod_params fcoe_stat; - struct iscsi_slow_path_hdr iscsi_empty; struct iscsi_init_ramrod_params iscsi_init; - struct iscsi_spe_func_dstry iscsi_destroy; struct iscsi_spe_conn_offload iscsi_conn_offload; struct iscsi_conn_update_ramrod_params iscsi_conn_update; struct iscsi_spe_conn_mac_update iscsi_conn_mac_update; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 7e0b795230b2..900bc603e30a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -331,8 +331,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, u8 sb_index = p_hwfn->p_eq->eq_sb_index; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - int rc = -EINVAL; u8 page_cnt, i; + int rc; /* update initial eq producer */ qed_eq_prod_update(p_hwfn, @@ -447,7 +447,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn) { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - int rc = -EINVAL; + int rc; /* Get SPQ entry */ memset(&init_data, 
0, sizeof(init_data)); @@ -471,7 +471,7 @@ int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn) { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - int rc = -EOPNOTSUPP; + int rc; if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) { DP_INFO(p_hwfn, "Invalid priority type %d\n", @@ -509,7 +509,7 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - int rc = -EINVAL; + int rc; if (IS_VF(p_hwfn->cdev)) return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn); @@ -546,7 +546,7 @@ int qed_sp_pf_stop(struct qed_hwfn *p_hwfn) { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - int rc = -EINVAL; + int rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 78f77b712b10..66876af814c4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -352,7 +352,7 @@ static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, /* propagate bulletin board via dmae to vm memory */ memset(&params, 0, sizeof(params)); - params.flags = QED_DMAE_FLAG_VF_DST; + SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1); params.dst_vfid = p_vf->abs_vf_id; return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, p_vf->vf_bulletin, p_vf->bulletin.size / 4, @@ -1225,8 +1225,8 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn, eng_vf_id = p_vf->abs_vf_id; - memset(&params, 0, sizeof(struct qed_dmae_params)); - params.flags = QED_DMAE_FLAG_VF_DST; + memset(&params, 0, sizeof(params)); + SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1); params.dst_vfid = eng_vf_id; qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64), @@ -2005,7 +2005,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, (qed_iov_validate_active_txq(p_hwfn, vf))) { vf->b_malicious = true; DP_NOTICE(p_hwfn, - "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n", + "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n", vf->abs_vf_id); status = PFVF_STATUS_MALICIOUS; goto out; @@ -4103,8 +4103,9 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, if (!vf_info) return -EINVAL; - memset(&params, 0, sizeof(struct qed_dmae_params)); - params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST; + memset(&params, 0, sizeof(params)); + SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1); + SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1); params.src_vfid = vf_info->abs_vf_id; if (qed_dmae_host2host(p_hwfn, ptt, @@ -4354,9 +4355,9 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int vfid, int val) { - struct qed_mcp_link_state *p_link; struct qed_vf_info *vf; u8 abs_vp_id = 0; + u16 rl_id; int rc; vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); @@ -4367,10 +4368,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, if (rc) return rc; - p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output; - - return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val, - p_link->speed); + rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */ + return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val); } static int diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index
9a8fd79611f2..368e88565783 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -305,7 +305,7 @@ void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, /** * @brief Read sriov related information and allocated resources - * reads from configuraiton space, shmem, etc. + * reads from configuration space, shmem, etc. * * @param p_hwfn * diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 5dda547772c1..856051f50eb7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -231,7 +231,7 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n", + "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n", p_req->num_rxqs, p_resp->num_rxqs, p_req->num_rxqs, diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 0e931c04fecf..e8a1b27db84d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -177,6 +177,20 @@ enum qede_flags_bit { QEDE_FLAGS_TX_TIMESTAMPING_EN }; +#define QEDE_DUMP_MAX_ARGS 4 +enum qede_dump_cmd { + QEDE_DUMP_CMD_NONE = 0, + QEDE_DUMP_CMD_NVM_CFG, + QEDE_DUMP_CMD_GRCDUMP, + QEDE_DUMP_CMD_MAX +}; + +struct qede_dump_info { + enum qede_dump_cmd cmd; + u8 num_args; + u32 args[QEDE_DUMP_MAX_ARGS]; +}; + struct qede_dev { struct qed_dev *cdev; struct net_device *ndev; @@ -262,6 +276,7 @@ struct qede_dev { struct qede_rdma_dev rdma_info; struct bpf_prog *xdp_prog; + struct qede_dump_info dump_info; }; enum QEDE_STATE { @@ -449,7 +464,7 @@ struct qede_fastpath { struct qede_tx_queue *txq; struct qede_tx_queue *xdp_tx; -#define VEC_NAME_SIZE (FIELD_SIZEOF(struct net_device, name) + 8) +#define VEC_NAME_SIZE (sizeof_field(struct net_device, name) + 8) char name[VEC_NAME_SIZE]; }; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index e85f9fef930c..8a426afb6a55 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -48,6 +48,8 @@ {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)} #define QEDE_SELFTEST_POLL_COUNT 100 +#define QEDE_DUMP_VERSION 0x1 +#define QEDE_DUMP_NVM_ARG_COUNT 2 static const struct { u64 offset; @@ -424,12 +426,13 @@ struct qede_link_mode_mapping { }; static const struct qede_link_mode_mapping qed_lm_map[] = { + {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT}, {QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, {QED_LM_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT}, - {QED_LM_2500baseX_Full_BIT, ETHTOOL_LINK_MODE_2500baseX_Full_BIT}, + {QED_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT}, {QED_LM_Backplane_BIT, ETHTOOL_LINK_MODE_Backplane_BIT}, {QED_LM_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT}, {QED_LM_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT}, @@ -1972,6 +1975,117 @@ static int qede_get_module_eeprom(struct net_device 
*dev, return rc; } +static int qede_set_dump(struct net_device *dev, struct ethtool_dump *val) +{ + struct qede_dev *edev = netdev_priv(dev); + int rc = 0; + + if (edev->dump_info.cmd == QEDE_DUMP_CMD_NONE) { + if (val->flag > QEDE_DUMP_CMD_MAX) { + DP_ERR(edev, "Invalid command %d\n", val->flag); + return -EINVAL; + } + edev->dump_info.cmd = val->flag; + edev->dump_info.num_args = 0; + return 0; + } + + if (edev->dump_info.num_args == QEDE_DUMP_MAX_ARGS) { + DP_ERR(edev, "Arg count = %d\n", edev->dump_info.num_args); + return -EINVAL; + } + + switch (edev->dump_info.cmd) { + case QEDE_DUMP_CMD_NVM_CFG: + edev->dump_info.args[edev->dump_info.num_args] = val->flag; + edev->dump_info.num_args++; + break; + case QEDE_DUMP_CMD_GRCDUMP: + rc = edev->ops->common->set_grc_config(edev->cdev, + val->flag, 1); + break; + default: + break; + } + + return rc; +} + +static int qede_get_dump_flag(struct net_device *dev, + struct ethtool_dump *dump) +{ + struct qede_dev *edev = netdev_priv(dev); + + if (!edev->ops || !edev->ops->common) { + DP_ERR(edev, "Edev ops not populated\n"); + return -EINVAL; + } + + dump->version = QEDE_DUMP_VERSION; + switch (edev->dump_info.cmd) { + case QEDE_DUMP_CMD_NVM_CFG: + dump->flag = QEDE_DUMP_CMD_NVM_CFG; + dump->len = edev->ops->common->read_nvm_cfg_len(edev->cdev, + edev->dump_info.args[0]); + break; + case QEDE_DUMP_CMD_GRCDUMP: + dump->flag = QEDE_DUMP_CMD_GRCDUMP; + dump->len = edev->ops->common->dbg_all_data_size(edev->cdev); + break; + default: + DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd); + return -EINVAL; + } + + DP_VERBOSE(edev, QED_MSG_DEBUG, + "dump->version = 0x%x dump->flag = %d dump->len = %d\n", + dump->version, dump->flag, dump->len); + return 0; +} + +static int qede_get_dump_data(struct net_device *dev, + struct ethtool_dump *dump, void *buf) +{ + struct qede_dev *edev = netdev_priv(dev); + int rc = 0; + + if (!edev->ops || !edev->ops->common) { + DP_ERR(edev, "Edev ops not populated\n"); + rc = -EINVAL; + goto err; + } + + switch (edev->dump_info.cmd) { + case QEDE_DUMP_CMD_NVM_CFG: + if (edev->dump_info.num_args != QEDE_DUMP_NVM_ARG_COUNT) { + DP_ERR(edev, "Arg count = %d required = %d\n", + edev->dump_info.num_args, + QEDE_DUMP_NVM_ARG_COUNT); + rc = -EINVAL; + goto err; + } + rc = edev->ops->common->read_nvm_cfg(edev->cdev, (u8 **)&buf, + edev->dump_info.args[0], + edev->dump_info.args[1]); + break; + case QEDE_DUMP_CMD_GRCDUMP: + memset(buf, 0, dump->len); + rc = edev->ops->common->dbg_all_data(edev->cdev, buf); + break; + default: + DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd); + rc = -EINVAL; + break; + } + +err: + edev->dump_info.cmd = QEDE_DUMP_CMD_NONE; + edev->dump_info.num_args = 0; + memset(edev->dump_info.args, 0, sizeof(edev->dump_info.args)); + + return rc; +} + static const struct ethtool_ops qede_ethtool_ops = { .get_link_ksettings = qede_get_link_ksettings, .set_link_ksettings = qede_set_link_ksettings, @@ -2013,6 +2127,9 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_tunable = qede_get_tunable, .set_tunable = qede_set_tunable, .flash_device = qede_flash_device, + .get_dump_flag = qede_get_dump_flag, + .get_dump_data = qede_get_dump_data, + .set_dump = qede_set_dump, }; static const struct ethtool_ops qede_vf_ethtool_ops = {
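The three callbacks above implement a small stateful protocol: the first ETHTOOL_SET_DUMP call selects the dump command, subsequent ones append its arguments, and the two GET_DUMP calls size and fetch the buffer. Below is a hedged userspace sketch of the NVM-config flow through the standard SIOCETHTOOL ioctl; the interface name, option id and entity id are placeholders, and error handling is omitted.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Wrap the SIOCETHTOOL ioctl: point ifr_data at an ethtool command struct. */
static int ethtool_ioctl(int fd, const char *ifname, void *cmd)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = cmd;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}

int main(void)
{
	const char *ifname = "eth0";	/* placeholder interface */
	struct ethtool_dump sel = { .cmd = ETHTOOL_SET_DUMP };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	sel.flag = 1;			/* select QEDE_DUMP_CMD_NVM_CFG */
	ethtool_ioctl(fd, ifname, &sel);
	sel.flag = 1;			/* arg 0: config option id (sample) */
	ethtool_ioctl(fd, ifname, &sel);
	sel.flag = 0;			/* arg 1: entity id (sample) */
	ethtool_ioctl(fd, ifname, &sel);

	/* Ask the driver how large the dump will be. */
	struct ethtool_dump flag = { .cmd = ETHTOOL_GET_DUMP_FLAG };
	ethtool_ioctl(fd, ifname, &flag);

	/* Fetch the dump payload itself. */
	struct ethtool_dump *data = calloc(1, sizeof(*data) + flag.len);
	data->cmd = ETHTOOL_GET_DUMP_DATA;
	data->len = flag.len;
	ethtool_ioctl(fd, ifname, data);
	printf("read %u bytes of NVM config\n", data->len);
	free(data);
	return 0;
}

Note that qede_get_dump_data() resets dump_info on every call, so the whole selection sequence has to be repeated before each retrieval.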
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 9a6a9a008714..d1ce4531d01a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1230,7 +1230,7 @@ qede_configure_mcast_filtering(struct net_device *ndev, netif_addr_lock_bh(ndev); mc_count = netdev_mc_count(ndev); - if (mc_count < 64) { + if (mc_count <= 64) { netdev_for_each_mc_addr(ha, ndev) { ether_addr_copy(temp, ha->addr); temp += ETH_ALEN; @@ -1298,7 +1298,7 @@ void qede_config_rx_mode(struct net_device *ndev) rx_mode.type = QED_FILTER_TYPE_RX_MODE; /* Remove all previous unicast secondary macs and multicast macs - * (configrue / leave the primary mac) + * (configure / leave the primary mac) */ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE, edev->ndev->dev_addr); diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 0ae28f0d2523..c6c20776b474 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -779,8 +779,7 @@ qede_rx_build_skb(struct qede_dev *edev, return NULL; skb_reserve(skb, pad); - memcpy(skb_put(skb, len), - page_address(bd->data) + offset, len); + skb_put_data(skb, page_address(bd->data) + offset, len); qede_reuse_page(rxq, bd); goto out; } @@ -849,13 +848,13 @@ static void qede_tpa_start(struct qede_dev *edev, qede_set_gro_params(edev, tpa_info->skb, cqe); cons_buf: /* We still need to handle bd_len_list to consume buffers */ - if (likely(cqe->ext_bd_len_list[0])) + if (likely(cqe->bw_ext_bd_len_list[0])) qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, - le16_to_cpu(cqe->ext_bd_len_list[0])); + le16_to_cpu(cqe->bw_ext_bd_len_list[0])); - if (unlikely(cqe->ext_bd_len_list[1])) { + if (unlikely(cqe->bw_ext_bd_len_list[1])) { DP_ERR(edev, - "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n"); + "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n"); tpa_info->state = QEDE_AGG_STATE_ERROR; } } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 8d1c208f778f..34fa3917eb33 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1208,8 +1208,16 @@ enum qede_remove_mode { static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) { struct net_device *ndev = pci_get_drvdata(pdev); - struct qede_dev *edev = netdev_priv(ndev); - struct qed_dev *cdev = edev->cdev; + struct qede_dev *edev; + struct qed_dev *cdev; + + if (!ndev) { + dev_info(&pdev->dev, "Device has already been removed\n"); + return; + } + + edev = netdev_priv(ndev); + cdev = edev->cdev; DP_INFO(edev, "Starting qede_remove\n"); @@ -1398,6 +1406,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) rxq->rx_buf_seg_size = roundup_pow_of_two(size); } else { rxq->rx_buf_seg_size = PAGE_SIZE; + edev->ndev->features &= ~NETIF_F_GRO_HW; } /* Allocate the parallel driver ring for Rx buffers */ @@ -1442,6 +1451,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) } } + edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW); if (!edev->gro_disable) qede_set_tpa_param(rxq); err: @@ -1694,8 +1704,6 @@ static void qede_init_fp(struct qede_dev *edev) snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", edev->ndev->name, queue_id); } - - edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW); } static int qede_set_real_num_queues(struct qede_dev *edev) @@ -2107,12 +2115,8 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) if (rc) goto out; - fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1); -
if (IS_ERR(fp->rxq->xdp_prog)) { - rc = PTR_ERR(fp->rxq->xdp_prog); - fp->rxq->xdp_prog = NULL; - goto out; - } + bpf_prog_add(edev->xdp_prog, 1); + fp->rxq->xdp_prog = edev->xdp_prog; } if (fp->type & QEDE_FASTPATH_TX) { diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index f815435cf106..4c7f7a7fc151 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -247,6 +247,7 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev) break; case HWTSTAMP_TX_ONESTEP_SYNC: + case HWTSTAMP_TX_ONESTEP_P2P: DP_ERR(edev, "One-step timestamping is not supported\n"); return -ERANGE; } diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 457444894d80..0fade19e00d4 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2756,6 +2756,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) int err; for (i = 0; i < qdev->num_large_buffers; i++) { + lrg_buf_cb = &qdev->lrg_buf[i]; + memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); + skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!skb)) { @@ -2766,11 +2769,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) ql_free_large_buffers(qdev); return -ENOMEM; } else { - - lrg_buf_cb = &qdev->lrg_buf[i]; - memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); lrg_buf_cb->index = i; - lrg_buf_cb->skb = skb; /* * We save some space to copy the ethhdr from first * buffer @@ -2787,10 +2786,12 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", err); + dev_kfree_skb_irq(skb); ql_free_large_buffers(qdev); return -ENOMEM; } + lrg_buf_cb->skb = skb; dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - @@ -3601,7 +3602,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) return 0; } -static void ql3xxx_tx_timeout(struct net_device *ndev) +static void ql3xxx_tx_timeout(struct net_device *ndev, unsigned int txqueue) { struct ql3_adapter *qdev = netdev_priv(ndev); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index a496390b8632..07f9067affc6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2043,6 +2043,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev, break; } entry += p_hdr->size; + cond_resched(); } p_dev->ahw->reset.seq_index = index; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index a4cd6f2cfb86..75d83c3cbf27 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -20,7 +20,7 @@ struct qlcnic_stats { int stat_offset; }; -#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m) +#define QLC_SIZEOF(m) sizeof_field(struct qlcnic_adapter, m) #define QLC_OFF(m) offsetof(struct qlcnic_adapter, m) static const u32 qlcnic_fw_dump_level[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 14f26bf3b388..ac61f614de37 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -581,7 +581,7 @@ static int 
qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf) { struct qlcnic_skb_frag *nf; - struct skb_frag_struct *frag; + skb_frag_t *frag; int i, nr_frags; dma_addr_t map; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index c07438db30ba..9dd6cb36f366 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -56,7 +56,7 @@ static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void qlcnic_remove(struct pci_dev *pdev); static int qlcnic_open(struct net_device *netdev); static int qlcnic_close(struct net_device *netdev); -static void qlcnic_tx_timeout(struct net_device *netdev); +static void qlcnic_tx_timeout(struct net_device *netdev, unsigned int txqueue); static void qlcnic_attach_work(struct work_struct *work); static void qlcnic_fwinit_work(struct work_struct *work); @@ -3068,7 +3068,7 @@ static void qlcnic_dump_rings(struct qlcnic_adapter *adapter) } -static void qlcnic_tx_timeout(struct net_device *netdev) +static void qlcnic_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct qlcnic_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index afa10a163da1..f34ae8c75bc5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -703,6 +703,7 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter, addr += 16; reg_read -= 16; ret += 16; + cond_resched(); } out: mutex_unlock(&adapter->ahw->mem_lock); @@ -1383,6 +1384,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) buf_offset += entry->hdr.cap_size; entry_offset += entry->hdr.offset; buffer = fw_dump->data + buf_offset; + cond_resched(); } fw_dump->clr = 1; diff --git a/drivers/net/ethernet/qlogic/qlge/Makefile b/drivers/net/ethernet/qlogic/qlge/Makefile deleted file mode 100644 index 1dc2568e820c..000000000000 --- a/drivers/net/ethernet/qlogic/qlge/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Makefile for the Qlogic 10GbE PCI Express ethernet driver -# - -obj-$(CONFIG_QLGE) += qlge.o - -qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h deleted file mode 100644 index ad7c5eb8a3b6..000000000000 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ /dev/null @@ -1,2353 +0,0 @@ -/* - * QLogic QLA41xx NIC HBA Driver - * Copyright (c) 2003-2006 QLogic Corporation - * - * See LICENSE.qlge for copyright and licensing details. - */ -#ifndef _QLGE_H_ -#define _QLGE_H_ - -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/netdevice.h> -#include <linux/rtnetlink.h> -#include <linux/if_vlan.h> - -/* - * General definitions... 
- */ -#define DRV_NAME "qlge" -#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " -#define DRV_VERSION "1.00.00.35" - -#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ - -#define QLGE_VENDOR_ID 0x1077 -#define QLGE_DEVICE_ID_8012 0x8012 -#define QLGE_DEVICE_ID_8000 0x8000 -#define QLGE_MEZZ_SSYS_ID_068 0x0068 -#define QLGE_MEZZ_SSYS_ID_180 0x0180 -#define MAX_CPUS 8 -#define MAX_TX_RINGS MAX_CPUS -#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1) - -#define NUM_TX_RING_ENTRIES 256 -#define NUM_RX_RING_ENTRIES 256 - -#define NUM_SMALL_BUFFERS 512 -#define NUM_LARGE_BUFFERS 512 -#define DB_PAGE_SIZE 4096 - -/* Calculate the number of (4k) pages required to - * contain a buffer queue of the given length. - */ -#define MAX_DB_PAGES_PER_BQ(x) \ - (((x * sizeof(u64)) / DB_PAGE_SIZE) + \ - (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0)) - -#define RX_RING_SHADOW_SPACE (sizeof(u64) + \ - MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \ - MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64)) -#define LARGE_BUFFER_MAX_SIZE 8192 -#define LARGE_BUFFER_MIN_SIZE 2048 - -#define MAX_CQ 128 -#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */ -#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */ -#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2) -#define UDELAY_COUNT 3 -#define UDELAY_DELAY 100 - - -#define TX_DESC_PER_IOCB 8 - -#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0 -#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) -#else /* all other page sizes */ -#define TX_DESC_PER_OAL 0 -#endif - -/* Word shifting for converting 64-bit - * address to a series of 16-bit words. - * This is used for some MPI firmware - * mailbox commands. - */ -#define LSW(x) ((u16)(x)) -#define MSW(x) ((u16)((u32)(x) >> 16)) -#define LSD(x) ((u32)((u64)(x))) -#define MSD(x) ((u32)((((u64)(x)) >> 32))) - -/* MPI test register definitions. This register - * is used for determining alternate NIC function's - * PCI->func number. - */ -enum { - MPI_TEST_FUNC_PORT_CFG = 0x1002, - MPI_TEST_FUNC_PRB_CTL = 0x100e, - MPI_TEST_FUNC_PRB_EN = 0x18a20000, - MPI_TEST_FUNC_RST_STS = 0x100a, - MPI_TEST_FUNC_RST_FRC = 0x00000003, - MPI_TEST_NIC_FUNC_MASK = 0x00000007, - MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0), - MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e, - MPI_TEST_NIC1_FUNC_SHIFT = 1, - MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4), - MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0, - MPI_TEST_NIC2_FUNC_SHIFT = 5, - MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8), - MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00, - MPI_TEST_FC1_FUNCTION_SHIFT = 9, - MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12), - MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000, - MPI_TEST_FC2_FUNCTION_SHIFT = 13, - - MPI_NIC_READ = 0x00000000, - MPI_NIC_REG_BLOCK = 0x00020000, - MPI_NIC_FUNCTION_SHIFT = 6, -}; - -/* - * Processor Address Register (PROC_ADDR) bit definitions. - */ -enum { - - /* Misc. stuff */ - MAILBOX_COUNT = 16, - MAILBOX_TIMEOUT = 5, - - PROC_ADDR_RDY = (1 << 31), - PROC_ADDR_R = (1 << 30), - PROC_ADDR_ERR = (1 << 29), - PROC_ADDR_DA = (1 << 28), - PROC_ADDR_FUNC0_MBI = 0x00001180, - PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT), - PROC_ADDR_FUNC0_CTL = 0x000011a1, - PROC_ADDR_FUNC2_MBI = 0x00001280, - PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT), - PROC_ADDR_FUNC2_CTL = 0x000012a1, - PROC_ADDR_MPI_RISC = 0x00000000, - PROC_ADDR_MDE = 0x00010000, - PROC_ADDR_REGBLOCK = 0x00020000, - PROC_ADDR_RISC_REG = 0x00030000, -}; - -/* - * System Register (SYS) bit definitions. 
- */ -enum { - SYS_EFE = (1 << 0), - SYS_FAE = (1 << 1), - SYS_MDC = (1 << 2), - SYS_DST = (1 << 3), - SYS_DWC = (1 << 4), - SYS_EVW = (1 << 5), - SYS_OMP_DLY_MASK = 0x3f000000, - /* - * There are no values defined as of edit #15. - */ - SYS_ODI = (1 << 14), -}; - -/* - * Reset/Failover Register (RST_FO) bit definitions. - */ -enum { - RST_FO_TFO = (1 << 0), - RST_FO_RR_MASK = 0x00060000, - RST_FO_RR_CQ_CAM = 0x00000000, - RST_FO_RR_DROP = 0x00000002, - RST_FO_RR_DQ = 0x00000004, - RST_FO_RR_RCV_FUNC_CQ = 0x00000006, - RST_FO_FRB = (1 << 12), - RST_FO_MOP = (1 << 13), - RST_FO_REG = (1 << 14), - RST_FO_FR = (1 << 15), -}; - -/* - * Function Specific Control Register (FSC) bit definitions. - */ -enum { - FSC_DBRST_MASK = 0x00070000, - FSC_DBRST_256 = 0x00000000, - FSC_DBRST_512 = 0x00000001, - FSC_DBRST_768 = 0x00000002, - FSC_DBRST_1024 = 0x00000003, - FSC_DBL_MASK = 0x00180000, - FSC_DBL_DBRST = 0x00000000, - FSC_DBL_MAX_PLD = 0x00000008, - FSC_DBL_MAX_BRST = 0x00000010, - FSC_DBL_128_BYTES = 0x00000018, - FSC_EC = (1 << 5), - FSC_EPC_MASK = 0x00c00000, - FSC_EPC_INBOUND = (1 << 6), - FSC_EPC_OUTBOUND = (1 << 7), - FSC_VM_PAGESIZE_MASK = 0x07000000, - FSC_VM_PAGE_2K = 0x00000100, - FSC_VM_PAGE_4K = 0x00000200, - FSC_VM_PAGE_8K = 0x00000300, - FSC_VM_PAGE_64K = 0x00000600, - FSC_SH = (1 << 11), - FSC_DSB = (1 << 12), - FSC_STE = (1 << 13), - FSC_FE = (1 << 15), -}; - -/* - * Host Command Status Register (CSR) bit definitions. - */ -enum { - CSR_ERR_STS_MASK = 0x0000003f, - /* - * There are no valued defined as of edit #15. - */ - CSR_RR = (1 << 8), - CSR_HRI = (1 << 9), - CSR_RP = (1 << 10), - CSR_CMD_PARM_SHIFT = 22, - CSR_CMD_NOP = 0x00000000, - CSR_CMD_SET_RST = 0x10000000, - CSR_CMD_CLR_RST = 0x20000000, - CSR_CMD_SET_PAUSE = 0x30000000, - CSR_CMD_CLR_PAUSE = 0x40000000, - CSR_CMD_SET_H2R_INT = 0x50000000, - CSR_CMD_CLR_H2R_INT = 0x60000000, - CSR_CMD_PAR_EN = 0x70000000, - CSR_CMD_SET_BAD_PAR = 0x80000000, - CSR_CMD_CLR_BAD_PAR = 0x90000000, - CSR_CMD_CLR_R2PCI_INT = 0xa0000000, -}; - -/* - * Configuration Register (CFG) bit definitions. - */ -enum { - CFG_LRQ = (1 << 0), - CFG_DRQ = (1 << 1), - CFG_LR = (1 << 2), - CFG_DR = (1 << 3), - CFG_LE = (1 << 5), - CFG_LCQ = (1 << 6), - CFG_DCQ = (1 << 7), - CFG_Q_SHIFT = 8, - CFG_Q_MASK = 0x7f000000, -}; - -/* - * Status Register (STS) bit definitions. - */ -enum { - STS_FE = (1 << 0), - STS_PI = (1 << 1), - STS_PL0 = (1 << 2), - STS_PL1 = (1 << 3), - STS_PI0 = (1 << 4), - STS_PI1 = (1 << 5), - STS_FUNC_ID_MASK = 0x000000c0, - STS_FUNC_ID_SHIFT = 6, - STS_F0E = (1 << 8), - STS_F1E = (1 << 9), - STS_F2E = (1 << 10), - STS_F3E = (1 << 11), - STS_NFE = (1 << 12), -}; - -/* - * Interrupt Enable Register (INTR_EN) bit definitions. - */ -enum { - INTR_EN_INTR_MASK = 0x007f0000, - INTR_EN_TYPE_MASK = 0x03000000, - INTR_EN_TYPE_ENABLE = 0x00000100, - INTR_EN_TYPE_DISABLE = 0x00000200, - INTR_EN_TYPE_READ = 0x00000300, - INTR_EN_IHD = (1 << 13), - INTR_EN_IHD_MASK = (INTR_EN_IHD << 16), - INTR_EN_EI = (1 << 14), - INTR_EN_EN = (1 << 15), -}; - -/* - * Interrupt Mask Register (INTR_MASK) bit definitions. - */ -enum { - INTR_MASK_PI = (1 << 0), - INTR_MASK_HL0 = (1 << 1), - INTR_MASK_LH0 = (1 << 2), - INTR_MASK_HL1 = (1 << 3), - INTR_MASK_LH1 = (1 << 4), - INTR_MASK_SE = (1 << 5), - INTR_MASK_LSC = (1 << 6), - INTR_MASK_MC = (1 << 7), - INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC, -}; - -/* - * Register (REV_ID) bit definitions. 
- */ -enum { - REV_ID_MASK = 0x0000000f, - REV_ID_NICROLL_SHIFT = 0, - REV_ID_NICREV_SHIFT = 4, - REV_ID_XGROLL_SHIFT = 8, - REV_ID_XGREV_SHIFT = 12, - REV_ID_CHIPREV_SHIFT = 28, -}; - -/* - * Force ECC Error Register (FRC_ECC_ERR) bit definitions. - */ -enum { - FRC_ECC_ERR_VW = (1 << 12), - FRC_ECC_ERR_VB = (1 << 13), - FRC_ECC_ERR_NI = (1 << 14), - FRC_ECC_ERR_NO = (1 << 15), - FRC_ECC_PFE_SHIFT = 16, - FRC_ECC_ERR_DO = (1 << 18), - FRC_ECC_P14 = (1 << 19), -}; - -/* - * Error Status Register (ERR_STS) bit definitions. - */ -enum { - ERR_STS_NOF = (1 << 0), - ERR_STS_NIF = (1 << 1), - ERR_STS_DRP = (1 << 2), - ERR_STS_XGP = (1 << 3), - ERR_STS_FOU = (1 << 4), - ERR_STS_FOC = (1 << 5), - ERR_STS_FOF = (1 << 6), - ERR_STS_FIU = (1 << 7), - ERR_STS_FIC = (1 << 8), - ERR_STS_FIF = (1 << 9), - ERR_STS_MOF = (1 << 10), - ERR_STS_TA = (1 << 11), - ERR_STS_MA = (1 << 12), - ERR_STS_MPE = (1 << 13), - ERR_STS_SCE = (1 << 14), - ERR_STS_STE = (1 << 15), - ERR_STS_FOW = (1 << 16), - ERR_STS_UE = (1 << 17), - ERR_STS_MCH = (1 << 26), - ERR_STS_LOC_SHIFT = 27, -}; - -/* - * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions. - */ -enum { - RAM_DBG_ADDR_FW = (1 << 30), - RAM_DBG_ADDR_FR = (1 << 31), -}; - -/* - * Semaphore Register (SEM) bit definitions. - */ -enum { - /* - * Example: - * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT) - */ - SEM_CLEAR = 0, - SEM_SET = 1, - SEM_FORCE = 3, - SEM_XGMAC0_SHIFT = 0, - SEM_XGMAC1_SHIFT = 2, - SEM_ICB_SHIFT = 4, - SEM_MAC_ADDR_SHIFT = 6, - SEM_FLASH_SHIFT = 8, - SEM_PROBE_SHIFT = 10, - SEM_RT_IDX_SHIFT = 12, - SEM_PROC_REG_SHIFT = 14, - SEM_XGMAC0_MASK = 0x00030000, - SEM_XGMAC1_MASK = 0x000c0000, - SEM_ICB_MASK = 0x00300000, - SEM_MAC_ADDR_MASK = 0x00c00000, - SEM_FLASH_MASK = 0x03000000, - SEM_PROBE_MASK = 0x0c000000, - SEM_RT_IDX_MASK = 0x30000000, - SEM_PROC_REG_MASK = 0xc0000000, -}; - -/* - * 10G MAC Address Register (XGMAC_ADDR) bit definitions. 
- */ -enum { - XGMAC_ADDR_RDY = (1 << 31), - XGMAC_ADDR_R = (1 << 30), - XGMAC_ADDR_XME = (1 << 29), - - /* XGMAC control registers */ - PAUSE_SRC_LO = 0x00000100, - PAUSE_SRC_HI = 0x00000104, - GLOBAL_CFG = 0x00000108, - GLOBAL_CFG_RESET = (1 << 0), - GLOBAL_CFG_JUMBO = (1 << 6), - GLOBAL_CFG_TX_STAT_EN = (1 << 10), - GLOBAL_CFG_RX_STAT_EN = (1 << 11), - TX_CFG = 0x0000010c, - TX_CFG_RESET = (1 << 0), - TX_CFG_EN = (1 << 1), - TX_CFG_PREAM = (1 << 2), - RX_CFG = 0x00000110, - RX_CFG_RESET = (1 << 0), - RX_CFG_EN = (1 << 1), - RX_CFG_PREAM = (1 << 2), - FLOW_CTL = 0x0000011c, - PAUSE_OPCODE = 0x00000120, - PAUSE_TIMER = 0x00000124, - PAUSE_FRM_DEST_LO = 0x00000128, - PAUSE_FRM_DEST_HI = 0x0000012c, - MAC_TX_PARAMS = 0x00000134, - MAC_TX_PARAMS_JUMBO = (1 << 31), - MAC_TX_PARAMS_SIZE_SHIFT = 16, - MAC_RX_PARAMS = 0x00000138, - MAC_SYS_INT = 0x00000144, - MAC_SYS_INT_MASK = 0x00000148, - MAC_MGMT_INT = 0x0000014c, - MAC_MGMT_IN_MASK = 0x00000150, - EXT_ARB_MODE = 0x000001fc, - - /* XGMAC TX statistics registers */ - TX_PKTS = 0x00000200, - TX_BYTES = 0x00000208, - TX_MCAST_PKTS = 0x00000210, - TX_BCAST_PKTS = 0x00000218, - TX_UCAST_PKTS = 0x00000220, - TX_CTL_PKTS = 0x00000228, - TX_PAUSE_PKTS = 0x00000230, - TX_64_PKT = 0x00000238, - TX_65_TO_127_PKT = 0x00000240, - TX_128_TO_255_PKT = 0x00000248, - TX_256_511_PKT = 0x00000250, - TX_512_TO_1023_PKT = 0x00000258, - TX_1024_TO_1518_PKT = 0x00000260, - TX_1519_TO_MAX_PKT = 0x00000268, - TX_UNDERSIZE_PKT = 0x00000270, - TX_OVERSIZE_PKT = 0x00000278, - - /* XGMAC statistics control registers */ - RX_HALF_FULL_DET = 0x000002a0, - TX_HALF_FULL_DET = 0x000002a4, - RX_OVERFLOW_DET = 0x000002a8, - TX_OVERFLOW_DET = 0x000002ac, - RX_HALF_FULL_MASK = 0x000002b0, - TX_HALF_FULL_MASK = 0x000002b4, - RX_OVERFLOW_MASK = 0x000002b8, - TX_OVERFLOW_MASK = 0x000002bc, - STAT_CNT_CTL = 0x000002c0, - STAT_CNT_CTL_CLEAR_TX = (1 << 0), - STAT_CNT_CTL_CLEAR_RX = (1 << 1), - AUX_RX_HALF_FULL_DET = 0x000002d0, - AUX_TX_HALF_FULL_DET = 0x000002d4, - AUX_RX_OVERFLOW_DET = 0x000002d8, - AUX_TX_OVERFLOW_DET = 0x000002dc, - AUX_RX_HALF_FULL_MASK = 0x000002f0, - AUX_TX_HALF_FULL_MASK = 0x000002f4, - AUX_RX_OVERFLOW_MASK = 0x000002f8, - AUX_TX_OVERFLOW_MASK = 0x000002fc, - - /* XGMAC RX statistics registers */ - RX_BYTES = 0x00000300, - RX_BYTES_OK = 0x00000308, - RX_PKTS = 0x00000310, - RX_PKTS_OK = 0x00000318, - RX_BCAST_PKTS = 0x00000320, - RX_MCAST_PKTS = 0x00000328, - RX_UCAST_PKTS = 0x00000330, - RX_UNDERSIZE_PKTS = 0x00000338, - RX_OVERSIZE_PKTS = 0x00000340, - RX_JABBER_PKTS = 0x00000348, - RX_UNDERSIZE_FCERR_PKTS = 0x00000350, - RX_DROP_EVENTS = 0x00000358, - RX_FCERR_PKTS = 0x00000360, - RX_ALIGN_ERR = 0x00000368, - RX_SYMBOL_ERR = 0x00000370, - RX_MAC_ERR = 0x00000378, - RX_CTL_PKTS = 0x00000380, - RX_PAUSE_PKTS = 0x00000388, - RX_64_PKTS = 0x00000390, - RX_65_TO_127_PKTS = 0x00000398, - RX_128_255_PKTS = 0x000003a0, - RX_256_511_PKTS = 0x000003a8, - RX_512_TO_1023_PKTS = 0x000003b0, - RX_1024_TO_1518_PKTS = 0x000003b8, - RX_1519_TO_MAX_PKTS = 0x000003c0, - RX_LEN_ERR_PKTS = 0x000003c8, - - /* XGMAC MDIO control registers */ - MDIO_TX_DATA = 0x00000400, - MDIO_RX_DATA = 0x00000410, - MDIO_CMD = 0x00000420, - MDIO_PHY_ADDR = 0x00000430, - MDIO_PORT = 0x00000440, - MDIO_STATUS = 0x00000450, - - XGMAC_REGISTER_END = 0x00000740, -}; - -/* - * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions. 
- */ -enum { - ETS_QUEUE_SHIFT = 29, - ETS_REF = (1 << 26), - ETS_RS = (1 << 27), - ETS_P = (1 << 28), - ETS_FC_COS_SHIFT = 23, -}; - -/* - * Flash Address Register (FLASH_ADDR) bit definitions. - */ -enum { - FLASH_ADDR_RDY = (1 << 31), - FLASH_ADDR_R = (1 << 30), - FLASH_ADDR_ERR = (1 << 29), -}; - -/* - * Stop CQ Processing Register (CQ_STOP) bit definitions. - */ -enum { - CQ_STOP_QUEUE_MASK = (0x007f0000), - CQ_STOP_TYPE_MASK = (0x03000000), - CQ_STOP_TYPE_START = 0x00000100, - CQ_STOP_TYPE_STOP = 0x00000200, - CQ_STOP_TYPE_READ = 0x00000300, - CQ_STOP_EN = (1 << 15), -}; - -/* - * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions. - */ -enum { - MAC_ADDR_IDX_SHIFT = 4, - MAC_ADDR_TYPE_SHIFT = 16, - MAC_ADDR_TYPE_COUNT = 10, - MAC_ADDR_TYPE_MASK = 0x000f0000, - MAC_ADDR_TYPE_CAM_MAC = 0x00000000, - MAC_ADDR_TYPE_MULTI_MAC = 0x00010000, - MAC_ADDR_TYPE_VLAN = 0x00020000, - MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000, - MAC_ADDR_TYPE_FC_MAC = 0x00040000, - MAC_ADDR_TYPE_MGMT_MAC = 0x00050000, - MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000, - MAC_ADDR_TYPE_MGMT_V4 = 0x00070000, - MAC_ADDR_TYPE_MGMT_V6 = 0x00080000, - MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000, - MAC_ADDR_ADR = (1 << 25), - MAC_ADDR_RS = (1 << 26), - MAC_ADDR_E = (1 << 27), - MAC_ADDR_MR = (1 << 30), - MAC_ADDR_MW = (1 << 31), - MAX_MULTICAST_ENTRIES = 32, - - /* Entry count and words per entry - * for each address type in the filter. - */ - MAC_ADDR_MAX_CAM_ENTRIES = 512, - MAC_ADDR_MAX_CAM_WCOUNT = 3, - MAC_ADDR_MAX_MULTICAST_ENTRIES = 32, - MAC_ADDR_MAX_MULTICAST_WCOUNT = 2, - MAC_ADDR_MAX_VLAN_ENTRIES = 4096, - MAC_ADDR_MAX_VLAN_WCOUNT = 1, - MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096, - MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1, - MAC_ADDR_MAX_FC_MAC_ENTRIES = 4, - MAC_ADDR_MAX_FC_MAC_WCOUNT = 2, - MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8, - MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2, - MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16, - MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1, - MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4, - MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1, - MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4, - MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4, - MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4, - MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1, -}; - -/* - * MAC Protocol Address Index Register (SPLT_HDR) bit definitions. - */ -enum { - SPLT_HDR_EP = (1 << 31), -}; - -/* - * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions. - */ -enum { - FC_RCV_CFG_ECT = (1 << 15), - FC_RCV_CFG_DFH = (1 << 20), - FC_RCV_CFG_DVF = (1 << 21), - FC_RCV_CFG_RCE = (1 << 27), - FC_RCV_CFG_RFE = (1 << 28), - FC_RCV_CFG_TEE = (1 << 29), - FC_RCV_CFG_TCE = (1 << 30), - FC_RCV_CFG_TFE = (1 << 31), -}; - -/* - * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions. - */ -enum { - NIC_RCV_CFG_PPE = (1 << 0), - NIC_RCV_CFG_VLAN_MASK = 0x00060000, - NIC_RCV_CFG_VLAN_ALL = 0x00000000, - NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002, - NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004, - NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006, - NIC_RCV_CFG_RV = (1 << 3), - NIC_RCV_CFG_DFQ_MASK = (0x7f000000), - NIC_RCV_CFG_DFQ_SHIFT = 8, - NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */ -}; - -/* - * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions. 
- */ -enum { - MGMT_RCV_CFG_ARP = (1 << 0), - MGMT_RCV_CFG_DHC = (1 << 1), - MGMT_RCV_CFG_DHS = (1 << 2), - MGMT_RCV_CFG_NP = (1 << 3), - MGMT_RCV_CFG_I6N = (1 << 4), - MGMT_RCV_CFG_I6R = (1 << 5), - MGMT_RCV_CFG_DH6 = (1 << 6), - MGMT_RCV_CFG_UD1 = (1 << 7), - MGMT_RCV_CFG_UD0 = (1 << 8), - MGMT_RCV_CFG_BCT = (1 << 9), - MGMT_RCV_CFG_MCT = (1 << 10), - MGMT_RCV_CFG_DM = (1 << 11), - MGMT_RCV_CFG_RM = (1 << 12), - MGMT_RCV_CFG_STL = (1 << 13), - MGMT_RCV_CFG_VLAN_MASK = 0xc0000000, - MGMT_RCV_CFG_VLAN_ALL = 0x00000000, - MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000, - MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000, - MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000, -}; - -/* - * Routing Index Register (RT_IDX) bit definitions. - */ -enum { - RT_IDX_IDX_SHIFT = 8, - RT_IDX_TYPE_MASK = 0x000f0000, - RT_IDX_TYPE_SHIFT = 16, - RT_IDX_TYPE_RT = 0x00000000, - RT_IDX_TYPE_RT_INV = 0x00010000, - RT_IDX_TYPE_NICQ = 0x00020000, - RT_IDX_TYPE_NICQ_INV = 0x00030000, - RT_IDX_DST_MASK = 0x00700000, - RT_IDX_DST_RSS = 0x00000000, - RT_IDX_DST_CAM_Q = 0x00100000, - RT_IDX_DST_COS_Q = 0x00200000, - RT_IDX_DST_DFLT_Q = 0x00300000, - RT_IDX_DST_DEST_Q = 0x00400000, - RT_IDX_RS = (1 << 26), - RT_IDX_E = (1 << 27), - RT_IDX_MR = (1 << 30), - RT_IDX_MW = (1 << 31), - - /* Nic Queue format - type 2 bits */ - RT_IDX_BCAST = (1 << 0), - RT_IDX_MCAST = (1 << 1), - RT_IDX_MCAST_MATCH = (1 << 2), - RT_IDX_MCAST_REG_MATCH = (1 << 3), - RT_IDX_MCAST_HASH_MATCH = (1 << 4), - RT_IDX_FC_MACH = (1 << 5), - RT_IDX_ETH_FCOE = (1 << 6), - RT_IDX_CAM_HIT = (1 << 7), - RT_IDX_CAM_BIT0 = (1 << 8), - RT_IDX_CAM_BIT1 = (1 << 9), - RT_IDX_VLAN_TAG = (1 << 10), - RT_IDX_VLAN_MATCH = (1 << 11), - RT_IDX_VLAN_FILTER = (1 << 12), - RT_IDX_ETH_SKIP1 = (1 << 13), - RT_IDX_ETH_SKIP2 = (1 << 14), - RT_IDX_BCAST_MCAST_MATCH = (1 << 15), - RT_IDX_802_3 = (1 << 16), - RT_IDX_LLDP = (1 << 17), - RT_IDX_UNUSED018 = (1 << 18), - RT_IDX_UNUSED019 = (1 << 19), - RT_IDX_UNUSED20 = (1 << 20), - RT_IDX_UNUSED21 = (1 << 21), - RT_IDX_ERR = (1 << 22), - RT_IDX_VALID = (1 << 23), - RT_IDX_TU_CSUM_ERR = (1 << 24), - RT_IDX_IP_CSUM_ERR = (1 << 25), - RT_IDX_MAC_ERR = (1 << 26), - RT_IDX_RSS_TCP6 = (1 << 27), - RT_IDX_RSS_TCP4 = (1 << 28), - RT_IDX_RSS_IPV6 = (1 << 29), - RT_IDX_RSS_IPV4 = (1 << 30), - RT_IDX_RSS_MATCH = (1 << 31), - - /* Hierarchy for the NIC Queue Mask */ - RT_IDX_ALL_ERR_SLOT = 0, - RT_IDX_MAC_ERR_SLOT = 0, - RT_IDX_IP_CSUM_ERR_SLOT = 1, - RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2, - RT_IDX_BCAST_SLOT = 3, - RT_IDX_MCAST_MATCH_SLOT = 4, - RT_IDX_ALLMULTI_SLOT = 5, - RT_IDX_UNUSED6_SLOT = 6, - RT_IDX_UNUSED7_SLOT = 7, - RT_IDX_RSS_MATCH_SLOT = 8, - RT_IDX_RSS_IPV4_SLOT = 8, - RT_IDX_RSS_IPV6_SLOT = 9, - RT_IDX_RSS_TCP4_SLOT = 10, - RT_IDX_RSS_TCP6_SLOT = 11, - RT_IDX_CAM_HIT_SLOT = 12, - RT_IDX_UNUSED013 = 13, - RT_IDX_UNUSED014 = 14, - RT_IDX_PROMISCUOUS_SLOT = 15, - RT_IDX_MAX_RT_SLOTS = 8, - RT_IDX_MAX_NIC_SLOTS = 16, -}; - -/* - * Serdes Address Register (XG_SERDES_ADDR) bit definitions. - */ -enum { - XG_SERDES_ADDR_RDY = (1 << 31), - XG_SERDES_ADDR_R = (1 << 30), - - XG_SERDES_ADDR_STS = 0x00001E06, - XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005, - XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a, - XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001, - - /* Serdes coredump definitions. 
*/ - XG_SERDES_XAUI_AN_START = 0x00000000, - XG_SERDES_XAUI_AN_END = 0x00000034, - XG_SERDES_XAUI_HSS_PCS_START = 0x00000800, - XG_SERDES_XAUI_HSS_PCS_END = 0x0000880, - XG_SERDES_XFI_AN_START = 0x00001000, - XG_SERDES_XFI_AN_END = 0x00001034, - XG_SERDES_XFI_TRAIN_START = 0x10001050, - XG_SERDES_XFI_TRAIN_END = 0x1000107C, - XG_SERDES_XFI_HSS_PCS_START = 0x00001800, - XG_SERDES_XFI_HSS_PCS_END = 0x00001838, - XG_SERDES_XFI_HSS_TX_START = 0x00001c00, - XG_SERDES_XFI_HSS_TX_END = 0x00001c1f, - XG_SERDES_XFI_HSS_RX_START = 0x00001c40, - XG_SERDES_XFI_HSS_RX_END = 0x00001c5f, - XG_SERDES_XFI_HSS_PLL_START = 0x00001e00, - XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f, -}; - -/* - * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions. - */ -enum { - PRB_MX_ADDR_ARE = (1 << 16), - PRB_MX_ADDR_UP = (1 << 15), - PRB_MX_ADDR_SWP = (1 << 14), - - /* Module select values. */ - PRB_MX_ADDR_MAX_MODS = 21, - PRB_MX_ADDR_MOD_SEL_SHIFT = 9, - PRB_MX_ADDR_MOD_SEL_TBD = 0, - PRB_MX_ADDR_MOD_SEL_IDE1 = 1, - PRB_MX_ADDR_MOD_SEL_IDE2 = 2, - PRB_MX_ADDR_MOD_SEL_FRB = 3, - PRB_MX_ADDR_MOD_SEL_ODE1 = 4, - PRB_MX_ADDR_MOD_SEL_ODE2 = 5, - PRB_MX_ADDR_MOD_SEL_DA1 = 6, - PRB_MX_ADDR_MOD_SEL_DA2 = 7, - PRB_MX_ADDR_MOD_SEL_IMP1 = 8, - PRB_MX_ADDR_MOD_SEL_IMP2 = 9, - PRB_MX_ADDR_MOD_SEL_OMP1 = 10, - PRB_MX_ADDR_MOD_SEL_OMP2 = 11, - PRB_MX_ADDR_MOD_SEL_ORS1 = 12, - PRB_MX_ADDR_MOD_SEL_ORS2 = 13, - PRB_MX_ADDR_MOD_SEL_REG = 14, - PRB_MX_ADDR_MOD_SEL_MAC1 = 16, - PRB_MX_ADDR_MOD_SEL_MAC2 = 17, - PRB_MX_ADDR_MOD_SEL_VQM1 = 18, - PRB_MX_ADDR_MOD_SEL_VQM2 = 19, - PRB_MX_ADDR_MOD_SEL_MOP = 20, - /* Bit fields indicating which modules - * are valid for each clock domain. - */ - PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7, - PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1, - PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309, - PRB_MX_ADDR_VALID_FC_MOD = 0x00003001, - PRB_MX_ADDR_VALID_TOTAL = 34, - - /* Clock domain values. 
*/ - PRB_MX_ADDR_CLOCK_SHIFT = 6, - PRB_MX_ADDR_SYS_CLOCK = 0, - PRB_MX_ADDR_PCI_CLOCK = 2, - PRB_MX_ADDR_FC_CLOCK = 5, - PRB_MX_ADDR_XGM_CLOCK = 6, - - PRB_MX_ADDR_MAX_MUX = 64, -}; - -/* - * Control Register Set Map - */ -enum { - PROC_ADDR = 0, /* Use semaphore */ - PROC_DATA = 0x04, /* Use semaphore */ - SYS = 0x08, - RST_FO = 0x0c, - FSC = 0x10, - CSR = 0x14, - LED = 0x18, - ICB_RID = 0x1c, /* Use semaphore */ - ICB_L = 0x20, /* Use semaphore */ - ICB_H = 0x24, /* Use semaphore */ - CFG = 0x28, - BIOS_ADDR = 0x2c, - STS = 0x30, - INTR_EN = 0x34, - INTR_MASK = 0x38, - ISR1 = 0x3c, - ISR2 = 0x40, - ISR3 = 0x44, - ISR4 = 0x48, - REV_ID = 0x4c, - FRC_ECC_ERR = 0x50, - ERR_STS = 0x54, - RAM_DBG_ADDR = 0x58, - RAM_DBG_DATA = 0x5c, - ECC_ERR_CNT = 0x60, - SEM = 0x64, - GPIO_1 = 0x68, /* Use semaphore */ - GPIO_2 = 0x6c, /* Use semaphore */ - GPIO_3 = 0x70, /* Use semaphore */ - RSVD2 = 0x74, - XGMAC_ADDR = 0x78, /* Use semaphore */ - XGMAC_DATA = 0x7c, /* Use semaphore */ - NIC_ETS = 0x80, - CNA_ETS = 0x84, - FLASH_ADDR = 0x88, /* Use semaphore */ - FLASH_DATA = 0x8c, /* Use semaphore */ - CQ_STOP = 0x90, - PAGE_TBL_RID = 0x94, - WQ_PAGE_TBL_LO = 0x98, - WQ_PAGE_TBL_HI = 0x9c, - CQ_PAGE_TBL_LO = 0xa0, - CQ_PAGE_TBL_HI = 0xa4, - MAC_ADDR_IDX = 0xa8, /* Use semaphore */ - MAC_ADDR_DATA = 0xac, /* Use semaphore */ - COS_DFLT_CQ1 = 0xb0, - COS_DFLT_CQ2 = 0xb4, - ETYPE_SKIP1 = 0xb8, - ETYPE_SKIP2 = 0xbc, - SPLT_HDR = 0xc0, - FC_PAUSE_THRES = 0xc4, - NIC_PAUSE_THRES = 0xc8, - FC_ETHERTYPE = 0xcc, - FC_RCV_CFG = 0xd0, - NIC_RCV_CFG = 0xd4, - FC_COS_TAGS = 0xd8, - NIC_COS_TAGS = 0xdc, - MGMT_RCV_CFG = 0xe0, - RT_IDX = 0xe4, - RT_DATA = 0xe8, - RSVD7 = 0xec, - XG_SERDES_ADDR = 0xf0, - XG_SERDES_DATA = 0xf4, - PRB_MX_ADDR = 0xf8, /* Use semaphore */ - PRB_MX_DATA = 0xfc, /* Use semaphore */ -}; - -#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -#define SMALL_BUFFER_SIZE 256 -#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE -#define SPLT_SETTING FSC_DBRST_1024 -#define SPLT_LEN 0 -#define QLGE_SB_PAD 0 -#else -#define SMALL_BUFFER_SIZE 512 -#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2) -#define SPLT_SETTING FSC_SH -#define SPLT_LEN (SPLT_HDR_EP | \ - min(SMALL_BUF_MAP_SIZE, 1023)) -#define QLGE_SB_PAD 32 -#endif - -/* - * CAM output format. - */ -enum { - CAM_OUT_ROUTE_FC = 0, - CAM_OUT_ROUTE_NIC = 1, - CAM_OUT_FUNC_SHIFT = 2, - CAM_OUT_RV = (1 << 4), - CAM_OUT_SH = (1 << 15), - CAM_OUT_CQ_ID_SHIFT = 5, -}; - -/* - * Mailbox definitions - */ -enum { - /* Asynchronous Event Notifications */ - AEN_SYS_ERR = 0x00008002, - AEN_LINK_UP = 0x00008011, - AEN_LINK_DOWN = 0x00008012, - AEN_IDC_CMPLT = 0x00008100, - AEN_IDC_REQ = 0x00008101, - AEN_IDC_EXT = 0x00008102, - AEN_DCBX_CHG = 0x00008110, - AEN_AEN_LOST = 0x00008120, - AEN_AEN_SFP_IN = 0x00008130, - AEN_AEN_SFP_OUT = 0x00008131, - AEN_FW_INIT_DONE = 0x00008400, - AEN_FW_INIT_FAIL = 0x00008401, - - /* Mailbox Command Opcodes. 
*/ - MB_CMD_NOP = 0x00000000, - MB_CMD_EX_FW = 0x00000002, - MB_CMD_MB_TEST = 0x00000006, - MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */ - MB_CMD_ABOUT_FW = 0x00000008, - MB_CMD_COPY_RISC_RAM = 0x0000000a, - MB_CMD_LOAD_RISC_RAM = 0x0000000b, - MB_CMD_DUMP_RISC_RAM = 0x0000000c, - MB_CMD_WRITE_RAM = 0x0000000d, - MB_CMD_INIT_RISC_RAM = 0x0000000e, - MB_CMD_READ_RAM = 0x0000000f, - MB_CMD_STOP_FW = 0x00000014, - MB_CMD_MAKE_SYS_ERR = 0x0000002a, - MB_CMD_WRITE_SFP = 0x00000030, - MB_CMD_READ_SFP = 0x00000031, - MB_CMD_INIT_FW = 0x00000060, - MB_CMD_GET_IFCB = 0x00000061, - MB_CMD_GET_FW_STATE = 0x00000069, - MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */ - MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */ - MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */ - MB_WOL_DISABLE = 0, - MB_WOL_MAGIC_PKT = (1 << 1), - MB_WOL_FLTR = (1 << 2), - MB_WOL_UCAST = (1 << 3), - MB_WOL_MCAST = (1 << 4), - MB_WOL_BCAST = (1 << 5), - MB_WOL_LINK_UP = (1 << 6), - MB_WOL_LINK_DOWN = (1 << 7), - MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */ - MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */ - MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */ - MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */ - MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */ - MB_CMD_SET_WOL_IMMED = 0x00000115, - MB_CMD_PORT_RESET = 0x00000120, - MB_CMD_SET_PORT_CFG = 0x00000122, - MB_CMD_GET_PORT_CFG = 0x00000123, - MB_CMD_GET_LINK_STS = 0x00000124, - MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */ - QL_LED_BLINK = 0x03e803e8, - MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */ - MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */ - MB_SET_MPI_TFK_STOP = (1 << 0), - MB_SET_MPI_TFK_RESUME = (1 << 1), - MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */ - MB_GET_MPI_TFK_STOPPED = (1 << 0), - MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1), - /* Sub-commands for IDC request. - * This describes the reason for the - * IDC request. - */ - MB_CMD_IOP_NONE = 0x0000, - MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001, - MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002, - MB_CMD_IOP_PREP_LINK_DOWN = 0x0010, - MB_CMD_IOP_DVR_START = 0x0100, - MB_CMD_IOP_FLASH_ACC = 0x0101, - MB_CMD_IOP_RESTART_MPI = 0x0102, - MB_CMD_IOP_CORE_DUMP_MPI = 0x0103, - - /* Mailbox Command Status. */ - MB_CMD_STS_GOOD = 0x00004000, /* Success. */ - MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */ - MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */ - MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */ - MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */ - MB_CMD_STS_ERR = 0x00004005, /* System Error. */ - MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */ -}; - -struct mbox_params { - u32 mbox_in[MAILBOX_COUNT]; - u32 mbox_out[MAILBOX_COUNT]; - int in_count; - int out_count; -}; - -struct flash_params_8012 { - u8 dev_id_str[4]; - __le16 size; - __le16 csum; - __le16 ver; - __le16 sub_dev_id; - u8 mac_addr[6]; - __le16 res; -}; - -/* 8000 device's flash is a different structure - * at a different offset in flash. - */ -#define FUNC0_FLASH_OFFSET 0x140200 -#define FUNC1_FLASH_OFFSET 0x140600 - -/* Flash related data structures. 
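For reference, the csum field in flash_params_8012 (and in flash_params_8000 below) follows the usual sum-to-zero convention; the driver's flash validation elsewhere in qlge_main.c sums the block as little-endian 16-bit words and accepts it only when the total is zero. A minimal userspace sketch under that assumption; the helper name is ours, not the driver's:

#include <stdint.h>
#include <stddef.h>

/* Sum a flash block as little-endian 16-bit words. A valid image,
 * csum field included, totals zero, i.e. csum is written as the
 * two's complement of everything else. Illustrative helper only.
 */
static uint16_t flash_block_csum(const uint8_t *blk, size_t nwords)
{
	uint16_t sum = 0;
	size_t i;

	for (i = 0; i < nwords; i++)
		sum += (uint16_t)(blk[2 * i] | (blk[2 * i + 1] << 8));

	return sum;	/* 0 means the image is intact */
}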
*/ -struct flash_params_8000 { - u8 dev_id_str[4]; /* "8000" */ - __le16 ver; - __le16 size; - __le16 csum; - __le16 reserved0; - __le16 total_size; - __le16 entry_count; - u8 data_type0; - u8 data_size0; - u8 mac_addr[6]; - u8 data_type1; - u8 data_size1; - u8 mac_addr1[6]; - u8 data_type2; - u8 data_size2; - __le16 vlan_id; - u8 data_type3; - u8 data_size3; - __le16 last; - u8 reserved1[464]; - __le16 subsys_ven_id; - __le16 subsys_dev_id; - u8 reserved2[4]; -}; - -union flash_params { - struct flash_params_8012 flash_params_8012; - struct flash_params_8000 flash_params_8000; -}; - -/* - * doorbell space for the rx ring context - */ -struct rx_doorbell_context { - u32 cnsmr_idx; /* 0x00 */ - u32 valid; /* 0x04 */ - u32 reserved[4]; /* 0x08-0x14 */ - u32 lbq_prod_idx; /* 0x18 */ - u32 sbq_prod_idx; /* 0x1c */ -}; - -/* - * doorbell space for the tx ring context - */ -struct tx_doorbell_context { - u32 prod_idx; /* 0x00 */ - u32 valid; /* 0x04 */ - u32 reserved[4]; /* 0x08-0x14 */ - u32 lbq_prod_idx; /* 0x18 */ - u32 sbq_prod_idx; /* 0x1c */ -}; - -/* DATA STRUCTURES SHARED WITH HARDWARE. */ -struct tx_buf_desc { - __le64 addr; - __le32 len; -#define TX_DESC_LEN_MASK 0x000fffff -#define TX_DESC_C 0x40000000 -#define TX_DESC_E 0x80000000 -} __packed; - -/* - * IOCB Definitions... - */ - -#define OPCODE_OB_MAC_IOCB 0x01 -#define OPCODE_OB_MAC_TSO_IOCB 0x02 -#define OPCODE_IB_MAC_IOCB 0x20 -#define OPCODE_IB_MPI_IOCB 0x21 -#define OPCODE_IB_AE_IOCB 0x3f - -struct ob_mac_iocb_req { - u8 opcode; - u8 flags1; -#define OB_MAC_IOCB_REQ_OI 0x01 -#define OB_MAC_IOCB_REQ_I 0x02 -#define OB_MAC_IOCB_REQ_D 0x08 -#define OB_MAC_IOCB_REQ_F 0x10 - u8 flags2; - u8 flags3; -#define OB_MAC_IOCB_DFP 0x02 -#define OB_MAC_IOCB_V 0x04 - __le32 reserved1[2]; - __le16 frame_len; -#define OB_MAC_IOCB_LEN_MASK 0x3ffff - __le16 reserved2; - u32 tid; - u32 txq_idx; - __le32 reserved3; - __le16 vlan_tci; - __le16 reserved4; - struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; -} __packed; - -struct ob_mac_iocb_rsp { - u8 opcode; /* */ - u8 flags1; /* */ -#define OB_MAC_IOCB_RSP_OI 0x01 /* */ -#define OB_MAC_IOCB_RSP_I 0x02 /* */ -#define OB_MAC_IOCB_RSP_E 0x08 /* */ -#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */ -#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */ -#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */ - u8 flags2; /* */ - u8 flags3; /* */ -#define OB_MAC_IOCB_RSP_B 0x80 /* */ - u32 tid; - u32 txq_idx; - __le32 reserved[13]; -} __packed; - -struct ob_mac_tso_iocb_req { - u8 opcode; - u8 flags1; -#define OB_MAC_TSO_IOCB_OI 0x01 -#define OB_MAC_TSO_IOCB_I 0x02 -#define OB_MAC_TSO_IOCB_D 0x08 -#define OB_MAC_TSO_IOCB_IP4 0x40 -#define OB_MAC_TSO_IOCB_IP6 0x80 - u8 flags2; -#define OB_MAC_TSO_IOCB_LSO 0x20 -#define OB_MAC_TSO_IOCB_UC 0x40 -#define OB_MAC_TSO_IOCB_TC 0x80 - u8 flags3; -#define OB_MAC_TSO_IOCB_IC 0x01 -#define OB_MAC_TSO_IOCB_DFP 0x02 -#define OB_MAC_TSO_IOCB_V 0x04 - __le32 reserved1[2]; - __le32 frame_len; - u32 tid; - u32 txq_idx; - __le16 total_hdrs_len; - __le16 net_trans_offset; -#define OB_MAC_TRANSPORT_HDR_SHIFT 6 - __le16 vlan_tci; - __le16 mss; - struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; -} __packed; - -struct ob_mac_tso_iocb_rsp { - u8 opcode; - u8 flags1; -#define OB_MAC_TSO_IOCB_RSP_OI 0x01 -#define OB_MAC_TSO_IOCB_RSP_I 0x02 -#define OB_MAC_TSO_IOCB_RSP_E 0x08 -#define OB_MAC_TSO_IOCB_RSP_S 0x10 -#define OB_MAC_TSO_IOCB_RSP_L 0x20 -#define OB_MAC_TSO_IOCB_RSP_P 0x40 - u8 flags2; /* */ - u8 flags3; /* */ -#define OB_MAC_TSO_IOCB_RSP_B 0x8000 - u32 tid; - u32 txq_idx; - __le32 reserved2[13]; -} 
__packed; - -struct ib_mac_iocb_rsp { - u8 opcode; /* 0x20 */ - u8 flags1; -#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */ -#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */ -#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */ -#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ -#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ -#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */ -#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */ -#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */ -#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */ -#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */ -#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */ -#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */ - u8 flags2; -#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */ -#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */ -#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */ -#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04 -#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08 -#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10 -#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14 -#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18 -#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c -#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */ -#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */ -#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */ - u8 flags3; -#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */ -#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */ -#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */ -#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */ -#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */ -#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */ -#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */ -#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */ -#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */ -#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */ -#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */ - __le32 data_len; /* */ - __le64 data_addr; /* */ - __le32 rss; /* */ - __le16 vlan_id; /* 12 bits */ -#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */ -#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */ -#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff - - __le16 reserved1; - __le32 reserved2[6]; - u8 reserved3[3]; - u8 flags4; -#define IB_MAC_IOCB_RSP_HV 0x20 -#define IB_MAC_IOCB_RSP_HS 0x40 -#define IB_MAC_IOCB_RSP_HL 0x80 - __le32 hdr_len; /* */ - __le64 hdr_addr; /* */ -} __packed; - -struct ib_ae_iocb_rsp { - u8 opcode; - u8 flags1; -#define IB_AE_IOCB_RSP_OI 0x01 -#define IB_AE_IOCB_RSP_I 0x02 - u8 event; -#define LINK_UP_EVENT 0x00 -#define LINK_DOWN_EVENT 0x01 -#define CAM_LOOKUP_ERR_EVENT 0x06 -#define SOFT_ECC_ERROR_EVENT 0x07 -#define MGMT_ERR_EVENT 0x08 -#define TEN_GIG_MAC_EVENT 0x09 -#define GPI0_H2L_EVENT 0x10 -#define GPI0_L2H_EVENT 0x20 -#define GPI1_H2L_EVENT 0x11 -#define GPI1_L2H_EVENT 0x21 -#define PCI_ERR_ANON_BUF_RD 0x40 - u8 q_id; - __le32 reserved[15]; -} __packed; - -/* - * These three structures are for generic - * handling of ib and ob iocbs. - */ -struct ql_net_rsp_iocb { - u8 opcode; - u8 flags0; - __le16 length; - __le32 tid; - __le32 reserved[14]; -} __packed; - -struct net_req_iocb { - u8 opcode; - u8 flags0; - __le16 flags1; - __le32 tid; - __le32 reserved1[30]; -} __packed; - -/* - * tx ring initialization control block for chip. 
- * It is defined as: - * "Work Queue Initialization Control Block" - */ -struct wqicb { - __le16 len; -#define Q_LEN_V (1 << 4) -#define Q_LEN_CPP_CONT 0x0000 -#define Q_LEN_CPP_16 0x0001 -#define Q_LEN_CPP_32 0x0002 -#define Q_LEN_CPP_64 0x0003 -#define Q_LEN_CPP_512 0x0006 - __le16 flags; -#define Q_PRI_SHIFT 1 -#define Q_FLAGS_LC 0x1000 -#define Q_FLAGS_LB 0x2000 -#define Q_FLAGS_LI 0x4000 -#define Q_FLAGS_LO 0x8000 - __le16 cq_id_rss; -#define Q_CQ_ID_RSS_RV 0x8000 - __le16 rid; - __le64 addr; - __le64 cnsmr_idx_addr; -} __packed; - -/* - * rx ring initialization control block for chip. - * It is defined as: - * "Completion Queue Initialization Control Block" - */ -struct cqicb { - u8 msix_vect; - u8 reserved1; - u8 reserved2; - u8 flags; -#define FLAGS_LV 0x08 -#define FLAGS_LS 0x10 -#define FLAGS_LL 0x20 -#define FLAGS_LI 0x40 -#define FLAGS_LC 0x80 - __le16 len; -#define LEN_V (1 << 4) -#define LEN_CPP_CONT 0x0000 -#define LEN_CPP_32 0x0001 -#define LEN_CPP_64 0x0002 -#define LEN_CPP_128 0x0003 - __le16 rid; - __le64 addr; - __le64 prod_idx_addr; - __le16 pkt_delay; - __le16 irq_delay; - __le64 lbq_addr; - __le16 lbq_buf_size; - __le16 lbq_len; /* entry count */ - __le64 sbq_addr; - __le16 sbq_buf_size; - __le16 sbq_len; /* entry count */ -} __packed; - -struct ricb { - u8 base_cq; -#define RSS_L4K 0x80 - u8 flags; -#define RSS_L6K 0x01 -#define RSS_LI 0x02 -#define RSS_LB 0x04 -#define RSS_LM 0x08 -#define RSS_RI4 0x10 -#define RSS_RT4 0x20 -#define RSS_RI6 0x40 -#define RSS_RT6 0x80 - __le16 mask; - u8 hash_cq_id[1024]; - __le32 ipv6_hash_key[10]; - __le32 ipv4_hash_key[4]; -} __packed; - -/* SOFTWARE/DRIVER DATA STRUCTURES. */ - -struct oal { - struct tx_buf_desc oal[TX_DESC_PER_OAL]; -}; - -struct map_list { - DEFINE_DMA_UNMAP_ADDR(mapaddr); - DEFINE_DMA_UNMAP_LEN(maplen); -}; - -struct tx_ring_desc { - struct sk_buff *skb; - struct ob_mac_iocb_req *queue_entry; - u32 index; - struct oal oal; - struct map_list map[MAX_SKB_FRAGS + 2]; - int map_cnt; - struct tx_ring_desc *next; -}; - -struct page_chunk { - struct page *page; /* master page */ - char *va; /* virt addr for this chunk */ - u64 map; /* mapping for master */ - unsigned int offset; /* offset for this chunk */ - unsigned int last_flag; /* flag set for last chunk in page */ -}; - -struct bq_desc { - union { - struct page_chunk pg_chunk; - struct sk_buff *skb; - } p; - __le64 *addr; - u32 index; - DEFINE_DMA_UNMAP_ADDR(mapaddr); - DEFINE_DMA_UNMAP_LEN(maplen); -}; - -#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) - -struct tx_ring { - /* - * queue info. 
- */ - struct wqicb wqicb; /* structure used to inform chip of new queue */ - void *wq_base; /* pci_alloc:virtual addr for tx */ - dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */ - __le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */ - dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */ - u32 wq_size; /* size in bytes of queue area */ - u32 wq_len; /* number of entries in queue */ - void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */ - void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */ - u16 prod_idx; /* current value for prod idx */ - u16 cq_id; /* completion (rx) queue for tx completions */ - u8 wq_id; /* queue id for this entry */ - u8 reserved1[3]; - struct tx_ring_desc *q; /* descriptor list for the queue */ - spinlock_t lock; - atomic_t tx_count; /* counts down for every outstanding IO */ - struct delayed_work tx_work; - struct ql_adapter *qdev; - u64 tx_packets; - u64 tx_bytes; - u64 tx_errors; -}; - -/* - * Type of inbound queue. - */ -enum { - DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */ - TX_Q = 3, /* Handles outbound completions. */ - RX_Q = 4, /* Handles inbound completions. */ -}; - -struct rx_ring { - struct cqicb cqicb; /* The chip's completion queue init control block. */ - - /* Completion queue elements. */ - void *cq_base; - dma_addr_t cq_base_dma; - u32 cq_size; - u32 cq_len; - u16 cq_id; - __le32 *prod_idx_sh_reg; /* Shadowed producer register. */ - dma_addr_t prod_idx_sh_reg_dma; - void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */ - u32 cnsmr_idx; /* current sw idx */ - struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */ - void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */ - - /* Large buffer queue elements. */ - u32 lbq_len; /* entry count */ - u32 lbq_size; /* size in bytes of queue */ - u32 lbq_buf_size; - void *lbq_base; - dma_addr_t lbq_base_dma; - void *lbq_base_indirect; - dma_addr_t lbq_base_indirect_dma; - struct page_chunk pg_chunk; /* current page for chunks */ - struct bq_desc *lbq; /* array of control blocks */ - void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */ - u32 lbq_prod_idx; /* current sw prod idx */ - u32 lbq_curr_idx; /* next entry we expect */ - u32 lbq_clean_idx; /* beginning of new descs */ - u32 lbq_free_cnt; /* free buffer desc cnt */ - - /* Small buffer queue elements. */ - u32 sbq_len; /* entry count */ - u32 sbq_size; /* size in bytes of queue */ - u32 sbq_buf_size; - void *sbq_base; - dma_addr_t sbq_base_dma; - void *sbq_base_indirect; - dma_addr_t sbq_base_indirect_dma; - struct bq_desc *sbq; /* array of control blocks */ - void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */ - u32 sbq_prod_idx; /* current sw prod idx */ - u32 sbq_curr_idx; /* next entry we expect */ - u32 sbq_clean_idx; /* beginning of new descs */ - u32 sbq_free_cnt; /* free buffer desc cnt */ - - /* Misc. handler elements. */ - u32 type; /* Type of queue, tx, rx. */ - u32 irq; /* Which vector this ring is assigned. */ - u32 cpu; /* Which CPU this should run on. */ - char name[IFNAMSIZ + 5]; - struct napi_struct napi; - u8 reserved; - struct ql_adapter *qdev; - u64 rx_packets; - u64 rx_multicast; - u64 rx_bytes; - u64 rx_dropped; - u64 rx_errors; -}; - -/* - * RSS Initialization Control Block - */ -struct hash_id { - u8 value[4]; -}; - -struct nic_stats { - /* - * These stats come from offset 200h to 278h - * in the XGMAC register. 
- */ - u64 tx_pkts; - u64 tx_bytes; - u64 tx_mcast_pkts; - u64 tx_bcast_pkts; - u64 tx_ucast_pkts; - u64 tx_ctl_pkts; - u64 tx_pause_pkts; - u64 tx_64_pkt; - u64 tx_65_to_127_pkt; - u64 tx_128_to_255_pkt; - u64 tx_256_511_pkt; - u64 tx_512_to_1023_pkt; - u64 tx_1024_to_1518_pkt; - u64 tx_1519_to_max_pkt; - u64 tx_undersize_pkt; - u64 tx_oversize_pkt; - - /* - * These stats come from offset 300h to 3C8h - * in the XGMAC register. - */ - u64 rx_bytes; - u64 rx_bytes_ok; - u64 rx_pkts; - u64 rx_pkts_ok; - u64 rx_bcast_pkts; - u64 rx_mcast_pkts; - u64 rx_ucast_pkts; - u64 rx_undersize_pkts; - u64 rx_oversize_pkts; - u64 rx_jabber_pkts; - u64 rx_undersize_fcerr_pkts; - u64 rx_drop_events; - u64 rx_fcerr_pkts; - u64 rx_align_err; - u64 rx_symbol_err; - u64 rx_mac_err; - u64 rx_ctl_pkts; - u64 rx_pause_pkts; - u64 rx_64_pkts; - u64 rx_65_to_127_pkts; - u64 rx_128_255_pkts; - u64 rx_256_511_pkts; - u64 rx_512_to_1023_pkts; - u64 rx_1024_to_1518_pkts; - u64 rx_1519_to_max_pkts; - u64 rx_len_err_pkts; - /* Receive Mac Err stats */ - u64 rx_code_err; - u64 rx_oversize_err; - u64 rx_undersize_err; - u64 rx_preamble_err; - u64 rx_frame_len_err; - u64 rx_crc_err; - u64 rx_err_count; - /* - * These stats come from offset 500h to 5C8h - * in the XGMAC register. - */ - u64 tx_cbfc_pause_frames0; - u64 tx_cbfc_pause_frames1; - u64 tx_cbfc_pause_frames2; - u64 tx_cbfc_pause_frames3; - u64 tx_cbfc_pause_frames4; - u64 tx_cbfc_pause_frames5; - u64 tx_cbfc_pause_frames6; - u64 tx_cbfc_pause_frames7; - u64 rx_cbfc_pause_frames0; - u64 rx_cbfc_pause_frames1; - u64 rx_cbfc_pause_frames2; - u64 rx_cbfc_pause_frames3; - u64 rx_cbfc_pause_frames4; - u64 rx_cbfc_pause_frames5; - u64 rx_cbfc_pause_frames6; - u64 rx_cbfc_pause_frames7; - u64 rx_nic_fifo_drop; -}; - -/* Firmware coredump internal register address/length pairs. */ -enum { - MPI_CORE_REGS_ADDR = 0x00030000, - MPI_CORE_REGS_CNT = 127, - MPI_CORE_SH_REGS_CNT = 16, - TEST_REGS_ADDR = 0x00001000, - TEST_REGS_CNT = 23, - RMII_REGS_ADDR = 0x00001040, - RMII_REGS_CNT = 64, - FCMAC1_REGS_ADDR = 0x00001080, - FCMAC2_REGS_ADDR = 0x000010c0, - FCMAC_REGS_CNT = 64, - FC1_MBX_REGS_ADDR = 0x00001100, - FC2_MBX_REGS_ADDR = 0x00001240, - FC_MBX_REGS_CNT = 64, - IDE_REGS_ADDR = 0x00001140, - IDE_REGS_CNT = 64, - NIC1_MBX_REGS_ADDR = 0x00001180, - NIC2_MBX_REGS_ADDR = 0x00001280, - NIC_MBX_REGS_CNT = 64, - SMBUS_REGS_ADDR = 0x00001200, - SMBUS_REGS_CNT = 64, - I2C_REGS_ADDR = 0x00001fc0, - I2C_REGS_CNT = 64, - MEMC_REGS_ADDR = 0x00003000, - MEMC_REGS_CNT = 256, - PBUS_REGS_ADDR = 0x00007c00, - PBUS_REGS_CNT = 256, - MDE_REGS_ADDR = 0x00010000, - MDE_REGS_CNT = 6, - CODE_RAM_ADDR = 0x00020000, - CODE_RAM_CNT = 0x2000, - MEMC_RAM_ADDR = 0x00100000, - MEMC_RAM_CNT = 0x2000, -}; - -#define MPI_COREDUMP_COOKIE 0x5555aaaa -struct mpi_coredump_global_header { - u32 cookie; - u8 idString[16]; - u32 timeLo; - u32 timeHi; - u32 imageSize; - u32 headerSize; - u8 info[220]; -}; - -struct mpi_coredump_segment_header { - u32 cookie; - u32 segNum; - u32 segSize; - u32 extra; - u8 description[16]; -}; - -/* Firmware coredump header segment numbers. 
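Both dump flavors built on these headers (ql_reg_dump and ql_mpi_coredump below) are simply a global header followed by (segment header, payload) records, so a saved blob can be walked offline with nothing but the two layouts above. A hedged userspace sketch: it assumes segSize counts the segment header plus its payload, which is how ql_build_coredump_seg_header() sizes it later in this patch, and that the dump was written on a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DUMP_COOKIE 0x5555aaaa	/* MPI_COREDUMP_COOKIE */

struct g_hdr {			/* mirrors mpi_coredump_global_header */
	uint32_t cookie;
	char id_string[16];
	uint32_t time_lo, time_hi;
	uint32_t image_size;
	uint32_t header_size;
	uint8_t info[220];
};

struct s_hdr {			/* mirrors mpi_coredump_segment_header */
	uint32_t cookie;
	uint32_t seg_num;
	uint32_t seg_size;	/* header + payload, in bytes */
	uint32_t extra;
	char description[16];
};

static void walk_dump(const uint8_t *blob, size_t len)
{
	struct g_hdr gh;
	struct s_hdr sh;
	size_t off;

	if (len < sizeof(gh))
		return;
	memcpy(&gh, blob, sizeof(gh));
	if (gh.cookie != DUMP_COOKIE)
		return;
	for (off = gh.header_size;
	     off + sizeof(sh) <= len; off += sh.seg_size) {
		memcpy(&sh, blob + off, sizeof(sh));
		if (sh.cookie != DUMP_COOKIE || sh.seg_size < sizeof(sh))
			break;	/* corrupt, or past the last segment */
		printf("seg %2u: %-16.16s %u bytes\n",
		       sh.seg_num, sh.description, sh.seg_size);
	}
}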
 */
-enum {
-	CORE_SEG_NUM = 1,
-	TEST_LOGIC_SEG_NUM = 2,
-	RMII_SEG_NUM = 3,
-	FCMAC1_SEG_NUM = 4,
-	FCMAC2_SEG_NUM = 5,
-	FC1_MBOX_SEG_NUM = 6,
-	IDE_SEG_NUM = 7,
-	NIC1_MBOX_SEG_NUM = 8,
-	SMBUS_SEG_NUM = 9,
-	FC2_MBOX_SEG_NUM = 10,
-	NIC2_MBOX_SEG_NUM = 11,
-	I2C_SEG_NUM = 12,
-	MEMC_SEG_NUM = 13,
-	PBUS_SEG_NUM = 14,
-	MDE_SEG_NUM = 15,
-	NIC1_CONTROL_SEG_NUM = 16,
-	NIC2_CONTROL_SEG_NUM = 17,
-	NIC1_XGMAC_SEG_NUM = 18,
-	NIC2_XGMAC_SEG_NUM = 19,
-	WCS_RAM_SEG_NUM = 20,
-	MEMC_RAM_SEG_NUM = 21,
-	XAUI_AN_SEG_NUM = 22,
-	XAUI_HSS_PCS_SEG_NUM = 23,
-	XFI_AN_SEG_NUM = 24,
-	XFI_TRAIN_SEG_NUM = 25,
-	XFI_HSS_PCS_SEG_NUM = 26,
-	XFI_HSS_TX_SEG_NUM = 27,
-	XFI_HSS_RX_SEG_NUM = 28,
-	XFI_HSS_PLL_SEG_NUM = 29,
-	MISC_NIC_INFO_SEG_NUM = 30,
-	INTR_STATES_SEG_NUM = 31,
-	CAM_ENTRIES_SEG_NUM = 32,
-	ROUTING_WORDS_SEG_NUM = 33,
-	ETS_SEG_NUM = 34,
-	PROBE_DUMP_SEG_NUM = 35,
-	ROUTING_INDEX_SEG_NUM = 36,
-	MAC_PROTOCOL_SEG_NUM = 37,
-	XAUI2_AN_SEG_NUM = 38,
-	XAUI2_HSS_PCS_SEG_NUM = 39,
-	XFI2_AN_SEG_NUM = 40,
-	XFI2_TRAIN_SEG_NUM = 41,
-	XFI2_HSS_PCS_SEG_NUM = 42,
-	XFI2_HSS_TX_SEG_NUM = 43,
-	XFI2_HSS_RX_SEG_NUM = 44,
-	XFI2_HSS_PLL_SEG_NUM = 45,
-	SEM_REGS_SEG_NUM = 50
-
-};
-
-/* There are 64 generic NIC registers. */
-#define NIC_REGS_DUMP_WORD_COUNT	64
-/* XGMAC word count. */
-#define XGMAC_DUMP_WORD_COUNT	(XGMAC_REGISTER_END / 4)
-/* Word counts for the SERDES blocks. */
-#define XG_SERDES_XAUI_AN_COUNT	14
-#define XG_SERDES_XAUI_HSS_PCS_COUNT	33
-#define XG_SERDES_XFI_AN_COUNT	14
-#define XG_SERDES_XFI_TRAIN_COUNT	12
-#define XG_SERDES_XFI_HSS_PCS_COUNT	15
-#define XG_SERDES_XFI_HSS_TX_COUNT	32
-#define XG_SERDES_XFI_HSS_RX_COUNT	32
-#define XG_SERDES_XFI_HSS_PLL_COUNT	32
-
-/* There are 2 CNA ETS and 8 NIC ETS registers. */
-#define ETS_REGS_DUMP_WORD_COUNT	10
-
-/* Each probe mux entry stores the probe type plus 64 entries
- * that are each 64 bits in length. There are a total of
- * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
- */
-#define PRB_MX_ADDR_PRB_WORD_COUNT	(1 + (PRB_MX_ADDR_MAX_MUX * 2))
-#define PRB_MX_DUMP_TOT_COUNT	(PRB_MX_ADDR_PRB_WORD_COUNT * \
-				 PRB_MX_ADDR_VALID_TOTAL)
-/* Each routing entry consists of 4 32-bit words.
- * They are route type, index, index word, and result.
- * There are 2 route blocks with 8 entries each and
- * 2 NIC blocks with 16 entries each.
- * The total is 48 entries with 4 words each.
- */
-#define RT_IDX_DUMP_ENTRIES	48
-#define RT_IDX_DUMP_WORDS_PER_ENTRY	4
-#define RT_IDX_DUMP_TOT_WORDS	(RT_IDX_DUMP_ENTRIES * \
-				 RT_IDX_DUMP_WORDS_PER_ENTRY)
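Worked out, the sizing above comes to: 1 probe-type word plus 64 muxes times 2 data words = 129 words per probed module, times the 34 valid module/clock pairs = 4386 words for the probe segment; and 48 routing entries times 4 words = 192 words for the routing segment. The same arithmetic as compile-time checks (C11 static_assert; the two constants are copied from the definitions above):

#include <assert.h>

#define MAX_MUX		64	/* PRB_MX_ADDR_MAX_MUX */
#define VALID_TOTAL	34	/* PRB_MX_ADDR_VALID_TOTAL */

static_assert(1 + MAX_MUX * 2 == 129, "words per probed module");
static_assert((1 + MAX_MUX * 2) * VALID_TOTAL == 4386, "probe dump words");
static_assert(48 * 4 == 192, "routing dump words");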
-/* There are 10 address blocks in filter, each with
- * different entry counts and different word-count-per-entry.
- */
-#define MAC_ADDR_DUMP_ENTRIES \
-	((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
-	 (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
-	 (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
-	 (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
-	 (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
-	 (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
-	 (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
-	 (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
-	 (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
-	 (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
-#define MAC_ADDR_DUMP_WORDS_PER_ENTRY	2
-#define MAC_ADDR_DUMP_TOT_WORDS	(MAC_ADDR_DUMP_ENTRIES * \
-				 MAC_ADDR_DUMP_WORDS_PER_ENTRY)
-/* Maximum of 4 functions whose semaphore registers are
- * in the coredump.
- */
-#define MAX_SEMAPHORE_FUNCTIONS	4
-/* Defines for accessing the MPI shadow registers. */
-#define RISC_124	0x0003007c
-#define RISC_127	0x0003007f
-#define SHADOW_OFFSET	0xb0000000
-#define SHADOW_REG_SHIFT	20
-
-struct ql_nic_misc {
-	u32 rx_ring_count;
-	u32 tx_ring_count;
-	u32 intr_count;
-	u32 function;
-};
-
-struct ql_reg_dump {
-
-	/* segment 0 */
-	struct mpi_coredump_global_header mpi_global_header;
-
-	/* segment 16 */
-	struct mpi_coredump_segment_header nic_regs_seg_hdr;
-	u32 nic_regs[64];
-
-	/* segment 30 */
-	struct mpi_coredump_segment_header misc_nic_seg_hdr;
-	struct ql_nic_misc misc_nic_info;
-
-	/* segment 31 */
-	/* one interrupt state for each CQ */
-	struct mpi_coredump_segment_header intr_states_seg_hdr;
-	u32 intr_states[MAX_CPUS];
-
-	/* segment 32 */
-	/* 3 cam words each for 16 unicast,
-	 * 2 cam words for each of 32 multicast.
- */ - struct mpi_coredump_segment_header cam_entries_seg_hdr; - u32 cam_entries[(16 * 3) + (32 * 3)]; - - /* segment 33 */ - struct mpi_coredump_segment_header nic_routing_words_seg_hdr; - u32 nic_routing_words[16]; - - /* segment 34 */ - struct mpi_coredump_segment_header ets_seg_hdr; - u32 ets[8+2]; -}; - -struct ql_mpi_coredump { - /* segment 0 */ - struct mpi_coredump_global_header mpi_global_header; - - /* segment 1 */ - struct mpi_coredump_segment_header core_regs_seg_hdr; - u32 mpi_core_regs[MPI_CORE_REGS_CNT]; - u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT]; - - /* segment 2 */ - struct mpi_coredump_segment_header test_logic_regs_seg_hdr; - u32 test_logic_regs[TEST_REGS_CNT]; - - /* segment 3 */ - struct mpi_coredump_segment_header rmii_regs_seg_hdr; - u32 rmii_regs[RMII_REGS_CNT]; - - /* segment 4 */ - struct mpi_coredump_segment_header fcmac1_regs_seg_hdr; - u32 fcmac1_regs[FCMAC_REGS_CNT]; - - /* segment 5 */ - struct mpi_coredump_segment_header fcmac2_regs_seg_hdr; - u32 fcmac2_regs[FCMAC_REGS_CNT]; - - /* segment 6 */ - struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr; - u32 fc1_mbx_regs[FC_MBX_REGS_CNT]; - - /* segment 7 */ - struct mpi_coredump_segment_header ide_regs_seg_hdr; - u32 ide_regs[IDE_REGS_CNT]; - - /* segment 8 */ - struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr; - u32 nic1_mbx_regs[NIC_MBX_REGS_CNT]; - - /* segment 9 */ - struct mpi_coredump_segment_header smbus_regs_seg_hdr; - u32 smbus_regs[SMBUS_REGS_CNT]; - - /* segment 10 */ - struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr; - u32 fc2_mbx_regs[FC_MBX_REGS_CNT]; - - /* segment 11 */ - struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr; - u32 nic2_mbx_regs[NIC_MBX_REGS_CNT]; - - /* segment 12 */ - struct mpi_coredump_segment_header i2c_regs_seg_hdr; - u32 i2c_regs[I2C_REGS_CNT]; - /* segment 13 */ - struct mpi_coredump_segment_header memc_regs_seg_hdr; - u32 memc_regs[MEMC_REGS_CNT]; - - /* segment 14 */ - struct mpi_coredump_segment_header pbus_regs_seg_hdr; - u32 pbus_regs[PBUS_REGS_CNT]; - - /* segment 15 */ - struct mpi_coredump_segment_header mde_regs_seg_hdr; - u32 mde_regs[MDE_REGS_CNT]; - - /* segment 16 */ - struct mpi_coredump_segment_header nic_regs_seg_hdr; - u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT]; - - /* segment 17 */ - struct mpi_coredump_segment_header nic2_regs_seg_hdr; - u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT]; - - /* segment 18 */ - struct mpi_coredump_segment_header xgmac1_seg_hdr; - u32 xgmac1[XGMAC_DUMP_WORD_COUNT]; - - /* segment 19 */ - struct mpi_coredump_segment_header xgmac2_seg_hdr; - u32 xgmac2[XGMAC_DUMP_WORD_COUNT]; - - /* segment 20 */ - struct mpi_coredump_segment_header code_ram_seg_hdr; - u32 code_ram[CODE_RAM_CNT]; - - /* segment 21 */ - struct mpi_coredump_segment_header memc_ram_seg_hdr; - u32 memc_ram[MEMC_RAM_CNT]; - - /* segment 22 */ - struct mpi_coredump_segment_header xaui_an_hdr; - u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT]; - - /* segment 23 */ - struct mpi_coredump_segment_header xaui_hss_pcs_hdr; - u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT]; - - /* segment 24 */ - struct mpi_coredump_segment_header xfi_an_hdr; - u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT]; - - /* segment 25 */ - struct mpi_coredump_segment_header xfi_train_hdr; - u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT]; - - /* segment 26 */ - struct mpi_coredump_segment_header xfi_hss_pcs_hdr; - u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT]; - - /* segment 27 */ - struct mpi_coredump_segment_header xfi_hss_tx_hdr; - u32 
serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT]; - - /* segment 28 */ - struct mpi_coredump_segment_header xfi_hss_rx_hdr; - u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT]; - - /* segment 29 */ - struct mpi_coredump_segment_header xfi_hss_pll_hdr; - u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT]; - - /* segment 30 */ - struct mpi_coredump_segment_header misc_nic_seg_hdr; - struct ql_nic_misc misc_nic_info; - - /* segment 31 */ - /* one interrupt state for each CQ */ - struct mpi_coredump_segment_header intr_states_seg_hdr; - u32 intr_states[MAX_RX_RINGS]; - - /* segment 32 */ - /* 3 cam words each for 16 unicast, - * 2 cam words for each of 32 multicast. - */ - struct mpi_coredump_segment_header cam_entries_seg_hdr; - u32 cam_entries[(16 * 3) + (32 * 3)]; - - /* segment 33 */ - struct mpi_coredump_segment_header nic_routing_words_seg_hdr; - u32 nic_routing_words[16]; - /* segment 34 */ - struct mpi_coredump_segment_header ets_seg_hdr; - u32 ets[ETS_REGS_DUMP_WORD_COUNT]; - - /* segment 35 */ - struct mpi_coredump_segment_header probe_dump_seg_hdr; - u32 probe_dump[PRB_MX_DUMP_TOT_COUNT]; - - /* segment 36 */ - struct mpi_coredump_segment_header routing_reg_seg_hdr; - u32 routing_regs[RT_IDX_DUMP_TOT_WORDS]; - - /* segment 37 */ - struct mpi_coredump_segment_header mac_prot_reg_seg_hdr; - u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS]; - - /* segment 38 */ - struct mpi_coredump_segment_header xaui2_an_hdr; - u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT]; - - /* segment 39 */ - struct mpi_coredump_segment_header xaui2_hss_pcs_hdr; - u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT]; - - /* segment 40 */ - struct mpi_coredump_segment_header xfi2_an_hdr; - u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT]; - - /* segment 41 */ - struct mpi_coredump_segment_header xfi2_train_hdr; - u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT]; - - /* segment 42 */ - struct mpi_coredump_segment_header xfi2_hss_pcs_hdr; - u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT]; - - /* segment 43 */ - struct mpi_coredump_segment_header xfi2_hss_tx_hdr; - u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT]; - - /* segment 44 */ - struct mpi_coredump_segment_header xfi2_hss_rx_hdr; - u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT]; - - /* segment 45 */ - struct mpi_coredump_segment_header xfi2_hss_pll_hdr; - u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT]; - - /* segment 50 */ - /* semaphore register for all 5 functions */ - struct mpi_coredump_segment_header sem_regs_seg_hdr; - u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS]; -}; - -/* - * intr_context structure is used during initialization - * to hook the interrupts. It is also used in a single - * irq environment as a context to the ISR. - */ -struct intr_context { - struct ql_adapter *qdev; - u32 intr; - u32 irq_mask; /* Mask of which rings the vector services. */ - u32 hooked; - u32 intr_en_mask; /* value/mask used to enable this intr */ - u32 intr_dis_mask; /* value/mask used to disable this intr */ - u32 intr_read_mask; /* value/mask used to read this intr */ - char name[IFNAMSIZ * 2]; - atomic_t irq_cnt; /* irq_cnt is used in single vector - * environment. It's incremented for each - * irq handler that is scheduled. When each - * handler finishes it decrements irq_cnt and - * enables interrupts if it's zero. */ - irq_handler_t handler; -}; - -/* adapter flags definitions. */ -enum { - QL_ADAPTER_UP = 0, /* Adapter has been brought up. 
 */
-	QL_LEGACY_ENABLED = 1,
-	QL_MSI_ENABLED = 2,
-	QL_MSIX_ENABLED = 3,
-	QL_DMA64 = 4,
-	QL_PROMISCUOUS = 5,
-	QL_ALLMULTI = 6,
-	QL_PORT_CFG = 7,
-	QL_CAM_RT_SET = 8,
-	QL_SELFTEST = 9,
-	QL_LB_LINK_UP = 10,
-	QL_FRC_COREDUMP = 11,
-	QL_EEH_FATAL = 12,
-	QL_ASIC_RECOVERY = 14,	/* We are in asic recovery. */
-};
-
-/* link_status bit definitions */
-enum {
-	STS_LOOPBACK_MASK = 0x00000700,
-	STS_LOOPBACK_PCS = 0x00000100,
-	STS_LOOPBACK_HSS = 0x00000200,
-	STS_LOOPBACK_EXT = 0x00000300,
-	STS_PAUSE_MASK = 0x000000c0,
-	STS_PAUSE_STD = 0x00000040,
-	STS_PAUSE_PRI = 0x00000080,
-	STS_SPEED_MASK = 0x00000038,
-	STS_SPEED_100Mb = 0x00000000,
-	STS_SPEED_1Gb = 0x00000008,
-	STS_SPEED_10Gb = 0x00000010,
-	STS_LINK_TYPE_MASK = 0x00000007,
-	STS_LINK_TYPE_XFI = 0x00000001,
-	STS_LINK_TYPE_XAUI = 0x00000002,
-	STS_LINK_TYPE_XFI_BP = 0x00000003,
-	STS_LINK_TYPE_XAUI_BP = 0x00000004,
-	STS_LINK_TYPE_10GBASET = 0x00000005,
-};
-
-/* link_config bit definitions */
-enum {
-	CFG_JUMBO_FRAME_SIZE = 0x00010000,
-	CFG_PAUSE_MASK = 0x00000060,
-	CFG_PAUSE_STD = 0x00000020,
-	CFG_PAUSE_PRI = 0x00000040,
-	CFG_DCBX = 0x00000010,
-	CFG_LOOPBACK_MASK = 0x00000007,
-	CFG_LOOPBACK_PCS = 0x00000002,
-	CFG_LOOPBACK_HSS = 0x00000004,
-	CFG_LOOPBACK_EXT = 0x00000006,
-	CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
-};
-
-struct nic_operations {
-
-	int (*get_flash) (struct ql_adapter *);
-	int (*port_initialize) (struct ql_adapter *);
-};
-
-/*
- * The main Adapter structure definition.
- * This structure has all fields relevant to the hardware.
- */
-struct ql_adapter {
-	struct ricb ricb;
-	unsigned long flags;
-	u32 wol;
-
-	struct nic_stats nic_stats;
-
-	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-
-	/* PCI Configuration information for this device */
-	struct pci_dev *pdev;
-	struct net_device *ndev;	/* Parent NET device */
-
-	/* Hardware information */
-	u32 chip_rev_id;
-	u32 fw_rev_id;
-	u32 func;		/* PCI function for this adapter */
-	u32 alt_func;		/* PCI function for alternate adapter */
-	u32 port;		/* Port number for this adapter */
-
-	spinlock_t adapter_lock;
-	spinlock_t hw_lock;
-	spinlock_t stats_lock;
-
-	/* PCI Bus Relative Register Addresses */
-	void __iomem *reg_base;
-	void __iomem *doorbell_area;
-	u32 doorbell_area_size;
-
-	u32 msg_enable;
-
-	/* Page for Shadow Registers */
-	void *rx_ring_shadow_reg_area;
-	dma_addr_t rx_ring_shadow_reg_dma;
-	void *tx_ring_shadow_reg_area;
-	dma_addr_t tx_ring_shadow_reg_dma;
-
-	u32 mailbox_in;
-	u32 mailbox_out;
-	struct mbox_params idc_mbc;
-	struct mutex mpi_mutex;
-
-	int tx_ring_size;
-	int rx_ring_size;
-	u32 intr_count;
-	struct msix_entry *msi_x_entry;
-	struct intr_context intr_context[MAX_RX_RINGS];
-
-	int tx_ring_count;	/* One per online CPU. */
-	u32 rss_ring_count;	/* One per irq vector.
*/ - /* - * rx_ring_count = - * (CPU count * outbound completion rx_ring) + - * (irq_vector_cnt * inbound (RSS) completion rx_ring) - */ - int rx_ring_count; - int ring_mem_size; - void *ring_mem; - - struct rx_ring rx_ring[MAX_RX_RINGS]; - struct tx_ring tx_ring[MAX_TX_RINGS]; - unsigned int lbq_buf_order; - - int rx_csum; - u32 default_rx_queue; - - u16 rx_coalesce_usecs; /* cqicb->int_delay */ - u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */ - u16 tx_coalesce_usecs; /* cqicb->int_delay */ - u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */ - - u32 xg_sem_mask; - u32 port_link_up; - u32 port_init; - u32 link_status; - struct ql_mpi_coredump *mpi_coredump; - u32 core_is_dumped; - u32 link_config; - u32 led_config; - u32 max_frame_size; - - union flash_params flash; - - struct workqueue_struct *workqueue; - struct delayed_work asic_reset_work; - struct delayed_work mpi_reset_work; - struct delayed_work mpi_work; - struct delayed_work mpi_port_cfg_work; - struct delayed_work mpi_idc_work; - struct delayed_work mpi_core_to_log; - struct completion ide_completion; - const struct nic_operations *nic_ops; - u16 device_id; - struct timer_list timer; - atomic_t lb_count; - /* Keep local copy of current mac address. */ - char current_mac_addr[ETH_ALEN]; -}; - -/* - * Typical Register accessor for memory mapped device. - */ -static inline u32 ql_read32(const struct ql_adapter *qdev, int reg) -{ - return readl(qdev->reg_base + reg); -} - -/* - * Typical Register accessor for memory mapped device. - */ -static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val) -{ - writel(val, qdev->reg_base + reg); -} - -/* - * Doorbell Registers: - * Doorbell registers are virtual registers in the PCI memory space. - * The space is allocated by the chip during PCI initialization. The - * device driver finds the doorbell address in BAR 3 in PCI config space. - * The registers are used to control outbound and inbound queues. For - * example, the producer index for an outbound queue. Each queue uses - * 1 4k chunk of memory. The lower half of the space is for outbound - * queues. The upper half is for inbound queues. - */ -static inline void ql_write_db_reg(u32 val, void __iomem *addr) -{ - writel(val, addr); -} - -/* - * Doorbell Registers: - * Doorbell registers are virtual registers in the PCI memory space. - * The space is allocated by the chip during PCI initialization. The - * device driver finds the doorbell address in BAR 3 in PCI config space. - * The registers are used to control outbound and inbound queues. For - * example, the producer index for an outbound queue. Each queue uses - * 1 4k chunk of memory. The lower half of the space is for outbound - * queues. The upper half is for inbound queues. - * Caller has to guarantee ordering. - */ -static inline void ql_write_db_reg_relaxed(u32 val, void __iomem *addr) -{ - writel_relaxed(val, addr); -} - -/* - * Shadow Registers: - * Outbound queues have a consumer index that is maintained by the chip. - * Inbound queues have a producer index that is maintained by the chip. - * For lower overhead, these registers are "shadowed" to host memory - * which allows the device driver to track the queue progress without - * PCI reads. When an entry is placed on an inbound queue, the chip will - * update the relevant index register and then copy the value to the - * shadow register in host memory. 
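To make the shadow-register comment above concrete: a completion loop only ever reads the shadowed producer index from host memory and only ever writes the consumer-index doorbell. The sketch below is a simplified model of the driver's rx completion path (the real loop lives in qlge_main.c), written against the structures in this header and the ql_read_sh_reg()/ql_write_db_reg() helpers defined just below; process_one_entry() is a hypothetical stand-in for the per-IOCB dispatch, and the curr_entry bookkeeping is elided:

/* Simplified sketch, not the driver's actual function. */
static int sketch_clean_cq(struct rx_ring *rx_ring, int budget)
{
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	int count = 0;

	while (prod != rx_ring->cnsmr_idx && count < budget) {
		process_one_entry(rx_ring);	/* hypothetical dispatch */

		/* advance the software consumer index, wrapping at cq_len */
		if (++rx_ring->cnsmr_idx == rx_ring->cq_len)
			rx_ring->cnsmr_idx = 0;
		count++;

		/* the chip may have produced more entries meanwhile;
		 * re-reading the shadow is a cached memory read, not a
		 * PCI round trip.
		 */
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}

	/* one posted PCI write tells the chip how far we got */
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
	return count;
}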
- */ -static inline u32 ql_read_sh_reg(__le32 *addr) -{ - u32 reg; - reg = le32_to_cpu(*addr); - rmb(); - return reg; -} - -extern char qlge_driver_name[]; -extern const char qlge_driver_version[]; -extern const struct ethtool_ops qlge_ethtool_ops; - -int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask); -void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask); -int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data); -int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, - u32 *value); -int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value); -int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, - u16 q_id); -void ql_queue_fw_error(struct ql_adapter *qdev); -void ql_mpi_work(struct work_struct *work); -void ql_mpi_reset_work(struct work_struct *work); -void ql_mpi_core_to_log(struct work_struct *work); -int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); -void ql_queue_asic_error(struct ql_adapter *qdev); -u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); -void ql_set_ethtool_ops(struct net_device *ndev); -int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data); -void ql_mpi_idc_work(struct work_struct *work); -void ql_mpi_port_cfg_work(struct work_struct *work); -int ql_mb_get_fw_state(struct ql_adapter *qdev); -int ql_cam_route_initialize(struct ql_adapter *qdev); -int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); -int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data); -int ql_unpause_mpi_risc(struct ql_adapter *qdev); -int ql_pause_mpi_risc(struct ql_adapter *qdev); -int ql_hard_reset_mpi_risc(struct ql_adapter *qdev); -int ql_soft_reset_mpi_risc(struct ql_adapter *qdev); -int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr, - int word_count); -int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump); -int ql_mb_about_fw(struct ql_adapter *qdev); -int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); -int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol); -int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config); -int ql_mb_get_led_cfg(struct ql_adapter *qdev); -void ql_link_on(struct ql_adapter *qdev); -void ql_link_off(struct ql_adapter *qdev); -int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control); -int ql_mb_get_port_cfg(struct ql_adapter *qdev); -int ql_mb_set_port_cfg(struct ql_adapter *qdev); -int ql_wait_fifo_empty(struct ql_adapter *qdev); -void ql_get_dump(struct ql_adapter *qdev, void *buff); -netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); -void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); -int ql_own_firmware(struct ql_adapter *qdev); -int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); - -/* #define QL_ALL_DUMP */ -/* #define QL_REG_DUMP */ -/* #define QL_DEV_DUMP */ -/* #define QL_CB_DUMP */ -/* #define QL_IB_DUMP */ -/* #define QL_OB_DUMP */ - -#ifdef QL_REG_DUMP -void ql_dump_xgmac_control_regs(struct ql_adapter *qdev); -void ql_dump_routing_entries(struct ql_adapter *qdev); -void ql_dump_regs(struct ql_adapter *qdev); -#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev) -#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev) -#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev) -#else -#define QL_DUMP_REGS(qdev) -#define QL_DUMP_ROUTE(qdev) -#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) -#endif - -#ifdef QL_STAT_DUMP -void ql_dump_stat(struct ql_adapter *qdev); -#define 
QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
-#else
-#define QL_DUMP_STAT(qdev)
-#endif
-
-#ifdef QL_DEV_DUMP
-void ql_dump_qdev(struct ql_adapter *qdev);
-#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
-#else
-#define QL_DUMP_QDEV(qdev)
-#endif
-
-#ifdef QL_CB_DUMP
-void ql_dump_wqicb(struct wqicb *wqicb);
-void ql_dump_tx_ring(struct tx_ring *tx_ring);
-void ql_dump_ricb(struct ricb *ricb);
-void ql_dump_cqicb(struct cqicb *cqicb);
-void ql_dump_rx_ring(struct rx_ring *rx_ring);
-void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
-#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
-#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
-#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
-#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
-#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
-#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
-	ql_dump_hw_cb(qdev, size, bit, q_id)
-#else
-#define QL_DUMP_RICB(ricb)
-#define QL_DUMP_WQICB(wqicb)
-#define QL_DUMP_TX_RING(tx_ring)
-#define QL_DUMP_CQICB(cqicb)
-#define QL_DUMP_RX_RING(rx_ring)
-#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
-#endif
-
-#ifdef QL_OB_DUMP
-void ql_dump_tx_desc(struct tx_buf_desc *tbd);
-void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
-void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
-#else
-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
-#endif
-
-#ifdef QL_IB_DUMP
-void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
-#else
-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
-#endif
-
-#ifdef QL_ALL_DUMP
-void ql_dump_all(struct ql_adapter *qdev);
-#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
-#else
-#define QL_DUMP_ALL(qdev)
-#endif
-
-#endif /* _QLGE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
deleted file mode 100644
index 31389ab8bdf7..000000000000
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ /dev/null
@@ -1,2024 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-
-#include "qlge.h"
-
-/* Read a NIC register from the alternate function. */
-static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
-				  u32 reg)
-{
-	u32 register_to_read;
-	u32 reg_val;
-	unsigned int status = 0;
-
-	register_to_read = MPI_NIC_REG_BLOCK
-			| MPI_NIC_READ
-			| (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
-			| reg;
-	status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
-	if (status != 0)
-		return 0xffffffff;
-
-	return reg_val;
-}
-
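ql_read_other_func_reg() above (and the write variant below) reach the sibling function's registers by bouncing through the MPI processor's register proxy: a block id, a read/write opcode, the peer's function number, and the register offset in 32-bit words are OR'ed into one proxy address, which is why call sites divide byte offsets by 4. A stand-alone sketch of that encoding; the SK_-prefixed values are illustrative placeholders, since the real MPI_NIC_* constants are defined with the MPI register map, not in this hunk:

#include <stdint.h>

#define SK_NIC_REG_BLOCK	0x00020000u	/* illustrative value */
#define SK_NIC_READ		0x00008000u	/* illustrative value */
#define SK_FUNCTION_SHIFT	6		/* illustrative value */

static uint32_t sk_proxy_addr(uint32_t peer_func, uint32_t byte_reg,
			      int is_read)
{
	/* the proxy takes 32-bit word offsets, hence the / 4 */
	return SK_NIC_REG_BLOCK
		| (is_read ? SK_NIC_READ : 0u)
		| (peer_func << SK_FUNCTION_SHIFT)
		| (byte_reg / 4);
}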
-/* Write a NIC register on the alternate function. */
-static int ql_write_other_func_reg(struct ql_adapter *qdev,
-				   u32 reg, u32 reg_val)
-{
-	u32 register_to_read;
-	int status = 0;
-
-	register_to_read = MPI_NIC_REG_BLOCK
-			| MPI_NIC_READ
-			| (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
-			| reg;
-	status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
-
-	return status;
-}
-
-static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
-				      u32 bit, u32 err_bit)
-{
-	u32 temp;
-	int count = 10;
-
-	while (count) {
-		temp = ql_read_other_func_reg(qdev, reg);
-
-		/* check for errors */
-		if (temp & err_bit)
-			return -1;
-		else if (temp & bit)
-			return 0;
-		mdelay(10);
-		count--;
-	}
-	return -1;
-}
-
-static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
-					 u32 *data)
-{
-	int status;
-
-	/* wait for reg to come ready */
-	status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
-					    XG_SERDES_ADDR_RDY, 0);
-	if (status)
-		goto exit;
-
-	/* set up for reg read */
-	ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
-
-	/* wait for reg to come ready */
-	status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
-					    XG_SERDES_ADDR_RDY, 0);
-	if (status)
-		goto exit;
-
-	/* get the data */
-	*data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
-exit:
-	return status;
-}
-
-/* Read out the SERDES registers */
-static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
-{
-	int status;
-
-	/* wait for reg to come ready */
-	status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
-	if (status)
-		goto exit;
-
-	/* set up for reg read */
-	ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
-
-	/* wait for reg to come ready */
-	status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
-	if (status)
-		goto exit;
-
-	/* get the data */
-	*data = ql_read32(qdev, XG_SERDES_DATA);
-exit:
-	return status;
-}
-
-static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
-			       u32 *direct_ptr, u32 *indirect_ptr,
-			       unsigned int direct_valid, unsigned int indirect_valid)
-{
-	unsigned int status;
-
-	status = 1;
-	if (direct_valid)
-		status = ql_read_serdes_reg(qdev, addr, direct_ptr);
-	/* Dead fill any failures or invalids. */
-	if (status)
-		*direct_ptr = 0xDEADBEEF;
-
-	status = 1;
-	if (indirect_valid)
-		status = ql_read_other_func_serdes_reg(
-			qdev, addr, indirect_ptr);
-	/* Dead fill any failures or invalids. */
-	if (status)
-		*indirect_ptr = 0xDEADBEEF;
-}
-
-static int ql_get_serdes_regs(struct ql_adapter *qdev,
-			      struct ql_mpi_coredump *mpi_coredump)
-{
-	int status;
-	unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
-	unsigned int xaui_indirect_valid, i;
-	u32 *direct_ptr, temp;
-	u32 *indirect_ptr;
-
-	xfi_direct_valid = xfi_indirect_valid = 0;
-	xaui_direct_valid = xaui_indirect_valid = 1;
-
-	/* The XAUI needs to be read out per port */
-	status = ql_read_other_func_serdes_reg(qdev,
-			XG_SERDES_XAUI_HSS_PCS_START, &temp);
-	if (status)
-		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
-
-	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
-	    XG_SERDES_ADDR_XAUI_PWR_DOWN)
-		xaui_indirect_valid = 0;
-
-	status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);
-
-	if (status)
-		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
-
-	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
-	    XG_SERDES_ADDR_XAUI_PWR_DOWN)
-		xaui_direct_valid = 0;
-
-	/*
-	 * The XFI register is shared, so we only need to read one
-	 * function and then check the bits.
- */ - status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp); - if (status) - temp = 0; - - if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) == - XG_SERDES_ADDR_XFI1_PWR_UP) { - /* now see if i'm NIC 1 or NIC 2 */ - if (qdev->func & 1) - /* I'm NIC 2, so the indirect (NIC1) xfi is up. */ - xfi_indirect_valid = 1; - else - xfi_direct_valid = 1; - } - if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) == - XG_SERDES_ADDR_XFI2_PWR_UP) { - /* now see if i'm NIC 1 or NIC 2 */ - if (qdev->func & 1) - /* I'm NIC 2, so the indirect (NIC1) xfi is up. */ - xfi_direct_valid = 1; - else - xfi_indirect_valid = 1; - } - - /* Get XAUI_AN register block. */ - if (qdev->func & 1) { - /* Function 2 is direct */ - direct_ptr = mpi_coredump->serdes2_xaui_an; - indirect_ptr = mpi_coredump->serdes_xaui_an; - } else { - /* Function 1 is direct */ - direct_ptr = mpi_coredump->serdes_xaui_an; - indirect_ptr = mpi_coredump->serdes2_xaui_an; - } - - for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xaui_direct_valid, xaui_indirect_valid); - - /* Get XAUI_HSS_PCS register block. */ - if (qdev->func & 1) { - direct_ptr = - mpi_coredump->serdes2_xaui_hss_pcs; - indirect_ptr = - mpi_coredump->serdes_xaui_hss_pcs; - } else { - direct_ptr = - mpi_coredump->serdes_xaui_hss_pcs; - indirect_ptr = - mpi_coredump->serdes2_xaui_hss_pcs; - } - - for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xaui_direct_valid, xaui_indirect_valid); - - /* Get XAUI_XFI_AN register block. */ - if (qdev->func & 1) { - direct_ptr = mpi_coredump->serdes2_xfi_an; - indirect_ptr = mpi_coredump->serdes_xfi_an; - } else { - direct_ptr = mpi_coredump->serdes_xfi_an; - indirect_ptr = mpi_coredump->serdes2_xfi_an; - } - - for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - - /* Get XAUI_XFI_TRAIN register block. */ - if (qdev->func & 1) { - direct_ptr = mpi_coredump->serdes2_xfi_train; - indirect_ptr = - mpi_coredump->serdes_xfi_train; - } else { - direct_ptr = mpi_coredump->serdes_xfi_train; - indirect_ptr = - mpi_coredump->serdes2_xfi_train; - } - - for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - - /* Get XAUI_XFI_HSS_PCS register block. */ - if (qdev->func & 1) { - direct_ptr = - mpi_coredump->serdes2_xfi_hss_pcs; - indirect_ptr = - mpi_coredump->serdes_xfi_hss_pcs; - } else { - direct_ptr = - mpi_coredump->serdes_xfi_hss_pcs; - indirect_ptr = - mpi_coredump->serdes2_xfi_hss_pcs; - } - - for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - - /* Get XAUI_XFI_HSS_TX register block. */ - if (qdev->func & 1) { - direct_ptr = - mpi_coredump->serdes2_xfi_hss_tx; - indirect_ptr = - mpi_coredump->serdes_xfi_hss_tx; - } else { - direct_ptr = mpi_coredump->serdes_xfi_hss_tx; - indirect_ptr = - mpi_coredump->serdes2_xfi_hss_tx; - } - for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++) - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, - xfi_direct_valid, xfi_indirect_valid); - - /* Get XAUI_XFI_HSS_RX register block. 
 */
-	if (qdev->func & 1) {
-		direct_ptr =
-			mpi_coredump->serdes2_xfi_hss_rx;
-		indirect_ptr =
-			mpi_coredump->serdes_xfi_hss_rx;
-	} else {
-		direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
-		indirect_ptr =
-			mpi_coredump->serdes2_xfi_hss_rx;
-	}
-
-	for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
-		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
-				   xfi_direct_valid, xfi_indirect_valid);
-
-
-	/* Get XAUI_XFI_HSS_PLL register block. */
-	if (qdev->func & 1) {
-		direct_ptr =
-			mpi_coredump->serdes2_xfi_hss_pll;
-		indirect_ptr =
-			mpi_coredump->serdes_xfi_hss_pll;
-	} else {
-		direct_ptr =
-			mpi_coredump->serdes_xfi_hss_pll;
-		indirect_ptr =
-			mpi_coredump->serdes2_xfi_hss_pll;
-	}
-	for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
-		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
-				   xfi_direct_valid, xfi_indirect_valid);
-	return 0;
-}
-
-static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
-					u32 *data)
-{
-	int status = 0;
-
-	/* wait for reg to come ready */
-	status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
-					    XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
-	if (status)
-		goto exit;
-
-	/* set up for reg read */
-	ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
-
-	/* wait for reg to come ready */
-	status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
-					    XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
-	if (status)
-		goto exit;
-
-	/* get the data */
-	*data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
-exit:
-	return status;
-}
-
-/* Read the 400 xgmac control/statistics registers
- * skipping unused locations.
- */
-static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
-			     unsigned int other_function)
-{
-	int status = 0;
-	int i;
-
-	for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
-		/* We're reading 400 xgmac registers, but we filter out
-		 * several locations that are non-responsive to reads.
- */ - if ((i == 0x00000114) || - (i == 0x00000118) || - (i == 0x0000013c) || - (i == 0x00000140) || - (i > 0x00000150 && i < 0x000001fc) || - (i > 0x00000278 && i < 0x000002a0) || - (i > 0x000002c0 && i < 0x000002cf) || - (i > 0x000002dc && i < 0x000002f0) || - (i > 0x000003c8 && i < 0x00000400) || - (i > 0x00000400 && i < 0x00000410) || - (i > 0x00000410 && i < 0x00000420) || - (i > 0x00000420 && i < 0x00000430) || - (i > 0x00000430 && i < 0x00000440) || - (i > 0x00000440 && i < 0x00000450) || - (i > 0x00000450 && i < 0x00000500) || - (i > 0x0000054c && i < 0x00000568) || - (i > 0x000005c8 && i < 0x00000600)) { - if (other_function) - status = - ql_read_other_func_xgmac_reg(qdev, i, buf); - else - status = ql_read_xgmac_reg(qdev, i, buf); - - if (status) - *buf = 0xdeadbeef; - break; - } - } - return status; -} - -static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf) -{ - int status = 0; - int i; - - for (i = 0; i < 8; i++, buf++) { - ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000); - *buf = ql_read32(qdev, NIC_ETS); - } - - for (i = 0; i < 2; i++, buf++) { - ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000); - *buf = ql_read32(qdev, CNA_ETS); - } - - return status; -} - -static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf) -{ - int i; - - for (i = 0; i < qdev->rx_ring_count; i++, buf++) { - ql_write32(qdev, INTR_EN, - qdev->intr_context[i].intr_read_mask); - *buf = ql_read32(qdev, INTR_EN); - } -} - -static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf) -{ - int i, status; - u32 value[3]; - - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return status; - - for (i = 0; i < 16; i++) { - status = ql_get_mac_addr_reg(qdev, - MAC_ADDR_TYPE_CAM_MAC, i, value); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed read of mac index register\n"); - goto err; - } - *buf++ = value[0]; /* lower MAC address */ - *buf++ = value[1]; /* upper MAC address */ - *buf++ = value[2]; /* output */ - } - for (i = 0; i < 32; i++) { - status = ql_get_mac_addr_reg(qdev, - MAC_ADDR_TYPE_MULTI_MAC, i, value); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed read of mac index register\n"); - goto err; - } - *buf++ = value[0]; /* lower Mcast address */ - *buf++ = value[1]; /* upper Mcast address */ - } -err: - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - return status; -} - -static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf) -{ - int status; - u32 value, i; - - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (status) - return status; - - for (i = 0; i < 16; i++) { - status = ql_get_routing_reg(qdev, i, &value); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed read of routing index register\n"); - goto err; - } else { - *buf++ = value; - } - } -err: - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); - return status; -} - -/* Read the MPI Processor shadow registers */ -static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf) -{ - u32 i; - int status; - - for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) { - status = ql_write_mpi_reg(qdev, RISC_124, - (SHADOW_OFFSET | i << SHADOW_REG_SHIFT)); - if (status) - goto end; - status = ql_read_mpi_reg(qdev, RISC_127, buf); - if (status) - goto end; - } -end: - return status; -} - -/* Read the MPI Processor core registers */ -static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf, - u32 offset, u32 count) -{ - int i, status = 0; - for (i = 0; i < count; i++, buf++) { - status = ql_read_mpi_reg(qdev, offset + i, buf); - if (status) - return status; - } - return status; 
-} - -/* Read the ASIC probe dump */ -static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock, - u32 valid, u32 *buf) -{ - u32 module, mux_sel, probe, lo_val, hi_val; - - for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) { - if (!((valid >> module) & 1)) - continue; - for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) { - probe = clock - | PRB_MX_ADDR_ARE - | mux_sel - | (module << PRB_MX_ADDR_MOD_SEL_SHIFT); - ql_write32(qdev, PRB_MX_ADDR, probe); - lo_val = ql_read32(qdev, PRB_MX_DATA); - if (mux_sel == 0) { - *buf = probe; - buf++; - } - probe |= PRB_MX_ADDR_UP; - ql_write32(qdev, PRB_MX_ADDR, probe); - hi_val = ql_read32(qdev, PRB_MX_DATA); - *buf = lo_val; - buf++; - *buf = hi_val; - buf++; - } - } - return buf; -} - -static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf) -{ - /* First we have to enable the probe mux */ - ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN); - buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK, - PRB_MX_ADDR_VALID_SYS_MOD, buf); - buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK, - PRB_MX_ADDR_VALID_PCI_MOD, buf); - buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK, - PRB_MX_ADDR_VALID_XGM_MOD, buf); - buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK, - PRB_MX_ADDR_VALID_FC_MOD, buf); - return 0; - -} - -/* Read out the routing index registers */ -static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf) -{ - int status; - u32 type, index, index_max; - u32 result_index; - u32 result_data; - u32 val; - - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (status) - return status; - - for (type = 0; type < 4; type++) { - if (type < 2) - index_max = 8; - else - index_max = 16; - for (index = 0; index < index_max; index++) { - val = RT_IDX_RS - | (type << RT_IDX_TYPE_SHIFT) - | (index << RT_IDX_IDX_SHIFT); - ql_write32(qdev, RT_IDX, val); - result_index = 0; - while ((result_index & RT_IDX_MR) == 0) - result_index = ql_read32(qdev, RT_IDX); - result_data = ql_read32(qdev, RT_DATA); - *buf = type; - buf++; - *buf = index; - buf++; - *buf = result_index; - buf++; - *buf = result_data; - buf++; - } - } - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); - return status; -} - -/* Read out the MAC protocol registers */ -static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf) -{ - u32 result_index, result_data; - u32 type; - u32 index; - u32 offset; - u32 val; - u32 initial_val = MAC_ADDR_RS; - u32 max_index; - u32 max_offset; - - for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) { - switch (type) { - - case 0: /* CAM */ - initial_val |= MAC_ADDR_ADR; - max_index = MAC_ADDR_MAX_CAM_ENTRIES; - max_offset = MAC_ADDR_MAX_CAM_WCOUNT; - break; - case 1: /* Multicast MAC Address */ - max_index = MAC_ADDR_MAX_CAM_WCOUNT; - max_offset = MAC_ADDR_MAX_CAM_WCOUNT; - break; - case 2: /* VLAN filter mask */ - case 3: /* MC filter mask */ - max_index = MAC_ADDR_MAX_CAM_WCOUNT; - max_offset = MAC_ADDR_MAX_CAM_WCOUNT; - break; - case 4: /* FC MAC addresses */ - max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES; - max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT; - break; - case 5: /* Mgmt MAC addresses */ - max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES; - max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT; - break; - case 6: /* Mgmt VLAN addresses */ - max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES; - max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT; - break; - case 7: /* Mgmt IPv4 address */ - max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES; - max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT; - break; - case 8: /* Mgmt IPv6 address */ - max_index = 
MAC_ADDR_MAX_MGMT_V6_ENTRIES; - max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT; - break; - case 9: /* Mgmt TCP/UDP Dest port */ - max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES; - max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT; - break; - default: - pr_err("Bad type!!! 0x%08x\n", type); - max_index = 0; - max_offset = 0; - break; - } - for (index = 0; index < max_index; index++) { - for (offset = 0; offset < max_offset; offset++) { - val = initial_val - | (type << MAC_ADDR_TYPE_SHIFT) - | (index << MAC_ADDR_IDX_SHIFT) - | (offset); - ql_write32(qdev, MAC_ADDR_IDX, val); - result_index = 0; - while ((result_index & MAC_ADDR_MR) == 0) { - result_index = ql_read32(qdev, - MAC_ADDR_IDX); - } - result_data = ql_read32(qdev, MAC_ADDR_DATA); - *buf = result_index; - buf++; - *buf = result_data; - buf++; - } - } - } -} - -static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf) -{ - u32 func_num, reg, reg_val; - int status; - - for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) { - reg = MPI_NIC_REG_BLOCK - | (func_num << MPI_NIC_FUNCTION_SHIFT) - | (SEM / 4); - status = ql_read_mpi_reg(qdev, reg, &reg_val); - *buf = reg_val; - /* if the read failed then dead fill the element. */ - if (!status) - *buf = 0xdeadbeef; - buf++; - } -} - -/* Create a coredump segment header */ -static void ql_build_coredump_seg_header( - struct mpi_coredump_segment_header *seg_hdr, - u32 seg_number, u32 seg_size, u8 *desc) -{ - memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header)); - seg_hdr->cookie = MPI_COREDUMP_COOKIE; - seg_hdr->segNum = seg_number; - seg_hdr->segSize = seg_size; - strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); -} - -/* - * This function should be called when a coredump / probedump - * is to be extracted from the HBA. It is assumed there is a - * qdev structure that contains the base address of the register - * space for this function as well as a coredump structure that - * will contain the dump. - */ -int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) -{ - int status; - int i; - - if (!mpi_coredump) { - netif_err(qdev, drv, qdev->ndev, "No memory allocated\n"); - return -EINVAL; - } - - /* Try to get the spinlock, but dont worry if - * it isn't available. If the firmware died it - * might be holding the sem. - */ - ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); - - status = ql_pause_mpi_risc(qdev); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed RISC pause. Status = 0x%.08x\n", status); - goto err; - } - - /* Insert the global header */ - memset(&(mpi_coredump->mpi_global_header), 0, - sizeof(struct mpi_coredump_global_header)); - mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; - mpi_coredump->mpi_global_header.headerSize = - sizeof(struct mpi_coredump_global_header); - mpi_coredump->mpi_global_header.imageSize = - sizeof(struct ql_mpi_coredump); - strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", - sizeof(mpi_coredump->mpi_global_header.idString)); - - /* Get generic NIC reg dump */ - ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, - NIC1_CONTROL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->nic_regs), "NIC1 Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr, - NIC2_CONTROL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->nic2_regs), "NIC2 Registers"); - - /* Get XGMac registers. (Segment 18, Rev C. 
step 21) */ - ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr, - NIC1_XGMAC_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr, - NIC2_XGMAC_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers"); - - if (qdev->func & 1) { - /* Odd means our function is NIC 2 */ - for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) - mpi_coredump->nic2_regs[i] = - ql_read32(qdev, i * sizeof(u32)); - - for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) - mpi_coredump->nic_regs[i] = - ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); - - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0); - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1); - } else { - /* Even means our function is NIC 1 */ - for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) - mpi_coredump->nic_regs[i] = - ql_read32(qdev, i * sizeof(u32)); - for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) - mpi_coredump->nic2_regs[i] = - ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); - - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0); - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1); - } - - /* Rev C. Step 20a */ - ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr, - XAUI_AN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xaui_an), - "XAUI AN Registers"); - - /* Rev C. Step 20b */ - ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr, - XAUI_HSS_PCS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xaui_hss_pcs), - "XAUI HSS PCS Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_an), - "XFI AN Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr, - XFI_TRAIN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_train), - "XFI TRAIN Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr, - XFI_HSS_PCS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_hss_pcs), - "XFI HSS PCS Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr, - XFI_HSS_TX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_hss_tx), - "XFI HSS TX Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr, - XFI_HSS_RX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_hss_rx), - "XFI HSS RX Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr, - XFI_HSS_PLL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes_xfi_hss_pll), - "XFI HSS PLL Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr, - XAUI2_AN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xaui_an), - "XAUI2 AN Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr, - XAUI2_HSS_PCS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xaui_hss_pcs), - "XAUI2 HSS PCS Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr, - XFI2_AN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_an), - "XFI2 AN Registers"); - - 
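The NIC1/NIC2 mirroring above hinges on the parity of qdev->func: the even PCI function is NIC1 and the odd one is NIC2, so the same loop runs twice with the destination buffers swapped, reading the local block by byte offset and the peer block through the other-function window by word index. A hedged sketch of that dispatch; read_both_nic_blocks() and dump_nic_reg_blocks() are hypothetical names, while the accessors and NIC_REGS_DUMP_WORD_COUNT come from the deleted driver.

static void read_both_nic_blocks(struct ql_adapter *qdev,
                                 u32 *local, u32 *peer, int words)
{
        int i;

        for (i = 0; i < words; i++) {
                local[i] = ql_read32(qdev, i * sizeof(u32));    /* byte offset */
                peer[i] = ql_read_other_func_reg(qdev, i);      /* word index */
        }
}

static void dump_nic_reg_blocks(struct ql_adapter *qdev,
                                struct ql_mpi_coredump *dump)
{
        if (qdev->func & 1)     /* odd function: we are NIC2 */
                read_both_nic_blocks(qdev, dump->nic2_regs, dump->nic_regs,
                                     NIC_REGS_DUMP_WORD_COUNT);
        else                    /* even function: we are NIC1 */
                read_both_nic_blocks(qdev, dump->nic_regs, dump->nic2_regs,
                                     NIC_REGS_DUMP_WORD_COUNT);
}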
ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr, - XFI2_TRAIN_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_train), - "XFI2 TRAIN Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr, - XFI2_HSS_PCS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_hss_pcs), - "XFI2 HSS PCS Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr, - XFI2_HSS_TX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_hss_tx), - "XFI2 HSS TX Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr, - XFI2_HSS_RX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_hss_rx), - "XFI2 HSS RX Registers"); - - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr, - XFI2_HSS_PLL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->serdes2_xfi_hss_pll), - "XFI2 HSS PLL Registers"); - - status = ql_get_serdes_regs(qdev, mpi_coredump); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed Dump of Serdes Registers. Status = 0x%.08x\n", - status); - goto err; - } - - ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr, - CORE_SEG_NUM, - sizeof(mpi_coredump->core_regs_seg_hdr) + - sizeof(mpi_coredump->mpi_core_regs) + - sizeof(mpi_coredump->mpi_core_sh_regs), - "Core Registers"); - - /* Get the MPI Core Registers */ - status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0], - MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT); - if (status) - goto err; - /* Get the 16 MPI shadow registers */ - status = ql_get_mpi_shadow_regs(qdev, - &mpi_coredump->mpi_core_sh_regs[0]); - if (status) - goto err; - - /* Get the Test Logic Registers */ - ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr, - TEST_LOGIC_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->test_logic_regs), - "Test Logic Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0], - TEST_REGS_ADDR, TEST_REGS_CNT); - if (status) - goto err; - - /* Get the RMII Registers */ - ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr, - RMII_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->rmii_regs), - "RMII Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0], - RMII_REGS_ADDR, RMII_REGS_CNT); - if (status) - goto err; - - /* Get the FCMAC1 Registers */ - ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr, - FCMAC1_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->fcmac1_regs), - "FCMAC1 Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0], - FCMAC1_REGS_ADDR, FCMAC_REGS_CNT); - if (status) - goto err; - - /* Get the FCMAC2 Registers */ - - ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr, - FCMAC2_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->fcmac2_regs), - "FCMAC2 Registers"); - - status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0], - FCMAC2_REGS_ADDR, FCMAC_REGS_CNT); - if (status) - goto err; - - /* Get the FC1 MBX Registers */ - ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr, - FC1_MBOX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->fc1_mbx_regs), - "FC1 MBox Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0], - FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT); - if (status) - goto err; - - 
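Each MPI block in this stretch follows the same two-step shape: build a cookie-tagged segment header sized as header plus payload, then bulk-read one register window into the matching array. Purely as an illustration of that repetition, the blocks could be driven from a table; this refactor is hypothetical (struct mpi_block and dump_mpi_blocks() are invented names, and the real struct ql_mpi_coredump interleaves each header with its payload, hence the per-entry header pointer).

struct mpi_block {
        u32 seg_num;                                    /* e.g. RMII_SEG_NUM */
        u32 addr;                                       /* e.g. RMII_REGS_ADDR */
        u32 cnt;                                        /* e.g. RMII_REGS_CNT */
        const char *name;                               /* e.g. "RMII Registers" */
        u32 *dest;                                      /* payload array in the dump */
        struct mpi_coredump_segment_header *hdr;        /* its header in the dump */
};

static int dump_mpi_blocks(struct ql_adapter *qdev,
                           const struct mpi_block *blk, int nblocks)
{
        int i, status;

        for (i = 0; i < nblocks; i++, blk++) {
                ql_build_coredump_seg_header(blk->hdr, blk->seg_num,
                                             sizeof(*blk->hdr) +
                                             blk->cnt * sizeof(u32),
                                             (u8 *)blk->name);
                status = ql_get_mpi_regs(qdev, blk->dest, blk->addr, blk->cnt);
                if (status)
                        return status;
        }
        return 0;
}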
/* Get the IDE Registers */ - ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr, - IDE_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->ide_regs), - "IDE Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0], - IDE_REGS_ADDR, IDE_REGS_CNT); - if (status) - goto err; - - /* Get the NIC1 MBX Registers */ - ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr, - NIC1_MBOX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->nic1_mbx_regs), - "NIC1 MBox Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0], - NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); - if (status) - goto err; - - /* Get the SMBus Registers */ - ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr, - SMBUS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->smbus_regs), - "SMBus Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0], - SMBUS_REGS_ADDR, SMBUS_REGS_CNT); - if (status) - goto err; - - /* Get the FC2 MBX Registers */ - ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr, - FC2_MBOX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->fc2_mbx_regs), - "FC2 MBox Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0], - FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT); - if (status) - goto err; - - /* Get the NIC2 MBX Registers */ - ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr, - NIC2_MBOX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->nic2_mbx_regs), - "NIC2 MBox Regs"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0], - NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); - if (status) - goto err; - - /* Get the I2C Registers */ - ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr, - I2C_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->i2c_regs), - "I2C Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0], - I2C_REGS_ADDR, I2C_REGS_CNT); - if (status) - goto err; - - /* Get the MEMC Registers */ - ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr, - MEMC_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->memc_regs), - "MEMC Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0], - MEMC_REGS_ADDR, MEMC_REGS_CNT); - if (status) - goto err; - - /* Get the PBus Registers */ - ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr, - PBUS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->pbus_regs), - "PBUS Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0], - PBUS_REGS_ADDR, PBUS_REGS_CNT); - if (status) - goto err; - - /* Get the MDE Registers */ - ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr, - MDE_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->mde_regs), - "MDE Registers"); - status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0], - MDE_REGS_ADDR, MDE_REGS_CNT); - if (status) - goto err; - - ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, - MISC_NIC_INFO_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->misc_nic_info), - "MISC NIC INFO"); - mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; - mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; - mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; - mpi_coredump->misc_nic_info.function = qdev->func; - 
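With the global header and these fixed-position segments filled in, the finished image is self-describing: a consumer can skip the global header (its headerSize field gives the length) and then hop from segment to segment by segSize, checking the cookie each time. A sketch of such a walker under the assumption, which matches the back-to-back layout of struct ql_mpi_coredump, that segments are packed contiguously; walk_coredump() is a hypothetical consumer, not part of the driver.

static void walk_coredump(const u8 *dump, size_t len)
{
        const struct mpi_coredump_segment_header *seg;
        size_t off = sizeof(struct mpi_coredump_global_header);

        while (off + sizeof(*seg) <= len) {
                seg = (const void *)(dump + off);
                /* Every valid segment repeats the coredump cookie. */
                if (seg->cookie != MPI_COREDUMP_COOKIE)
                        break;
                /* segSize covers header plus payload; reject impossible sizes. */
                if (seg->segSize < sizeof(*seg))
                        break;
                pr_info("seg %u (%s): %u bytes\n", seg->segNum,
                        seg->description, seg->segSize);
                off += seg->segSize;
        }
}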
- /* Segment 31 */ - /* Get indexed register values. */ - ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, - INTR_STATES_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->intr_states), - "INTR States"); - ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); - - ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, - CAM_ENTRIES_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->cam_entries), - "CAM Entries"); - status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); - if (status) - goto err; - - ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, - ROUTING_WORDS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->nic_routing_words), - "Routing Words"); - status = ql_get_routing_entries(qdev, - &mpi_coredump->nic_routing_words[0]); - if (status) - goto err; - - /* Segment 34 (Rev C. step 23) */ - ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, - ETS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->ets), - "ETS Registers"); - status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); - if (status) - goto err; - - ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr, - PROBE_DUMP_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->probe_dump), - "Probe Dump"); - ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]); - - ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr, - ROUTING_INDEX_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->routing_regs), - "Routing Regs"); - status = ql_get_routing_index_registers(qdev, - &mpi_coredump->routing_regs[0]); - if (status) - goto err; - - ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr, - MAC_PROTOCOL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->mac_prot_regs), - "MAC Prot Regs"); - ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]); - - /* Get the semaphore registers for all 5 functions */ - ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr, - SEM_REGS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) + - sizeof(mpi_coredump->sem_regs), "Sem Registers"); - - ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]); - - /* Prevent the mpi restarting while we dump the memory.*/ - ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC); - - /* clear the pause */ - status = ql_unpause_mpi_risc(qdev); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed RISC unpause. Status = 0x%.08x\n", status); - goto err; - } - - /* Reset the RISC so we can dump RAM */ - status = ql_hard_reset_mpi_risc(qdev); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed RISC reset. Status = 0x%.08x\n", status); - goto err; - } - - ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr, - WCS_RAM_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->code_ram), - "WCS RAM"); - status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0], - CODE_RAM_ADDR, CODE_RAM_CNT); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed Dump of CODE RAM. 
Status = 0x%.08x\n", - status); - goto err; - } - - /* Insert the segment header */ - ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr, - MEMC_RAM_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->memc_ram), - "MEMC RAM"); - status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0], - MEMC_RAM_ADDR, MEMC_RAM_CNT); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Failed Dump of MEMC RAM. Status = 0x%.08x\n", - status); - goto err; - } -err: - ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ - return status; - -} - -static void ql_get_core_dump(struct ql_adapter *qdev) -{ - if (!ql_own_firmware(qdev)) { - netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n"); - return; - } - - if (!netif_running(qdev->ndev)) { - netif_err(qdev, ifup, qdev->ndev, - "Force Coredump can only be done from interface that is up\n"); - return; - } - ql_queue_fw_error(qdev); -} - -static void ql_gen_reg_dump(struct ql_adapter *qdev, - struct ql_reg_dump *mpi_coredump) -{ - int i, status; - - - memset(&(mpi_coredump->mpi_global_header), 0, - sizeof(struct mpi_coredump_global_header)); - mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; - mpi_coredump->mpi_global_header.headerSize = - sizeof(struct mpi_coredump_global_header); - mpi_coredump->mpi_global_header.imageSize = - sizeof(struct ql_reg_dump); - strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", - sizeof(mpi_coredump->mpi_global_header.idString)); - - - /* segment 16 */ - ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, - MISC_NIC_INFO_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->misc_nic_info), - "MISC NIC INFO"); - mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; - mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; - mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; - mpi_coredump->misc_nic_info.function = qdev->func; - - /* Segment 16, Rev C. Step 18 */ - ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, - NIC1_CONTROL_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->nic_regs), - "NIC Registers"); - /* Get generic reg dump */ - for (i = 0; i < 64; i++) - mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32)); - - /* Segment 31 */ - /* Get indexed register values. */ - ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, - INTR_STATES_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->intr_states), - "INTR States"); - ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); - - ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, - CAM_ENTRIES_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->cam_entries), - "CAM Entries"); - status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); - if (status) - return; - - ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, - ROUTING_WORDS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->nic_routing_words), - "Routing Words"); - status = ql_get_routing_entries(qdev, - &mpi_coredump->nic_routing_words[0]); - if (status) - return; - - /* Segment 34 (Rev C. 
step 23) */ - ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, - ETS_SEG_NUM, - sizeof(struct mpi_coredump_segment_header) - + sizeof(mpi_coredump->ets), - "ETS Registers"); - status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); - if (status) - return; -} - -void ql_get_dump(struct ql_adapter *qdev, void *buff) -{ - /* - * If the dump has already been taken and is stored - * in our internal buffer and if force dump is set then - * just start the spool to dump it to the log file - * and also, take a snapshot of the general regs to - * to the user's buffer or else take complete dump - * to the user's buffer if force is not set. - */ - - if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) { - if (!ql_core_dump(qdev, buff)) - ql_soft_reset_mpi_risc(qdev); - else - netif_err(qdev, drv, qdev->ndev, "coredump failed!\n"); - } else { - ql_gen_reg_dump(qdev, buff); - ql_get_core_dump(qdev); - } -} - -/* Coredump to messages log file using separate worker thread */ -void ql_mpi_core_to_log(struct work_struct *work) -{ - struct ql_adapter *qdev = - container_of(work, struct ql_adapter, mpi_core_to_log.work); - u32 *tmp, count; - int i; - - count = sizeof(struct ql_mpi_coredump) / sizeof(u32); - tmp = (u32 *)qdev->mpi_coredump; - netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, - "Core is dumping to log file!\n"); - - for (i = 0; i < count; i += 8) { - pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x " - "%.08x %.08x %.08x\n", i, - tmp[i + 0], - tmp[i + 1], - tmp[i + 2], - tmp[i + 3], - tmp[i + 4], - tmp[i + 5], - tmp[i + 6], - tmp[i + 7]); - msleep(5); - } -} - -#ifdef QL_REG_DUMP -static void ql_dump_intr_states(struct ql_adapter *qdev) -{ - int i; - u32 value; - for (i = 0; i < qdev->intr_count; i++) { - ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); - value = ql_read32(qdev, INTR_EN); - pr_err("%s: Interrupt %d is %s\n", - qdev->ndev->name, i, - (value & INTR_EN_EN ? 
"enabled" : "disabled")); - } -} - -#define DUMP_XGMAC(qdev, reg) \ -do { \ - u32 data; \ - ql_read_xgmac_reg(qdev, reg, &data); \ - pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \ -} while (0) - -void ql_dump_xgmac_control_regs(struct ql_adapter *qdev) -{ - if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { - pr_err("%s: Couldn't get xgmac sem\n", __func__); - return; - } - DUMP_XGMAC(qdev, PAUSE_SRC_LO); - DUMP_XGMAC(qdev, PAUSE_SRC_HI); - DUMP_XGMAC(qdev, GLOBAL_CFG); - DUMP_XGMAC(qdev, TX_CFG); - DUMP_XGMAC(qdev, RX_CFG); - DUMP_XGMAC(qdev, FLOW_CTL); - DUMP_XGMAC(qdev, PAUSE_OPCODE); - DUMP_XGMAC(qdev, PAUSE_TIMER); - DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO); - DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI); - DUMP_XGMAC(qdev, MAC_TX_PARAMS); - DUMP_XGMAC(qdev, MAC_RX_PARAMS); - DUMP_XGMAC(qdev, MAC_SYS_INT); - DUMP_XGMAC(qdev, MAC_SYS_INT_MASK); - DUMP_XGMAC(qdev, MAC_MGMT_INT); - DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK); - DUMP_XGMAC(qdev, EXT_ARB_MODE); - ql_sem_unlock(qdev, qdev->xg_sem_mask); -} - -static void ql_dump_ets_regs(struct ql_adapter *qdev) -{ -} - -static void ql_dump_cam_entries(struct ql_adapter *qdev) -{ - int i; - u32 value[3]; - - i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (i) - return; - for (i = 0; i < 4; i++) { - if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { - pr_err("%s: Failed read of mac index register\n", - __func__); - return; - } else { - if (value[0]) - pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n", - qdev->ndev->name, i, value[1], value[0], - value[2]); - } - } - for (i = 0; i < 32; i++) { - if (ql_get_mac_addr_reg - (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) { - pr_err("%s: Failed read of mac index register\n", - __func__); - return; - } else { - if (value[0]) - pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n", - qdev->ndev->name, i, value[1], value[0]); - } - } - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); -} - -void ql_dump_routing_entries(struct ql_adapter *qdev) -{ - int i; - u32 value; - i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (i) - return; - for (i = 0; i < 16; i++) { - value = 0; - if (ql_get_routing_reg(qdev, i, &value)) { - pr_err("%s: Failed read of routing index register\n", - __func__); - return; - } else { - if (value) - pr_err("%s: Routing Mask %d = 0x%.08x\n", - qdev->ndev->name, i, value); - } - } - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); -} - -#define DUMP_REG(qdev, reg) \ - pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg)) - -void ql_dump_regs(struct ql_adapter *qdev) -{ - pr_err("reg dump for function #%d\n", qdev->func); - DUMP_REG(qdev, SYS); - DUMP_REG(qdev, RST_FO); - DUMP_REG(qdev, FSC); - DUMP_REG(qdev, CSR); - DUMP_REG(qdev, ICB_RID); - DUMP_REG(qdev, ICB_L); - DUMP_REG(qdev, ICB_H); - DUMP_REG(qdev, CFG); - DUMP_REG(qdev, BIOS_ADDR); - DUMP_REG(qdev, STS); - DUMP_REG(qdev, INTR_EN); - DUMP_REG(qdev, INTR_MASK); - DUMP_REG(qdev, ISR1); - DUMP_REG(qdev, ISR2); - DUMP_REG(qdev, ISR3); - DUMP_REG(qdev, ISR4); - DUMP_REG(qdev, REV_ID); - DUMP_REG(qdev, FRC_ECC_ERR); - DUMP_REG(qdev, ERR_STS); - DUMP_REG(qdev, RAM_DBG_ADDR); - DUMP_REG(qdev, RAM_DBG_DATA); - DUMP_REG(qdev, ECC_ERR_CNT); - DUMP_REG(qdev, SEM); - DUMP_REG(qdev, GPIO_1); - DUMP_REG(qdev, GPIO_2); - DUMP_REG(qdev, GPIO_3); - DUMP_REG(qdev, XGMAC_ADDR); - DUMP_REG(qdev, XGMAC_DATA); - DUMP_REG(qdev, NIC_ETS); - DUMP_REG(qdev, CNA_ETS); - DUMP_REG(qdev, FLASH_ADDR); - DUMP_REG(qdev, FLASH_DATA); - DUMP_REG(qdev, CQ_STOP); - DUMP_REG(qdev, PAGE_TBL_RID); - DUMP_REG(qdev, WQ_PAGE_TBL_LO); - 
DUMP_REG(qdev, WQ_PAGE_TBL_HI); - DUMP_REG(qdev, CQ_PAGE_TBL_LO); - DUMP_REG(qdev, CQ_PAGE_TBL_HI); - DUMP_REG(qdev, COS_DFLT_CQ1); - DUMP_REG(qdev, COS_DFLT_CQ2); - DUMP_REG(qdev, SPLT_HDR); - DUMP_REG(qdev, FC_PAUSE_THRES); - DUMP_REG(qdev, NIC_PAUSE_THRES); - DUMP_REG(qdev, FC_ETHERTYPE); - DUMP_REG(qdev, FC_RCV_CFG); - DUMP_REG(qdev, NIC_RCV_CFG); - DUMP_REG(qdev, FC_COS_TAGS); - DUMP_REG(qdev, NIC_COS_TAGS); - DUMP_REG(qdev, MGMT_RCV_CFG); - DUMP_REG(qdev, XG_SERDES_ADDR); - DUMP_REG(qdev, XG_SERDES_DATA); - DUMP_REG(qdev, PRB_MX_ADDR); - DUMP_REG(qdev, PRB_MX_DATA); - ql_dump_intr_states(qdev); - ql_dump_xgmac_control_regs(qdev); - ql_dump_ets_regs(qdev); - ql_dump_cam_entries(qdev); - ql_dump_routing_entries(qdev); -} -#endif - -#ifdef QL_STAT_DUMP - -#define DUMP_STAT(qdev, stat) \ - pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat) - -void ql_dump_stat(struct ql_adapter *qdev) -{ - pr_err("%s: Enter\n", __func__); - DUMP_STAT(qdev, tx_pkts); - DUMP_STAT(qdev, tx_bytes); - DUMP_STAT(qdev, tx_mcast_pkts); - DUMP_STAT(qdev, tx_bcast_pkts); - DUMP_STAT(qdev, tx_ucast_pkts); - DUMP_STAT(qdev, tx_ctl_pkts); - DUMP_STAT(qdev, tx_pause_pkts); - DUMP_STAT(qdev, tx_64_pkt); - DUMP_STAT(qdev, tx_65_to_127_pkt); - DUMP_STAT(qdev, tx_128_to_255_pkt); - DUMP_STAT(qdev, tx_256_511_pkt); - DUMP_STAT(qdev, tx_512_to_1023_pkt); - DUMP_STAT(qdev, tx_1024_to_1518_pkt); - DUMP_STAT(qdev, tx_1519_to_max_pkt); - DUMP_STAT(qdev, tx_undersize_pkt); - DUMP_STAT(qdev, tx_oversize_pkt); - DUMP_STAT(qdev, rx_bytes); - DUMP_STAT(qdev, rx_bytes_ok); - DUMP_STAT(qdev, rx_pkts); - DUMP_STAT(qdev, rx_pkts_ok); - DUMP_STAT(qdev, rx_bcast_pkts); - DUMP_STAT(qdev, rx_mcast_pkts); - DUMP_STAT(qdev, rx_ucast_pkts); - DUMP_STAT(qdev, rx_undersize_pkts); - DUMP_STAT(qdev, rx_oversize_pkts); - DUMP_STAT(qdev, rx_jabber_pkts); - DUMP_STAT(qdev, rx_undersize_fcerr_pkts); - DUMP_STAT(qdev, rx_drop_events); - DUMP_STAT(qdev, rx_fcerr_pkts); - DUMP_STAT(qdev, rx_align_err); - DUMP_STAT(qdev, rx_symbol_err); - DUMP_STAT(qdev, rx_mac_err); - DUMP_STAT(qdev, rx_ctl_pkts); - DUMP_STAT(qdev, rx_pause_pkts); - DUMP_STAT(qdev, rx_64_pkts); - DUMP_STAT(qdev, rx_65_to_127_pkts); - DUMP_STAT(qdev, rx_128_255_pkts); - DUMP_STAT(qdev, rx_256_511_pkts); - DUMP_STAT(qdev, rx_512_to_1023_pkts); - DUMP_STAT(qdev, rx_1024_to_1518_pkts); - DUMP_STAT(qdev, rx_1519_to_max_pkts); - DUMP_STAT(qdev, rx_len_err_pkts); -}; -#endif - -#ifdef QL_DEV_DUMP - -#define DUMP_QDEV_FIELD(qdev, type, field) \ - pr_err("qdev->%-24s = " type "\n", #field, qdev->field) -#define DUMP_QDEV_DMA_FIELD(qdev, field) \ - pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field) -#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \ - pr_err("%s[%d].%s = " type "\n", \ - #array, index, #field, qdev->array[index].field); -void ql_dump_qdev(struct ql_adapter *qdev) -{ - int i; - DUMP_QDEV_FIELD(qdev, "%lx", flags); - DUMP_QDEV_FIELD(qdev, "%p", vlgrp); - DUMP_QDEV_FIELD(qdev, "%p", pdev); - DUMP_QDEV_FIELD(qdev, "%p", ndev); - DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id); - DUMP_QDEV_FIELD(qdev, "%p", reg_base); - DUMP_QDEV_FIELD(qdev, "%p", doorbell_area); - DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size); - DUMP_QDEV_FIELD(qdev, "%x", msg_enable); - DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area); - DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma); - DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area); - DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma); - DUMP_QDEV_FIELD(qdev, "%d", intr_count); - if (qdev->msi_x_entry) - for (i = 0; 
i < qdev->intr_count; i++) { - DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector); - DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry); - } - for (i = 0; i < qdev->intr_count; i++) { - DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev); - DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr); - DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked); - DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask); - DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask); - DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask); - } - DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count); - DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count); - DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size); - DUMP_QDEV_FIELD(qdev, "%p", ring_mem); - DUMP_QDEV_FIELD(qdev, "%d", intr_count); - DUMP_QDEV_FIELD(qdev, "%p", tx_ring); - DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count); - DUMP_QDEV_FIELD(qdev, "%p", rx_ring); - DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue); - DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask); - DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up); - DUMP_QDEV_FIELD(qdev, "0x%08x", port_init); -} -#endif - -#ifdef QL_CB_DUMP -void ql_dump_wqicb(struct wqicb *wqicb) -{ - pr_err("Dumping wqicb stuff...\n"); - pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len)); - pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags)); - pr_err("wqicb->cq_id_rss = %d\n", - le16_to_cpu(wqicb->cq_id_rss)); - pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid)); - pr_err("wqicb->wq_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(wqicb->addr)); - pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr)); -} - -void ql_dump_tx_ring(struct tx_ring *tx_ring) -{ - if (tx_ring == NULL) - return; - pr_err("===================== Dumping tx_ring %d ===============\n", - tx_ring->wq_id); - pr_err("tx_ring->base = %p\n", tx_ring->wq_base); - pr_err("tx_ring->base_dma = 0x%llx\n", - (unsigned long long) tx_ring->wq_base_dma); - pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n", - tx_ring->cnsmr_idx_sh_reg, - tx_ring->cnsmr_idx_sh_reg - ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0); - pr_err("tx_ring->size = %d\n", tx_ring->wq_size); - pr_err("tx_ring->len = %d\n", tx_ring->wq_len); - pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg); - pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg); - pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx); - pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id); - pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id); - pr_err("tx_ring->q = %p\n", tx_ring->q); - pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count)); -} - -void ql_dump_ricb(struct ricb *ricb) -{ - int i; - pr_err("===================== Dumping ricb ===============\n"); - pr_err("Dumping ricb stuff...\n"); - - pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f); - pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n", - ricb->base_cq & RSS_L4K ? "RSS_L4K " : "", - ricb->flags & RSS_L6K ? "RSS_L6K " : "", - ricb->flags & RSS_LI ? "RSS_LI " : "", - ricb->flags & RSS_LB ? "RSS_LB " : "", - ricb->flags & RSS_LM ? "RSS_LM " : "", - ricb->flags & RSS_RI4 ? "RSS_RI4 " : "", - ricb->flags & RSS_RT4 ? "RSS_RT4 " : "", - ricb->flags & RSS_RI6 ? "RSS_RI6 " : "", - ricb->flags & RSS_RT6 ? 
"RSS_RT6 " : ""); - pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask)); - for (i = 0; i < 16; i++) - pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i, - le32_to_cpu(ricb->hash_cq_id[i])); - for (i = 0; i < 10; i++) - pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i, - le32_to_cpu(ricb->ipv6_hash_key[i])); - for (i = 0; i < 4; i++) - pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i, - le32_to_cpu(ricb->ipv4_hash_key[i])); -} - -void ql_dump_cqicb(struct cqicb *cqicb) -{ - pr_err("Dumping cqicb stuff...\n"); - - pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect); - pr_err("cqicb->flags = %x\n", cqicb->flags); - pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len)); - pr_err("cqicb->addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->addr)); - pr_err("cqicb->prod_idx_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr)); - pr_err("cqicb->pkt_delay = 0x%.04x\n", - le16_to_cpu(cqicb->pkt_delay)); - pr_err("cqicb->irq_delay = 0x%.04x\n", - le16_to_cpu(cqicb->irq_delay)); - pr_err("cqicb->lbq_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->lbq_addr)); - pr_err("cqicb->lbq_buf_size = 0x%.04x\n", - le16_to_cpu(cqicb->lbq_buf_size)); - pr_err("cqicb->lbq_len = 0x%.04x\n", - le16_to_cpu(cqicb->lbq_len)); - pr_err("cqicb->sbq_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(cqicb->sbq_addr)); - pr_err("cqicb->sbq_buf_size = 0x%.04x\n", - le16_to_cpu(cqicb->sbq_buf_size)); - pr_err("cqicb->sbq_len = 0x%.04x\n", - le16_to_cpu(cqicb->sbq_len)); -} - -void ql_dump_rx_ring(struct rx_ring *rx_ring) -{ - if (rx_ring == NULL) - return; - pr_err("===================== Dumping rx_ring %d ===============\n", - rx_ring->cq_id); - pr_err("Dumping rx_ring %d, type = %s%s%s\n", - rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "", - rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "", - rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : ""); - pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb); - pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base); - pr_err("rx_ring->cq_base_dma = %llx\n", - (unsigned long long) rx_ring->cq_base_dma); - pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size); - pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len); - pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n", - rx_ring->prod_idx_sh_reg, - rx_ring->prod_idx_sh_reg - ? 
ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0); - pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n", - (unsigned long long) rx_ring->prod_idx_sh_reg_dma); - pr_err("rx_ring->cnsmr_idx_db_reg = %p\n", - rx_ring->cnsmr_idx_db_reg); - pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx); - pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry); - pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg); - - pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base); - pr_err("rx_ring->lbq_base_dma = %llx\n", - (unsigned long long) rx_ring->lbq_base_dma); - pr_err("rx_ring->lbq_base_indirect = %p\n", - rx_ring->lbq_base_indirect); - pr_err("rx_ring->lbq_base_indirect_dma = %llx\n", - (unsigned long long) rx_ring->lbq_base_indirect_dma); - pr_err("rx_ring->lbq = %p\n", rx_ring->lbq); - pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len); - pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size); - pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n", - rx_ring->lbq_prod_idx_db_reg); - pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx); - pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx); - pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx); - pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt); - pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size); - - pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base); - pr_err("rx_ring->sbq_base_dma = %llx\n", - (unsigned long long) rx_ring->sbq_base_dma); - pr_err("rx_ring->sbq_base_indirect = %p\n", - rx_ring->sbq_base_indirect); - pr_err("rx_ring->sbq_base_indirect_dma = %llx\n", - (unsigned long long) rx_ring->sbq_base_indirect_dma); - pr_err("rx_ring->sbq = %p\n", rx_ring->sbq); - pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len); - pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size); - pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n", - rx_ring->sbq_prod_idx_db_reg); - pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx); - pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx); - pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx); - pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt); - pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size); - pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id); - pr_err("rx_ring->irq = %d\n", rx_ring->irq); - pr_err("rx_ring->cpu = %d\n", rx_ring->cpu); - pr_err("rx_ring->qdev = %p\n", rx_ring->qdev); -} - -void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) -{ - void *ptr; - - pr_err("%s: Enter\n", __func__); - - ptr = kmalloc(size, GFP_ATOMIC); - if (ptr == NULL) - return; - - if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { - pr_err("%s: Failed to upload control block!\n", __func__); - goto fail_it; - } - switch (bit) { - case CFG_DRQ: - ql_dump_wqicb((struct wqicb *)ptr); - break; - case CFG_DCQ: - ql_dump_cqicb((struct cqicb *)ptr); - break; - case CFG_DR: - ql_dump_ricb((struct ricb *)ptr); - break; - default: - pr_err("%s: Invalid bit value = %x\n", __func__, bit); - break; - } -fail_it: - kfree(ptr); -} -#endif - -#ifdef QL_OB_DUMP -void ql_dump_tx_desc(struct tx_buf_desc *tbd) -{ - pr_err("tbd->addr = 0x%llx\n", - le64_to_cpu((u64) tbd->addr)); - pr_err("tbd->len = %d\n", - le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); - pr_err("tbd->flags = %s %s\n", - tbd->len & TX_DESC_C ? "C" : ".", - tbd->len & TX_DESC_E ? 
"E" : "."); - tbd++; - pr_err("tbd->addr = 0x%llx\n", - le64_to_cpu((u64) tbd->addr)); - pr_err("tbd->len = %d\n", - le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); - pr_err("tbd->flags = %s %s\n", - tbd->len & TX_DESC_C ? "C" : ".", - tbd->len & TX_DESC_E ? "E" : "."); - tbd++; - pr_err("tbd->addr = 0x%llx\n", - le64_to_cpu((u64) tbd->addr)); - pr_err("tbd->len = %d\n", - le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); - pr_err("tbd->flags = %s %s\n", - tbd->len & TX_DESC_C ? "C" : ".", - tbd->len & TX_DESC_E ? "E" : "."); - -} - -void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) -{ - struct ob_mac_tso_iocb_req *ob_mac_tso_iocb = - (struct ob_mac_tso_iocb_req *)ob_mac_iocb; - struct tx_buf_desc *tbd; - u16 frame_len; - - pr_err("%s\n", __func__); - pr_err("opcode = %s\n", - (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO"); - pr_err("flags1 = %s %s %s %s %s\n", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "", - ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : ""); - pr_err("flags2 = %s %s %s\n", - ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "", - ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "", - ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : ""); - pr_err("flags3 = %s %s %s\n", - ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "", - ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "", - ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : ""); - pr_err("tid = %x\n", ob_mac_iocb->tid); - pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx); - pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci); - if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) { - pr_err("frame_len = %d\n", - le32_to_cpu(ob_mac_tso_iocb->frame_len)); - pr_err("mss = %d\n", - le16_to_cpu(ob_mac_tso_iocb->mss)); - pr_err("prot_hdr_len = %d\n", - le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len)); - pr_err("hdr_offset = 0x%.04x\n", - le16_to_cpu(ob_mac_tso_iocb->net_trans_offset)); - frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len); - } else { - pr_err("frame_len = %d\n", - le16_to_cpu(ob_mac_iocb->frame_len)); - frame_len = le16_to_cpu(ob_mac_iocb->frame_len); - } - tbd = &ob_mac_iocb->tbd[0]; - ql_dump_tx_desc(tbd); -} - -void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) -{ - pr_err("%s\n", __func__); - pr_err("opcode = %d\n", ob_mac_rsp->opcode); - pr_err("flags = %s %s %s %s %s %s %s\n", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".", - ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".", - ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : "."); - pr_err("tid = %x\n", ob_mac_rsp->tid); -} -#endif - -#ifdef QL_IB_DUMP -void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) -{ - pr_err("%s\n", __func__); - pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode); - pr_err("flags1 = %s%s%s%s%s%s\n", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "", - ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? 
"B " : ""); - - if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) - pr_err("%s%s%s Multicast\n", - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "", - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_REG ? "Registered" : "", - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); - - pr_err("flags2 = %s%s%s%s%s\n", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : ""); - - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) - pr_err("%s%s%s%s%s error\n", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "", - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == - IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : ""); - - pr_err("flags3 = %s%s\n", - ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "", - ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : ""); - - if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) - pr_err("RSS flags = %s%s%s%s\n", - ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == - IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "", - ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == - IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "", - ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == - IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "", - ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == - IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : ""); - - pr_err("data_len = %d\n", - le32_to_cpu(ib_mac_rsp->data_len)); - pr_err("data_addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr)); - if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) - pr_err("rss = %x\n", - le32_to_cpu(ib_mac_rsp->rss)); - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) - pr_err("vlan_id = %x\n", - le16_to_cpu(ib_mac_rsp->vlan_id)); - - pr_err("flags4 = %s%s%s\n", - ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "", - ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "", - ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? 
"HL " : ""); - - if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { - pr_err("hdr length = %d\n", - le32_to_cpu(ib_mac_rsp->hdr_len)); - pr_err("hdr addr = 0x%llx\n", - (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr)); - } -} -#endif - -#ifdef QL_ALL_DUMP -void ql_dump_all(struct ql_adapter *qdev) -{ - int i; - - QL_DUMP_REGS(qdev); - QL_DUMP_QDEV(qdev); - for (i = 0; i < qdev->tx_ring_count; i++) { - QL_DUMP_TX_RING(&qdev->tx_ring[i]); - QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]); - } - for (i = 0; i < qdev->rx_ring_count; i++) { - QL_DUMP_RX_RING(&qdev->rx_ring[i]); - QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]); - } -} -#endif diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c deleted file mode 100644 index a6886cc5654c..000000000000 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ /dev/null @@ -1,735 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/module.h> -#include <linux/list.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/pagemap.h> -#include <linux/sched.h> -#include <linux/dmapool.h> -#include <linux/mempool.h> -#include <linux/spinlock.h> -#include <linux/kthread.h> -#include <linux/interrupt.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/ip.h> -#include <linux/ipv6.h> -#include <net/ipv6.h> -#include <linux/tcp.h> -#include <linux/udp.h> -#include <linux/if_arp.h> -#include <linux/if_ether.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/skbuff.h> -#include <linux/rtnetlink.h> -#include <linux/if_vlan.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <linux/vmalloc.h> - - -#include "qlge.h" - -struct ql_stats { - char stat_string[ETH_GSTRING_LEN]; - int sizeof_stat; - int stat_offset; -}; - -#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m) -#define QL_OFF(m) offsetof(struct ql_adapter, m) - -static const struct ql_stats ql_gstrings_stats[] = { - {"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)}, - {"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)}, - {"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts), - QL_OFF(nic_stats.tx_mcast_pkts)}, - {"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts), - QL_OFF(nic_stats.tx_bcast_pkts)}, - {"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts), - QL_OFF(nic_stats.tx_ucast_pkts)}, - {"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts), - QL_OFF(nic_stats.tx_ctl_pkts)}, - {"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts), - QL_OFF(nic_stats.tx_pause_pkts)}, - {"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt), - QL_OFF(nic_stats.tx_64_pkt)}, - {"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt), - QL_OFF(nic_stats.tx_65_to_127_pkt)}, - {"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt), - QL_OFF(nic_stats.tx_128_to_255_pkt)}, - {"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt), - QL_OFF(nic_stats.tx_256_511_pkt)}, - {"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt), - QL_OFF(nic_stats.tx_512_to_1023_pkt)}, - {"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt), - QL_OFF(nic_stats.tx_1024_to_1518_pkt)}, - {"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt), - QL_OFF(nic_stats.tx_1519_to_max_pkt)}, - {"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt), - QL_OFF(nic_stats.tx_undersize_pkt)}, - {"tx_oversize_pkts", 
QL_SIZEOF(nic_stats.tx_oversize_pkt), - QL_OFF(nic_stats.tx_oversize_pkt)}, - {"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)}, - {"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok), - QL_OFF(nic_stats.rx_bytes_ok)}, - {"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)}, - {"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok), - QL_OFF(nic_stats.rx_pkts_ok)}, - {"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts), - QL_OFF(nic_stats.rx_bcast_pkts)}, - {"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts), - QL_OFF(nic_stats.rx_mcast_pkts)}, - {"rx_ucast_pkts", QL_SIZEOF(nic_stats.rx_ucast_pkts), - QL_OFF(nic_stats.rx_ucast_pkts)}, - {"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts), - QL_OFF(nic_stats.rx_undersize_pkts)}, - {"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts), - QL_OFF(nic_stats.rx_oversize_pkts)}, - {"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts), - QL_OFF(nic_stats.rx_jabber_pkts)}, - {"rx_undersize_fcerr_pkts", - QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts), - QL_OFF(nic_stats.rx_undersize_fcerr_pkts)}, - {"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events), - QL_OFF(nic_stats.rx_drop_events)}, - {"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts), - QL_OFF(nic_stats.rx_fcerr_pkts)}, - {"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err), - QL_OFF(nic_stats.rx_align_err)}, - {"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err), - QL_OFF(nic_stats.rx_symbol_err)}, - {"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err), - QL_OFF(nic_stats.rx_mac_err)}, - {"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts), - QL_OFF(nic_stats.rx_ctl_pkts)}, - {"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts), - QL_OFF(nic_stats.rx_pause_pkts)}, - {"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts), - QL_OFF(nic_stats.rx_64_pkts)}, - {"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts), - QL_OFF(nic_stats.rx_65_to_127_pkts)}, - {"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts), - QL_OFF(nic_stats.rx_128_255_pkts)}, - {"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts), - QL_OFF(nic_stats.rx_256_511_pkts)}, - {"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts), - QL_OFF(nic_stats.rx_512_to_1023_pkts)}, - {"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts), - QL_OFF(nic_stats.rx_1024_to_1518_pkts)}, - {"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts), - QL_OFF(nic_stats.rx_1519_to_max_pkts)}, - {"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts), - QL_OFF(nic_stats.rx_len_err_pkts)}, - {"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err), - QL_OFF(nic_stats.rx_code_err)}, - {"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err), - QL_OFF(nic_stats.rx_oversize_err)}, - {"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err), - QL_OFF(nic_stats.rx_undersize_err)}, - {"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err), - QL_OFF(nic_stats.rx_preamble_err)}, - {"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err), - QL_OFF(nic_stats.rx_frame_len_err)}, - {"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err), - QL_OFF(nic_stats.rx_crc_err)}, - {"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count), - QL_OFF(nic_stats.rx_err_count)}, - {"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0), - QL_OFF(nic_stats.tx_cbfc_pause_frames0)}, - {"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1), - QL_OFF(nic_stats.tx_cbfc_pause_frames1)}, - {"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2), - QL_OFF(nic_stats.tx_cbfc_pause_frames2)}, - 
{"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3), - QL_OFF(nic_stats.tx_cbfc_pause_frames3)}, - {"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4), - QL_OFF(nic_stats.tx_cbfc_pause_frames4)}, - {"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5), - QL_OFF(nic_stats.tx_cbfc_pause_frames5)}, - {"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6), - QL_OFF(nic_stats.tx_cbfc_pause_frames6)}, - {"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7), - QL_OFF(nic_stats.tx_cbfc_pause_frames7)}, - {"rx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0), - QL_OFF(nic_stats.rx_cbfc_pause_frames0)}, - {"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1), - QL_OFF(nic_stats.rx_cbfc_pause_frames1)}, - {"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2), - QL_OFF(nic_stats.rx_cbfc_pause_frames2)}, - {"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3), - QL_OFF(nic_stats.rx_cbfc_pause_frames3)}, - {"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4), - QL_OFF(nic_stats.rx_cbfc_pause_frames4)}, - {"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5), - QL_OFF(nic_stats.rx_cbfc_pause_frames5)}, - {"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6), - QL_OFF(nic_stats.rx_cbfc_pause_frames6)}, - {"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7), - QL_OFF(nic_stats.rx_cbfc_pause_frames7)}, - {"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop), - QL_OFF(nic_stats.rx_nic_fifo_drop)}, -}; - -static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { - "Loopback test (offline)" -}; -#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) -#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats) -#define QLGE_RCV_MAC_ERR_STATS 7 - -static int ql_update_ring_coalescing(struct ql_adapter *qdev) -{ - int i, status = 0; - struct rx_ring *rx_ring; - struct cqicb *cqicb; - - if (!netif_running(qdev->ndev)) - return status; - - /* Skip the default queue, and update the outbound handler - * queues if they changed. - */ - cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count]; - if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs || - le16_to_cpu(cqicb->pkt_delay) != - qdev->tx_max_coalesced_frames) { - for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - cqicb = (struct cqicb *)rx_ring; - cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); - cqicb->pkt_delay = - cpu_to_le16(qdev->tx_max_coalesced_frames); - cqicb->flags = FLAGS_LI; - status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), - CFG_LCQ, rx_ring->cq_id); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to load CQICB.\n"); - goto exit; - } - } - } - - /* Update the inbound (RSS) handler queues if they changed. 
*/ - cqicb = (struct cqicb *)&qdev->rx_ring[0]; - if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs || - le16_to_cpu(cqicb->pkt_delay) != - qdev->rx_max_coalesced_frames) { - for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { - rx_ring = &qdev->rx_ring[i]; - cqicb = (struct cqicb *)rx_ring; - cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); - cqicb->pkt_delay = - cpu_to_le16(qdev->rx_max_coalesced_frames); - cqicb->flags = FLAGS_LI; - status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), - CFG_LCQ, rx_ring->cq_id); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to load CQICB.\n"); - goto exit; - } - } - } -exit: - return status; -} - -static void ql_update_stats(struct ql_adapter *qdev) -{ - u32 i; - u64 data; - u64 *iter = &qdev->nic_stats.tx_pkts; - - spin_lock(&qdev->stats_lock); - if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { - netif_err(qdev, drv, qdev->ndev, - "Couldn't get xgmac sem.\n"); - goto quit; - } - /* - * Get TX statistics. - */ - for (i = 0x200; i < 0x280; i += 8) { - if (ql_read_xgmac_reg64(qdev, i, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", - i); - goto end; - } else - *iter = data; - iter++; - } - - /* - * Get RX statistics. - */ - for (i = 0x300; i < 0x3d0; i += 8) { - if (ql_read_xgmac_reg64(qdev, i, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", - i); - goto end; - } else - *iter = data; - iter++; - } - - /* Update receive mac error statistics */ - iter += QLGE_RCV_MAC_ERR_STATS; - - /* - * Get Per-priority TX pause frame counter statistics. - */ - for (i = 0x500; i < 0x540; i += 8) { - if (ql_read_xgmac_reg64(qdev, i, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", - i); - goto end; - } else - *iter = data; - iter++; - } - - /* - * Get Per-priority RX pause frame counter statistics. - */ - for (i = 0x568; i < 0x5a8; i += 8) { - if (ql_read_xgmac_reg64(qdev, i, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", - i); - goto end; - } else - *iter = data; - iter++; - } - - /* - * Get RX NIC FIFO DROP statistics. - */ - if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) { - netif_err(qdev, drv, qdev->ndev, - "Error reading status register 0x%.04x.\n", i); - goto end; - } else - *iter = data; -end: - ql_sem_unlock(qdev, qdev->xg_sem_mask); -quit: - spin_unlock(&qdev->stats_lock); - - QL_DUMP_STAT(qdev); -} - -static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf) -{ - int index; - switch (stringset) { - case ETH_SS_TEST: - memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN); - break; - case ETH_SS_STATS: - for (index = 0; index < QLGE_STATS_LEN; index++) { - memcpy(buf + index * ETH_GSTRING_LEN, - ql_gstrings_stats[index].stat_string, - ETH_GSTRING_LEN); - } - break; - } -} - -static int ql_get_sset_count(struct net_device *dev, int sset) -{ - switch (sset) { - case ETH_SS_TEST: - return QLGE_TEST_LEN; - case ETH_SS_STATS: - return QLGE_STATS_LEN; - default: - return -EOPNOTSUPP; - } -} - -static void -ql_get_ethtool_stats(struct net_device *ndev, - struct ethtool_stats *stats, u64 *data) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int index, length; - - length = QLGE_STATS_LEN; - ql_update_stats(qdev); - - for (index = 0; index < length; index++) { - char *p = (char *)qdev + - ql_gstrings_stats[index].stat_offset; - *data++ = (ql_gstrings_stats[index].sizeof_stat == - sizeof(u64)) ? 
*(u64 *)p : (*(u32 *)p); - } -} - -static int ql_get_link_ksettings(struct net_device *ndev, - struct ethtool_link_ksettings *ecmd) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - u32 supported, advertising; - - supported = SUPPORTED_10000baseT_Full; - advertising = ADVERTISED_10000baseT_Full; - - if ((qdev->link_status & STS_LINK_TYPE_MASK) == - STS_LINK_TYPE_10GBASET) { - supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); - advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); - ecmd->base.port = PORT_TP; - ecmd->base.autoneg = AUTONEG_ENABLE; - } else { - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; - ecmd->base.port = PORT_FIBRE; - } - - ecmd->base.speed = SPEED_10000; - ecmd->base.duplex = DUPLEX_FULL; - - ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, - advertising); - - return 0; -} - -static void ql_get_drvinfo(struct net_device *ndev, - struct ethtool_drvinfo *drvinfo) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, qlge_driver_version, - sizeof(drvinfo->version)); - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "v%d.%d.%d", - (qdev->fw_rev_id & 0x00ff0000) >> 16, - (qdev->fw_rev_id & 0x0000ff00) >> 8, - (qdev->fw_rev_id & 0x000000ff)); - strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), - sizeof(drvinfo->bus_info)); -} - -static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - unsigned short ssys_dev = qdev->pdev->subsystem_device; - - /* WOL is only supported for mezz card. */ - if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 || - ssys_dev == QLGE_MEZZ_SSYS_ID_180) { - wol->supported = WAKE_MAGIC; - wol->wolopts = qdev->wol; - } -} - -static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - unsigned short ssys_dev = qdev->pdev->subsystem_device; - - /* WOL is only supported for mezz card. 
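- *
- * For illustration (interface name hypothetical): a magic-packet
- * request like "ethtool -s eth0 wol g" arrives here as
- * wol->wolopts == WAKE_MAGIC and is honored only when the subsystem
- * ID identifies a mezz card (068 or 180); any option beyond
- * WAKE_MAGIC is rejected with -EINVAL below.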
*/ - if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 && - ssys_dev != QLGE_MEZZ_SSYS_ID_180) { - netif_info(qdev, drv, qdev->ndev, - "WOL is only supported for mezz card\n"); - return -EOPNOTSUPP; - } - if (wol->wolopts & ~WAKE_MAGIC) - return -EINVAL; - qdev->wol = wol->wolopts; - - netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol); - return 0; -} - -static int ql_set_phys_id(struct net_device *ndev, - enum ethtool_phys_id_state state) - -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - switch (state) { - case ETHTOOL_ID_ACTIVE: - /* Save the current LED settings */ - if (ql_mb_get_led_cfg(qdev)) - return -EIO; - - /* Start blinking */ - ql_mb_set_led_cfg(qdev, QL_LED_BLINK); - return 0; - - case ETHTOOL_ID_INACTIVE: - /* Restore LED settings */ - if (ql_mb_set_led_cfg(qdev, qdev->led_config)) - return -EIO; - return 0; - - default: - return -EINVAL; - } -} - -static int ql_start_loopback(struct ql_adapter *qdev) -{ - if (netif_carrier_ok(qdev->ndev)) { - set_bit(QL_LB_LINK_UP, &qdev->flags); - netif_carrier_off(qdev->ndev); - } else - clear_bit(QL_LB_LINK_UP, &qdev->flags); - qdev->link_config |= CFG_LOOPBACK_PCS; - return ql_mb_set_port_cfg(qdev); -} - -static void ql_stop_loopback(struct ql_adapter *qdev) -{ - qdev->link_config &= ~CFG_LOOPBACK_PCS; - ql_mb_set_port_cfg(qdev); - if (test_bit(QL_LB_LINK_UP, &qdev->flags)) { - netif_carrier_on(qdev->ndev); - clear_bit(QL_LB_LINK_UP, &qdev->flags); - } -} - -static void ql_create_lb_frame(struct sk_buff *skb, - unsigned int frame_size) -{ - memset(skb->data, 0xFF, frame_size); - frame_size &= ~1; - memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); - memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); -} - -void ql_check_lb_frame(struct ql_adapter *qdev, - struct sk_buff *skb) -{ - unsigned int frame_size = skb->len; - - if ((*(skb->data + 3) == 0xFF) && - (*(skb->data + frame_size / 2 + 10) == 0xBE) && - (*(skb->data + frame_size / 2 + 12) == 0xAF)) { - atomic_dec(&qdev->lb_count); - return; - } -} - -static int ql_run_loopback_test(struct ql_adapter *qdev) -{ - int i; - netdev_tx_t rc; - struct sk_buff *skb; - unsigned int size = SMALL_BUF_MAP_SIZE; - - for (i = 0; i < 64; i++) { - skb = netdev_alloc_skb(qdev->ndev, size); - if (!skb) - return -ENOMEM; - - skb->queue_mapping = 0; - skb_put(skb, size); - ql_create_lb_frame(skb, size); - rc = ql_lb_send(skb, qdev->ndev); - if (rc != NETDEV_TX_OK) - return -EPIPE; - atomic_inc(&qdev->lb_count); - } - /* Give queue time to settle before testing results. */ - msleep(2); - ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128); - return atomic_read(&qdev->lb_count) ? -EIO : 0; -} - -static int ql_loopback_test(struct ql_adapter *qdev, u64 *data) -{ - *data = ql_start_loopback(qdev); - if (*data) - goto out; - *data = ql_run_loopback_test(qdev); -out: - ql_stop_loopback(qdev); - return *data; -} - -static void ql_self_test(struct net_device *ndev, - struct ethtool_test *eth_test, u64 *data) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - memset(data, 0, sizeof(u64) * QLGE_TEST_LEN); - - if (netif_running(ndev)) { - set_bit(QL_SELFTEST, &qdev->flags); - if (eth_test->flags == ETH_TEST_FL_OFFLINE) { - /* Offline tests */ - if (ql_loopback_test(qdev, &data[0])) - eth_test->flags |= ETH_TEST_FL_FAILED; - - } else { - /* Online tests */ - data[0] = 0; - } - clear_bit(QL_SELFTEST, &qdev->flags); - /* Give link time to come up after - * port configuration changes. 
- */ - msleep_interruptible(4 * 1000); - } else { - netif_err(qdev, drv, qdev->ndev, - "is down, Loopback test will fail.\n"); - eth_test->flags |= ETH_TEST_FL_FAILED; - } -} - -static int ql_get_regs_len(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) - return sizeof(struct ql_mpi_coredump); - else - return sizeof(struct ql_reg_dump); -} - -static void ql_get_regs(struct net_device *ndev, - struct ethtool_regs *regs, void *p) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - ql_get_dump(qdev, p); - qdev->core_is_dumped = 0; - if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) - regs->len = sizeof(struct ql_mpi_coredump); - else - regs->len = sizeof(struct ql_reg_dump); -} - -static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) -{ - struct ql_adapter *qdev = netdev_priv(dev); - - c->rx_coalesce_usecs = qdev->rx_coalesce_usecs; - c->tx_coalesce_usecs = qdev->tx_coalesce_usecs; - - /* This chip coalesces as follows: - * If a packet arrives, hold off interrupts until - * cqicb->int_delay expires, but if no other packets arrive don't - * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a - * timer to coalesce on a frame basis. So, we have to take ethtool's - * max_coalesced_frames value and convert it to a delay in microseconds. - * We do this by using a basic thoughput of 1,000,000 frames per - * second @ (1024 bytes). This means one frame per usec. So it's a - * simple one to one ratio. - */ - c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames; - c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames; - - return 0; -} - -static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - /* Validate user parameters. */ - if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2) - return -EINVAL; - /* Don't wait more than 10 usec. */ - if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT) - return -EINVAL; - if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2) - return -EINVAL; - if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT) - return -EINVAL; - - /* Verify a change took place before updating the hardware. 
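- *
- * Worked example of the checks above: under the one-frame-per-usec
- * throughput assumption, rx-frames = 8 amounts to the same wait as
- * 8 usec. And thanks to the no-change test below, repeating an
- * identical "ethtool -C" request returns 0 without reloading any
- * CQICB.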
*/ - if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs && - qdev->tx_coalesce_usecs == c->tx_coalesce_usecs && - qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames && - qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames) - return 0; - - qdev->rx_coalesce_usecs = c->rx_coalesce_usecs; - qdev->tx_coalesce_usecs = c->tx_coalesce_usecs; - qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames; - qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames; - - return ql_update_ring_coalescing(qdev); -} - -static void ql_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct ql_adapter *qdev = netdev_priv(netdev); - - ql_mb_get_port_cfg(qdev); - if (qdev->link_config & CFG_PAUSE_STD) { - pause->rx_pause = 1; - pause->tx_pause = 1; - } -} - -static int ql_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) -{ - struct ql_adapter *qdev = netdev_priv(netdev); - int status = 0; - - if ((pause->rx_pause) && (pause->tx_pause)) - qdev->link_config |= CFG_PAUSE_STD; - else if (!pause->rx_pause && !pause->tx_pause) - qdev->link_config &= ~CFG_PAUSE_STD; - else - return -EINVAL; - - status = ql_mb_set_port_cfg(qdev); - return status; -} - -static u32 ql_get_msglevel(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - return qdev->msg_enable; -} - -static void ql_set_msglevel(struct net_device *ndev, u32 value) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - qdev->msg_enable = value; -} - -const struct ethtool_ops qlge_ethtool_ops = { - .get_drvinfo = ql_get_drvinfo, - .get_wol = ql_get_wol, - .set_wol = ql_set_wol, - .get_regs_len = ql_get_regs_len, - .get_regs = ql_get_regs, - .get_msglevel = ql_get_msglevel, - .set_msglevel = ql_set_msglevel, - .get_link = ethtool_op_get_link, - .set_phys_id = ql_set_phys_id, - .self_test = ql_self_test, - .get_pauseparam = ql_get_pauseparam, - .set_pauseparam = ql_set_pauseparam, - .get_coalesce = ql_get_coalesce, - .set_coalesce = ql_set_coalesce, - .get_sset_count = ql_get_sset_count, - .get_strings = ql_get_strings, - .get_ethtool_stats = ql_get_ethtool_stats, - .get_link_ksettings = ql_get_link_ksettings, -}; - diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c deleted file mode 100644 index 6cae33072496..000000000000 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ /dev/null @@ -1,5027 +0,0 @@ -/* - * QLogic qlge NIC HBA Driver - * Copyright (c) 2003-2008 QLogic Corporation - * See LICENSE.qlge for copyright and licensing details. 
- * Author: Linux qlge network device driver by - * Ron Mercer <ron.mercer@qlogic.com> - */ -#include <linux/kernel.h> -#include <linux/bitops.h> -#include <linux/types.h> -#include <linux/module.h> -#include <linux/list.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/pagemap.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/dmapool.h> -#include <linux/mempool.h> -#include <linux/spinlock.h> -#include <linux/kthread.h> -#include <linux/interrupt.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/ip.h> -#include <linux/ipv6.h> -#include <net/ipv6.h> -#include <linux/tcp.h> -#include <linux/udp.h> -#include <linux/if_arp.h> -#include <linux/if_ether.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/if_vlan.h> -#include <linux/skbuff.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <linux/vmalloc.h> -#include <linux/prefetch.h> -#include <net/ip6_checksum.h> - -#include "qlge.h" - -char qlge_driver_name[] = DRV_NAME; -const char qlge_driver_version[] = DRV_VERSION; - -MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>"); -MODULE_DESCRIPTION(DRV_STRING " "); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); - -static const u32 default_msg = - NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | -/* NETIF_MSG_TIMER | */ - NETIF_MSG_IFDOWN | - NETIF_MSG_IFUP | - NETIF_MSG_RX_ERR | - NETIF_MSG_TX_ERR | -/* NETIF_MSG_TX_QUEUED | */ -/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */ -/* NETIF_MSG_PKTDATA | */ - NETIF_MSG_HW | NETIF_MSG_WOL | 0; - -static int debug = -1; /* defaults above */ -module_param(debug, int, 0664); -MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); - -#define MSIX_IRQ 0 -#define MSI_IRQ 1 -#define LEG_IRQ 2 -static int qlge_irq_type = MSIX_IRQ; -module_param(qlge_irq_type, int, 0664); -MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); - -static int qlge_mpi_coredump; -module_param(qlge_mpi_coredump, int, 0); -MODULE_PARM_DESC(qlge_mpi_coredump, - "Option to enable MPI firmware dump. " - "Default is OFF - Do Not allocate memory. "); - -static int qlge_force_coredump; -module_param(qlge_force_coredump, int, 0); -MODULE_PARM_DESC(qlge_force_coredump, - "Option to allow force of firmware core dump. " - "Default is OFF - Do not allow."); - -static const struct pci_device_id qlge_pci_tbl[] = { - {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, - {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)}, - /* required last entry */ - {0,} -}; - -MODULE_DEVICE_TABLE(pci, qlge_pci_tbl); - -static int ql_wol(struct ql_adapter *); -static void qlge_set_multicast_list(struct net_device *); -static int ql_adapter_down(struct ql_adapter *); -static int ql_adapter_up(struct ql_adapter *); - -/* This hardware semaphore causes exclusive access to - * resources shared between the NIC driver, MPI firmware, - * FCOE firmware and the FC driver. 
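- *
- * Typical usage, mirroring callers elsewhere in this file: bracket the
- * shared resource with lock/unlock on the matching mask, e.g.
- *
- *   if (ql_sem_spinlock(qdev, qdev->xg_sem_mask))
- *           return;  /* gave up after 30 attempts */
- *   ...access XGMAC registers...
- *   ql_sem_unlock(qdev, qdev->xg_sem_mask);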
- */ -static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask) -{ - u32 sem_bits = 0; - - switch (sem_mask) { - case SEM_XGMAC0_MASK: - sem_bits = SEM_SET << SEM_XGMAC0_SHIFT; - break; - case SEM_XGMAC1_MASK: - sem_bits = SEM_SET << SEM_XGMAC1_SHIFT; - break; - case SEM_ICB_MASK: - sem_bits = SEM_SET << SEM_ICB_SHIFT; - break; - case SEM_MAC_ADDR_MASK: - sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT; - break; - case SEM_FLASH_MASK: - sem_bits = SEM_SET << SEM_FLASH_SHIFT; - break; - case SEM_PROBE_MASK: - sem_bits = SEM_SET << SEM_PROBE_SHIFT; - break; - case SEM_RT_IDX_MASK: - sem_bits = SEM_SET << SEM_RT_IDX_SHIFT; - break; - case SEM_PROC_REG_MASK: - sem_bits = SEM_SET << SEM_PROC_REG_SHIFT; - break; - default: - netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n"); - return -EINVAL; - } - - ql_write32(qdev, SEM, sem_bits | sem_mask); - return !(ql_read32(qdev, SEM) & sem_bits); -} - -int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) -{ - unsigned int wait_count = 30; - do { - if (!ql_sem_trylock(qdev, sem_mask)) - return 0; - udelay(100); - } while (--wait_count); - return -ETIMEDOUT; -} - -void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask) -{ - ql_write32(qdev, SEM, sem_mask); - ql_read32(qdev, SEM); /* flush */ -} - -/* This function waits for a specific bit to come ready - * in a given register. It is used mostly by the initialize - * process, but is also used in kernel thread API such as - * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid. - */ -int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) -{ - u32 temp; - int count = UDELAY_COUNT; - - while (count) { - temp = ql_read32(qdev, reg); - - /* check for errors */ - if (temp & err_bit) { - netif_alert(qdev, probe, qdev->ndev, - "register 0x%.08x access error, value = 0x%.08x!.\n", - reg, temp); - return -EIO; - } else if (temp & bit) - return 0; - udelay(UDELAY_DELAY); - count--; - } - netif_alert(qdev, probe, qdev->ndev, - "Timed out waiting for reg %x to come ready.\n", reg); - return -ETIMEDOUT; -} - -/* The CFG register is used to download TX and RX control blocks - * to the chip. This function waits for an operation to complete. - */ -static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit) -{ - int count = UDELAY_COUNT; - u32 temp; - - while (count) { - temp = ql_read32(qdev, CFG); - if (temp & CFG_LE) - return -EIO; - if (!(temp & bit)) - return 0; - udelay(UDELAY_DELAY); - count--; - } - return -ETIMEDOUT; -} - - -/* Used to issue init control blocks to hw. Maps control block, - * sets address, triggers download, waits for completion. - */ -int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, - u16 q_id) -{ - u64 map; - int status = 0; - int direction; - u32 mask; - u32 value; - - direction = - (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? 
PCI_DMA_TODEVICE : - PCI_DMA_FROMDEVICE; - - map = pci_map_single(qdev->pdev, ptr, size, direction); - if (pci_dma_mapping_error(qdev->pdev, map)) { - netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n"); - return -ENOMEM; - } - - status = ql_sem_spinlock(qdev, SEM_ICB_MASK); - if (status) - return status; - - status = ql_wait_cfg(qdev, bit); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Timed out waiting for CFG to come ready.\n"); - goto exit; - } - - ql_write32(qdev, ICB_L, (u32) map); - ql_write32(qdev, ICB_H, (u32) (map >> 32)); - - mask = CFG_Q_MASK | (bit << 16); - value = bit | (q_id << CFG_Q_SHIFT); - ql_write32(qdev, CFG, (mask | value)); - - /* - * Wait for the bit to clear after signaling hw. - */ - status = ql_wait_cfg(qdev, bit); -exit: - ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */ - pci_unmap_single(qdev->pdev, map, size, direction); - return status; -} - -/* Get a specific MAC address from the CAM. Used for debug and reg dump. */ -int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, - u32 *value) -{ - u32 offset = 0; - int status; - - switch (type) { - case MAC_ADDR_TYPE_MULTI_MAC: - case MAC_ADDR_TYPE_CAM_MAC: - { - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MR, 0); - if (status) - goto exit; - *value++ = ql_read32(qdev, MAC_ADDR_DATA); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MR, 0); - if (status) - goto exit; - *value++ = ql_read32(qdev, MAC_ADDR_DATA); - if (type == MAC_ADDR_TYPE_CAM_MAC) { - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ - status = - ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, - MAC_ADDR_MR, 0); - if (status) - goto exit; - *value++ = ql_read32(qdev, MAC_ADDR_DATA); - } - break; - } - case MAC_ADDR_TYPE_VLAN: - case MAC_ADDR_TYPE_MULTI_FLTR: - default: - netif_crit(qdev, ifup, qdev->ndev, - "Address type %d not yet supported.\n", type); - status = -EPERM; - } -exit: - return status; -} - -/* Set up a MAC, multicast or VLAN address for the - * inbound frame matching. 
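- *
- * Usage sketch, mirroring ql_set_mac_addr() below: a unicast CAM
- * entry for this function is programmed, under SEM_MAC_ADDR_MASK,
- * with
- *
- *   ql_set_mac_addr_reg(qdev, (u8 *)addr, MAC_ADDR_TYPE_CAM_MAC,
- *                       qdev->func * MAX_CQ);
- *
- * so that inbound frames matching addr are steered to this
- * function's completion queues.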
- */ -static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, - u16 index) -{ - u32 offset = 0; - int status = 0; - - switch (type) { - case MAC_ADDR_TYPE_MULTI_MAC: - { - u32 upper = (addr[0] << 8) | addr[1]; - u32 lower = (addr[2] << 24) | (addr[3] << 16) | - (addr[4] << 8) | (addr[5]); - - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | - (index << MAC_ADDR_IDX_SHIFT) | - type | MAC_ADDR_E); - ql_write32(qdev, MAC_ADDR_DATA, lower); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | - (index << MAC_ADDR_IDX_SHIFT) | - type | MAC_ADDR_E); - - ql_write32(qdev, MAC_ADDR_DATA, upper); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - break; - } - case MAC_ADDR_TYPE_CAM_MAC: - { - u32 cam_output; - u32 upper = (addr[0] << 8) | addr[1]; - u32 lower = - (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | - (addr[5]); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - type); /* type */ - ql_write32(qdev, MAC_ADDR_DATA, lower); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - type); /* type */ - ql_write32(qdev, MAC_ADDR_DATA, upper); - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - type); /* type */ - /* This field should also include the queue id - and possibly the function id. Right now we hardcode - the route field to NIC core. - */ - cam_output = (CAM_OUT_ROUTE_NIC | - (qdev-> - func << CAM_OUT_FUNC_SHIFT) | - (0 << CAM_OUT_CQ_ID_SHIFT)); - if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) - cam_output |= CAM_OUT_RV; - /* route to NIC core */ - ql_write32(qdev, MAC_ADDR_DATA, cam_output); - break; - } - case MAC_ADDR_TYPE_VLAN: - { - u32 enable_bit = *((u32 *) &addr[0]); - /* For VLAN, the addr actually holds a bit that - * either enables or disables the vlan id we are - * addressing. It's either MAC_ADDR_E on or off. - * That's bit-27 we're talking about. - */ - status = - ql_wait_reg_rdy(qdev, - MAC_ADDR_IDX, MAC_ADDR_MW, 0); - if (status) - goto exit; - ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */ - (index << MAC_ADDR_IDX_SHIFT) | /* index */ - type | /* type */ - enable_bit); /* enable/disable */ - break; - } - case MAC_ADDR_TYPE_MULTI_FLTR: - default: - netif_crit(qdev, ifup, qdev->ndev, - "Address type %d not yet supported.\n", type); - status = -EPERM; - } -exit: - return status; -} - -/* Set or clear MAC address in hardware. We sometimes - * have to clear it to prevent wrong frame routing - * especially in a bonding environment. 
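- *
- * Illustrative scenario: in an active-backup bond, a failover moves
- * the MAC to the newly active port; ql_link_off() below clears the
- * CAM entry (set == 0) so the inactive port stops claiming frames,
- * and ql_link_on() reinstates it (set == 1) when the link returns.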
- */ -static int ql_set_mac_addr(struct ql_adapter *qdev, int set) -{ - int status; - char zero_mac_addr[ETH_ALEN]; - char *addr; - - if (set) { - addr = &qdev->current_mac_addr[0]; - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Set Mac addr %pM\n", addr); - } else { - eth_zero_addr(zero_mac_addr); - addr = &zero_mac_addr[0]; - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Clearing MAC address\n"); - } - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return status; - status = ql_set_mac_addr_reg(qdev, (u8 *) addr, - MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - netif_err(qdev, ifup, qdev->ndev, - "Failed to init mac address.\n"); - return status; -} - -void ql_link_on(struct ql_adapter *qdev) -{ - netif_err(qdev, link, qdev->ndev, "Link is up.\n"); - netif_carrier_on(qdev->ndev); - ql_set_mac_addr(qdev, 1); -} - -void ql_link_off(struct ql_adapter *qdev) -{ - netif_err(qdev, link, qdev->ndev, "Link is down.\n"); - netif_carrier_off(qdev->ndev); - ql_set_mac_addr(qdev, 0); -} - -/* Get a specific frame routing value from the CAM. - * Used for debug and reg dump. - */ -int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value) -{ - int status = 0; - - status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); - if (status) - goto exit; - - ql_write32(qdev, RT_IDX, - RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT)); - status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0); - if (status) - goto exit; - *value = ql_read32(qdev, RT_DATA); -exit: - return status; -} - -/* The NIC function for this chip has 16 routing indexes. Each one can be used - * to route different frame types to various inbound queues. We send broadcast/ - * multicast/error frames to the default queue for slow handling, - * and CAM hit/RSS frames to the fast handling queues. - */ -static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, - int enable) -{ - int status = -EINVAL; /* Return error if no mask match. */ - u32 value = 0; - - switch (mask) { - case RT_IDX_CAM_HIT: - { - value = RT_IDX_DST_CAM_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_VALID: /* Promiscuous Mode frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_IP_CSUM_ERR_SLOT << - RT_IDX_IDX_SHIFT); /* index */ - break; - } - case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_TCP_UDP_CSUM_ERR_SLOT << - RT_IDX_IDX_SHIFT); /* index */ - break; - } - case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_MCAST: /* Pass up All Multicast frames. 
*/ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */ - { - value = RT_IDX_DST_RSS | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ - break; - } - case 0: /* Clear the E-bit on an entry. */ - { - value = RT_IDX_DST_DFLT_Q | /* dest */ - RT_IDX_TYPE_NICQ | /* type */ - (index << RT_IDX_IDX_SHIFT);/* index */ - break; - } - default: - netif_err(qdev, ifup, qdev->ndev, - "Mask type %d not yet supported.\n", mask); - status = -EPERM; - goto exit; - } - - if (value) { - status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); - if (status) - goto exit; - value |= (enable ? RT_IDX_E : 0); - ql_write32(qdev, RT_IDX, value); - ql_write32(qdev, RT_DATA, enable ? mask : 0); - } -exit: - return status; -} - -static void ql_enable_interrupts(struct ql_adapter *qdev) -{ - ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI); -} - -static void ql_disable_interrupts(struct ql_adapter *qdev) -{ - ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16)); -} - -/* If we're running with multiple MSI-X vectors then we enable on the fly. - * Otherwise, we may have multiple outstanding workers and don't want to - * enable until the last one finishes. In this case, the irq_cnt gets - * incremented every time we queue a worker and decremented every time - * a worker finishes. Once it hits zero we enable the interrupt. - */ -u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) -{ - u32 var = 0; - unsigned long hw_flags = 0; - struct intr_context *ctx = qdev->intr_context + intr; - - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) { - /* Always enable if we're MSIX multi interrupts and - * it's not the default (zeroeth) interrupt. - */ - ql_write32(qdev, INTR_EN, - ctx->intr_en_mask); - var = ql_read32(qdev, STS); - return var; - } - - spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if (atomic_dec_and_test(&ctx->irq_cnt)) { - ql_write32(qdev, INTR_EN, - ctx->intr_en_mask); - var = ql_read32(qdev, STS); - } - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); - return var; -} - -static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) -{ - u32 var = 0; - struct intr_context *ctx; - - /* HW disables for us if we're MSIX multi interrupts and - * it's not the default (zeroeth) interrupt. - */ - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) - return 0; - - ctx = qdev->intr_context + intr; - spin_lock(&qdev->hw_lock); - if (!atomic_read(&ctx->irq_cnt)) { - ql_write32(qdev, INTR_EN, - ctx->intr_dis_mask); - var = ql_read32(qdev, STS); - } - atomic_inc(&ctx->irq_cnt); - spin_unlock(&qdev->hw_lock); - return var; -} - -static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev) -{ - int i; - for (i = 0; i < qdev->intr_count; i++) { - /* The enable call does a atomic_dec_and_test - * and enables only if the result is zero. - * So we precharge it here. 
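- *
- * Worked example of the precharge: irq_cnt starts at 1, so the
- * matching ql_enable_completion_interrupt() takes it 1 -> 0,
- * atomic_dec_and_test() returns true and INTR_EN is written; with a
- * second worker still outstanding the count would only fall to 1 and
- * the interrupt would stay masked.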
- */ - if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) || - i == 0)) - atomic_set(&qdev->intr_context[i].irq_cnt, 1); - ql_enable_completion_interrupt(qdev, i); - } - -} - -static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str) -{ - int status, i; - u16 csum = 0; - __le16 *flash = (__le16 *)&qdev->flash; - - status = strncmp((char *)&qdev->flash, str, 4); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n"); - return status; - } - - for (i = 0; i < size; i++) - csum += le16_to_cpu(*flash++); - - if (csum) - netif_err(qdev, ifup, qdev->ndev, - "Invalid flash checksum, csum = 0x%.04x.\n", csum); - - return csum; -} - -static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data) -{ - int status = 0; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); - if (status) - goto exit; - /* set up for reg read */ - ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset); - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); - if (status) - goto exit; - /* This data is stored on flash as an array of - * __le32. Since ql_read32() returns cpu endian - * we need to swap it back. - */ - *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA)); -exit: - return status; -} - -static int ql_get_8000_flash_params(struct ql_adapter *qdev) -{ - u32 i, size; - int status; - __le32 *p = (__le32 *)&qdev->flash; - u32 offset; - u8 mac_addr[6]; - - /* Get flash offset for function and adjust - * for dword access. - */ - if (!qdev->port) - offset = FUNC0_FLASH_OFFSET / sizeof(u32); - else - offset = FUNC1_FLASH_OFFSET / sizeof(u32); - - if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) - return -ETIMEDOUT; - - size = sizeof(struct flash_params_8000) / sizeof(u32); - for (i = 0; i < size; i++, p++) { - status = ql_read_flash_word(qdev, i+offset, p); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Error reading flash.\n"); - goto exit; - } - } - - status = ql_validate_flash(qdev, - sizeof(struct flash_params_8000) / sizeof(u16), - "8000"); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); - status = -EINVAL; - goto exit; - } - - /* Extract either manufacturer or BOFM modified - * MAC address. - */ - if (qdev->flash.flash_params_8000.data_type1 == 2) - memcpy(mac_addr, - qdev->flash.flash_params_8000.mac_addr1, - qdev->ndev->addr_len); - else - memcpy(mac_addr, - qdev->flash.flash_params_8000.mac_addr, - qdev->ndev->addr_len); - - if (!is_valid_ether_addr(mac_addr)) { - netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n"); - status = -EINVAL; - goto exit; - } - - memcpy(qdev->ndev->dev_addr, - mac_addr, - qdev->ndev->addr_len); - -exit: - ql_sem_unlock(qdev, SEM_FLASH_MASK); - return status; -} - -static int ql_get_8012_flash_params(struct ql_adapter *qdev) -{ - int i; - int status; - __le32 *p = (__le32 *)&qdev->flash; - u32 offset = 0; - u32 size = sizeof(struct flash_params_8012) / sizeof(u32); - - /* Second function's parameters follow the first - * function's. 
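- *
- * Worked example of the layout: with size =
- * sizeof(struct flash_params_8012) / sizeof(u32), function 0 reads
- * dwords [0, size) and function 1 reads dwords [size, 2 * size),
- * hence the "offset = size" adjustment below.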
- */ - if (qdev->port) - offset = size; - - if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) - return -ETIMEDOUT; - - for (i = 0; i < size; i++, p++) { - status = ql_read_flash_word(qdev, i+offset, p); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Error reading flash.\n"); - goto exit; - } - - } - - status = ql_validate_flash(qdev, - sizeof(struct flash_params_8012) / sizeof(u16), - "8012"); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); - status = -EINVAL; - goto exit; - } - - if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) { - status = -EINVAL; - goto exit; - } - - memcpy(qdev->ndev->dev_addr, - qdev->flash.flash_params_8012.mac_addr, - qdev->ndev->addr_len); - -exit: - ql_sem_unlock(qdev, SEM_FLASH_MASK); - return status; -} - -/* xgmac register are located behind the xgmac_addr and xgmac_data - * register pair. Each read/write requires us to wait for the ready - * bit before reading/writing the data. - */ -static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data) -{ - int status; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); - if (status) - return status; - /* write the data to the data reg */ - ql_write32(qdev, XGMAC_DATA, data); - /* trigger the write */ - ql_write32(qdev, XGMAC_ADDR, reg); - return status; -} - -/* xgmac register are located behind the xgmac_addr and xgmac_data - * register pair. Each read/write requires us to wait for the ready - * bit before reading/writing the data. - */ -int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data) -{ - int status = 0; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); - if (status) - goto exit; - /* set up for reg read */ - ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R); - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, - XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); - if (status) - goto exit; - /* get the data */ - *data = ql_read32(qdev, XGMAC_DATA); -exit: - return status; -} - -/* This is used for reading the 64-bit statistics regs. */ -int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data) -{ - int status = 0; - u32 hi = 0; - u32 lo = 0; - - status = ql_read_xgmac_reg(qdev, reg, &lo); - if (status) - goto exit; - - status = ql_read_xgmac_reg(qdev, reg + 4, &hi); - if (status) - goto exit; - - *data = (u64) lo | ((u64) hi << 32); - -exit: - return status; -} - -static int ql_8000_port_initialize(struct ql_adapter *qdev) -{ - int status; - /* - * Get MPI firmware version for driver banner - * and ethool info. - */ - status = ql_mb_about_fw(qdev); - if (status) - goto exit; - status = ql_mb_get_fw_state(qdev); - if (status) - goto exit; - /* Wake up a worker to get/set the TX/RX frame sizes. */ - queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0); -exit: - return status; -} - -/* Take the MAC Core out of reset. - * Enable statistics counting. - * Take the transmitter/receiver out of reset. - * This functionality may be done in the MPI firmware at a - * later date. - */ -static int ql_8012_port_initialize(struct ql_adapter *qdev) -{ - int status = 0; - u32 data; - - if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) { - /* Another function has the semaphore, so - * wait for the port init bit to come ready. 
- */ - netif_info(qdev, link, qdev->ndev, - "Another function has the semaphore, so wait for the port init bit to come ready.\n"); - status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0); - if (status) { - netif_crit(qdev, link, qdev->ndev, - "Port initialize timed out.\n"); - } - return status; - } - - netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n"); - /* Set the core reset. */ - status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data); - if (status) - goto end; - data |= GLOBAL_CFG_RESET; - status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); - if (status) - goto end; - - /* Clear the core reset and turn on jumbo for receiver. */ - data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */ - data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */ - data |= GLOBAL_CFG_TX_STAT_EN; - data |= GLOBAL_CFG_RX_STAT_EN; - status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); - if (status) - goto end; - - /* Enable transmitter, and clear it's reset. */ - status = ql_read_xgmac_reg(qdev, TX_CFG, &data); - if (status) - goto end; - data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */ - data |= TX_CFG_EN; /* Enable the transmitter. */ - status = ql_write_xgmac_reg(qdev, TX_CFG, data); - if (status) - goto end; - - /* Enable receiver and clear it's reset. */ - status = ql_read_xgmac_reg(qdev, RX_CFG, &data); - if (status) - goto end; - data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */ - data |= RX_CFG_EN; /* Enable the receiver. */ - status = ql_write_xgmac_reg(qdev, RX_CFG, data); - if (status) - goto end; - - /* Turn on jumbo. */ - status = - ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16)); - if (status) - goto end; - status = - ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580); - if (status) - goto end; - - /* Signal to the world that the port is enabled. */ - ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init)); -end: - ql_sem_unlock(qdev, qdev->xg_sem_mask); - return status; -} - -static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev) -{ - return PAGE_SIZE << qdev->lbq_buf_order; -} - -/* Get the next large buffer. */ -static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) -{ - struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx]; - rx_ring->lbq_curr_idx++; - if (rx_ring->lbq_curr_idx == rx_ring->lbq_len) - rx_ring->lbq_curr_idx = 0; - rx_ring->lbq_free_cnt++; - return lbq_desc; -} - -static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); - - pci_dma_sync_single_for_cpu(qdev->pdev, - dma_unmap_addr(lbq_desc, mapaddr), - rx_ring->lbq_buf_size, - PCI_DMA_FROMDEVICE); - - /* If it's the last chunk of our master page then - * we unmap it. - */ - if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) - == ql_lbq_block_size(qdev)) - pci_unmap_page(qdev->pdev, - lbq_desc->p.pg_chunk.map, - ql_lbq_block_size(qdev), - PCI_DMA_FROMDEVICE); - return lbq_desc; -} - -/* Get the next small buffer. */ -static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) -{ - struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx]; - rx_ring->sbq_curr_idx++; - if (rx_ring->sbq_curr_idx == rx_ring->sbq_len) - rx_ring->sbq_curr_idx = 0; - rx_ring->sbq_free_cnt++; - return sbq_desc; -} - -/* Update an rx ring index. 
*/ -static void ql_update_cq(struct rx_ring *rx_ring) -{ - rx_ring->cnsmr_idx++; - rx_ring->curr_entry++; - if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) { - rx_ring->cnsmr_idx = 0; - rx_ring->curr_entry = rx_ring->cq_base; - } -} - -static void ql_write_cq_idx(struct rx_ring *rx_ring) -{ - ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); -} - -static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, - struct bq_desc *lbq_desc) -{ - if (!rx_ring->pg_chunk.page) { - u64 map; - rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC, - qdev->lbq_buf_order); - if (unlikely(!rx_ring->pg_chunk.page)) { - netif_err(qdev, drv, qdev->ndev, - "page allocation failed.\n"); - return -ENOMEM; - } - rx_ring->pg_chunk.offset = 0; - map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, - 0, ql_lbq_block_size(qdev), - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(qdev->pdev, map)) { - __free_pages(rx_ring->pg_chunk.page, - qdev->lbq_buf_order); - rx_ring->pg_chunk.page = NULL; - netif_err(qdev, drv, qdev->ndev, - "PCI mapping failed.\n"); - return -ENOMEM; - } - rx_ring->pg_chunk.map = map; - rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page); - } - - /* Copy the current master pg_chunk info - * to the current descriptor. - */ - lbq_desc->p.pg_chunk = rx_ring->pg_chunk; - - /* Adjust the master page chunk for next - * buffer get. - */ - rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size; - if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { - rx_ring->pg_chunk.page = NULL; - lbq_desc->p.pg_chunk.last_flag = 1; - } else { - rx_ring->pg_chunk.va += rx_ring->lbq_buf_size; - get_page(rx_ring->pg_chunk.page); - lbq_desc->p.pg_chunk.last_flag = 0; - } - return 0; -} -/* Process (refill) a large buffer queue. */ -static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - u32 clean_idx = rx_ring->lbq_clean_idx; - u32 start_idx = clean_idx; - struct bq_desc *lbq_desc; - u64 map; - int i; - - while (rx_ring->lbq_free_cnt > 32) { - for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "lbq: try cleaning clean_idx = %d.\n", - clean_idx); - lbq_desc = &rx_ring->lbq[clean_idx]; - if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { - rx_ring->lbq_clean_idx = clean_idx; - netif_err(qdev, ifup, qdev->ndev, - "Could not get a page chunk, i=%d, clean_idx =%d .\n", - i, clean_idx); - return; - } - - map = lbq_desc->p.pg_chunk.map + - lbq_desc->p.pg_chunk.offset; - dma_unmap_addr_set(lbq_desc, mapaddr, map); - dma_unmap_len_set(lbq_desc, maplen, - rx_ring->lbq_buf_size); - *lbq_desc->addr = cpu_to_le64(map); - - pci_dma_sync_single_for_device(qdev->pdev, map, - rx_ring->lbq_buf_size, - PCI_DMA_FROMDEVICE); - clean_idx++; - if (clean_idx == rx_ring->lbq_len) - clean_idx = 0; - } - - rx_ring->lbq_clean_idx = clean_idx; - rx_ring->lbq_prod_idx += 16; - if (rx_ring->lbq_prod_idx == rx_ring->lbq_len) - rx_ring->lbq_prod_idx = 0; - rx_ring->lbq_free_cnt -= 16; - } - - if (start_idx != clean_idx) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "lbq: updating prod idx = %d.\n", - rx_ring->lbq_prod_idx); - ql_write_db_reg(rx_ring->lbq_prod_idx, - rx_ring->lbq_prod_idx_db_reg); - } -} - -/* Process (refill) a small buffer queue. 
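- *
- * Both refill paths batch their work: buffers are replenished 16 at
- * a time and the producer index advances per batch. For example,
- * with sbq_free_cnt == 48 the loop below posts two batches of 16,
- * stops once the count reaches 16, and rings the doorbell once at
- * the end.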
*/ -static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - u32 clean_idx = rx_ring->sbq_clean_idx; - u32 start_idx = clean_idx; - struct bq_desc *sbq_desc; - u64 map; - int i; - - while (rx_ring->sbq_free_cnt > 16) { - for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) { - sbq_desc = &rx_ring->sbq[clean_idx]; - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "sbq: try cleaning clean_idx = %d.\n", - clean_idx); - if (sbq_desc->p.skb == NULL) { - netif_printk(qdev, rx_status, KERN_DEBUG, - qdev->ndev, - "sbq: getting new skb for index %d.\n", - sbq_desc->index); - sbq_desc->p.skb = - netdev_alloc_skb(qdev->ndev, - SMALL_BUFFER_SIZE); - if (sbq_desc->p.skb == NULL) { - rx_ring->sbq_clean_idx = clean_idx; - return; - } - skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD); - map = pci_map_single(qdev->pdev, - sbq_desc->p.skb->data, - rx_ring->sbq_buf_size, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(qdev->pdev, map)) { - netif_err(qdev, ifup, qdev->ndev, - "PCI mapping failed.\n"); - rx_ring->sbq_clean_idx = clean_idx; - dev_kfree_skb_any(sbq_desc->p.skb); - sbq_desc->p.skb = NULL; - return; - } - dma_unmap_addr_set(sbq_desc, mapaddr, map); - dma_unmap_len_set(sbq_desc, maplen, - rx_ring->sbq_buf_size); - *sbq_desc->addr = cpu_to_le64(map); - } - - clean_idx++; - if (clean_idx == rx_ring->sbq_len) - clean_idx = 0; - } - rx_ring->sbq_clean_idx = clean_idx; - rx_ring->sbq_prod_idx += 16; - if (rx_ring->sbq_prod_idx == rx_ring->sbq_len) - rx_ring->sbq_prod_idx = 0; - rx_ring->sbq_free_cnt -= 16; - } - - if (start_idx != clean_idx) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "sbq: updating prod idx = %d.\n", - rx_ring->sbq_prod_idx); - ql_write_db_reg(rx_ring->sbq_prod_idx, - rx_ring->sbq_prod_idx_db_reg); - } -} - -static void ql_update_buffer_queues(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - ql_update_sbq(qdev, rx_ring); - ql_update_lbq(qdev, rx_ring); -} - -/* Unmaps tx buffers. Can be called from send() if a pci mapping - * fails at some stage, or from the interrupt when a tx completes. - */ -static void ql_unmap_send(struct ql_adapter *qdev, - struct tx_ring_desc *tx_ring_desc, int mapped) -{ - int i; - for (i = 0; i < mapped; i++) { - if (i == 0 || (i == 7 && mapped > 7)) { - /* - * Unmap the skb->data area, or the - * external sglist (AKA the Outbound - * Address List (OAL)). - * If its the zeroeth element, then it's - * the skb->data area. If it's the 7th - * element and there is more than 6 frags, - * then its an OAL. - */ - if (i == 7) { - netif_printk(qdev, tx_done, KERN_DEBUG, - qdev->ndev, - "unmapping OAL area.\n"); - } - pci_unmap_single(qdev->pdev, - dma_unmap_addr(&tx_ring_desc->map[i], - mapaddr), - dma_unmap_len(&tx_ring_desc->map[i], - maplen), - PCI_DMA_TODEVICE); - } else { - netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, - "unmapping frag %d.\n", i); - pci_unmap_page(qdev->pdev, - dma_unmap_addr(&tx_ring_desc->map[i], - mapaddr), - dma_unmap_len(&tx_ring_desc->map[i], - maplen), PCI_DMA_TODEVICE); - } - } - -} - -/* Map the buffers for this transmit. This will return - * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 
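- *
- * Worked example of the mapping budget: an skb with a linear head
- * and 10 fragments uses seg[0] for skb->data, seg[1]..seg[6] for
- * frags 0..5, seg[7] for a pointer to the OAL, and the OAL itself
- * for frags 6..9 - twelve DMA mappings in all, the count that
- * map_idx hands to ql_unmap_send() on error.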
- */ -static int ql_map_send(struct ql_adapter *qdev, - struct ob_mac_iocb_req *mac_iocb_ptr, - struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc) -{ - int len = skb_headlen(skb); - dma_addr_t map; - int frag_idx, err, map_idx = 0; - struct tx_buf_desc *tbd = mac_iocb_ptr->tbd; - int frag_cnt = skb_shinfo(skb)->nr_frags; - - if (frag_cnt) { - netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, - "frag_cnt = %d.\n", frag_cnt); - } - /* - * Map the skb buffer first. - */ - map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); - - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netif_err(qdev, tx_queued, qdev->ndev, - "PCI mapping failed with error: %d\n", err); - - return NETDEV_TX_BUSY; - } - - tbd->len = cpu_to_le32(len); - tbd->addr = cpu_to_le64(map); - dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); - dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); - map_idx++; - - /* - * This loop fills the remainder of the 8 address descriptors - * in the IOCB. If there are more than 7 fragments, then the - * eighth address desc will point to an external list (OAL). - * When this happens, the remainder of the frags will be stored - * in this list. - */ - for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx]; - tbd++; - if (frag_idx == 6 && frag_cnt > 7) { - /* Let's tack on an sglist. - * Our control block will now - * look like this: - * iocb->seg[0] = skb->data - * iocb->seg[1] = frag[0] - * iocb->seg[2] = frag[1] - * iocb->seg[3] = frag[2] - * iocb->seg[4] = frag[3] - * iocb->seg[5] = frag[4] - * iocb->seg[6] = frag[5] - * iocb->seg[7] = ptr to OAL (external sglist) - * oal->seg[0] = frag[6] - * oal->seg[1] = frag[7] - * oal->seg[2] = frag[8] - * oal->seg[3] = frag[9] - * oal->seg[4] = frag[10] - * etc... - */ - /* Tack on the OAL in the eighth segment of IOCB. */ - map = pci_map_single(qdev->pdev, &tx_ring_desc->oal, - sizeof(struct oal), - PCI_DMA_TODEVICE); - err = pci_dma_mapping_error(qdev->pdev, map); - if (err) { - netif_err(qdev, tx_queued, qdev->ndev, - "PCI mapping outbound address list with error: %d\n", - err); - goto map_error; - } - - tbd->addr = cpu_to_le64(map); - /* - * The length is the number of fragments - * that remain to be mapped times the length - * of our sglist (OAL). - */ - tbd->len = - cpu_to_le32((sizeof(struct tx_buf_desc) * - (frag_cnt - frag_idx)) | TX_DESC_C); - dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, - map); - dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, - sizeof(struct oal)); - tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; - map_idx++; - } - - map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), - DMA_TO_DEVICE); - - err = dma_mapping_error(&qdev->pdev->dev, map); - if (err) { - netif_err(qdev, tx_queued, qdev->ndev, - "PCI mapping frags failed with error: %d.\n", - err); - goto map_error; - } - - tbd->addr = cpu_to_le64(map); - tbd->len = cpu_to_le32(skb_frag_size(frag)); - dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); - dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, - skb_frag_size(frag)); - - } - /* Save the number of segments we've mapped. */ - tx_ring_desc->map_cnt = map_idx; - /* Terminate the last segment. */ - tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E); - return NETDEV_TX_OK; - -map_error: - /* - * If the first frag mapping failed, then i will be zero. - * This causes the unmap of the skb->data area. 
Otherwise - * we pass in the number of frags that mapped successfully - * so they can be umapped. - */ - ql_unmap_send(qdev, tx_ring_desc, map_idx); - return NETDEV_TX_BUSY; -} - -/* Categorizing receive firmware frame errors */ -static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err, - struct rx_ring *rx_ring) -{ - struct nic_stats *stats = &qdev->nic_stats; - - stats->rx_err_count++; - rx_ring->rx_errors++; - - switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) { - case IB_MAC_IOCB_RSP_ERR_CODE_ERR: - stats->rx_code_err++; - break; - case IB_MAC_IOCB_RSP_ERR_OVERSIZE: - stats->rx_oversize_err++; - break; - case IB_MAC_IOCB_RSP_ERR_UNDERSIZE: - stats->rx_undersize_err++; - break; - case IB_MAC_IOCB_RSP_ERR_PREAMBLE: - stats->rx_preamble_err++; - break; - case IB_MAC_IOCB_RSP_ERR_FRAME_LEN: - stats->rx_frame_len_err++; - break; - case IB_MAC_IOCB_RSP_ERR_CRC: - stats->rx_crc_err++; - default: - break; - } -} - -/** - * ql_update_mac_hdr_len - helper routine to update the mac header length - * based on vlan tags if present - */ -static void ql_update_mac_hdr_len(struct ql_adapter *qdev, - struct ib_mac_iocb_rsp *ib_mac_rsp, - void *page, size_t *len) -{ - u16 *tags; - - if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) - return; - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) { - tags = (u16 *)page; - /* Look for stacked vlan tags in ethertype field */ - if (tags[6] == ETH_P_8021Q && - tags[8] == ETH_P_8021Q) - *len += 2 * VLAN_HLEN; - else - *len += VLAN_HLEN; - } -} - -/* Process an inbound completion from an rx ring. */ -static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, - struct rx_ring *rx_ring, - struct ib_mac_iocb_rsp *ib_mac_rsp, - u32 length, - u16 vlan_id) -{ - struct sk_buff *skb; - struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - struct napi_struct *napi = &rx_ring->napi; - - /* Frame error, so drop the packet. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); - put_page(lbq_desc->p.pg_chunk.page); - return; - } - napi->dev = qdev->ndev; - - skb = napi_get_frags(napi); - if (!skb) { - netif_err(qdev, drv, qdev->ndev, - "Couldn't get an skb, exiting.\n"); - rx_ring->rx_dropped++; - put_page(lbq_desc->p.pg_chunk.page); - return; - } - prefetch(lbq_desc->p.pg_chunk.va); - __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - lbq_desc->p.pg_chunk.page, - lbq_desc->p.pg_chunk.offset, - length); - - skb->len += length; - skb->data_len += length; - skb->truesize += length; - skb_shinfo(skb)->nr_frags++; - - rx_ring->rx_packets++; - rx_ring->rx_bytes += length; - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb_record_rx_queue(skb, rx_ring->cq_id); - if (vlan_id != 0xffff) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id); - napi_gro_frags(napi); -} - -/* Process an inbound completion from an rx ring. */ -static void ql_process_mac_rx_page(struct ql_adapter *qdev, - struct rx_ring *rx_ring, - struct ib_mac_iocb_rsp *ib_mac_rsp, - u32 length, - u16 vlan_id) -{ - struct net_device *ndev = qdev->ndev; - struct sk_buff *skb = NULL; - void *addr; - struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - struct napi_struct *napi = &rx_ring->napi; - size_t hlen = ETH_HLEN; - - skb = netdev_alloc_skb(ndev, length); - if (!skb) { - rx_ring->rx_dropped++; - put_page(lbq_desc->p.pg_chunk.page); - return; - } - - addr = lbq_desc->p.pg_chunk.va; - prefetch(addr); - - /* Frame error, so drop the packet. 
*/ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); - goto err_out; - } - - /* Update the MAC header length*/ - ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen); - - /* The max framesize filter on this chip is set higher than - * MTU since FCoE uses 2k frames. - */ - if (skb->len > ndev->mtu + hlen) { - netif_err(qdev, drv, qdev->ndev, - "Segment too small, dropping.\n"); - rx_ring->rx_dropped++; - goto err_out; - } - skb_put_data(skb, addr, hlen); - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", - length); - skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, - lbq_desc->p.pg_chunk.offset + hlen, - length - hlen); - skb->len += length - hlen; - skb->data_len += length - hlen; - skb->truesize += length - hlen; - - rx_ring->rx_packets++; - rx_ring->rx_bytes += skb->len; - skb->protocol = eth_type_trans(skb, ndev); - skb_checksum_none_assert(skb); - - if ((ndev->features & NETIF_F_RXCSUM) && - !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { - /* TCP frame. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "TCP checksum done!\n"); - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && - (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { - /* Unfragmented ipv4 UDP frame. */ - struct iphdr *iph = - (struct iphdr *)((u8 *)addr + hlen); - if (!(iph->frag_off & - htons(IP_MF|IP_OFFSET))) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - netif_printk(qdev, rx_status, KERN_DEBUG, - qdev->ndev, - "UDP checksum done!\n"); - } - } - } - - skb_record_rx_queue(skb, rx_ring->cq_id); - if (vlan_id != 0xffff) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id); - if (skb->ip_summed == CHECKSUM_UNNECESSARY) - napi_gro_receive(napi, skb); - else - netif_receive_skb(skb); - return; -err_out: - dev_kfree_skb_any(skb); - put_page(lbq_desc->p.pg_chunk.page); -} - -/* Process an inbound completion from an rx ring. */ -static void ql_process_mac_rx_skb(struct ql_adapter *qdev, - struct rx_ring *rx_ring, - struct ib_mac_iocb_rsp *ib_mac_rsp, - u32 length, - u16 vlan_id) -{ - struct net_device *ndev = qdev->ndev; - struct sk_buff *skb = NULL; - struct sk_buff *new_skb = NULL; - struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring); - - skb = sbq_desc->p.skb; - /* Allocate new_skb and copy */ - new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); - if (new_skb == NULL) { - rx_ring->rx_dropped++; - return; - } - skb_reserve(new_skb, NET_IP_ALIGN); - - pci_dma_sync_single_for_cpu(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), - PCI_DMA_FROMDEVICE); - - skb_put_data(new_skb, skb->data, length); - - pci_dma_sync_single_for_device(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), - PCI_DMA_FROMDEVICE); - skb = new_skb; - - /* Frame error, so drop the packet. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); - dev_kfree_skb_any(skb); - return; - } - - /* loopback self test for ethtool */ - if (test_bit(QL_SELFTEST, &qdev->flags)) { - ql_check_lb_frame(qdev, skb); - dev_kfree_skb_any(skb); - return; - } - - /* The max framesize filter on this chip is set higher than - * MTU since FCoE uses 2k frames. 
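- *
- * Worked example: with an MTU of 1500 the hardware filter still
- * admits roughly 2KB frames for FCoE's sake, so an oversized
- * non-FCoE frame passes the filter and must be dropped in software
- * by the skb->len > mtu + header length check below.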
- */ - if (skb->len > ndev->mtu + ETH_HLEN) { - dev_kfree_skb_any(skb); - rx_ring->rx_dropped++; - return; - } - - prefetch(skb->data); - if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "%s Multicast.\n", - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_HASH ? "Hash" : - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_REG ? "Registered" : - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); - } - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Promiscuous Packet.\n"); - - rx_ring->rx_packets++; - rx_ring->rx_bytes += skb->len; - skb->protocol = eth_type_trans(skb, ndev); - skb_checksum_none_assert(skb); - - /* If rx checksum is on, and there are no - * csum or frame errors. - */ - if ((ndev->features & NETIF_F_RXCSUM) && - !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { - /* TCP frame. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "TCP checksum done!\n"); - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && - (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { - /* Unfragmented ipv4 UDP frame. */ - struct iphdr *iph = (struct iphdr *) skb->data; - if (!(iph->frag_off & - htons(IP_MF|IP_OFFSET))) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - netif_printk(qdev, rx_status, KERN_DEBUG, - qdev->ndev, - "UDP checksum done!\n"); - } - } - } - - skb_record_rx_queue(skb, rx_ring->cq_id); - if (vlan_id != 0xffff) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id); - if (skb->ip_summed == CHECKSUM_UNNECESSARY) - napi_gro_receive(&rx_ring->napi, skb); - else - netif_receive_skb(skb); -} - -static void ql_realign_skb(struct sk_buff *skb, int len) -{ - void *temp_addr = skb->data; - - /* Undo the skb_reserve(skb,32) we did before - * giving to hardware, and realign data on - * a 2-byte boundary. - */ - skb->data -= QLGE_SB_PAD - NET_IP_ALIGN; - skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN; - memmove(skb->data, temp_addr, len); -} - -/* - * This function builds an skb for the given inbound - * completion. It will be rewritten for readability in the near - * future, but for not it works well. - */ -static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, - struct rx_ring *rx_ring, - struct ib_mac_iocb_rsp *ib_mac_rsp) -{ - struct bq_desc *lbq_desc; - struct bq_desc *sbq_desc; - struct sk_buff *skb = NULL; - u32 length = le32_to_cpu(ib_mac_rsp->data_len); - u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len); - size_t hlen = ETH_HLEN; - - /* - * Handle the header buffer if present. - */ - if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV && - ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Header of %d bytes in small buffer.\n", hdr_len); - /* - * Headers fit nicely into a small buffer. - */ - sbq_desc = ql_get_curr_sbuf(rx_ring); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), - PCI_DMA_FROMDEVICE); - skb = sbq_desc->p.skb; - ql_realign_skb(skb, hdr_len); - skb_put(skb, hdr_len); - sbq_desc->p.skb = NULL; - } - - /* - * Handle the data buffer(s). - */ - if (unlikely(!length)) { /* Is there data too? 
*/ - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "No Data buffer in this packet.\n"); - return skb; - } - - if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { - if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Headers in small, data of %d bytes in small, combine them.\n", - length); - /* - * Data is less than small buffer size so it's - * stuffed in a small buffer. - * For this case we append the data - * from the "data" small buffer to the "header" small - * buffer. - */ - sbq_desc = ql_get_curr_sbuf(rx_ring); - pci_dma_sync_single_for_cpu(qdev->pdev, - dma_unmap_addr - (sbq_desc, mapaddr), - dma_unmap_len - (sbq_desc, maplen), - PCI_DMA_FROMDEVICE); - skb_put_data(skb, sbq_desc->p.skb->data, length); - pci_dma_sync_single_for_device(qdev->pdev, - dma_unmap_addr - (sbq_desc, - mapaddr), - dma_unmap_len - (sbq_desc, - maplen), - PCI_DMA_FROMDEVICE); - } else { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "%d bytes in a single small buffer.\n", - length); - sbq_desc = ql_get_curr_sbuf(rx_ring); - skb = sbq_desc->p.skb; - ql_realign_skb(skb, length); - skb_put(skb, length); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, - mapaddr), - dma_unmap_len(sbq_desc, - maplen), - PCI_DMA_FROMDEVICE); - sbq_desc->p.skb = NULL; - } - } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { - if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Header in small, %d bytes in large. Chain large to small!\n", - length); - /* - * The data is in a single large buffer. We - * chain it to the header buffer's skb and let - * it rip. - */ - lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Chaining page at offset = %d, for %d bytes to skb.\n", - lbq_desc->p.pg_chunk.offset, length); - skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, - lbq_desc->p.pg_chunk.offset, - length); - skb->len += length; - skb->data_len += length; - skb->truesize += length; - } else { - /* - * The headers and data are in a single large buffer. We - * copy it to a new skb and let it go. This can happen with - * jumbo mtu on a non-TCP/UDP frame. - */ - lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - skb = netdev_alloc_skb(qdev->ndev, length); - if (skb == NULL) { - netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev, - "No skb available, drop the packet.\n"); - return NULL; - } - pci_unmap_page(qdev->pdev, - dma_unmap_addr(lbq_desc, - mapaddr), - dma_unmap_len(lbq_desc, maplen), - PCI_DMA_FROMDEVICE); - skb_reserve(skb, NET_IP_ALIGN); - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", - length); - skb_fill_page_desc(skb, 0, - lbq_desc->p.pg_chunk.page, - lbq_desc->p.pg_chunk.offset, - length); - skb->len += length; - skb->data_len += length; - skb->truesize += length; - ql_update_mac_hdr_len(qdev, ib_mac_rsp, - lbq_desc->p.pg_chunk.va, - &hlen); - __pskb_pull_tail(skb, hlen); - } - } else { - /* - * The data is in a chain of large buffers - * pointed to by a small buffer. We loop - * thru and chain them to the our small header - * buffer's skb. - * frags: There are 18 max frags and our small - * buffer will hold 32 of them. The thing is, - * we'll use 3 max for our 9000 byte jumbo - * frames. If the MTU goes up we could - * eventually be in trouble. 
- */ - int size, i = 0; - sbq_desc = ql_get_curr_sbuf(rx_ring); - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), - PCI_DMA_FROMDEVICE); - if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { - /* - * This is an non TCP/UDP IP frame, so - * the headers aren't split into a small - * buffer. We have to use the small buffer - * that contains our sg list as our skb to - * send upstairs. Copy the sg list here to - * a local buffer and use it to find the - * pages to chain. - */ - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "%d bytes of headers & data in chain of large.\n", - length); - skb = sbq_desc->p.skb; - sbq_desc->p.skb = NULL; - skb_reserve(skb, NET_IP_ALIGN); - } - do { - lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); - size = (length < rx_ring->lbq_buf_size) ? length : - rx_ring->lbq_buf_size; - - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Adding page %d to skb for %d bytes.\n", - i, size); - skb_fill_page_desc(skb, i, - lbq_desc->p.pg_chunk.page, - lbq_desc->p.pg_chunk.offset, - size); - skb->len += size; - skb->data_len += size; - skb->truesize += size; - length -= size; - i++; - } while (length > 0); - ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va, - &hlen); - __pskb_pull_tail(skb, hlen); - } - return skb; -} - -/* Process an inbound completion from an rx ring. */ -static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, - struct rx_ring *rx_ring, - struct ib_mac_iocb_rsp *ib_mac_rsp, - u16 vlan_id) -{ - struct net_device *ndev = qdev->ndev; - struct sk_buff *skb = NULL; - - QL_DUMP_IB_MAC_RSP(ib_mac_rsp); - - skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); - if (unlikely(!skb)) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "No skb available, drop packet.\n"); - rx_ring->rx_dropped++; - return; - } - - /* Frame error, so drop the packet. */ - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { - ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); - dev_kfree_skb_any(skb); - return; - } - - /* The max framesize filter on this chip is set higher than - * MTU since FCoE uses 2k frames. - */ - if (skb->len > ndev->mtu + ETH_HLEN) { - dev_kfree_skb_any(skb); - rx_ring->rx_dropped++; - return; - } - - /* loopback self test for ethtool */ - if (test_bit(QL_SELFTEST, &qdev->flags)) { - ql_check_lb_frame(qdev, skb); - dev_kfree_skb_any(skb); - return; - } - - prefetch(skb->data); - if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_HASH ? "Hash" : - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_REG ? "Registered" : - (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == - IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); - rx_ring->rx_multicast++; - } - if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Promiscuous Packet.\n"); - } - - skb->protocol = eth_type_trans(skb, ndev); - skb_checksum_none_assert(skb); - - /* If rx checksum is on, and there are no - * csum or frame errors. - */ - if ((ndev->features & NETIF_F_RXCSUM) && - !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { - /* TCP frame. 
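
The UDP leg of the RXCSUM checks in these receive paths only trusts the hardware verdict for unfragmented IPv4 datagrams, since a fragment's checksum covers only the first fragment. The fragment test reduces to the following (IP_MF/IP_OFFSET values as in the kernel's IP headers; a standalone sketch, not the driver's code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    #define IP_MF     0x2000  /* "more fragments" flag, host order */
    #define IP_OFFSET 0x1fff  /* fragment offset mask */

    /* True when the big-endian frag_off field describes an unfragmented
     * datagram - the only UDP case where CHECKSUM_UNNECESSARY is set. */
    static bool udp_csum_usable(uint16_t frag_off_be)
    {
            return !(frag_off_be & htons(IP_MF | IP_OFFSET));
    }
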
 */
-	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
-		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
-			     "TCP checksum done!\n");
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
-		   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
-		/* Unfragmented ipv4 UDP frame. */
-		struct iphdr *iph = (struct iphdr *) skb->data;
-		if (!(iph->frag_off &
-		      htons(IP_MF|IP_OFFSET))) {
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
-				     "UDP checksum done!\n");
-		}
-	}
-	}
-
-	rx_ring->rx_packets++;
-	rx_ring->rx_bytes += skb->len;
-	skb_record_rx_queue(skb, rx_ring->cq_id);
-	if (vlan_id != 0xffff)
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
-	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
-		napi_gro_receive(&rx_ring->napi, skb);
-	else
-		netif_receive_skb(skb);
-}
-
-/* Process an inbound completion from an rx ring. */
-static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
-					    struct rx_ring *rx_ring,
-					    struct ib_mac_iocb_rsp *ib_mac_rsp)
-{
-	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
-	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
-		       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
-		((le16_to_cpu(ib_mac_rsp->vlan_id) &
-		  IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
-
-	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
-
-	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
-		/* The data and headers are split into
-		 * separate buffers.
-		 */
-		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
-					     vlan_id);
-	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
-		/* The data fit in a single small buffer.
-		 * Allocate a new skb, copy the data and
-		 * return the buffer to the free pool.
-		 */
-		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
-				      length, vlan_id);
-	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
-		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
-		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
-		/* TCP packet in a page chunk that's been checksummed.
-		 * Tack it on to our GRO skb and let it go.
-		 */
-		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
-					   length, vlan_id);
-	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
-		/* Non-TCP packet in a page chunk. Allocate an
-		 * skb, tack it on frags, and send it up.
-		 */
-		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
-				       length, vlan_id);
-	} else {
-		/* Non-TCP/UDP large frames that span multiple buffers
-		 * can be processed correctly by the split frame logic.
-		 */
-		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
-					     vlan_id);
-	}
-
-	return (unsigned long)length;
-}
-
-/* Process an outbound completion from an rx ring.
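
Condensed, the five-way dispatch in ql_process_mac_rx_intr() above picks one receive path per completion; a sketch (booleans stand in for the HV/DS/DL flag tests, and csum_ok_tcp for "no checksum error and TCP" - this is not the driver's code):

    #include <stdbool.h>

    static const char *rx_path(bool hv, bool ds, bool dl, bool csum_ok_tcp)
    {
            if (hv)
                    return "split";       /* header and data in separate buffers    */
            if (ds)
                    return "small-copy";  /* whole frame copied from a small buffer */
            if (dl && csum_ok_tcp)
                    return "gro-page";    /* page chunk fed straight to GRO         */
            if (dl)
                    return "page";        /* non-TCP frame, page chunk as frag 0    */
            return "split-chain";         /* multi-buffer frame via the split logic */
    }
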
*/ -static void ql_process_mac_tx_intr(struct ql_adapter *qdev, - struct ob_mac_iocb_rsp *mac_rsp) -{ - struct tx_ring *tx_ring; - struct tx_ring_desc *tx_ring_desc; - - QL_DUMP_OB_MAC_RSP(mac_rsp); - tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; - tx_ring_desc = &tx_ring->q[mac_rsp->tid]; - ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); - tx_ring->tx_bytes += (tx_ring_desc->skb)->len; - tx_ring->tx_packets++; - dev_kfree_skb(tx_ring_desc->skb); - tx_ring_desc->skb = NULL; - - if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | - OB_MAC_IOCB_RSP_S | - OB_MAC_IOCB_RSP_L | - OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) { - if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) { - netif_warn(qdev, tx_done, qdev->ndev, - "Total descriptor length did not match transfer length.\n"); - } - if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) { - netif_warn(qdev, tx_done, qdev->ndev, - "Frame too short to be valid, not sent.\n"); - } - if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) { - netif_warn(qdev, tx_done, qdev->ndev, - "Frame too long, but sent anyway.\n"); - } - if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) { - netif_warn(qdev, tx_done, qdev->ndev, - "PCI backplane error. Frame not sent.\n"); - } - } - atomic_inc(&tx_ring->tx_count); -} - -/* Fire up a handler to reset the MPI processor. */ -void ql_queue_fw_error(struct ql_adapter *qdev) -{ - ql_link_off(qdev); - queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0); -} - -void ql_queue_asic_error(struct ql_adapter *qdev) -{ - ql_link_off(qdev); - ql_disable_interrupts(qdev); - /* Clear adapter up bit to signal the recovery - * process that it shouldn't kill the reset worker - * thread - */ - clear_bit(QL_ADAPTER_UP, &qdev->flags); - /* Set asic recovery bit to indicate reset process that we are - * in fatal error recovery process rather than normal close - */ - set_bit(QL_ASIC_RECOVERY, &qdev->flags); - queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); -} - -static void ql_process_chip_ae_intr(struct ql_adapter *qdev, - struct ib_ae_iocb_rsp *ib_ae_rsp) -{ - switch (ib_ae_rsp->event) { - case MGMT_ERR_EVENT: - netif_err(qdev, rx_err, qdev->ndev, - "Management Processor Fatal Error.\n"); - ql_queue_fw_error(qdev); - return; - - case CAM_LOOKUP_ERR_EVENT: - netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n"); - netdev_err(qdev->ndev, "This event shouldn't occur.\n"); - ql_queue_asic_error(qdev); - return; - - case SOFT_ECC_ERROR_EVENT: - netdev_err(qdev->ndev, "Soft ECC error detected.\n"); - ql_queue_asic_error(qdev); - break; - - case PCI_ERR_ANON_BUF_RD: - netdev_err(qdev->ndev, "PCI error occurred when reading " - "anonymous buffers from rx_ring %d.\n", - ib_ae_rsp->q_id); - ql_queue_asic_error(qdev); - break; - - default: - netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n", - ib_ae_rsp->event); - ql_queue_asic_error(qdev); - break; - } -} - -static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) -{ - struct ql_adapter *qdev = rx_ring->qdev; - u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); - struct ob_mac_iocb_rsp *net_rsp = NULL; - int count = 0; - - struct tx_ring *tx_ring; - /* While there are entries in the completion queue. 
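
The outbound and inbound clean routines that follow share one shape: the chip DMA-writes its producer index into host memory (the "shadow register"), so draining is a plain compare-and-advance against the driver's consumer index, with no MMIO read per entry. A minimal sketch of that loop:

    #include <stdint.h>

    static int drain(volatile uint32_t *prod_shadow, uint32_t *cnsmr,
                     uint32_t ring_len, void (*handle)(uint32_t idx))
    {
            int count = 0;

            while (*prod_shadow != *cnsmr) {
                    handle(*cnsmr);                    /* process one completion   */
                    *cnsmr = (*cnsmr + 1) % ring_len;  /* advance, wrapping        */
                    count++;                           /* re-read shadow next pass */
            }
            return count;
    }
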
*/ - while (prod != rx_ring->cnsmr_idx) { - - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "cq_id = %d, prod = %d, cnsmr = %d\n", - rx_ring->cq_id, prod, rx_ring->cnsmr_idx); - - net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; - rmb(); - switch (net_rsp->opcode) { - - case OPCODE_OB_MAC_TSO_IOCB: - case OPCODE_OB_MAC_IOCB: - ql_process_mac_tx_intr(qdev, net_rsp); - break; - default: - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Hit default case, not handled! dropping the packet, opcode = %x.\n", - net_rsp->opcode); - } - count++; - ql_update_cq(rx_ring); - prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); - } - if (!net_rsp) - return 0; - ql_write_cq_idx(rx_ring); - tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; - if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { - if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) - /* - * The queue got stopped because the tx_ring was full. - * Wake it up, because it's now at least 25% empty. - */ - netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); - } - - return count; -} - -static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) -{ - struct ql_adapter *qdev = rx_ring->qdev; - u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); - struct ql_net_rsp_iocb *net_rsp; - int count = 0; - - /* While there are entries in the completion queue. */ - while (prod != rx_ring->cnsmr_idx) { - - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "cq_id = %d, prod = %d, cnsmr = %d\n", - rx_ring->cq_id, prod, rx_ring->cnsmr_idx); - - net_rsp = rx_ring->curr_entry; - rmb(); - switch (net_rsp->opcode) { - case OPCODE_IB_MAC_IOCB: - ql_process_mac_rx_intr(qdev, rx_ring, - (struct ib_mac_iocb_rsp *) - net_rsp); - break; - - case OPCODE_IB_AE_IOCB: - ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *) - net_rsp); - break; - default: - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Hit default case, not handled! dropping the packet, opcode = %x.\n", - net_rsp->opcode); - break; - } - count++; - ql_update_cq(rx_ring); - prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); - if (count == budget) - break; - } - ql_update_buffer_queues(qdev, rx_ring); - ql_write_cq_idx(rx_ring); - return count; -} - -static int ql_napi_poll_msix(struct napi_struct *napi, int budget) -{ - struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi); - struct ql_adapter *qdev = rx_ring->qdev; - struct rx_ring *trx_ring; - int i, work_done = 0; - struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; - - netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id); - - /* Service the TX rings first. They start - * right after the RSS rings. */ - for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { - trx_ring = &qdev->rx_ring[i]; - /* If this TX completion ring belongs to this vector and - * it's not empty then service it. - */ - if ((ctx->irq_mask & (1 << trx_ring->cq_id)) && - (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) != - trx_ring->cnsmr_idx)) { - netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, - "%s: Servicing TX completion ring %d.\n", - __func__, trx_ring->cq_id); - ql_clean_outbound_rx_ring(trx_ring); - } - } - - /* - * Now service the RSS ring if it's active. 
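
The subqueue wake in ql_clean_outbound_rx_ring() above is deliberately hysteretic: the send path stops a queue when it is nearly full, and a stopped queue is woken only once a quarter of the ring has drained, so the queue does not flap on every completion. In sketch form:

    #include <stdbool.h>

    /* Wake a stopped tx subqueue only after 25% of the ring is free. */
    static bool should_wake(bool stopped, int free_entries, int ring_len)
    {
            return stopped && free_entries > ring_len / 4;
    }
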
- */ - if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != - rx_ring->cnsmr_idx) { - netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, - "%s: Servicing RX completion ring %d.\n", - __func__, rx_ring->cq_id); - work_done = ql_clean_inbound_rx_ring(rx_ring, budget); - } - - if (work_done < budget) { - napi_complete_done(napi, work_done); - ql_enable_completion_interrupt(qdev, rx_ring->irq); - } - return work_done; -} - -static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - if (features & NETIF_F_HW_VLAN_CTAG_RX) { - ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | - NIC_RCV_CFG_VLAN_MATCH_AND_NON); - } else { - ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); - } -} - -/** - * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter - * based on the features to enable/disable hardware vlan accel - */ -static int qlge_update_hw_vlan_features(struct net_device *ndev, - netdev_features_t features) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int status = 0; - bool need_restart = netif_running(ndev); - - if (need_restart) { - status = ql_adapter_down(qdev); - if (status) { - netif_err(qdev, link, qdev->ndev, - "Failed to bring down the adapter\n"); - return status; - } - } - - /* update the features with resent change */ - ndev->features = features; - - if (need_restart) { - status = ql_adapter_up(qdev); - if (status) { - netif_err(qdev, link, qdev->ndev, - "Failed to bring up the adapter\n"); - return status; - } - } - - return status; -} - -static int qlge_set_features(struct net_device *ndev, - netdev_features_t features) -{ - netdev_features_t changed = ndev->features ^ features; - int err; - - if (changed & NETIF_F_HW_VLAN_CTAG_RX) { - /* Update the behavior of vlan accel in the adapter */ - err = qlge_update_hw_vlan_features(ndev, features); - if (err) - return err; - - qlge_vlan_mode(ndev, features); - } - - return 0; -} - -static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) -{ - u32 enable_bit = MAC_ADDR_E; - int err; - - err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, - MAC_ADDR_TYPE_VLAN, vid); - if (err) - netif_err(qdev, ifup, qdev->ndev, - "Failed to init vlan address.\n"); - return err; -} - -static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int status; - int err; - - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return status; - - err = __qlge_vlan_rx_add_vid(qdev, vid); - set_bit(vid, qdev->active_vlans); - - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - - return err; -} - -static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) -{ - u32 enable_bit = 0; - int err; - - err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, - MAC_ADDR_TYPE_VLAN, vid); - if (err) - netif_err(qdev, ifup, qdev->ndev, - "Failed to clear vlan address.\n"); - return err; -} - -static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int status; - int err; - - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return status; - - err = __qlge_vlan_rx_kill_vid(qdev, vid); - clear_bit(vid, qdev->active_vlans); - - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - - return err; -} - -static void qlge_restore_vlan(struct ql_adapter *qdev) -{ - int status; - u16 vid; - - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return; - - for_each_set_bit(vid, qdev->active_vlans, 
VLAN_N_VID) - __qlge_vlan_rx_add_vid(qdev, vid); - - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); -} - -/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */ -static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) -{ - struct rx_ring *rx_ring = dev_id; - napi_schedule(&rx_ring->napi); - return IRQ_HANDLED; -} - -/* This handles a fatal error, MPI activity, and the default - * rx_ring in an MSI-X multiple vector environment. - * In MSI/Legacy environment it also process the rest of - * the rx_rings. - */ -static irqreturn_t qlge_isr(int irq, void *dev_id) -{ - struct rx_ring *rx_ring = dev_id; - struct ql_adapter *qdev = rx_ring->qdev; - struct intr_context *intr_context = &qdev->intr_context[0]; - u32 var; - int work_done = 0; - - spin_lock(&qdev->hw_lock); - if (atomic_read(&qdev->intr_context[0].irq_cnt)) { - netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, - "Shared Interrupt, Not ours!\n"); - spin_unlock(&qdev->hw_lock); - return IRQ_NONE; - } - spin_unlock(&qdev->hw_lock); - - var = ql_disable_completion_interrupt(qdev, intr_context->intr); - - /* - * Check for fatal error. - */ - if (var & STS_FE) { - ql_queue_asic_error(qdev); - netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var); - var = ql_read32(qdev, ERR_STS); - netdev_err(qdev->ndev, "Resetting chip. " - "Error Status Register = 0x%x\n", var); - return IRQ_HANDLED; - } - - /* - * Check MPI processor activity. - */ - if ((var & STS_PI) && - (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { - /* - * We've got an async event or mailbox completion. - * Handle it and clear the source of the interrupt. - */ - netif_err(qdev, intr, qdev->ndev, - "Got MPI processor interrupt.\n"); - ql_disable_completion_interrupt(qdev, intr_context->intr); - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); - queue_delayed_work_on(smp_processor_id(), - qdev->workqueue, &qdev->mpi_work, 0); - work_done++; - } - - /* - * Get the bit-mask that shows the active queues for this - * pass. Compare it to the queues that this irq services - * and call napi if there's a match. - */ - var = ql_read32(qdev, ISR1); - if (var & intr_context->irq_mask) { - netif_info(qdev, intr, qdev->ndev, - "Waking handler for rx_ring[0].\n"); - ql_disable_completion_interrupt(qdev, intr_context->intr); - napi_schedule(&rx_ring->napi); - work_done++; - } - ql_enable_completion_interrupt(qdev, intr_context->intr); - return work_done ? 
IRQ_HANDLED : IRQ_NONE; -} - -static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) -{ - - if (skb_is_gso(skb)) { - int err; - __be16 l3_proto = vlan_get_protocol(skb); - - err = skb_cow_head(skb, 0); - if (err < 0) - return err; - - mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; - mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC; - mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); - mac_iocb_ptr->total_hdrs_len = - cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb)); - mac_iocb_ptr->net_trans_offset = - cpu_to_le16(skb_network_offset(skb) | - skb_transport_offset(skb) - << OB_MAC_TRANSPORT_HDR_SHIFT); - mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); - mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; - if (likely(l3_proto == htons(ETH_P_IP))) { - struct iphdr *iph = ip_hdr(skb); - iph->check = 0; - mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); - } else if (l3_proto == htons(ETH_P_IPV6)) { - mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); - } - return 1; - } - return 0; -} - -static void ql_hw_csum_setup(struct sk_buff *skb, - struct ob_mac_tso_iocb_req *mac_iocb_ptr) -{ - int len; - struct iphdr *iph = ip_hdr(skb); - __sum16 *check; - mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; - mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); - mac_iocb_ptr->net_trans_offset = - cpu_to_le16(skb_network_offset(skb) | - skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT); - - mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; - len = (ntohs(iph->tot_len) - (iph->ihl << 2)); - if (likely(iph->protocol == IPPROTO_TCP)) { - check = &(tcp_hdr(skb)->check); - mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC; - mac_iocb_ptr->total_hdrs_len = - cpu_to_le16(skb_transport_offset(skb) + - (tcp_hdr(skb)->doff << 2)); - } else { - check = &(udp_hdr(skb)->check); - mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC; - mac_iocb_ptr->total_hdrs_len = - cpu_to_le16(skb_transport_offset(skb) + - sizeof(struct udphdr)); - } - *check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, len, iph->protocol, 0); -} - -static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) -{ - struct tx_ring_desc *tx_ring_desc; - struct ob_mac_iocb_req *mac_iocb_ptr; - struct ql_adapter *qdev = netdev_priv(ndev); - int tso; - struct tx_ring *tx_ring; - u32 tx_ring_idx = (u32) skb->queue_mapping; - - tx_ring = &qdev->tx_ring[tx_ring_idx]; - - if (skb_padto(skb, ETH_ZLEN)) - return NETDEV_TX_OK; - - if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { - netif_info(qdev, tx_queued, qdev->ndev, - "%s: BUG! shutting down tx queue %d due to lack of resources.\n", - __func__, tx_ring_idx); - netif_stop_subqueue(ndev, tx_ring->wq_id); - tx_ring->tx_errors++; - return NETDEV_TX_BUSY; - } - tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; - mac_iocb_ptr = tx_ring_desc->queue_entry; - memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr)); - - mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB; - mac_iocb_ptr->tid = tx_ring_desc->index; - /* We use the upper 32-bits to store the tx queue for this IO. - * When we get the completion we can use it to establish the context. 
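
The TSO setup in ql_tso() above seeds the TCP checksum field with ~csum_tcpudp_magic(..., 0, ...), i.e. the un-inverted pseudo-header sum computed with a zero length, so the hardware can fold in each cut segment's real length and payload sum and invert at the end. A rough IPv4 model of that seed (byte-order details elided; addresses treated as two 16-bit words each):

    #include <stdint.h>

    static uint16_t tso_seed(uint32_t saddr, uint32_t daddr, uint8_t proto)
    {
            uint64_t sum = 0;

            sum += (saddr >> 16) + (saddr & 0xffff);
            sum += (daddr >> 16) + (daddr & 0xffff);
            sum += proto;                     /* TCP length intentionally 0 for TSO */
            while (sum >> 16)                 /* fold carries into 16 bits */
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)sum;             /* NOT inverted: hardware finishes it */
    }
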
- */ - mac_iocb_ptr->txq_idx = tx_ring_idx; - tx_ring_desc->skb = skb; - - mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); - - if (skb_vlan_tag_present(skb)) { - netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, - "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb)); - mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; - mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); - } - tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); - if (tso < 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) { - ql_hw_csum_setup(skb, - (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); - } - if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != - NETDEV_TX_OK) { - netif_err(qdev, tx_queued, qdev->ndev, - "Could not map the segments.\n"); - tx_ring->tx_errors++; - return NETDEV_TX_BUSY; - } - QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); - tx_ring->prod_idx++; - if (tx_ring->prod_idx == tx_ring->wq_len) - tx_ring->prod_idx = 0; - wmb(); - - ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); - netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, - "tx queued, slot %d, len %d\n", - tx_ring->prod_idx, skb->len); - - atomic_dec(&tx_ring->tx_count); - - if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { - netif_stop_subqueue(ndev, tx_ring->wq_id); - if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) - /* - * The queue got stopped because the tx_ring was full. - * Wake it up, because it's now at least 25% empty. - */ - netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); - } - return NETDEV_TX_OK; -} - - -static void ql_free_shadow_space(struct ql_adapter *qdev) -{ - if (qdev->rx_ring_shadow_reg_area) { - pci_free_consistent(qdev->pdev, - PAGE_SIZE, - qdev->rx_ring_shadow_reg_area, - qdev->rx_ring_shadow_reg_dma); - qdev->rx_ring_shadow_reg_area = NULL; - } - if (qdev->tx_ring_shadow_reg_area) { - pci_free_consistent(qdev->pdev, - PAGE_SIZE, - qdev->tx_ring_shadow_reg_area, - qdev->tx_ring_shadow_reg_dma); - qdev->tx_ring_shadow_reg_area = NULL; - } -} - -static int ql_alloc_shadow_space(struct ql_adapter *qdev) -{ - qdev->rx_ring_shadow_reg_area = - pci_zalloc_consistent(qdev->pdev, PAGE_SIZE, - &qdev->rx_ring_shadow_reg_dma); - if (qdev->rx_ring_shadow_reg_area == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Allocation of RX shadow space failed.\n"); - return -ENOMEM; - } - - qdev->tx_ring_shadow_reg_area = - pci_zalloc_consistent(qdev->pdev, PAGE_SIZE, - &qdev->tx_ring_shadow_reg_dma); - if (qdev->tx_ring_shadow_reg_area == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Allocation of TX shadow space failed.\n"); - goto err_wqp_sh_area; - } - return 0; - -err_wqp_sh_area: - pci_free_consistent(qdev->pdev, - PAGE_SIZE, - qdev->rx_ring_shadow_reg_area, - qdev->rx_ring_shadow_reg_dma); - return -ENOMEM; -} - -static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) -{ - struct tx_ring_desc *tx_ring_desc; - int i; - struct ob_mac_iocb_req *mac_iocb_ptr; - - mac_iocb_ptr = tx_ring->wq_base; - tx_ring_desc = tx_ring->q; - for (i = 0; i < tx_ring->wq_len; i++) { - tx_ring_desc->index = i; - tx_ring_desc->skb = NULL; - tx_ring_desc->queue_entry = mac_iocb_ptr; - mac_iocb_ptr++; - tx_ring_desc++; - } - atomic_set(&tx_ring->tx_count, tx_ring->wq_len); -} - -static void ql_free_tx_resources(struct ql_adapter *qdev, - struct tx_ring *tx_ring) -{ - if (tx_ring->wq_base) { - pci_free_consistent(qdev->pdev, tx_ring->wq_size, - tx_ring->wq_base, tx_ring->wq_base_dma); - tx_ring->wq_base = NULL; - } - 
kfree(tx_ring->q); - tx_ring->q = NULL; -} - -static int ql_alloc_tx_resources(struct ql_adapter *qdev, - struct tx_ring *tx_ring) -{ - tx_ring->wq_base = - pci_alloc_consistent(qdev->pdev, tx_ring->wq_size, - &tx_ring->wq_base_dma); - - if ((tx_ring->wq_base == NULL) || - tx_ring->wq_base_dma & WQ_ADDR_ALIGN) - goto pci_alloc_err; - - tx_ring->q = - kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc), - GFP_KERNEL); - if (tx_ring->q == NULL) - goto err; - - return 0; -err: - pci_free_consistent(qdev->pdev, tx_ring->wq_size, - tx_ring->wq_base, tx_ring->wq_base_dma); - tx_ring->wq_base = NULL; -pci_alloc_err: - netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n"); - return -ENOMEM; -} - -static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - struct bq_desc *lbq_desc; - - uint32_t curr_idx, clean_idx; - - curr_idx = rx_ring->lbq_curr_idx; - clean_idx = rx_ring->lbq_clean_idx; - while (curr_idx != clean_idx) { - lbq_desc = &rx_ring->lbq[curr_idx]; - - if (lbq_desc->p.pg_chunk.last_flag) { - pci_unmap_page(qdev->pdev, - lbq_desc->p.pg_chunk.map, - ql_lbq_block_size(qdev), - PCI_DMA_FROMDEVICE); - lbq_desc->p.pg_chunk.last_flag = 0; - } - - put_page(lbq_desc->p.pg_chunk.page); - lbq_desc->p.pg_chunk.page = NULL; - - if (++curr_idx == rx_ring->lbq_len) - curr_idx = 0; - - } - if (rx_ring->pg_chunk.page) { - pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map, - ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); - put_page(rx_ring->pg_chunk.page); - rx_ring->pg_chunk.page = NULL; - } -} - -static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - int i; - struct bq_desc *sbq_desc; - - for (i = 0; i < rx_ring->sbq_len; i++) { - sbq_desc = &rx_ring->sbq[i]; - if (sbq_desc == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "sbq_desc %d is NULL.\n", i); - return; - } - if (sbq_desc->p.skb) { - pci_unmap_single(qdev->pdev, - dma_unmap_addr(sbq_desc, mapaddr), - dma_unmap_len(sbq_desc, maplen), - PCI_DMA_FROMDEVICE); - dev_kfree_skb(sbq_desc->p.skb); - sbq_desc->p.skb = NULL; - } - } -} - -/* Free all large and small rx buffers associated - * with the completion queues for this device. 
- */ -static void ql_free_rx_buffers(struct ql_adapter *qdev) -{ - int i; - struct rx_ring *rx_ring; - - for (i = 0; i < qdev->rx_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - if (rx_ring->lbq) - ql_free_lbq_buffers(qdev, rx_ring); - if (rx_ring->sbq) - ql_free_sbq_buffers(qdev, rx_ring); - } -} - -static void ql_alloc_rx_buffers(struct ql_adapter *qdev) -{ - struct rx_ring *rx_ring; - int i; - - for (i = 0; i < qdev->rx_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - if (rx_ring->type != TX_Q) - ql_update_buffer_queues(qdev, rx_ring); - } -} - -static void ql_init_lbq_ring(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - int i; - struct bq_desc *lbq_desc; - __le64 *bq = rx_ring->lbq_base; - - memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc)); - for (i = 0; i < rx_ring->lbq_len; i++) { - lbq_desc = &rx_ring->lbq[i]; - memset(lbq_desc, 0, sizeof(*lbq_desc)); - lbq_desc->index = i; - lbq_desc->addr = bq; - bq++; - } -} - -static void ql_init_sbq_ring(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - int i; - struct bq_desc *sbq_desc; - __le64 *bq = rx_ring->sbq_base; - - memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc)); - for (i = 0; i < rx_ring->sbq_len; i++) { - sbq_desc = &rx_ring->sbq[i]; - memset(sbq_desc, 0, sizeof(*sbq_desc)); - sbq_desc->index = i; - sbq_desc->addr = bq; - bq++; - } -} - -static void ql_free_rx_resources(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - /* Free the small buffer queue. */ - if (rx_ring->sbq_base) { - pci_free_consistent(qdev->pdev, - rx_ring->sbq_size, - rx_ring->sbq_base, rx_ring->sbq_base_dma); - rx_ring->sbq_base = NULL; - } - - /* Free the small buffer queue control blocks. */ - kfree(rx_ring->sbq); - rx_ring->sbq = NULL; - - /* Free the large buffer queue. */ - if (rx_ring->lbq_base) { - pci_free_consistent(qdev->pdev, - rx_ring->lbq_size, - rx_ring->lbq_base, rx_ring->lbq_base_dma); - rx_ring->lbq_base = NULL; - } - - /* Free the large buffer queue control blocks. */ - kfree(rx_ring->lbq); - rx_ring->lbq = NULL; - - /* Free the rx queue. */ - if (rx_ring->cq_base) { - pci_free_consistent(qdev->pdev, - rx_ring->cq_size, - rx_ring->cq_base, rx_ring->cq_base_dma); - rx_ring->cq_base = NULL; - } -} - -/* Allocate queues and buffers for this completions queue based - * on the values in the parameter structure. */ -static int ql_alloc_rx_resources(struct ql_adapter *qdev, - struct rx_ring *rx_ring) -{ - - /* - * Allocate the completion queue for this rx_ring. - */ - rx_ring->cq_base = - pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, - &rx_ring->cq_base_dma); - - if (rx_ring->cq_base == NULL) { - netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n"); - return -ENOMEM; - } - - if (rx_ring->sbq_len) { - /* - * Allocate small buffer queue. - */ - rx_ring->sbq_base = - pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, - &rx_ring->sbq_base_dma); - - if (rx_ring->sbq_base == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Small buffer queue allocation failed.\n"); - goto err_mem; - } - - /* - * Allocate small buffer queue control blocks. - */ - rx_ring->sbq = kmalloc_array(rx_ring->sbq_len, - sizeof(struct bq_desc), - GFP_KERNEL); - if (rx_ring->sbq == NULL) - goto err_mem; - - ql_init_sbq_ring(qdev, rx_ring); - } - - if (rx_ring->lbq_len) { - /* - * Allocate large buffer queue. 
- */ - rx_ring->lbq_base = - pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, - &rx_ring->lbq_base_dma); - - if (rx_ring->lbq_base == NULL) { - netif_err(qdev, ifup, qdev->ndev, - "Large buffer queue allocation failed.\n"); - goto err_mem; - } - /* - * Allocate large buffer queue control blocks. - */ - rx_ring->lbq = kmalloc_array(rx_ring->lbq_len, - sizeof(struct bq_desc), - GFP_KERNEL); - if (rx_ring->lbq == NULL) - goto err_mem; - - ql_init_lbq_ring(qdev, rx_ring); - } - - return 0; - -err_mem: - ql_free_rx_resources(qdev, rx_ring); - return -ENOMEM; -} - -static void ql_tx_ring_clean(struct ql_adapter *qdev) -{ - struct tx_ring *tx_ring; - struct tx_ring_desc *tx_ring_desc; - int i, j; - - /* - * Loop through all queues and free - * any resources. - */ - for (j = 0; j < qdev->tx_ring_count; j++) { - tx_ring = &qdev->tx_ring[j]; - for (i = 0; i < tx_ring->wq_len; i++) { - tx_ring_desc = &tx_ring->q[i]; - if (tx_ring_desc && tx_ring_desc->skb) { - netif_err(qdev, ifdown, qdev->ndev, - "Freeing lost SKB %p, from queue %d, index %d.\n", - tx_ring_desc->skb, j, - tx_ring_desc->index); - ql_unmap_send(qdev, tx_ring_desc, - tx_ring_desc->map_cnt); - dev_kfree_skb(tx_ring_desc->skb); - tx_ring_desc->skb = NULL; - } - } - } -} - -static void ql_free_mem_resources(struct ql_adapter *qdev) -{ - int i; - - for (i = 0; i < qdev->tx_ring_count; i++) - ql_free_tx_resources(qdev, &qdev->tx_ring[i]); - for (i = 0; i < qdev->rx_ring_count; i++) - ql_free_rx_resources(qdev, &qdev->rx_ring[i]); - ql_free_shadow_space(qdev); -} - -static int ql_alloc_mem_resources(struct ql_adapter *qdev) -{ - int i; - - /* Allocate space for our shadow registers and such. */ - if (ql_alloc_shadow_space(qdev)) - return -ENOMEM; - - for (i = 0; i < qdev->rx_ring_count; i++) { - if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { - netif_err(qdev, ifup, qdev->ndev, - "RX resource allocation failed.\n"); - goto err_mem; - } - } - /* Allocate tx queue resources */ - for (i = 0; i < qdev->tx_ring_count; i++) { - if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { - netif_err(qdev, ifup, qdev->ndev, - "TX resource allocation failed.\n"); - goto err_mem; - } - } - return 0; - -err_mem: - ql_free_mem_resources(qdev); - return -ENOMEM; -} - -/* Set up the rx ring control block and pass it to the chip. - * The control block is defined as - * "Completion Queue Initialization Control Block", or cqicb. - */ -static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) -{ - struct cqicb *cqicb = &rx_ring->cqicb; - void *shadow_reg = qdev->rx_ring_shadow_reg_area + - (rx_ring->cq_id * RX_RING_SHADOW_SPACE); - u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma + - (rx_ring->cq_id * RX_RING_SHADOW_SPACE); - void __iomem *doorbell_area = - qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); - int err = 0; - u16 bq_len; - u64 tmp; - __le64 *base_indirect_ptr; - int page_entries; - - /* Set up the shadow registers for this ring. 
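
The lbq/sbq indirection tables wired up in the shadow area below hold one little-endian DMA address per DB_PAGE_SIZE-sized slice of a (physically contiguous) buffer queue - exactly what the do/while fill loops further down compute. In isolation:

    #include <stdint.h>

    /* Sketch of the indirection-table fill: one entry per page-sized
     * slice of the queue, starting at its base DMA address. (The driver
     * additionally converts each entry with cpu_to_le64().) */
    static void fill_indirect(uint64_t *tbl, uint64_t base_dma,
                              unsigned int pages, uint64_t page_size)
    {
            unsigned int i;

            for (i = 0; i < pages; i++)
                    tbl[i] = base_dma + (uint64_t)i * page_size;
    }
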
*/ - rx_ring->prod_idx_sh_reg = shadow_reg; - rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; - *rx_ring->prod_idx_sh_reg = 0; - shadow_reg += sizeof(u64); - shadow_reg_dma += sizeof(u64); - rx_ring->lbq_base_indirect = shadow_reg; - rx_ring->lbq_base_indirect_dma = shadow_reg_dma; - shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); - shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); - rx_ring->sbq_base_indirect = shadow_reg; - rx_ring->sbq_base_indirect_dma = shadow_reg_dma; - - /* PCI doorbell mem area + 0x00 for consumer index register */ - rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area; - rx_ring->cnsmr_idx = 0; - rx_ring->curr_entry = rx_ring->cq_base; - - /* PCI doorbell mem area + 0x04 for valid register */ - rx_ring->valid_db_reg = doorbell_area + 0x04; - - /* PCI doorbell mem area + 0x18 for large buffer consumer */ - rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18); - - /* PCI doorbell mem area + 0x1c */ - rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c); - - memset((void *)cqicb, 0, sizeof(struct cqicb)); - cqicb->msix_vect = rx_ring->irq; - - bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len; - cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT); - - cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma); - - cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma); - - /* - * Set up the control block load flags. - */ - cqicb->flags = FLAGS_LC | /* Load queue base address */ - FLAGS_LV | /* Load MSI-X vector */ - FLAGS_LI; /* Load irq delay values */ - if (rx_ring->lbq_len) { - cqicb->flags |= FLAGS_LL; /* Load lbq values */ - tmp = (u64)rx_ring->lbq_base_dma; - base_indirect_ptr = rx_ring->lbq_base_indirect; - page_entries = 0; - do { - *base_indirect_ptr = cpu_to_le64(tmp); - tmp += DB_PAGE_SIZE; - base_indirect_ptr++; - page_entries++; - } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); - cqicb->lbq_addr = - cpu_to_le64(rx_ring->lbq_base_indirect_dma); - bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : - (u16) rx_ring->lbq_buf_size; - cqicb->lbq_buf_size = cpu_to_le16(bq_len); - bq_len = (rx_ring->lbq_len == 65536) ? 0 : - (u16) rx_ring->lbq_len; - cqicb->lbq_len = cpu_to_le16(bq_len); - rx_ring->lbq_prod_idx = 0; - rx_ring->lbq_curr_idx = 0; - rx_ring->lbq_clean_idx = 0; - rx_ring->lbq_free_cnt = rx_ring->lbq_len; - } - if (rx_ring->sbq_len) { - cqicb->flags |= FLAGS_LS; /* Load sbq values */ - tmp = (u64)rx_ring->sbq_base_dma; - base_indirect_ptr = rx_ring->sbq_base_indirect; - page_entries = 0; - do { - *base_indirect_ptr = cpu_to_le64(tmp); - tmp += DB_PAGE_SIZE; - base_indirect_ptr++; - page_entries++; - } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len)); - cqicb->sbq_addr = - cpu_to_le64(rx_ring->sbq_base_indirect_dma); - cqicb->sbq_buf_size = - cpu_to_le16((u16)(rx_ring->sbq_buf_size)); - bq_len = (rx_ring->sbq_len == 65536) ? 0 : - (u16) rx_ring->sbq_len; - cqicb->sbq_len = cpu_to_le16(bq_len); - rx_ring->sbq_prod_idx = 0; - rx_ring->sbq_curr_idx = 0; - rx_ring->sbq_clean_idx = 0; - rx_ring->sbq_free_cnt = rx_ring->sbq_len; - } - switch (rx_ring->type) { - case TX_Q: - cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); - cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); - break; - case RX_Q: - /* Inbound completion handling rx_rings run in - * separate NAPI contexts. 
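
The length encodings above all repeat one pattern: the CQICB length fields are 16 bits wide, so a maximal 65536-entry queue is written as 0. As a trivial standalone helper (illustrative, not in the driver):

    #include <stdint.h>

    static uint16_t encode_qlen(uint32_t len)
    {
            return (len == 65536) ? 0 : (uint16_t)len;  /* 64K wraps to 0 in u16 */
    }
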
- */ - netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, - 64); - cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); - cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); - break; - default: - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "Invalid rx_ring->type = %d.\n", rx_ring->type); - } - err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), - CFG_LCQ, rx_ring->cq_id); - if (err) { - netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n"); - return err; - } - return err; -} - -static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) -{ - struct wqicb *wqicb = (struct wqicb *)tx_ring; - void __iomem *doorbell_area = - qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); - void *shadow_reg = qdev->tx_ring_shadow_reg_area + - (tx_ring->wq_id * sizeof(u64)); - u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma + - (tx_ring->wq_id * sizeof(u64)); - int err = 0; - - /* - * Assign doorbell registers for this tx_ring. - */ - /* TX PCI doorbell mem area for tx producer index */ - tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area; - tx_ring->prod_idx = 0; - /* TX PCI doorbell mem area + 0x04 */ - tx_ring->valid_db_reg = doorbell_area + 0x04; - - /* - * Assign shadow registers for this tx_ring. - */ - tx_ring->cnsmr_idx_sh_reg = shadow_reg; - tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma; - - wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT); - wqicb->flags = cpu_to_le16(Q_FLAGS_LC | - Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO); - wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); - wqicb->rid = 0; - wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma); - - wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma); - - ql_init_tx_ring(qdev, tx_ring); - - err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, - (u16) tx_ring->wq_id); - if (err) { - netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n"); - return err; - } - return err; -} - -static void ql_disable_msix(struct ql_adapter *qdev) -{ - if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { - pci_disable_msix(qdev->pdev); - clear_bit(QL_MSIX_ENABLED, &qdev->flags); - kfree(qdev->msi_x_entry); - qdev->msi_x_entry = NULL; - } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) { - pci_disable_msi(qdev->pdev); - clear_bit(QL_MSI_ENABLED, &qdev->flags); - } -} - -/* We start by trying to get the number of vectors - * stored in qdev->intr_count. If we don't get that - * many then we reduce the count and try again. - */ -static void ql_enable_msix(struct ql_adapter *qdev) -{ - int i, err; - - /* Get the MSIX vectors. */ - if (qlge_irq_type == MSIX_IRQ) { - /* Try to alloc space for the msix struct, - * if it fails then go to MSI/legacy. 
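
ql_enable_msix(), continuing below, degrades gracefully through three interrupt modes: MSI-X with up to one vector per RSS ring, then single-vector MSI, then a shared legacy INTx line. The ladder, reduced to a sketch (enum names here are illustrative; the driver's constants are MSIX_IRQ, MSI_IRQ and LEG_IRQ):

    #include <stdbool.h>

    enum irq_mode { MODE_MSIX, MODE_MSI, MODE_LEGACY };

    static enum irq_mode pick_irq(bool msix_ok, bool msi_ok)
    {
            if (msix_ok)
                    return MODE_MSIX;    /* per-ring vectors */
            if (msi_ok)
                    return MODE_MSI;     /* one vector, not shared */
            return MODE_LEGACY;          /* shared INTx fallback */
    }
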
-	 */
-		qdev->msi_x_entry = kcalloc(qdev->intr_count,
-					    sizeof(struct msix_entry),
-					    GFP_KERNEL);
-		if (!qdev->msi_x_entry) {
-			qlge_irq_type = MSI_IRQ;
-			goto msi;
-		}
-
-		for (i = 0; i < qdev->intr_count; i++)
-			qdev->msi_x_entry[i].entry = i;
-
-		err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
-					    1, qdev->intr_count);
-		if (err < 0) {
-			kfree(qdev->msi_x_entry);
-			qdev->msi_x_entry = NULL;
-			netif_warn(qdev, ifup, qdev->ndev,
-				   "MSI-X Enable failed, trying MSI.\n");
-			qlge_irq_type = MSI_IRQ;
-		} else {
-			qdev->intr_count = err;
-			set_bit(QL_MSIX_ENABLED, &qdev->flags);
-			netif_info(qdev, ifup, qdev->ndev,
-				   "MSI-X Enabled, got %d vectors.\n",
-				   qdev->intr_count);
-			return;
-		}
-	}
-msi:
-	qdev->intr_count = 1;
-	if (qlge_irq_type == MSI_IRQ) {
-		if (!pci_enable_msi(qdev->pdev)) {
-			set_bit(QL_MSI_ENABLED, &qdev->flags);
-			netif_info(qdev, ifup, qdev->ndev,
-				   "Running with MSI interrupts.\n");
-			return;
-		}
-	}
-	qlge_irq_type = LEG_IRQ;
-	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
-		     "Running with legacy interrupts.\n");
-}
-
-/* Each vector services 1 RSS ring and 1 or more
- * TX completion rings. This function loops through
- * the TX completion rings and assigns the vector that
- * will service each. An example would be if there are
- * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
- * This would mean that vector 0 would service RSS ring 0
- * and TX completion rings 0,1,2 and 3. Vector 1 would
- * service RSS ring 1 and TX completion rings 4,5,6 and 7.
- */
-static void ql_set_tx_vect(struct ql_adapter *qdev)
-{
-	int i, j, vect;
-	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
-
-	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
-		/* Assign irq vectors to TX rx_rings.*/
-		for (vect = 0, j = 0, i = qdev->rss_ring_count;
-		     i < qdev->rx_ring_count; i++) {
-			if (j == tx_rings_per_vector) {
-				vect++;
-				j = 0;
-			}
-			qdev->rx_ring[i].irq = vect;
-			j++;
-		}
-	} else {
-		/* For single vector all rings have an irq
-		 * of zero.
-		 */
-		for (i = 0; i < qdev->rx_ring_count; i++)
-			qdev->rx_ring[i].irq = 0;
-	}
-}
-
-/* Set the interrupt mask for this vector. Each vector
- * will service 1 RSS ring and 1 or more TX completion
- * rings. This function sets up a bit mask per vector
- * that indicates which rings it services.
- */
-static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
-{
-	int j, vect = ctx->intr;
-	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
-
-	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
-		/* Add the RSS ring serviced by this vector
-		 * to the mask.
-		 */
-		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
-		/* Add the TX ring(s) serviced by this vector
-		 * to the mask. */
-		for (j = 0; j < tx_rings_per_vector; j++) {
-			ctx->irq_mask |=
-			    (1 << qdev->rx_ring[qdev->rss_ring_count +
-			     (vect * tx_rings_per_vector) + j].cq_id);
-		}
-	} else {
-		/* For single vector we just shift each queue's
-		 * ID into the mask.
-		 */
-		for (j = 0; j < qdev->rx_ring_count; j++)
-			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
-	}
-}
-
-/*
- * Here we build the intr_context structures based on
- * our rx_ring count and intr vector count.
- * The intr_context structure is used to hook each vector
- * to possibly different handlers.
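
The mask math in ql_set_irq_mask() above relies on cq_id equaling the ring index: each vector's mask is its own RSS ring bit plus a contiguous slice of TX completion rings. A sketch (assumes fewer than 32 completion queues, as the driver's u32 mask does):

    #include <stdint.h>

    static uint32_t vector_mask(int vect, int rss_rings, int tx_per_vect)
    {
            uint32_t mask = 1u << vect;      /* RSS ring: cq_id == vector number */
            int j;

            for (j = 0; j < tx_per_vect; j++)
                    mask |= 1u << (rss_rings + vect * tx_per_vect + j);
            return mask;
    }
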
- */ -static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) -{ - int i = 0; - struct intr_context *intr_context = &qdev->intr_context[0]; - - if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { - /* Each rx_ring has it's - * own intr_context since we have separate - * vectors for each queue. - */ - for (i = 0; i < qdev->intr_count; i++, intr_context++) { - qdev->rx_ring[i].irq = i; - intr_context->intr = i; - intr_context->qdev = qdev; - /* Set up this vector's bit-mask that indicates - * which queues it services. - */ - ql_set_irq_mask(qdev, intr_context); - /* - * We set up each vectors enable/disable/read bits so - * there's no bit/mask calculations in the critical path. - */ - intr_context->intr_en_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | - INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD - | i; - intr_context->intr_dis_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | - INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK | - INTR_EN_IHD | i; - intr_context->intr_read_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | - INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD | - i; - if (i == 0) { - /* The first vector/queue handles - * broadcast/multicast, fatal errors, - * and firmware events. This in addition - * to normal inbound NAPI processing. - */ - intr_context->handler = qlge_isr; - sprintf(intr_context->name, "%s-rx-%d", - qdev->ndev->name, i); - } else { - /* - * Inbound queues handle unicast frames only. - */ - intr_context->handler = qlge_msix_rx_isr; - sprintf(intr_context->name, "%s-rx-%d", - qdev->ndev->name, i); - } - } - } else { - /* - * All rx_rings use the same intr_context since - * there is only one vector. - */ - intr_context->intr = 0; - intr_context->qdev = qdev; - /* - * We set up each vectors enable/disable/read bits so - * there's no bit/mask calculations in the critical path. - */ - intr_context->intr_en_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE; - intr_context->intr_dis_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | - INTR_EN_TYPE_DISABLE; - intr_context->intr_read_mask = - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ; - /* - * Single interrupt means one handler for all rings. - */ - intr_context->handler = qlge_isr; - sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name); - /* Set up this vector's bit-mask that indicates - * which queues it services. In this case there is - * a single vector so it will service all RSS and - * TX completion rings. - */ - ql_set_irq_mask(qdev, intr_context); - } - /* Tell the TX completion rings which MSIx vector - * they will be using. 
- */ - ql_set_tx_vect(qdev); -} - -static void ql_free_irq(struct ql_adapter *qdev) -{ - int i; - struct intr_context *intr_context = &qdev->intr_context[0]; - - for (i = 0; i < qdev->intr_count; i++, intr_context++) { - if (intr_context->hooked) { - if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { - free_irq(qdev->msi_x_entry[i].vector, - &qdev->rx_ring[i]); - } else { - free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); - } - } - } - ql_disable_msix(qdev); -} - -static int ql_request_irq(struct ql_adapter *qdev) -{ - int i; - int status = 0; - struct pci_dev *pdev = qdev->pdev; - struct intr_context *intr_context = &qdev->intr_context[0]; - - ql_resolve_queues_to_irqs(qdev); - - for (i = 0; i < qdev->intr_count; i++, intr_context++) { - atomic_set(&intr_context->irq_cnt, 0); - if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { - status = request_irq(qdev->msi_x_entry[i].vector, - intr_context->handler, - 0, - intr_context->name, - &qdev->rx_ring[i]); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed request for MSIX interrupt %d.\n", - i); - goto err_irq; - } - } else { - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "trying msi or legacy interrupts.\n"); - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "%s: irq = %d.\n", __func__, pdev->irq); - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "%s: context->name = %s.\n", __func__, - intr_context->name); - netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, - "%s: dev_id = 0x%p.\n", __func__, - &qdev->rx_ring[0]); - status = - request_irq(pdev->irq, qlge_isr, - test_bit(QL_MSI_ENABLED, - &qdev-> - flags) ? 0 : IRQF_SHARED, - intr_context->name, &qdev->rx_ring[0]); - if (status) - goto err_irq; - - netif_err(qdev, ifup, qdev->ndev, - "Hooked intr %d, queue type %s, with name %s.\n", - i, - qdev->rx_ring[0].type == DEFAULT_Q ? - "DEFAULT_Q" : - qdev->rx_ring[0].type == TX_Q ? "TX_Q" : - qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "", - intr_context->name); - } - intr_context->hooked = 1; - } - return status; -err_irq: - netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n"); - ql_free_irq(qdev); - return status; -} - -static int ql_start_rss(struct ql_adapter *qdev) -{ - static const u8 init_hash_seed[] = { - 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, - 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, - 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, - 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, - 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa - }; - struct ricb *ricb = &qdev->ricb; - int status = 0; - int i; - u8 *hash_id = (u8 *) ricb->hash_cq_id; - - memset((void *)ricb, 0, sizeof(*ricb)); - - ricb->base_cq = RSS_L4K; - ricb->flags = - (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6); - ricb->mask = cpu_to_le16((u16)(0x3ff)); - - /* - * Fill out the Indirection Table. - */ - for (i = 0; i < 1024; i++) - hash_id[i] = (i & (qdev->rss_ring_count - 1)); - - memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); - memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); - - status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n"); - return status; - } - return status; -} - -static int ql_clear_routing_entries(struct ql_adapter *qdev) -{ - int i, status = 0; - - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (status) - return status; - /* Clear all the entries in the routing table. 
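
The 1024-entry indirection table filled in ql_start_rss() above spreads hash buckets across RSS rings with a mask rather than a modulo, which assumes rss_ring_count is a power of two for an even spread (ricb->mask is set to 0x3ff to match the table size). In isolation:

    #include <stdint.h>

    static void fill_rss_table(uint8_t *hash_cq_id, int rss_ring_count)
    {
            int i;

            /* i & (n - 1) == i % n only when n is a power of two */
            for (i = 0; i < 1024; i++)
                    hash_cq_id[i] = i & (rss_ring_count - 1);
    }
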
 */
-	for (i = 0; i < 16; i++) {
-		status = ql_set_routing_reg(qdev, i, 0, 0);
-		if (status) {
-			netif_err(qdev, ifup, qdev->ndev,
-				  "Failed to init routing register for CAM packets.\n");
-			break;
-		}
-	}
-	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
-	return status;
-}
-
-/* Initialize the frame-to-queue routing. */
-static int ql_route_initialize(struct ql_adapter *qdev)
-{
-	int status = 0;
-
-	/* Clear all the entries in the routing table. */
-	status = ql_clear_routing_entries(qdev);
-	if (status)
-		return status;
-
-	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
-	if (status)
-		return status;
-
-	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
-				    RT_IDX_IP_CSUM_ERR, 1);
-	if (status) {
-		netif_err(qdev, ifup, qdev->ndev,
-			  "Failed to init routing register "
-			  "for IP CSUM error packets.\n");
-		goto exit;
-	}
-	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
-				    RT_IDX_TU_CSUM_ERR, 1);
-	if (status) {
-		netif_err(qdev, ifup, qdev->ndev,
-			  "Failed to init routing register "
-			  "for TCP/UDP CSUM error packets.\n");
-		goto exit;
-	}
-	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
-	if (status) {
-		netif_err(qdev, ifup, qdev->ndev,
-			  "Failed to init routing register for broadcast packets.\n");
-		goto exit;
-	}
-	/* If we have more than one inbound queue, then turn on RSS in the
-	 * routing block.
-	 */
-	if (qdev->rss_ring_count > 1) {
-		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
-					    RT_IDX_RSS_MATCH, 1);
-		if (status) {
-			netif_err(qdev, ifup, qdev->ndev,
-				  "Failed to init routing register for MATCH RSS packets.\n");
-			goto exit;
-		}
-	}
-
-	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
-				    RT_IDX_CAM_HIT, 1);
-	if (status)
-		netif_err(qdev, ifup, qdev->ndev,
-			  "Failed to init routing register for CAM packets.\n");
-exit:
-	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
-	return status;
-}
-
-int ql_cam_route_initialize(struct ql_adapter *qdev)
-{
-	int status, set;
-
-	/* Check if the link is up and use that to
-	 * determine if we are setting or clearing
-	 * the MAC address in the CAM.
-	 */
-	set = ql_read32(qdev, STS);
-	set &= qdev->port_link_up;
-	status = ql_set_mac_addr(qdev, set);
-	if (status) {
-		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
-		return status;
-	}
-
-	status = ql_route_initialize(qdev);
-	if (status)
-		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
-
-	return status;
-}
-
-static int ql_adapter_initialize(struct ql_adapter *qdev)
-{
-	u32 value, mask;
-	int i;
-	int status = 0;
-
-	/*
-	 * Set up the System register to halt on errors.
-	 */
-	value = SYS_EFE | SYS_FAE;
-	mask = value << 16;
-	ql_write32(qdev, SYS, mask | value);
-
-	/* Set the default queue, and VLAN behavior. */
-	value = NIC_RCV_CFG_DFQ;
-	mask = NIC_RCV_CFG_DFQ_MASK;
-	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
-		value |= NIC_RCV_CFG_RV;
-		mask |= (NIC_RCV_CFG_RV << 16);
-	}
-	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
-
-	/* Set the MPI interrupt to enabled. */
-	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
-
-	/* Enable the function, set pagesize, enable error checking. */
-	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
-	    FSC_EC | FSC_VM_PAGE_4K;
-	value |= SPLT_SETTING;
-
-	/* Set/clear header splitting.
*/ - mask = FSC_VM_PAGESIZE_MASK | - FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); - ql_write32(qdev, FSC, mask | value); - - ql_write32(qdev, SPLT_HDR, SPLT_LEN); - - /* Set RX packet routing to use port/pci function on which the - * packet arrived on in addition to usual frame routing. - * This is helpful on bonding where both interfaces can have - * the same MAC address. - */ - ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); - /* Reroute all packets to our Interface. - * They may have been routed to MPI firmware - * due to WOL. - */ - value = ql_read32(qdev, MGMT_RCV_CFG); - value &= ~MGMT_RCV_CFG_RM; - mask = 0xffff0000; - - /* Sticky reg needs clearing due to WOL. */ - ql_write32(qdev, MGMT_RCV_CFG, mask); - ql_write32(qdev, MGMT_RCV_CFG, mask | value); - - /* Default WOL is enable on Mezz cards */ - if (qdev->pdev->subsystem_device == 0x0068 || - qdev->pdev->subsystem_device == 0x0180) - qdev->wol = WAKE_MAGIC; - - /* Start up the rx queues. */ - for (i = 0; i < qdev->rx_ring_count; i++) { - status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to start rx ring[%d].\n", i); - return status; - } - } - - /* If there is more than one inbound completion queue - * then download a RICB to configure RSS. - */ - if (qdev->rss_ring_count > 1) { - status = ql_start_rss(qdev); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n"); - return status; - } - } - - /* Start up the tx queues. */ - for (i = 0; i < qdev->tx_ring_count; i++) { - status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to start tx ring[%d].\n", i); - return status; - } - } - - /* Initialize the port and set the max framesize. */ - status = qdev->nic_ops->port_initialize(qdev); - if (status) - netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n"); - - /* Set up the MAC address and frame routing filter. */ - status = ql_cam_route_initialize(qdev); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init CAM/Routing tables.\n"); - return status; - } - - /* Start NAPI for the RSS queues. */ - for (i = 0; i < qdev->rss_ring_count; i++) - napi_enable(&qdev->rx_ring[i].napi); - - return status; -} - -/* Issue soft reset to chip. */ -static int ql_adapter_reset(struct ql_adapter *qdev) -{ - u32 value; - int status = 0; - unsigned long end_jiffies; - - /* Clear all the entries in the routing table. */ - status = ql_clear_routing_entries(qdev); - if (status) { - netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n"); - return status; - } - - /* Check if bit is set then skip the mailbox command and - * clear the bit, else we are in normal reset process. - */ - if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) { - /* Stop management traffic. */ - ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); - - /* Wait for the NIC and MGMNT FIFOs to empty. */ - ql_wait_fifo_empty(qdev); - } else - clear_bit(QL_ASIC_RECOVERY, &qdev->flags); - - ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); - - end_jiffies = jiffies + usecs_to_jiffies(30); - do { - value = ql_read32(qdev, RST_FO); - if ((value & RST_FO_FR) == 0) - break; - cpu_relax(); - } while (time_before(jiffies, end_jiffies)); - - if (value & RST_FO_FR) { - netif_err(qdev, ifdown, qdev->ndev, - "ETIMEDOUT!!! errored out of resetting the chip!\n"); - status = -ETIMEDOUT; - } - - /* Resume management traffic. 
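
The reset wait in ql_adapter_reset() above is a bounded poll on RST_FO_FR; note that usecs_to_jiffies(30) rounds up to a whole jiffy, so the real deadline is one tick. The generic shape (function pointers stand in for register reads and the jiffies comparison; error value follows kernel convention):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>

    static int wait_bit_clear(uint32_t (*read_reg)(void), uint32_t bit,
                              bool (*expired)(void))
    {
            while (!expired()) {
                    if (!(read_reg() & bit))
                            return 0;
                    /* the driver calls cpu_relax() here */
            }
            return -ETIMEDOUT;
    }
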
*/ - ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME); - return status; -} - -static void ql_display_dev_info(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - netif_info(qdev, probe, qdev->ndev, - "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, " - "XG Roll = %d, XG Rev = %d.\n", - qdev->func, - qdev->port, - qdev->chip_rev_id & 0x0000000f, - qdev->chip_rev_id >> 4 & 0x0000000f, - qdev->chip_rev_id >> 8 & 0x0000000f, - qdev->chip_rev_id >> 12 & 0x0000000f); - netif_info(qdev, probe, qdev->ndev, - "MAC address %pM\n", ndev->dev_addr); -} - -static int ql_wol(struct ql_adapter *qdev) -{ - int status = 0; - u32 wol = MB_WOL_DISABLE; - - /* The CAM is still intact after a reset, but if we - * are doing WOL, then we may need to program the - * routing regs. We would also need to issue the mailbox - * commands to instruct the MPI what to do per the ethtool - * settings. - */ - - if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | - WAKE_MCAST | WAKE_BCAST)) { - netif_err(qdev, ifdown, qdev->ndev, - "Unsupported WOL parameter. qdev->wol = 0x%x.\n", - qdev->wol); - return -EINVAL; - } - - if (qdev->wol & WAKE_MAGIC) { - status = ql_mb_wol_set_magic(qdev, 1); - if (status) { - netif_err(qdev, ifdown, qdev->ndev, - "Failed to set magic packet on %s.\n", - qdev->ndev->name); - return status; - } else - netif_info(qdev, drv, qdev->ndev, - "Enabled magic packet successfully on %s.\n", - qdev->ndev->name); - - wol |= MB_WOL_MAGIC_PKT; - } - - if (qdev->wol) { - wol |= MB_WOL_MODE_ON; - status = ql_mb_wol_mode(qdev, wol); - netif_err(qdev, drv, qdev->ndev, - "WOL %s (wol code 0x%x) on %s\n", - (status == 0) ? "Successfully set" : "Failed", - wol, qdev->ndev->name); - } - - return status; -} - -static void ql_cancel_all_work_sync(struct ql_adapter *qdev) -{ - - /* Don't kill the reset worker thread if we - * are in the process of recovery. - */ - if (test_bit(QL_ADAPTER_UP, &qdev->flags)) - cancel_delayed_work_sync(&qdev->asic_reset_work); - cancel_delayed_work_sync(&qdev->mpi_reset_work); - cancel_delayed_work_sync(&qdev->mpi_work); - cancel_delayed_work_sync(&qdev->mpi_idc_work); - cancel_delayed_work_sync(&qdev->mpi_core_to_log); - cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); -} - -static int ql_adapter_down(struct ql_adapter *qdev) -{ - int i, status = 0; - - ql_link_off(qdev); - - ql_cancel_all_work_sync(qdev); - - for (i = 0; i < qdev->rss_ring_count; i++) - napi_disable(&qdev->rx_ring[i].napi); - - clear_bit(QL_ADAPTER_UP, &qdev->flags); - - ql_disable_interrupts(qdev); - - ql_tx_ring_clean(qdev); - - /* Call netif_napi_del() from common point. - */ - for (i = 0; i < qdev->rss_ring_count; i++) - netif_napi_del(&qdev->rx_ring[i].napi); - - status = ql_adapter_reset(qdev); - if (status) - netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", - qdev->func); - ql_free_rx_buffers(qdev); - - return status; -} - -static int ql_adapter_up(struct ql_adapter *qdev) -{ - int err = 0; - - err = ql_adapter_initialize(qdev); - if (err) { - netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n"); - goto err_init; - } - set_bit(QL_ADAPTER_UP, &qdev->flags); - ql_alloc_rx_buffers(qdev); - /* If the port is initialized and the - * link is up, then turn on the carrier. - */ - if ((ql_read32(qdev, STS) & qdev->port_init) && - (ql_read32(qdev, STS) & qdev->port_link_up)) - ql_link_on(qdev); - /* Restore rx mode.
*/ - clear_bit(QL_ALLMULTI, &qdev->flags); - clear_bit(QL_PROMISCUOUS, &qdev->flags); - qlge_set_multicast_list(qdev->ndev); - - /* Restore vlan setting. */ - qlge_restore_vlan(qdev); - - ql_enable_interrupts(qdev); - ql_enable_all_completion_interrupts(qdev); - netif_tx_start_all_queues(qdev->ndev); - - return 0; -err_init: - ql_adapter_reset(qdev); - return err; -} - -static void ql_release_adapter_resources(struct ql_adapter *qdev) -{ - ql_free_mem_resources(qdev); - ql_free_irq(qdev); -} - -static int ql_get_adapter_resources(struct ql_adapter *qdev) -{ - int status = 0; - - if (ql_alloc_mem_resources(qdev)) { - netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n"); - return -ENOMEM; - } - status = ql_request_irq(qdev); - return status; -} - -static int qlge_close(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - - /* If we hit the pci_channel_io_perm_failure - * condition, then we already - * brought the adapter down. - */ - if (test_bit(QL_EEH_FATAL, &qdev->flags)) { - netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n"); - clear_bit(QL_EEH_FATAL, &qdev->flags); - return 0; - } - - /* - * Wait for device to recover from a reset. - * (Rarely happens, but possible.) - */ - while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) - msleep(1); - ql_adapter_down(qdev); - ql_release_adapter_resources(qdev); - return 0; -} - -static int ql_configure_rings(struct ql_adapter *qdev) -{ - int i; - struct rx_ring *rx_ring; - struct tx_ring *tx_ring; - int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus()); - unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ? - LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; - - qdev->lbq_buf_order = get_order(lbq_buf_len); - - /* In a perfect world we have one RSS ring for each CPU - * and each has its own vector. To do that we ask for - * cpu_cnt vectors. ql_enable_msix() will adjust the - * vector count to what we actually get. We then - * allocate an RSS ring for each. - * Essentially, we are doing min(cpu_count, msix_vector_count). - */ - qdev->intr_count = cpu_cnt; - ql_enable_msix(qdev); - /* Adjust the RSS ring count to the actual vector count. */ - qdev->rss_ring_count = qdev->intr_count; - qdev->tx_ring_count = cpu_cnt; - qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count; - - for (i = 0; i < qdev->tx_ring_count; i++) { - tx_ring = &qdev->tx_ring[i]; - memset((void *)tx_ring, 0, sizeof(*tx_ring)); - tx_ring->qdev = qdev; - tx_ring->wq_id = i; - tx_ring->wq_len = qdev->tx_ring_size; - tx_ring->wq_size = - tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); - - /* - * The completion queue IDs for the tx rings start - * immediately after the rss rings. - */ - tx_ring->cq_id = qdev->rss_ring_count + i; - } - - for (i = 0; i < qdev->rx_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - memset((void *)rx_ring, 0, sizeof(*rx_ring)); - rx_ring->qdev = qdev; - rx_ring->cq_id = i; - rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */ - if (i < qdev->rss_ring_count) { - /* - * Inbound (RSS) queues. - */ - rx_ring->cq_len = qdev->rx_ring_size; - rx_ring->cq_size = - rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); - rx_ring->lbq_len = NUM_LARGE_BUFFERS; - rx_ring->lbq_size = - rx_ring->lbq_len * sizeof(__le64); - rx_ring->lbq_buf_size = (u16)lbq_buf_len; - rx_ring->sbq_len = NUM_SMALL_BUFFERS; - rx_ring->sbq_size = - rx_ring->sbq_len * sizeof(__le64); - rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE; - rx_ring->type = RX_Q; - } else { - /* - * Outbound queue handles outbound completions only.
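To make the ring arithmetic in ql_configure_rings() concrete: the driver asks for one vector per CPU, ql_enable_msix() may grant fewer, the RSS ring count becomes whatever was granted, and the TX completion queues are numbered after the RSS completion queues. A small stand-alone sketch with illustrative numbers:

#include <stdio.h>

int main(void)
{
        int cpu_cnt = 8;                /* min(MAX_CPUS, num_online_cpus()) */
        int vectors_granted = 4;        /* what ql_enable_msix() handed back */
        int rss_ring_count = vectors_granted < cpu_cnt ? vectors_granted : cpu_cnt;
        int tx_ring_count = cpu_cnt;
        int rx_ring_count = tx_ring_count + rss_ring_count;
        int i;

        printf("rx ring array size: %d\n", rx_ring_count);
        for (i = 0; i < tx_ring_count; i++)
                printf("tx ring %d completes on cq %d\n", i, rss_ring_count + i);
        return 0;
}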
- */ - /* outbound cq is the same size as the tx_ring it services. */ - rx_ring->cq_len = qdev->tx_ring_size; - rx_ring->cq_size = - rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); - rx_ring->lbq_len = 0; - rx_ring->lbq_size = 0; - rx_ring->lbq_buf_size = 0; - rx_ring->sbq_len = 0; - rx_ring->sbq_size = 0; - rx_ring->sbq_buf_size = 0; - rx_ring->type = TX_Q; - } - } - return 0; -} - -static int qlge_open(struct net_device *ndev) -{ - int err = 0; - struct ql_adapter *qdev = netdev_priv(ndev); - - err = ql_adapter_reset(qdev); - if (err) - return err; - - err = ql_configure_rings(qdev); - if (err) - return err; - - err = ql_get_adapter_resources(qdev); - if (err) - goto error_up; - - err = ql_adapter_up(qdev); - if (err) - goto error_up; - - return err; - -error_up: - ql_release_adapter_resources(qdev); - return err; -} - -static int ql_change_rx_buffers(struct ql_adapter *qdev) -{ - struct rx_ring *rx_ring; - int i, status; - u32 lbq_buf_len; - - /* Wait for an outstanding reset to complete. */ - if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { - int i = 4; - - while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { - netif_err(qdev, ifup, qdev->ndev, - "Waiting for adapter UP...\n"); - ssleep(1); - } - - if (!i) { - netif_err(qdev, ifup, qdev->ndev, - "Timed out waiting for adapter UP\n"); - return -ETIMEDOUT; - } - } - - status = ql_adapter_down(qdev); - if (status) - goto error; - - /* Get the new rx buffer size. */ - lbq_buf_len = (qdev->ndev->mtu > 1500) ? - LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; - qdev->lbq_buf_order = get_order(lbq_buf_len); - - for (i = 0; i < qdev->rss_ring_count; i++) { - rx_ring = &qdev->rx_ring[i]; - /* Set the new size. */ - rx_ring->lbq_buf_size = lbq_buf_len; - } - - status = ql_adapter_up(qdev); - if (status) - goto error; - - return status; -error: - netif_alert(qdev, ifup, qdev->ndev, - "Driver up/down cycle failed, closing device.\n"); - set_bit(QL_ADAPTER_UP, &qdev->flags); - dev_close(qdev->ndev); - return status; -} - -static int qlge_change_mtu(struct net_device *ndev, int new_mtu) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int status; - - if (ndev->mtu == 1500 && new_mtu == 9000) { - netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n"); - } else if (ndev->mtu == 9000 && new_mtu == 1500) { - netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n"); - } else - return -EINVAL; - - queue_delayed_work(qdev->workqueue, - &qdev->mpi_port_cfg_work, 3*HZ); - - ndev->mtu = new_mtu; - - if (!netif_running(qdev->ndev)) { - return 0; - } - - status = ql_change_rx_buffers(qdev); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Changing MTU failed.\n"); - } - - return status; -} - -static struct net_device_stats *qlge_get_stats(struct net_device - *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - struct rx_ring *rx_ring = &qdev->rx_ring[0]; - struct tx_ring *tx_ring = &qdev->tx_ring[0]; - unsigned long pkts, mcast, dropped, errors, bytes; - int i; - - /* Get RX stats. */ - pkts = mcast = dropped = errors = bytes = 0; - for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { - pkts += rx_ring->rx_packets; - bytes += rx_ring->rx_bytes; - dropped += rx_ring->rx_dropped; - errors += rx_ring->rx_errors; - mcast += rx_ring->rx_multicast; - } - ndev->stats.rx_packets = pkts; - ndev->stats.rx_bytes = bytes; - ndev->stats.rx_dropped = dropped; - ndev->stats.rx_errors = errors; - ndev->stats.multicast = mcast; - - /* Get TX stats.
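The stats path that begins here is plain aggregation: each RX and TX ring keeps its own counters, and qlge_get_stats() folds them into the single net_device snapshot. A reduced stand-alone version of the same summing loop (struct names are simplified stand-ins):

#include <stdio.h>

struct ring_stats { unsigned long pkts, bytes, errors; };

int main(void)
{
        struct ring_stats rx[3] = { { 10, 1000, 0 }, { 7, 700, 1 }, { 4, 400, 0 } };
        unsigned long pkts = 0, bytes = 0, errors = 0;
        int i;

        for (i = 0; i < 3; i++) {       /* one pass per RSS ring */
                pkts   += rx[i].pkts;
                bytes  += rx[i].bytes;
                errors += rx[i].errors;
        }
        printf("rx: %lu pkts, %lu bytes, %lu errors\n", pkts, bytes, errors);
        return 0;
}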
*/ - pkts = errors = bytes = 0; - for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) { - pkts += tx_ring->tx_packets; - bytes += tx_ring->tx_bytes; - errors += tx_ring->tx_errors; - } - ndev->stats.tx_packets = pkts; - ndev->stats.tx_bytes = bytes; - ndev->stats.tx_errors = errors; - return &ndev->stats; -} - -static void qlge_set_multicast_list(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - struct netdev_hw_addr *ha; - int i, status; - - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); - if (status) - return; - /* - * Set or clear promiscuous mode if a - * transition is taking place. - */ - if (ndev->flags & IFF_PROMISC) { - if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) { - if (ql_set_routing_reg - (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to set promiscuous mode.\n"); - } else { - set_bit(QL_PROMISCUOUS, &qdev->flags); - } - } - } else { - if (test_bit(QL_PROMISCUOUS, &qdev->flags)) { - if (ql_set_routing_reg - (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to clear promiscuous mode.\n"); - } else { - clear_bit(QL_PROMISCUOUS, &qdev->flags); - } - } - } - - /* - * Set or clear all multicast mode if a - * transition is taking place. - */ - if ((ndev->flags & IFF_ALLMULTI) || - (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) { - if (!test_bit(QL_ALLMULTI, &qdev->flags)) { - if (ql_set_routing_reg - (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to set all-multi mode.\n"); - } else { - set_bit(QL_ALLMULTI, &qdev->flags); - } - } - } else { - if (test_bit(QL_ALLMULTI, &qdev->flags)) { - if (ql_set_routing_reg - (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to clear all-multi mode.\n"); - } else { - clear_bit(QL_ALLMULTI, &qdev->flags); - } - } - } - - if (!netdev_mc_empty(ndev)) { - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - goto exit; - i = 0; - netdev_for_each_mc_addr(ha, ndev) { - if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr, - MAC_ADDR_TYPE_MULTI_MAC, i)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to load multicast address.\n"); - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - goto exit; - } - i++; - } - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - if (ql_set_routing_reg - (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { - netif_err(qdev, hw, qdev->ndev, - "Failed to set multicast match mode.\n"); - } else { - set_bit(QL_ALLMULTI, &qdev->flags); - } - } -exit: - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); -} - -static int qlge_set_mac_address(struct net_device *ndev, void *p) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - struct sockaddr *addr = p; - int status; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); - /* Update local copy of current mac address.
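Note how qlge_set_multicast_list() above only touches hardware on a transition: the QL_PROMISCUOUS and QL_ALLMULTI flag bits cache what was last programmed, so a repeated call with an unchanged mode is a no-op. A minimal sketch of that transition-only update, with hw_set_promisc() as a stand-in for the ql_set_routing_reg() call:

#include <stdbool.h>
#include <stdio.h>

static bool promisc_cached;     /* plays the role of QL_PROMISCUOUS */

/* Stand-in for ql_set_routing_reg(..., RT_IDX_PROMISCUOUS_SLOT, ...). */
static int hw_set_promisc(bool on)
{
        printf("hw: promiscuous %s\n", on ? "on" : "off");
        return 0;
}

static void set_rx_mode(bool want_promisc)
{
        if (want_promisc == promisc_cached)
                return;         /* no transition, nothing to reprogram */
        if (hw_set_promisc(want_promisc) == 0)
                promisc_cached = want_promisc;
}

int main(void)
{
        set_rx_mode(true);
        set_rx_mode(true);      /* silently skipped */
        set_rx_mode(false);
        return 0;
}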
*/ - memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); - - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); - if (status) - return status; - status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, - MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); - if (status) - netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n"); - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); - return status; -} - -static void qlge_tx_timeout(struct net_device *ndev) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - ql_queue_asic_error(qdev); -} - -static void ql_asic_reset_work(struct work_struct *work) -{ - struct ql_adapter *qdev = - container_of(work, struct ql_adapter, asic_reset_work.work); - int status; - rtnl_lock(); - status = ql_adapter_down(qdev); - if (status) - goto error; - - status = ql_adapter_up(qdev); - if (status) - goto error; - - /* Restore rx mode. */ - clear_bit(QL_ALLMULTI, &qdev->flags); - clear_bit(QL_PROMISCUOUS, &qdev->flags); - qlge_set_multicast_list(qdev->ndev); - - rtnl_unlock(); - return; -error: - netif_alert(qdev, ifup, qdev->ndev, - "Driver up/down cycle failed, closing device\n"); - - set_bit(QL_ADAPTER_UP, &qdev->flags); - dev_close(qdev->ndev); - rtnl_unlock(); -} - -static const struct nic_operations qla8012_nic_ops = { - .get_flash = ql_get_8012_flash_params, - .port_initialize = ql_8012_port_initialize, -}; - -static const struct nic_operations qla8000_nic_ops = { - .get_flash = ql_get_8000_flash_params, - .port_initialize = ql_8000_port_initialize, -}; - -/* Find the pcie function number for the other NIC - * on this chip. Since both NIC functions share a - * common firmware we have the lowest enabled function - * do any common work. Examples would be resetting - * after a fatal firmware error, or doing a firmware - * coredump. - */ -static int ql_get_alt_pcie_func(struct ql_adapter *qdev) -{ - int status = 0; - u32 temp; - u32 nic_func1, nic_func2; - - status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, - &temp); - if (status) - return status; - - nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) & - MPI_TEST_NIC_FUNC_MASK); - nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) & - MPI_TEST_NIC_FUNC_MASK); - - if (qdev->func == nic_func1) - qdev->alt_func = nic_func2; - else if (qdev->func == nic_func2) - qdev->alt_func = nic_func1; - else - status = -EIO; - - return status; -} - -static int ql_get_board_info(struct ql_adapter *qdev) -{ - int status; - qdev->func = - (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; - if (qdev->func > 3) - return -EIO; - - status = ql_get_alt_pcie_func(qdev); - if (status) - return status; - - qdev->port = (qdev->func < qdev->alt_func) ? 
0 : 1; - if (qdev->port) { - qdev->xg_sem_mask = SEM_XGMAC1_MASK; - qdev->port_link_up = STS_PL1; - qdev->port_init = STS_PI1; - qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI; - qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO; - } else { - qdev->xg_sem_mask = SEM_XGMAC0_MASK; - qdev->port_link_up = STS_PL0; - qdev->port_init = STS_PI0; - qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI; - qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO; - } - qdev->chip_rev_id = ql_read32(qdev, REV_ID); - qdev->device_id = qdev->pdev->device; - if (qdev->device_id == QLGE_DEVICE_ID_8012) - qdev->nic_ops = &qla8012_nic_ops; - else if (qdev->device_id == QLGE_DEVICE_ID_8000) - qdev->nic_ops = &qla8000_nic_ops; - return status; -} - -static void ql_release_all(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - - if (qdev->workqueue) { - destroy_workqueue(qdev->workqueue); - qdev->workqueue = NULL; - } - - if (qdev->reg_base) - iounmap(qdev->reg_base); - if (qdev->doorbell_area) - iounmap(qdev->doorbell_area); - vfree(qdev->mpi_coredump); - pci_release_regions(pdev); -} - -static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, - int cards_found) -{ - struct ql_adapter *qdev = netdev_priv(ndev); - int err = 0; - - memset((void *)qdev, 0, sizeof(*qdev)); - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "PCI device enable failed.\n"); - return err; - } - - qdev->ndev = ndev; - qdev->pdev = pdev; - pci_set_drvdata(pdev, ndev); - - /* Set PCIe read request size */ - err = pcie_set_readrq(pdev, 4096); - if (err) { - dev_err(&pdev->dev, "Set readrq failed.\n"); - goto err_out1; - } - - err = pci_request_regions(pdev, DRV_NAME); - if (err) { - dev_err(&pdev->dev, "PCI region request failed.\n"); - goto err_out1; - } - - pci_set_master(pdev); - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - set_bit(QL_DMA64, &qdev->flags); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - } - - if (err) { - dev_err(&pdev->dev, "No usable DMA configuration.\n"); - goto err_out2; - } - - /* Set PCIe reset type for EEH to fundamental.
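The DMA setup above follows the usual 64-then-32-bit fallback: try the wide mask first, remember the result in QL_DMA64 (which later turns on NETIF_F_HIGHDMA), and fail the probe only if neither mask is usable. A sketch of just that control flow, with try_set_mask() standing in for pci_set_dma_mask():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for pci_set_dma_mask(); pretend only 32-bit DMA works here. */
static bool try_set_mask(uint64_t mask)
{
        return mask <= 0xffffffffULL;
}

int main(void)
{
        bool dma64 = false;

        if (try_set_mask(~0ULL))
                dma64 = true;                   /* QL_DMA64 in the driver */
        else if (!try_set_mask(0xffffffffULL))
                return 1;                       /* "No usable DMA configuration." */

        printf("using %s-bit DMA\n", dma64 ? "64" : "32");
        return 0;
}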
*/ - pdev->needs_freset = 1; - pci_save_state(pdev); - qdev->reg_base = - ioremap_nocache(pci_resource_start(pdev, 1), - pci_resource_len(pdev, 1)); - if (!qdev->reg_base) { - dev_err(&pdev->dev, "Register mapping failed.\n"); - err = -ENOMEM; - goto err_out2; - } - - qdev->doorbell_area_size = pci_resource_len(pdev, 3); - qdev->doorbell_area = - ioremap_nocache(pci_resource_start(pdev, 3), - pci_resource_len(pdev, 3)); - if (!qdev->doorbell_area) { - dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); - err = -ENOMEM; - goto err_out2; - } - - err = ql_get_board_info(qdev); - if (err) { - dev_err(&pdev->dev, "Register access failed.\n"); - err = -EIO; - goto err_out2; - } - qdev->msg_enable = netif_msg_init(debug, default_msg); - spin_lock_init(&qdev->hw_lock); - spin_lock_init(&qdev->stats_lock); - - if (qlge_mpi_coredump) { - qdev->mpi_coredump = - vmalloc(sizeof(struct ql_mpi_coredump)); - if (qdev->mpi_coredump == NULL) { - err = -ENOMEM; - goto err_out2; - } - if (qlge_force_coredump) - set_bit(QL_FRC_COREDUMP, &qdev->flags); - } - /* make sure the EEPROM is good */ - err = qdev->nic_ops->get_flash(qdev); - if (err) { - dev_err(&pdev->dev, "Invalid FLASH.\n"); - goto err_out2; - } - - /* Keep local copy of current mac address. */ - memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); - - /* Set up the default ring sizes. */ - qdev->tx_ring_size = NUM_TX_RING_ENTRIES; - qdev->rx_ring_size = NUM_RX_RING_ENTRIES; - - /* Set up the coalescing parameters. */ - qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT; - qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT; - qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; - qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; - - /* - * Set up the operating parameters. - */ - qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, - ndev->name); - if (!qdev->workqueue) { - err = -ENOMEM; - goto err_out2; - } - - INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); - INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); - INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); - INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); - INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); - INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); - init_completion(&qdev->ide_completion); - mutex_init(&qdev->mpi_mutex); - - if (!cards_found) { - dev_info(&pdev->dev, "%s\n", DRV_STRING); - dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n", - DRV_NAME, DRV_VERSION); - } - return 0; -err_out2: - ql_release_all(pdev); -err_out1: - pci_disable_device(pdev); - return err; -} - -static const struct net_device_ops qlge_netdev_ops = { - .ndo_open = qlge_open, - .ndo_stop = qlge_close, - .ndo_start_xmit = qlge_send, - .ndo_change_mtu = qlge_change_mtu, - .ndo_get_stats = qlge_get_stats, - .ndo_set_rx_mode = qlge_set_multicast_list, - .ndo_set_mac_address = qlge_set_mac_address, - .ndo_validate_addr = eth_validate_addr, - .ndo_tx_timeout = qlge_tx_timeout, - .ndo_set_features = qlge_set_features, - .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, -}; - -static void ql_timer(struct timer_list *t) -{ - struct ql_adapter *qdev = from_timer(qdev, t, timer); - u32 var = 0; - - var = ql_read32(qdev, STS); - if (pci_channel_offline(qdev->pdev)) { - netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var); - return; - } - - mod_timer(&qdev->timer, jiffies + (5*HZ)); -} - -static int qlge_probe(struct pci_dev *pdev, - const struct pci_device_id *pci_entry) -{ - 
struct net_device *ndev = NULL; - struct ql_adapter *qdev = NULL; - static int cards_found; - int err = 0; - - ndev = alloc_etherdev_mq(sizeof(struct ql_adapter), - min(MAX_CPUS, netif_get_num_default_rss_queues())); - if (!ndev) - return -ENOMEM; - - err = ql_init_device(pdev, ndev, cards_found); - if (err < 0) { - free_netdev(ndev); - return err; - } - - qdev = netdev_priv(ndev); - SET_NETDEV_DEV(ndev, &pdev->dev); - ndev->hw_features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_TSO | - NETIF_F_TSO_ECN | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_RXCSUM; - ndev->features = ndev->hw_features; - ndev->vlan_features = ndev->hw_features; - /* vlan gets same features (except vlan filter) */ - ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX); - - if (test_bit(QL_DMA64, &qdev->flags)) - ndev->features |= NETIF_F_HIGHDMA; - - /* - * Set up net_device structure. - */ - ndev->tx_queue_len = qdev->tx_ring_size; - ndev->irq = pdev->irq; - - ndev->netdev_ops = &qlge_netdev_ops; - ndev->ethtool_ops = &qlge_ethtool_ops; - ndev->watchdog_timeo = 10 * HZ; - - /* MTU range: this driver only supports 1500 or 9000, so this only - * filters out values above or below, and we'll rely on - * qlge_change_mtu to make sure only 1500 or 9000 are allowed - */ - ndev->min_mtu = ETH_DATA_LEN; - ndev->max_mtu = 9000; - - err = register_netdev(ndev); - if (err) { - dev_err(&pdev->dev, "net device registration failed.\n"); - ql_release_all(pdev); - pci_disable_device(pdev); - free_netdev(ndev); - return err; - } - /* Start up the timer to trigger EEH if - * the bus goes dead - */ - timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE); - mod_timer(&qdev->timer, jiffies + (5*HZ)); - ql_link_off(qdev); - ql_display_dev_info(ndev); - atomic_set(&qdev->lb_count, 0); - cards_found++; - return 0; -} - -netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev) -{ - return qlge_send(skb, ndev); -} - -int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget) -{ - return ql_clean_inbound_rx_ring(rx_ring, budget); -} - -static void qlge_remove(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - del_timer_sync(&qdev->timer); - ql_cancel_all_work_sync(qdev); - unregister_netdev(ndev); - ql_release_all(pdev); - pci_disable_device(pdev); - free_netdev(ndev); -} - -/* Clean up resources without touching hardware. */ -static void ql_eeh_close(struct net_device *ndev) -{ - int i; - struct ql_adapter *qdev = netdev_priv(ndev); - - if (netif_carrier_ok(ndev)) { - netif_carrier_off(ndev); - netif_stop_queue(ndev); - } - - /* Cancel all the outstanding work. */ - ql_cancel_all_work_sync(qdev); - - for (i = 0; i < qdev->rss_ring_count; i++) - netif_napi_del(&qdev->rx_ring[i].napi); - - clear_bit(QL_ADAPTER_UP, &qdev->flags); - ql_tx_ring_clean(qdev); - ql_free_rx_buffers(qdev); - ql_release_adapter_resources(qdev); -} - -/* - * This callback is called by the PCI subsystem whenever - * a PCI bus error is detected.
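The three callbacks that follow implement the standard EEH contract: error_detected() maps the channel state to a recovery verdict, slot_reset() re-enables and resets the device, and resume() restarts traffic. A compact sketch of the state-to-verdict mapping, with simplified stand-ins for the PCI core enums:

enum chan_state { CHAN_NORMAL, CHAN_FROZEN, CHAN_PERM_FAILURE };
enum ers_result { ERS_CAN_RECOVER, ERS_NEED_RESET, ERS_DISCONNECT };

static enum ers_result error_detected(enum chan_state state)
{
        switch (state) {
        case CHAN_NORMAL:
                return ERS_CAN_RECOVER;         /* transient; no reset needed */
        case CHAN_FROZEN:
                return ERS_NEED_RESET;          /* detach now, expect slot_reset() */
        case CHAN_PERM_FAILURE:
                return ERS_DISCONNECT;          /* device is gone for good */
        }
        return ERS_NEED_RESET;                  /* unknown state: ask for a reset */
}

int main(void)
{
        return error_detected(CHAN_FROZEN) == ERS_NEED_RESET ? 0 : 1;
}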
- */ -static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - - switch (state) { - case pci_channel_io_normal: - return PCI_ERS_RESULT_CAN_RECOVER; - case pci_channel_io_frozen: - netif_device_detach(ndev); - del_timer_sync(&qdev->timer); - if (netif_running(ndev)) - ql_eeh_close(ndev); - pci_disable_device(pdev); - return PCI_ERS_RESULT_NEED_RESET; - case pci_channel_io_perm_failure: - dev_err(&pdev->dev, - "%s: pci_channel_io_perm_failure.\n", __func__); - del_timer_sync(&qdev->timer); - ql_eeh_close(ndev); - set_bit(QL_EEH_FATAL, &qdev->flags); - return PCI_ERS_RESULT_DISCONNECT; - } - - /* Request a slot reset. */ - return PCI_ERS_RESULT_NEED_RESET; -} - -/* - * This callback is called after the PCI bus has been reset. - * Basically, this tries to restart the card from scratch. - * This is a shortened version of the device probe/discovery code, - * it resembles the first half of the qlge_probe() routine. - */ -static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - - pdev->error_state = pci_channel_io_normal; - - pci_restore_state(pdev); - if (pci_enable_device(pdev)) { - netif_err(qdev, ifup, qdev->ndev, - "Cannot re-enable PCI device after reset.\n"); - return PCI_ERS_RESULT_DISCONNECT; - } - pci_set_master(pdev); - - if (ql_adapter_reset(qdev)) { - netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n"); - set_bit(QL_EEH_FATAL, &qdev->flags); - return PCI_ERS_RESULT_DISCONNECT; - } - - return PCI_ERS_RESULT_RECOVERED; -} - -static void qlge_io_resume(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - int err = 0; - - if (netif_running(ndev)) { - err = qlge_open(ndev); - if (err) { - netif_err(qdev, ifup, qdev->ndev, - "Device initialization failed after reset.\n"); - return; - } - } else { - netif_err(qdev, ifup, qdev->ndev, - "Device was not running prior to EEH.\n"); - } - mod_timer(&qdev->timer, jiffies + (5*HZ)); - netif_device_attach(ndev); -} - -static const struct pci_error_handlers qlge_err_handler = { - .error_detected = qlge_io_error_detected, - .slot_reset = qlge_io_slot_reset, - .resume = qlge_io_resume, -}; - -static int qlge_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - int err; - - netif_device_detach(ndev); - del_timer_sync(&qdev->timer); - - if (netif_running(ndev)) { - err = ql_adapter_down(qdev); - if (err) - return err; - } - - ql_wol(qdev); - err = pci_save_state(pdev); - if (err) - return err; - - pci_disable_device(pdev); - - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return 0; -} - -#ifdef CONFIG_PM -static int qlge_resume(struct pci_dev *pdev) -{ - struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - int err; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - err = pci_enable_device(pdev); - if (err) { - netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n"); - return err; - } - pci_set_master(pdev); - - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - if (netif_running(ndev)) { - err = ql_adapter_up(qdev); - if (err) - return err; - } - - mod_timer(&qdev->timer, jiffies + (5*HZ)); - 
netif_device_attach(ndev); - - return 0; -} -#endif /* CONFIG_PM */ - -static void qlge_shutdown(struct pci_dev *pdev) -{ - qlge_suspend(pdev, PMSG_SUSPEND); -} - -static struct pci_driver qlge_driver = { - .name = DRV_NAME, - .id_table = qlge_pci_tbl, - .probe = qlge_probe, - .remove = qlge_remove, -#ifdef CONFIG_PM - .suspend = qlge_suspend, - .resume = qlge_resume, -#endif - .shutdown = qlge_shutdown, - .err_handler = &qlge_err_handler -}; - -module_pci_driver(qlge_driver); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c deleted file mode 100644 index 957c72985a06..000000000000 --- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c +++ /dev/null @@ -1,1285 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include "qlge.h" - -int ql_unpause_mpi_risc(struct ql_adapter *qdev) -{ - u32 tmp; - - /* Un-pause the RISC */ - tmp = ql_read32(qdev, CSR); - if (!(tmp & CSR_RP)) - return -EIO; - - ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE); - return 0; -} - -int ql_pause_mpi_risc(struct ql_adapter *qdev) -{ - u32 tmp; - int count = UDELAY_COUNT; - - /* Pause the RISC */ - ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE); - do { - tmp = ql_read32(qdev, CSR); - if (tmp & CSR_RP) - break; - mdelay(UDELAY_DELAY); - count--; - } while (count); - return (count == 0) ? -ETIMEDOUT : 0; -} - -int ql_hard_reset_mpi_risc(struct ql_adapter *qdev) -{ - u32 tmp; - int count = UDELAY_COUNT; - - /* Reset the RISC */ - ql_write32(qdev, CSR, CSR_CMD_SET_RST); - do { - tmp = ql_read32(qdev, CSR); - if (tmp & CSR_RR) { - ql_write32(qdev, CSR, CSR_CMD_CLR_RST); - break; - } - mdelay(UDELAY_DELAY); - count--; - } while (count); - return (count == 0) ? -ETIMEDOUT : 0; -} - -int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data) -{ - int status; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); - if (status) - goto exit; - /* set up for reg read */ - ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R); - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); - if (status) - goto exit; - /* get the data */ - *data = ql_read32(qdev, PROC_DATA); -exit: - return status; -} - -int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data) -{ - int status = 0; - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); - if (status) - goto exit; - /* write the data to the data reg */ - ql_write32(qdev, PROC_DATA, data); - /* trigger the write */ - ql_write32(qdev, PROC_ADDR, reg); - /* wait for reg to come ready */ - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); - if (status) - goto exit; -exit: - return status; -} - -int ql_soft_reset_mpi_risc(struct ql_adapter *qdev) -{ - int status; - status = ql_write_mpi_reg(qdev, 0x00001010, 1); - return status; -} - -/* Determine if we are in charge of the firmware. If - * we are the lower of the 2 NIC pcie functions, or if - * we are the higher function and the lower function - * is not enabled. - */ -int ql_own_firmware(struct ql_adapter *qdev) -{ - u32 temp; - - /* If we are the lower of the 2 NIC functions - * on the chip then we are responsible for - * core dump and firmware reset after an error. - */ - if (qdev->func < qdev->alt_func) - return 1; - - /* If we are the higher of the 2 NIC functions - * on the chip and the lower function is not - * enabled, then we are responsible for - * core dump and firmware reset after an error.
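The ownership rule ql_own_firmware() encodes can be read as a two-line predicate: the lower-numbered NIC function owns the shared firmware duties, and the higher one takes over only when the lower function's enable bit in STS (bit 8 + alt_func, per the code just below) is clear. A stand-alone restatement:

#include <stdbool.h>
#include <stdint.h>

static bool own_firmware(unsigned int func, unsigned int alt_func, uint32_t sts)
{
        if (func < alt_func)
                return true;                    /* we are the lower function */
        /* higher function owns it only if the lower one is not enabled */
        return !(sts & (1u << (8 + alt_func)));
}

int main(void)
{
        return own_firmware(1, 2, 0) ? 0 : 1;
}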
- */ - temp = ql_read32(qdev, STS); - if (!(temp & (1 << (8 + qdev->alt_func)))) - return 1; - - return 0; -} - -static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int i, status; - - status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); - if (status) - return -EBUSY; - for (i = 0; i < mbcp->out_count; i++) { - status = - ql_read_mpi_reg(qdev, qdev->mailbox_out + i, - &mbcp->mbox_out[i]); - if (status) { - netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n"); - break; - } - } - ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ - return status; -} - -/* Wait for a single mailbox command to complete. - * Returns zero on success. - */ -static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev) -{ - int count = 100; - u32 value; - - do { - value = ql_read32(qdev, STS); - if (value & STS_PI) - return 0; - mdelay(UDELAY_DELAY); /* 100ms */ - } while (--count); - return -ETIMEDOUT; -} - -/* Execute a single mailbox command. - * Caller must hold PROC_ADDR semaphore. - */ -static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int i, status; - - /* - * Make sure there's nothing pending. - * This shouldn't happen. - */ - if (ql_read32(qdev, CSR) & CSR_HRI) - return -EIO; - - status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); - if (status) - return status; - - /* - * Fill the outbound mailboxes. - */ - for (i = 0; i < mbcp->in_count; i++) { - status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i, - mbcp->mbox_in[i]); - if (status) - goto end; - } - /* - * Wake up the MPI firmware. - */ - ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT); -end: - ql_sem_unlock(qdev, SEM_PROC_REG_MASK); - return status; -} - -/* We are being asked by firmware to accept - * a change to the port. This is only - * a change to max frame sizes (Tx/Rx), pause - * parameters, or loopback mode. We wake up a worker - * to handle processing this since a mailbox command - * will need to be sent to ACK the request. - */ -static int ql_idc_req_aen(struct ql_adapter *qdev) -{ - int status; - struct mbox_params *mbcp = &qdev->idc_mbc; - - netif_err(qdev, drv, qdev->ndev, "Enter!\n"); - /* Get the status data and start up a thread to - * handle the request. - */ - mbcp->out_count = 4; - status = ql_get_mb_sts(qdev, mbcp); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Could not read MPI, resetting ASIC!\n"); - ql_queue_asic_error(qdev); - } else { - /* Begin polled mode early so - * we don't get another interrupt - * when we leave mpi_worker. - */ - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); - queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0); - } - return status; -} - -/* Process an inter-device event completion. - * If good, signal the caller's completion. - */ -static int ql_idc_cmplt_aen(struct ql_adapter *qdev) -{ - int status; - struct mbox_params *mbcp = &qdev->idc_mbc; - mbcp->out_count = 4; - status = ql_get_mb_sts(qdev, mbcp); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Could not read MPI, resetting RISC!\n"); - ql_queue_fw_error(qdev); - } else - /* Wake up the sleeping mpi_idc_work thread that is - * waiting for this event.
- */ - complete(&qdev->ide_completion); - - return status; -} - -static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - mbcp->out_count = 2; - - status = ql_get_mb_sts(qdev, mbcp); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "%s: Could not get mailbox status.\n", __func__); - return; - } - - qdev->link_status = mbcp->mbox_out[1]; - netif_err(qdev, drv, qdev->ndev, "Link Up.\n"); - - /* If we're coming back from an IDC event - * then set up the CAM and frame routing. - */ - if (test_bit(QL_CAM_RT_SET, &qdev->flags)) { - status = ql_cam_route_initialize(qdev); - if (status) { - netif_err(qdev, ifup, qdev->ndev, - "Failed to init CAM/Routing tables.\n"); - return; - } else - clear_bit(QL_CAM_RT_SET, &qdev->flags); - } - - /* Queue up a worker to check the frame - * size information, and fix it if it's not - * to our liking. - */ - if (!test_bit(QL_PORT_CFG, &qdev->flags)) { - netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n"); - set_bit(QL_PORT_CFG, &qdev->flags); - /* Begin polled mode early so - * we don't get another interrupt - * when we leave mpi_worker dpc. - */ - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); - queue_delayed_work(qdev->workqueue, - &qdev->mpi_port_cfg_work, 0); - } - - ql_link_on(qdev); -} - -static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - - mbcp->out_count = 3; - - status = ql_get_mb_sts(qdev, mbcp); - if (status) - netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n"); - - ql_link_off(qdev); -} - -static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - - mbcp->out_count = 5; - - status = ql_get_mb_sts(qdev, mbcp); - if (status) - netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n"); - else - netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n"); - - return status; -} - -static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - - mbcp->out_count = 1; - - status = ql_get_mb_sts(qdev, mbcp); - if (status) - netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n"); - else - netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n"); - - return status; -} - -static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - - mbcp->out_count = 6; - - status = ql_get_mb_sts(qdev, mbcp); - if (status) - netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n"); - else { - int i; - netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n"); - for (i = 0; i < mbcp->out_count; i++) - netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n", - i, mbcp->mbox_out[i]); - - } - - return status; -} - -static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - - mbcp->out_count = 2; - - status = ql_get_mb_sts(qdev, mbcp); - if (status) { - netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n"); - } else { - netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n", - mbcp->mbox_out[1]); - qdev->fw_rev_id = mbcp->mbox_out[1]; - status = ql_cam_route_initialize(qdev); - if (status) - netif_err(qdev, ifup, qdev->ndev, - "Failed to init CAM/Routing tables.\n"); - } -} - -/* Process an async event and clear it unless it's an - * error condition. - * This can get called iteratively from the mpi_work thread - * when events arrive via an interrupt. - * It also gets called when a mailbox command is polling for - * its completion.
*/ -static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - int orig_count = mbcp->out_count; - - /* Just get mailbox zero for now. */ - mbcp->out_count = 1; - status = ql_get_mb_sts(qdev, mbcp); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Could not read MPI, resetting ASIC!\n"); - ql_queue_asic_error(qdev); - goto end; - } - - switch (mbcp->mbox_out[0]) { - - /* This case is only active when we arrive here - * as a result of issuing a mailbox command to - * the firmware. - */ - case MB_CMD_STS_INTRMDT: - case MB_CMD_STS_GOOD: - case MB_CMD_STS_INVLD_CMD: - case MB_CMD_STS_XFC_ERR: - case MB_CMD_STS_CSUM_ERR: - case MB_CMD_STS_ERR: - case MB_CMD_STS_PARAM_ERR: - /* We can only get mailbox status if we're polling from an - * unfinished command. Get the rest of the status data and - * return back to the caller. - * We only end up here when we're polling for a mailbox - * command completion. - */ - mbcp->out_count = orig_count; - status = ql_get_mb_sts(qdev, mbcp); - return status; - - /* We are being asked by firmware to accept - * a change to the port. This is only - * a change to max frame sizes (Tx/Rx), pause - * parameters, or loopback mode. - */ - case AEN_IDC_REQ: - status = ql_idc_req_aen(qdev); - break; - - /* Process an inbound IDC event. - * This will happen when we're trying to - * change tx/rx max frame size, change pause - * parameters or loopback mode. - */ - case AEN_IDC_CMPLT: - case AEN_IDC_EXT: - status = ql_idc_cmplt_aen(qdev); - break; - - case AEN_LINK_UP: - ql_link_up(qdev, mbcp); - break; - - case AEN_LINK_DOWN: - ql_link_down(qdev, mbcp); - break; - - case AEN_FW_INIT_DONE: - /* If we're in the process of executing the firmware, - * then convert the status to normal mailbox status. - */ - if (mbcp->mbox_in[0] == MB_CMD_EX_FW) { - mbcp->out_count = orig_count; - status = ql_get_mb_sts(qdev, mbcp); - mbcp->mbox_out[0] = MB_CMD_STS_GOOD; - return status; - } - ql_init_fw_done(qdev, mbcp); - break; - - case AEN_AEN_SFP_IN: - ql_sfp_in(qdev, mbcp); - break; - - case AEN_AEN_SFP_OUT: - ql_sfp_out(qdev, mbcp); - break; - - /* This event can arrive at boot time or after an - * MPI reset if the firmware failed to initialize. - */ - case AEN_FW_INIT_FAIL: - /* If we're in the process of executing the firmware, - * then convert the status to normal mailbox status. - */ - if (mbcp->mbox_in[0] == MB_CMD_EX_FW) { - mbcp->out_count = orig_count; - status = ql_get_mb_sts(qdev, mbcp); - mbcp->mbox_out[0] = MB_CMD_STS_ERR; - return status; - } - netif_err(qdev, drv, qdev->ndev, - "Firmware initialization failed.\n"); - status = -EIO; - ql_queue_fw_error(qdev); - break; - - case AEN_SYS_ERR: - netif_err(qdev, drv, qdev->ndev, "System Error.\n"); - ql_queue_fw_error(qdev); - status = -EIO; - break; - - case AEN_AEN_LOST: - ql_aen_lost(qdev, mbcp); - break; - - case AEN_DCBX_CHG: - /* Need to support AEN 8110 */ - break; - default: - netif_err(qdev, drv, qdev->ndev, - "Unsupported AE %.08x.\n", mbcp->mbox_out[0]); - /* Clear the MPI firmware status. */ - } -end: - ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); - /* Restore the original mailbox count to - * what the caller asked for. This can get - * changed when a mailbox command is waiting - * for a response and an AEN arrives and - * is handled. - */ - mbcp->out_count = orig_count; - return status; -} - -/* Execute a single mailbox command. - * mbcp is a pointer to an array of u32. Each - * element in the array contains the value for its - * respective mailbox register.
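A detail worth calling out in the handler above and the command loop below: the driver classifies mailbox 0 by masking with 0x0000f000, so a final or intermediate command status can be told apart from an AEN that arrived mid-command. A sketch of that test (the two status values mirror the driver's MB_CMD_STS_* codes, but treat them as illustrative here):

#include <stdbool.h>
#include <stdint.h>

#define STS_GOOD        0x4000  /* illustrative MB_CMD_STS_GOOD */
#define STS_INTRMDT     0x1000  /* illustrative MB_CMD_STS_INTRMDT */

static bool is_cmd_completion(uint32_t mbox0)
{
        uint32_t nibble = mbox0 & 0x0000f000;

        return nibble == STS_GOOD || nibble == STS_INTRMDT;
}

int main(void)
{
        return is_cmd_completion(0x4123) ? 0 : 1;       /* status nibble 0x4 */
}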
- */ -static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) -{ - int status; - unsigned long count; - - mutex_lock(&qdev->mpi_mutex); - - /* Begin polled mode for MPI */ - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); - - /* Load the mailbox registers and wake up MPI RISC. */ - status = ql_exec_mb_cmd(qdev, mbcp); - if (status) - goto end; - - - /* If we're generating a system error, then there's nothing - * to wait for. - */ - if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR) - goto end; - - /* Wait for the command to complete. We loop - * here because some AEN might arrive while - * we're waiting for the mailbox command to - * complete. If more than 5 seconds expire we can - * assume something is wrong. */ - count = jiffies + HZ * MAILBOX_TIMEOUT; - do { - /* Wait for the interrupt to come in. */ - status = ql_wait_mbx_cmd_cmplt(qdev); - if (status) - continue; - - /* Process the event. If it's an AEN, it - * will be handled in-line or a worker - * will be spawned. If it's our completion - * we will catch it below. - */ - status = ql_mpi_handler(qdev, mbcp); - if (status) - goto end; - - /* It's either the completion of our mailbox - * command or an AEN. If it's our - * completion then get out. - */ - if (((mbcp->mbox_out[0] & 0x0000f000) == - MB_CMD_STS_GOOD) || - ((mbcp->mbox_out[0] & 0x0000f000) == - MB_CMD_STS_INTRMDT)) - goto done; - } while (time_before(jiffies, count)); - - netif_err(qdev, drv, qdev->ndev, - "Timed out waiting for mailbox complete.\n"); - status = -ETIMEDOUT; - goto end; - -done: - - /* Now we can clear the interrupt condition - * and look at our status. - */ - ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); - - if (((mbcp->mbox_out[0] & 0x0000f000) != - MB_CMD_STS_GOOD) && - ((mbcp->mbox_out[0] & 0x0000f000) != - MB_CMD_STS_INTRMDT)) { - status = -EIO; - } -end: - /* End polled mode for MPI */ - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); - mutex_unlock(&qdev->mpi_mutex); - return status; -} - -/* Get MPI firmware version. This will be used for - * driver banner and for ethtool info. - * Returns zero on success. - */ -int ql_mb_about_fw(struct ql_adapter *qdev) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status = 0; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 1; - mbcp->out_count = 3; - - mbcp->mbox_in[0] = MB_CMD_ABOUT_FW; - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, - "Failed about firmware command\n"); - status = -EIO; - } - - /* Store the firmware version */ - qdev->fw_rev_id = mbcp->mbox_out[1]; - - return status; -} - -/* Get functional state for MPI firmware. - * Returns zero on success. - */ -int ql_mb_get_fw_state(struct ql_adapter *qdev) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status = 0; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 1; - mbcp->out_count = 2; - - mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE; - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, - "Failed Get Firmware State.\n"); - status = -EIO; - } - - /* If bit zero is set in mbx 1 then the firmware is - * running, but not initialized. This should never - * happen.
- */ - if (mbcp->mbox_out[1] & 1) { - netif_err(qdev, drv, qdev->ndev, - "Firmware waiting for initialization.\n"); - status = -EIO; - } - - return status; -} - -/* Send an ACK mailbox command to the firmware to - * let it continue with the change. - */ -static int ql_mb_idc_ack(struct ql_adapter *qdev) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status = 0; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 5; - mbcp->out_count = 1; - - mbcp->mbox_in[0] = MB_CMD_IDC_ACK; - mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1]; - mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2]; - mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3]; - mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4]; - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n"); - status = -EIO; - } - return status; -} - -/* Set link settings and maximum frame size settings - * for the current port. - * Most likely will block. - */ -int ql_mb_set_port_cfg(struct ql_adapter *qdev) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status = 0; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 3; - mbcp->out_count = 1; - - mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG; - mbcp->mbox_in[1] = qdev->link_config; - mbcp->mbox_in[2] = qdev->max_frame_size; - - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) { - netif_err(qdev, drv, qdev->ndev, - "Port Config sent, wait for IDC.\n"); - } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, - "Failed Set Port Configuration.\n"); - status = -EIO; - } - return status; -} - -static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr, - u32 size) -{ - int status = 0; - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 9; - mbcp->out_count = 1; - - mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM; - mbcp->mbox_in[1] = LSW(addr); - mbcp->mbox_in[2] = MSW(req_dma); - mbcp->mbox_in[3] = LSW(req_dma); - mbcp->mbox_in[4] = MSW(size); - mbcp->mbox_in[5] = LSW(size); - mbcp->mbox_in[6] = MSW(MSD(req_dma)); - mbcp->mbox_in[7] = LSW(MSD(req_dma)); - mbcp->mbox_in[8] = MSW(addr); - - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n"); - status = -EIO; - } - return status; -} - -/* Issue a mailbox command to dump RISC RAM. */ -int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, - u32 ram_addr, int word_count) -{ - int status; - char *my_buf; - dma_addr_t buf_dma; - - my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32), - &buf_dma); - if (!my_buf) - return -EIO; - - status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count); - if (!status) - memcpy(buf, my_buf, word_count * sizeof(u32)); - - pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf, - buf_dma); - return status; -} - -/* Get link settings and maximum frame size settings - * for the current port. - * Most likely will block.
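The word-splitting in ql_mb_dump_ram() above is easy to misread, so here it is in isolation: the 64-bit DMA address is cut into 32-bit halves (LSD/MSD), each of which is cut into 16-bit halves (LSW/MSW) and spread over mailboxes 2/3 and 6/7. The macros below are written-out equivalents of the driver's, for clarity:

#include <stdint.h>
#include <stdio.h>

#define LSW(x) ((uint32_t)(x) & 0xffff)
#define MSW(x) (((uint32_t)(x) >> 16) & 0xffff)
#define LSD(x) ((uint64_t)(x) & 0xffffffff)
#define MSD(x) (((uint64_t)(x) >> 32) & 0xffffffff)

int main(void)
{
        uint64_t dma = 0x123456789abcdef0ULL;

        printf("mbox[2]=%04x mbox[3]=%04x mbox[6]=%04x mbox[7]=%04x\n",
               (unsigned)MSW(LSD(dma)), (unsigned)LSW(LSD(dma)),
               (unsigned)MSW(MSD(dma)), (unsigned)LSW(MSD(dma)));
        return 0;
}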
- */ -int ql_mb_get_port_cfg(struct ql_adapter *qdev) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status = 0; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 1; - mbcp->out_count = 3; - - mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG; - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, - "Failed Get Port Configuration.\n"); - status = -EIO; - } else { - netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, - "Passed Get Port Configuration.\n"); - qdev->link_config = mbcp->mbox_out[1]; - qdev->max_frame_size = mbcp->mbox_out[2]; - } - return status; -} - -int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 2; - mbcp->out_count = 1; - - mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE; - mbcp->mbox_in[1] = wol; - - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n"); - status = -EIO; - } - return status; -} - -int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status; - u8 *addr = qdev->ndev->dev_addr; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 8; - mbcp->out_count = 1; - - mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC; - if (enable_wol) { - mbcp->mbox_in[1] = (u32)addr[0]; - mbcp->mbox_in[2] = (u32)addr[1]; - mbcp->mbox_in[3] = (u32)addr[2]; - mbcp->mbox_in[4] = (u32)addr[3]; - mbcp->mbox_in[5] = (u32)addr[4]; - mbcp->mbox_in[6] = (u32)addr[5]; - mbcp->mbox_in[7] = 0; - } else { - mbcp->mbox_in[1] = 0; - mbcp->mbox_in[2] = 1; - mbcp->mbox_in[3] = 1; - mbcp->mbox_in[4] = 1; - mbcp->mbox_in[5] = 1; - mbcp->mbox_in[6] = 1; - mbcp->mbox_in[7] = 0; - } - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n"); - status = -EIO; - } - return status; -} - -/* IDC - Inter Device Communication... - * Some firmware commands require consent of adjacent FCOE - * function. This function waits for the OK, or a - * counter-request for a little more time. - * The firmware will complete the request if the other - * function doesn't respond. - */ -static int ql_idc_wait(struct ql_adapter *qdev) -{ - int status = -ETIMEDOUT; - long wait_time = 1 * HZ; - struct mbox_params *mbcp = &qdev->idc_mbc; - do { - /* Wait here for the command to complete - * via the IDC process. - */ - wait_time = - wait_for_completion_timeout(&qdev->ide_completion, - wait_time); - if (!wait_time) { - netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n"); - break; - } - /* Now examine the response from the IDC process. - * We might have a good completion or a request for - * more wait time.
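ql_idc_wait(), shown around this point, layers one twist on a plain completion wait: an AEN_IDC_EXT response extends the wait budget instead of ending it. A toy model of that loop, where wait_event() stands in for wait_for_completion_timeout() and replays a scripted extension followed by a completion:

#include <stdio.h>

#define AEN_CMPLT 1
#define AEN_EXT   2

/* Replays one "time extension" then a completion; returns 0 on timeout. */
static int wait_event(long *budget)
{
        static const int script[] = { AEN_EXT, AEN_CMPLT };
        static unsigned int step;

        if (*budget <= 0 || step >= 2)
                return 0;
        (*budget)--;
        return script[step++];
}

int main(void)
{
        long budget = 1;
        int code;

        while ((code = wait_event(&budget)) != 0) {
                if (code == AEN_EXT) {
                        budget += 1;    /* far side asked for more time */
                        continue;
                }
                /* AEN_CMPLT: the inter-device exchange finished */
                puts("IDC success");
                return 0;
        }
        puts("IDC timeout");
        return 1;
}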
- */ - if (mbcp->mbox_out[0] == AEN_IDC_EXT) { - netif_err(qdev, drv, qdev->ndev, - "IDC Time Extension from function.\n"); - wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f; - } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) { - netif_err(qdev, drv, qdev->ndev, "IDC Success.\n"); - status = 0; - break; - } else { - netif_err(qdev, drv, qdev->ndev, - "IDC: Invalid State 0x%.04x.\n", - mbcp->mbox_out[0]); - status = -EIO; - break; - } - } while (wait_time); - - return status; -} - -int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 2; - mbcp->out_count = 1; - - mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG; - mbcp->mbox_in[1] = led_config; - - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, - "Failed to set LED Configuration.\n"); - status = -EIO; - } - - return status; -} - -int ql_mb_get_led_cfg(struct ql_adapter *qdev) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 1; - mbcp->out_count = 2; - - mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG; - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { - netif_err(qdev, drv, qdev->ndev, - "Failed to get LED Configuration.\n"); - status = -EIO; - } else - qdev->led_config = mbcp->mbox_out[1]; - - return status; -} - -int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status; - - memset(mbcp, 0, sizeof(struct mbox_params)); - - mbcp->in_count = 1; - mbcp->out_count = 2; - - mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL; - mbcp->mbox_in[1] = control; - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) - return status; - - if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { - netif_err(qdev, drv, qdev->ndev, - "Command not supported by firmware.\n"); - status = -EINVAL; - } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { - /* This indicates that the firmware is - * already in the state we are trying to - * change it to. - */ - netif_err(qdev, drv, qdev->ndev, - "Command parameters make no change.\n"); - } - return status; -} - -/* Returns a negative error code or the mailbox command status. 
*/ -static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control) -{ - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int status; - - memset(mbcp, 0, sizeof(struct mbox_params)); - *control = 0; - - mbcp->in_count = 1; - mbcp->out_count = 2; - - mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL; - - status = ql_mailbox_command(qdev, mbcp); - if (status) - return status; - - if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) { - *control = mbcp->mbox_out[1]; - return status; - } - - if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { - netif_err(qdev, drv, qdev->ndev, - "Command not supported by firmware.\n"); - status = -EINVAL; - } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { - netif_err(qdev, drv, qdev->ndev, - "Failed to get MPI traffic control.\n"); - status = -EIO; - } - return status; -} - -int ql_wait_fifo_empty(struct ql_adapter *qdev) -{ - int count = 5; - u32 mgmnt_fifo_empty; - u32 nic_fifo_empty; - - do { - nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE; - ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty); - mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY; - if (nic_fifo_empty && mgmnt_fifo_empty) - return 0; - msleep(100); - } while (count-- > 0); - return -ETIMEDOUT; -} - -/* API called in work thread context to set new TX/RX - * maximum frame size values to match MTU. - */ -static int ql_set_port_cfg(struct ql_adapter *qdev) -{ - int status; - status = ql_mb_set_port_cfg(qdev); - if (status) - return status; - status = ql_idc_wait(qdev); - return status; -} - -/* The following routines are worker threads that process - * events that may sleep waiting for completion. - */ - -/* This thread gets the maximum TX and RX frame size values - * from the firmware and, if necessary, changes them to match - * the MTU setting. - */ -void ql_mpi_port_cfg_work(struct work_struct *work) -{ - struct ql_adapter *qdev = - container_of(work, struct ql_adapter, mpi_port_cfg_work.work); - int status; - - status = ql_mb_get_port_cfg(qdev); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Bug: Failed to get port config data.\n"); - goto err; - } - - if (qdev->link_config & CFG_JUMBO_FRAME_SIZE && - qdev->max_frame_size == - CFG_DEFAULT_MAX_FRAME_SIZE) - goto end; - - qdev->link_config |= CFG_JUMBO_FRAME_SIZE; - qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE; - status = ql_set_port_cfg(qdev); - if (status) { - netif_err(qdev, drv, qdev->ndev, - "Bug: Failed to set port config data.\n"); - goto err; - } -end: - clear_bit(QL_PORT_CFG, &qdev->flags); - return; -err: - ql_queue_fw_error(qdev); - goto end; -} - -/* Process an inter-device request. This is issued by - * the firmware in response to another function requesting - * a change to the port. We set a flag to indicate a change - * has been made and then send a mailbox command ACKing - * the change request. - */ -void ql_mpi_idc_work(struct work_struct *work) -{ - struct ql_adapter *qdev = - container_of(work, struct ql_adapter, mpi_idc_work.work); - int status; - struct mbox_params *mbcp = &qdev->idc_mbc; - u32 aen; - int timeout; - - aen = mbcp->mbox_out[1] >> 16; - timeout = (mbcp->mbox_out[1] >> 8) & 0xf; - - switch (aen) { - default: - netif_err(qdev, drv, qdev->ndev, - "Bug: Unhandled IDC action.\n"); - break; - case MB_CMD_PORT_RESET: - case MB_CMD_STOP_FW: - ql_link_off(qdev); - /* Fall through */ - case MB_CMD_SET_PORT_CFG: - /* Signal the resulting link up AEN - * that the frame routing and mac addr - * need to be set.
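ql_wait_fifo_empty() above is a retry loop over two independent drain conditions, both of which must hold in the same pass: the NIC FIFO bit from STS and the management FIFO bit returned by the mailbox query. A reduced model of that loop:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the STS_NFE check and the mailbox FIFO query. */
static bool nic_fifo_empty(int t)  { return t >= 2; }
static bool mgmt_fifo_empty(int t) { return t >= 3; }

int main(void)
{
        int tries;

        for (tries = 0; tries < 5; tries++) {
                if (nic_fifo_empty(tries) && mgmt_fifo_empty(tries)) {
                        puts("both FIFOs drained");
                        return 0;
                }
                /* the driver sleeps 100 ms (msleep(100)) between passes */
        }
        puts("timed out");
        return 1;       /* -ETIMEDOUT in the driver */
}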
- */ - set_bit(QL_CAM_RT_SET, &qdev->flags); - /* Do ACK if required */ - if (timeout) { - status = ql_mb_idc_ack(qdev); - if (status) - netif_err(qdev, drv, qdev->ndev, - "Bug: No pending IDC!\n"); - } else { - netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, - "IDC ACK not required\n"); - status = 0; /* success */ - } - break; - - /* These sub-commands issued by another (FCoE) - * function are requesting to do an operation - * on the shared resource (MPI environment). - * We currently don't issue these so we just - * ACK the request. - */ - case MB_CMD_IOP_RESTART_MPI: - case MB_CMD_IOP_PREP_LINK_DOWN: - /* Drop the link, reload the routing - * table when link comes up. - */ - ql_link_off(qdev); - set_bit(QL_CAM_RT_SET, &qdev->flags); - /* Fall through. */ - case MB_CMD_IOP_DVR_START: - case MB_CMD_IOP_FLASH_ACC: - case MB_CMD_IOP_CORE_DUMP_MPI: - case MB_CMD_IOP_PREP_UPDATE_MPI: - case MB_CMD_IOP_COMP_UPDATE_MPI: - case MB_CMD_IOP_NONE: /* an IDC without params */ - /* Do ACK if required */ - if (timeout) { - status = ql_mb_idc_ack(qdev); - if (status) - netif_err(qdev, drv, qdev->ndev, - "Bug: No pending IDC!\n"); - } else { - netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, - "IDC ACK not required\n"); - status = 0; /* success */ - } - break; - } -} - -void ql_mpi_work(struct work_struct *work) -{ - struct ql_adapter *qdev = - container_of(work, struct ql_adapter, mpi_work.work); - struct mbox_params mbc; - struct mbox_params *mbcp = &mbc; - int err = 0; - - mutex_lock(&qdev->mpi_mutex); - /* Begin polled mode for MPI */ - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); - - while (ql_read32(qdev, STS) & STS_PI) { - memset(mbcp, 0, sizeof(struct mbox_params)); - mbcp->out_count = 1; - /* Don't continue if an async event - * did not complete properly. - */ - err = ql_mpi_handler(qdev, mbcp); - if (err) - break; - } - - /* End polled mode for MPI */ - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); - mutex_unlock(&qdev->mpi_mutex); - ql_enable_completion_interrupt(qdev, 0); -} - -void ql_mpi_reset_work(struct work_struct *work) -{ - struct ql_adapter *qdev = - container_of(work, struct ql_adapter, mpi_reset_work.work); - cancel_delayed_work_sync(&qdev->mpi_work); - cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); - cancel_delayed_work_sync(&qdev->mpi_idc_work); - /* If we're not the dominant NIC function, - * then there is nothing to do. - */ - if (!ql_own_firmware(qdev)) { - netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n"); - return; - } - - if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) { - netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); - qdev->core_is_dumped = 1; - queue_delayed_work(qdev->workqueue, - &qdev->mpi_core_to_log, 5 * HZ); - } - ql_soft_reset_mpi_risc(qdev); -}