author | Linus Torvalds &lt;torvalds@linux-foundation.org&gt; | 2016-10-05 10:11:24 -0700
committer | Linus Torvalds &lt;torvalds@linux-foundation.org&gt; | 2016-10-05 10:11:24 -0700
commit | 687ee0ad4e897e29f4b41f7a20c866d74c5e0660 (patch)
tree | b31a2af35c24a54823674cdd126993b80daeac67 /drivers/net/ethernet/qlogic/qed/qed_dev.c
parent | 3ddf40e8c31964b744ff10abb48c8e36a83ec6e7 (diff)
parent | 03a1eabc3f54469abd4f1784182851b2e29630cc (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) BBR TCP congestion control, from Neal Cardwell, Yuchung Cheng and
co. at Google. https://lwn.net/Articles/701165/
2) Do TCP Small Queues for retransmits, from Eric Dumazet.
3) Support collect_md mode for all IPV4 and IPV6 tunnels, from Alexei
Starovoitov.
4) Allow cls_flower to classify packets in ip tunnels, from Amir Vadai.
5) Support DSA tagging in older mv88e6xxx switches, from Andrew Lunn.
6) Support GMAC protocol in iwlwifi mvm, from Ayala Beker.
7) Support ndo_poll_controller in mlx5, from Calvin Owens.
8) Move VRF processing to an output hook and allow l3mdev to be
loopback, from David Ahern.
9) Support SOCK_DESTROY for UDP sockets. Also from David Ahern.
10) Congestion control in RXRPC, from David Howells.
11) Support geneve RX offload in ixgbe, from Emil Tantilov.
12) When hitting pressure for new incoming TCP data SKBs, perform a
partial rather than a full purge of the OFO queue (which could be
huge); see the first sketch after this list. From Eric Dumazet.
13) Convert XFRM state and policy lookups to RCU, from Florian Westphal.
14) Support RX network flow classification in igb, from Gangfeng Huang.
15) Hardware offloading of eBPF in nfp driver, from Jakub Kicinski.
16) New skbmod packet action, from Jamal Hadi Salim.
17) Remove some inefficiencies in snmp proc output, from Jia He.
18) Add FIB notifications to properly propagate route changes to
hardware which is doing forwarding offloading. From Jiri Pirko.
19) New dsa driver for qca8xxx chips, from John Crispin.
20) Implement RFC7559 ipv6 router solicitation backoff (see the second
sketch after this list), from Maciej Żenczykowski.
21) Add L3 mode to ipvlan, from Mahesh Bandewar.
22) Support 802.1ad in mlx4, from Moshe Shemesh.
23) Support hardware LRO in mediatek driver, from Nelson Chang.
24) Add TC offloading to mlx5, from Or Gerlitz.
25) Convert various drivers to ethtool ksettings interfaces, from
Philippe Reynes.
26) TX max rate limiting for cxgb4, from Rahul Lakkireddy.
27) NAPI support for ath10k, from Rajkumar Manoharan.
28) Support XDP in mlx5, from Rana Shahout and Saeed Mahameed.
29) UDP replicast support in TIPC, from Richard Alpe.
30) Per-queue statistics for qed driver, from Sudarsana Reddy Kalluru.
31) Support BQL in thunderx driver, from Sunil Goutham.
32) TSO support in alx driver, from Tobias Regnery.
33) Add stream parser engine and use it in kcm.
34) Support async DHCP replies in ipconfig module, from Uwe
Kleine-König.
35) DSA port fast aging for mv88e6xxx driver, from Vivien Didelot.
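
Two short C sketches follow to make items 12 and 20 concrete. Both are illustrative userspace mock-ups written for this summary, not the merged kernel code; every constant, type and helper name in them is invented for illustration.

First, the partial out-of-order (OFO) purge of item 12: under memory pressure, free packets from one end of the OFO queue only until a reclaim goal is met, rather than flushing the entire queue. The real patch walks the socket's skb structures; a toy linked list stands in for them here.

/*
 * Illustrative-only sketch of a "partial OFO purge" -- not the actual
 * tcp_prune_ofo_queue() code. Packets are dropped from the tail
 * (highest sequence numbers) only until 'goal' bytes are reclaimed.
 */
#include <stdio.h>

struct ofo_pkt {
        unsigned int truesize;  /* memory charged for this packet */
        struct ofo_pkt *prev;   /* toward lower sequence numbers */
};

static unsigned int partial_purge(struct ofo_pkt **tail, unsigned int goal)
{
        unsigned int freed = 0;

        while (*tail && freed < goal) {
                struct ofo_pkt *victim = *tail;

                *tail = victim->prev;
                freed += victim->truesize;
                /* in the kernel, the victim skb would be freed here */
        }
        return freed;
}

int main(void)
{
        struct ofo_pkt a = { 2048, NULL }, b = { 4096, &a }, c = { 8192, &b };
        struct ofo_pkt *tail = &c;

        printf("freed %u bytes\n", partial_purge(&tail, 10000));
        printf("queue %s\n", tail ? "partially preserved" : "empty");
        return 0;
}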
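Second, the RFC 7559 backoff of item 20: router solicitations are retransmitted indefinitely, with the interval doubling from RTR_SOLICITATION_INTERVAL (4 s, per RFC 4861) up to the 3600 s cap RFC 7559 suggests, instead of giving up after a fixed number of attempts. This sketch omits the random jitter the RFC also requires.

/*
 * Minimal sketch of RFC 7559 truncated exponential backoff for IPv6
 * router solicitations -- not the kernel's addrconf implementation.
 */
#include <stdio.h>

#define RTR_SOLICITATION_INTERVAL     4    /* seconds, RFC 4861 */
#define RTR_SOLICITATION_MAX_INTERVAL 3600 /* seconds, RFC 7559 */

static unsigned int rs_delay(unsigned int attempt)
{
        unsigned long long d = RTR_SOLICITATION_INTERVAL;

        while (attempt-- && d < RTR_SOLICITATION_MAX_INTERVAL)
                d *= 2; /* exponential backoff, truncated at the cap */

        return d > RTR_SOLICITATION_MAX_INTERVAL ?
               RTR_SOLICITATION_MAX_INTERVAL : (unsigned int)d;
}

int main(void)
{
        for (unsigned int i = 0; i < 12; i++)
                printf("solicit #%u after %us\n", i, rs_delay(i));
        return 0;
}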
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1715 commits)
mlxsw: switchx2: Fix misuse of hard_header_len
mlxsw: spectrum: Fix misuse of hard_header_len
net/faraday: Stop NCSI device on shutdown
net/ncsi: Introduce ncsi_stop_dev()
net/ncsi: Rework the channel monitoring
net/ncsi: Allow to extend NCSI request properties
net/ncsi: Rework request index allocation
net/ncsi: Don't probe on the reserved channel ID (0x1f)
net/ncsi: Introduce NCSI_RESERVED_CHANNEL
net/ncsi: Avoid unused-value build warning from ia64-linux-gcc
net: Add netdev all_adj_list refcnt propagation to fix panic
net: phy: Add Edge-rate driver for Microsemi PHYs.
vmxnet3: Wake queue from reset work
i40e: avoid NULL pointer dereference and recursive errors on early PCI error
qed: Add RoCE ll2 & GSI support
qed: Add support for memory registeration verbs
qed: Add support for QP verbs
qed: PD,PKEY and CQ verb support
qed: Add support for RoCE hw init
qede: Add qedr framework
...
Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_dev.c')
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_dev.c | 489
1 file changed, 334 insertions(+), 155 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 0e4f4a9306b5..754f6a908858 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -29,14 +29,18 @@
 #include "qed_hw.h"
 #include "qed_init_ops.h"
 #include "qed_int.h"
+#include "qed_ll2.h"
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
 #include "qed_sriov.h"
 #include "qed_vf.h"
+#include "qed_roce.h"
 
-static spinlock_t qm_lock;
-static bool qm_lock_init = false;
+static DEFINE_SPINLOCK(qm_lock);
+
+#define QED_MIN_DPIS            (4)
+#define QED_MIN_PWM_REGION      (QED_WID_SIZE * QED_MIN_DPIS)
 
 /* API common to all protocols */
 enum BAR_ID {
@@ -44,8 +48,7 @@ enum BAR_ID {
         BAR_ID_1        /* Used for doorbells */
 };
 
-static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
-                           enum BAR_ID bar_id)
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
 {
         u32 bar_reg = (bar_id == BAR_ID_0 ?
                        PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
@@ -70,8 +73,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
         }
 }
 
-void qed_init_dp(struct qed_dev *cdev,
-                 u32 dp_module, u8 dp_level)
+void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
 {
         u32 i;
 
@@ -150,6 +152,9 @@ void qed_resc_free(struct qed_dev *cdev)
                 qed_eq_free(p_hwfn, p_hwfn->p_eq);
                 qed_consq_free(p_hwfn, p_hwfn->p_consq);
                 qed_int_free(p_hwfn);
+#ifdef CONFIG_QED_LL2
+                qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
+#endif
                 qed_iov_free(p_hwfn);
                 qed_dmae_info_free(p_hwfn);
                 qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
@@ -343,7 +348,6 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
         return 0;
 
 alloc_err:
-        DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
         qed_qm_info_free(p_hwfn);
         return -ENOMEM;
 }
@@ -407,6 +411,9 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 
 int qed_resc_alloc(struct qed_dev *cdev)
 {
+#ifdef CONFIG_QED_LL2
+        struct qed_ll2_info *p_ll2_info;
+#endif
         struct qed_consq *p_consq;
         struct qed_eq *p_eq;
         int i, rc = 0;
@@ -427,18 +434,12 @@ int qed_resc_alloc(struct qed_dev *cdev)
                              RESC_NUM(p_hwfn, QED_L2_QUEUE);
 
                 p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
-                if (!p_hwfn->p_tx_cids) {
-                        DP_NOTICE(p_hwfn,
-                                  "Failed to allocate memory for Tx Cids\n");
+                if (!p_hwfn->p_tx_cids)
                         goto alloc_no_mem;
-                }
 
                 p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
-                if (!p_hwfn->p_rx_cids) {
-                        DP_NOTICE(p_hwfn,
-                                  "Failed to allocate memory for Rx Cids\n");
+                if (!p_hwfn->p_rx_cids)
                         goto alloc_no_mem;
-                }
         }
 
         for_each_hwfn(cdev, i) {
@@ -523,29 +524,29 @@ int qed_resc_alloc(struct qed_dev *cdev)
                         goto alloc_no_mem;
                 p_hwfn->p_consq = p_consq;
 
+#ifdef CONFIG_QED_LL2
+                if (p_hwfn->using_ll2) {
+                        p_ll2_info = qed_ll2_alloc(p_hwfn);
+                        if (!p_ll2_info)
+                                goto alloc_no_mem;
+                        p_hwfn->p_ll2_info = p_ll2_info;
+                }
+#endif
+
                 /* DMA info initialization */
                 rc = qed_dmae_info_alloc(p_hwfn);
-                if (rc) {
-                        DP_NOTICE(p_hwfn,
-                                  "Failed to allocate memory for dmae_info structure\n");
+                if (rc)
                         goto alloc_err;
-                }
 
                 /* DCBX initialization */
                 rc = qed_dcbx_info_alloc(p_hwfn);
-                if (rc) {
-                        DP_NOTICE(p_hwfn,
-                                  "Failed to allocate memory for dcbx structure\n");
+                if (rc)
                         goto alloc_err;
-                }
         }
 
         cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
-        if (!cdev->reset_stats) {
-                DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
-                rc = -ENOMEM;
-                goto alloc_err;
-        }
+        if (!cdev->reset_stats)
                goto alloc_no_mem;
 
         return 0;
 
@@ -580,6 +581,10 @@ void qed_resc_setup(struct qed_dev *cdev)
                 qed_int_setup(p_hwfn,
                               p_hwfn->p_main_ptt);
 
                 qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+#ifdef CONFIG_QED_LL2
+                if (p_hwfn->using_ll2)
+                        qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
+#endif
         }
 }
 
@@ -605,9 +610,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
 
         /* Make sure notification is not set before initiating final cleanup */
         if (REG_RD(p_hwfn, addr)) {
-                DP_NOTICE(
-                        p_hwfn,
-                        "Unexpected; Found final cleanup notification before initiating final cleanup\n");
+                DP_NOTICE(p_hwfn,
+                          "Unexpected; Found final cleanup notification before initiating final cleanup\n");
                 REG_WR(p_hwfn, addr, 0);
         }
 
@@ -701,17 +705,14 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
                                 continue;
 
                         qed_init_cau_sb_entry(p_hwfn, &sb_entry,
-                                              p_block->function_id,
-                                              0, 0);
-                        STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
-                                         sb_entry);
+                                              p_block->function_id, 0, 0);
+                        STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
                 }
         }
 }
 
 static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
-                              struct qed_ptt *p_ptt,
-                              int hw_mode)
+                              struct qed_ptt *p_ptt, int hw_mode)
 {
         struct qed_qm_info *qm_info = &p_hwfn->qm_info;
         struct qed_qm_common_rt_init_params params;
@@ -759,7 +760,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
         qed_port_unpretend(p_hwfn, p_ptt);
 
         rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
-        if (rc != 0)
+        if (rc)
                 return rc;
 
         qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
@@ -780,6 +781,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
                 concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
                 qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
                 qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+                qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
+                qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
+                qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
         }
         /* pretend to original PF */
         qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
@@ -787,38 +791,141 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
         return rc;
 }
 
-static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
-                            struct qed_ptt *p_ptt,
-                            int hw_mode)
+static int
+qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
+                     struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
 {
-        int rc = 0;
+        u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
+        u32 dpi_bit_shift, dpi_count;
+        u32 min_dpis;
 
-        rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
-        if (rc != 0)
-                return rc;
+        /* Calculate DPI size */
+        dpi_page_size_1 = QED_WID_SIZE * n_cpus;
+        dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE);
+        dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2);
+        dpi_page_size = roundup_pow_of_two(dpi_page_size);
+        dpi_bit_shift = ilog2(dpi_page_size / 4096);
 
-        if (hw_mode & (1 << MODE_MF_SI)) {
-                u8 pf_id = 0;
+        dpi_count = pwm_region_size / dpi_page_size;
 
-                if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
-                        DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
-                                   "PF[%08x] is first eth on engine\n", pf_id);
+        min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
+        min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);
 
-                        /* We should have configured BIT for ppfid, i.e., the
-                         * relative function number in the port. But there's a
-                         * bug in LLH in BB where the ppfid is actually engine
-                         * based, so we need to take this into account.
-                         */
-                        qed_wr(p_hwfn, p_ptt,
-                               NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
-                }
+        p_hwfn->dpi_size = dpi_page_size;
+        p_hwfn->dpi_count = dpi_count;
+
+        qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
+
+        if (dpi_count < min_dpis)
+                return -EINVAL;
+
+        return 0;
+}
+
+enum QED_ROCE_EDPM_MODE {
+        QED_ROCE_EDPM_MODE_ENABLE = 0,
+        QED_ROCE_EDPM_MODE_FORCE_ON = 1,
+        QED_ROCE_EDPM_MODE_DISABLE = 2,
+};
 
-                /* Take the protocol-based hit vector if there is a hit,
-                 * otherwise take the other vector.
+static int
+qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+        u32 pwm_regsize, norm_regsize;
+        u32 non_pwm_conn, min_addr_reg1;
+        u32 db_bar_size, n_cpus;
+        u32 roce_edpm_mode;
+        u32 pf_dems_shift;
+        int rc = 0;
+        u8 cond;
+
+        db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1);
+        if (p_hwfn->cdev->num_hwfns > 1)
+                db_bar_size /= 2;
+
+        /* Calculate doorbell regions */
+        non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+                       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
+                                                   NULL) +
+                       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+                                                   NULL);
+        norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
+        min_addr_reg1 = norm_regsize / 4096;
+        pwm_regsize = db_bar_size - norm_regsize;
+
+        /* Check that the normal and PWM sizes are valid */
+        if (db_bar_size < norm_regsize) {
+                DP_ERR(p_hwfn->cdev,
+                       "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
+                       db_bar_size, norm_regsize);
+                return -EINVAL;
+        }
+
+        if (pwm_regsize < QED_MIN_PWM_REGION) {
+                DP_ERR(p_hwfn->cdev,
+                       "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
+                       pwm_regsize,
+                       QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
+                return -EINVAL;
+        }
+
+        /* Calculate number of DPIs */
+        roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
+        if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) ||
+            ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) {
+                /* Either EDPM is mandatory, or we are attempting to allocate a
+                 * WID per CPU.
                  */
-                qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
+                n_cpus = num_active_cpus();
+                rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
         }
-        return rc;
+
+        cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
+               (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
+        if (cond || p_hwfn->dcbx_no_edpm) {
+                /* Either EDPM is disabled from user configuration, or it is
+                 * disabled via DCBx, or it is not mandatory and we failed to
+                 * allocated a WID per CPU.
+                 */
+                n_cpus = 1;
+                rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+
+                if (cond)
+                        qed_rdma_dpm_bar(p_hwfn, p_ptt);
+        }
+
+        DP_INFO(p_hwfn,
+                "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
+                norm_regsize,
+                pwm_regsize,
+                p_hwfn->dpi_size,
+                p_hwfn->dpi_count,
+                ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
+                "disabled" : "enabled");
+
+        if (rc) {
+                DP_ERR(p_hwfn,
+                       "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
+                       p_hwfn->dpi_count,
+                       p_hwfn->pf_params.rdma_pf_params.min_dpis);
+                return -EINVAL;
+        }
+
+        p_hwfn->dpi_start_offset = norm_regsize;
+
+        /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
+        pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
+        qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
+        qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
+
+        return 0;
+}
+
+static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt, int hw_mode)
+{
+        return qed_init_run(p_hwfn, p_ptt, PHASE_PORT,
+                            p_hwfn->port_id, hw_mode);
 }
 
 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
@@ -848,7 +955,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
         qed_int_igu_init_rt(p_hwfn);
 
         /* Set VLAN in NIG if needed */
-        if (hw_mode & (1 << MODE_MF_SD)) {
+        if (hw_mode & BIT(MODE_MF_SD)) {
                 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
                 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
                 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
@@ -856,7 +963,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
         }
 
         /* Enable classification by MAC if needed */
-        if (hw_mode & (1 << MODE_MF_SI)) {
+        if (hw_mode & BIT(MODE_MF_SI)) {
                 DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                            "Configuring TAGMAC_CLS_TYPE\n");
                 STORE_RT_REG(p_hwfn,
@@ -871,7 +978,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 
         /* Cleanup chip from previous driver if such remains exist */
         rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
-        if (rc != 0)
+        if (rc)
                 return rc;
 
         /* PF Init sequence */
@@ -887,20 +994,9 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
         /* Pure runtime initializations - directly to the HW */
         qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
 
-        if (hw_mode & (1 << MODE_MF_SI)) {
-                u8 pf_id = 0;
-                u32 val = 0;
-
-                if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
-                        if (p_hwfn->rel_pf_id == pf_id) {
-                                DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
-                                           "PF[%d] is first ETH on engine\n",
-                                           pf_id);
-                                val = 1;
-                        }
-                        qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
-                }
-        }
+        rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
+        if (rc)
+                return rc;
 
         if (b_hw_start) {
                 /* enable interrupts */
@@ -950,8 +1046,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
         /* Read shadow of current MFW mailbox */
         qed_mcp_read_mb(p_hwfn, p_main_ptt);
         memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
-               p_hwfn->mcp_info->mfw_mb_cur,
-               p_hwfn->mcp_info->mfw_mb_length);
+               p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
 }
 
 int qed_hw_init(struct qed_dev *cdev,
@@ -971,7 +1066,7 @@ int qed_hw_init(struct qed_dev *cdev,
 
         if (IS_PF(cdev)) {
                 rc = qed_init_fw_data(cdev, bin_fw_data);
-                if (rc != 0)
+                if (rc)
                         return rc;
         }
 
@@ -988,8 +1083,7 @@ int qed_hw_init(struct qed_dev *cdev,
 
                 qed_calc_hw_mode(p_hwfn);
 
-                rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
-                                      &load_code);
+                rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
                 if (rc) {
                         DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
                         return rc;
@@ -1004,11 +1098,6 @@ int qed_hw_init(struct qed_dev *cdev,
                 p_hwfn->first_on_engine = (load_code ==
                                            FW_MSG_CODE_DRV_LOAD_ENGINE);
 
-                if (!qm_lock_init) {
-                        spin_lock_init(&qm_lock);
-                        qm_lock_init = true;
-                }
-
                 switch (load_code) {
                 case FW_MSG_CODE_DRV_LOAD_ENGINE:
                         rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -1071,9 +1160,8 @@ int qed_hw_init(struct qed_dev *cdev,
 }
 
 #define QED_HW_STOP_RETRY_LIMIT (10)
-static inline void qed_hw_timers_stop(struct qed_dev *cdev,
-                                      struct qed_hwfn *p_hwfn,
-                                      struct qed_ptt *p_ptt)
+static void qed_hw_timers_stop(struct qed_dev *cdev,
                               struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
         int i;
 
@@ -1084,8 +1172,7 @@ static inline void qed_hw_timers_stop(struct qed_dev *cdev,
 
         for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
                 if ((!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN)) &&
-                    (!qed_rd(p_hwfn, p_ptt,
-                             TM_REG_PF_SCAN_ACTIVE_TASK)))
+                    (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
                         break;
 
                 /* Dependent on number of connection/tasks, possibly
@@ -1190,8 +1277,7 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
                 }
 
                 DP_VERBOSE(p_hwfn,
-                           NETIF_MSG_IFDOWN,
-                           "Shutting down the fastpath\n");
+                           NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
 
                 qed_wr(p_hwfn, p_ptt,
                        NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -1219,14 +1305,13 @@ void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
                NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
 }
 
-static int qed_reg_assert(struct qed_hwfn *hwfn,
-                          struct qed_ptt *ptt, u32 reg,
-                          bool expected)
+static int qed_reg_assert(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt, u32 reg, bool expected)
 {
-        u32 assert_val = qed_rd(hwfn, ptt, reg);
+        u32 assert_val = qed_rd(p_hwfn, p_ptt, reg);
 
         if (assert_val != expected) {
-                DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
+                DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n",
                           reg, expected);
                 return -EINVAL;
         }
@@ -1306,8 +1391,7 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
 
         /* Clean Previous errors if such exist */
         qed_wr(p_hwfn, p_hwfn->p_main_ptt,
-               PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
-               1 << p_hwfn->abs_pf_id);
+               PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
 
         /* enable internal target-read */
         qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -1317,7 +1401,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
 static void get_function_id(struct qed_hwfn *p_hwfn)
 {
         /* ME Register */
-        p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+        p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
+                                                  PXP_PF_ME_OPAQUE_ADDR);
 
         p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
 
@@ -1326,6 +1411,10 @@ static void get_function_id(struct qed_hwfn *p_hwfn)
                                       PXP_CONCRETE_FID_PFID);
         p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
                                     PXP_CONCRETE_FID_PORT);
+
+        DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+                   "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
+                   p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
 }
 
 static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
@@ -1333,6 +1422,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
         u32 *feat_num = p_hwfn->hw_info.feat_num;
         int num_features = 1;
 
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+        /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the
+         * status blocks equally between L2 / RoCE but with consideration as
+         * to how many l2 queues / cnqs we have
+         */
+        if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+                num_features++;
+
+                feat_num[QED_RDMA_CNQ] =
+                        min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
+                              RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
+        }
+#endif
         feat_num[QED_PF_L2_QUE] = min_t(u32,
                                         RESC_NUM(p_hwfn, QED_SB) / num_features,
                                         RESC_NUM(p_hwfn, QED_L2_QUEUE));
@@ -1373,6 +1475,10 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
         resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
                              num_funcs;
         resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
+        resc_num[QED_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+        resc_num[QED_RDMA_CNQ_RAM] = NUM_OF_CMDQS_CQS / num_funcs;
+        resc_num[QED_RDMA_STATS_QUEUE] = RDMA_NUM_STATISTIC_COUNTERS_BB /
+                                         num_funcs;
 
         for (i = 0; i < QED_MAX_RESC; i++)
                 resc_start[i] = resc_num[i] * enabled_func_idx;
@@ -1396,7 +1502,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
                    "RL = %d start = %d\n"
                    "MAC = %d start = %d\n"
                    "VLAN = %d start = %d\n"
-                   "ILT = %d start = %d\n",
+                   "ILT = %d start = %d\n"
+                   "LL2_QUEUE = %d start = %d\n",
                    p_hwfn->hw_info.resc_num[QED_SB],
                    p_hwfn->hw_info.resc_start[QED_SB],
                    p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
@@ -1412,13 +1519,14 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
                    p_hwfn->hw_info.resc_num[QED_VLAN],
                    p_hwfn->hw_info.resc_start[QED_VLAN],
                    p_hwfn->hw_info.resc_num[QED_ILT],
-                   p_hwfn->hw_info.resc_start[QED_ILT]);
+                   p_hwfn->hw_info.resc_start[QED_ILT],
+                   RESC_NUM(p_hwfn, QED_LL2_QUEUE),
+                   RESC_START(p_hwfn, QED_LL2_QUEUE));
 
         return 0;
 }
 
-static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
-                               struct qed_ptt *p_ptt)
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
         u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
         u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
@@ -1472,8 +1580,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
                 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
                 break;
         default:
-                DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
-                          core_cfg);
+                DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
                 break;
         }
 
@@ -1484,11 +1591,11 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
         link_temp = qed_rd(p_hwfn, p_ptt,
                            port_cfg_addr +
                            offsetof(struct nvm_cfg1_port, speed_cap_mask));
-        link->speed.advertised_speeds =
-                link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+        link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+        link->speed.advertised_speeds = link_temp;
 
-        p_hwfn->mcp_info->link_capabilities.speed_capabilities =
-                link->speed.advertised_speeds;
+        link_temp = link->speed.advertised_speeds;
+        p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
 
         link_temp = qed_rd(p_hwfn, p_ptt,
                            port_cfg_addr +
@@ -1517,8 +1624,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
                 link->speed.forced_speed = 100000;
                 break;
         default:
-                DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
-                          link_temp);
+                DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
         }
 
         link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
@@ -1628,10 +1734,10 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 
         DP_VERBOSE(p_hwfn,
                    NETIF_MSG_PROBE,
-                   "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
+                   "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
                    p_hwfn->rel_pf_id,
                    p_hwfn->abs_pf_id,
-                   p_hwfn->num_funcs_on_engine);
+                   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
 }
 
 static int
@@ -1703,10 +1809,9 @@ static int qed_get_dev_info(struct qed_dev *cdev)
         u32 tmp;
 
         /* Read Vendor Id / Device Id */
-        pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
-                             &cdev->vendor_id);
-        pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
-                             &cdev->device_id);
+        pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
+        pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
+
         cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                      MISCS_REG_CHIP_NUM);
         cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
@@ -1771,10 +1876,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
 
         /* Allocate PTT pool */
         rc = qed_ptt_pool_alloc(p_hwfn);
-        if (rc) {
-                DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
+        if (rc)
                 goto err0;
-        }
 
         /* Allocate the main PTT */
         p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
@@ -1782,7 +1885,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
         /* First hwfn learns basic information, e.g., number of hwfns */
         if (!p_hwfn->my_id) {
                 rc = qed_get_dev_info(p_hwfn->cdev);
-                if (rc != 0)
+                if (rc)
                         goto err1;
         }
 
@@ -1804,10 +1907,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
 
         /* Allocate the init RT array and initialize the init-ops engine */
         rc = qed_init_alloc(p_hwfn);
-        if (rc) {
-                DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
+        if (rc)
                 goto err2;
-        }
 
         return rc;
 err2:
@@ -2015,10 +2116,8 @@ qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
                 p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                             QED_CHAIN_PAGE_SIZE,
                                             &p_phys, GFP_KERNEL);
-                if (!p_virt) {
-                        DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+                if (!p_virt)
                         return -ENOMEM;
-                }
 
                 if (i == 0) {
                         qed_chain_init_mem(p_chain, p_virt, p_phys);
@@ -2048,10 +2147,8 @@ qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
 
         p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                     QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
-        if (!p_virt) {
-                DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+        if (!p_virt)
                 return -ENOMEM;
-        }
 
         qed_chain_init_mem(p_chain, p_virt, p_phys);
         qed_chain_reset(p_chain);
@@ -2068,13 +2165,9 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
         void *p_virt = NULL;
 
         size = page_cnt * sizeof(*pp_virt_addr_tbl);
-        pp_virt_addr_tbl = vmalloc(size);
-        if (!pp_virt_addr_tbl) {
-                DP_NOTICE(cdev,
-                          "Failed to allocate memory for the chain virtual addresses table\n");
+        pp_virt_addr_tbl = vzalloc(size);
+        if (!pp_virt_addr_tbl)
                 return -ENOMEM;
-        }
-        memset(pp_virt_addr_tbl, 0, size);
 
         /* The allocation of the PBL table is done with its full size, since it
          * is expected to be successive.
@@ -2087,19 +2180,15 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
                                            size, &p_pbl_phys, GFP_KERNEL);
         qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
                                pp_virt_addr_tbl);
-        if (!p_pbl_virt) {
-                DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n");
+        if (!p_pbl_virt)
                 return -ENOMEM;
-        }
 
         for (i = 0; i < page_cnt; i++) {
                 p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                             QED_CHAIN_PAGE_SIZE,
                                             &p_phys, GFP_KERNEL);
-                if (!p_virt) {
-                        DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+                if (!p_virt)
                         return -ENOMEM;
-                }
 
                 if (i == 0) {
                         qed_chain_init_mem(p_chain, p_virt, p_phys);
@@ -2134,7 +2223,8 @@ int qed_chain_alloc(struct qed_dev *cdev,
         rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
         if (rc) {
                 DP_NOTICE(cdev,
-                          "Cannot allocate a chain with the given arguments:\n"
+                          "Cannot allocate a chain with the given arguments:\n");
+                DP_NOTICE(cdev,
                           "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
                           intended_use, mode, cnt_type, num_elems, elem_size);
                 return rc;
@@ -2183,8 +2273,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
         return 0;
 }
 
-int qed_fw_vport(struct qed_hwfn *p_hwfn,
-                 u8 src_id, u8 *dst_id)
+int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
 {
         if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
                 u8 min, max;
@@ -2203,8 +2292,7 @@ int qed_fw_vport(struct qed_hwfn *p_hwfn,
         return 0;
 }
 
-int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
-                   u8 src_id, u8 *dst_id)
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
 {
         if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
                 u8 min, max;
@@ -2223,6 +2311,98 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
         return 0;
 }
 
+static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
+                                  u8 *p_filter)
+{
+        *p_high = p_filter[1] | (p_filter[0] << 8);
+        *p_low = p_filter[5] | (p_filter[4] << 8) |
+                 (p_filter[3] << 16) | (p_filter[2] << 24);
+}
+
+int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt, u8 *p_filter)
+{
+        u32 high = 0, low = 0, en;
+        int i;
+
+        if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+                return 0;
+
+        qed_llh_mac_to_filter(&high, &low, p_filter);
+
+        /* Find a free entry and utilize it */
+        for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+                en = qed_rd(p_hwfn, p_ptt,
+                            NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+                if (en)
+                        continue;
+                qed_wr(p_hwfn, p_ptt,
+                       NIG_REG_LLH_FUNC_FILTER_VALUE +
+                       2 * i * sizeof(u32), low);
+                qed_wr(p_hwfn, p_ptt,
+                       NIG_REG_LLH_FUNC_FILTER_VALUE +
+                       (2 * i + 1) * sizeof(u32), high);
+                qed_wr(p_hwfn, p_ptt,
+                       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+                qed_wr(p_hwfn, p_ptt,
+                       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+                       i * sizeof(u32), 0);
+                qed_wr(p_hwfn, p_ptt,
+                       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+                break;
+        }
+        if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+                DP_NOTICE(p_hwfn,
+                          "Failed to find an empty LLH filter to utilize\n");
+                return -EINVAL;
+        }
+
+        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                   "mac: %pM is added at %d\n",
+                   p_filter, i);
+
+        return 0;
+}
+
+void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
+                               struct qed_ptt *p_ptt, u8 *p_filter)
+{
+        u32 high = 0, low = 0;
+        int i;
+
+        if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+                return;
+
+        qed_llh_mac_to_filter(&high, &low, p_filter);
+
+        /* Find the entry and clean it */
+        for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+                if (qed_rd(p_hwfn, p_ptt,
+                           NIG_REG_LLH_FUNC_FILTER_VALUE +
+                           2 * i * sizeof(u32)) != low)
+                        continue;
+                if (qed_rd(p_hwfn, p_ptt,
+                           NIG_REG_LLH_FUNC_FILTER_VALUE +
+                           (2 * i + 1) * sizeof(u32)) != high)
+                        continue;
+
+                qed_wr(p_hwfn, p_ptt,
+                       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+                qed_wr(p_hwfn, p_ptt,
+                       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
+                qed_wr(p_hwfn, p_ptt,
+                       NIG_REG_LLH_FUNC_FILTER_VALUE +
+                       (2 * i + 1) * sizeof(u32), 0);
+
+                DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+                           "mac: %pM is removed from %d\n",
+                           p_filter, i);
+                break;
+        }
+        if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+                DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
+}
+
 static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                             u32 hw_addr, void *p_eth_qzone,
                             size_t eth_qzone_size, u8 timeset)
@@ -2386,8 +2566,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
  * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
  */
 static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
-                              u16 vport_id, u32 req_rate,
-                              u32 min_pf_rate)
+                              u16 vport_id, u32 req_rate, u32 min_pf_rate)
 {
         u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
         int non_requested_count = 0, req_count = 0, i, num_vports;
@@ -2471,7 +2650,7 @@ static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
 
                 rc = qed_init_wfq_param(p_hwfn, vp_id, rate,
                                         p_link->min_pf_rate);
-                if (rc == 0)
+                if (!rc)
                         qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
                                                          p_link->min_pf_rate);
                 else
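
For readers skimming the diff: the heart of the new qed_hw_init_pf_doorbell_bar()/qed_hw_init_dpi_size() code above is a small piece of arithmetic that sizes DPIs within the PWM doorbell region. The standalone sketch below reproduces just that arithmetic in userspace; the local helper implementations and the example inputs (8 CPUs, a 512 KiB PWM region, QED_WID_SIZE taken as 1 KiB) are assumptions for illustration, not values read out of this tree.

/*
 * Userspace sketch of the DPI sizing math from qed_hw_init_dpi_size().
 * roundup_pow_of_two_() and ilog2_() are minimal stand-ins for the
 * kernel helpers of (almost) the same names.
 */
#include <stdio.h>

#define QED_WID_SIZE 1024       /* assumed: bytes per doorbell window */
#define PAGE_SIZE_   4096       /* assumed 4 KiB pages */

static unsigned int roundup_pow_of_two_(unsigned int v)
{
        unsigned int p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

static unsigned int ilog2_(unsigned int v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned int n_cpus = 8, pwm_region_size = 512 * 1024;
        unsigned int dpi_page_size, dpi_bit_shift, dpi_count;

        /* One WID per CPU, but never smaller than a page */
        dpi_page_size = QED_WID_SIZE * n_cpus;
        if (dpi_page_size < PAGE_SIZE_)
                dpi_page_size = PAGE_SIZE_;
        dpi_page_size = roundup_pow_of_two_(dpi_page_size);

        /* Hardware takes the DPI size as a shift relative to 4 KiB */
        dpi_bit_shift = ilog2_(dpi_page_size / 4096);
        dpi_count = pwm_region_size / dpi_page_size;

        printf("dpi_size=%u bit_shift=%u count=%u\n",
               dpi_page_size, dpi_bit_shift, dpi_count);
        return 0;
}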
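Similarly, qed_llh_add_mac_filter() packs a 6-byte MAC into two register words via qed_llh_mac_to_filter(): bytes 0-1 form a 16-bit "high" word and bytes 2-5 a 32-bit "low" word, most significant byte first, matching the NIG_REG_LLH_FUNC_FILTER_VALUE pair written in the diff. A minimal check of that packing (the sample MAC is made up):

/* Mirrors the byte packing of qed_llh_mac_to_filter() above. */
#include <stdio.h>
#include <stdint.h>

static void mac_to_filter(uint32_t *p_high, uint32_t *p_low,
                          const uint8_t *m)
{
        *p_high = m[1] | (m[0] << 8);
        *p_low = m[5] | (m[4] << 8) | (m[3] << 16) |
                 ((uint32_t)m[2] << 24);
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x12, 0x34, 0x56 };
        uint32_t high, low;

        mac_to_filter(&high, &low, mac);
        printf("high=0x%04x low=0x%08x\n", high, low); /* 0x000e 0x1e123456 */
        return 0;
}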