author     Devesh Sharma <devesh.sharma@broadcom.com>   2019-02-22 07:16:19 -0500
committer  Jason Gunthorpe <jgg@mellanox.com>           2019-02-22 11:17:22 -0700
commit     c50866e2853a0315ae9669691406c614f5d848b2 (patch)
tree       cfa957f641a2c6a77b1c301b49609f9752d31792 /drivers/infiniband/hw/bnxt_re/qplib_fp.c
parent     2612d723aadcf8281f9bf8305657129bd9f3cd57 (diff)
bnxt_re: fix the regression due to changes in alloc_pbl
While adding the use of the for_each_sg_dma_page iterator to Broadcom's rdma
driver, a regression was introduced in the __alloc_pbl path. The change
left bnxt_re in a DOA state in the for-next branch.
Fix the regression to avoid the host crash when a user space object is
created, by restricting the previously unconditional access to hwq.pg_arr
to the case where the hwq is initialized for a kernel space object.
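For illustration, a guarded variant of the prefill loop removed in the diff
below would look like the following sketch. This is illustrative only, not
the actual patch (which deletes the loop outright): "qp_is_user" is a
hypothetical flag standing in for however the caller knows the QP belongs to
a user space object; the other identifiers come from the removed hunk.

	/* Sketch only, not the actual patch: the prefill may touch
	 * hwq.pbl_ptr/pg_arr only when the SQ pages belong to a kernel
	 * space object; "qp_is_user" is an assumed flag for this example.
	 */
	if (!qp_is_user) {
		hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
		for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
			hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
							[get_sqe_idx(sw_prod)];
			hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
		}
	}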
Fixes: 161ebe2498d4 ("RDMA/bnxt_re: Use for_each_sg_dma_page iterator on umem SGL")
Reported-by: Gal Pressman <galpress@amazon.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'drivers/infiniband/hw/bnxt_re/qplib_fp.c')
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c  20
1 file changed, 6 insertions(+), 14 deletions(-)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 77eb3d556006..71c34d5b0ac0 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -862,18 +862,18 @@ exit:
 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
-	struct cmdq_create_qp req;
-	struct creq_create_qp_resp resp;
-	struct bnxt_qplib_pbl *pbl;
-	struct sq_psn_search **psn_search_ptr;
 	unsigned long int psn_search, poff = 0;
+	struct sq_psn_search **psn_search_ptr;
 	struct bnxt_qplib_q *sq = &qp->sq;
 	struct bnxt_qplib_q *rq = &qp->rq;
 	int i, rc, req_size, psn_sz = 0;
+	struct sq_send **hw_sq_send_ptr;
+	struct creq_create_qp_resp resp;
 	struct bnxt_qplib_hwq *xrrq;
 	u16 cmd_flags = 0, max_ssge;
-	u32 sw_prod, qp_flags = 0;
+	struct cmdq_create_qp req;
+	struct bnxt_qplib_pbl *pbl;
+	u32 qp_flags = 0;
 	u16 max_rsge;
 
 	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
@@ -948,14 +948,6 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 					  CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
 					  CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
 
-	/* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
-	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
-	for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
-		hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
-						[get_sqe_idx(sw_prod)];
-		hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
-	}
-
 	if (qp->scq)
 		req.scq_cid = cpu_to_le32(qp->scq->id);