Diffstat (limited to 'drivers/infiniband/hw/qedr')
 drivers/infiniband/hw/qedr/main.c          |  61
 drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 125
 drivers/infiniband/hw/qedr/verbs.c         |  15
 3 files changed, 87 insertions(+), 114 deletions(-)
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 50812b33291b..db4bf97c0e15 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -264,7 +264,7 @@ static int qedr_register_device(struct qedr_dev *dev)
 static int qedr_alloc_mem_sb(struct qedr_dev *dev,
 			     struct qed_sb_info *sb_info, u16 sb_id)
 {
-	struct status_block *sb_virt;
+	struct status_block_e4 *sb_virt;
 	dma_addr_t sb_phys;
 	int rc;
 
@@ -430,59 +430,16 @@ static void qedr_remove_sysfiles(struct qedr_dev *dev)
 
 static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
 {
-	struct pci_dev *bridge;
-	u32 ctl2, cap2;
-	u16 flags;
-	int rc;
-
-	bridge = pdev->bus->self;
-	if (!bridge)
-		goto disable;
-
-	/* Check atomic routing support all the way to root complex */
-	while (bridge->bus->parent) {
-		rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
-		if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
-			goto disable;
-
-		rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
-		if (rc)
-			goto disable;
+	int rc = pci_enable_atomic_ops_to_root(pdev,
+					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);
 
-		rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2);
-		if (rc)
-			goto disable;
-
-		if (!(cap2 & PCI_EXP_DEVCAP2_ATOMIC_ROUTE) ||
-		    (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK))
-			goto disable;
-		bridge = bridge->bus->parent->self;
+	if (rc) {
+		dev->atomic_cap = IB_ATOMIC_NONE;
+		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
+	} else {
+		dev->atomic_cap = IB_ATOMIC_GLOB;
+		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
 	}
-
-	rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
-	if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
-		goto disable;
-
-	rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
-	if (rc || !(cap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP64))
-		goto disable;
-
-	/* Set atomic operations */
-	pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
-				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
-	dev->atomic_cap = IB_ATOMIC_GLOB;
-
-	DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
-
-	return;
-
-disable:
-	pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
-				   PCI_EXP_DEVCTL2_ATOMIC_REQ);
-	dev->atomic_cap = IB_ATOMIC_NONE;
-
-	DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
-
 }
 
 static const struct qed_rdma_ops *qed_ops;
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
index b7587f10e7de..78b49002fbd2 100644
--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -164,6 +164,13 @@ struct rdma_srq_sge {
 	__le32 l_key;
 };
 
+/* Rdma doorbell data for flags update */
+struct rdma_pwm_flags_data {
+	__le16 icid;	/* internal CID */
+	u8 agg_flags;	/* aggregative flags */
+	u8 reserved;
+};
+
 /* Rdma doorbell data for SQ and RQ */
 struct rdma_pwm_val16_data {
 	__le16 icid;
@@ -180,12 +187,16 @@ struct rdma_pwm_val32_data {
 	__le16 icid;
 	u8 agg_flags;
 	u8 params;
-#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3
-#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
-#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
-#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
-#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x1F
-#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT 4
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x7
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 5
 	__le32 value;
 };
 
@@ -478,23 +489,25 @@ struct rdma_sq_fmr_wqe {
 	__le16 dif_app_tag_mask;
 	__le16 dif_runt_crc_value;
 	__le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
-#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF
-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7
-	__le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
+	__le32 reserved5;
 };
 
 /* First element (16 bytes) of fmr wqe */
@@ -558,23 +571,25 @@ struct rdma_sq_fmr_wqe_3rd {
 	__le16 dif_app_tag_mask;
 	__le16 dif_runt_crc_value;
 	__le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7
-	__le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT 7
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0xFF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
+	__le32 reserved5;
 };
 
 struct rdma_sq_local_inv_wqe {
@@ -606,20 +621,22 @@ struct rdma_sq_rdma_wqe {
 	__le32 xrc_srq;
 	u8 req_type;
 	u8 flags;
-#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0
-#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1
-#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2
-#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3
-#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
-#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
-#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x3
-#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 6
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7
 	u8 wqe_size;
 	u8 prev_wqe_size;
 	struct regpair remote_va;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index b26aa88dab48..53f00dbf313f 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -604,12 +604,11 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
 		return ERR_PTR(-ENOMEM);
 
 	for (i = 0; i < pbl_info->num_pbls; i++) {
-		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
-					&pa, flags);
+		va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
+					 &pa, flags);
 		if (!va)
 			goto err;
 
-		memset(va, 0, pbl_info->pbl_size);
 		pbl_table[i].va = va;
 		pbl_table[i].pa = pa;
 	}
@@ -3040,7 +3039,7 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		swqe->wqe_size = 2;
 		swqe2 = qed_chain_produce(&qp->sq.pbl);
 
-		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
+		swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
 		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
 						   wr, bad_wr);
 		swqe->length = cpu_to_le32(length);
@@ -3471,9 +3470,9 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
 		break;
 	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
 		if (qp->state != QED_ROCE_QP_STATE_ERR)
-			DP_ERR(dev,
-			       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
-			       cq->icid, qp->icid);
+			DP_DEBUG(dev, QEDR_MSG_CQ,
+				 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+				 cq->icid, qp->icid);
 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
 				  IB_WC_WR_FLUSH_ERR, 1);
 		break;
@@ -3591,7 +3590,7 @@ static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
 	wc->byte_len = le32_to_cpu(resp->length);
 
 	if (resp->flags & QEDR_RESP_IMM) {
-		wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
+		wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
 		wc->wc_flags |= IB_WC_WITH_IMM;
 
 		if (resp->flags & QEDR_RESP_RDMA)
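
Editor's note on the qedr_pci_set_atomic() hunk in main.c: the open-coded walk up the PCIe hierarchy (reading PCI_EXP_DEVCAP2/PCI_EXP_DEVCTL2 on each bridge) is replaced with the PCI core helper pci_enable_atomic_ops_to_root(), which performs the routing check toward the root complex and enables AtomicOp requests on the device when the requested completer capability is supported. A minimal caller sketch follows; qedr_example_enable_atomics() and its pci_dbg() messages are illustrative only, not part of the driver.

#include <linux/pci.h>

/* Ask the PCI core to enable 64-bit AtomicOp requests routed to the
 * root complex; a non-zero return means some bridge on the path (or
 * the root port) does not support routing/completion of the request.
 */
static int qedr_example_enable_atomics(struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (rc)
		pci_dbg(pdev, "64-bit PCIe atomics not available (%d)\n", rc);
	else
		pci_dbg(pdev, "64-bit PCIe atomics enabled\n");

	return rc;
}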
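
Editor's note on the immediate-data hunks in verbs.c: in the IB core, ib_send_wr.ex.imm_data and ib_wc.ex.imm_data are big-endian (__be32) values carried on the wire as-is, while the qedr WQE/CQE fields are little-endian (__le32), so the conversion has to pass through CPU byte order in both directions, as the hunks do. A short sketch of the two directions, under the assumption of generic __le32 hardware descriptor fields (the helper names are hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Send side: big-endian immediate from the work request into a
 * little-endian hardware descriptor field.
 */
static inline __le32 imm_to_hw(__be32 imm_data)
{
	return cpu_to_le32(be32_to_cpu(imm_data));
}

/* Receive side: little-endian immediate from the CQE back to the
 * big-endian form the IB core expects in the work completion.
 */
static inline __be32 imm_from_hw(__le32 hw_imm)
{
	return cpu_to_be32(le32_to_cpu(hw_imm));
}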