Diffstat (limited to 'drivers/infiniband/hw/qedr')
-rw-r--r--  drivers/infiniband/hw/qedr/main.c           |  7
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_hsi_rdma.h  |  4
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c          | 49
3 files changed, 41 insertions, 19 deletions
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index db4bf97c0e15..f9a645c869ce 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -90,8 +90,8 @@ static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
dev_hold(qdev->ndev);
/* The HW vendor's device driver must guarantee
- * that this function returns NULL before the net device reaches
- * NETDEV_UNREGISTER_FINAL state.
+ * that this function returns NULL before the net device has finished
+ * NETDEV_UNREGISTER state.
*/
return qdev->ndev;
}
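The updated comment refers to the NETDEV_UNREGISTER notifier event; NETDEV_UNREGISTER_FINAL was being removed from the netdev notifier chain around the same time. A hedged sketch of how a vendor driver could honor this guarantee, clearing its cached pointer from its own notifier so later get_netdev calls observe NULL (struct vendor_dev and the handler are illustrative, not qed/qedr code):

#include <linux/netdevice.h>

struct vendor_dev {
	struct net_device *ndev;
	struct notifier_block nb;
};

static int vendor_netdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct vendor_dev *vdev = container_of(nb, struct vendor_dev, nb);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);

	/* After this runs, qedr_get_netdev()-style lookups that consult
	 * vdev->ndev will see NULL, as the comment above requires.
	 */
	if (event == NETDEV_UNREGISTER && READ_ONCE(vdev->ndev) == ndev)
		WRITE_ONCE(vdev->ndev, NULL);

	return NOTIFY_DONE;
}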
@@ -833,7 +833,8 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
if (!dev->num_cnq) {
- DP_ERR(dev, "not enough CNQ resources.\n");
+ DP_ERR(dev, "Failed. At least one CNQ is required.\n");
+ rc = -ENOMEM;
goto init_err;
}
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
index 78b49002fbd2..b816c80df50b 100644
--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -45,7 +45,7 @@ struct rdma_cqe_responder {
__le32 imm_data_or_inv_r_Key;
__le32 length;
__le32 imm_data_hi;
- __le16 rq_cons;
+ __le16 rq_cons_or_srq_id;
u8 flags;
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK 0x1
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
@@ -115,6 +115,7 @@ enum rdma_cqe_requester_status_enum {
RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
+ RDMA_CQE_REQ_STS_XRC_VOILATION_ERR,
MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
};
@@ -136,6 +137,7 @@ enum rdma_cqe_type {
RDMA_CQE_TYPE_REQUESTER,
RDMA_CQE_TYPE_RESPONDER_RQ,
RDMA_CQE_TYPE_RESPONDER_SRQ,
+ RDMA_CQE_TYPE_RESPONDER_XRC_SRQ,
RDMA_CQE_TYPE_INVALID,
MAX_RDMA_CQE_TYPE
};
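With this header change, one 16-bit CQE word is overloaded: it carries the RQ consumer index for RESPONDER_RQ completions and an SRQ id for the SRQ completion types. A hedged sketch of how a consumer might branch on the CQE type (the handler names are hypothetical; this patch only updates qedr's existing RQ usage):

/* Interpret the overloaded CQE word by completion type.  Sketch only. */
static void handle_resp_cqe(struct rdma_cqe_responder *resp,
			    enum rdma_cqe_type type)
{
	u16 val = le16_to_cpu(resp->rq_cons_or_srq_id);

	if (type == RDMA_CQE_TYPE_RESPONDER_SRQ ||
	    type == RDMA_CQE_TYPE_RESPONDER_XRC_SRQ)
		handle_srq_completion(val);	/* val is the SRQ id */
	else
		handle_rq_completion(val);	/* val is the RQ consumer index */
}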
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 875b17272d65..f9c3cc71f5c0 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1841,14 +1841,15 @@ static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
static int qedr_update_qp_state(struct qedr_dev *dev,
struct qedr_qp *qp,
+ enum qed_roce_qp_state cur_state,
enum qed_roce_qp_state new_state)
{
int status = 0;
- if (new_state == qp->state)
+ if (new_state == cur_state)
return 0;
- switch (qp->state) {
+ switch (cur_state) {
case QED_ROCE_QP_STATE_RESET:
switch (new_state) {
case QED_ROCE_QP_STATE_INIT:
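Passing cur_state explicitly makes the transition check a pure function of its arguments: a later hunk writes QED_ROCE_QP_STATE_ERR into qp->state before this function runs, so re-reading qp->state here would compare ERR against ERR and skip the transition logic. A minimal sketch of the pattern, with a simplified state machine:

/* Validate a transition against a snapshot of the previous state,
 * not the live qp->state, which the caller may already have
 * advanced.  Illustrative only.
 */
static int update_state(enum qed_roce_qp_state cur_state,	/* snapshot */
			enum qed_roce_qp_state new_state)
{
	if (new_state == cur_state)
		return 0;			/* no-op transition */

	switch (cur_state) {
	case QED_ROCE_QP_STATE_RESET:
		/* sketch: RESET may only move to INIT */
		return new_state == QED_ROCE_QP_STATE_INIT ? 0 : -EINVAL;
	default:
		return -EINVAL;			/* sketch: reject the rest */
	}
}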
@@ -1955,6 +1956,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
enum ib_qp_state old_qp_state, new_qp_state;
+ enum qed_roce_qp_state cur_state;
int rc = 0;
DP_DEBUG(dev, QEDR_MSG_QP,
@@ -2086,18 +2088,23 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
SET_FIELD(qp_params.modify_flags,
QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
- qp_params.ack_timeout = attr->timeout;
- if (attr->timeout) {
- u32 temp;
-
- temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
- /* FW requires [msec] */
- qp_params.ack_timeout = temp;
- } else {
- /* Infinite */
+ /* The received timeout value is an exponent used like this:
+ * "12.7.34 LOCAL ACK TIMEOUT
+ * Value representing the transport (ACK) timeout for use by
+ * the remote, expressed as: 4.096 * 2^timeout [usec]"
+ * The FW expects timeout in msec so we need to divide the usec
+ * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
+ * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
+ * The value of zero means infinite so we use a 'max_t' to make
+ * sure that sub 1 msec values will be configured as 1 msec.
+ */
+ if (attr->timeout)
+ qp_params.ack_timeout =
+ 1 << max_t(int, attr->timeout - 8, 0);
+ else
qp_params.ack_timeout = 0;
- }
}
+
if (attr_mask & IB_QP_RETRY_CNT) {
SET_FIELD(qp_params.modify_flags,
QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
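The timeout hunk above replaces an exact division with a power-of-two approximation, and fixes a truncation bug along the way: the old expression 4096 * (1UL << timeout) / 1000 / 1000 evaluates to 0 msec for any timeout of 7 or less, and the FW treats 0 as infinite. A minimal sketch of the new conversion as a standalone helper (the function name is hypothetical; qedr computes this inline):

/* Convert an IB ack timeout exponent to the msec value the FW
 * expects.  Worked examples:
 *   timeout = 14 -> 4.096 * 2^14 usec ~= 67.1 msec; 1 << (14 - 8) = 64 msec
 *   timeout =  8 -> ~1.05 msec;                     1 << 0       =  1 msec
 *   timeout =  1 -> ~8.2 usec; clamped by max_t to  1 msec, not 0/infinite
 */
static u32 qedr_ib_timeout_to_msec(u8 timeout)
{
	if (!timeout)
		return 0;	/* zero means infinite ack timeout */

	return 1U << max_t(int, (int)timeout - 8, 0);
}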
@@ -2170,13 +2177,25 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp->dest_qp_num = attr->dest_qp_num;
}
+ cur_state = qp->state;
+
+ /* Update the QP state before the actual ramrod to prevent a race with
+ * fast path. Modifying the QP state to error will cause the device to
+ * flush the CQEs, and flushed CQEs encountered while polling will be
+ * considered a potential issue if the QP isn't in error state.
+ */
+ if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
+ !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
+ qp->state = QED_ROCE_QP_STATE_ERR;
+
if (qp->qp_type != IB_QPT_GSI)
rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
qp->qed_qp, &qp_params);
if (attr_mask & IB_QP_STATE) {
if ((qp->qp_type != IB_QPT_GSI) && (!udata))
- rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
+ rc = qedr_update_qp_state(dev, qp, cur_state,
+ qp_params.new_state);
qp->state = qp_params.new_state;
}
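The write to qp->state at the top of this hunk is deliberately ordered before the rdma_modify_qp() ramrod. A sketch of the sequence, assuming qedr_poll_cq() runs concurrently on the fast path:

/* Modify-to-error ordering (sketch of the hunk above): */
cur_state = qp->state;			/* snapshot for qedr_update_qp_state() */
qp->state = QED_ROCE_QP_STATE_ERR;	/* 1: pollers now expect flushed CQEs  */
dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);
					/* 2: device starts flushing CQEs      */

/* A poller that dequeues a WORK_REQUEST_FLUSHED_ERR CQE after step 2
 * already sees qp->state == ERR from step 1, so the flush is not
 * misread as an error on a healthy QP.  With the opposite order there
 * is a window where flushed CQEs arrive while the QP still looks fine.
 */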
@@ -3695,7 +3714,7 @@ static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
struct rdma_cqe_responder *resp, int *update)
{
- if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
+ if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
consume_cqe(cq);
*update |= 1;
}
@@ -3710,7 +3729,7 @@ static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
cnt = process_resp_flush(qp, cq, num_entries, wc,
- resp->rq_cons);
+ resp->rq_cons_or_srq_id);
try_consume_resp_cqe(cq, qp, resp, update);
} else {
cnt = process_resp_one(dev, qp, cq, wc, resp);