author     Linus Torvalds <torvalds@linux-foundation.org>  2017-02-23 08:27:57 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-02-23 08:27:57 -0800
commit     4cc4b9323f43458c9277e082f90316570431881e (patch)
tree       edb24959f70da772bd0c9bbce6d1636f7d75c392 /drivers/infiniband/hw/qib
parent     a57eaa1f25bb3e1d0aaf8906460053b9509c74a8 (diff)
parent     db690328a7df0b507f7d59de0c7e1bbe8f4b9e6a (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma updates from Doug Ledford:
"First set of updates for 4.11 kernel merge window
- Add new Broadcom bnxt_re RoCE driver
- rxe driver updates
- ioctl cleanups
- ETH_P_IBOE declaration cleanup
- IPoIB changes
- Add port state cache
- Allow srpt driver to accept guids as port names in config
- Update to hfi1 driver
- Update to srp driver
- Lots of misc minor changes all over"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (114 commits)
RDMA/bnxt_re: fix for "bnxt_en: Update to firmware interface spec 1.7.0."
rdma_cm: fail iwarp accepts w/o connection params
IB/srp: Drain the send queue before destroying a QP
IB/core: Add support for draining IB_POLL_DIRECT completion queues
IB/srp: Improve an error path
IB/srp: Make a diagnostic message more informative
IB/srp: Document locking conventions
IB/srp: Fix race conditions related to task management
IB/srp: Avoid that duplicate responses trigger a kernel bug
IB/SRP: Avoid using IB_MR_TYPE_SG_GAPS
RDMA/qedr: Fix some error handling
RDMA/bnxt_re: add DCB dependency
IB/hns: include linux/module.h
IB/vmw_pvrdma: Expose vendor error to ULPs
vmw_pvrdma: switch to pci_alloc_irq_vectors
IB/hfi1: use size_t for passing array length
IB/ipoib: Remove redundant label
IB/ipoib: remove the unnecessary memory free
IB/mthca: switch to pci_alloc_irq_vectors
IB/hfi1: Code reuse with memdup_copy
...
Diffstat (limited to 'drivers/infiniband/hw/qib')
-rw-r--r--  drivers/infiniband/hw/qib/qib_common.h    |   4
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c   |   1
-rw-r--r--  drivers/infiniband/hw/qib/qib_pcie.c      |   8
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c        | 135
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.c      |  10
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.h      |   1
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c        | 179
-rw-r--r--  drivers/infiniband/hw/qib/qib_ruc.c       |  47
-rw-r--r--  drivers/infiniband/hw/qib/qib_uc.c        |  15
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c        |   8
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_sdma.c |   6
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c     |  96
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h     |  10
13 files changed, 67 insertions, 453 deletions
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index 1d6e63eb1146..a4a1f56ce824 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -742,11 +742,7 @@ struct qib_tid_session_member {
 #define SIZE_OF_CRC 1
 
 #define QIB_DEFAULT_P_KEY 0xFFFF
-#define QIB_AETH_CREDIT_SHIFT 24
-#define QIB_AETH_CREDIT_MASK 0x1F
-#define QIB_AETH_CREDIT_INVAL 0x1F
 #define QIB_PSN_MASK 0xFFFFFF
-#define QIB_MSN_MASK 0xFFFFFF
 #define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
 #define QIB_MULTICAST_QPN 0xFFFFFF
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index c4a3616062f1..9cc97bd42775 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2893,7 +2893,6 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd)
 			dd->cspec->gpio_mask &= ~mask;
 			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
 			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
-			qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
 		}
 	}
 }
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 6abe1c621aa4..c379b8342a09 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -682,13 +682,6 @@ qib_pci_slot_reset(struct pci_dev *pdev)
 	return PCI_ERS_RESULT_CAN_RECOVER;
 }
 
-static pci_ers_result_t
-qib_pci_link_reset(struct pci_dev *pdev)
-{
-	qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
-	return PCI_ERS_RESULT_CAN_RECOVER;
-}
-
 static void
 qib_pci_resume(struct pci_dev *pdev)
 {
@@ -707,7 +700,6 @@ qib_pci_resume(struct pci_dev *pdev)
 const struct pci_error_handlers qib_pci_err_handler = {
 	.error_detected = qib_pci_error_detected,
 	.mmio_enabled = qib_pci_mmio_enabled,
-	.link_reset = qib_pci_link_reset,
 	.slot_reset = qib_pci_slot_reset,
 	.resume = qib_pci_resume,
 };
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 99d31efe4c2f..2ac0c0f79e74 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -61,43 +61,6 @@ static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
 	return off;
 }
 
-/*
- * Convert the AETH credit code into the number of credits.
- */
-static u32 credit_table[31] = {
-	0,			/* 0 */
-	1,			/* 1 */
-	2,			/* 2 */
-	3,			/* 3 */
-	4,			/* 4 */
-	6,			/* 5 */
-	8,			/* 6 */
-	12,			/* 7 */
-	16,			/* 8 */
-	24,			/* 9 */
-	32,			/* A */
-	48,			/* B */
-	64,			/* C */
-	96,			/* D */
-	128,			/* E */
-	192,			/* F */
-	256,			/* 10 */
-	384,			/* 11 */
-	512,			/* 12 */
-	768,			/* 13 */
-	1024,			/* 14 */
-	1536,			/* 15 */
-	2048,			/* 16 */
-	3072,			/* 17 */
-	4096,			/* 18 */
-	6144,			/* 19 */
-	8192,			/* 1A */
-	12288,			/* 1B */
-	16384,			/* 1C */
-	24576,			/* 1D */
-	32768			/* 1E */
-};
-
 const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
 [IB_WR_RDMA_WRITE] = {
 	.length = sizeof(struct ib_rdma_wr),
@@ -354,66 +317,6 @@ u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
 	return ib_mtu_enum_to_int(pmtu);
 }
 
-/**
- * qib_compute_aeth - compute the AETH (syndrome + MSN)
- * @qp: the queue pair to compute the AETH for
- *
- * Returns the AETH.
- */
-__be32 qib_compute_aeth(struct rvt_qp *qp)
-{
-	u32 aeth = qp->r_msn & QIB_MSN_MASK;
-
-	if (qp->ibqp.srq) {
-		/*
-		 * Shared receive queues don't generate credits.
-		 * Set the credit field to the invalid value.
-		 */
-		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
-	} else {
-		u32 min, max, x;
-		u32 credits;
-		struct rvt_rwq *wq = qp->r_rq.wq;
-		u32 head;
-		u32 tail;
-
-		/* sanity check pointers before trusting them */
-		head = wq->head;
-		if (head >= qp->r_rq.size)
-			head = 0;
-		tail = wq->tail;
-		if (tail >= qp->r_rq.size)
-			tail = 0;
-		/*
-		 * Compute the number of credits available (RWQEs).
-		 * XXX Not holding the r_rq.lock here so there is a small
-		 * chance that the pair of reads are not atomic.
-		 */
-		credits = head - tail;
-		if ((int)credits < 0)
-			credits += qp->r_rq.size;
-		/*
-		 * Binary search the credit table to find the code to
-		 * use.
-		 */
-		min = 0;
-		max = 31;
-		for (;;) {
-			x = (min + max) / 2;
-			if (credit_table[x] == credits)
-				break;
-			if (credit_table[x] > credits)
-				max = x;
-			else if (min == x)
-				break;
-			else
-				min = x;
-		}
-		aeth |= x << QIB_AETH_CREDIT_SHIFT;
-	}
-	return cpu_to_be32(aeth);
-}
-
 void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
 {
 	struct qib_qp_priv *priv;
@@ -448,7 +351,6 @@ void qib_stop_send_queue(struct rvt_qp *qp)
 	struct qib_qp_priv *priv = qp->priv;
 
 	cancel_work_sync(&priv->s_work);
-	del_timer_sync(&qp->s_timer);
 }
 
 void qib_quiesce_qp(struct rvt_qp *qp)
@@ -474,43 +376,6 @@ void qib_flush_qp_waiters(struct rvt_qp *qp)
 }
 
 /**
- * qib_get_credit - flush the send work queue of a QP
- * @qp: the qp who's send work queue to flush
- * @aeth: the Acknowledge Extended Transport Header
- *
- * The QP s_lock should be held.
- */
-void qib_get_credit(struct rvt_qp *qp, u32 aeth)
-{
-	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
-
-	/*
-	 * If the credit is invalid, we can send
-	 * as many packets as we like.  Otherwise, we have to
-	 * honor the credit field.
-	 */
-	if (credit == QIB_AETH_CREDIT_INVAL) {
-		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
-			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
-			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
-				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
-				qib_schedule_send(qp);
-			}
-		}
-	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
-		/* Compute new LSN (i.e., MSN + credit) */
-		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
-		if (qib_cmp24(credit, qp->s_lsn) > 0) {
-			qp->s_lsn = credit;
-			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
-				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
-				qib_schedule_send(qp);
-			}
-		}
-	}
-}
-
-/**
  * qib_check_send_wqe - validate wr/wqe
  * @qp - The qp
  * @wqe - The built wqe
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 4c7c3c84a741..295d40a83bb6 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -485,16 +485,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
 	dd->f_gpio_mod(dd, mask, mask, mask);
 }
 
-void qib_qsfp_deinit(struct qib_qsfp_data *qd)
-{
-	/*
-	 * There is nothing to do here for now.  our work is scheduled
-	 * with queue_work(), and flush_workqueue() from remove_one
-	 * will block until all work setup with queue_work()
-	 * completes.
-	 */
-}
-
 int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
 {
 	struct qib_qsfp_cache cd;
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
index 91908f533a2b..ad8dbd6ac0cf 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.h
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -186,4 +186,3 @@ extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
 extern int qib_qsfp_mod_present(struct qib_pportdata *ppd);
 extern void qib_qsfp_init(struct qib_qsfp_data *qd,
 			  void (*fevent)(struct work_struct *));
-extern void qib_qsfp_deinit(struct qib_qsfp_data *qd);
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 031433cb7206..12658e3fe154 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -38,7 +38,6 @@
 /* cut down ridiculously long IB macro names */
 #define OP(x) IB_OPCODE_RC_##x
 
-static void rc_timeout(unsigned long arg);
 
 static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
 		       u32 psn, u32 pmtu)
@@ -50,19 +49,10 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
 	ss->sg_list = wqe->sg_list + 1;
 	ss->num_sge = wqe->wr.num_sge;
 	ss->total_len = wqe->length;
-	qib_skip_sge(ss, len, 0);
+	rvt_skip_sge(ss, len, false);
 	return wqe->length - len;
 }
 
-static void start_timer(struct rvt_qp *qp)
-{
-	qp->s_flags |= RVT_S_TIMER;
-	qp->s_timer.function = rc_timeout;
-	/* 4.096 usec. * (1 << qp->timeout) */
-	qp->s_timer.expires = jiffies + qp->timeout_jiffies;
-	add_timer(&qp->s_timer);
-}
-
 /**
  * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
  * @dev: the device for this QP
@@ -144,7 +134,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
 			qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
 			e->sent = 1;
 		}
-		ohdr->u.aeth = qib_compute_aeth(qp);
+		ohdr->u.aeth = rvt_compute_aeth(qp);
 		hwords++;
 		qp->s_ack_rdma_psn = e->psn;
 		bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
@@ -153,7 +143,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
 			qp->s_cur_sge = NULL;
 			len = 0;
 			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
-			ohdr->u.at.aeth = qib_compute_aeth(qp);
+			ohdr->u.at.aeth = rvt_compute_aeth(qp);
 			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
 			hwords += sizeof(ohdr->u.at) / sizeof(u32);
 			bth2 = e->psn & QIB_PSN_MASK;
@@ -174,7 +164,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
 		if (len > pmtu)
 			len = pmtu;
 		else {
-			ohdr->u.aeth = qib_compute_aeth(qp);
+			ohdr->u.aeth = rvt_compute_aeth(qp);
 			hwords++;
 			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
 			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
@@ -197,11 +187,11 @@ normal:
 		qp->s_cur_sge = NULL;
 		if (qp->s_nak_state)
 			ohdr->u.aeth =
-				cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+				cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
 					    (qp->s_nak_state <<
-					     QIB_AETH_CREDIT_SHIFT));
+					     IB_AETH_CREDIT_SHIFT));
 		else
-			ohdr->u.aeth = qib_compute_aeth(qp);
+			ohdr->u.aeth = rvt_compute_aeth(qp);
 		hwords++;
 		len = 0;
 		bth0 = OP(ACKNOWLEDGE) << 24;
@@ -257,7 +247,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
 			goto bail;
 		/* We are in the error state, flush the work request. */
 		smp_read_barrier_depends(); /* see post_one_send() */
-		if (qp->s_last == ACCESS_ONCE(qp->s_head))
+		if (qp->s_last == READ_ONCE(qp->s_head))
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
 		if (atomic_read(&priv->s_dma_busy)) {
@@ -303,7 +293,8 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
 		newreq = 0;
 		if (qp->s_cur == qp->s_tail) {
 			/* Check if send work queue is empty. */
-			if (qp->s_tail == qp->s_head)
+			smp_read_barrier_depends(); /* see post_one_send() */
+			if (qp->s_tail == READ_ONCE(qp->s_head))
 				goto bail;
 			/*
 			 * If a fence is requested, wait for previous
@@ -330,7 +321,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
 		case IB_WR_SEND_WITH_IMM:
 			/* If no credit, return. */
 			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
-			    qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
 				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
 				goto bail;
 			}
@@ -361,7 +352,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
 		case IB_WR_RDMA_WRITE_WITH_IMM:
 			/* If no credit, return. */
 			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
-			    qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
 				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
 				goto bail;
 			}
@@ -657,11 +648,11 @@ void qib_send_rc_ack(struct rvt_qp *qp)
 	if (qp->s_mig_state == IB_MIG_MIGRATED)
 		bth0 |= IB_BTH_MIG_REQ;
 	if (qp->r_nak_state)
-		ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
 					    (qp->r_nak_state <<
-					     QIB_AETH_CREDIT_SHIFT));
+					     IB_AETH_CREDIT_SHIFT));
 	else
-		ohdr->u.aeth = qib_compute_aeth(qp);
+		ohdr->u.aeth = rvt_compute_aeth(qp);
 	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
 		qp->remote_ah_attr.sl << 4;
 	hdr.lrh[0] = cpu_to_be16(lrh0);
@@ -836,7 +827,7 @@ done:
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
-static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
+void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
 {
 	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
 	struct qib_ibport *ibp;
@@ -869,46 +860,6 @@ static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
 }
 
 /*
- * This is called from s_timer for missing responses.
- */
-static void rc_timeout(unsigned long arg)
-{
-	struct rvt_qp *qp = (struct rvt_qp *)arg;
-	struct qib_ibport *ibp;
-	unsigned long flags;
-
-	spin_lock_irqsave(&qp->r_lock, flags);
-	spin_lock(&qp->s_lock);
-	if (qp->s_flags & RVT_S_TIMER) {
-		ibp = to_iport(qp->ibqp.device, qp->port_num);
-		ibp->rvp.n_rc_timeouts++;
-		qp->s_flags &= ~RVT_S_TIMER;
-		del_timer(&qp->s_timer);
-		qib_restart_rc(qp, qp->s_last_psn + 1, 1);
-		qib_schedule_send(qp);
-	}
-	spin_unlock(&qp->s_lock);
-	spin_unlock_irqrestore(&qp->r_lock, flags);
-}
-
-/*
- * This is called from s_timer for RNR timeouts.
- */
-void qib_rc_rnr_retry(unsigned long arg)
-{
-	struct rvt_qp *qp = (struct rvt_qp *)arg;
-	unsigned long flags;
-
-	spin_lock_irqsave(&qp->s_lock, flags);
-	if (qp->s_flags & RVT_S_WAIT_RNR) {
-		qp->s_flags &= ~RVT_S_WAIT_RNR;
-		del_timer(&qp->s_timer);
-		qib_schedule_send(qp);
-	}
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-}
-
-/*
  * Set qp->s_sending_psn to the next PSN after the given one.
  * This would be psn+1 except when RDMA reads are present.
  */
@@ -944,7 +895,7 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 	u32 opcode;
 	u32 psn;
 
-	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
+	if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
 		return;
 
 	/* Find out where the BTH is */
@@ -971,7 +922,7 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
 	    !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
 	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
-		start_timer(qp);
+		rvt_add_retry_timer(qp);
 
 	while (qp->s_last != qp->s_acked) {
 		u32 s_last;
@@ -1084,12 +1035,6 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 	u32 ack_psn;
 	int diff;
 
-	/* Remove QP from retry timer */
-	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
-		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
-		del_timer(&qp->s_timer);
-	}
-
 	/*
 	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
 	 * requests and implicitly NAK RDMA read and atomic requests issued
@@ -1097,7 +1042,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 	 * request but will include an ACK'ed request(s).
 	 */
 	ack_psn = psn;
-	if (aeth >> 29)
+	if (aeth >> IB_AETH_NAK_SHIFT)
 		ack_psn--;
 	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
 	ibp = to_iport(qp->ibqp.device, qp->port_num);
@@ -1177,7 +1122,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		break;
 	}
 
-	switch (aeth >> 29) {
+	switch (aeth >> IB_AETH_NAK_SHIFT) {
 	case 0:         /* ACK */
 		this_cpu_inc(*ibp->rvp.rc_acks);
 		if (qp->s_acked != qp->s_tail) {
@@ -1185,27 +1130,30 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 			 * We are expecting more ACKs so
 			 * reset the retransmit timer.
 			 */
-			start_timer(qp);
+			rvt_mod_retry_timer(qp);
 			/*
 			 * We can stop resending the earlier packets and
 			 * continue with the next packet the receiver wants.
 			 */
 			if (qib_cmp24(qp->s_psn, psn) <= 0)
 				reset_psn(qp, psn + 1);
-		} else if (qib_cmp24(qp->s_psn, psn) <= 0) {
-			qp->s_state = OP(SEND_LAST);
-			qp->s_psn = psn + 1;
+		} else {
+			/* No more acks - kill all timers */
+			rvt_stop_rc_timers(qp);
+			if (qib_cmp24(qp->s_psn, psn) <= 0) {
+				qp->s_state = OP(SEND_LAST);
+				qp->s_psn = psn + 1;
+			}
 		}
 		if (qp->s_flags & RVT_S_WAIT_ACK) {
 			qp->s_flags &= ~RVT_S_WAIT_ACK;
 			qib_schedule_send(qp);
 		}
-		qib_get_credit(qp, aeth);
+		rvt_get_credit(qp, aeth);
 		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
 		qp->s_retry = qp->s_retry_cnt;
 		update_last_psn(qp, psn);
-		ret = 1;
-		goto bail;
+		return 1;
 
 	case 1:		/* RNR NAK */
 		ibp->rvp.n_rnr_naks++;
@@ -1228,21 +1176,17 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		reset_psn(qp, psn);
 
 		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
-		qp->s_flags |= RVT_S_WAIT_RNR;
-		qp->s_timer.function = qib_rc_rnr_retry;
-		qp->s_timer.expires = jiffies + usecs_to_jiffies(
-			ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
-					   QIB_AETH_CREDIT_MASK]);
-		add_timer(&qp->s_timer);
-		goto bail;
+		rvt_stop_rc_timers(qp);
+		rvt_add_rnr_timer(qp, aeth);
+		return 0;
 
 	case 3:		/* NAK */
 		if (qp->s_acked == qp->s_tail)
 			goto bail;
 		/* The last valid PSN is the previous PSN. */
 		update_last_psn(qp, psn - 1);
-		switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
-			QIB_AETH_CREDIT_MASK) {
+		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
+			IB_AETH_CREDIT_MASK) {
 		case 0:	/* PSN sequence error */
 			ibp->rvp.n_seq_naks++;
 			/*
@@ -1290,6 +1234,7 @@ reserved:
 	}
 
 bail:
+	rvt_stop_rc_timers(qp);
 	return ret;
 }
 
@@ -1303,10 +1248,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
 	struct rvt_swqe *wqe;
 
 	/* Remove QP from retry timer */
-	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
-		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
-		del_timer(&qp->s_timer);
-	}
+	rvt_stop_rc_timers(qp);
 
 	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
 
@@ -1390,7 +1332,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 
 	/* Ignore invalid responses. */
 	smp_read_barrier_depends(); /* see post_one_send */
-	if (qib_cmp24(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
+	if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
 		goto ack_done;
 
 	/* Ignore duplicate responses. */
@@ -1399,8 +1341,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 		/* Update credits for "ghost" ACKs */
 		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
 			aeth = be32_to_cpu(ohdr->u.aeth);
-			if ((aeth >> 29) == 0)
-				qib_get_credit(qp, aeth);
+			if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
+				rvt_get_credit(qp, aeth);
 		}
 		goto ack_done;
 	}
@@ -1461,8 +1403,7 @@ read_middle:
 		 * We got a response so update the timeout.
 		 * 4.096 usec. * (1 << qp->timeout)
 		 */
-		qp->s_flags |= RVT_S_TIMER;
-		mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
+		rvt_mod_retry_timer(qp);
 		if (qp->s_flags & RVT_S_WAIT_ACK) {
 			qp->s_flags &= ~RVT_S_WAIT_ACK;
 			qib_schedule_send(qp);
@@ -1764,25 +1705,6 @@ send_ack:
 	return 0;
 }
 
-void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
-{
-	unsigned long flags;
-	int lastwqe;
-
-	spin_lock_irqsave(&qp->s_lock, flags);
-	lastwqe = rvt_error_qp(qp, err);
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-
-	if (lastwqe) {
-		struct ib_event ev;
-
-		ev.device = qp->ibqp.device;
-		ev.element.qp = &qp->ibqp;
-		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
-		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
-	}
-}
-
 static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
 {
 	unsigned next;
@@ -1894,17 +1816,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
 		break;
 	}
 
-	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) {
-		qp->r_flags |= RVT_R_COMM_EST;
-		if (qp->ibqp.event_handler) {
-			struct ib_event ev;
-
-			ev.device = qp->ibqp.device;
-			ev.element.qp = &qp->ibqp;
-			ev.event = IB_EVENT_COMM_EST;
-			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
-		}
-	}
+	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
+		rvt_comm_est(qp);
 
 	/* OK, process the packet. */
 	switch (opcode) {
@@ -2196,7 +2109,7 @@ rnr_nak:
 	return;
 
 nack_op_err:
-	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
 	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
 	qp->r_ack_psn = qp->r_psn;
 	/* Queue NAK for later */
@@ -2210,7 +2123,7 @@ nack_op_err:
 nack_inv_unlck:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 nack_inv:
-	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
 	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
 	qp->r_ack_psn = qp->r_psn;
 	/* Queue NAK for later */
@@ -2224,7 +2137,7 @@ nack_inv:
 nack_acc_unlck:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 nack_acc:
-	qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
+	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
 	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
 	qp->r_ack_psn = qp->r_psn;
 send_ack:
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index e54a2feeeb10..17655cc3e6fe 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -38,44 +38,6 @@
 #include "qib_mad.h"
 
 /*
- * Convert the AETH RNR timeout code into the number of microseconds.
- */
-const u32 ib_qib_rnr_table[32] = {
-	655360,	/* 00: 655.36 */
-	10,	/* 01:    .01 */
-	20,	/* 02     .02 */
-	30,	/* 03:    .03 */
-	40,	/* 04:    .04 */
-	60,	/* 05:    .06 */
-	80,	/* 06:    .08 */
-	120,	/* 07:    .12 */
-	160,	/* 08:    .16 */
-	240,	/* 09:    .24 */
-	320,	/* 0A:    .32 */
-	480,	/* 0B:    .48 */
-	640,	/* 0C:    .64 */
-	960,	/* 0D:    .96 */
-	1280,	/* 0E:   1.28 */
-	1920,	/* 0F:   1.92 */
-	2560,	/* 10:   2.56 */
-	3840,	/* 11:   3.84 */
-	5120,	/* 12:   5.12 */
-	7680,	/* 13:   7.68 */
-	10240,	/* 14:  10.24 */
-	15360,	/* 15:  15.36 */
-	20480,	/* 16:  20.48 */
-	30720,	/* 17:  30.72 */
-	40960,	/* 18:  40.96 */
-	61440,	/* 19:  61.44 */
-	81920,	/* 1A:  81.92 */
-	122880,	/* 1B: 122.88 */
-	163840,	/* 1C: 163.84 */
-	245760,	/* 1D: 245.76 */
-	327680,	/* 1E: 327.68 */
-	491520	/* 1F: 491.52 */
-};
-
-/*
  * Validate a RWQE and fill in the SGE state.
  * Return 1 if OK.
  */
@@ -599,11 +561,8 @@ rnr_nak:
 	spin_lock_irqsave(&sqp->s_lock, flags);
 	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
 		goto clr_busy;
-	sqp->s_flags |= RVT_S_WAIT_RNR;
-	sqp->s_timer.function = qib_rc_rnr_retry;
-	sqp->s_timer.expires = jiffies +
-		usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
-	add_timer(&sqp->s_timer);
+	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
+			  IB_AETH_CREDIT_SHIFT);
 	goto clr_busy;
 
 op_err:
@@ -621,7 +580,7 @@ acc_err:
 	wc.status = IB_WC_LOC_PROT_ERR;
 err:
 	/* responder goes to error state */
-	qib_rc_error(qp, wc.status);
+	rvt_rc_error(qp, wc.status);
 
 serr:
 	spin_lock_irqsave(&sqp->s_lock, flags);
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 5b2d483451ad..b337b60fc40d 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -325,17 +325,8 @@ inv:
 		goto inv;
 	}
 
-	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) {
-		qp->r_flags |= RVT_R_COMM_EST;
-		if (qp->ibqp.event_handler) {
-			struct ib_event ev;
-
-			ev.device = qp->ibqp.device;
-			ev.element.qp = &qp->ibqp;
-			ev.event = IB_EVENT_COMM_EST;
-			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
-		}
-	}
+	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
+		rvt_comm_est(qp);
 
 	/* OK, process the packet. */
 	switch (opcode) {
@@ -527,7 +518,7 @@ drop:
 	return;
 
 op_err:
-	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
 	return;
 }
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index f45cad1198b0..ddd4e7458750 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -152,7 +152,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 
 	ret = qib_get_rwqe(qp, 0);
 	if (ret < 0) {
-		qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+		rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
 		goto bail_unlock;
 	}
 	if (!ret) {
@@ -177,7 +177,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 			     sizeof(grh), 1);
 		wc.wc_flags |= IB_WC_GRH;
 	} else
-		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
 	ssge.sg_list = swqe->sg_list + 1;
 	ssge.sge = *swqe->sg_list;
 	ssge.num_sge = swqe->wr.num_sge;
@@ -548,7 +548,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
 
 	ret = qib_get_rwqe(qp, 0);
 	if (ret < 0) {
-		qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+		rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
 		return;
 	}
 	if (!ret) {
@@ -567,7 +567,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
 			     sizeof(struct ib_grh), 1);
 		wc.wc_flags |= IB_WC_GRH;
 	} else
-		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
 	qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
 	rvt_put_ss(&qp->r_sge);
 	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 3e0677c51276..926f3c8eba69 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -144,8 +144,8 @@ qib_user_sdma_rb_search(struct rb_root *root, pid_t pid)
 	struct rb_node *node = root->rb_node;
 
 	while (node) {
-		sdma_rb_node = container_of(node,
-			struct qib_user_sdma_rb_node, node);
+		sdma_rb_node = rb_entry(node, struct qib_user_sdma_rb_node,
+					node);
 		if (pid < sdma_rb_node->pid)
 			node = node->rb_left;
 		else if (pid > sdma_rb_node->pid)
@@ -164,7 +164,7 @@ qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new)
 	struct qib_user_sdma_rb_node *got;
 
 	while (*node) {
-		got = container_of(*node, struct qib_user_sdma_rb_node, node);
+		got = rb_entry(*node, struct qib_user_sdma_rb_node, node);
 		parent = *node;
 		if (new->pid < got->pid)
 			node = &((*node)->rb_left);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 4b54c0ddd08a..b0b78e1cec92 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -129,78 +129,16 @@ void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
 	struct rvt_sge *sge = &ss->sge;
 
 	while (length) {
-		u32 len = sge->length;
+		u32 len = rvt_get_sge_length(sge, length);
 
-		if (len > length)
-			len = length;
-		if (len > sge->sge_length)
-			len = sge->sge_length;
-		BUG_ON(len == 0);
+		WARN_ON_ONCE(len == 0);
 		memcpy(sge->vaddr, data, len);
-		sge->vaddr += len;
-		sge->length -= len;
-		sge->sge_length -= len;
-		if (sge->sge_length == 0) {
-			if (release)
-				rvt_put_mr(sge->mr);
-			if (--ss->num_sge)
-				*sge = *ss->sg_list++;
-		} else if (sge->length == 0 && sge->mr->lkey) {
-			if (++sge->n >= RVT_SEGSZ) {
-				if (++sge->m >= sge->mr->mapsz)
-					break;
-				sge->n = 0;
-			}
-			sge->vaddr =
-				sge->mr->map[sge->m]->segs[sge->n].vaddr;
-			sge->length =
-				sge->mr->map[sge->m]->segs[sge->n].length;
-		}
+		rvt_update_sge(ss, len, release);
 		data += len;
 		length -= len;
 	}
 }
 
-/**
- * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
- * @ss: the SGE state
- * @length: the number of bytes to skip
- */
-void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
-{
-	struct rvt_sge *sge = &ss->sge;
-
-	while (length) {
-		u32 len = sge->length;
-
-		if (len > length)
-			len = length;
-		if (len > sge->sge_length)
-			len = sge->sge_length;
-		BUG_ON(len == 0);
-		sge->vaddr += len;
-		sge->length -= len;
-		sge->sge_length -= len;
-		if (sge->sge_length == 0) {
-			if (release)
-				rvt_put_mr(sge->mr);
-			if (--ss->num_sge)
-				*sge = *ss->sg_list++;
-		} else if (sge->length == 0 && sge->mr->lkey) {
-			if (++sge->n >= RVT_SEGSZ) {
-				if (++sge->m >= sge->mr->mapsz)
-					break;
-				sge->n = 0;
-			}
-			sge->vaddr =
-				sge->mr->map[sge->m]->segs[sge->n].vaddr;
-			sge->length =
-				sge->mr->map[sge->m]->segs[sge->n].length;
-		}
-		length -= len;
-	}
-}
-
 /*
  * Count the number of DMA descriptors needed to send length bytes of data.
  * Don't modify the qib_sge_state to get the count.
@@ -468,27 +406,6 @@ static void mem_timer(unsigned long data)
 	}
 }
 
-static void update_sge(struct rvt_sge_state *ss, u32 length)
-{
-	struct rvt_sge *sge = &ss->sge;
-
-	sge->vaddr += length;
-	sge->length -= length;
-	sge->sge_length -= length;
-	if (sge->sge_length == 0) {
-		if (--ss->num_sge)
-			*sge = *ss->sg_list++;
-	} else if (sge->length == 0 && sge->mr->lkey) {
-		if (++sge->n >= RVT_SEGSZ) {
-			if (++sge->m >= sge->mr->mapsz)
-				return;
-			sge->n = 0;
-		}
-		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
-		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
-	}
-}
-
 #ifdef __LITTLE_ENDIAN
 static inline u32 get_upper_bits(u32 data, u32 shift)
 {
@@ -646,11 +563,11 @@ static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
 				data = clear_upper_bytes(v, extra, 0);
 			}
 		}
-		update_sge(ss, len);
+		rvt_update_sge(ss, len, false);
 		length -= len;
 	}
 	/* Update address before sending packet. */
-	update_sge(ss, length);
+	rvt_update_sge(ss, length, false);
 	if (flush_wc) {
 		/* must flush early everything before trigger word */
 		qib_flush_wc();
@@ -1069,7 +986,7 @@ static int qib_verbs_send_pio(struct rvt_qp *qp, struct ib_header *ibhdr,
 		u32 *addr = (u32 *) ss->sge.vaddr;
 
 		/* Update address before sending packet. */
-		update_sge(ss, len);
+		rvt_update_sge(ss, len, false);
 		if (flush_wc) {
 			qib_pio_copy(piobuf, addr, dwords - 1);
 			/* must flush early everything before trigger word */
@@ -1659,6 +1576,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue;
 	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters;
 	dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp;
+	dd->verbs_dev.rdi.driver_f.notify_restart_rc = qib_restart_rc;
 	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu;
 	dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp;
 	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 94fd30fdedac..212e8ce71be8 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -270,8 +270,6 @@ int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
 int qib_get_counters(struct qib_pportdata *ppd,
 		     struct qib_verbs_counters *cntrs);
 
-__be32 qib_compute_aeth(struct rvt_qp *qp);
-
 /*
  * Functions provided by qib driver for rdmavt to use
  */
@@ -281,7 +279,7 @@ void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 void qib_notify_qp_reset(struct rvt_qp *qp);
 int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
 		  enum ib_qp_type type, u8 port, gfp_t gfp);
-
+void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
 #ifdef CONFIG_DEBUG_FS
 
 struct qib_qp_iter;
@@ -294,8 +292,6 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);
 
 #endif
 
-void qib_get_credit(struct rvt_qp *qp, u32 aeth);
-
 unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
 
 void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
@@ -308,8 +304,6 @@ int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
 void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
 		  int release);
 
-void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release);
-
 void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
 		int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
 
@@ -326,8 +320,6 @@ void qib_rc_rnr_retry(unsigned long arg);
 
 void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
 
-void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
-
 int qib_post_ud_send(struct rvt_qp *qp, struct ib_send_wr *wr);
 
 void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
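For readers following the qib-to-rdmavt consolidation above: the sketch below re-implements, as a standalone user-space C program, the two lookup tables this diff deletes, the AETH credit table from qib_qp.c (binary-searched to encode available receive WQEs into the 5-bit AETH credit field) and the RNR-timeout table from qib_ruc.c (5-bit AETH code to microseconds). It is illustrative only, not the rdmavt implementation behind rvt_compute_aeth(), rvt_get_credit(), and rvt_add_rnr_timer(); the macro names are local stand-ins for the removed QIB_AETH_* defines shown in the qib_common.h hunk.

```c
#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the removed QIB_AETH_* defines (see qib_common.h hunk). */
#define AETH_CREDIT_SHIFT 24
#define AETH_CREDIT_MASK  0x1Fu
#define MSN_MASK          0xFFFFFFu

/* Credit code -> RWQE count, from the removed credit_table[] in qib_qp.c. */
static const uint32_t credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768
};

/* RNR code -> microseconds, from the removed ib_qib_rnr_table[] in qib_ruc.c. */
static const uint32_t rnr_table_us[32] = {
	655360, 10, 20, 30, 40, 60, 80, 120, 160, 240, 320, 480,
	640, 960, 1280, 1920, 2560, 3840, 5120, 7680, 10240, 15360,
	20480, 30720, 40960, 61440, 81920, 122880, 163840, 245760,
	327680, 491520
};

/* Binary search for the credit code, exactly as qib_compute_aeth() did:
 * settle on the largest code whose table value does not exceed `credits`. */
static uint32_t aeth_credit_code(uint32_t credits)
{
	uint32_t min = 0, max = 31, x;

	for (;;) {
		x = (min + max) / 2;
		if (credit_table[x] == credits)
			break;
		if (credit_table[x] > credits)
			max = x;
		else if (min == x)
			break;
		else
			min = x;
	}
	return x;
}

/* Pack MSN and credit code into a host-order AETH word. */
static uint32_t compute_aeth(uint32_t msn, uint32_t credits)
{
	return (msn & MSN_MASK) | (aeth_credit_code(credits) << AETH_CREDIT_SHIFT);
}

int main(void)
{
	uint32_t aeth = compute_aeth(0x123456, 100);
	uint32_t code = (aeth >> AETH_CREDIT_SHIFT) & AETH_CREDIT_MASK;

	/* 100 available RWQEs rounds down to code 0xD (96 credits). */
	printf("aeth=0x%08x credit_code=0x%x (%u credits)\n",
	       (unsigned)aeth, (unsigned)code, (unsigned)credit_table[code]);

	/* The same 5-bit AETH field carries the RNR code in an RNR NAK, which
	 * is why the qib_ruc.c hunk can pass r_min_rnr_timer << SHIFT
	 * straight to rvt_add_rnr_timer(). */
	printf("rnr code 0x0C -> %u us\n", (unsigned)rnr_table_us[0x0C]);
	return 0;
}
```

Note that the search rounds down: advertising a credit code larger than the RWQEs actually posted would let the sender overrun the receive queue, so under-reporting is the safe direction.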