author    | Ralph Campbell <ralph.campbell@qlogic.com> | 2007-07-06 12:48:23 -0700
committer | Roland Dreier <rolandd@cisco.com>          | 2007-07-09 20:12:26 -0700
commit    | 4fc570bcbe77f823aae183dd824869f79e74cc97 (patch)
tree      | 3955946608a973b06db8b7e5b3ec319354a6fa73 /drivers/infiniband
parent    | 06cc85086e6896939f8c68f8518224748f6b0b2f (diff)
IB/ipath: Add barrier before updating WC head in shared memory
Add a barrier to make sure the CPU doesn't reorder writes to memory: user
programs can poll on the head index update in shared memory, so the queue
entry must be written before the head index is advanced. A matching read
barrier on the consumer side ensures the entry is read only after the head
index. (A standalone sketch of this ordering protocol follows the diff below.)
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_cq.c    | 5
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_ruc.c   | 2
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_srq.c   | 2
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_ud.c    | 2
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_verbs.c | 2
5 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 9014ef63eedc..a6f04d27ec57 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -90,6 +90,8 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
 	wc->queue[head].sl = entry->sl;
 	wc->queue[head].dlid_path_bits = entry->dlid_path_bits;
 	wc->queue[head].port_num = entry->port_num;
+	/* Make sure queue entry is written before the head index. */
+	smp_wmb();
 	wc->head = next;
 
 	if (cq->notify == IB_CQ_NEXT_COMP ||
@@ -139,7 +141,8 @@ int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 
 		if (tail == wc->head)
 			break;
-
+		/* Make sure entry is read after head index is read. */
+		smp_rmb();
 		qp = ipath_lookup_qpn(&to_idev(cq->ibcq.device)->qp_table,
 				      wc->queue[tail].qp_num);
 		entry->qp = &qp->ibqp;
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 854deb56ac02..85256747d8a1 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -194,6 +194,8 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 			ret = 0;
 			goto bail;
 		}
+		/* Make sure entry is read after head index is read. */
+		smp_rmb();
 		wqe = get_rwqe_ptr(rq, tail);
 		if (++tail >= rq->size)
 			tail = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index 14cbbd633d34..40c36ec19016 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -80,6 +80,8 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		wqe->num_sge = wr->num_sge;
 		for (i = 0; i < wr->num_sge; i++)
 			wqe->sg_list[i] = wr->sg_list[i];
+		/* Make sure queue entry is written before the head index. */
+		smp_wmb();
 		wq->head = next;
 		spin_unlock_irqrestore(&srq->rq.lock, flags);
 	}
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 38ba771b3efe..f9a3338a5fb7 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -176,6 +176,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
 			dev->n_pkt_drops++;
 			goto bail_sge;
 		}
+		/* Make sure entry is read after head index is read. */
+		smp_rmb();
 		wqe = get_rwqe_ptr(rq, tail);
 		if (++tail >= rq->size)
 			tail = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 0aecded6af86..c76ea0e0b024 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -327,6 +327,8 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		wqe->num_sge = wr->num_sge;
 		for (i = 0; i < wr->num_sge; i++)
 			wqe->sg_list[i] = wr->sg_list[i];
+		/* Make sure queue entry is written before the head index. */
+		smp_wmb();
 		wq->head = next;
 		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
 	}
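
The ordering the patch enforces is the classic single-producer/single-consumer
publish protocol: the producer writes the queue entry, issues a write barrier,
then advances the shared head index; the consumer reads the head index, issues
a read barrier, then reads the entry. Below is a minimal standalone sketch of
that protocol, assuming C11 atomics as userspace stand-ins for the kernel's
smp_wmb()/smp_rmb(); the ring structure and function names (struct ring,
ring_post, ring_poll) are illustrative, not taken from the ipath driver.

/*
 * Sketch only, not ipath code: a single-producer/single-consumer ring
 * following the same publish protocol as the patch.
 */
#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 64			/* illustrative fixed size */

struct ring {
	uint64_t entries[RING_SIZE];
	_Atomic uint32_t head;		/* advanced by the producer only */
	_Atomic uint32_t tail;		/* advanced by the consumer only */
};

/* Producer side: write the entry, then publish the new head index. */
static int ring_post(struct ring *r, uint64_t val)
{
	uint32_t head = atomic_load_explicit(&r->head, memory_order_relaxed);
	uint32_t next = (head + 1) % RING_SIZE;

	if (next == atomic_load_explicit(&r->tail, memory_order_acquire))
		return -1;		/* ring full */
	r->entries[head] = val;
	/* smp_wmb() analogue: entry must be visible before the head index. */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&r->head, next, memory_order_relaxed);
	return 0;
}

/* Consumer side: read the head index, then the entry it covers. */
static int ring_poll(struct ring *r, uint64_t *val)
{
	uint32_t tail = atomic_load_explicit(&r->tail, memory_order_relaxed);

	if (tail == atomic_load_explicit(&r->head, memory_order_relaxed))
		return -1;		/* ring empty */
	/* smp_rmb() analogue: read the entry only after the head index. */
	atomic_thread_fence(memory_order_acquire);
	*val = r->entries[tail];
	atomic_store_explicit(&r->tail, (tail + 1) % RING_SIZE,
			      memory_order_release);
	return 0;
}

Without the release fence, the store to head may become visible before the
entry itself, so a peer polling head (as user programs do on the completion
queue in shared memory) could read a stale entry; the acquire fence on the
consumer side closes the symmetric reordering of the two reads.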