author    Dave Jones <davej@redhat.com>  2006-09-05 17:20:21 -0400
committer Dave Jones <davej@redhat.com>  2006-09-05 17:20:21 -0400
commit    115b384cf87249d76adb0b21aca11ee22128927d (patch)
tree      f39a2a54863e9d82d1196906f92c82ab5991c6af /drivers/infiniband/hw/mthca
parent    8eb7925f93af75e66a240d148efdec212f95bcb7 (diff)
parent    c336923b668fdcf0312efbec3b44895d713f4d81 (diff)
Merge ../linus
Diffstat (limited to 'drivers/infiniband/hw/mthca')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_allocator.c  | 30
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c       |  6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c   | 11
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h   |  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c         | 54
5 files changed, 70 insertions, 35 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index 9ba3211cef7c..f930e55b58fc 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -41,9 +41,11 @@
 /* Trivial bitmap-based allocator */
 u32 mthca_alloc(struct mthca_alloc *alloc)
 {
+        unsigned long flags;
         u32 obj;
 
-        spin_lock(&alloc->lock);
+        spin_lock_irqsave(&alloc->lock, flags);
+
         obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
         if (obj >= alloc->max) {
                 alloc->top = (alloc->top + alloc->max) & alloc->mask;
@@ -56,19 +58,24 @@ u32 mthca_alloc(struct mthca_alloc *alloc)
         } else
                 obj = -1;
 
-        spin_unlock(&alloc->lock);
+        spin_unlock_irqrestore(&alloc->lock, flags);
 
         return obj;
 }
 
 void mthca_free(struct mthca_alloc *alloc, u32 obj)
 {
+        unsigned long flags;
+
         obj &= alloc->max - 1;
-        spin_lock(&alloc->lock);
+
+        spin_lock_irqsave(&alloc->lock, flags);
+
         clear_bit(obj, alloc->table);
         alloc->last = min(alloc->last, obj);
         alloc->top = (alloc->top + alloc->max) & alloc->mask;
-        spin_unlock(&alloc->lock);
+
+        spin_unlock_irqrestore(&alloc->lock, flags);
 }
 
 int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
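The spin_lock() to spin_lock_irqsave() conversion above makes the allocator bitmap safe to touch from any context, including paths that run with interrupts disabled (presumably the motivation for the change). A minimal sketch of the pattern, with hypothetical names (my_lock, my_bitmap_alloc) and assuming <linux/spinlock.h> and <linux/bitops.h>:

        static DEFINE_SPINLOCK(my_lock);

        /* Allocate the first free bit in a bitmap, IRQ-safely. */
        static u32 my_bitmap_alloc(unsigned long *table, u32 max)
        {
                unsigned long flags;
                u32 obj;

                spin_lock_irqsave(&my_lock, flags);     /* disables local IRQs, saves prior state */

                obj = find_first_zero_bit(table, max);
                if (obj < max)
                        set_bit(obj, table);
                else
                        obj = -1;                       /* bitmap full */

                spin_unlock_irqrestore(&my_lock, flags);        /* restores prior IRQ state */

                return obj;
        }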
@@ -108,14 +115,15 @@ void mthca_alloc_cleanup(struct mthca_alloc *alloc)
  * serialize access to the array.
  */
 
+#define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1)
+
 void *mthca_array_get(struct mthca_array *array, int index)
 {
         int p = (index * sizeof (void *)) >> PAGE_SHIFT;
 
-        if (array->page_list[p].page) {
-                int i = index & (PAGE_SIZE / sizeof (void *) - 1);
-                return array->page_list[p].page[i];
-        } else
+        if (array->page_list[p].page)
+                return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
+        else
                 return NULL;
 }
 
@@ -130,8 +138,7 @@ int mthca_array_set(struct mthca_array *array, int index, void *value)
         if (!array->page_list[p].page)
                 return -ENOMEM;
 
-        array->page_list[p].page[index & (PAGE_SIZE / sizeof (void *) - 1)] =
-                value;
+        array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
         ++array->page_list[p].used;
 
         return 0;
@@ -144,7 +151,8 @@ void mthca_array_clear(struct mthca_array *array, int index)
         if (--array->page_list[p].used == 0) {
                 free_page((unsigned long) array->page_list[p].page);
                 array->page_list[p].page = NULL;
-        }
+        } else
+                array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL;
 
         if (array->page_list[p].used < 0)
                 pr_debug("Array %p index %d page %d with ref count %d < 0\n",
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 557cde3a4563..7b82c1907f04 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -967,12 +967,12 @@ static struct {
 } mthca_hca_table[] = {
         [TAVOR]        = { .latest_fw = MTHCA_FW_VER(3, 4, 0),
                            .flags     = 0 },
-        [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 400),
+        [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 600),
                            .flags     = MTHCA_FLAG_PCIE },
-        [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0),
+        [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 400),
                            .flags     = MTHCA_FLAG_MEMFREE |
                                         MTHCA_FLAG_PCIE },
-        [SINAI]        = { .latest_fw = MTHCA_FW_VER(1, 0, 800),
+        [SINAI]        = { .latest_fw = MTHCA_FW_VER(1, 1, 0),
                            .flags     = MTHCA_FLAG_MEMFREE |
                                         MTHCA_FLAG_PCIE |
                                         MTHCA_FLAG_SINAI_OPT }
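This hunk only raises the "latest known firmware" entry for each HCA family so the driver's old-firmware warning stays current. MTHCA_FW_VER() packs major/minor/subminor into one integer so versions compare numerically; a sketch of the idea (field widths assumed from the driver's convention, not shown in this hunk):

        #define MTHCA_FW_VER(major, minor, subminor) \
                (((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor))

        /* e.g. at probe time the driver can then warn about stale firmware: */
        if (dev->fw_ver < mthca_hca_table[hca_type].latest_fw)
                mthca_warn(dev, "firmware is older than the latest known version\n");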
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 230ae21db8fd..265b1d1c4a62 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1287,11 +1287,7 @@ int mthca_register_device(struct mthca_dev *dev)
                 (1ull << IB_USER_VERBS_CMD_MODIFY_QP)    |
                 (1ull << IB_USER_VERBS_CMD_DESTROY_QP)   |
                 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
-                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
-                (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)   |
-                (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)   |
-                (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)    |
-                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
+                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
         dev->ib_dev.node_type = IB_NODE_CA;
         dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
         dev->ib_dev.dma_device = &dev->pdev->dev;
@@ -1316,6 +1312,11 @@ int mthca_register_device(struct mthca_dev *dev)
                 dev->ib_dev.modify_srq  = mthca_modify_srq;
                 dev->ib_dev.query_srq   = mthca_query_srq;
                 dev->ib_dev.destroy_srq = mthca_destroy_srq;
+                dev->ib_dev.uverbs_cmd_mask |=
+                        (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)  |
+                        (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)  |
+                        (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)   |
+                        (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
 
                 if (mthca_is_memfree(dev))
                         dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
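Moving the four SRQ command bits out of the unconditional initializer and into this SRQ-only block keeps uverbs_cmd_mask honest: the SRQ verbs are now advertised to userspace only together with the SRQ method pointers set just above, instead of for every device. The uverbs layer rejects commands whose bit is clear, roughly (paraphrased check, not code from this patch):

        if (!(ib_dev->uverbs_cmd_mask & (1ull << command)))
                return -EINVAL;         /* verb not supported by this device */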
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 8de2887ba15c..9a5bece3fa5c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -136,8 +136,8 @@ struct mthca_ah {
  * We have one global lock that protects dev->cq/qp_table. Each
  * struct mthca_cq/qp also has its own lock. An individual qp lock
  * may be taken inside of an individual cq lock. Both cqs attached to
- * a qp may be locked, with the send cq locked first. No other
- * nesting should be done.
+ * a qp may be locked, with the cq with the lower cqn locked first.
+ * No other nesting should be done.
  *
  * Each struct mthca_cq/qp also has a ref count, protected by the
  * corresponding table lock. The pointer from the cq/qp_table to the
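The new rule in this comment matches mthca_lock_cqs()/mthca_unlock_cqs() added in the mthca_qp.c diff below. Ordering the two CQ locks by CQ number gives every code path one global order, which the old "send cq first" rule could not, because two QPs may attach the same pair of CQs in opposite roles:

        QP1 (send=cqA, recv=cqB): takes cqA, then waits for cqB
        QP2 (send=cqB, recv=cqA): takes cqB, then waits for cqA -> AB-BA deadlock

With "lower cqn first", both paths take min(cqn) before max(cqn), so that interleaving cannot arise.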
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index cd8b6721ac9c..2e8f6f36e0a5 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -99,6 +99,10 @@ enum {
         MTHCA_QP_BIT_RSC = 1 << 3
 };
 
+enum {
+        MTHCA_SEND_DOORBELL_FENCE = 1 << 5
+};
+
 struct mthca_qp_path {
         __be32 port_pkey;
         u8     rnr_retry;
@@ -1259,6 +1263,32 @@ int mthca_alloc_qp(struct mthca_dev *dev,
         return 0;
 }
 
+static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+{
+        if (send_cq == recv_cq)
+                spin_lock_irq(&send_cq->lock);
+        else if (send_cq->cqn < recv_cq->cqn) {
+                spin_lock_irq(&send_cq->lock);
+                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+        } else {
+                spin_lock_irq(&recv_cq->lock);
+                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+        }
+}
+
+static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+{
+        if (send_cq == recv_cq)
+                spin_unlock_irq(&send_cq->lock);
+        else if (send_cq->cqn < recv_cq->cqn) {
+                spin_unlock(&recv_cq->lock);
+                spin_unlock_irq(&send_cq->lock);
+        } else {
+                spin_unlock(&send_cq->lock);
+                spin_unlock_irq(&recv_cq->lock);
+        }
+}
+
 int mthca_alloc_sqp(struct mthca_dev *dev,
                     struct mthca_pd *pd,
                     struct mthca_cq *send_cq,
@@ -1311,17 +1341,13 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
          * Lock CQs here, so that CQ polling code can do QP lookup
          * without taking a lock.
          */
-        spin_lock_irq(&send_cq->lock);
-        if (send_cq != recv_cq)
-                spin_lock(&recv_cq->lock);
+        mthca_lock_cqs(send_cq, recv_cq);
 
         spin_lock(&dev->qp_table.lock);
         mthca_array_clear(&dev->qp_table.qp, mqpn);
         spin_unlock(&dev->qp_table.lock);
 
-        if (send_cq != recv_cq)
-                spin_unlock(&recv_cq->lock);
-        spin_unlock_irq(&send_cq->lock);
+        mthca_unlock_cqs(send_cq, recv_cq);
 
 err_out:
         dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
@@ -1355,9 +1381,7 @@ void mthca_free_qp(struct mthca_dev *dev,
          * Lock CQs here, so that CQ polling code can do QP lookup
          * without taking a lock.
          */
-        spin_lock_irq(&send_cq->lock);
-        if (send_cq != recv_cq)
-                spin_lock(&recv_cq->lock);
+        mthca_lock_cqs(send_cq, recv_cq);
 
         spin_lock(&dev->qp_table.lock);
         mthca_array_clear(&dev->qp_table.qp,
@@ -1365,9 +1389,7 @@ void mthca_free_qp(struct mthca_dev *dev,
                           qp->qpn & (dev->limits.num_qps - 1));
         --qp->refcount;
         spin_unlock(&dev->qp_table.lock);
 
-        if (send_cq != recv_cq)
-                spin_unlock(&recv_cq->lock);
-        spin_unlock_irq(&send_cq->lock);
+        mthca_unlock_cqs(send_cq, recv_cq);
 
         wait_event(qp->wait, !get_qp_refcount(dev, qp));
@@ -1502,7 +1524,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         int i;
         int size;
         int size0 = 0;
-        u32 f0 = 0;
+        u32 f0;
         int ind;
         u8 op0 = 0;
 
@@ -1686,6 +1708,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                 if (!size0) {
                         size0 = size;
                         op0   = mthca_opcode[wr->opcode];
+                        f0    = wr->send_flags & IB_SEND_FENCE ?
+                                MTHCA_SEND_DOORBELL_FENCE : 0;
                 }
 
                 ++ind;
@@ -1843,7 +1867,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         int i;
         int size;
         int size0 = 0;
-        u32 f0 = 0;
+        u32 f0;
         int ind;
         u8 op0 = 0;
 
@@ -2051,6 +2075,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                 if (!size0) {
                         size0 = size;
                         op0   = mthca_opcode[wr->opcode];
+                        f0    = wr->send_flags & IB_SEND_FENCE ?
+                                MTHCA_SEND_DOORBELL_FENCE : 0;
                 }
 
                 ++ind;
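In both post-send paths, f0 carries the fence bit for the send doorbell. It was previously hard-wired to 0, so IB_SEND_FENCE on the first work request of a batch never reached the hardware; now the first WQE queued in the call (the !size0 case) decides it. Dropping the "= 0" initializer is safe because the doorbell is rung only when at least one WQE was queued, and queuing the first WQE always assigns f0. Illustrative condensation (the real doorbell write uses mthca_write64() elsewhere in mthca_qp.c, outside these hunks):

        if (nreq) {     /* >= 1 WQE queued, so op0, size0 and f0 were all set */
                u32 doorbell_low = f0 | op0;    /* fence bit (1 << 5) plus first opcode */
                /* ... combined with size0 and the QP number, then written to the
                 * send doorbell register to hand the new WQEs to the HCA ... */
        }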