author     Jiri Kosina <jkosina@suse.cz>  2010-06-16 18:08:13 +0200
committer  Jiri Kosina <jkosina@suse.cz>  2010-06-16 18:08:13 +0200
commit     f1bbbb6912662b9f6070c5bfc4ca9eb1f06a9d5b
tree       c2c130a74be25b0b2dff992e1a195e2728bdaadd  /drivers/infiniband/hw/cxgb4
parent     fd0961ff67727482bb20ca7e8ea97b83e9de2ddb
parent     7e27d6e778cd87b6f2415515d7127eba53fe5d02
Merge branch 'master' into for-next
Diffstat (limited to 'drivers/infiniband/hw/cxgb4')
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c         |  6
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c     | 50
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h   |  1
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c        | 11
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c   |  4
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c         | 33
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h         | 76
7 files changed, 105 insertions, 76 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index fb1aafcc294f..2447f5295482 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -373,6 +373,7 @@ static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
 				 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
 				 V_CQE_OPCODE(FW_RI_READ_REQ) |
 				 V_CQE_TYPE(1));
+	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
 }
 
 /*
@@ -780,6 +781,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	/* account for the status page. */
 	entries++;
 
+	/* IQ needs one extra entry to differentiate full vs empty. */
+	entries++;
+
 	/*
 	 * entries must be multiple of 16 for HW.
 	 */
@@ -801,7 +805,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	chp->rhp = rhp;
 	chp->cq.size--;				/* status page */
-	chp->ibcq.cqe = chp->cq.size;
+	chp->ibcq.cqe = chp->cq.size - 1;
 	spin_lock_init(&chp->lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index be23b5eab13b..d870f9c17c1e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -306,7 +306,8 @@ static void c4iw_remove(struct c4iw_dev *dev)
 	PDBG("%s c4iw_dev %p\n", __func__,  dev);
 	cancel_delayed_work_sync(&dev->db_drop_task);
 	list_del(&dev->entry);
-	c4iw_unregister_device(dev);
+	if (dev->registered)
+		c4iw_unregister_device(dev);
 	c4iw_rdev_close(&dev->rdev);
 	idr_destroy(&dev->cqidr);
 	idr_destroy(&dev->qpidr);
@@ -343,12 +344,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	list_add_tail(&devp->entry, &dev_list);
 	mutex_unlock(&dev_mutex);
 
-	if (c4iw_register_device(devp)) {
-		printk(KERN_ERR MOD "Unable to register device\n");
-		mutex_lock(&dev_mutex);
-		c4iw_remove(devp);
-		mutex_unlock(&dev_mutex);
-	}
 	if (c4iw_debugfs_root) {
 		devp->debugfs_root = debugfs_create_dir(
 					pci_name(devp->rdev.lldi.pdev),
@@ -379,9 +374,6 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 
 	for (i = 0; i < dev->rdev.lldi.nrxq; i++)
 		PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
-
-	printk(KERN_INFO MOD "Initialized device %s\n",
-	       pci_name(dev->rdev.lldi.pdev));
 out:
 	return dev;
 }
@@ -471,7 +463,41 @@ nomem:
 
 static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 {
+	struct c4iw_dev *dev = handle;
+
 	PDBG("%s new_state %u\n", __func__, new_state);
+	switch (new_state) {
+	case CXGB4_STATE_UP:
+		printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev));
+		if (!dev->registered) {
+			int ret;
+			ret = c4iw_register_device(dev);
+			if (ret)
+				printk(KERN_ERR MOD
+				       "%s: RDMA registration failed: %d\n",
+				       pci_name(dev->rdev.lldi.pdev), ret);
+		}
+		break;
+	case CXGB4_STATE_DOWN:
+		printk(KERN_INFO MOD "%s: Down\n",
+		       pci_name(dev->rdev.lldi.pdev));
+		if (dev->registered)
+			c4iw_unregister_device(dev);
+		break;
+	case CXGB4_STATE_START_RECOVERY:
+		printk(KERN_INFO MOD "%s: Fatal Error\n",
+		       pci_name(dev->rdev.lldi.pdev));
+		if (dev->registered)
+			c4iw_unregister_device(dev);
+		break;
+	case CXGB4_STATE_DETACH:
+		printk(KERN_INFO MOD "%s: Detach\n",
+		       pci_name(dev->rdev.lldi.pdev));
+		mutex_lock(&dev_mutex);
+		c4iw_remove(dev);
+		mutex_unlock(&dev_mutex);
+		break;
+	}
 	return 0;
 }
 
@@ -504,14 +530,12 @@ static void __exit c4iw_exit_module(void)
 {
 	struct c4iw_dev *dev, *tmp;
 
-	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
-
 	mutex_lock(&dev_mutex);
 	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
 		c4iw_remove(dev);
 	}
 	mutex_unlock(&dev_mutex);
-
+	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 	c4iw_cm_term();
 	debugfs_remove_recursive(c4iw_debugfs_root);
 }
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index a6269981e815..277ab589b44d 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -152,6 +152,7 @@ struct c4iw_dev {
 	struct list_head entry;
 	struct delayed_work db_drop_task;
 	struct dentry *debugfs_root;
+	u8 registered;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index e54ff6d25691..7f94da1a2437 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -712,8 +712,10 @@ struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;
 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
-	if (!mhp)
+	if (!mhp) {
+		ret = -ENOMEM;
 		goto err;
+	}
 
 	mhp->rhp = rhp;
 	ret = alloc_pbl(mhp, pbl_depth);
@@ -730,8 +732,10 @@ struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
 	mhp->attr.state = 1;
 	mmid = (stag) >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+		ret = -ENOMEM;
 		goto err3;
+	}
 
 	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
 	return &(mhp->ibmr);
@@ -755,9 +759,6 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
 	dma_addr_t dma_addr;
 	int size = sizeof *c4pl + page_list_len * sizeof(u64);
 
-	if (page_list_len > T4_MAX_FR_DEPTH)
-		return ERR_PTR(-EINVAL);
-
 	c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
 				  &dma_addr, GFP_KERNEL);
 	if (!c4pl)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index dfc49020bb9c..8f645c83a125 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -486,7 +486,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
 	dev->ibdev.iwcm->get_qp = c4iw_get_qp;
 
-	ret = ib_register_device(&dev->ibdev);
+	ret = ib_register_device(&dev->ibdev, NULL);
 	if (ret)
 		goto bail1;
 
@@ -496,6 +496,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 		if (ret)
 			goto bail2;
 	}
+	dev->registered = 1;
 	return 0;
 bail2:
 	ib_unregister_device(&dev->ibdev);
@@ -514,5 +515,6 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
 				   c4iw_class_attributes[i]);
 	ib_unregister_device(&dev->ibdev);
 	kfree(dev->ibdev.iwcm);
+	dev->registered = 0;
 	return;
 }
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index b321835dcf6e..646a2a5711f2 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -572,9 +572,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			err = build_rdma_write(wqe, wr, &len16);
 			break;
 		case IB_WR_RDMA_READ:
+		case IB_WR_RDMA_READ_WITH_INV:
 			fw_opcode = FW_RI_RDMA_READ_WR;
 			swsqe->opcode = FW_RI_READ_REQ;
-			fw_flags = 0;
+			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+				fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
+			else
+				fw_flags = 0;
 			err = build_rdma_read(wqe, wr, &len16);
 			if (err)
 				break;
@@ -588,6 +592,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			err = build_fastreg(wqe, wr, &len16);
 			break;
 		case IB_WR_LOCAL_INV:
+			if (wr->send_flags & IB_SEND_FENCE)
+				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
 			fw_opcode = FW_RI_INV_LSTAG_WR;
 			swsqe->opcode = FW_RI_LOCAL_INV;
 			err = build_inv_stag(wqe, wr, &len16);
@@ -1339,7 +1345,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	wait_event(qhp->wait, !qhp->ep);
 
 	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-	remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
 	atomic_dec(&qhp->refcnt);
 	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
 
@@ -1442,30 +1447,26 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	if (ret)
 		goto err2;
 
-	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid);
-	if (ret)
-		goto err3;
-
 	if (udata) {
 		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
 		if (!mm1) {
 			ret = -ENOMEM;
-			goto err4;
+			goto err3;
 		}
 		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
 		if (!mm2) {
 			ret = -ENOMEM;
-			goto err5;
+			goto err4;
 		}
 		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
 		if (!mm3) {
 			ret = -ENOMEM;
-			goto err6;
+			goto err5;
 		}
 		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
 		if (!mm4) {
 			ret = -ENOMEM;
-			goto err7;
+			goto err6;
 		}
 
 		uresp.qid_mask = rhp->rdev.qpmask;
@@ -1487,7 +1488,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 		spin_unlock(&ucontext->mmap_lock);
 		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
 		if (ret)
-			goto err8;
+			goto err7;
 		mm1->key = uresp.sq_key;
 		mm1->addr = virt_to_phys(qhp->wq.sq.queue);
 		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
@@ -1511,16 +1512,14 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
 	     qhp->wq.sq.qid);
 	return &qhp->ibqp;
-err8:
-	kfree(mm4);
 err7:
-	kfree(mm3);
+	kfree(mm4);
 err6:
-	kfree(mm2);
+	kfree(mm3);
 err5:
-	kfree(mm1);
+	kfree(mm2);
 err4:
-	remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
+	kfree(mm1);
 err3:
 	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
 err2:
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index d0e8af352408..1057cb96302e 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -41,11 +41,13 @@
 #define T4_MAX_NUM_QP (1<<16)
 #define T4_MAX_NUM_CQ (1<<15)
 #define T4_MAX_NUM_PD (1<<15)
-#define T4_MAX_PBL_SIZE 256
-#define T4_MAX_RQ_SIZE 1024
-#define T4_MAX_SQ_SIZE 1024
-#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE-1)
-#define T4_MAX_CQ_DEPTH 8192
+#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
+#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
+#define T4_MAX_IQ_SIZE (65520 - 1)
+#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
+#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
+#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
+#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
 #define T4_MAX_NUM_STAG (1<<15)
 #define T4_MAX_MR_SIZE (~0ULL - 1)
 #define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
@@ -79,12 +81,11 @@ struct t4_status_page {
 			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
 #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
 			sizeof(struct fw_ri_immd)))
-#define T4_MAX_FR_DEPTH 255
+#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
 
 #define T4_RQ_NUM_SLOTS 2
 #define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
-#define T4_MAX_RECV_SGE ((T4_RQ_NUM_BYTES - sizeof(struct fw_ri_recv_wr) - \
-			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
+#define T4_MAX_RECV_SGE 4
 
 union t4_wr {
 	struct fw_ri_res_wr res;
@@ -434,7 +435,7 @@ struct t4_cq {
 	struct c4iw_rdev *rdev;
 	u64 ugts;
 	size_t memsize;
-	u64 timestamp;
+	__be64 bits_type_ts;
 	u32 cqid;
 	u16 size; /* including status page */
 	u16 cidx;
@@ -449,25 +450,17 @@ struct t4_cq {
 static inline int t4_arm_cq(struct t4_cq *cq, int se)
 {
 	u32 val;
-	u16 inc;
-
-	do {
-		/*
-		 * inc must be less the both the max update value -and-
-		 * the size of the CQ.
-		 */
-		inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc :
-		      CIDXINC_MASK;
-		inc = inc <= (cq->size - 1) ? inc : (cq->size - 1);
-		if (inc == cq->cidx_inc)
-			val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) |
-			      INGRESSQID(cq->cqid);
-		else
-			val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) |
-			      INGRESSQID(cq->cqid);
-		cq->cidx_inc -= inc;
+
+	while (cq->cidx_inc > CIDXINC_MASK) {
+		val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
+		      INGRESSQID(cq->cqid);
 		writel(val, cq->gts);
-	} while (cq->cidx_inc);
+		cq->cidx_inc -= CIDXINC_MASK;
+	}
+	val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
+	      INGRESSQID(cq->cqid);
+	writel(val, cq->gts);
+	cq->cidx_inc = 0;
 	return 0;
 }
 
@@ -487,7 +480,9 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
 
 static inline void t4_hwcq_consume(struct t4_cq *cq)
 {
-	cq->cidx_inc++;
+	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
+	if (++cq->cidx_inc == cq->size)
+		cq->cidx_inc = 0;
 	if (++cq->cidx == cq->size) {
 		cq->cidx = 0;
 		cq->gen ^= 1;
@@ -501,20 +496,23 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
 
 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 {
-	int ret = 0;
-	u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts);
+	int ret;
+	u16 prev_cidx;
 
-	if (G_CQE_GENBIT(bits_type_ts) == cq->gen) {
-		*cqe = &cq->queue[cq->cidx];
-		cq->timestamp = G_CQE_TS(bits_type_ts);
-	} else if (G_CQE_TS(bits_type_ts) > cq->timestamp)
-		ret = -EOVERFLOW;
+	if (cq->cidx == 0)
+		prev_cidx = cq->size - 1;
 	else
-		ret = -ENODATA;
-	if (ret == -EOVERFLOW) {
-		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+		prev_cidx = cq->cidx - 1;
+
+	if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
+		ret = -EOVERFLOW;
 		cq->error = 1;
-	}
+		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
+		*cqe = &cq->queue[cq->cidx];
+		ret = 0;
+	} else
+		ret = -ENODATA;
 	return ret;
 }
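Aside (not part of the commit): the t4.h hunk above replaces the timestamp-based overflow heuristic with a simpler check. t4_hwcq_consume() now records the `bits_type_ts` stamp of each CQE it consumes, and t4_next_hw_cqe() declares overflow if the slot just behind `cidx` no longer carries that saved stamp, meaning the hardware has lapped the ring over unread entries. The following stand-alone user-space C sketch models that idea; all `fake_*` names, sizes, and the `main()` scenario are made up for illustration and are not driver code.

```c
/* Minimal model of the CQ-overflow check introduced in t4_next_hw_cqe(). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CQ_SIZE 8

struct fake_cqe {
	uint64_t bits_type_ts;	/* stand-in for the stamped t4_cqe field */
};

struct fake_cq {
	struct fake_cqe queue[CQ_SIZE];
	uint64_t bits_type_ts;	/* stamp saved at the last consume */
	unsigned int cidx;	/* next slot software will look at */
};

/* "Hardware" posts a completion by stamping a slot (wrapping modulo size). */
static void hw_post(struct fake_cq *cq, unsigned int slot, uint64_t stamp)
{
	cq->queue[slot % CQ_SIZE].bits_type_ts = stamp;
}

/* Software consumes the current entry, remembering its stamp. */
static void sw_consume(struct fake_cq *cq)
{
	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
	cq->cidx = (cq->cidx + 1) % CQ_SIZE;
}

/*
 * Overflow test: if the slot *behind* cidx no longer holds the stamp we saved
 * when we consumed it, the ring has wrapped over entries we never read.
 */
static int cq_overflowed(const struct fake_cq *cq)
{
	unsigned int prev = cq->cidx ? cq->cidx - 1 : CQ_SIZE - 1;

	return cq->queue[prev].bits_type_ts != cq->bits_type_ts;
}

int main(void)
{
	struct fake_cq cq;
	uint64_t stamp = 1;
	unsigned int i;

	memset(&cq, 0, sizeof(cq));

	/* Hardware posts two entries and software keeps up: no overflow. */
	hw_post(&cq, 0, stamp++);
	hw_post(&cq, 1, stamp++);
	sw_consume(&cq);
	sw_consume(&cq);
	printf("after keeping up:  overflow=%d\n", cq_overflowed(&cq));

	/* Hardware laps the ring while software is idle: overflow detected. */
	for (i = 2; i < 2 + CQ_SIZE + 1; i++)
		hw_post(&cq, i, stamp++);
	printf("after being lapped: overflow=%d\n", cq_overflowed(&cq));
	return 0;
}
```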