author     Linus Torvalds <torvalds@linux-foundation.org>  2015-12-10 14:42:22 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-12-10 14:42:22 -0800
commit     0bd0f1e6d40aa16c4d507b1fff27163a7e7711f5
tree       e4b54cc162edc66f0d530ac278ce786a4c57c114 /drivers
parent     a80c47daa818ead2644917d72ff2a934dd64fd16
parent     ab5cdc31630c7596d81ca8fbe7d695f10666f39b
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma fixes from Doug Ledford:
"Most are minor to important fixes.
There is one performance enhancement that I took on the grounds that
failing to check if other processes can run before running what's
intended to be a background, idle-time task is a bug, even though the
primary effect of the fix is to improve performance (and it was a very
simple patch)"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
IB/mlx5: Postpone remove_keys under knowledge of coming preemption
IB/mlx4: Use vmalloc for WR buffers when needed
IB/mlx4: Use correct order of variables in log message
iser-target: Remove explicit mlx4 work-around
mlx4: Expose correct max_sge_rd limit
IB/mad: Require CM send method for everything except ClassPortInfo
IB/cma: Add a missing rcu_read_unlock()
IB core: Fix ib_sg_to_pages()
IB/srp: Fix srp_map_sg_fr()
IB/srp: Fix indirect data buffer rkey endianness
IB/srp: Initialize dma_length in srp_map_idb
IB/srp: Fix possible send queue overflow
IB/srp: Fix a memory leak
IB/sa: Put netlink request into the request list before sending
IB/iser: use sector_div instead of do_div
IB/core: use RCU for uverbs id lookup
IB/qib: Minor fixes to qib per SFF 8636
IB/core: Fix user mode post wr corruption
IB/qib: Fix qib_mr structure
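
The "IB/mlx4: Use vmalloc for WR buffers when needed" entry above (and the matching srq.c change) applies a standard fallback-allocation pattern. A minimal sketch of that pattern, assuming this tree's three-argument __vmalloc() and using hypothetical helper names rather than the driver's own functions:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

/*
 * Hypothetical helpers showing the fallback used for the work-request ID
 * arrays: try a physically contiguous kmalloc first, fall back to vmalloc
 * when fragmentation makes the kmalloc fail, and always free with kvfree(),
 * which accepts memory from either allocator.
 */
static u64 *alloc_wrid_array(int wqe_cnt, gfp_t gfp)
{
        u64 *wrid = kmalloc_array(wqe_cnt, sizeof(u64), gfp);

        if (!wrid)
                wrid = __vmalloc(wqe_cnt * sizeof(u64), gfp, PAGE_KERNEL);
        return wrid;
}

static void free_wrid_array(u64 *wrid)
{
        kvfree(wrid);
}

kvfree() checks whether the pointer came from vmalloc or kmalloc, so the error and teardown paths do not need to remember which allocator succeeded.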
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/core/cma.c             |  5
-rw-r--r--  drivers/infiniband/core/mad.c             |  5
-rw-r--r--  drivers/infiniband/core/sa_query.c        | 32
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c      | 27
-rw-r--r--  drivers/infiniband/core/verbs.c           | 43
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c         |  2
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c           | 19
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c          | 11
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c           | 14
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.c      |  4
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h     |  2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c  |  2
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c   | 13
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c       | 48
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h       |  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c  |  2
16 files changed, 132 insertions, 102 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 944cd90417bc..d2d5d004f16d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1126,10 +1126,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
 
         rcu_read_lock();
         err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
-        if (err)
-                return false;
-
-        ret = FIB_RES_DEV(res) == net_dev;
+        ret = err == 0 && FIB_RES_DEV(res) == net_dev;
         rcu_read_unlock();
 
         return ret;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 8d8af7a41a30..2281de122038 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1811,6 +1811,11 @@ static int validate_mad(const struct ib_mad_hdr *mad_hdr,
                 if (qp_num == 0)
                         valid = 1;
         } else {
+                /* CM attributes other than ClassPortInfo only use Send method */
+                if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
+                    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
+                    (mad_hdr->method != IB_MGMT_METHOD_SEND))
+                        goto out;
                 /* Filter GSI packets sent to QP0 */
                 if (qp_num != 0)
                         valid = 1;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 2aba774f835b..a95a32ba596e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -512,7 +512,7 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
         return len;
 }
 
-static int ib_nl_send_msg(struct ib_sa_query *query)
+static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
 {
         struct sk_buff *skb = NULL;
         struct nlmsghdr *nlh;
@@ -526,7 +526,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
         if (len <= 0)
                 return -EMSGSIZE;
 
-        skb = nlmsg_new(len, GFP_KERNEL);
+        skb = nlmsg_new(len, gfp_mask);
         if (!skb)
                 return -ENOMEM;
 
@@ -544,7 +544,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
         /* Repair the nlmsg header length */
         nlmsg_end(skb, nlh);
 
-        ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
+        ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
         if (!ret)
                 ret = len;
         else
@@ -553,7 +553,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
         return ret;
 }
 
-static int ib_nl_make_request(struct ib_sa_query *query)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
 {
         unsigned long flags;
         unsigned long delay;
@@ -562,25 +562,27 @@ static int ib_nl_make_request(struct ib_sa_query *query)
         INIT_LIST_HEAD(&query->list);
         query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
 
+        /* Put the request on the list first.*/
         spin_lock_irqsave(&ib_nl_request_lock, flags);
-        ret = ib_nl_send_msg(query);
-        if (ret <= 0) {
-                ret = -EIO;
-                goto request_out;
-        } else {
-                ret = 0;
-        }
-
         delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
         query->timeout = delay + jiffies;
         list_add_tail(&query->list, &ib_nl_request_list);
         /* Start the timeout if this is the only request */
         if (ib_nl_request_list.next == &query->list)
                 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
-
-request_out:
         spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
+        ret = ib_nl_send_msg(query, gfp_mask);
+        if (ret <= 0) {
+                ret = -EIO;
+                /* Remove the request */
+                spin_lock_irqsave(&ib_nl_request_lock, flags);
+                list_del(&query->list);
+                spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+        } else {
+                ret = 0;
+        }
+
         return ret;
 }
 
@@ -1108,7 +1110,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
 
         if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
                 if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
-                        if (!ib_nl_make_request(query))
+                        if (!ib_nl_make_request(query, gfp_mask))
                                 return id;
                 }
                 ib_sa_disable_local_svc(query);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 94816aeb95a0..1c02deab068f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -62,9 +62,11 @@ static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
  * The ib_uobject locking scheme is as follows:
  *
  * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
- *   needs to be held during all idr operations.  When an object is
+ *   needs to be held during all idr write operations.  When an object is
  *   looked up, a reference must be taken on the object's kref before
- *   dropping this lock.
+ *   dropping this lock.  For read operations, the rcu_read_lock()
+ *   and rcu_write_lock() but similarly the kref reference is grabbed
+ *   before the rcu_read_unlock().
  *
  * - Each object also has an rwsem.  This rwsem must be held for
  *   reading while an operation that uses the object is performed.
@@ -96,7 +98,7 @@ static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
 
 static void release_uobj(struct kref *kref)
 {
-        kfree(container_of(kref, struct ib_uobject, ref));
+        kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
 }
 
 static void put_uobj(struct ib_uobject *uobj)
@@ -145,7 +147,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
 {
         struct ib_uobject *uobj;
 
-        spin_lock(&ib_uverbs_idr_lock);
+        rcu_read_lock();
         uobj = idr_find(idr, id);
         if (uobj) {
                 if (uobj->context == context)
@@ -153,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
                 else
                         uobj = NULL;
         }
-        spin_unlock(&ib_uverbs_idr_lock);
+        rcu_read_unlock();
 
         return uobj;
 }
@@ -2446,6 +2448,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
         int                             i, sg_ind;
         int                             is_ud;
         ssize_t                         ret = -EINVAL;
+        size_t                          next_size;
 
         if (copy_from_user(&cmd, buf, sizeof cmd))
                 return -EFAULT;
@@ -2490,7 +2493,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
                                 goto out_put;
                         }
 
-                        ud = alloc_wr(sizeof(*ud), user_wr->num_sge);
+                        next_size = sizeof(*ud);
+                        ud = alloc_wr(next_size, user_wr->num_sge);
                         if (!ud) {
                                 ret = -ENOMEM;
                                 goto out_put;
@@ -2511,7 +2515,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
                            user_wr->opcode == IB_WR_RDMA_READ) {
                         struct ib_rdma_wr *rdma;
 
-                        rdma = alloc_wr(sizeof(*rdma), user_wr->num_sge);
+                        next_size = sizeof(*rdma);
+                        rdma = alloc_wr(next_size, user_wr->num_sge);
                         if (!rdma) {
                                 ret = -ENOMEM;
                                 goto out_put;
@@ -2525,7 +2530,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
                            user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
                         struct ib_atomic_wr *atomic;
 
-                        atomic = alloc_wr(sizeof(*atomic), user_wr->num_sge);
+                        next_size = sizeof(*atomic);
+                        atomic = alloc_wr(next_size, user_wr->num_sge);
                         if (!atomic) {
                                 ret = -ENOMEM;
                                 goto out_put;
@@ -2540,7 +2546,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
                 } else if (user_wr->opcode == IB_WR_SEND ||
                            user_wr->opcode == IB_WR_SEND_WITH_IMM ||
                            user_wr->opcode == IB_WR_SEND_WITH_INV) {
-                        next = alloc_wr(sizeof(*next), user_wr->num_sge);
+                        next_size = sizeof(*next);
+                        next = alloc_wr(next_size, user_wr->num_sge);
                         if (!next) {
                                 ret = -ENOMEM;
                                 goto out_put;
@@ -2572,7 +2579,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 
                 if (next->num_sge) {
                         next->sg_list = (void *) next +
-                                ALIGN(sizeof *next, sizeof (struct ib_sge));
+                                ALIGN(next_size, sizeof(struct ib_sge));
                         if (copy_from_user(next->sg_list,
                                            buf + sizeof cmd +
                                            cmd.wr_count * cmd.wqe_size +
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 043a60ee6836..545906dec26d 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1516,7 +1516,7 @@ EXPORT_SYMBOL(ib_map_mr_sg);
  * @sg_nents:      number of entries in sg
  * @set_page:      driver page assignment function pointer
  *
- * Core service helper for drivers to covert the largest
+ * Core service helper for drivers to convert the largest
  * prefix of given sg list to a page vector. The sg list
  * prefix converted is the prefix that meet the requirements
  * of ib_map_mr_sg.
@@ -1533,7 +1533,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
         u64 last_end_dma_addr = 0, last_page_addr = 0;
         unsigned int last_page_off = 0;
         u64 page_mask = ~((u64)mr->page_size - 1);
-        int i;
+        int i, ret;
 
         mr->iova = sg_dma_address(&sgl[0]);
         mr->length = 0;
@@ -1544,27 +1544,29 @@ int ib_sg_to_pages(struct ib_mr *mr,
                 u64 end_dma_addr = dma_addr + dma_len;
                 u64 page_addr = dma_addr & page_mask;
 
-                if (i && page_addr != dma_addr) {
-                        if (last_end_dma_addr != dma_addr) {
-                                /* gap */
-                                goto done;
-
-                        } else if (last_page_off + dma_len <= mr->page_size) {
-                                /* chunk this fragment with the last */
-                                mr->length += dma_len;
-                                last_end_dma_addr += dma_len;
-                                last_page_off += dma_len;
-                                continue;
-                        } else {
-                                /* map starting from the next page */
-                                page_addr = last_page_addr + mr->page_size;
-                                dma_len -= mr->page_size - last_page_off;
-                        }
+                /*
+                 * For the second and later elements, check whether either the
+                 * end of element i-1 or the start of element i is not aligned
+                 * on a page boundary.
+                 */
+                if (i && (last_page_off != 0 || page_addr != dma_addr)) {
+                        /* Stop mapping if there is a gap. */
+                        if (last_end_dma_addr != dma_addr)
+                                break;
+
+                        /*
+                         * Coalesce this element with the last. If it is small
+                         * enough just update mr->length. Otherwise start
+                         * mapping from the next page.
+                         */
+                        goto next_page;
                 }
 
                 do {
-                        if (unlikely(set_page(mr, page_addr)))
-                                goto done;
+                        ret = set_page(mr, page_addr);
+                        if (unlikely(ret < 0))
+                                return i ? : ret;
+next_page:
                         page_addr += mr->page_size;
                 } while (page_addr < end_dma_addr);
 
@@ -1574,7 +1576,6 @@ int ib_sg_to_pages(struct ib_mr *mr,
                 last_page_off = end_dma_addr & ~page_mask;
         }
 
-done:
         return i;
 }
 EXPORT_SYMBOL(ib_sg_to_pages);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f567160a4a56..97d6878f9938 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -456,7 +456,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
         props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
         props->max_sge = min(dev->dev->caps.max_sq_sg,
                              dev->dev->caps.max_rq_sg);
-        props->max_sge_rd = props->max_sge;
+        props->max_sge_rd = MLX4_MAX_SGE_RD;
         props->max_cq = dev->dev->quotas.cq;
         props->max_cqe = dev->dev->caps.max_cqes;
         props->max_mr = dev->dev->quotas.mpt;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a2e4ca56da44..13eaaf45288f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -34,6 +34,7 @@
 #include <linux/log2.h>
 #include <linux/slab.h>
 #include <linux/netdevice.h>
+#include <linux/vmalloc.h>
 
 #include <rdma/ib_cache.h>
 #include <rdma/ib_pack.h>
@@ -795,8 +796,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                 if (err)
                         goto err_mtt;
 
-                qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp);
-                qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), gfp);
+                qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(u64), gfp);
+                if (!qp->sq.wrid)
+                        qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
+                                                gfp, PAGE_KERNEL);
+                qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(u64), gfp);
+                if (!qp->rq.wrid)
+                        qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
+                                                gfp, PAGE_KERNEL);
                 if (!qp->sq.wrid || !qp->rq.wrid) {
                         err = -ENOMEM;
                         goto err_wrid;
@@ -886,8 +893,8 @@ err_wrid:
                 if (qp_has_rq(init_attr))
                         mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
         } else {
-                kfree(qp->sq.wrid);
-                kfree(qp->rq.wrid);
+                kvfree(qp->sq.wrid);
+                kvfree(qp->rq.wrid);
         }
 
 err_mtt:
@@ -1062,8 +1069,8 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
                               &qp->db);
                 ib_umem_release(qp->umem);
         } else {
-                kfree(qp->sq.wrid);
-                kfree(qp->rq.wrid);
+                kvfree(qp->sq.wrid);
+                kvfree(qp->rq.wrid);
                 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
                     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
                         free_proxy_bufs(&dev->ib_dev, qp);
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index dce5dfe3a70e..8d133c40fa0e 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -34,6 +34,7 @@
 #include <linux/mlx4/qp.h>
 #include <linux/mlx4/srq.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 
 #include "mlx4_ib.h"
 #include "user.h"
@@ -172,8 +173,12 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 
                 srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
                 if (!srq->wrid) {
-                        err = -ENOMEM;
-                        goto err_mtt;
+                        srq->wrid = __vmalloc(srq->msrq.max * sizeof(u64),
+                                              GFP_KERNEL, PAGE_KERNEL);
+                        if (!srq->wrid) {
+                                err = -ENOMEM;
+                                goto err_mtt;
+                        }
                 }
         }
 
@@ -204,7 +209,7 @@ err_wrid:
         if (pd->uobject)
                 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
         else
-                kfree(srq->wrid);
+                kvfree(srq->wrid);
 
 err_mtt:
         mlx4_mtt_cleanup(dev->dev, &srq->mtt);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ec8993a7b3be..6000f7aeede9 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -381,7 +381,19 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
                         }
                 }
         } else if (ent->cur > 2 * ent->limit) {
-                if (!someone_adding(cache) &&
+                /*
+                 * The remove_keys() logic is performed as garbage collection
+                 * task. Such task is intended to be run when no other active
+                 * processes are running.
+                 *
+                 * The need_resched() will return TRUE if there are user tasks
+                 * to be activated in near future.
+                 *
+                 * In such case, we don't execute remove_keys() and postpone
+                 * the garbage collection work to try to run in next cycle,
+                 * in order to free CPU resources to other tasks.
+                 */
+                if (!need_resched() && !someone_adding(cache) &&
                     time_after(jiffies, cache->last_add + 300 * HZ)) {
                         remove_keys(dev, i, 1);
                         if (ent->cur > ent->limit)
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 5e27f76805e2..4c7c3c84a741 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -292,7 +292,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
                 qib_dev_porterr(ppd->dd, ppd->port,
                                 "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
 
-        if ((peek[2] & 2) == 0) {
+        if ((peek[2] & 4) == 0) {
                 /*
                  * If cable is paged, rather than "flat memory", we need to
                  * set the page to zero, Even if it already appears to be zero.
@@ -538,7 +538,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
         sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
                            QSFP_DATE_LEN, cd.date);
         sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
-                           QSFP_LOT_LEN, cd.date);
+                           QSFP_LOT_LEN, cd.lot);
 
         while (bidx < QSFP_DEFAULT_HDR_CNT) {
                 int iidx;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 2baf5ad251ed..bc803f33d5f6 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -329,9 +329,9 @@ struct qib_sge {
 struct qib_mr {
         struct ib_mr ibmr;
         struct ib_umem *umem;
-        struct qib_mregion mr;  /* must be last */
         u64 *pages;
         u32 npages;
+        struct qib_mregion mr;  /* must be last */
 };
 
 /*
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a93070210109..42f4da620f2e 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1293,7 +1293,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
         if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
                 sector_t sector_off = mr_status.sig_err.sig_err_offset;
 
-                do_div(sector_off, sector_size + 8);
+                sector_div(sector_off, sector_size + 8);
                 *sector = scsi_get_lba(iser_task->sc) + sector_off;
 
                 pr_err("PI error found type %d at sector %llx "
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index dfbbbb28090b..8a51c3b5d657 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -157,16 +157,9 @@ isert_create_qp(struct isert_conn *isert_conn,
         attr.recv_cq = comp->cq;
         attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
         attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
-        /*
-         * FIXME: Use devattr.max_sge - 2 for max_send_sge as
-         * work-around for RDMA_READs with ConnectX-2.
-         *
-         * Also, still make sure to have at least two SGEs for
-         * outgoing control PDU responses.
-         */
-        attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
-        isert_conn->max_sge = attr.cap.max_send_sge;
-
+        attr.cap.max_send_sge = device->dev_attr.max_sge;
+        isert_conn->max_sge = min(device->dev_attr.max_sge,
+                                  device->dev_attr.max_sge_rd);
         attr.cap.max_recv_sge = 1;
         attr.sq_sig_type = IB_SIGNAL_REQ_WR;
         attr.qp_type = IB_QPT_RC;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 9909022dc6c3..3db9a659719b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -488,7 +488,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
         struct ib_qp *qp;
         struct ib_fmr_pool *fmr_pool = NULL;
         struct srp_fr_pool *fr_pool = NULL;
-        const int m = 1 + dev->use_fast_reg;
+        const int m = dev->use_fast_reg ? 3 : 1;
         struct ib_cq_init_attr cq_attr = {};
         int ret;
 
@@ -994,16 +994,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
 
         ret = srp_lookup_path(ch);
         if (ret)
-                return ret;
+                goto out;
 
         while (1) {
                 init_completion(&ch->done);
                 ret = srp_send_req(ch, multich);
                 if (ret)
-                        return ret;
+                        goto out;
                 ret = wait_for_completion_interruptible(&ch->done);
                 if (ret < 0)
-                        return ret;
+                        goto out;
 
                 /*
                  * The CM event handling code will set status to
@@ -1011,15 +1011,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
                  * back, or SRP_DLID_REDIRECT if we get a lid/qp
                  * redirect REJ back.
                  */
-                switch (ch->status) {
+                ret = ch->status;
+                switch (ret) {
                 case 0:
                         ch->connected = true;
-                        return 0;
+                        goto out;
 
                 case SRP_PORT_REDIRECT:
                         ret = srp_lookup_path(ch);
                         if (ret)
-                                return ret;
+                                goto out;
                         break;
 
                 case SRP_DLID_REDIRECT:
@@ -1028,13 +1029,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
                 case SRP_STALE_CONN:
                         shost_printk(KERN_ERR, target->scsi_host, PFX
                                      "giving up on stale connection\n");
-                        ch->status = -ECONNRESET;
-                        return ch->status;
+                        ret = -ECONNRESET;
+                        goto out;
 
                 default:
-                        return ch->status;
+                        goto out;
                 }
         }
+
+out:
+        return ret <= 0 ? ret : -ENODEV;
 }
 
 static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
@@ -1309,7 +1313,7 @@ reset_state:
 }
 
 static int srp_map_finish_fr(struct srp_map_state *state,
-                             struct srp_rdma_ch *ch)
+                             struct srp_rdma_ch *ch, int sg_nents)
 {
         struct srp_target_port *target = ch->target;
         struct srp_device *dev = target->srp_host->srp_dev;
@@ -1324,10 +1328,10 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 
         WARN_ON_ONCE(!dev->use_fast_reg);
 
-        if (state->sg_nents == 0)
+        if (sg_nents == 0)
                 return 0;
 
-        if (state->sg_nents == 1 && target->global_mr) {
+        if (sg_nents == 1 && target->global_mr) {
                 srp_map_desc(state, sg_dma_address(state->sg),
                              sg_dma_len(state->sg),
                              target->global_mr->rkey);
@@ -1341,8 +1345,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
         rkey = ib_inc_rkey(desc->mr->rkey);
         ib_update_fast_reg_key(desc->mr, rkey);
 
-        n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
-                         dev->mr_page_size);
+        n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
         if (unlikely(n < 0))
                 return n;
 
@@ -1448,16 +1451,15 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
         state->fr.next = req->fr_list;
         state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
         state->sg = scat;
-        state->sg_nents = scsi_sg_count(req->scmnd);
 
-        while (state->sg_nents) {
+        while (count) {
                 int i, n;
 
-                n = srp_map_finish_fr(state, ch);
+                n = srp_map_finish_fr(state, ch, count);
                 if (unlikely(n < 0))
                         return n;
 
-                state->sg_nents -= n;
+                count -= n;
                 for (i = 0; i < n; i++)
                         state->sg = sg_next(state->sg);
         }
@@ -1517,10 +1519,12 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
 
         if (dev->use_fast_reg) {
                 state.sg = idb_sg;
-                state.sg_nents = 1;
                 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
                 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
-                ret = srp_map_finish_fr(&state, ch);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+                idb_sg->dma_length = idb_sg->length;          /* hack^2 */
+#endif
+                ret = srp_map_finish_fr(&state, ch, 1);
                 if (ret < 0)
                         return ret;
         } else if (dev->use_fmr) {
@@ -1655,7 +1659,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
                         return ret;
                 req->nmdesc++;
         } else {
-                idb_rkey = target->global_mr->rkey;
+                idb_rkey = cpu_to_be32(target->global_mr->rkey);
         }
 
         indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 87a2a919dc43..f6af531f9f32 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -300,10 +300,7 @@ struct srp_map_state {
         dma_addr_t base_dma_addr;
         u32 dma_len;
         u32 total_len;
-        union {
-                unsigned int npages;
-                int sg_nents;
-        };
+        unsigned int npages;
         unsigned int nmdesc;
         unsigned int ndesc;
 };
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 2177e56ed0be..d48d5793407d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1010,7 +1010,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
                 if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
                       smp->method == IB_MGMT_METHOD_GET) || network_view) {
                         mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
-                                 slave, smp->method, smp->mgmt_class,
+                                 slave, smp->mgmt_class, smp->method,
                                  network_view ? "Network" : "Host",
                                  be16_to_cpu(smp->attr_id));
                         return -EPERM;