author     Linus Torvalds <torvalds@linux-foundation.org>  2019-07-30 13:01:04 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-30 13:01:04 -0700
commit     32a024b9a9f3b40f84bc55a6dd35eaa770ea26a4 (patch)
tree       d8025cff050f009cb398c0514551962136744a5a
parent     515f12b9eeed35250d793b7c874707c33f7f6e05 (diff)
parent     b7165bd0d6cbb93732559be6ea8774653b204480 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe:
 "A few regression and bug fixes for the patches merged in the last
  cycle:

   - hns fixes a subtle crash from the ib core SGL rework

   - hfi1 fixes various error handling, oops and protocol errors

   - bnxt_re fixes a regression where nvmeof doesn't work on some
     configurations

   - mlx5 fixes a serious 'use after free' bug in how MR caching is
     handled

   - some edge case crashers in the new statistic core code

   - more siw static checker fixups"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/mlx5: Fix RSS Toeplitz setup to be aligned with the HW specification
  IB/counters: Always initialize the port counter object
  IB/core: Fix querying total rdma stats
  IB/mlx5: Prevent concurrent MR updates during invalidation
  IB/mlx5: Fix clean_mr() to work in the expected order
  IB/mlx5: Move MRs to a kernel PD when freeing them to the MR cache
  IB/mlx5: Use direct mkey destroy command upon UMR unreg failure
  IB/mlx5: Fix unreg_umr to ignore the mkey state
  RDMA/siw: Remove set but not used variables 'rv'
  IB/mlx5: Replace kfree with kvfree
  RDMA/bnxt_re: Honor vlan_id in GID entry comparison
  IB/hfi1: Drop all TID RDMA READ RESP packets after r_next_psn
  IB/hfi1: Field not zero-ed when allocating TID flow memory
  IB/hfi1: Unreserve a flushed OPFN request
  IB/hfi1: Check for error on call to alloc_rsm_map_table
  RDMA/hns: Fix sg offset non-zero issue
  RDMA/siw: Fix error return code in siw_init_module()
-rw-r--r--drivers/infiniband/core/counters.c11
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c7
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c13
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c14
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h7
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c11
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c43
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c15
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c23
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c7
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c13
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c3
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c1
-rw-r--r--include/rdma/rdmavt_qp.h9
17 files changed, 89 insertions, 93 deletions
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 01faef7bc061..45d5164e9574 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -393,6 +393,9 @@ u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index)
u64 sum;
port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
+ return 0;
+
sum = get_running_counters_hwstat_sum(dev, port, index);
sum += port_counter->hstats->value[index];
@@ -594,7 +597,7 @@ void rdma_counter_init(struct ib_device *dev)
struct rdma_port_counter *port_counter;
u32 port;
- if (!dev->ops.alloc_hw_stats || !dev->port_data)
+ if (!dev->port_data)
return;
rdma_for_each_port(dev, port) {
@@ -602,6 +605,9 @@ void rdma_counter_init(struct ib_device *dev)
port_counter->mode.mode = RDMA_COUNTER_MODE_NONE;
mutex_init(&port_counter->lock);
+ if (!dev->ops.alloc_hw_stats)
+ continue;
+
port_counter->hstats = dev->ops.alloc_hw_stats(dev, port);
if (!port_counter->hstats)
goto fail;
@@ -624,9 +630,6 @@ void rdma_counter_release(struct ib_device *dev)
struct rdma_port_counter *port_counter;
u32 port;
- if (!dev->ops.alloc_hw_stats)
- return;
-
rdma_for_each_port(dev, port) {
port_counter = &dev->port_data[port].port_counter;
kfree(port_counter->hstats);
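The counters.c hunks above decouple the port counter object from the optional alloc_hw_stats driver hook: the mode and lock are now initialized for every port, the stats buffer is allocated only when the hook exists, and readers check for a missing buffer instead of dereferencing it. A standalone C model of that shape (all names here are invented for illustration, not kernel APIs):

    #include <stdio.h>

    struct port_stats { unsigned long long value[4]; };

    struct port_counter {
        int mode;                  /* always initialized, even without HW stats */
        struct port_stats *hstats; /* stays NULL when the driver has no hook */
    };

    /* Stand-in for dev->ops.alloc_hw_stats; NULL for drivers without HW stats. */
    static struct port_stats *(*driver_alloc_stats)(void);

    static void counters_init(struct port_counter *pc, int nports)
    {
        for (int i = 0; i < nports; i++) {
            pc[i].mode = 0;            /* RDMA_COUNTER_MODE_NONE */
            if (!driver_alloc_stats)
                continue;              /* object is still valid and usable */
            pc[i].hstats = driver_alloc_stats();
        }
    }

    static unsigned long long counters_read(const struct port_counter *pc, int idx)
    {
        if (!pc->hstats)               /* the guard the fix adds */
            return 0;
        return pc->hstats->value[idx];
    }

    int main(void)
    {
        struct port_counter ports[2] = { 0 };

        counters_init(ports, 2);       /* no HW-stats hook installed */
        printf("%llu\n", counters_read(&ports[0], 0)); /* prints 0, no crash */
        return 0;
    }

The same reasoning is why rdma_counter_release() no longer bails out early: kfree(NULL) is a no-op, so unconditionally freeing the per-port hstats is safe.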
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index a91653aabf38..098ab883733e 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -308,6 +308,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
struct bnxt_qplib_gid *gid_to_del;
+ u16 vlan_id = 0xFFFF;
/* Delete the entry from the hardware */
ctx = *context;
@@ -317,7 +318,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
if (sgid_tbl && sgid_tbl->active) {
if (ctx->idx >= sgid_tbl->max)
return -EINVAL;
- gid_to_del = &sgid_tbl->tbl[ctx->idx];
+ gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
+ vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
/* DEL_GID is called in WQ context(netdevice_event_work_handler)
* or via the ib_unregister_device path. In the former case QP1
* may not be destroyed yet, in which case just return as FW
@@ -335,7 +337,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
}
ctx->refcnt--;
if (!ctx->refcnt) {
- rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
+ rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
+ vlan_id, true);
if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to remove GID: %#x", rc);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 37928b1111df..bdbde8e22420 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -488,7 +488,7 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl,
u16 max)
{
- sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
+ sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
if (!sgid_tbl->tbl)
return -ENOMEM;
@@ -526,9 +526,10 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
for (i = 0; i < sgid_tbl->max; i++) {
if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero)))
- bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
+ bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
+ sgid_tbl->tbl[i].vlan_id, true);
}
- memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+ memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
sgid_tbl->active = 0;
@@ -537,7 +538,11 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct net_device *netdev)
{
- memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+ u32 i;
+
+ for (i = 0; i < sgid_tbl->max; i++)
+ sgid_tbl->tbl[i].vlan_id = 0xffff;
+
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 30c42c92fac7..fbda11a7ab1a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -111,7 +111,7 @@ struct bnxt_qplib_pd_tbl {
};
struct bnxt_qplib_sgid_tbl {
- struct bnxt_qplib_gid *tbl;
+ struct bnxt_qplib_gid_info *tbl;
u16 *hw_id;
u16 max;
u16 active;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 48793d3512ac..40296b97d21e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -213,12 +213,12 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
index, sgid_tbl->max);
return -EINVAL;
}
- memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid));
+ memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
return 0;
}
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, bool update)
+ struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
{
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
struct bnxt_qplib_res,
@@ -236,7 +236,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return -ENOMEM;
}
for (index = 0; index < sgid_tbl->max; index++) {
- if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid)))
+ if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
+ vlan_id == sgid_tbl->tbl[index].vlan_id)
break;
}
if (index == sgid_tbl->max) {
@@ -262,8 +263,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
if (rc)
return rc;
}
- memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
+ memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero));
+ sgid_tbl->tbl[index].vlan_id = 0xFFFF;
sgid_tbl->vlan[index] = 0;
sgid_tbl->active--;
dev_dbg(&res->pdev->dev,
@@ -296,7 +298,8 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
free_idx = sgid_tbl->max;
for (i = 0; i < sgid_tbl->max; i++) {
- if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
+ if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
+ sgid_tbl->tbl[i].vlan_id == vlan_id) {
dev_dbg(&res->pdev->dev,
"SGID entry already exist in entry %d!\n", i);
*index = i;
@@ -351,6 +354,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
/* Add GID to the sgid_tbl */
memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
+ sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
sgid_tbl->active++;
if (vlan_id != 0xFFFF)
sgid_tbl->vlan[free_idx] = 1;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 0ec3b12b0bcd..13d9432d5ce2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -84,6 +84,11 @@ struct bnxt_qplib_gid {
u8 data[16];
};
+struct bnxt_qplib_gid_info {
+ struct bnxt_qplib_gid gid;
+ u16 vlan_id;
+};
+
struct bnxt_qplib_ah {
struct bnxt_qplib_gid dgid;
struct bnxt_qplib_pd *pd;
@@ -221,7 +226,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid);
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, bool update);
+ struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
bool update, u32 *index);
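Taken together, the bnxt_re changes widen each SGID table slot from a bare GID to a (GID, VLAN) pair, so two entries that share the same GID bytes but differ in VLAN no longer alias each other. A minimal standalone model of the new lookup rule (types and the find_gid helper are simplified stand-ins, not driver code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct gid { uint8_t data[16]; };
    struct gid_info {            /* mirrors the new bnxt_qplib_gid_info */
        struct gid gid;
        uint16_t vlan_id;        /* 0xFFFF means "no VLAN" */
    };

    /* Match on both GID bytes and VLAN, as bnxt_qplib_del_sgid() now does. */
    static int find_gid(const struct gid_info *tbl, int max,
                        const struct gid *gid, uint16_t vlan_id)
    {
        for (int i = 0; i < max; i++)
            if (!memcmp(&tbl[i].gid, gid, sizeof(*gid)) &&
                tbl[i].vlan_id == vlan_id)
                return i;
        return -1;
    }

    int main(void)
    {
        struct gid_info tbl[2] = { 0 };
        struct gid g = { { 1 } };

        tbl[0].gid = g;  tbl[0].vlan_id = 0xFFFF; /* untagged entry */
        tbl[1].gid = g;  tbl[1].vlan_id = 100;    /* same GID, VLAN 100 */

        /* Without the vlan_id check the two slots are indistinguishable. */
        printf("vlan 100 -> slot %d\n", find_gid(tbl, 2, &g, 100)); /* 1 */
        return 0;
    }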
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index d5b643a1d9fd..67052dc3100c 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -14452,7 +14452,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
-static void init_rxe(struct hfi1_devdata *dd)
+static int init_rxe(struct hfi1_devdata *dd)
{
struct rsm_map_table *rmt;
u64 val;
@@ -14461,6 +14461,9 @@ static void init_rxe(struct hfi1_devdata *dd)
write_csr(dd, RCV_ERR_MASK, ~0ull);
rmt = alloc_rsm_map_table(dd);
+ if (!rmt)
+ return -ENOMEM;
+
/* set up QOS, including the QPN map table */
init_qos(dd, rmt);
init_fecn_handling(dd, rmt);
@@ -14487,6 +14490,7 @@ static void init_rxe(struct hfi1_devdata *dd)
val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
RCV_BYPASS_HDR_SIZE_SHIFT);
write_csr(dd, RCV_BYPASS, val);
+ return 0;
}
static void init_other(struct hfi1_devdata *dd)
@@ -15024,7 +15028,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
goto bail_cleanup;
/* set initial RXE CSRs */
- init_rxe(dd);
+ ret = init_rxe(dd);
+ if (ret)
+ goto bail_cleanup;
+
/* set initial TXE CSRs */
init_txe(dd);
/* set initial non-RXE, non-TXE CSRs */
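The chip.c hunks are a void-to-int conversion: alloc_rsm_map_table() can fail, so init_rxe() now reports -ENOMEM instead of pressing on with a NULL table. A hedged standalone sketch of the same pattern (setup_rx and make_table are illustrative names):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct map_table { int entries[8]; };

    static struct map_table *make_table(void)
    {
        return calloc(1, sizeof(struct map_table)); /* may return NULL */
    }

    /* Was void; now returns 0 or -ENOMEM so init can bail out cleanly. */
    static int setup_rx(struct map_table **out)
    {
        struct map_table *rmt = make_table();

        if (!rmt)
            return -ENOMEM;
        *out = rmt;
        return 0;
    }

    int main(void)
    {
        struct map_table *rmt;
        int ret = setup_rx(&rmt);

        if (ret) {
            fprintf(stderr, "init failed: %d\n", ret);
            return 1;      /* mirrors the goto bail_cleanup path */
        }
        free(rmt);
        return 0;
    }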
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 0477c14633ab..024a7c2b6124 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
break;
trdma_clean_swqe(qp, wqe);
- rvt_qp_wqe_unreserve(qp, wqe);
trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
rvt_qp_complete_swqe(qp,
wqe,
@@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
trdma_clean_swqe(qp, wqe);
- rvt_qp_wqe_unreserve(qp, wqe);
trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
rvt_qp_complete_swqe(qp,
wqe,
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 92acccaaaa86..996fc298207e 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -1620,6 +1620,7 @@ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
flows[i].req = req;
flows[i].npagesets = 0;
flows[i].pagesets[0].mapped = 0;
+ flows[i].resync_npkts = 0;
}
req->flows = flows;
return 0;
@@ -1673,34 +1674,6 @@ static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
return NULL;
}
-static struct tid_rdma_flow *
-__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail,
- u32 psn, u16 *fidx)
-{
- for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
- tail = CIRC_NEXT(tail, MAX_FLOWS)) {
- struct tid_rdma_flow *flow = &req->flows[tail];
- u32 spsn, lpsn;
-
- spsn = full_flow_psn(flow, flow->flow_state.spsn);
- lpsn = full_flow_psn(flow, flow->flow_state.lpsn);
-
- if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) {
- if (fidx)
- *fidx = tail;
- return flow;
- }
- }
- return NULL;
-}
-
-static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req,
- u32 psn, u16 *fidx)
-{
- return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn,
- fidx);
-}
-
/* TID RDMA READ functions */
u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
struct ib_other_headers *ohdr, u32 *bth1,
@@ -2788,19 +2761,7 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
* to prevent continuous Flow Sequence errors for any
* packets that could be still in the fabric.
*/
- flow = find_flow(req, psn, NULL);
- if (!flow) {
- /*
- * We can't find the IB PSN matching the
- * received KDETH PSN. The only thing we can
- * do at this point is report the error to
- * the QP.
- */
- hfi1_kern_read_tid_flow_free(qp);
- spin_unlock(&qp->s_lock);
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- return ret;
- }
+ flow = &req->flows[req->clear_tail];
if (priv->s_flags & HFI1_R_TID_SW_PSN) {
diff = cmp_psn(psn,
flow->flow_state.r_next_psn);
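The removed find_flow()/__find_flow_ranged() searched the flow ring for an IB PSN match and errored the QP when nothing matched; the fix instead relies on the ring discipline that KDETH EFLAGS for a READ RESP can only concern the flow at clear_tail, and drops anything past r_next_psn. A standalone model of that indexing (field names simplified):

    #include <stdio.h>

    #define MAX_FLOWS 4 /* power of two, as the circular-buffer macros require */

    struct flow { unsigned int r_next_psn; };

    struct request {
        struct flow flows[MAX_FLOWS];
        unsigned short setup_head;  /* producer index */
        unsigned short clear_tail;  /* consumer index: the active flow */
    };

    /* The active flow is simply the one at clear_tail; no PSN search needed. */
    static struct flow *active_flow(struct request *req)
    {
        return &req->flows[req->clear_tail & (MAX_FLOWS - 1)];
    }

    int main(void)
    {
        struct request req = { .clear_tail = 2 };

        req.flows[2].r_next_psn = 42;
        printf("expecting psn %u\n", active_flow(&req)->r_next_psn);
        return 0;
    }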
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index 627aa46ef683..c00714c2f16a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
struct ib_udata *udata, unsigned long virt,
struct hns_roce_db *db)
{
+ unsigned long page_addr = virt & PAGE_MASK;
struct hns_roce_user_db_page *page;
+ unsigned int offset;
int ret = 0;
mutex_lock(&context->page_mutex);
list_for_each_entry(page, &context->page_list, list)
- if (page->user_virt == (virt & PAGE_MASK))
+ if (page->user_virt == page_addr)
goto found;
page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
}
refcount_set(&page->refcount, 1);
- page->user_virt = (virt & PAGE_MASK);
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+ page->user_virt = page_addr;
+ page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
if (IS_ERR(page->umem)) {
ret = PTR_ERR(page->umem);
kfree(page);
@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
list_add(&page->list, &context->page_list);
found:
- db->dma = sg_dma_address(page->umem->sg_head.sgl) +
- (virt & ~PAGE_MASK);
- page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
- db->virt_addr = sg_virt(page->umem->sg_head.sgl);
+ offset = virt - page_addr;
+ db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
+ db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
db->u.user_page = page;
refcount_inc(&page->refcount);
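The hns crash came from writing the in-page offset into the shared scatterlist (sgl->offset), which the ib core SGL rework no longer tolerates; the fix leaves the scatterlist alone and adds the offset to both the DMA and kernel virtual addresses. A small standalone model of the address arithmetic (constants chosen arbitrarily):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        uintptr_t virt = 0x7f32a4c013a8UL;       /* arbitrary user VA */
        uintptr_t page_addr = virt & PAGE_MASK;  /* page the doorbell lives in */
        unsigned int offset = virt - page_addr;  /* byte offset within the page */
        uintptr_t dma_base = 0x100000UL;         /* pretend sg_dma_address() */

        /* Both addresses get the offset added; the sgl itself is untouched. */
        printf("dma = %#lx\n", (unsigned long)(dma_base + offset));
        printf("off = %#x\n", offset);
        return 0;
    }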
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index c482f19958b3..f6a53455bf8b 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -481,6 +481,7 @@ struct mlx5_umr_wr {
u64 length;
int access_flags;
u32 mkey;
+ u8 ignore_free_state:1;
};
static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 20ece6e0b2fc..2c77456f359f 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -545,13 +545,16 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return;
c = order2idx(dev, mr->order);
- if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
- mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
- return;
- }
+ WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
- if (unreg_umr(dev, mr))
+ if (unreg_umr(dev, mr)) {
+ mr->allocated_from_cache = false;
+ destroy_mkey(dev, mr);
+ ent = &cache->ent[c];
+ if (ent->cur < ent->limit)
+ queue_work(cache->wq, &ent->work);
return;
+ }
ent = &cache->ent[c];
spin_lock_irq(&ent->lock);
@@ -1373,9 +1376,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return 0;
umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
- MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+ MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
umrwr.wr.opcode = MLX5_IB_WR_UMR;
+ umrwr.pd = dev->umrc.pd;
umrwr.mkey = mr->mmkey.key;
+ umrwr.ignore_free_state = 1;
return mlx5_ib_post_send_wait(dev, &umrwr);
}
@@ -1577,10 +1582,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
mr->sig = NULL;
}
- mlx5_free_priv_descs(mr);
-
- if (!allocated_from_cache)
+ if (!allocated_from_cache) {
destroy_mkey(dev, mr);
+ mlx5_free_priv_descs(mr);
+ }
}
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
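The clean_mr() reordering encodes a lifetime rule: the device can reference the MR's private descriptors until its mkey is destroyed, so the free must follow destroy_mkey() and must be skipped entirely for MRs returning to the cache. A hedged standalone model (names invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct mr {
        int from_cache;  /* cached MRs keep their mkey and descriptors */
        void *descs;     /* private descriptors the HW may still read */
    };

    static void destroy_mkey(struct mr *mr) { (void)mr; puts("mkey destroyed"); }

    static void clean_mr(struct mr *mr)
    {
        if (!mr->from_cache) {
            destroy_mkey(mr);  /* device stops referencing descs here */
            free(mr->descs);   /* only now is the free safe */
            mr->descs = NULL;
        }
        /* cached MRs: descriptors stay alive for reuse by the cache */
    }

    int main(void)
    {
        struct mr mr = { .from_cache = 0, .descs = malloc(64) };

        clean_mr(&mr);
        return 0;
    }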
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 5b642d81e617..81da82050d05 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -246,7 +246,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
* overwrite the same MTTs. Concurent invalidations might race us,
* but they will write 0s as well, so no difference in the end result.
*/
-
+ mutex_lock(&umem_odp->umem_mutex);
for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
/*
@@ -278,6 +278,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
idx - blk_start_idx + 1, 0,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&umem_odp->umem_mutex);
/*
* We are now sure that the device will not access the
* memory. We can safely unmap it, and mark it as dirty if
@@ -1771,7 +1772,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
w->num_sge, 0);
- kfree(w);
+ kvfree(w);
}
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
@@ -1813,7 +1814,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
if (valid_req)
queue_work(system_unbound_wq, &work->work);
else
- kfree(work);
+ kvfree(work);
srcu_read_unlock(&dev->mr_srcu, srcu_key);
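Two independent rules appear in the odp.c hunks: MTT zapping must run under umem_mutex so a racing page fault cannot repopulate entries mid-invalidation, and memory obtained with kvzalloc() must be released with kvfree(), since it may have come from vmalloc rather than the slab. A standalone sketch of the locking rule (a pthread mutex standing in for the kernel mutex):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t umem_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int mtt[8]; /* stand-in for the on-device translation entries */

    /* Zap entries under the lock so a concurrent fault handler cannot
     * re-map part of the range halfway through, as the fix enforces. */
    static void invalidate_range(int start, int end)
    {
        pthread_mutex_lock(&umem_mutex);
        for (int i = start; i < end; i++)
            mtt[i] = 0;
        pthread_mutex_unlock(&umem_mutex);
    }

    int main(void)
    {
        for (int i = 0; i < 8; i++)
            mtt[i] = 1;
        invalidate_range(0, 8);
        printf("mtt[0]=%d\n", mtt[0]);
        return 0;
    }

The kfree-to-kvfree swaps are the second rule applied to the prefetch work struct.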
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 2a97619ed603..379328b2598f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1713,7 +1713,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
memcpy(rss_key, ucmd.rx_hash_key, len);
break;
}
@@ -4295,10 +4294,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
memset(umr, 0, sizeof(*umr));
- if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
- umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
- else
- umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
+ if (!umrwr->ignore_free_state) {
+ if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+ /* fail if free */
+ umr->flags = MLX5_UMR_CHECK_FREE;
+ else
+ /* fail if not free */
+ umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+ }
umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
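Together with the unreg_umr change above, the UMR control segment now has three modes instead of two: fail-if-free, fail-if-not-free, or no check at all on the teardown path where the mkey state is unknown. The truth table as a hedged standalone helper (enum names invented):

    #include <stdio.h>

    enum umr_check { CHECK_FREE, CHECK_NOT_FREE, CHECK_NONE };

    /* ignore_free_state wins: on the error-teardown path the mkey may be
     * in either state, so neither check may be requested. */
    static enum umr_check umr_check_mode(int fail_if_free, int ignore_free_state)
    {
        if (ignore_free_state)
            return CHECK_NONE;
        return fail_if_free ? CHECK_FREE : CHECK_NOT_FREE;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               umr_check_mode(1, 0),  /* CHECK_FREE */
               umr_check_mode(0, 0),  /* CHECK_NOT_FREE */
               umr_check_mode(1, 1)); /* CHECK_NONE */
        return 0;
    }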
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index a7cde98e73e8..9ce8a1b925d2 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -220,13 +220,12 @@ static void siw_put_work(struct siw_cm_work *work)
static void siw_cep_set_inuse(struct siw_cep *cep)
{
unsigned long flags;
- int rv;
retry:
spin_lock_irqsave(&cep->lock, flags);
if (cep->in_use) {
spin_unlock_irqrestore(&cep->lock, flags);
- rv = wait_event_interruptible(cep->waitq, !cep->in_use);
+ wait_event_interruptible(cep->waitq, !cep->in_use);
if (signal_pending(current))
flush_signals(current);
goto retry;
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index f55c4e80aea4..d0f140daf659 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -612,6 +612,7 @@ static __init int siw_init_module(void)
if (!siw_create_tx_threads()) {
pr_info("siw: Could not start any TX thread\n");
+ rv = -ENOMEM;
goto out_error;
}
/*
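The siw_init_module() fix is the classic stale-error-code pattern: rv still held 0 from an earlier successful step, so the failed goto returned success. A standalone sketch:

    #include <errno.h>
    #include <stdio.h>

    static int create_threads(void) { return 0; } /* pretend all spawns failed */

    static int init_module_sketch(void)
    {
        int rv = 0; /* still 0 from earlier successful steps */

        if (!create_threads()) {
            rv = -ENOMEM;  /* the fix: don't fall through with rv == 0 */
            goto out_error;
        }
        return 0;

    out_error:
        return rv;
    }

    int main(void)
    {
        printf("init -> %d\n", init_module_sketch()); /* -12, not 0 */
        return 0;
    }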
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 0eeea520a853..e06c77d76463 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -608,7 +608,7 @@ static inline void rvt_qp_wqe_reserve(
/**
* rvt_qp_wqe_unreserve - clean reserved operation
* @qp - the rvt qp
- * @wqe - the send wqe
+ * @flags - send wqe flags
*
* This decrements the reserve use count.
*
@@ -620,11 +620,9 @@ static inline void rvt_qp_wqe_reserve(
* the compiler does not juggle the order of the s_last
* ring index and the decrementing of s_reserved_used.
*/
-static inline void rvt_qp_wqe_unreserve(
- struct rvt_qp *qp,
- struct rvt_swqe *wqe)
+static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
{
- if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
+ if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
atomic_dec(&qp->s_reserved_used);
/* insure no compiler re-order up to s_last change */
smp_mb__after_atomic();
@@ -853,6 +851,7 @@ rvt_qp_complete_swqe(struct rvt_qp *qp,
u32 byte_len, last;
int flags = wqe->wr.send_flags;
+ rvt_qp_wqe_unreserve(qp, flags);
rvt_put_qp_swqe(qp, wqe);
need_completion =
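The rdmavt change moves the unreserve into rvt_qp_complete_swqe() and passes the latched send_flags rather than the wqe pointer: once the completion advances s_last, the wqe slot can be recycled by the sender, so its flags must be captured before that point. A standalone model (structures reduced to the relevant fields):

    #include <stdio.h>

    #define RESERVE_USED 0x1

    struct swqe { int send_flags; };
    struct qp   { int s_reserved_used; };

    /* Takes latched flags, not the wqe: by unreserve time the wqe slot
     * may already hold a new request with different flags. */
    static void wqe_unreserve(struct qp *qp, int flags)
    {
        if (flags & RESERVE_USED)
            qp->s_reserved_used--;
    }

    static void complete_swqe(struct qp *qp, struct swqe *wqe)
    {
        int flags = wqe->send_flags;  /* latch first */

        wqe_unreserve(qp, flags);
        wqe->send_flags = 0;          /* slot recycled; the latched copy is safe */
    }

    int main(void)
    {
        struct qp qp = { .s_reserved_used = 1 };
        struct swqe wqe = { .send_flags = RESERVE_USED };

        complete_swqe(&qp, &wqe);
        printf("reserved in use: %d\n", qp.s_reserved_used); /* 0 */
        return 0;
    }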