Diffstat (limited to 'drivers/nvme/target/rdma.c')
-rw-r--r-- | drivers/nvme/target/rdma.c | 41 |
1 file changed, 18 insertions, 23 deletions
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index bd265aceb90c..3f7971d3706d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -122,7 +122,6 @@ struct nvmet_rdma_device {
 	int			inline_page_count;
 };
 
-static struct workqueue_struct *nvmet_rdma_delete_wq;
 static bool nvmet_rdma_use_srq;
 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -504,7 +503,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
 	}
 
 	if (rsp->req.sg != rsp->cmd->inline_sg)
-		sgl_free(rsp->req.sg);
+		nvmet_req_free_sgl(&rsp->req);
 
 	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
 		nvmet_rdma_process_wr_wait_list(queue);
@@ -653,24 +652,24 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 {
 	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
 	u64 addr = le64_to_cpu(sgl->addr);
-	u32 len = get_unaligned_le24(sgl->length);
 	u32 key = get_unaligned_le32(sgl->key);
 	int ret;
 
+	rsp->req.transfer_len = get_unaligned_le24(sgl->length);
+
 	/* no data command? */
-	if (!len)
+	if (!rsp->req.transfer_len)
 		return 0;
 
-	rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
-	if (!rsp->req.sg)
-		return NVME_SC_INTERNAL;
+	ret = nvmet_req_alloc_sgl(&rsp->req);
+	if (ret < 0)
+		goto error_out;
 
 	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
 			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
 			nvmet_data_dir(&rsp->req));
 	if (ret < 0)
-		return NVME_SC_INTERNAL;
-	rsp->req.transfer_len += len;
+		goto error_out;
 	rsp->n_rdma += ret;
 
 	if (invalidate) {
@@ -679,6 +678,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
 	}
 
 	return 0;
+
+error_out:
+	rsp->req.transfer_len = 0;
+	return NVME_SC_INTERNAL;
 }
 
 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
@@ -746,6 +749,8 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 		cmd->send_sge.addr, cmd->send_sge.length,
 		DMA_TO_DEVICE);
 
+	cmd->req.p2p_client = &queue->dev->device->dev;
+
 	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
 			&queue->nvme_sq, &nvmet_rdma_ops))
 		return;
@@ -1268,12 +1273,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
 	if (queue->host_qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_workqueue(nvmet_rdma_delete_wq);
+		flush_scheduled_work();
 	}
 
 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
 	if (ret) {
-		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+		schedule_work(&queue->release_work);
 		/* Destroying rdma_cm id is not needed here */
 		return 0;
 	}
@@ -1338,7 +1343,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
 	if (disconnect) {
 		rdma_disconnect(queue->cm_id);
-		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+		schedule_work(&queue->release_work);
 	}
 }
 
@@ -1368,7 +1373,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
 	pr_err("failed to connect queue %d\n", queue->idx);
-	queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+	schedule_work(&queue->release_work);
 }
 
 /**
@@ -1650,17 +1655,8 @@ static int __init nvmet_rdma_init(void)
 	if (ret)
 		goto err_ib_client;
 
-	nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
-			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
-	if (!nvmet_rdma_delete_wq) {
-		ret = -ENOMEM;
-		goto err_unreg_transport;
-	}
-
 	return 0;
 
-err_unreg_transport:
-	nvmet_unregister_transport(&nvmet_rdma_ops);
 err_ib_client:
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	return ret;
@@ -1668,7 +1664,6 @@ err_ib_client:
 
 static void __exit nvmet_rdma_exit(void)
 {
-	destroy_workqueue(nvmet_rdma_delete_wq);
 	nvmet_unregister_transport(&nvmet_rdma_ops);
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
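
In sum, the diff replaces the transport-private delete workqueue with the system workqueue (schedule_work()/flush_scheduled_work()) and switches data-buffer management from the generic sgl_alloc()/sgl_free() to the nvmet core helpers nvmet_req_alloc_sgl()/nvmet_req_free_sgl(), with cmd->req.p2p_client pointing at the RDMA device so the core may satisfy the allocation from peer-to-peer memory. Below is a minimal sketch of the resulting allocate/teardown pattern; the nvmet_req fields and helper calls are the ones visible in the hunks above, while the wrapper functions themselves are hypothetical:

#include "nvmet.h"	/* struct nvmet_req, nvmet_req_alloc_sgl(), nvmet_req_free_sgl() */

/* Hypothetical wrapper: map the data SGL for a request of 'len' bytes. */
static u16 example_map_data(struct nvmet_req *req, u32 len)
{
	/* The core helper sizes the SGL from transfer_len, so set it first. */
	req->transfer_len = len;
	if (!req->transfer_len)
		return 0;			/* no-data command */

	if (nvmet_req_alloc_sgl(req) < 0) {	/* fills req->sg / req->sg_cnt */
		req->transfer_len = 0;		/* undo, as error_out does above */
		return NVME_SC_INTERNAL;
	}
	return 0;
}

/* Hypothetical wrapper: release what example_map_data() allocated. */
static void example_release_data(struct nvmet_req *req)
{
	/* Pairs with nvmet_req_alloc_sgl(); a bare sgl_free() no longer applies. */
	nvmet_req_free_sgl(req);
}

Setting transfer_len before calling the allocator is presumably why the diff moves the get_unaligned_le24(sgl->length) read to the top of nvmet_rdma_map_sgl_keyed(): the core helper needs the length to size the allocation, and the error path zeroes it again so a failed command does not advertise a pending transfer.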