author | Sagi Grimberg <sagi@grimberg.me> | 2017-03-22 17:07:30 +0200
---|---|---
committer | Nicholas Bellinger <nab@linux-iscsi.org> | 2017-03-30 20:35:50 -0700
commit | 7a56dc8888be23f44158a85b92da45d545cbf548 (patch)
tree | 53ff04e53d41f5225f68ec19dfe0ed26ae173be8
parent | 555a65f66c3c4d9dd46a565418b0b655d861a723 (diff)
iser-target: avoid posting a recv buffer twice
We pre-allocate our send queues and might overflow them
when we have multi-work-request operations, which tend
to occur for large RDMA transfers over devices with a
limited number of allowed sg elements. When we hit a
queue-full condition we may retry later, so track our
receive buffers so that we don't repost them in the retry case.
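For illustration only, here is a minimal, self-contained sketch of the guard this patch introduces; the names (`rx_desc_demo`, `recv_done_demo`, `post_recv_demo`) and the `printf` stand-in for `ib_post_recv()` are hypothetical, not the kernel's. The receive completion handler marks a descriptor as in use, and the repost path posts it only while it is still marked, so a queue-full retry cannot post the same buffer twice.

```c
/*
 * Hypothetical sketch of the in_use guard (not the driver's code):
 * completion marks the descriptor owned by the ULP, repost clears the
 * mark, and a second repost attempt becomes a silent no-op.
 */
#include <stdbool.h>
#include <stdio.h>

struct rx_desc_demo {
	bool in_use;	/* set on receive completion, cleared on repost */
};

/* Called from the receive completion path. */
static void recv_done_demo(struct rx_desc_demo *desc)
{
	desc->in_use = true;
}

/* Called, possibly more than once on queue-full retries, to repost. */
static int post_recv_demo(struct rx_desc_demo *desc)
{
	if (!desc->in_use) {
		/* Already reposted for recv; silently succeed. */
		return 0;
	}
	desc->in_use = false;
	printf("posting recv buffer\n");	/* stands in for ib_post_recv() */
	return 0;
}

int main(void)
{
	struct rx_desc_demo desc = { .in_use = false };

	recv_done_demo(&desc);	/* buffer completed, owned by the ULP */
	post_recv_demo(&desc);	/* first repost actually posts */
	post_recv_demo(&desc);	/* retry after queue-full is a no-op */
	return 0;
}
```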
Reported-by: Potnuri Bharat Teja <bharat@chelsio.com>
Tested-by: Potnuri Bharat Teja <bharat@chelsio.com>
Reviewed-by: Potnuri Bharat Teja <bharat@chelsio.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
-rw-r--r-- | drivers/infiniband/ulp/isert/ib_isert.c | 12
-rw-r--r-- | drivers/infiniband/ulp/isert/ib_isert.h | 3
2 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 9b33c0c97468..fcbed35e95a8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
 		rx_wr->sg_list = &rx_desc->rx_sg;
 		rx_wr->num_sge = 1;
 		rx_wr->next = rx_wr + 1;
+		rx_desc->in_use = false;
 	}
 	rx_wr--;
 	rx_wr->next = NULL; /* mark end of work requests list */
@@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
 	struct ib_recv_wr *rx_wr_failed, rx_wr;
 	int ret;
 
+	if (!rx_desc->in_use) {
+		/*
+		 * if the descriptor is not in-use we already reposted it
+		 * for recv, so just silently return
+		 */
+		return 0;
+	}
+
+	rx_desc->in_use = false;
 	rx_wr.wr_cqe = &rx_desc->rx_cqe;
 	rx_wr.sg_list = &rx_desc->rx_sg;
 	rx_wr.num_sge = 1;
@@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 		return;
 	}
 
+	rx_desc->in_use = true;
+
 	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
 			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index c02ada57d7f5..87d994de8c91 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -60,7 +60,7 @@
 
 #define ISER_RX_PAD_SIZE	(ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
 		(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
-		 sizeof(struct ib_cqe)))
+		 sizeof(struct ib_cqe) + sizeof(bool)))
 
 #define ISCSI_ISER_SG_TABLESIZE		256
 
@@ -85,6 +85,7 @@ struct iser_rx_desc {
 	u64		dma_addr;
 	struct ib_sge	rx_sg;
 	struct ib_cqe	rx_cqe;
+	bool		in_use;
 	char		pad[ISER_RX_PAD_SIZE];
 } __packed;
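A note on the ib_isert.h hunk: ISER_RX_PAD_SIZE also subtracts sizeof(bool), so adding in_use does not grow the packed rx descriptor. Below is a rough, self-contained sketch of that sizing pattern; the types and constant (`hdr_demo`, `rx_desc_demo`, `DESC_TOTAL_SIZE`) are hypothetical, not the driver's definitions.

```c
/*
 * Hypothetical illustration: every header member is subtracted from the
 * pad, so the packed descriptor keeps a fixed total size even when a new
 * flag is added to the header.
 */
#include <stdbool.h>
#include <stdint.h>

#define DESC_TOTAL_SIZE 512	/* arbitrary fixed size for the demo */

struct hdr_demo {
	uint64_t dma_addr;
	bool in_use;		/* newly added flag */
} __attribute__((packed));

#define PAD_SIZE (DESC_TOTAL_SIZE - sizeof(struct hdr_demo))

struct rx_desc_demo {
	struct hdr_demo hdr;
	char pad[PAD_SIZE];
} __attribute__((packed));

/* Compile-time check: the descriptor size is unchanged by the new member. */
_Static_assert(sizeof(struct rx_desc_demo) == DESC_TOTAL_SIZE,
	       "rx descriptor size must stay fixed");

int main(void)
{
	return 0;	/* the check above happens at compile time */
}
```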