author     Roland Dreier <rolandd@cisco.com>    2006-03-24 15:47:26 -0800
committer  Roland Dreier <rolandd@cisco.com>    2006-03-24 15:47:26 -0800
commit     cf368713a3f3b2eb737a92d1b7186dedcc51167c (patch)
tree       450791a25611cf98bde1540a3c366d0e2a6ba117
parent     6f633c8d69415aabbccfcc494008e8e1300a98c1 (diff)
IB/srp: Use a fake scatterlist for non-SG SCSI commands
Since the SCSI midlayer is moving towards entirely getting rid of commands
with use_sg == 0, we should treat this case as an exception.  Therefore,
change the IB SRP initiator to create a fake scatterlist for these commands
with sg_init_one().  This simplifies the flow of DMA mapping and unmapping,
since SRP can just use dma_map_sg() and dma_unmap_sg() unconditionally,
rather than having to choose between the dma_{map,unmap}_sg() and
dma_{map,unmap}_single() variants.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 145
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h |   7
2 files changed, 75 insertions(+), 77 deletions(-)
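
For readers skimming the patch, the core pattern can be condensed into the
sketch below.  This is not code from the driver, only an illustration of what
the commit message describes: when the midlayer hands down a flat buffer
(use_sg == 0), wrap it in a one-entry scatterlist with sg_init_one() so the
same dma_map_sg() path covers both cases.  The helper name
map_command_buffer() and its parameters are invented for the example; the
scsi_cmnd fields match the 2.6-era API this patch targets.

/*
 * Illustrative sketch only -- not the driver source.  Assumes the
 * 2.6.16-era scsi_cmnd fields (use_sg, request_buffer, request_bufflen)
 * that this patch works with.
 */
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <scsi/scsi_cmnd.h>

static int map_command_buffer(struct device *dma_dev, struct scsi_cmnd *scmnd,
			      struct scatterlist *fake_sg)
{
	struct scatterlist *scat;
	int nents;

	if (likely(scmnd->use_sg)) {
		/* Normal case: the midlayer already built a scatterlist. */
		scat  = scmnd->request_buffer;
		nents = scmnd->use_sg;
	} else {
		/* Non-SG command: wrap the flat buffer in a one-entry list. */
		scat  = fake_sg;
		nents = 1;
		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
	}

	/*
	 * One mapping call covers both cases; the caller keeps scat/nents
	 * around so completion can pass the same arguments to dma_unmap_sg().
	 */
	return dma_map_sg(dma_dev, scat, nents, scmnd->sc_data_direction);
}

Keeping the fake entry inside struct srp_request rather than on the stack is
what lets srp_unmap_data() hand the very same scatterlist back to
dma_unmap_sg() when the command completes.
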
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a13dcdf90a4f..61924cc30e55 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -503,8 +503,10 @@ err:
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
struct srp_request *req)
{
+ struct scatterlist *scat;
struct srp_cmd *cmd = req->cmd->buf;
- int len;
+ int len, nents, count;
+ int i;
u8 fmt;
if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
@@ -517,82 +519,66 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
return -EINVAL;
}
- if (scmnd->use_sg) {
- struct scatterlist *scat = scmnd->request_buffer;
- int n;
- int i;
-
- n = dma_map_sg(target->srp_host->dev->dma_device,
- scat, scmnd->use_sg, scmnd->sc_data_direction);
+ /*
+ * This handling of non-SG commands can be killed when the
+ * SCSI midlayer no longer generates non-SG commands.
+ */
+ if (likely(scmnd->use_sg)) {
+ nents = scmnd->use_sg;
+ scat = scmnd->request_buffer;
+ } else {
+ nents = 1;
+ scat = &req->fake_sg;
+ sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
+ }
- if (n == 1) {
- struct srp_direct_buf *buf = (void *) cmd->add_data;
+ count = dma_map_sg(target->srp_host->dev->dma_device, scat, nents,
+ scmnd->sc_data_direction);
- fmt = SRP_DATA_DESC_DIRECT;
+ if (count == 1) {
+ struct srp_direct_buf *buf = (void *) cmd->add_data;
- buf->va = cpu_to_be64(sg_dma_address(scat));
- buf->key = cpu_to_be32(target->srp_host->mr->rkey);
- buf->len = cpu_to_be32(sg_dma_len(scat));
+ fmt = SRP_DATA_DESC_DIRECT;
- len = sizeof (struct srp_cmd) +
- sizeof (struct srp_direct_buf);
- } else {
- struct srp_indirect_buf *buf = (void *) cmd->add_data;
- u32 datalen = 0;
+ buf->va = cpu_to_be64(sg_dma_address(scat));
+ buf->key = cpu_to_be32(target->srp_host->mr->rkey);
+ buf->len = cpu_to_be32(sg_dma_len(scat));
- fmt = SRP_DATA_DESC_INDIRECT;
+ len = sizeof (struct srp_cmd) +
+ sizeof (struct srp_direct_buf);
+ } else {
+ struct srp_indirect_buf *buf = (void *) cmd->add_data;
+ u32 datalen = 0;
- if (scmnd->sc_data_direction == DMA_TO_DEVICE)
- cmd->data_out_desc_cnt = n;
- else
- cmd->data_in_desc_cnt = n;
+ fmt = SRP_DATA_DESC_INDIRECT;
- buf->table_desc.va = cpu_to_be64(req->cmd->dma +
- sizeof *cmd +
- sizeof *buf);
- buf->table_desc.key =
+ if (scmnd->sc_data_direction == DMA_TO_DEVICE)
+ cmd->data_out_desc_cnt = count;
+ else
+ cmd->data_in_desc_cnt = count;
+
+ buf->table_desc.va = cpu_to_be64(req->cmd->dma +
+ sizeof *cmd +
+ sizeof *buf);
+ buf->table_desc.key =
+ cpu_to_be32(target->srp_host->mr->rkey);
+ buf->table_desc.len =
+ cpu_to_be32(count * sizeof (struct srp_direct_buf));
+
+ for (i = 0; i < count; ++i) {
+ buf->desc_list[i].va = cpu_to_be64(sg_dma_address(&scat[i]));
+ buf->desc_list[i].key =
cpu_to_be32(target->srp_host->mr->rkey);
- buf->table_desc.len =
- cpu_to_be32(n * sizeof (struct srp_direct_buf));
-
- for (i = 0; i < n; ++i) {
- buf->desc_list[i].va = cpu_to_be64(sg_dma_address(&scat[i]));
- buf->desc_list[i].key =
- cpu_to_be32(target->srp_host->mr->rkey);
- buf->desc_list[i].len = cpu_to_be32(sg_dma_len(&scat[i]));
-
- datalen += sg_dma_len(&scat[i]);
- }
+ buf->desc_list[i].len = cpu_to_be32(sg_dma_len(&scat[i]));
- buf->len = cpu_to_be32(datalen);
-
- len = sizeof (struct srp_cmd) +
- sizeof (struct srp_indirect_buf) +
- n * sizeof (struct srp_direct_buf);
- }
- } else {
- struct srp_direct_buf *buf = (void *) cmd->add_data;
- dma_addr_t dma;
-
- dma = dma_map_single(target->srp_host->dev->dma_device,
- scmnd->request_buffer, scmnd->request_bufflen,
- scmnd->sc_data_direction);
- if (dma_mapping_error(dma)) {
- printk(KERN_WARNING PFX "unable to map %p/%d (dir %d)\n",
- scmnd->request_buffer, (int) scmnd->request_bufflen,
- scmnd->sc_data_direction);
- return -EINVAL;
+ datalen += sg_dma_len(&scat[i]);
}
- pci_unmap_addr_set(req, direct_mapping, dma);
+ buf->len = cpu_to_be32(datalen);
- buf->va = cpu_to_be64(dma);
- buf->key = cpu_to_be32(target->srp_host->mr->rkey);
- buf->len = cpu_to_be32(scmnd->request_bufflen);
-
- fmt = SRP_DATA_DESC_DIRECT;
-
- len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
+ len = sizeof (struct srp_cmd) +
+ sizeof (struct srp_indirect_buf) +
+ count * sizeof (struct srp_direct_buf);
}
if (scmnd->sc_data_direction == DMA_TO_DEVICE)
@@ -600,7 +586,6 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
else
cmd->buf_fmt = fmt;
-
return len;
}
@@ -608,20 +593,28 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
struct srp_target_port *target,
struct srp_request *req)
{
+ struct scatterlist *scat;
+ int nents;
+
if (!scmnd->request_buffer ||
(scmnd->sc_data_direction != DMA_TO_DEVICE &&
scmnd->sc_data_direction != DMA_FROM_DEVICE))
- return;
+ return;
- if (scmnd->use_sg)
- dma_unmap_sg(target->srp_host->dev->dma_device,
- (struct scatterlist *) scmnd->request_buffer,
- scmnd->use_sg, scmnd->sc_data_direction);
- else
- dma_unmap_single(target->srp_host->dev->dma_device,
- pci_unmap_addr(req, direct_mapping),
- scmnd->request_bufflen,
- scmnd->sc_data_direction);
+ /*
+ * This handling of non-SG commands can be killed when the
+ * SCSI midlayer no longer generates non-SG commands.
+ */
+ if (likely(scmnd->use_sg)) {
+ nents = scmnd->use_sg;
+ scat = (struct scatterlist *) scmnd->request_buffer;
+ } else {
+ nents = 1;
+ scat = &req->fake_sg;
+ }
+
+ dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
+ scmnd->sc_data_direction);
}
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 4e7727df32f1..bd7f7c3115de 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -38,6 +38,7 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/scatterlist.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
@@ -94,7 +95,11 @@ struct srp_request {
struct scsi_cmnd *scmnd;
struct srp_iu *cmd;
struct srp_iu *tsk_mgmt;
- DECLARE_PCI_UNMAP_ADDR(direct_mapping)
+ /*
+ * Fake scatterlist used when scmnd->use_sg==0. Can be killed
+ * when the SCSI midlayer no longer generates non-SG commands.
+ */
+ struct scatterlist fake_sg;
struct completion done;
short next;
u8 cmd_done;