author     Christoph Hellwig <hch@lst.de>    2017-01-03 08:28:41 +0300
committer  Jens Axboe <axboe@fb.com>         2017-01-27 15:08:35 -0700
commit     0a6ac4ee7c21098cb0e41cc4053b9d9ddc9e70a1 (patch)
tree       54f19534fd4654192c2d9b583eb85ae97f491aa2 /drivers/scsi/scsi_lib.c
parent     0fbc3e0ff623f1012e7c2af96e781eeb26bcc0d7 (diff)
scsi: respect unchecked_isa_dma for blk-mq
Currently blk-mq always allocates the sense buffer using a normal GFP_KERNEL allocation. Refactor the cmd pool code to split the cmd and sense allocation, and share the code to allocate the sense buffers as well as the sense buffer slab caches between the legacy and blk-mq paths.

Note that this switches to lazy allocation of the sense slab caches - the slab caches (not the actual allocations) won't be destroyed until the scsi module is unloaded, instead of keeping track of the hosts using them.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
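The helpers this patch introduces (scsi_init_sense_cache(), scsi_alloc_sense_buffer(), scsi_free_sense_buffer()) are meant to be shared between the legacy and blk-mq paths. As a rough sketch of the intended call sequence, the kernel-style C below shows one plausible way a caller would use them; example_host_setup(), example_init_one_command() and example_exit_one_command() are hypothetical names used for illustration only and are not part of this patch.

#include <linux/gfp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/*
 * Hypothetical callers (illustration only, not from the patch):
 * scsi_init_sense_cache() lazily creates the normal or ISA-DMA sense slab
 * cache for a host; the per-command helpers then allocate and free sense
 * buffers from whichever cache scsi_select_sense_cache() picks for that host.
 */
static int example_host_setup(struct Scsi_Host *shost)
{
	/* Safe to call more than once: returns 0 if the cache already exists. */
	return scsi_init_sense_cache(shost);
}

static int example_init_one_command(struct Scsi_Host *shost,
				    struct scsi_cmnd *cmd, int numa_node)
{
	cmd->sense_buffer = scsi_alloc_sense_buffer(shost, GFP_KERNEL,
						    numa_node);
	if (!cmd->sense_buffer)
		return -ENOMEM;
	return 0;
}

static void example_exit_one_command(struct Scsi_Host *shost,
				     struct scsi_cmnd *cmd)
{
	scsi_free_sense_buffer(shost, cmd->sense_buffer);
}

In the diff below, the blk-mq .init_request/.exit_request hooks play the role of the per-command helpers above, replacing the previous kzalloc_node()/kfree() of the sense buffer.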
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--  drivers/scsi/scsi_lib.c | 62
1 file changed, 59 insertions(+), 3 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e9e1e141af9c..3d6b364a8d82 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -39,6 +39,58 @@
struct kmem_cache *scsi_sdb_cache;
+static struct kmem_cache *scsi_sense_cache;
+static struct kmem_cache *scsi_sense_isadma_cache;
+static DEFINE_MUTEX(scsi_sense_cache_mutex);
+
+static inline struct kmem_cache *
+scsi_select_sense_cache(struct Scsi_Host *shost)
+{
+ return shost->unchecked_isa_dma ?
+ scsi_sense_isadma_cache : scsi_sense_cache;
+}
+
+void scsi_free_sense_buffer(struct Scsi_Host *shost,
+ unsigned char *sense_buffer)
+{
+ kmem_cache_free(scsi_select_sense_cache(shost), sense_buffer);
+}
+
+unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost, gfp_t gfp_mask,
+ int numa_node)
+{
+ return kmem_cache_alloc_node(scsi_select_sense_cache(shost), gfp_mask,
+ numa_node);
+}
+
+int scsi_init_sense_cache(struct Scsi_Host *shost)
+{
+ struct kmem_cache *cache;
+ int ret = 0;
+
+ cache = scsi_select_sense_cache(shost);
+ if (cache)
+ return 0;
+
+ mutex_lock(&scsi_sense_cache_mutex);
+ if (shost->unchecked_isa_dma) {
+ scsi_sense_isadma_cache =
+ kmem_cache_create("scsi_sense_cache(DMA)",
+ SCSI_SENSE_BUFFERSIZE, 0,
+ SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
+ if (!scsi_sense_isadma_cache)
+ ret = -ENOMEM;
+ } else {
+ scsi_sense_cache =
+ kmem_cache_create("scsi_sense_cache",
+ SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!scsi_sense_cache)
+ ret = -ENOMEM;
+ }
+
+ mutex_unlock(&scsi_sense_cache_mutex);
+ return ret;
+}
/*
* When to reinvoke queueing after a resource shortage. It's 3 msecs to
@@ -1981,10 +2033,11 @@ static int scsi_init_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx,
unsigned int numa_node)
{
+ struct Scsi_Host *shost = data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
- numa_node);
+ cmd->sense_buffer =
+ scsi_alloc_sense_buffer(shost, GFP_KERNEL, numa_node);
if (!cmd->sense_buffer)
return -ENOMEM;
return 0;
@@ -1993,9 +2046,10 @@ static int scsi_init_request(void *data, struct request *rq,
static void scsi_exit_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx)
{
+ struct Scsi_Host *shost = data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- kfree(cmd->sense_buffer);
+ scsi_free_sense_buffer(shost, cmd->sense_buffer);
}
static int scsi_map_queues(struct blk_mq_tag_set *set)
@@ -2208,6 +2262,8 @@ int __init scsi_init_queue(void)
void scsi_exit_queue(void)
{
+ kmem_cache_destroy(scsi_sense_cache);
+ kmem_cache_destroy(scsi_sense_isadma_cache);
kmem_cache_destroy(scsi_sdb_cache);
}