Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c      |   1
-rw-r--r--  block/blk-ioc.c       |  44
-rw-r--r--  block/blk-mq-sched.c  |  16
-rw-r--r--  block/blk-mq-tag.c    |   2
-rw-r--r--  block/blk-mq-tag.h    |   6
-rw-r--r--  block/blk-mq.c        | 120
-rw-r--r--  block/blk-mq.h        |  10
-rw-r--r--  block/blk-sysfs.c     |   2
-rw-r--r--  block/elevator.c      |   2
-rw-r--r--  block/genhd.c         |   5
10 files changed, 138 insertions(+), 70 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index b9e857f4afe8..1086dac8724c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -578,7 +578,6 @@ void blk_cleanup_queue(struct request_queue *q)
q->queue_lock = &q->__queue_lock;
spin_unlock_irq(lock);
- bdi_unregister(q->backing_dev_info);
put_disk_devt(q->disk_devt);
/* @q is and will stay empty, shutdown and put */
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 6bfa39675337..63898d229cb9 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -37,8 +37,8 @@ static void icq_free_icq_rcu(struct rcu_head *head)
}
/*
- * Exit an icq. Called with both ioc and q locked for sq, only ioc locked for
- * mq.
+ * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
+ * and queue locked for legacy.
*/
static void ioc_exit_icq(struct io_cq *icq)
{
@@ -55,7 +55,10 @@ static void ioc_exit_icq(struct io_cq *icq)
icq->flags |= ICQ_EXITED;
}
-/* Release an icq. Called with both ioc and q locked. */
+/*
+ * Release an icq. Called with ioc locked for blk-mq, and with both ioc
+ * and queue locked for legacy.
+ */
static void ioc_destroy_icq(struct io_cq *icq)
{
struct io_context *ioc = icq->ioc;
@@ -63,7 +66,6 @@ static void ioc_destroy_icq(struct io_cq *icq)
struct elevator_type *et = q->elevator->type;
lockdep_assert_held(&ioc->lock);
- lockdep_assert_held(q->queue_lock);
radix_tree_delete(&ioc->icq_tree, icq->q->id);
hlist_del_init(&icq->ioc_node);
@@ -223,24 +225,40 @@ void exit_io_context(struct task_struct *task)
put_io_context_active(ioc);
}
+static void __ioc_clear_queue(struct list_head *icq_list)
+{
+ unsigned long flags;
+
+ while (!list_empty(icq_list)) {
+ struct io_cq *icq = list_entry(icq_list->next,
+ struct io_cq, q_node);
+ struct io_context *ioc = icq->ioc;
+
+ spin_lock_irqsave(&ioc->lock, flags);
+ ioc_destroy_icq(icq);
+ spin_unlock_irqrestore(&ioc->lock, flags);
+ }
+}
+
/**
* ioc_clear_queue - break any ioc association with the specified queue
* @q: request_queue being cleared
*
- * Walk @q->icq_list and exit all io_cq's. Must be called with @q locked.
+ * Walk @q->icq_list and exit all io_cq's.
*/
void ioc_clear_queue(struct request_queue *q)
{
- lockdep_assert_held(q->queue_lock);
+ LIST_HEAD(icq_list);
- while (!list_empty(&q->icq_list)) {
- struct io_cq *icq = list_entry(q->icq_list.next,
- struct io_cq, q_node);
- struct io_context *ioc = icq->ioc;
+ spin_lock_irq(q->queue_lock);
+ list_splice_init(&q->icq_list, &icq_list);
- spin_lock(&ioc->lock);
- ioc_destroy_icq(icq);
- spin_unlock(&ioc->lock);
+ if (q->mq_ops) {
+ spin_unlock_irq(q->queue_lock);
+ __ioc_clear_queue(&icq_list);
+ } else {
+ __ioc_clear_queue(&icq_list);
+ spin_unlock_irq(q->queue_lock);
}
}
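
For reference, the resulting ioc_clear_queue() after this hunk reads roughly as follows (reconstructed from the diff above): the icq list is detached under the queue lock, and for blk-mq the lock is dropped before the icqs are destroyed, since ioc_destroy_icq() now requires only the ioc lock there.

	void ioc_clear_queue(struct request_queue *q)
	{
		LIST_HEAD(icq_list);

		spin_lock_irq(q->queue_lock);
		list_splice_init(&q->icq_list, &icq_list);

		if (q->mq_ops) {
			/* blk-mq: destroy icqs with only ioc->lock held */
			spin_unlock_irq(q->queue_lock);
			__ioc_clear_queue(&icq_list);
		} else {
			/* legacy: queue lock must still be held */
			__ioc_clear_queue(&icq_list);
			spin_unlock_irq(q->queue_lock);
		}
	}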
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 98c7b061781e..09af8ff18719 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -110,15 +110,14 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
struct blk_mq_alloc_data *data)
{
struct elevator_queue *e = q->elevator;
- struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx;
struct request *rq;
blk_queue_enter_live(q);
- ctx = blk_mq_get_ctx(q);
- hctx = blk_mq_map_queue(q, ctx->cpu);
-
- blk_mq_set_alloc_data(data, q, data->flags, ctx, hctx);
+ data->q = q;
+ if (likely(!data->ctx))
+ data->ctx = blk_mq_get_ctx(q);
+ if (likely(!data->hctx))
+ data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
if (e) {
data->flags |= BLK_MQ_REQ_INTERNAL;
@@ -135,8 +134,6 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
rq = __blk_mq_alloc_request(data, op);
} else {
rq = __blk_mq_alloc_request(data, op);
- if (rq)
- data->hctx->tags->rqs[rq->tag] = rq;
}
if (rq) {
@@ -454,7 +451,8 @@ int blk_mq_sched_setup(struct request_queue *q)
*/
ret = 0;
queue_for_each_hw_ctx(q, hctx, i) {
- hctx->sched_tags = blk_mq_alloc_rq_map(set, i, q->nr_requests, 0);
+ hctx->sched_tags = blk_mq_alloc_rq_map(set, i,
+ q->nr_requests, set->reserved_tags);
if (!hctx->sched_tags) {
ret = -ENOMEM;
break;
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 54c84363c1b2..e48bc2c72615 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -181,7 +181,7 @@ found_tag:
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
struct blk_mq_ctx *ctx, unsigned int tag)
{
- if (tag >= tags->nr_reserved_tags) {
+ if (!blk_mq_tag_is_reserved(tags, tag)) {
const int real_tag = tag - tags->nr_reserved_tags;
BUG_ON(real_tag >= tags->nr_tags);
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 63497423c5cd..5cb51e53cc03 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -85,4 +85,10 @@ static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,
hctx->tags->rqs[tag] = rq;
}
+static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
+ unsigned int tag)
+{
+ return tag < tags->nr_reserved_tags;
+}
+
#endif
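
The new helper encodes the tag-space layout that the blk_mq_put_tag() hunk above relies on: tags in [0, nr_reserved_tags) come from the reserved pool, and normal tags sit above that, offset by nr_reserved_tags. A minimal sketch of a caller that ends up with a reserved tag (the op argument is illustrative only):

	/* BLK_MQ_REQ_RESERVED makes the allocator draw from the reserved
	 * pool, so the resulting tag satisfies blk_mq_tag_is_reserved();
	 * with a scheduler attached, the internal tag is reserved too. */
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_READ, BLK_MQ_REQ_RESERVED);
	if (IS_ERR(rq))
		return PTR_ERR(rq);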
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6f35b6fd4799..b2fd175e84d7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -77,10 +77,20 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
-static void blk_mq_freeze_queue_wait(struct request_queue *q)
+void blk_mq_freeze_queue_wait(struct request_queue *q)
{
wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
+
+int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
+ unsigned long timeout)
+{
+ return wait_event_timeout(q->mq_freeze_wq,
+ percpu_ref_is_zero(&q->q_usage_counter),
+ timeout);
+}
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
/*
* Guarantee no request is in use, so we can change any data structure of
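
blk_mq_freeze_queue_wait() is now exported, and the new timeout variant lets drivers bound how long they block waiting for a queue to drain. A sketch of how a driver might use it during a reset (the function and the 5-second budget are hypothetical; recovery after a timeout is up to the caller):

	static bool foo_freeze_queue(struct request_queue *q)
	{
		blk_mq_freeze_queue_start(q);

		/* Non-zero means the usage counter hit zero in time;
		 * zero means in-flight requests never drained. */
		if (blk_mq_freeze_queue_wait_timeout(q, 5 * HZ))
			return true;
		return false;
	}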
@@ -236,6 +246,7 @@ struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
}
rq->tag = tag;
rq->internal_tag = -1;
+ data->hctx->tags->rqs[rq->tag] = rq;
}
blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
@@ -275,10 +286,9 @@ EXPORT_SYMBOL(blk_mq_alloc_request);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
unsigned int flags, unsigned int hctx_idx)
{
- struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx;
+ struct blk_mq_alloc_data alloc_data = { .flags = flags };
struct request *rq;
- struct blk_mq_alloc_data alloc_data;
+ unsigned int cpu;
int ret;
/*
@@ -301,25 +311,23 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
* Check if the hardware context is actually mapped to anything.
* If not tell the caller that it should skip this queue.
*/
- hctx = q->queue_hw_ctx[hctx_idx];
- if (!blk_mq_hw_queue_mapped(hctx)) {
- ret = -EXDEV;
- goto out_queue_exit;
+ alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
+ if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
+ blk_queue_exit(q);
+ return ERR_PTR(-EXDEV);
}
- ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
+ cpu = cpumask_first(alloc_data.hctx->cpumask);
+ alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
- blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
- rq = __blk_mq_alloc_request(&alloc_data, rw);
- if (!rq) {
- ret = -EWOULDBLOCK;
- goto out_queue_exit;
- }
-
- return rq;
+ rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
-out_queue_exit:
+ blk_mq_put_ctx(alloc_data.ctx);
blk_queue_exit(q);
- return ERR_PTR(ret);
+
+ if (!rq)
+ return ERR_PTR(-EWOULDBLOCK);
+
+ return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
@@ -854,6 +862,9 @@ done:
return true;
}
+ if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
+ data.flags |= BLK_MQ_REQ_RESERVED;
+
rq->tag = blk_mq_get_tag(&data);
if (rq->tag >= 0) {
if (blk_mq_tag_busy(data.hctx)) {
@@ -867,12 +878,9 @@ done:
return false;
}
-static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
+static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
{
- if (rq->tag == -1 || rq->internal_tag == -1)
- return;
-
blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
rq->tag = -1;
@@ -882,6 +890,26 @@ static void blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
}
}
+static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
+{
+ if (rq->tag == -1 || rq->internal_tag == -1)
+ return;
+
+ __blk_mq_put_driver_tag(hctx, rq);
+}
+
+static void blk_mq_put_driver_tag(struct request *rq)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ if (rq->tag == -1 || rq->internal_tag == -1)
+ return;
+
+ hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+ __blk_mq_put_driver_tag(hctx, rq);
+}
+
/*
* If we fail getting a driver tag because all the driver tags are already
* assigned and on the dispatch list, BUT the first entry does not have a
@@ -991,7 +1019,19 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
bd.rq = rq;
bd.list = dptr;
- bd.last = list_empty(list);
+
+ /*
+ * Flag last if we have no more requests, or if we have more
+ * but can't assign a driver tag to it.
+ */
+ if (list_empty(list))
+ bd.last = true;
+ else {
+ struct request *nxt;
+
+ nxt = list_first_entry(list, struct request, queuelist);
+ bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
+ }
ret = q->mq_ops->queue_rq(hctx, &bd);
switch (ret) {
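
Drivers use bd.last to batch submissions, e.g. deferring a doorbell write until the end of a batch, so it is now set early when the next request cannot get a driver tag and would otherwise leave the batch unterminated. A hypothetical queue_rq honoring the flag (the foo_* names are illustrative):

	static int foo_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
	{
		struct foo_queue *fq = hctx->driver_data;

		/* Post the request, but only kick the hardware once the
		 * block layer marks the end of the current batch. */
		foo_post(fq, bd->rq);
		if (bd->last)
			foo_kick(fq);
		return BLK_MQ_RQ_QUEUE_OK;
	}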
@@ -999,7 +1039,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
queued++;
break;
case BLK_MQ_RQ_QUEUE_BUSY:
- blk_mq_put_driver_tag(hctx, rq);
+ blk_mq_put_driver_tag_hctx(hctx, rq);
list_add(&rq->queuelist, list);
__blk_mq_requeue_request(rq);
break;
@@ -1029,6 +1069,13 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
* that is where we will continue on next queue run.
*/
if (!list_empty(list)) {
+ /*
+ * If we got a driver tag for the next request already,
+ * free it again.
+ */
+ rq = list_first_entry(list, struct request, queuelist);
+ blk_mq_put_driver_tag(rq);
+
spin_lock(&hctx->lock);
list_splice_init(list, &hctx->dispatch);
spin_unlock(&hctx->lock);
@@ -1715,16 +1762,20 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
unsigned int reserved_tags)
{
struct blk_mq_tags *tags;
+ int node;
- tags = blk_mq_init_tags(nr_tags, reserved_tags,
- set->numa_node,
+ node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+ if (node == NUMA_NO_NODE)
+ node = set->numa_node;
+
+ tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
if (!tags)
return NULL;
tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
- set->numa_node);
+ node);
if (!tags->rqs) {
blk_mq_free_tags(tags);
return NULL;
@@ -1732,7 +1783,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
- set->numa_node);
+ node);
if (!tags->static_rqs) {
kfree(tags->rqs);
blk_mq_free_tags(tags);
@@ -1752,6 +1803,11 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
{
unsigned int i, j, entries_per_page, max_order = 4;
size_t rq_size, left;
+ int node;
+
+ node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+ if (node == NUMA_NO_NODE)
+ node = set->numa_node;
INIT_LIST_HEAD(&tags->page_list);
@@ -1773,7 +1829,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
this_order--;
do {
- page = alloc_pages_node(set->numa_node,
+ page = alloc_pages_node(node,
GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
this_order);
if (page)
@@ -1806,7 +1862,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
if (set->ops->init_request) {
if (set->ops->init_request(set->driver_data,
rq, hctx_idx, i,
- set->numa_node)) {
+ node)) {
tags->static_rqs[i] = NULL;
goto fail;
}
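
Both allocation paths above now resolve the NUMA node the same way; pulled out as a hypothetical helper for illustration:

	/* Prefer the node of the CPUs this hardware queue is mapped to,
	 * falling back to the tag set's default node when unmapped. */
	static int hctx_alloc_node(struct blk_mq_tag_set *set,
				   unsigned int hctx_idx)
	{
		int node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);

		return node == NUMA_NO_NODE ? set->numa_node : node;
	}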
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 24b2256186f3..088ced003c13 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -146,16 +146,6 @@ struct blk_mq_alloc_data {
struct blk_mq_hw_ctx *hctx;
};
-static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
- struct request_queue *q, unsigned int flags,
- struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
-{
- data->q = q;
- data->flags = flags;
- data->ctx = ctx;
- data->hctx = hctx;
-}
-
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
if (data->flags & BLK_MQ_REQ_INTERNAL)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 002af836aa87..c44b321335f3 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -815,9 +815,7 @@ static void blk_release_queue(struct kobject *kobj)
blkcg_exit_queue(q);
if (q->elevator) {
- spin_lock_irq(q->queue_lock);
ioc_clear_queue(q);
- spin_unlock_irq(q->queue_lock);
elevator_exit(q->elevator);
}
diff --git a/block/elevator.c b/block/elevator.c
index ac1c9f481a98..01139f549b5b 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -983,9 +983,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
if (old_registered)
elv_unregister_queue(q);
- spin_lock_irq(q->queue_lock);
ioc_clear_queue(q);
- spin_unlock_irq(q->queue_lock);
}
/* allocate, init and register new elevator */
diff --git a/block/genhd.c b/block/genhd.c
index 2f444b87a5f2..b26a5ea115d0 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -681,6 +681,11 @@ void del_gendisk(struct gendisk *disk)
disk->flags &= ~GENHD_FL_UP;
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
+ /*
+ * Unregister bdi before releasing device numbers (as they can get
+ * reused and we'd get clashes in sysfs).
+ */
+ bdi_unregister(disk->queue->backing_dev_info);
blk_unregister_queue(disk);
blk_unregister_region(disk_devt(disk), disk->minors);