author     Tejun Heo <tj@kernel.org>      2012-04-19 16:29:22 -0700
committer  Jens Axboe <axboe@kernel.dk>   2012-04-20 10:06:40 +0200
commit     aaf7c680682f1999ef2e574f743c45d1674a8b8a (patch)
tree       82d597ddfa7456553262435174948d9fb03fffef /block/blk-core.c
parent     29e2b09ab5fa790514d47838f3c05497130908b3 (diff)
block: fix elvpriv allocation failure handling
Request allocation is mempool backed to guarantee forward progress under memory pressure; unfortunately, this property got broken while adding elvpriv data. Failures during elvpriv allocation, including ioc and icq creation failures, currently make get_request() fail as a whole. There is no forward-progress guarantee for these allocations - they may fail indefinitely under memory pressure, stalling IO and deadlocking the system.

This patch updates get_request() so that an elvpriv allocation failure doesn't make the whole function fail. If elvpriv allocation fails, the allocation is degraded into !ELVPRIV. This will force the request to ELEVATOR_INSERT_BACK, disturbing scheduling, but elvpriv alloc failures should be rare (nothing is per-request) and anything is better than deadlocking.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
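As an illustration of the fallback the message describes, here is a minimal userspace C sketch. All names in it (struct request, alloc_request, alloc_elv_private, get_request) are hypothetical stand-ins, not the kernel's definitions: the request itself comes from an allocation assumed to always make forward progress (standing in for the mempool), while the scheduler-private data is best-effort and its failure only degrades the request instead of failing the whole allocation.

/*
 * Minimal sketch of the degradation pattern, with hypothetical names.
 * The core object is assumed to always be obtainable (here via a static
 * reserve); the optional elevator-private data may fail, in which case
 * the request is handed out without it.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
    bool elvpriv;       /* carries elevator-private data? */
    void *elv_data;     /* optional per-request scheduler state */
};

/* Stand-in for the mempool-backed request pool: the static reserve
 * guarantees this never returns NULL. */
static struct request reserve_rq;

static struct request *alloc_request(void)
{
    struct request *rq = malloc(sizeof(*rq));
    return rq ? rq : &reserve_rq;
}

/* Stand-in for ioc/icq/elevator setup: plain malloc, may fail under
 * memory pressure with no forward-progress guarantee. */
static void *alloc_elv_private(void)
{
    return malloc(64);
}

static struct request *get_request(void)
{
    struct request *rq = alloc_request();

    rq->elvpriv = true;
    rq->elv_data = alloc_elv_private();
    if (!rq->elv_data) {
        /* Degrade to !ELVPRIV instead of failing: scheduling is
         * disturbed, but the caller still gets a usable request
         * (the kernel patch also prints a rate-limited warning). */
        fprintf(stderr, "aux data allocation failed, degrading request\n");
        rq->elvpriv = false;
    }
    return rq;
}

int main(void)
{
    struct request *rq = get_request();

    printf("got request, elvpriv=%d\n", rq->elvpriv);
    free(rq->elv_data);
    if (rq != &reserve_rq)
        free(rq);
    return 0;
}

In the patch below, the same degradation is the new fail_elvpriv path: REQ_ELVPRIV is cleared, rl->elvpriv is rebalanced under the queue lock, and the allocation continues at the out label.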
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--   block/blk-core.c   53
1 file changed, 36 insertions, 17 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index f6f68b0c8302..6cf13df43c80 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -29,6 +29,7 @@
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
+#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
@@ -930,17 +931,6 @@ retry:
rw_flags |= REQ_IO_STAT;
spin_unlock_irq(q->queue_lock);

- /* create icq if missing */
- if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
- create_io_context(gfp_mask, q->node);
- ioc = rq_ioc(bio);
- if (!ioc)
- goto fail_alloc;
- icq = ioc_create_icq(ioc, q, gfp_mask);
- if (!icq)
- goto fail_alloc;
- }
-
/* allocate and init request */
rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
if (!rq)
@@ -949,17 +939,28 @@ retry:
blk_rq_init(q, rq);
rq->cmd_flags = rw_flags | REQ_ALLOCED;

+ /* init elvpriv */
if (rw_flags & REQ_ELVPRIV) {
- rq->elv.icq = icq;
- if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
- mempool_free(rq, q->rq.rq_pool);
- goto fail_alloc;
+ if (unlikely(et->icq_cache && !icq)) {
+ create_io_context(gfp_mask, q->node);
+ ioc = rq_ioc(bio);
+ if (!ioc)
+ goto fail_elvpriv;
+
+ icq = ioc_create_icq(ioc, q, gfp_mask);
+ if (!icq)
+ goto fail_elvpriv;
}
- /* @rq->elv.icq holds on to io_context until @rq is freed */
+
+ rq->elv.icq = icq;
+ if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
+ goto fail_elvpriv;
+
+ /* @rq->elv.icq holds io_context until @rq is freed */
if (icq)
get_io_context(icq->ioc);
}
-
+out:
/*
* ioc may be NULL here, and ioc_batching will be false. That's
* OK, if the queue is under the request limit then requests need
@@ -972,6 +973,24 @@ retry:
trace_block_getrq(q, bio, rw_flags & 1);
return rq;

+fail_elvpriv:
+ /*
+ * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
+ * and may fail indefinitely under memory pressure and thus
+ * shouldn't stall IO. Treat this request as !elvpriv. This will
+ * disturb iosched and blkcg but weird is better than dead.
+ */
+ printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
+ dev_name(q->backing_dev_info.dev));
+
+ rq->cmd_flags &= ~REQ_ELVPRIV;
+ rq->elv.icq = NULL;
+
+ spin_lock_irq(q->queue_lock);
+ rl->elvpriv--;
+ spin_unlock_irq(q->queue_lock);
+ goto out;
+
fail_alloc:
/*
* Allocation failed presumably due to memory. Undo anything we