author		Linus Torvalds <torvalds@linux-foundation.org>	2012-04-13 18:07:19 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-04-13 18:07:19 -0700
commit		d8dd0b6d4836bce81cece60509ef3b157a420776
tree		7a28f327a15443d6c9d091f3d272abd107251ab7 /block
parent		2d59dcfb54ade45cacc59a6e7bd96b8c19088c3d
parent		1b2e19f17ed327af6add02978efdf354e4f8e4df
Merge branch 'for-3.4/core' of git://git.kernel.dk/linux-block
Pull block core bits from Jens Axboe:
 "It's a nice and quiet round this time, since most of the tricky stuff
  has been pushed to 3.5 to give it more time to mature.  After a few
  hectic block IO core changes for 3.3 and 3.2, I'm quite happy with a
  slow round.

  Really minor stuff in here, the only real functional change is making
  the auto-unplug threshold a per-queue entity.  The threshold is set so
  that it's low enough that we don't hold off IO for too long, but still
  big enough to get a nice benefit from the batched insert (and hence
  queue lock cost reduction).  For raid configurations, this currently
  breaks down."

* 'for-3.4/core' of git://git.kernel.dk/linux-block:
  block: make auto block plug flush threshold per-disk based
  Documentation: Add sysfs ABI change for cfq's target latency.
  block: Make cfq_target_latency tunable through sysfs.
  block: use lockdep_assert_held for queue locking
  block: blk_alloc_queue_node(): use caller's GFP flags instead of GFP_KERNEL
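The per-disk threshold mentioned above governs when a task's plug list
is flushed during submission. A minimal sketch of that logic, loosely
following the 3.4-era submission path in blk-core.c (simplified, not
the verbatim kernel code):

	/*
	 * Once enough requests have been plugged for this queue, flush
	 * the plug so the batch reaches the driver.  BLK_MAX_REQUEST_COUNT
	 * is the stock threshold; the first commit in this pull makes the
	 * count per target disk by only counting requests with rq->q == q
	 * (see the attempt_plug_merge() hunk below).
	 */
	if (plug && request_count >= BLK_MAX_REQUEST_COUNT)
		blk_flush_plug_list(plug, false);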
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	 5
-rw-r--r--	block/blk-throttle.c	 2
-rw-r--r--	block/cfq-iosched.c	10
3 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3a78b00edd71..1f61b74867e4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -483,7 +483,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q)
return NULL;
- q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+ q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0)
goto fail_q;
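Propagating the caller's flags matters because blk_alloc_queue_node()
can be called from contexts that must restrict memory reclaim; before
this fix the queue-ID allocation ignored those constraints and always
used GFP_KERNEL. A hypothetical caller (flags and error handling
illustrative):

	/*
	 * Hypothetical caller: allocate a queue without recursing into
	 * IO reclaim.  With the fix, the GFP_NOIO constraint now also
	 * reaches the ida_simple_get() call that assigns q->id.
	 */
	struct request_queue *q = blk_alloc_queue_node(GFP_NOIO, NUMA_NO_NODE);

	if (!q)
		return -ENOMEM;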
@@ -1277,7 +1277,8 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
list_for_each_entry_reverse(rq, &plug->list, queuelist) {
int el_ret;
- (*request_count)++;
+ if (rq->q == q)
+ (*request_count)++;
if (rq->q != q || !blk_rq_merge_ok(rq, bio))
continue;
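This hunk is the per-disk threshold change from the merge message:
request_count previously counted every request on the plug list,
regardless of its destination queue. An illustrative note on why the
global count broke down for raid:

	/*
	 * Illustrative effect: a task writing through a 4-disk md array
	 * plugs requests for four member queues on one plug list.  With a
	 * global count, the flush threshold trips after only a few
	 * requests per disk; counting only rq->q == q lets each disk
	 * batch up to the full threshold before the plug is flushed.
	 */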
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5eed6a76721d..f2ddb94626bd 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1218,7 +1218,7 @@ void blk_throtl_drain(struct request_queue *q)
struct bio_list bl;
struct bio *bio;
- WARN_ON_ONCE(!queue_is_locked(q));
+ queue_lockdep_assert_held(q);
bio_list_init(&bl);
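The switch away from WARN_ON_ONCE() tightens the check:
queue_is_locked() is hardwired to true on !SMP builds and on SMP only
shows that someone holds the lock, whereas lockdep_assert_held()
verifies that the current task holds it and compiles away entirely
without CONFIG_LOCKDEP. The helper is roughly of this assumed form:

	/*
	 * Assumed form of the helper used above: forward to lockdep's
	 * lock-ownership check on the queue lock.  A no-op when lockdep
	 * is disabled, so the drain path no longer pays for the check in
	 * production builds.
	 */
	static inline void queue_lockdep_assert_held(struct request_queue *q)
	{
		lockdep_assert_held(q->queue_lock);
	}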
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 457295253566..3c38536bd52c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -295,6 +295,7 @@ struct cfq_data {
unsigned int cfq_slice_idle;
unsigned int cfq_group_idle;
unsigned int cfq_latency;
+ unsigned int cfq_target_latency;
/*
* Fallback dummy cfqq for extreme OOM conditions
@@ -604,7 +605,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
struct cfq_rb_root *st = &cfqd->grp_service_tree;
- return cfq_target_latency * cfqg->weight / st->total_weight;
+ return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
}
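The group slice is the target latency divided up by cgroup weight; a
worked example with the era's defaults (weights illustrative):

	/*
	 * With the default cfq_target_latency of 300 ms, a cgroup
	 * weighted 500 out of a total busy weight of 1000 receives
	 *
	 *	300 * 500 / 1000 = 150 ms
	 *
	 * of service per round.  Raising target_latency through the new
	 * sysfs knob scales every group's slice proportionally.
	 */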
static inline unsigned
@@ -2271,7 +2272,8 @@ new_workload:
* to have higher weight. A more accurate thing would be to
* calculate system wide asnc/sync ratio.
*/
- tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
+ tmp = cfqd->cfq_target_latency *
+ cfqg_busy_async_queues(cfqd, cfqg);
tmp = tmp/cfqd->busy_queues;
slice = min_t(unsigned, slice, tmp);
@@ -3737,6 +3739,7 @@ static void *cfq_init_queue(struct request_queue *q)
cfqd->cfq_back_penalty = cfq_back_penalty;
cfqd->cfq_slice[0] = cfq_slice_async;
cfqd->cfq_slice[1] = cfq_slice_sync;
+ cfqd->cfq_target_latency = cfq_target_latency;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
cfqd->cfq_group_idle = cfq_group_idle;
@@ -3788,6 +3791,7 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -3821,6 +3825,7 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
+STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
#undef STORE_FUNCTION
#define CFQ_ATTR(name) \
@@ -3838,6 +3843,7 @@ static struct elv_fs_entry cfq_attrs[] = {
CFQ_ATTR(slice_idle),
CFQ_ATTR(group_idle),
CFQ_ATTR(low_latency),
+ CFQ_ATTR(target_latency),
__ATTR_NULL
};
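With the CFQ_ATTR() entry in place, the show/store pair above is wired
into the elevator's sysfs directory. Both use __CONV = 1, so the value
is converted between internal jiffies and milliseconds at the sysfs
boundary; from userspace the tunable would be used like this (device
name illustrative):

	/*
	 * Userspace view of the new tunable:
	 *
	 *	# cat /sys/block/sda/queue/iosched/target_latency
	 *	300
	 *	# echo 600 > /sys/block/sda/queue/iosched/target_latency
	 */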