Diffstat (limited to 'block')
 block/blk-cgroup.c  |  2 +-
 block/blk-core.c    | 10 ++++++++--
 block/blk-mq.c      | 10 +++++++++-
 block/blk-sysfs.c   |  2 +-
 block/blk.h         |  2 +-
 block/cfq-iosched.c | 17 +++++++++++++++--
 6 files changed, 35 insertions(+), 8 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 7c2947128f58..0480892e97e5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg)
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
if (blkg->blkcg != &blkcg_root)
- blk_exit_rl(&blkg->rl);
+ blk_exit_rl(blkg->q, &blkg->rl);
blkg_rwstat_exit(&blkg->stat_ios);
blkg_rwstat_exit(&blkg->stat_bytes);
diff --git a/block/blk-core.c b/block/blk-core.c
index c7068520794b..a7421b772d0e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -648,13 +648,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
if (!rl->rq_pool)
return -ENOMEM;
+ if (rl != &q->root_rl)
+ WARN_ON_ONCE(!blk_get_queue(q));
+
return 0;
}
-void blk_exit_rl(struct request_list *rl)
+void blk_exit_rl(struct request_queue *q, struct request_list *rl)
{
- if (rl->rq_pool)
+ if (rl->rq_pool) {
mempool_destroy(rl->rq_pool);
+ if (rl != &q->root_rl)
+ blk_put_queue(q);
+ }
}
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
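The blk-core.c hunk above carries the substance of the fix: blk_init_rl() now takes a queue reference for every request list other than q->root_rl, and blk_exit_rl() drops it, so a per-cgroup request list freed late (via blkg_free() above) can no longer dereference a queue that has already gone away. Below is a minimal user-space sketch of that get/put pairing, not kernel code; queue_get(), queue_put(), rlist_init() and rlist_exit() are invented stand-ins for blk_get_queue()/blk_put_queue() and blk_init_rl()/blk_exit_rl().

/*
 * Minimal user-space sketch of the pairing above -- not kernel code.
 * queue_get()/queue_put() stand in for blk_get_queue()/blk_put_queue(),
 * rlist_init()/rlist_exit() for blk_init_rl()/blk_exit_rl().
 */
#include <stdio.h>
#include <stdlib.h>

struct rlist;

struct queue {
	int refcount;          /* analogue of the queue's kobject refcount */
	struct rlist *root_rl; /* the root list never pins the queue */
};

struct rlist {
	struct queue *q;
	void *rq_pool;         /* analogue of rl->rq_pool (the mempool) */
};

static void queue_get(struct queue *q)
{
	q->refcount++;                    /* blk_get_queue() analogue */
}

static void queue_put(struct queue *q)
{
	if (--q->refcount == 0) {         /* blk_put_queue() analogue */
		printf("queue freed\n");
		free(q);
	}
}

static int rlist_init(struct rlist *rl, struct queue *q)
{
	rl->q = q;
	rl->rq_pool = malloc(64);
	if (!rl->rq_pool)
		return -1;
	if (rl != q->root_rl)             /* mirror of rl != &q->root_rl */
		queue_get(q);             /* pin the queue for this list */
	return 0;
}

static void rlist_exit(struct rlist *rl)
{
	if (rl->rq_pool) {
		free(rl->rq_pool);
		if (rl != rl->q->root_rl)
			queue_put(rl->q); /* drop the ref taken in init */
	}
}

int main(void)
{
	struct queue *q = calloc(1, sizeof(*q));
	struct rlist rl;

	if (!q)
		return 1;
	q->refcount = 1;                  /* creator's reference */
	if (rlist_init(&rl, q))
		return 1;
	queue_put(q);                     /* creator lets go: queue survives */
	rlist_exit(&rl);                  /* last ref dropped: "queue freed" */
	return 0;
}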
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f2224ffd225d..1bcccedcc74f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2641,7 +2641,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
return ret;
}
-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ int nr_hw_queues)
{
struct request_queue *q;
@@ -2665,6 +2666,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_unfreeze_queue(q);
}
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+ mutex_lock(&set->tag_list_lock);
+ __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+ mutex_unlock(&set->tag_list_lock);
+}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
/* Enable polling stats and return whether they were already enabled. */
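The blk-mq.c hunk is a common locking refactor: the existing body moves into a static helper, __blk_mq_update_nr_hw_queues(), that assumes set->tag_list_lock is held, while the exported entry point becomes a thin wrapper that takes and releases the lock around it. A minimal pthread sketch of the same split (invented names, not the kernel API):

#include <pthread.h>

struct tag_set {
	pthread_mutex_t tag_list_lock;
	int nr_hw_queues;
};

/* Caller must hold set->tag_list_lock (the double-underscore convention). */
static void __update_nr_hw_queues(struct tag_set *set, int nr_hw_queues)
{
	set->nr_hw_queues = nr_hw_queues; /* stand-in for the real remap work */
}

/* Public entry point: serializes against concurrent tag-list updates. */
void update_nr_hw_queues(struct tag_set *set, int nr_hw_queues)
{
	pthread_mutex_lock(&set->tag_list_lock);
	__update_nr_hw_queues(set, nr_hw_queues);
	pthread_mutex_unlock(&set->tag_list_lock);
}

int main(void)
{
	struct tag_set set = { PTHREAD_MUTEX_INITIALIZER, 4 };

	update_nr_hw_queues(&set, 8); /* safe against concurrent callers */
	return 0;
}

Internal code paths that already hold the lock can call the double-underscore helper directly without self-deadlocking, while external callers get serialization for free.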
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 712b018e9f54..283da7fbe034 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -809,7 +809,7 @@ static void blk_release_queue(struct kobject *kobj)
blk_free_queue_stats(q->stats);
- blk_exit_rl(&q->root_rl);
+ blk_exit_rl(q, &q->root_rl);
if (q->queue_tags)
__blk_queue_free_tags(q);
diff --git a/block/blk.h b/block/blk.h
index 2ed70228e44f..83c8e1100525 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
int blk_init_rl(struct request_list *rl, struct request_queue *q,
gfp_t gfp_mask);
-void blk_exit_rl(struct request_list *rl);
+void blk_exit_rl(struct request_queue *q, struct request_list *rl);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index da69b079725f..b7e9c7feeab2 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;
/*
- * offset from end of service tree
+ * offset from end of queue service tree for idle class
*/
#define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5)
+/* offset from end of group service tree under time slice mode */
+#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
+/* offset from end of group service tree under IOPS mode */
+#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
/*
* below this threshold, we consider thinktime immediate
@@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
cfqg->vfraction = max_t(unsigned, vfr, 1);
}
+static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
+{
+ if (!iops_mode(cfqd))
+ return CFQ_SLICE_MODE_GROUP_DELAY;
+ else
+ return CFQ_IOPS_MODE_GROUP_DELAY;
+}
+
static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
@@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
n = rb_last(&st->rb);
if (n) {
__cfqg = rb_entry_cfqg(n);
- cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+ cfqg->vdisktime = __cfqg->vdisktime +
+ cfq_get_cfqg_vdisktime_delay(cfqd);
} else
cfqg->vdisktime = st->min_vdisktime;
cfq_group_service_tree_add(st, cfqg);
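The cfq-iosched.c hunk stops borrowing CFQ_IDLE_DELAY (a queue-tree constant) for the group service tree and instead picks the offset per mode: NSEC_PER_SEC / 5 when vdisktime advances in nanoseconds (time-slice mode), and the much smaller HZ / 5 under IOPS mode, where vdisktime grows by dispatched-request counts and a 200 ms nanosecond offset would park a new group far behind its siblings. A runnable sketch of the selection (the HZ value here is illustrative and the scaffolding is invented):

/*
 * Hedged sketch of the new helper's mode-dependent offset; the
 * constants mirror the patch, the rest is invented scaffolding.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 250 /* illustrative; the real value is a kernel config choice */

#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
#define CFQ_IOPS_MODE_GROUP_DELAY  (HZ / 5)

static uint64_t vdisktime_delay(int iops_mode)
{
	return iops_mode ? CFQ_IOPS_MODE_GROUP_DELAY
			 : CFQ_SLICE_MODE_GROUP_DELAY;
}

int main(void)
{
	uint64_t last = 12345; /* vdisktime of the rightmost existing group */

	/* A new group is queued just behind the current tail in either mode. */
	printf("slice mode: %llu\n",
	       (unsigned long long)(last + vdisktime_delay(0)));
	printf("iops mode:  %llu\n",
	       (unsigned long long)(last + vdisktime_delay(1)));
	return 0;
}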