Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r--  block/blk-sysfs.c | 34 +++++++++++++++++++---------------
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index aa41b47c22d2..9628b291f960 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -40,7 +40,7 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page)
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
- struct request_list *rl = &q->rq;
+ struct request_list *rl;
unsigned long nr;
int ret;
@@ -55,6 +55,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
q->nr_requests = nr;
blk_queue_congestion_threshold(q);
+ /* congestion isn't cgroup aware and follows root blkcg for now */
+ rl = &q->root_rl;
+
if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
blk_set_queue_congested(q, BLK_RW_SYNC);
else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
@@ -65,19 +68,22 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
blk_clear_queue_congested(q, BLK_RW_ASYNC);
- if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
- blk_set_queue_full(q, BLK_RW_SYNC);
- } else {
- blk_clear_queue_full(q, BLK_RW_SYNC);
- wake_up(&rl->wait[BLK_RW_SYNC]);
+ blk_queue_for_each_rl(rl, q) {
+ if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+ blk_set_rl_full(rl, BLK_RW_SYNC);
+ } else {
+ blk_clear_rl_full(rl, BLK_RW_SYNC);
+ wake_up(&rl->wait[BLK_RW_SYNC]);
+ }
+
+ if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+ blk_set_rl_full(rl, BLK_RW_ASYNC);
+ } else {
+ blk_clear_rl_full(rl, BLK_RW_ASYNC);
+ wake_up(&rl->wait[BLK_RW_ASYNC]);
+ }
}
- if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
- blk_set_queue_full(q, BLK_RW_ASYNC);
- } else {
- blk_clear_queue_full(q, BLK_RW_ASYNC);
- wake_up(&rl->wait[BLK_RW_ASYNC]);
- }
spin_unlock_irq(q->queue_lock);
return ret;
}
@@ -476,7 +482,6 @@ static void blk_release_queue(struct kobject *kobj)
{
struct request_queue *q =
container_of(kobj, struct request_queue, kobj);
- struct request_list *rl = &q->rq;
blk_sync_queue(q);
@@ -489,8 +494,7 @@ static void blk_release_queue(struct kobject *kobj)
elevator_exit(q->elevator);
}
- if (rl->rq_pool)
- mempool_destroy(rl->rq_pool);
+ blk_exit_rl(&q->root_rl);
if (q->queue_tags)
__blk_queue_free_tags(q);
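The first hunk above switches queue_requests_store() from touching the single &q->rq list to walking every request list attached to the queue with blk_queue_for_each_rl(), marking each list full or not-full individually; the second hunk replaces the open-coded mempool_destroy() in blk_release_queue() with blk_exit_rl(&q->root_rl). The standalone sketch below only mirrors the control flow of that loop: the struct definitions, the queue_for_each_rl() stand-in macro, and the main() harness are simplified illustrative assumptions, not the kernel's actual types or API.

/*
 * Simplified, self-contained illustration of the per-request-list
 * "full" handling introduced by this patch.  The types below are
 * deliberately minimal stand-ins, NOT the kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

enum { BLK_RW_SYNC = 0, BLK_RW_ASYNC = 1 };

struct request_list {
	unsigned int count[2];          /* allocated requests per direction */
	bool full[2];                   /* stand-in for the rl "full" flags */
	struct request_list *next;      /* stand-in for per-blkg list linkage */
};

struct request_queue {
	unsigned long nr_requests;
	struct request_list root_rl;    /* first list; others hang off ->next */
};

/* Stand-in for blk_queue_for_each_rl(): visit root_rl and its successors. */
#define queue_for_each_rl(rl, q) \
	for ((rl) = &(q)->root_rl; (rl); (rl) = (rl)->next)

/* Mirrors the loop body the patch adds inside blk_queue_for_each_rl(). */
static void update_rl_full_state(struct request_queue *q)
{
	struct request_list *rl;

	queue_for_each_rl(rl, q) {
		rl->full[BLK_RW_SYNC]  = rl->count[BLK_RW_SYNC]  >= q->nr_requests;
		rl->full[BLK_RW_ASYNC] = rl->count[BLK_RW_ASYNC] >= q->nr_requests;
		/* the real code also wakes sleepers when a flag is cleared */
	}
}

int main(void)
{
	struct request_list extra = { .count = { 3, 1 } };
	struct request_queue q = {
		.nr_requests = 4,
		.root_rl = { .count = { 5, 2 }, .next = &extra },
	};

	update_rl_full_state(&q);
	printf("root sync full: %d, extra sync full: %d\n",
	       q.root_rl.full[BLK_RW_SYNC], extra.full[BLK_RW_SYNC]);
	return 0;
}

In the actual patch the list walk also wakes tasks sleeping on rl->wait[] whenever a full flag is cleared, and the congestion checks stay tied to q->root_rl because, as the added comment notes, congestion is not cgroup aware and follows the root blkcg for now.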