author    Paul Mackerras <paulus@samba.org>    2006-06-12 17:53:34 +1000
committer Paul Mackerras <paulus@samba.org>    2006-06-12 17:53:34 +1000
commit    7a0c58d0513c246ac5438ef4a55ce8b93395ae0e
tree      d18f58349ced242aa62e622e381e60933f71d451
parent    6218a761bbc27acc65248c80024875bcc06d52b1
parent    289a1e995e74734b5ec76ca8a5490058f4fecc24
Merge branch 'merge'
Diffstat (limited to 'block')
-rw-r--r--  block/as-iosched.c        13
-rw-r--r--  block/cfq-iosched.c       62
-rw-r--r--  block/deadline-iosched.c  13
-rw-r--r--  block/elevator.c          55
-rw-r--r--  block/noop-iosched.c       7
5 files changed, 94 insertions(+), 56 deletions(-)
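
The common thread across the as, cfq, deadline and noop hunks below is an interface change: elevator_init_fn now returns the scheduler's private-data pointer (NULL on allocation failure) instead of an int error code, and the core attaches that pointer via elevator_attach() rather than each scheduler writing e->elevator_data itself. A minimal standalone C sketch of the before/after contract — all names here (old_init, new_init, sched_data) are hypothetical stand-ins for the kernel types, not kernel code:

#include <stdio.h>
#include <stdlib.h>

struct sched_data { int dummy; };

/* old style: int return, callee stores the private data itself */
static int old_init(struct sched_data **out)
{
        *out = malloc(sizeof(**out));
        if (!*out)
                return -1;      /* stands in for -ENOMEM */
        return 0;
}

/* new style: pointer return, NULL means allocation failed */
static void *new_init(void)
{
        return calloc(1, sizeof(struct sched_data));
}

int main(void)
{
        struct sched_data *d = NULL;

        if (old_init(&d) == 0)
                printf("old style: callee attached %p\n", (void *)d);
        free(d);

        d = new_init();
        if (d)  /* caller, not callee, attaches the data */
                printf("new style: caller attaches %p\n", (void *)d);
        free(d);
        return 0;
}

Returning the pointer lets the caller decide when (and whether) to attach it, which is what makes the elevator_switch() reordering later in this diff possible.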
diff --git a/block/as-iosched.c b/block/as-iosched.c
index e25a5d79ab27..a7caf35ca0c2 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1648,17 +1648,17 @@ static void as_exit_queue(elevator_t *e)
* initialize elevator private data (as_data), and alloc an arq for
* each request on the free lists
*/
-static int as_init_queue(request_queue_t *q, elevator_t *e)
+static void *as_init_queue(request_queue_t *q, elevator_t *e)
{
struct as_data *ad;
int i;
if (!arq_pool)
- return -ENOMEM;
+ return NULL;
ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
if (!ad)
- return -ENOMEM;
+ return NULL;
memset(ad, 0, sizeof(*ad));
ad->q = q; /* Identify what queue the data belongs to */
@@ -1667,7 +1667,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
GFP_KERNEL, q->node);
if (!ad->hash) {
kfree(ad);
- return -ENOMEM;
+ return NULL;
}
ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
@@ -1675,7 +1675,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
if (!ad->arq_pool) {
kfree(ad->hash);
kfree(ad);
- return -ENOMEM;
+ return NULL;
}
/* anticipatory scheduling helpers */
@@ -1696,14 +1696,13 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
ad->antic_expire = default_antic_expire;
ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
- e->elevator_data = ad;
ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
if (ad->write_batch_count < 2)
ad->write_batch_count = 2;
- return 0;
+ return ad;
}
/*
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 11ce6aaf1bd0..a46d030e092a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -133,6 +133,7 @@ struct cfq_data {
mempool_t *crq_pool;
int rq_in_driver;
+ int hw_tag;
/*
* schedule slice state info
@@ -500,10 +501,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
/*
* if queue was preempted, just add to front to be fair. busy_rr
- * isn't sorted.
+ * isn't sorted, but insert at the back for fairness.
*/
if (preempted || list == &cfqd->busy_rr) {
- list_add(&cfqq->cfq_list, list);
+ if (preempted)
+ list = list->prev;
+
+ list_add_tail(&cfqq->cfq_list, list);
return;
}
@@ -664,6 +668,15 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
struct cfq_data *cfqd = q->elevator->elevator_data;
cfqd->rq_in_driver++;
+
+ /*
+ * If the depth is larger than 1, it really could be queueing. But let's
+ * make the mark a little higher - idling could still be good for
+ * low queueing, and a low queueing number could also just indicate
+ * a SCSI mid layer like behaviour where limit+1 is often seen.
+ */
+ if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
+ cfqd->hw_tag = 1;
}
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
@@ -879,6 +892,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
cfqq = list_entry_cfqq(cfqd->cur_rr.next);
/*
+ * If no new queues are available, check if the busy list has some
+ * before falling back to idle io.
+ */
+ if (!cfqq && !list_empty(&cfqd->busy_rr))
+ cfqq = list_entry_cfqq(cfqd->busy_rr.next);
+
+ /*
* if we have idle queues and no rt or be queues had pending
* requests, either allow immediate service if the grace period
* has passed or arm the idle grace timer
@@ -1458,7 +1478,8 @@ retry:
* set ->slice_left to allow preemption for a new process
*/
cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
- cfq_mark_cfqq_idle_window(cfqq);
+ if (!cfqd->hw_tag)
+ cfq_mark_cfqq_idle_window(cfqq);
cfq_mark_cfqq_prio_changed(cfqq);
cfq_init_prio_data(cfqq);
}
@@ -1649,7 +1670,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
{
int enable_idle = cfq_cfqq_idle_window(cfqq);
- if (!cic->ioc->task || !cfqd->cfq_slice_idle)
+ if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
enable_idle = 0;
else if (sample_valid(cic->ttime_samples)) {
if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -1740,14 +1761,24 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
+ cic = crq->io_context;
+
/*
* we never wait for an async request and we don't allow preemption
* of an async request. so just return early
*/
- if (!cfq_crq_is_sync(crq))
+ if (!cfq_crq_is_sync(crq)) {
+ /*
+ * sync process issued an async request; if it's waiting
+ * then expire it and kick rq handling.
+ */
+ if (cic == cfqd->active_cic &&
+ del_timer(&cfqd->idle_slice_timer)) {
+ cfq_slice_expired(cfqd, 0);
+ cfq_start_queueing(cfqd, cfqq);
+ }
return;
-
- cic = crq->io_context;
+ }
cfq_update_io_thinktime(cfqd, cic);
cfq_update_io_seektime(cfqd, cic, crq);
@@ -2165,10 +2196,9 @@ static void cfq_idle_class_timer(unsigned long data)
* race with a non-idle queue, reset timer
*/
end = cfqd->last_end_request + CFQ_IDLE_GRACE;
- if (!time_after_eq(jiffies, end)) {
- cfqd->idle_class_timer.expires = end;
- add_timer(&cfqd->idle_class_timer);
- } else
+ if (!time_after_eq(jiffies, end))
+ mod_timer(&cfqd->idle_class_timer, end);
+ else
cfq_schedule_dispatch(cfqd);
spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
@@ -2221,14 +2251,14 @@ static void cfq_exit_queue(elevator_t *e)
kfree(cfqd);
}
-static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
{
struct cfq_data *cfqd;
int i;
cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
if (!cfqd)
- return -ENOMEM;
+ return NULL;
memset(cfqd, 0, sizeof(*cfqd));
@@ -2258,8 +2288,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
- e->elevator_data = cfqd;
-
cfqd->queue = q;
cfqd->max_queued = q->nr_requests / 4;
@@ -2286,14 +2314,14 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
- return 0;
+ return cfqd;
out_crqpool:
kfree(cfqd->cfq_hash);
out_cfqhash:
kfree(cfqd->crq_hash);
out_crqhash:
kfree(cfqd);
- return -ENOMEM;
+ return NULL;
}
static void cfq_slab_kill(void)
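
The hw_tag hunks above make CFQ stop idling once the device appears to do its own command queueing: each activation bumps rq_in_driver, and the first time more than four requests are outstanding, hw_tag is latched on, which then disables the idle window both at queue setup and in cfq_update_idle_window(). A standalone model of that latch — struct and function names are hypothetical, not the kernel's:

#include <stdio.h>

struct cfq_model {
        int rq_in_driver;       /* requests currently handed to the driver */
        int hw_tag;             /* latched once depth ever exceeds 4 */
};

static void activate_request(struct cfq_model *c)
{
        c->rq_in_driver++;
        if (!c->hw_tag && c->rq_in_driver > 4)
                c->hw_tag = 1;  /* device seems to queue internally */
}

static void complete_request(struct cfq_model *c)
{
        c->rq_in_driver--;      /* note: hw_tag stays set */
}

int main(void)
{
        struct cfq_model c = { 0, 0 };
        int i;

        for (i = 0; i < 6; i++)
                activate_request(&c);
        for (i = 0; i < 6; i++)
                complete_request(&c);
        printf("hw_tag=%d (idling disabled: %s)\n",
               c.hw_tag, c.hw_tag ? "yes" : "no");
        return 0;
}

The threshold of 4 (rather than 1) matches the comment in the diff: a small depth can also come from SCSI-mid-layer-style limits, so only a sustained depth above 4 is treated as real hardware tagging.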
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 399fa1e60e1f..3bd0415a9828 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -613,24 +613,24 @@ static void deadline_exit_queue(elevator_t *e)
* initialize elevator private data (deadline_data), and alloc a drq for
* each request on the free lists
*/
-static int deadline_init_queue(request_queue_t *q, elevator_t *e)
+static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
struct deadline_data *dd;
int i;
if (!drq_pool)
- return -ENOMEM;
+ return NULL;
dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
if (!dd)
- return -ENOMEM;
+ return NULL;
memset(dd, 0, sizeof(*dd));
dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
GFP_KERNEL, q->node);
if (!dd->hash) {
kfree(dd);
- return -ENOMEM;
+ return NULL;
}
dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
@@ -638,7 +638,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
if (!dd->drq_pool) {
kfree(dd->hash);
kfree(dd);
- return -ENOMEM;
+ return NULL;
}
for (i = 0; i < DL_HASH_ENTRIES; i++)
@@ -653,8 +653,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
dd->writes_starved = writes_starved;
dd->front_merges = 1;
dd->fifo_batch = fifo_batch;
- e->elevator_data = dd;
- return 0;
+ return dd;
}
static void deadline_put_request(request_queue_t *q, struct request *rq)
diff --git a/block/elevator.c b/block/elevator.c
index 8768a367fdde..a0afdd317cef 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -121,16 +121,16 @@ static struct elevator_type *elevator_get(const char *name)
return e;
}
-static int elevator_attach(request_queue_t *q, struct elevator_queue *eq)
+static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
{
- int ret = 0;
+ return eq->ops->elevator_init_fn(q, eq);
+}
+static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
+ void *data)
+{
q->elevator = eq;
-
- if (eq->ops->elevator_init_fn)
- ret = eq->ops->elevator_init_fn(q, eq);
-
- return ret;
+ eq->elevator_data = data;
}
static char chosen_elevator[16];
@@ -181,6 +181,7 @@ int elevator_init(request_queue_t *q, char *name)
struct elevator_type *e = NULL;
struct elevator_queue *eq;
int ret = 0;
+ void *data;
INIT_LIST_HEAD(&q->queue_head);
q->last_merge = NULL;
@@ -202,10 +203,13 @@ int elevator_init(request_queue_t *q, char *name)
if (!eq)
return -ENOMEM;
- ret = elevator_attach(q, eq);
- if (ret)
+ data = elevator_init_queue(q, eq);
+ if (!data) {
kobject_put(&eq->kobj);
+ return -ENOMEM;
+ }
+ elevator_attach(q, eq, data);
return ret;
}
@@ -722,13 +726,16 @@ int elv_register_queue(struct request_queue *q)
return error;
}
+static void __elv_unregister_queue(elevator_t *e)
+{
+ kobject_uevent(&e->kobj, KOBJ_REMOVE);
+ kobject_del(&e->kobj);
+}
+
void elv_unregister_queue(struct request_queue *q)
{
- if (q) {
- elevator_t *e = q->elevator;
- kobject_uevent(&e->kobj, KOBJ_REMOVE);
- kobject_del(&e->kobj);
- }
+ if (q)
+ __elv_unregister_queue(q->elevator);
}
int elv_register(struct elevator_type *e)
@@ -780,6 +787,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
elevator_t *old_elevator, *e;
+ void *data;
/*
* Allocate new elevator
@@ -788,6 +796,12 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
if (!e)
return 0;
+ data = elevator_init_queue(q, e);
+ if (!data) {
+ kobject_put(&e->kobj);
+ return 0;
+ }
+
/*
* Turn on BYPASS and drain all requests w/ elevator private data
*/
@@ -806,19 +820,19 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
elv_drain_elevator(q);
}
- spin_unlock_irq(q->queue_lock);
-
/*
- * unregister old elevator data
+ * Remember old elevator.
*/
- elv_unregister_queue(q);
old_elevator = q->elevator;
/*
* attach and start new elevator
*/
- if (elevator_attach(q, e))
- goto fail;
+ elevator_attach(q, e, data);
+
+ spin_unlock_irq(q->queue_lock);
+
+ __elv_unregister_queue(old_elevator);
if (elv_register_queue(q))
goto fail_register;
@@ -837,7 +851,6 @@ fail_register:
*/
elevator_exit(e);
e = NULL;
-fail:
q->elevator = old_elevator;
elv_register_queue(q);
clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
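
The elevator.c hunks above reorder the switch path: the new scheduler's private data is allocated before the queue is bypassed and drained, and the old elevator is unregistered only after the new one has been attached and the queue lock dropped, so an allocation failure aborts the switch without ever detaching the running elevator. A standalone sketch of that allocate-then-swap ordering — all names hypothetical, modeling the sequence rather than reproducing the kernel code:

#include <stdio.h>
#include <stdlib.h>

struct elv { const char *name; void *data; };

/* Allocate the replacement's data first; only tear the old elevator
 * down once the new one is fully attached, so a failure leaves the
 * original running. */
static int elv_switch(struct elv **cur, struct elv *next)
{
        void *data = malloc(64);        /* elevator_init_queue() step */
        if (!data)
                return -1;              /* *cur untouched on failure */

        struct elv *old = *cur;         /* remember the old elevator */
        next->data = data;              /* elevator_attach() step */
        *cur = next;
        free(old->data);                /* unregister the old one last */
        old->data = NULL;
        return 0;
}

int main(void)
{
        struct elv cfq = { "cfq", malloc(64) }, noop = { "noop", NULL };
        struct elv *active = &cfq;

        if (elv_switch(&active, &noop) == 0)
                printf("now using %s\n", active->name);
        free(active->data);
        return 0;
}

This is also why the fail: label disappears from elevator_switch(): with init moved ahead of the drain, attach can no longer fail mid-switch, leaving fail_register as the only rollback path.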
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index f370e4a7fe6d..56a7c620574f 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -65,16 +65,15 @@ noop_latter_request(request_queue_t *q, struct request *rq)
return list_entry(rq->queuelist.next, struct request, queuelist);
}
-static int noop_init_queue(request_queue_t *q, elevator_t *e)
+static void *noop_init_queue(request_queue_t *q, elevator_t *e)
{
struct noop_data *nd;
nd = kmalloc(sizeof(*nd), GFP_KERNEL);
if (!nd)
- return -ENOMEM;
+ return NULL;
INIT_LIST_HEAD(&nd->queue);
- e->elevator_data = nd;
- return 0;
+ return nd;
}
static void noop_exit_queue(elevator_t *e)