author     Jens Axboe <jens.axboe@oracle.com>  2008-02-19 10:02:29 +0100
committer  Jens Axboe <jens.axboe@oracle.com>  2008-02-19 10:04:00 +0100
commit     ffc4e7595734cf768fa60cea8a4d545dfef8231a (patch)
tree       9b95aca67ea7c9e87254da501f73cca64504051d /block
parent     84e9e03c55c2456799ab19f1d577e72f721fdd39 (diff)
cfq-iosched: add hlist for browsing parallel to the radix tree
It's cumbersome to browse a radix tree from start to finish, especially
since we modify keys when a process exits. So add an hlist for the single
purpose of browsing over all known cfq_io_contexts, used for exit,
io prio change, etc.

This fixes http://bugzilla.kernel.org/show_bug.cgi?id=9948

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
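For illustration only, the following minimal userspace C sketch (not kernel code; every name in it is made up for the example) shows the pattern the patch introduces: each object is threaded onto a plain list owned by its container in addition to living in the keyed lookup structure, so operations that must visit every object (exit, io priority change) become a simple list walk instead of repeated keyed lookups.

/*
 * Minimal userspace sketch (illustrative only, not kernel code): keep a
 * plain list threaded through the same objects that live in a keyed
 * lookup structure, so "visit every object" becomes a list walk instead
 * of repeated keyed lookups.  All names below are invented for the example.
 */
#include <stdio.h>

struct ctx {
	unsigned long key;	/* key used by the lookup structure (the radix tree's role) */
	struct ctx *next;	/* extra link for browsing the whole set (the hlist's role)  */
};

struct owner {
	struct ctx *all;	/* head of the browse list, analogous to ioc->cic_list */
};

static void owner_link(struct owner *o, struct ctx *c)
{
	/* after a successful insert into the keyed structure, also chain it here */
	c->next = o->all;
	o->all = c;
}

static void owner_for_each(struct owner *o, void (*fn)(struct ctx *))
{
	/* exit / io prio change style walk over every known context */
	for (struct ctx *c = o->all; c; c = c->next)
		fn(c);
}

static void show(struct ctx *c)
{
	printf("ctx key=%lu\n", c->key);
}

int main(void)
{
	struct owner o = { .all = NULL };
	struct ctx a = { .key = 1 }, b = { .key = 2 };

	owner_link(&o, &a);
	owner_link(&o, &b);
	owner_for_each(&o, show);
	return 0;
}

The sketch omits the locking the real patch relies on: in the diff below, insertion happens under ioc->lock via hlist_add_head_rcu(), and the walk in call_for_each_cic() runs under rcu_read_lock() using hlist_for_each_entry_rcu().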
Diffstat (limited to 'block')
-rw-r--r--  block/blk-ioc.c     | 35
-rw-r--r--  block/cfq-iosched.c | 38
2 files changed, 27 insertions(+), 46 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 4ae0929c6e38..e34df7c9fc36 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -17,17 +17,13 @@ static struct kmem_cache *iocontext_cachep;
static void cfq_dtor(struct io_context *ioc)
{
- struct cfq_io_context *cic[1];
- int r;
+ if (!hlist_empty(&ioc->cic_list)) {
+ struct cfq_io_context *cic;
- /*
- * We don't have a specific key to lookup with, so use the gang
- * lookup to just retrieve the first item stored. The cfq exit
- * function will iterate the full tree, so any member will do.
- */
- r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
- if (r > 0)
- cic[0]->dtor(ioc);
+ cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+ cic_list);
+ cic->dtor(ioc);
+ }
}
/*
@@ -57,18 +53,16 @@ EXPORT_SYMBOL(put_io_context);
static void cfq_exit(struct io_context *ioc)
{
- struct cfq_io_context *cic[1];
- int r;
-
rcu_read_lock();
- /*
- * See comment for cfq_dtor()
- */
- r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
- rcu_read_unlock();
- if (r > 0)
- cic[0]->exit(ioc);
+ if (!hlist_empty(&ioc->cic_list)) {
+ struct cfq_io_context *cic;
+
+ cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+ cic_list);
+ cic->exit(ioc);
+ }
+ rcu_read_unlock();
}
/* Called by the exitting task */
@@ -105,6 +99,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
ret->nr_batch_requests = 0; /* because this is 0 */
ret->aic = NULL;
INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
+ INIT_HLIST_HEAD(&ret->cic_list);
ret->ioc_data = NULL;
}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ca198e61fa65..0f962ecae91f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1145,38 +1145,19 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
/*
* Call func for each cic attached to this ioc. Returns number of cic's seen.
*/
-#define CIC_GANG_NR 16
static unsigned int
call_for_each_cic(struct io_context *ioc,
void (*func)(struct io_context *, struct cfq_io_context *))
{
- struct cfq_io_context *cics[CIC_GANG_NR];
- unsigned long index = 0;
- unsigned int called = 0;
- int nr;
+ struct cfq_io_context *cic;
+ struct hlist_node *n;
+ int called = 0;
rcu_read_lock();
-
- do {
- int i;
-
- /*
- * Perhaps there's a better way - this just gang lookups from
- * 0 to the end, restarting after each CIC_GANG_NR from the
- * last key + 1.
- */
- nr = radix_tree_gang_lookup(&ioc->radix_root, (void **) cics,
- index, CIC_GANG_NR);
- if (!nr)
- break;
-
- called += nr;
- index = 1 + (unsigned long) cics[nr - 1]->key;
-
- for (i = 0; i < nr; i++)
- func(ioc, cics[i]);
- } while (nr == CIC_GANG_NR);
-
+ hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) {
+ func(ioc, cic);
+ called++;
+ }
rcu_read_unlock();
return called;
@@ -1190,6 +1171,7 @@ static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
spin_lock_irqsave(&ioc->lock, flags);
radix_tree_delete(&ioc->radix_root, cic->dead_key);
+ hlist_del_rcu(&cic->cic_list);
spin_unlock_irqrestore(&ioc->lock, flags);
kmem_cache_free(cfq_ioc_pool, cic);
@@ -1280,6 +1262,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
if (cic) {
cic->last_end_request = jiffies;
INIT_LIST_HEAD(&cic->queue_list);
+ INIT_HLIST_NODE(&cic->cic_list);
cic->dtor = cfq_free_io_context;
cic->exit = cfq_exit_io_context;
elv_ioc_count_inc(ioc_count);
@@ -1501,6 +1484,7 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
rcu_assign_pointer(ioc->ioc_data, NULL);
radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
+ hlist_del_rcu(&cic->cic_list);
spin_unlock_irqrestore(&ioc->lock, flags);
cfq_cic_free(cic);
@@ -1561,6 +1545,8 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
spin_lock_irqsave(&ioc->lock, flags);
ret = radix_tree_insert(&ioc->radix_root,
(unsigned long) cfqd, cic);
+ if (!ret)
+ hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
spin_unlock_irqrestore(&ioc->lock, flags);
radix_tree_preload_end();