author     Jens Axboe <jens.axboe@oracle.com>   2008-01-24 08:53:35 +0100
committer  Jens Axboe <jens.axboe@oracle.com>   2008-01-28 10:50:31 +0100
commit     d38ecf935fcb10264a6bc190855d9595165e6eeb (patch)
tree       64e3146ef76678ad3ae8f75c32df9f25ea470953 /block
parent     fd0928df98b9578be8a786ac0cb78a47a5e17a20 (diff)
download   blackbird-op-linux-d38ecf935fcb10264a6bc190855d9595165e6eeb.tar.gz
           blackbird-op-linux-d38ecf935fcb10264a6bc190855d9595165e6eeb.zip
io context sharing: preliminary support
Detach task state from the ioc; instead, keep track of how many processes
are accessing the ioc.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
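The mechanism, shown concretely in the diff below, is a second counter next to the existing refcount: nr_tasks records how many tasks are attached to the io_context, and the per-ioc exit hooks run only when the last attached task leaves. A minimal userspace sketch of that two-counter scheme, using C11 atomics in place of the kernel's atomic_t (the struct and helper names here are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative stand-in for struct io_context: refcount guards the
 * allocation itself, nr_tasks counts the tasks currently attached. */
struct ioc {
	atomic_int refcount;
	atomic_int nr_tasks;
};

struct ioc *ioc_alloc(void)
{
	struct ioc *ioc = malloc(sizeof(*ioc));

	if (ioc) {
		atomic_init(&ioc->refcount, 1);	/* one memory reference */
		atomic_init(&ioc->nr_tasks, 1);	/* one attached task */
	}
	return ioc;
}

/* Like put_io_context() after this patch: returns 1 once the last
 * reference is gone, 0 while other users remain. */
int ioc_put(struct ioc *ioc)
{
	if (!ioc)
		return 1;
	if (atomic_fetch_sub(&ioc->refcount, 1) == 1) {
		free(ioc);
		return 1;
	}
	return 0;
}

/* A second task starts sharing the context. This patch only adds the
 * counting; the actual sharing paths arrive in follow-up patches. */
void ioc_attach(struct ioc *ioc)
{
	atomic_fetch_add(&ioc->nr_tasks, 1);
}

/* Like exit_io_context(): every exiting task decrements nr_tasks, but
 * only the last one (counter hitting zero) runs teardown and drops the
 * reference, mirroring the patch's atomic_dec_and_test() branch. */
void ioc_detach(struct ioc *ioc)
{
	if (atomic_fetch_sub(&ioc->nr_tasks, 1) == 1) {
		/* last task: the real code calls the aic/cic exit hooks here */
		ioc_put(ioc);
	}
}

Keeping the memory refcount separate from nr_tasks lets other code hold an io_context alive without pretending to be an attached task.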
Diffstat (limited to 'block')
-rw-r--r--  block/ll_rw_blk.c  46
1 file changed, 30 insertions(+), 16 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index b9bb02e845cd..d4550ecae443 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3854,12 +3854,13 @@ int __init blk_dev_init(void)
 }
 
 /*
- * IO Context helper functions
+ * IO Context helper functions. put_io_context() returns 1 if there are no
+ * more users of this io context, 0 otherwise.
  */
-void put_io_context(struct io_context *ioc)
+int put_io_context(struct io_context *ioc)
 {
 	if (ioc == NULL)
-		return;
+		return 1;
 
 	BUG_ON(atomic_read(&ioc->refcount) == 0);
 
@@ -3878,7 +3879,9 @@ void put_io_context(struct io_context *ioc)
 		rcu_read_unlock();
 
 		kmem_cache_free(iocontext_cachep, ioc);
+		return 1;
 	}
+	return 0;
 }
 EXPORT_SYMBOL(put_io_context);
 
@@ -3893,15 +3896,17 @@ void exit_io_context(void)
 	current->io_context = NULL;
 	task_unlock(current);
 
-	ioc->task = NULL;
-	if (ioc->aic && ioc->aic->exit)
-		ioc->aic->exit(ioc->aic);
-	if (ioc->cic_root.rb_node != NULL) {
-		cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
-		cic->exit(ioc);
-	}
+	if (atomic_dec_and_test(&ioc->nr_tasks)) {
+		if (ioc->aic && ioc->aic->exit)
+			ioc->aic->exit(ioc->aic);
+		if (ioc->cic_root.rb_node != NULL) {
+			cic = rb_entry(rb_first(&ioc->cic_root),
+				struct cfq_io_context, rb_node);
+			cic->exit(ioc);
+		}
 
-	put_io_context(ioc);
+		put_io_context(ioc);
+	}
 }
 
 struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
@@ -3911,7 +3916,8 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
 	if (ret) {
 		atomic_set(&ret->refcount, 1);
-		ret->task = current;
+		atomic_set(&ret->nr_tasks, 1);
+		spin_lock_init(&ret->lock);
 		ret->ioprio_changed = 0;
 		ret->ioprio = 0;
 		ret->last_waited = jiffies; /* doesn't matter... */
@@ -3959,10 +3965,18 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node)
  */
 struct io_context *get_io_context(gfp_t gfp_flags, int node)
 {
-	struct io_context *ret;
-	ret = current_io_context(gfp_flags, node);
-	if (likely(ret))
-		atomic_inc(&ret->refcount);
+	struct io_context *ret = NULL;
+
+	/*
+	 * Check for unlikely race with exiting task. ioc ref count is
+	 * zero when ioc is being detached.
+	 */
+	do {
+		ret = current_io_context(gfp_flags, node);
+		if (unlikely(!ret))
+			break;
+	} while (!atomic_inc_not_zero(&ret->refcount));
+
 	return ret;
 }
 EXPORT_SYMBOL(get_io_context);
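The most subtle part is the new loop in get_io_context(): with sharing, the last reference can be dropped between the moment a caller looks the ioc up and the moment it bumps the refcount, so a plain atomic_inc() could resurrect a context that is already being freed. The kernel's atomic_inc_not_zero() refuses to take that reference. A self-contained sketch of the same idiom as a C11 compare-and-swap loop (the helper name is made up for illustration):

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only if the object is still live, i.e. its
 * refcount has not already dropped to zero. Same contract as the
 * kernel's atomic_inc_not_zero(). */
static bool ref_inc_not_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0) {
		/* On failure, 'old' is reloaded with the current value
		 * and the loop re-checks it against zero. */
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;	/* object is being torn down; do not touch it */
}

In get_io_context() this check sits in a do/while: if the increment fails because the last reference was just dropped by an exiting task, the code retries the lookup instead of using the dying context.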