| field | value | date |
|---|---|---|
| author | Jens Axboe <axboe@suse.de> | 2006-07-19 05:10:01 +0200 |
| committer | Jens Axboe <axboe@nelson.home.kernel.dk> | 2006-09-30 20:29:37 +0200 |
| commit | e4313dd423148fa729571b50c06cbc0bedf5c494 (patch) | |
| tree | 7524020d6d822b06aa433c1252a4926b91a8b328 /block | |
| parent | 4a893e837bb470867d74c05d6c6b97bba5a96185 (diff) | |
[PATCH] as-iosched: use new io context counting mechanism
It's OK if the read path is a lot more costly, as long as inc/dec is
really cheap. The inc/dec happens for each io context created or freed,
while the read only happens when a disk queue exits.
Signed-off-by: Jens Axboe <axboe@suse.de>
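To make the commit message's tradeoff concrete, here is a small userspace C analogue (a sketch; every name in it is hypothetical, not a kernel API): each slot stands in for one CPU's counter, so inc/dec touch only a single "local" word, while a read has to walk and sum every slot.

```c
/* Userspace sketch of the per-CPU counting idea: updates hit one slot,
 * reads sum all slots. Slots stand in for per-CPU variables; all names
 * here are illustrative only, not kernel APIs. */
#include <stdatomic.h>
#include <stdio.h>

#define NR_SLOTS 8			/* stand-in for the CPU count */

static atomic_ulong ioc_count[NR_SLOTS];

/* Cheap: one relaxed RMW on the caller's own slot, no shared cache line. */
static void ioc_count_inc(int slot)
{
	atomic_fetch_add_explicit(&ioc_count[slot], 1, memory_order_relaxed);
}

static void ioc_count_dec(int slot)
{
	atomic_fetch_sub_explicit(&ioc_count[slot], 1, memory_order_relaxed);
}

/* Costly: O(NR_SLOTS) walk -- acceptable because it only runs at exit. */
static unsigned long ioc_count_read(void)
{
	unsigned long sum = 0;

	for (int i = 0; i < NR_SLOTS; i++)
		sum += atomic_load_explicit(&ioc_count[i], memory_order_relaxed);
	return sum;
}

int main(void)
{
	ioc_count_inc(0);
	ioc_count_inc(3);
	ioc_count_dec(0);
	printf("outstanding io contexts: %lu\n", ioc_count_read());
	return 0;
}
```

The summed read is racy by design; correctness at shutdown comes from ordering the read against the ioc_gone handshake, which the patch below preserves.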
Diffstat (limited to 'block')
-rw-r--r-- | block/as-iosched.c | 9 |
1 file changed, 5 insertions(+), 4 deletions(-)
```diff
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 66015bc79e6f..8e1fef1eafc9 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -149,7 +149,7 @@ enum arq_state {
 #define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
 #define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)
 
-static atomic_t ioc_count = ATOMIC_INIT(0);
+static DEFINE_PER_CPU(unsigned long, ioc_count);
 static struct completion *ioc_gone;
 
 static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
@@ -163,7 +163,8 @@ static void as_antic_stop(struct as_data *ad);
 static void free_as_io_context(struct as_io_context *aic)
 {
 	kfree(aic);
-	if (atomic_dec_and_test(&ioc_count) && ioc_gone)
+	elv_ioc_count_dec(ioc_count);
+	if (ioc_gone && !elv_ioc_count_read(ioc_count))
 		complete(ioc_gone);
 }
 
@@ -199,7 +200,7 @@ static struct as_io_context *alloc_as_io_context(void)
 		ret->seek_total = 0;
 		ret->seek_samples = 0;
 		ret->seek_mean = 0;
-		atomic_inc(&ioc_count);
+		elv_ioc_count_inc(ioc_count);
 	}
 
 	return ret;
@@ -1484,7 +1485,7 @@ static void __exit as_exit(void)
 	ioc_gone = &all_gone;
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
-	if (atomic_read(&ioc_count))
+	if (elv_ioc_count_read(ioc_count))
 		wait_for_completion(ioc_gone);
 	synchronize_rcu();
 }
```
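The elv_ioc_count_* helpers themselves are not part of this diff. A minimal per-CPU implementation in the style of the era's include/linux/elevator.h could look like the sketch below (an approximation for illustration, not the verbatim kernel macros):

```c
/* Sketch of per-CPU io-context counting helpers, approximating the
 * elv_ioc_count_* macros this patch starts using. Not verbatim kernel
 * code; assumes the 2.6.18-era per-CPU API (__get_cpu_var). */
#include <linux/percpu.h>

/* Writer side: adjust only this CPU's slot with interrupts off; no
 * atomic RMW and no cross-CPU cache-line bouncing, so inc/dec is cheap. */
#define elv_ioc_count_mod(name, __val)				\
	do {							\
		local_irq_disable();				\
		__get_cpu_var(name) += (__val);			\
		local_irq_enable();				\
	} while (0)

#define elv_ioc_count_inc(name)	elv_ioc_count_mod(name, 1)
#define elv_ioc_count_dec(name)	elv_ioc_count_mod(name, -1)

/* Reader side: sum every CPU's slot. O(nr_cpus) and racy, but it only
 * runs when a disk queue exits. */
#define elv_ioc_count_read(name)				\
({								\
	unsigned long __sum = 0;				\
	int __cpu;						\
	for_each_possible_cpu(__cpu)				\
		__sum += per_cpu(name, __cpu);			\
	__sum;							\
})
```

Because the summed read can race with concurrent frees, as_exit() publishes ioc_gone and issues smp_wmb() before sampling the count; a free_as_io_context() that drops the last reference after that point sees ioc_gone set and signals the completion.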