author     Alasdair G Kergon <agk@redhat.com>    2008-02-08 02:10:52 +0000
committer  Alasdair G Kergon <agk@redhat.com>    2008-02-08 02:10:52 +0000
commit     395b167ca0c559aa975d8bbc46a3d10edd6e17d0 (patch)
tree       5b5db9ad8da4d1d28e971dece58979594f56bad2 /drivers/md
parent     4e4eef64e246694a6302c3ee95ac9b60c40f877e (diff)
dm crypt: move queue functions
Reorder kcryptd functions for clarity.
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-crypt.c  54
1 file changed, 26 insertions(+), 28 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 5b83204b6594..ccc2fe19db86 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -110,6 +110,7 @@ struct crypt_config {
 static struct kmem_cache *_crypt_io_pool;
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
+static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 
 /*
  * Different IV generation algorithms:
@@ -481,25 +482,6 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
  * starved by new requests which can block in the first stages due
  * to memory allocation.
  */
-static void kcryptd_io(struct work_struct *work);
-static void kcryptd_crypt(struct work_struct *work);
-
-static void kcryptd_queue_io(struct dm_crypt_io *io)
-{
-	struct crypt_config *cc = io->target->private;
-
-	INIT_WORK(&io->work, kcryptd_io);
-	queue_work(cc->io_queue, &io->work);
-}
-
-static void kcryptd_queue_crypt(struct dm_crypt_io *io)
-{
-	struct crypt_config *cc = io->target->private;
-
-	INIT_WORK(&io->work, kcryptd_crypt);
-	queue_work(cc->crypt_queue, &io->work);
-}
-
 static void crypt_endio(struct bio *clone, int error)
 {
 	struct dm_crypt_io *io = clone->bi_private;
@@ -575,6 +557,24 @@ static void kcryptd_io_write(struct dm_crypt_io *io)
 {
 }
 
+static void kcryptd_io(struct work_struct *work)
+{
+	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+
+	if (bio_data_dir(io->base_bio) == READ)
+		kcryptd_io_read(io);
+	else
+		kcryptd_io_write(io);
+}
+
+static void kcryptd_queue_io(struct dm_crypt_io *io)
+{
+	struct crypt_config *cc = io->target->private;
+
+	INIT_WORK(&io->work, kcryptd_io);
+	queue_work(cc->io_queue, &io->work);
+}
+
 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error)
 {
 }
@@ -658,24 +658,22 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 	kcryptd_crypt_read_done(io, r);
 }
 
-static void kcryptd_io(struct work_struct *work)
+static void kcryptd_crypt(struct work_struct *work)
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
 
 	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_io_read(io);
+		kcryptd_crypt_read_convert(io);
 	else
-		kcryptd_io_write(io);
+		kcryptd_crypt_write_convert(io);
 }
 
-static void kcryptd_crypt(struct work_struct *work)
+static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 {
-	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+	struct crypt_config *cc = io->target->private;
 
-	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_crypt_read_convert(io);
-	else
-		kcryptd_crypt_write_convert(io);
+	INIT_WORK(&io->work, kcryptd_crypt);
+	queue_work(cc->crypt_queue, &io->work);
 }
 
 /*
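
For context on the pattern the patch is rearranging: dm-crypt defers per-bio work to workqueues by embedding a work_struct in each request, binding it to a handler with INIT_WORK(), and having the handler recover the request via container_of() before branching on the bio direction. With the queue helpers placed directly after the work functions they enqueue, only a single forward declaration (kcryptd_queue_crypt) remains necessary. The sketch below is a minimal userspace analogue of that dispatch pattern, not kernel code; the names work_item, fake_io, io_worker and queue_io are invented for illustration, and the work runs inline rather than on a real workqueue.

/*
 * Minimal userspace sketch of the embedded-work dispatch pattern
 * (hypothetical names; compiles with any C compiler).
 */
#include <stddef.h>
#include <stdio.h>

/* Same trick the kernel's container_of() uses: go from a pointer to a
 * member back to the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {
	void (*fn)(struct work_item *w);	/* handler bound at queue time */
};

struct fake_io {
	int is_read;			/* stands in for bio_data_dir(io->base_bio) */
	struct work_item work;		/* embedded, like io->work in dm-crypt */
};

static void do_read(struct fake_io *io)  { (void)io; printf("read path\n"); }
static void do_write(struct fake_io *io) { (void)io; printf("write path\n"); }

/* Analogue of kcryptd_io()/kcryptd_crypt(): recover the request from the
 * work item, then branch on the I/O direction. */
static void io_worker(struct work_item *w)
{
	struct fake_io *io = container_of(w, struct fake_io, work);

	if (io->is_read)
		do_read(io);
	else
		do_write(io);
}

/* Analogue of kcryptd_queue_io(): bind the handler and submit the work.
 * Defined after io_worker(), so no forward declaration is needed. */
static void queue_io(struct fake_io *io)
{
	io->work.fn = io_worker;
	io->work.fn(&io->work);	/* a real workqueue would run this asynchronously */
}

int main(void)
{
	struct fake_io rd = { .is_read = 1 };
	struct fake_io wr = { .is_read = 0 };

	queue_io(&rd);
	queue_io(&wr);
	return 0;
}

In the driver itself, the equivalent of queue_io() hands the work to cc->io_queue or cc->crypt_queue via queue_work(), so the read/write branch executes later in workqueue context.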