Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r-- | drivers/md/dm-crypt.c | 663
1 file changed, 385 insertions(+), 278 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index d5216bcc4649..c6a529873d0f 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1,8 +1,8 @@ /* * Copyright (C) 2003 Jana Saout <jana@saout.de> * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> - * Copyright (C) 2006-2017 Red Hat, Inc. All rights reserved. - * Copyright (C) 2013-2017 Milan Broz <gmazyland@gmail.com> + * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved. + * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com> * * This file is released under the GPL. */ @@ -98,11 +98,6 @@ struct crypt_iv_operations { struct dm_crypt_request *dmreq); }; -struct iv_essiv_private { - struct crypto_shash *hash_tfm; - u8 *salt; -}; - struct iv_benbi_private { int shift; }; @@ -120,8 +115,9 @@ struct iv_tcw_private { u8 *whitening; }; -struct iv_eboiv_private { - struct crypto_cipher *tfm; +#define ELEPHANT_MAX_KEY_SIZE 32 +struct iv_elephant_private { + struct crypto_skcipher *tfm; }; /* @@ -134,6 +130,7 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, enum cipher_flags { CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cihper */ CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */ + CRYPT_ENCRYPT_PREPROCESS, /* Must preprocess data for encryption (elephant) */ }; /* @@ -152,26 +149,22 @@ struct crypt_config { struct task_struct *write_thread; struct rb_root write_tree; - char *cipher; char *cipher_string; char *cipher_auth; char *key_string; const struct crypt_iv_operations *iv_gen_ops; union { - struct iv_essiv_private essiv; struct iv_benbi_private benbi; struct iv_lmk_private lmk; struct iv_tcw_private tcw; - struct iv_eboiv_private eboiv; + struct iv_elephant_private elephant; } iv_gen_private; u64 iv_offset; unsigned int iv_size; unsigned short int sector_size; unsigned char sector_shift; - /* ESSIV: struct crypto_cipher *essiv_tfm */ - void *iv_private; union { struct crypto_skcipher **tfms; struct crypto_aead **tfms_aead; @@ -299,6 +292,11 @@ static struct crypto_aead *any_tfm_aead(struct crypt_config *cc) * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode) * The IV is encrypted little-endian byte-offset (with the same key * and cipher as the volume). + * + * elephant: The extended version of eboiv with additional Elephant diffuser + * used with Bitlocker CBC mode. 
+ * This mode was used in older Windows systems + * http://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf */ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, @@ -329,157 +327,15 @@ static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv, return 0; } -/* Initialise ESSIV - compute salt but no local memory allocations */ -static int crypt_iv_essiv_init(struct crypt_config *cc) -{ - struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; - SHASH_DESC_ON_STACK(desc, essiv->hash_tfm); - struct crypto_cipher *essiv_tfm; - int err; - - desc->tfm = essiv->hash_tfm; - - err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt); - shash_desc_zero(desc); - if (err) - return err; - - essiv_tfm = cc->iv_private; - - err = crypto_cipher_setkey(essiv_tfm, essiv->salt, - crypto_shash_digestsize(essiv->hash_tfm)); - if (err) - return err; - - return 0; -} - -/* Wipe salt and reset key derived from volume key */ -static int crypt_iv_essiv_wipe(struct crypt_config *cc) -{ - struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; - unsigned salt_size = crypto_shash_digestsize(essiv->hash_tfm); - struct crypto_cipher *essiv_tfm; - int r, err = 0; - - memset(essiv->salt, 0, salt_size); - - essiv_tfm = cc->iv_private; - r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size); - if (r) - err = r; - - return err; -} - -/* Allocate the cipher for ESSIV */ -static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc, - struct dm_target *ti, - const u8 *salt, - unsigned int saltsize) -{ - struct crypto_cipher *essiv_tfm; - int err; - - /* Setup the essiv_tfm with the given salt */ - essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, 0); - if (IS_ERR(essiv_tfm)) { - ti->error = "Error allocating crypto tfm for ESSIV"; - return essiv_tfm; - } - - if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) { - ti->error = "Block size of ESSIV cipher does " - "not match IV size of block cipher"; - crypto_free_cipher(essiv_tfm); - return ERR_PTR(-EINVAL); - } - - err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); - if (err) { - ti->error = "Failed to set key for ESSIV cipher"; - crypto_free_cipher(essiv_tfm); - return ERR_PTR(err); - } - - return essiv_tfm; -} - -static void crypt_iv_essiv_dtr(struct crypt_config *cc) -{ - struct crypto_cipher *essiv_tfm; - struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; - - crypto_free_shash(essiv->hash_tfm); - essiv->hash_tfm = NULL; - - kzfree(essiv->salt); - essiv->salt = NULL; - - essiv_tfm = cc->iv_private; - - if (essiv_tfm) - crypto_free_cipher(essiv_tfm); - - cc->iv_private = NULL; -} - -static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, - const char *opts) -{ - struct crypto_cipher *essiv_tfm = NULL; - struct crypto_shash *hash_tfm = NULL; - u8 *salt = NULL; - int err; - - if (!opts) { - ti->error = "Digest algorithm missing for ESSIV mode"; - return -EINVAL; - } - - /* Allocate hash algorithm */ - hash_tfm = crypto_alloc_shash(opts, 0, 0); - if (IS_ERR(hash_tfm)) { - ti->error = "Error initializing ESSIV hash"; - err = PTR_ERR(hash_tfm); - goto bad; - } - - salt = kzalloc(crypto_shash_digestsize(hash_tfm), GFP_KERNEL); - if (!salt) { - ti->error = "Error kmallocing salt storage in ESSIV"; - err = -ENOMEM; - goto bad; - } - - cc->iv_gen_private.essiv.salt = salt; - cc->iv_gen_private.essiv.hash_tfm = hash_tfm; - - essiv_tfm = alloc_essiv_cipher(cc, ti, salt, - crypto_shash_digestsize(hash_tfm)); - if (IS_ERR(essiv_tfm)) { - 
crypt_iv_essiv_dtr(cc); - return PTR_ERR(essiv_tfm); - } - cc->iv_private = essiv_tfm; - - return 0; - -bad: - if (hash_tfm && !IS_ERR(hash_tfm)) - crypto_free_shash(hash_tfm); - kfree(salt); - return err; -} - static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, struct dm_crypt_request *dmreq) { - struct crypto_cipher *essiv_tfm = cc->iv_private; - + /* + * ESSIV encryption of the IV is now handled by the crypto API, + * so just pass the plain sector number here. + */ memset(iv, 0, cc->iv_size); *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); - crypto_cipher_encrypt_one(essiv_tfm, iv, iv); return 0; } @@ -487,8 +343,14 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, const char *opts) { - unsigned bs = crypto_skcipher_blocksize(any_tfm(cc)); - int log = ilog2(bs); + unsigned bs; + int log; + + if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags)) + bs = crypto_aead_blocksize(any_tfm_aead(cc)); + else + bs = crypto_skcipher_blocksize(any_tfm(cc)); + log = ilog2(bs); /* we need to calculate how far we must shift the sector count * to get the cipher block count, we use this shift in _gen */ @@ -847,67 +709,333 @@ static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv, return 0; } -static void crypt_iv_eboiv_dtr(struct crypt_config *cc) -{ - struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv; - - crypto_free_cipher(eboiv->tfm); - eboiv->tfm = NULL; -} - static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti, const char *opts) { - struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv; - struct crypto_cipher *tfm; - - tfm = crypto_alloc_cipher(cc->cipher, 0, 0); - if (IS_ERR(tfm)) { - ti->error = "Error allocating crypto tfm for EBOIV"; - return PTR_ERR(tfm); + if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags)) { + ti->error = "AEAD transforms not supported for EBOIV"; + return -EINVAL; } - if (crypto_cipher_blocksize(tfm) != cc->iv_size) { + if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) { ti->error = "Block size of EBOIV cipher does " "not match IV size of block cipher"; - crypto_free_cipher(tfm); return -EINVAL; } - eboiv->tfm = tfm; return 0; } -static int crypt_iv_eboiv_init(struct crypt_config *cc) +static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) { - struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv; + u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64)); + struct skcipher_request *req; + struct scatterlist src, dst; + struct crypto_wait wait; int err; - err = crypto_cipher_setkey(eboiv->tfm, cc->key, cc->key_size); - if (err) - return err; + req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO); + if (!req) + return -ENOMEM; - return 0; + memset(buf, 0, cc->iv_size); + *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size); + + sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size); + sg_init_one(&dst, iv, cc->iv_size); + skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf); + skcipher_request_set_callback(req, 0, crypto_req_done, &wait); + err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); + skcipher_request_free(req); + + return err; } -static int crypt_iv_eboiv_wipe(struct crypt_config *cc) +static void crypt_iv_elephant_dtr(struct crypt_config *cc) { - /* Called after cc->key is set to random key in crypt_wipe() */ - return crypt_iv_eboiv_init(cc); + struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; + + 
crypto_free_skcipher(elephant->tfm); + elephant->tfm = NULL; } -static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv, +static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti, + const char *opts) +{ + struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; + int r; + + elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); + if (IS_ERR(elephant->tfm)) { + r = PTR_ERR(elephant->tfm); + elephant->tfm = NULL; + return r; + } + + r = crypt_iv_eboiv_ctr(cc, ti, NULL); + if (r) + crypt_iv_elephant_dtr(cc); + return r; +} + +static void diffuser_disk_to_cpu(u32 *d, size_t n) +{ +#ifndef __LITTLE_ENDIAN + int i; + + for (i = 0; i < n; i++) + d[i] = le32_to_cpu((__le32)d[i]); +#endif +} + +static void diffuser_cpu_to_disk(__le32 *d, size_t n) +{ +#ifndef __LITTLE_ENDIAN + int i; + + for (i = 0; i < n; i++) + d[i] = cpu_to_le32((u32)d[i]); +#endif +} + +static void diffuser_a_decrypt(u32 *d, size_t n) +{ + int i, i1, i2, i3; + + for (i = 0; i < 5; i++) { + i1 = 0; + i2 = n - 2; + i3 = n - 5; + + while (i1 < (n - 1)) { + d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23); + i1++; i2++; i3++; + + if (i3 >= n) + i3 -= n; + + d[i1] += d[i2] ^ d[i3]; + i1++; i2++; i3++; + + if (i2 >= n) + i2 -= n; + + d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19); + i1++; i2++; i3++; + + d[i1] += d[i2] ^ d[i3]; + i1++; i2++; i3++; + } + } +} + +static void diffuser_a_encrypt(u32 *d, size_t n) +{ + int i, i1, i2, i3; + + for (i = 0; i < 5; i++) { + i1 = n - 1; + i2 = n - 2 - 1; + i3 = n - 5 - 1; + + while (i1 > 0) { + d[i1] -= d[i2] ^ d[i3]; + i1--; i2--; i3--; + + d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19); + i1--; i2--; i3--; + + if (i2 < 0) + i2 += n; + + d[i1] -= d[i2] ^ d[i3]; + i1--; i2--; i3--; + + if (i3 < 0) + i3 += n; + + d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23); + i1--; i2--; i3--; + } + } +} + +static void diffuser_b_decrypt(u32 *d, size_t n) +{ + int i, i1, i2, i3; + + for (i = 0; i < 3; i++) { + i1 = 0; + i2 = 2; + i3 = 5; + + while (i1 < (n - 1)) { + d[i1] += d[i2] ^ d[i3]; + i1++; i2++; i3++; + + d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22); + i1++; i2++; i3++; + + if (i2 >= n) + i2 -= n; + + d[i1] += d[i2] ^ d[i3]; + i1++; i2++; i3++; + + if (i3 >= n) + i3 -= n; + + d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7); + i1++; i2++; i3++; + } + } +} + +static void diffuser_b_encrypt(u32 *d, size_t n) +{ + int i, i1, i2, i3; + + for (i = 0; i < 3; i++) { + i1 = n - 1; + i2 = 2 - 1; + i3 = 5 - 1; + + while (i1 > 0) { + d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7); + i1--; i2--; i3--; + + if (i3 < 0) + i3 += n; + + d[i1] -= d[i2] ^ d[i3]; + i1--; i2--; i3--; + + if (i2 < 0) + i2 += n; + + d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22); + i1--; i2--; i3--; + + d[i1] -= d[i2] ^ d[i3]; + i1--; i2--; i3--; + } + } +} + +static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq) +{ + struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; + u8 *es, *ks, *data, *data2, *data_offset; + struct skcipher_request *req; + struct scatterlist *sg, *sg2, src, dst; + struct crypto_wait wait; + int i, r; + + req = skcipher_request_alloc(elephant->tfm, GFP_NOIO); + es = kzalloc(16, GFP_NOIO); /* Key for AES */ + ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */ + + if (!req || !es || !ks) { + r = -ENOMEM; + goto out; + } + + *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size); + + /* E(Ks, e(s)) */ + sg_init_one(&src, es, 16); + sg_init_one(&dst, ks, 16); + skcipher_request_set_crypt(req, &src, &dst, 16, NULL); + 
skcipher_request_set_callback(req, 0, crypto_req_done, &wait); + r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); + if (r) + goto out; + + /* E(Ks, e'(s)) */ + es[15] = 0x80; + sg_init_one(&dst, &ks[16], 16); + r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); + if (r) + goto out; + + sg = crypt_get_sg_data(cc, dmreq->sg_out); + data = kmap_atomic(sg_page(sg)); + data_offset = data + sg->offset; + + /* Cannot modify original bio, copy to sg_out and apply Elephant to it */ + if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { + sg2 = crypt_get_sg_data(cc, dmreq->sg_in); + data2 = kmap_atomic(sg_page(sg2)); + memcpy(data_offset, data2 + sg2->offset, cc->sector_size); + kunmap_atomic(data2); + } + + if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { + diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32)); + diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); + diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); + diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32)); + } + + for (i = 0; i < (cc->sector_size / 32); i++) + crypto_xor(data_offset + i * 32, ks, 32); + + if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { + diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32)); + diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); + diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); + diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32)); + } + + kunmap_atomic(data); +out: + kzfree(ks); + kzfree(es); + skcipher_request_free(req); + return r; +} + +static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv, struct dm_crypt_request *dmreq) { - struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv; + int r; - memset(iv, 0, cc->iv_size); - *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector * cc->sector_size); - crypto_cipher_encrypt_one(eboiv->tfm, iv, iv); + if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { + r = crypt_iv_elephant(cc, dmreq); + if (r) + return r; + } + + return crypt_iv_eboiv_gen(cc, iv, dmreq); +} + +static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) +{ + if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) + return crypt_iv_elephant(cc, dmreq); return 0; } +static int crypt_iv_elephant_init(struct crypt_config *cc) +{ + struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; + int key_offset = cc->key_size - cc->key_extra_size; + + return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size); +} + +static int crypt_iv_elephant_wipe(struct crypt_config *cc) +{ + struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; + u8 key[ELEPHANT_MAX_KEY_SIZE]; + + memset(key, 0, cc->key_extra_size); + return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size); +} + static const struct crypt_iv_operations crypt_iv_plain_ops = { .generator = crypt_iv_plain_gen }; @@ -921,10 +1049,6 @@ static const struct crypt_iv_operations crypt_iv_plain64be_ops = { }; static const struct crypt_iv_operations crypt_iv_essiv_ops = { - .ctr = crypt_iv_essiv_ctr, - .dtr = crypt_iv_essiv_dtr, - .init = crypt_iv_essiv_init, - .wipe = crypt_iv_essiv_wipe, .generator = crypt_iv_essiv_gen }; @@ -962,12 +1086,18 @@ static struct crypt_iv_operations crypt_iv_random_ops = { static struct crypt_iv_operations crypt_iv_eboiv_ops = { .ctr = crypt_iv_eboiv_ctr, - .dtr = crypt_iv_eboiv_dtr, - .init = crypt_iv_eboiv_init, - .wipe = crypt_iv_eboiv_wipe, .generator = 
crypt_iv_eboiv_gen }; +static struct crypt_iv_operations crypt_iv_elephant_ops = { + .ctr = crypt_iv_elephant_ctr, + .dtr = crypt_iv_elephant_dtr, + .init = crypt_iv_elephant_init, + .wipe = crypt_iv_elephant_wipe, + .generator = crypt_iv_elephant_gen, + .post = crypt_iv_elephant_post +}; + /* * Integrity extensions */ @@ -1284,6 +1414,9 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc, r = cc->iv_gen_ops->generator(cc, org_iv, dmreq); if (r < 0) return r; + /* Data can be already preprocessed in generator */ + if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags)) + sg_in = sg_out; /* Store generated IV in integrity metadata */ if (cc->integrity_iv_size) memcpy(tag_iv, org_iv, cc->integrity_iv_size); @@ -2320,7 +2453,6 @@ static void crypt_dtr(struct dm_target *ti) if (cc->dev) dm_put_device(ti, cc->dev); - kzfree(cc->cipher); kzfree(cc->cipher_string); kzfree(cc->key_string); kzfree(cc->cipher_auth); @@ -2373,7 +2505,14 @@ static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode) cc->iv_gen_ops = &crypt_iv_null_ops; else if (strcmp(ivmode, "eboiv") == 0) cc->iv_gen_ops = &crypt_iv_eboiv_ops; - else if (strcmp(ivmode, "lmk") == 0) { + else if (strcmp(ivmode, "elephant") == 0) { + cc->iv_gen_ops = &crypt_iv_elephant_ops; + cc->key_parts = 2; + cc->key_extra_size = cc->key_size / 2; + if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE) + return -EINVAL; + set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags); + } else if (strcmp(ivmode, "lmk") == 0) { cc->iv_gen_ops = &crypt_iv_lmk_ops; /* * Version 2 and 3 is recognised according @@ -2402,52 +2541,6 @@ static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode) } /* - * Workaround to parse cipher algorithm from crypto API spec. - * The cc->cipher is currently used only in ESSIV. - * This should be probably done by crypto-api calls (once available...) - */ -static int crypt_ctr_blkdev_cipher(struct crypt_config *cc) -{ - const char *alg_name = NULL; - char *start, *end; - - if (crypt_integrity_aead(cc)) { - alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc))); - if (!alg_name) - return -EINVAL; - if (crypt_integrity_hmac(cc)) { - alg_name = strchr(alg_name, ','); - if (!alg_name) - return -EINVAL; - } - alg_name++; - } else { - alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc))); - if (!alg_name) - return -EINVAL; - } - - start = strchr(alg_name, '('); - end = strchr(alg_name, ')'); - - if (!start && !end) { - cc->cipher = kstrdup(alg_name, GFP_KERNEL); - return cc->cipher ? 0 : -ENOMEM; - } - - if (!start || !end || ++start >= end) - return -EINVAL; - - cc->cipher = kzalloc(end - start + 1, GFP_KERNEL); - if (!cc->cipher) - return -ENOMEM; - - strncpy(cc->cipher, start, end - start); - - return 0; -} - -/* * Workaround to parse HMAC algorithm from AEAD crypto API spec. * The HMAC is needed to calculate tag size (HMAC digest size). * This should be probably done by crypto-api calls (once available...) @@ -2490,7 +2583,7 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key char **ivmode, char **ivopts) { struct crypt_config *cc = ti->private; - char *tmp, *cipher_api; + char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME]; int ret = -EINVAL; cc->tfms_count = 1; @@ -2516,9 +2609,32 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key /* The rest is crypto API spec */ cipher_api = tmp; + /* Alloc AEAD, can be used only in new format. 
*/ + if (crypt_integrity_aead(cc)) { + ret = crypt_ctr_auth_cipher(cc, cipher_api); + if (ret < 0) { + ti->error = "Invalid AEAD cipher spec"; + return -ENOMEM; + } + } + if (*ivmode && !strcmp(*ivmode, "lmk")) cc->tfms_count = 64; + if (*ivmode && !strcmp(*ivmode, "essiv")) { + if (!*ivopts) { + ti->error = "Digest algorithm missing for ESSIV mode"; + return -EINVAL; + } + ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)", + cipher_api, *ivopts); + if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) { + ti->error = "Cannot allocate cipher string"; + return -ENOMEM; + } + cipher_api = buf; + } + cc->key_parts = cc->tfms_count; /* Allocate cipher */ @@ -2528,23 +2644,11 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key return ret; } - /* Alloc AEAD, can be used only in new format. */ - if (crypt_integrity_aead(cc)) { - ret = crypt_ctr_auth_cipher(cc, cipher_api); - if (ret < 0) { - ti->error = "Invalid AEAD cipher spec"; - return -ENOMEM; - } + if (crypt_integrity_aead(cc)) cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc)); - } else + else cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc)); - ret = crypt_ctr_blkdev_cipher(cc); - if (ret < 0) { - ti->error = "Cannot allocate cipher string"; - return -ENOMEM; - } - return 0; } @@ -2579,10 +2683,6 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key } cc->key_parts = cc->tfms_count; - cc->cipher = kstrdup(cipher, GFP_KERNEL); - if (!cc->cipher) - goto bad_mem; - chainmode = strsep(&tmp, "-"); *ivmode = strsep(&tmp, ":"); *ivopts = tmp; @@ -2605,9 +2705,19 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key if (!cipher_api) goto bad_mem; - ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, - "%s(%s)", chainmode, cipher); - if (ret < 0) { + if (*ivmode && !strcmp(*ivmode, "essiv")) { + if (!*ivopts) { + ti->error = "Digest algorithm missing for ESSIV mode"; + kfree(cipher_api); + return -EINVAL; + } + ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, + "essiv(%s(%s),%s)", chainmode, cipher, *ivopts); + } else { + ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, + "%s(%s)", chainmode, cipher); + } + if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) { kfree(cipher_api); goto bad_mem; } @@ -2911,21 +3021,18 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ret = -ENOMEM; - cc->io_queue = alloc_workqueue("kcryptd_io/%s", - WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, - 1, devname); + cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname); if (!cc->io_queue) { ti->error = "Couldn't create kcryptd io queue"; goto bad; } if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) - cc->crypt_queue = alloc_workqueue("kcryptd/%s", - WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, + cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1, devname); else cc->crypt_queue = alloc_workqueue("kcryptd/%s", - WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, + WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(), devname); if (!cc->crypt_queue) { ti->error = "Couldn't create kcryptd queue"; @@ -3173,7 +3280,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type crypt_target = { .name = "crypt", - .version = {1, 19, 0}, + .version = {1, 20, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, |
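
The Elephant diffuser introduced by this patch is just two self-inverse passes of 32-bit add/XOR/rotate operations over the 512-byte sector. Below is a minimal user-space sketch of the diffuser A pair, with the index arithmetic and rotation constants copied from the patch; the uint32_t types, the main() round-trip check, and the use of rand() are illustrative assumptions, not part of dm-crypt. Compiling and running it should print "round-trip ok", confirming that diffuser_a_encrypt and diffuser_a_decrypt are exact inverses.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Diffuser A, forward direction (applied on the read path in the patch). */
static void diffuser_a_decrypt(uint32_t *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 5; i++) {
		i1 = 0;
		i2 = n - 2;
		i3 = n - 5;

		while (i1 < (n - 1)) {
			d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
			i1++; i2++; i3++;

			if (i3 >= n)
				i3 -= n;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;

			if (i2 >= n)
				i2 -= n;

			d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
			i1++; i2++; i3++;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;
		}
	}
}

/* Diffuser A, inverse direction (applied on the write path); undoes the above. */
static void diffuser_a_encrypt(uint32_t *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 5; i++) {
		i1 = n - 1;
		i2 = n - 2 - 1;
		i3 = n - 5 - 1;

		while (i1 > 0) {
			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;

			d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
			i1--; i2--; i3--;

			if (i2 < 0)
				i2 += n;

			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;

			if (i3 < 0)
				i3 += n;

			d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
			i1--; i2--; i3--;
		}
	}
}

int main(void)
{
	uint32_t sector[512 / sizeof(uint32_t)], orig[512 / sizeof(uint32_t)];
	size_t i, n = 512 / sizeof(uint32_t);

	/* Fill one 512-byte sector with arbitrary data and keep a copy. */
	for (i = 0; i < n; i++)
		orig[i] = sector[i] = (uint32_t)rand();

	/* Encrypt then decrypt must restore the original sector. */
	diffuser_a_encrypt(sector, n);
	diffuser_a_decrypt(sector, n);

	printf("round-trip %s\n", memcmp(sector, orig, sizeof(sector)) ? "FAILED" : "ok");
	return 0;
}
```

Diffuser B in the patch follows the same pattern with 3 rounds and rotation amounts 10 and 25, so the same round-trip check applies to it.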
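Besides the diffusers, elephant mode XORs each sector with a 32-byte per-sector key ks, derived in crypt_iv_elephant() by encrypting the sector's little-endian byte offset twice with ECB-AES under the trailing half of the volume key (key_offset = key_size - key_extra_size in the patch). The sketch below shows that derivation with OpenSSL's EVP API for user-space experimentation; the function name elephant_sector_key, the assumption of a 16-byte (AES-128) diffuser key, and the omitted error checking are illustrative only — the kernel itself uses the crypto API's "ecb(aes)" skcipher as in the diff above.

```c
#include <stdint.h>
#include <string.h>
#include <openssl/evp.h>

/*
 * Derive the 32-byte Elephant sector key: ks = E(Ks, e(s)) || E(Ks, e'(s)),
 * where e(s) is the sector's little-endian byte offset zero-padded to 16
 * bytes, and e'(s) is the same block with its last byte set to 0x80.
 * 'key' is assumed to be a 16-byte diffuser key (second half of the volume key).
 */
static int elephant_sector_key(const uint8_t key[16], uint64_t sector,
			       unsigned int sector_size, uint8_t ks[32])
{
	uint8_t es[16] = { 0 };
	uint64_t offset = sector * sector_size;
	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
	int len, i;

	if (!ctx)
		return -1;

	/* little-endian byte offset, as the patch builds with cpu_to_le64() */
	for (i = 0; i < 8; i++)
		es[i] = (offset >> (8 * i)) & 0xff;

	EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL);
	EVP_CIPHER_CTX_set_padding(ctx, 0);

	EVP_EncryptUpdate(ctx, ks, &len, es, 16);       /* E(Ks, e(s))  */
	es[15] = 0x80;
	EVP_EncryptUpdate(ctx, ks + 16, &len, es, 16);  /* E(Ks, e'(s)) */

	EVP_CIPHER_CTX_free(ctx);
	return 0;
}
```

dm-crypt then XORs the 512-byte sector with ks repeated every 32 bytes (the cc->sector_size / 32 loop in crypt_iv_elephant()), before the diffuser passes on writes and after them on reads; the AES-CBC step itself still uses the eboiv-style IV generated by crypt_iv_eboiv_gen().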