Diffstat (limited to 'drivers/crypto/caam/caamalg_qi2.c')
-rw-r--r--    drivers/crypto/caam/caamalg_qi2.c | 479
1 file changed, 249 insertions(+), 230 deletions(-)
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 06bf32c32cbd..28669cbecf77 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -15,6 +15,7 @@
 #include "key_gen.h"
 #include "caamalg_desc.h"
 #include "caamhash_desc.h"
+#include "dpseci-debugfs.h"
 #include <linux/fsl/mc.h>
 #include <soc/fsl/dpaa2-io.h>
 #include <soc/fsl/dpaa2-fd.h>
@@ -198,6 +199,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 			      ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
 	}
 
+	/*
+	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
+	 * in invalid opcodes (last bytes of user key) in the resulting
+	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
+	 * addresses are needed.
+	 */
+	ctx->adata.key_virt = ctx->key;
+	ctx->adata.key_dma = ctx->key_dma;
+
+	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
 	data_len[0] = ctx->adata.keylen_pad;
 	data_len[1] = ctx->cdata.keylen;
 
@@ -209,16 +222,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 			      ARRAY_SIZE(data_len)) < 0)
 		return -EINVAL;
 
-	if (inl_mask & 1)
-		ctx->adata.key_virt = ctx->key;
-	else
-		ctx->adata.key_dma = ctx->key_dma;
-
-	if (inl_mask & 2)
-		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
-	else
-		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
 	ctx->adata.key_inline = !!(inl_mask & 1);
 	ctx->cdata.key_inline = !!(inl_mask & 2);
 
@@ -247,16 +250,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 			      ARRAY_SIZE(data_len)) < 0)
 		return -EINVAL;
 
-	if (inl_mask & 1)
-		ctx->adata.key_virt = ctx->key;
-	else
-		ctx->adata.key_dma = ctx->key_dma;
-
-	if (inl_mask & 2)
-		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
-	else
-		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-
 	ctx->adata.key_inline = !!(inl_mask & 1);
 	ctx->cdata.key_inline = !!(inl_mask & 2);
 
@@ -320,7 +313,6 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
 	memzero_explicit(&keys, sizeof(keys));
 	return aead_set_sh_desc(aead);
 badkey:
-	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	memzero_explicit(&keys, sizeof(keys));
 	return -EINVAL;
 }
@@ -329,33 +321,22 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
 			    unsigned int keylen)
 {
 	struct crypto_authenc_keys keys;
-	u32 flags;
 	int err;
 
 	err = crypto_authenc_extractkeys(&keys, key, keylen);
 	if (unlikely(err))
-		goto badkey;
+		goto out;
 
 	err = -EINVAL;
 	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
-		goto badkey;
-
-	flags = crypto_aead_get_flags(aead);
-	err = __des3_verify_key(&flags, keys.enckey);
-	if (unlikely(err)) {
-		crypto_aead_set_flags(aead, flags);
 		goto out;
-	}
 
-	err = aead_setkey(aead, key, keylen);
+	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
+	      aead_setkey(aead, key, keylen);
 
 out:
 	memzero_explicit(&keys, sizeof(keys));
 	return err;
-
-badkey:
-	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
-	goto out;
 }
 
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
@@ -648,10 +629,8 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
 
-	if (keylen != CHACHA_KEY_SIZE + saltlen) {
-		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (keylen != CHACHA_KEY_SIZE + saltlen)
 		return -EINVAL;
-	}
 
 	ctx->cdata.key_virt = key;
 	ctx->cdata.keylen = keylen - saltlen;
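The DKP comment above is worth unpacking. A minimal, self-contained sketch of the failure mode it describes — illustrative only, not driver code, with hypothetical lengths:

    /*
     * Why DKP<imm,imm> breaks when |user key| > |derived key|: DKP rewrites
     * the immediate key in the descriptor with the (shorter) derived key,
     * so the trailing user-key bytes stay behind and are later decoded as
     * descriptor opcodes. Keeping the key in DMA memory (DKP<ptr,imm>)
     * avoids inlining it altogether.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned int keylen = 72;     /* hypothetical user HMAC key length */
            unsigned int keylen_pad = 64; /* padded split-key length, e.g. SHA-256 */

            if (keylen > keylen_pad)
                    printf("%u stray key bytes would be decoded as opcodes\n",
                           keylen - keylen_pad);
            return 0;
    }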
@@ -719,6 +698,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+	int err;
+
+	err = crypto_gcm_check_authsize(authsize);
+	if (err)
+		return err;
 
 	ctx->authsize = authsize;
 	gcm_set_sh_desc(authenc);
@@ -731,7 +715,11 @@ static int gcm_setkey(struct crypto_aead *aead,
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *dev = ctx->dev;
+	int ret;
 
+	ret = aes_check_keylen(keylen);
+	if (ret)
+		return ret;
 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -805,6 +793,11 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
 			       unsigned int authsize)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+	int err;
+
+	err = crypto_rfc4106_check_authsize(authsize);
+	if (err)
+		return err;
 
 	ctx->authsize = authsize;
 	rfc4106_set_sh_desc(authenc);
@@ -817,9 +810,11 @@ static int rfc4106_setkey(struct crypto_aead *aead,
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *dev = ctx->dev;
+	int ret;
 
-	if (keylen < 4)
-		return -EINVAL;
+	ret = aes_check_keylen(keylen - 4);
+	if (ret)
+		return ret;
 
 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -900,6 +895,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 
+	if (authsize != 16)
+		return -EINVAL;
+
 	ctx->authsize = authsize;
 	rfc4543_set_sh_desc(authenc);
@@ -911,9 +909,11 @@ static int rfc4543_setkey(struct crypto_aead *aead,
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *dev = ctx->dev;
+	int ret;
 
-	if (keylen < 4)
-		return -EINVAL;
+	ret = aes_check_keylen(keylen - 4);
+	if (ret)
+		return ret;
 
 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -931,7 +931,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
 }
 
 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
-			   unsigned int keylen)
+			   unsigned int keylen, const u32 ctx1_iv_off)
 {
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct caam_skcipher_alg *alg =
@@ -941,34 +941,11 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	struct caam_flc *flc;
 	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 	u32 *desc;
-	u32 ctx1_iv_off = 0;
-	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
-			       OP_ALG_AAI_CTR_MOD128) &&
-			      ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
-			       OP_ALG_ALGSEL_CHACHA20);
 	const bool is_rfc3686 = alg->caam.rfc3686;
 
 	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 
-	/*
-	 * AES-CTR needs to load IV in CONTEXT1 reg
-	 * at an offset of 128bits (16bytes)
-	 * CONTEXT1[255:128] = IV
-	 */
-	if (ctr_mode)
-		ctx1_iv_off = 16;
-
-	/*
-	 * RFC3686 specific:
-	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
-	 *	| *key = {KEY, NONCE}
-	 */
-	if (is_rfc3686) {
-		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
-		keylen -= CTR_RFC3686_NONCE_SIZE;
-	}
-
 	ctx->cdata.keylen = keylen;
 	ctx->cdata.key_virt = key;
 	ctx->cdata.key_inline = true;
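The setkey hunks above lean on aes_check_keylen() from include/crypto/aes.h, which is essentially a whitelist of the three legal AES key sizes — approximately:

    static inline int aes_check_keylen(unsigned int keylen)
    {
            switch (keylen) {
            case AES_KEYSIZE_128:
            case AES_KEYSIZE_192:
            case AES_KEYSIZE_256:
                    break;
            default:
                    return -EINVAL;
            }

            return 0;
    }

Hence rfc4106_setkey()'s aes_check_keylen(keylen - 4): the last 4 bytes are the RFC 4106 nonce, and what precedes them must be a valid AES key.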
@@ -996,11 +973,80 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	return 0;
 }
 
+static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
+			       const u8 *key, unsigned int keylen)
+{
+	int err;
+
+	err = aes_check_keylen(keylen);
+	if (err)
+		return err;
+
+	return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
+				   const u8 *key, unsigned int keylen)
+{
+	u32 ctx1_iv_off;
+	int err;
+
+	/*
+	 * RFC3686 specific:
+	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 *	| *key = {KEY, NONCE}
+	 */
+	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+	keylen -= CTR_RFC3686_NONCE_SIZE;
+
+	err = aes_check_keylen(keylen);
+	if (err)
+		return err;
+
+	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
+			       const u8 *key, unsigned int keylen)
+{
+	u32 ctx1_iv_off;
+	int err;
+
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	ctx1_iv_off = 16;
+
+	err = aes_check_keylen(keylen);
+	if (err)
+		return err;
+
+	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
+}
+
+static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
+				    const u8 *key, unsigned int keylen)
+{
+	if (keylen != CHACHA_KEY_SIZE)
+		return -EINVAL;
+
+	return skcipher_setkey(skcipher, key, keylen, 0);
+}
+
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+			       const u8 *key, unsigned int keylen)
+{
+	return verify_skcipher_des_key(skcipher, key) ?:
+	       skcipher_setkey(skcipher, key, keylen, 0);
+}
+
 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
-				const u8 *key, unsigned int keylen)
+			        const u8 *key, unsigned int keylen)
 {
-	return unlikely(des3_verify_key(skcipher, key)) ?:
-	       skcipher_setkey(skcipher, key, keylen);
+	return verify_skcipher_des3_key(skcipher, key) ?:
+	       skcipher_setkey(skcipher, key, keylen, 0);
 }
 
 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
@@ -1013,7 +1059,6 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
 		dev_err(dev, "key size mismatch\n");
-		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 
@@ -1227,10 +1272,8 @@ static void aead_encrypt_done(void *cbk_ctx, u32 status)
 
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
 
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 
 	aead_unmap(ctx->dev, edesc, req);
 	qi_cache_free(edesc);
@@ -1250,17 +1293,8 @@ static void aead_decrypt_done(void *cbk_ctx, u32 status)
 
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
 
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		/*
-		 * verify hw auth check passed else return -EBADMSG
-		 */
-		if ((status & JRSTA_CCBERR_ERRID_MASK) ==
-		     JRSTA_CCBERR_ERRID_ICVCHK)
-			ecode = -EBADMSG;
-		else
-			ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 
 	aead_unmap(ctx->dev, edesc, req);
 	qi_cache_free(edesc);
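These callback hunks depend on a companion change outside this file: caam_qi2_strstatus() now returns an errno instead of only logging. A simplified sketch of the mapping it must perform (demo_strstatus is a made-up name; only the ICV case mirrors the open-coded check removed from aead_decrypt_done() above):

    static int demo_strstatus(u32 status)
    {
            /* ICV-check failures must surface as -EBADMSG so AEAD decrypt
             * still reports an authentication failure to the caller.
             */
            if ((status & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
                    return -EBADMSG;

            return -EIO;
    }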
@@ -1325,18 +1359,12 @@ static int aead_decrypt(struct aead_request *req)
 
 static int ipsec_gcm_encrypt(struct aead_request *req)
 {
-	if (req->assoclen < 8)
-		return -EINVAL;
-
-	return aead_encrypt(req);
+	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
 }
 
 static int ipsec_gcm_decrypt(struct aead_request *req)
 {
-	if (req->assoclen < 8)
-		return -EINVAL;
-
-	return aead_decrypt(req);
+	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
 }
 
 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
@@ -1352,10 +1380,8 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
 
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
 
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 
 	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1371,7 +1397,9 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
 	 * ciphertext block (CBC mode) or last counter (CTR mode).
 	 * This is used e.g. by the CTS mode.
 	 */
-	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
+	if (!ecode)
+		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+		       ivsize);
 
 	qi_cache_free(edesc);
 	skcipher_request_complete(req, ecode);
@@ -1390,10 +1418,8 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
 
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
 
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 
 	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1409,7 +1435,9 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
 	 * ciphertext block (CBC mode) or last counter (CTR mode).
 	 * This is used e.g. by the CTS mode.
 	 */
-	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
+	if (!ecode)
+		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+		       ivsize);
 
 	qi_cache_free(edesc);
 	skcipher_request_complete(req, ecode);
@@ -1423,6 +1451,9 @@ static int skcipher_encrypt(struct skcipher_request *req)
 	struct caam_request *caam_req = skcipher_request_ctx(req);
 	int ret;
 
+	if (!req->cryptlen)
+		return 0;
+
 	/* allocate extended descriptor */
 	edesc = skcipher_edesc_alloc(req);
 	if (IS_ERR(edesc))
@@ -1451,6 +1482,8 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	struct caam_request *caam_req = skcipher_request_ctx(req);
 	int ret;
 
+	if (!req->cryptlen)
+		return 0;
 	/* allocate extended descriptor */
 	edesc = skcipher_edesc_alloc(req);
 	if (IS_ERR(edesc))
@@ -1545,7 +1578,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 			.cra_driver_name = "cbc-aes-caam-qi2",
 			.cra_blocksize = AES_BLOCK_SIZE,
 		},
-		.setkey = skcipher_setkey,
+		.setkey = aes_skcipher_setkey,
 		.encrypt = skcipher_encrypt,
 		.decrypt = skcipher_decrypt,
 		.min_keysize = AES_MIN_KEY_SIZE,
@@ -1577,7 +1610,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 			.cra_driver_name = "cbc-des-caam-qi2",
 			.cra_blocksize = DES_BLOCK_SIZE,
 		},
-		.setkey = skcipher_setkey,
+		.setkey = des_skcipher_setkey,
 		.encrypt = skcipher_encrypt,
 		.decrypt = skcipher_decrypt,
 		.min_keysize = DES_KEY_SIZE,
@@ -1593,7 +1626,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 			.cra_driver_name = "ctr-aes-caam-qi2",
 			.cra_blocksize = 1,
 		},
-		.setkey = skcipher_setkey,
+		.setkey = ctr_skcipher_setkey,
 		.encrypt = skcipher_encrypt,
 		.decrypt = skcipher_decrypt,
 		.min_keysize = AES_MIN_KEY_SIZE,
@@ -1611,7 +1644,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 			.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
 			.cra_blocksize = 1,
 		},
-		.setkey = skcipher_setkey,
+		.setkey = rfc3686_skcipher_setkey,
 		.encrypt = skcipher_encrypt,
 		.decrypt = skcipher_decrypt,
 		.min_keysize = AES_MIN_KEY_SIZE +
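crypto_ipsec_check_assoclen() (include/crypto/gcm.h) is stricter than the old assoclen < 8 test: it accepts only the two associated-data lengths IPsec ESP actually produces. Approximately:

    static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
    {
            switch (assoclen) {
            case 16: /* SPI + 32-bit sequence number + 8-byte IV */
            case 20: /* SPI + 64-bit extended sequence number + 8-byte IV */
                    return 0;
            }

            return -EINVAL;
    }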
@@ -1650,7 +1683,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 			.cra_driver_name = "chacha20-caam-qi2",
 			.cra_blocksize = 1,
 		},
-		.setkey = skcipher_setkey,
+		.setkey = chacha20_skcipher_setkey,
 		.encrypt = skcipher_encrypt,
 		.decrypt = skcipher_decrypt,
 		.min_keysize = CHACHA_KEY_SIZE,
@@ -2422,7 +2455,7 @@ static struct caam_aead_alg driver_aeads[] = {
 				.cra_name = "echainiv(authenc(hmac(sha256),"
 					    "cbc(des)))",
 				.cra_driver_name = "echainiv-authenc-"
-						   "hmac-sha256-cbc-desi-"
+						   "hmac-sha256-cbc-des-"
 						   "caam-qi2",
 				.cra_blocksize = DES_BLOCK_SIZE,
 			},
@@ -2918,6 +2951,7 @@ enum hash_optype {
 /**
  * caam_hash_ctx - ahash per-session context
  * @flc: Flow Contexts array
+ * @key: authentication key
  * @flc_dma: I/O virtual addresses of the Flow Contexts
  * @dev: dpseci device
  * @ctx_len: size of Context Register
@@ -2925,6 +2959,7 @@ enum hash_optype {
  */
 struct caam_hash_ctx {
 	struct caam_flc flc[HASH_NUM_OP];
+	u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
 	dma_addr_t flc_dma[HASH_NUM_OP];
 	struct device *dev;
 	int ctx_len;
@@ -2937,15 +2972,13 @@ struct caam_hash_state {
 	dma_addr_t buf_dma;
 	dma_addr_t ctx_dma;
 	int ctx_dma_len;
-	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
-	int buflen_0;
-	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
-	int buflen_1;
+	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+	int buflen;
+	int next_buflen;
 	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
 	int (*update)(struct ahash_request *req);
 	int (*final)(struct ahash_request *req);
 	int (*finup)(struct ahash_request *req);
-	int current_buf;
 };
 
 struct caam_export_state {
@@ -2957,42 +2990,17 @@ struct caam_export_state {
 	int (*finup)(struct ahash_request *req);
 };
 
-static inline void switch_buf(struct caam_hash_state *state)
-{
-	state->current_buf ^= 1;
-}
-
-static inline u8 *current_buf(struct caam_hash_state *state)
-{
-	return state->current_buf ? state->buf_1 : state->buf_0;
-}
-
-static inline u8 *alt_buf(struct caam_hash_state *state)
-{
-	return state->current_buf ? state->buf_0 : state->buf_1;
-}
-
-static inline int *current_buflen(struct caam_hash_state *state)
-{
-	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
-}
-
-static inline int *alt_buflen(struct caam_hash_state *state)
-{
-	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
-}
-
 /* Map current buffer in state (if length > 0) and put it in link table */
 static inline int buf_map_to_qm_sg(struct device *dev,
 				   struct dpaa2_sg_entry *qm_sg,
 				   struct caam_hash_state *state)
 {
-	int buflen = *current_buflen(state);
+	int buflen = state->buflen;
 
 	if (!buflen)
 		return 0;
 
-	state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
+	state->buf_dma = dma_map_single(dev, state->buf, buflen,
 					DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, state->buf_dma)) {
 		dev_err(dev, "unable to map buf\n");
@@ -3094,10 +3102,7 @@ static void split_key_sh_done(void *cbk_ctx, u32 err)
 
 	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
-	if (err)
-		caam_qi2_strstatus(res->dev, err);
-
-	res->err = err;
+	res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
 
 	complete(&res->completion);
 }
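With the buf_0/buf_1 ping-pong (and its five accessor helpers) gone, the per-request hash state reduces to one buffer plus two lengths. The resulting invariant, as a sketch:

    /* Sketch of the reworked state - one buffer instead of two. */
    struct demo_hash_state {
            u8  buf[64];      /* bytes being fed to the current HW job */
            int buflen;       /* how many of them are valid */
            int next_buflen;  /* tail bytes to carry into the next job */
    };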
@@ -3228,12 +3233,24 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
 	ctx->adata.key_virt = key;
 	ctx->adata.key_inline = true;
 
+	/*
+	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
+	 * in invalid opcodes (last bytes of user key) in the resulting
+	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
+	 * addresses are needed.
+	 */
+	if (keylen > ctx->adata.keylen_pad) {
+		memcpy(ctx->key, key, keylen);
+		dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
+					   ctx->adata.keylen_pad,
+					   DMA_TO_DEVICE);
+	}
+
 	ret = ahash_set_sh_desc(ahash);
 	kfree(hashed_key);
 	return ret;
 bad_free_key:
 	kfree(hashed_key);
-	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	return -EINVAL;
 }
@@ -3250,7 +3267,7 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
 				 DMA_TO_DEVICE);
 
 	if (state->buf_dma) {
-		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+		dma_unmap_single(dev, state->buf_dma, state->buflen,
 				 DMA_TO_DEVICE);
 		state->buf_dma = 0;
 	}
@@ -3282,10 +3299,8 @@ static void ahash_done(void *cbk_ctx, u32 status)
 
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
 
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	memcpy(req->result, state->caam_ctx, digestsize);
@@ -3310,15 +3325,21 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
 
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
 
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
-	switch_buf(state);
 	qi_cache_free(edesc);
 
+	scatterwalk_map_and_copy(state->buf, req->src,
+				 req->nbytes - state->next_buflen,
+				 state->next_buflen, 0);
+	state->buflen = state->next_buflen;
+
+	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+			     state->buflen, 1);
+
 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 			     ctx->ctx_len, 1);
@@ -3343,10 +3364,8 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
 
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
 
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	memcpy(req->result, state->caam_ctx, digestsize);
@@ -3371,15 +3390,21 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
 
 	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
 
-	if (unlikely(status)) {
-		caam_qi2_strstatus(ctx->dev, status);
-		ecode = -EIO;
-	}
+	if (unlikely(status))
+		ecode = caam_qi2_strstatus(ctx->dev, status);
 
 	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
-	switch_buf(state);
 	qi_cache_free(edesc);
 
+	scatterwalk_map_and_copy(state->buf, req->src,
+				 req->nbytes - state->next_buflen,
+				 state->next_buflen, 0);
+	state->buflen = state->next_buflen;
+
+	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
+			     state->buflen, 1);
+
 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 			     ctx->ctx_len, 1);
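Note where the tail copy moved: ahash_done_bi() and ahash_done_ctx_dst() now pull the carried bytes out of req->src only in the completion callback, i.e. once the hardware has finished reading the old contents of state->buf. That ordering is what lets the update paths below drop their own copies; the callback-side pattern is:

    /* Completion side (sketch): safe to overwrite state->buf here,
     * because the HW job that was reading it has already finished.
     */
    scatterwalk_map_and_copy(state->buf, req->src,
                             req->nbytes - state->next_buflen,
                             state->next_buflen, 0);
    state->buflen = state->next_buflen;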
@@ -3401,16 +3426,14 @@ static int ahash_update_ctx(struct ahash_request *req)
 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
-	u8 *buf = current_buf(state);
-	int *buflen = current_buflen(state);
-	u8 *next_buf = alt_buf(state);
-	int *next_buflen = alt_buflen(state), last_buflen;
+	u8 *buf = state->buf;
+	int *buflen = &state->buflen;
+	int *next_buflen = &state->next_buflen;
 	int in_len = *buflen + req->nbytes, to_hash;
 	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
 	struct ahash_edesc *edesc;
 	int ret = 0;
 
-	last_buflen = *next_buflen;
 	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
 	to_hash = in_len - *next_buflen;
@@ -3461,10 +3484,6 @@ static int ahash_update_ctx(struct ahash_request *req)
 		if (mapped_nents) {
 			sg_to_qm_sg_last(req->src, src_len,
 					 sg_table + qm_sg_src_index, 0);
-			if (*next_buflen)
-				scatterwalk_map_and_copy(next_buf, req->src,
-							 to_hash - *buflen,
-							 *next_buflen, 0);
 		} else {
 			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
 					   true);
@@ -3503,14 +3522,11 @@ static int ahash_update_ctx(struct ahash_request *req)
 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
 					 req->nbytes, 0);
 		*buflen = *next_buflen;
-		*next_buflen = last_buflen;
-	}
 
-	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
-			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
-	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
-			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
-			     1);
+		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
+				     *buflen, 1);
+	}
 
 	return ret;
 unmap_ctx:
@@ -3529,7 +3545,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
-	int buflen = *current_buflen(state);
+	int buflen = state->buflen;
 	int qm_sg_bytes;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
@@ -3600,7 +3616,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
-	int buflen = *current_buflen(state);
+	int buflen = state->buflen;
 	int qm_sg_bytes, qm_sg_src_index;
 	int src_nents, mapped_nents;
 	int digestsize = crypto_ahash_digestsize(ahash);
@@ -3789,8 +3805,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
-	u8 *buf = current_buf(state);
-	int buflen = *current_buflen(state);
+	u8 *buf = state->buf;
+	int buflen = state->buflen;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
 	int ret = -ENOMEM;
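The update/buffer split is a power-of-two mask, since hash block sizes are powers of two. A worked example for SHA-256 (64-byte blocks):

    /* buflen = 5 bytes already buffered, req->nbytes = 130 new bytes: */
    int in_len      = 5 + 130;              /* 135                        */
    int next_buflen = in_len & (64 - 1);    /*   7: tail kept for later   */
    int to_hash     = in_len - next_buflen; /* 128: two full blocks to HW */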
@@ -3862,10 +3878,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
-	u8 *buf = current_buf(state);
-	int *buflen = current_buflen(state);
-	u8 *next_buf = alt_buf(state);
-	int *next_buflen = alt_buflen(state);
+	u8 *buf = state->buf;
+	int *buflen = &state->buflen;
+	int *next_buflen = &state->next_buflen;
 	int in_len = *buflen + req->nbytes, to_hash;
 	int qm_sg_bytes, src_nents, mapped_nents;
 	struct ahash_edesc *edesc;
@@ -3914,11 +3929,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 
 	sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
 
-	if (*next_buflen)
-		scatterwalk_map_and_copy(next_buf, req->src,
-					 to_hash - *buflen,
-					 *next_buflen, 0);
-
 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
 					  DMA_TO_DEVICE);
 	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -3966,14 +3976,11 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
 					 req->nbytes, 0);
 		*buflen = *next_buflen;
-		*next_buflen = 0;
-	}
 
-	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
-			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
-	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
-			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
-			     1);
+		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
+				     *buflen, 1);
+	}
 
 	return ret;
 unmap_ctx:
@@ -3992,7 +3999,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
-	int buflen = *current_buflen(state);
+	int buflen = state->buflen;
 	int qm_sg_bytes, src_nents, mapped_nents;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
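Both update paths share the same buffering fast path: when to_hash works out to zero, no job is issued at all — the request's bytes are appended to the buffer and the call completes synchronously. An excerpt of the resulting code:

    } else if (*next_buflen) {
            scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                                     req->nbytes, 0);
            *buflen = *next_buflen;

            print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
                                 DUMP_PREFIX_ADDRESS, 16, 4, buf,
                                 *buflen, 1);
    }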
@@ -4088,8 +4095,9 @@ static int ahash_update_first(struct ahash_request *req)
 	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
-	u8 *next_buf = alt_buf(state);
-	int *next_buflen = alt_buflen(state);
+	u8 *buf = state->buf;
+	int *buflen = &state->buflen;
+	int *next_buflen = &state->next_buflen;
 	int to_hash;
 	int src_nents, mapped_nents;
 	struct ahash_edesc *edesc;
@@ -4157,10 +4165,6 @@ static int ahash_update_first(struct ahash_request *req)
 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
 	}
 
-	if (*next_buflen)
-		scatterwalk_map_and_copy(next_buf, req->src, to_hash,
-					 *next_buflen, 0);
-
 	state->ctx_dma_len = ctx->ctx_len;
 	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
 					ctx->ctx_len, DMA_FROM_DEVICE);
@@ -4194,14 +4198,14 @@ static int ahash_update_first(struct ahash_request *req)
 		state->update = ahash_update_no_ctx;
 		state->finup = ahash_finup_no_ctx;
 		state->final = ahash_final_no_ctx;
-		scatterwalk_map_and_copy(next_buf, req->src, 0,
+		scatterwalk_map_and_copy(buf, req->src, 0,
 					 req->nbytes, 0);
-		switch_buf(state);
-	}
+		*buflen = *next_buflen;
 
-	print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
-			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
-			     1);
+		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
+				     *buflen, 1);
+	}
 
 	return ret;
 unmap_ctx:
@@ -4225,10 +4229,9 @@ static int ahash_init(struct ahash_request *req)
 
 	state->ctx_dma = 0;
 	state->ctx_dma_len = 0;
-	state->current_buf = 0;
 	state->buf_dma = 0;
-	state->buflen_0 = 0;
-	state->buflen_1 = 0;
+	state->buflen = 0;
+	state->next_buflen = 0;
 
 	return 0;
 }
@@ -4258,16 +4261,8 @@ static int ahash_export(struct ahash_request *req, void *out)
 {
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct caam_export_state *export = out;
-	int len;
-	u8 *buf;
-
-	if (state->current_buf) {
-		buf = state->buf_1;
-		len = state->buflen_1;
-	} else {
-		buf = state->buf_0;
-		len = state->buflen_0;
-	}
+	u8 *buf = state->buf;
+	int len = state->buflen;
 
 	memcpy(export->buf, buf, len);
 	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
@@ -4285,9 +4280,9 @@ static int ahash_import(struct ahash_request *req, const void *in)
 	const struct caam_export_state *export = in;
 
 	memset(state, 0, sizeof(*state));
-	memcpy(state->buf_0, export->buf, export->buflen);
+	memcpy(state->buf, export->buf, export->buflen);
 	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
-	state->buflen_0 = export->buflen;
+	state->buflen = export->buflen;
 	state->update = export->update;
 	state->final = export->final;
 	state->finup = export->finup;
@@ -4466,11 +4461,27 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
 
 	ctx->dev = caam_hash->dev;
 
+	if (alg->setkey) {
+		ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
+							  ARRAY_SIZE(ctx->key),
+							  DMA_TO_DEVICE,
+							  DMA_ATTR_SKIP_CPU_SYNC);
+		if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
+			dev_err(ctx->dev, "unable to map key\n");
+			return -ENOMEM;
+		}
+	}
+
 	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
 					DMA_BIDIRECTIONAL,
 					DMA_ATTR_SKIP_CPU_SYNC);
 	if (dma_mapping_error(ctx->dev, dma_addr)) {
 		dev_err(ctx->dev, "unable to map shared descriptors\n");
+		if (ctx->adata.key_dma)
+			dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
+					       ARRAY_SIZE(ctx->key),
+					       DMA_TO_DEVICE,
+					       DMA_ATTR_SKIP_CPU_SYNC);
 		return -ENOMEM;
 	}
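The key buffer is mapped once for the tfm's lifetime with DMA_ATTR_SKIP_CPU_SYNC — at map time it holds nothing the device needs to see — and every later CPU write is published with an explicit sync. The general pattern, sketched with hypothetical names (dev, ctx_key, user_key, keylen_pad):

    u8 ctx_key[64];               /* hypothetical pre-allocated key buffer */
    dma_addr_t key_dma;

    /* tfm init: reserve the mapping only - skip cache maintenance */
    key_dma = dma_map_single_attrs(dev, ctx_key, sizeof(ctx_key),
                                   DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);

    /* setkey: after the CPU writes, publish the bytes to the device */
    memcpy(ctx_key, user_key, keylen);
    dma_sync_single_for_device(dev, key_dma, keylen_pad, DMA_TO_DEVICE);

    /* tfm exit: unmap with the same attrs, again skipping the CPU sync */
    dma_unmap_single_attrs(dev, key_dma, sizeof(ctx_key),
                           DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);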
@@ -4496,6 +4507,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
 
 	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
 			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
+	if (ctx->adata.key_dma)
+		dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
+				       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
+				       DMA_ATTR_SKIP_CPU_SYNC);
 }
 
 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
@@ -4700,7 +4715,7 @@ static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
 
 	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
 	if (unlikely(fd_err))
-		dev_err(priv->dev, "FD error: %08x\n", fd_err);
+		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
 
 	/*
 	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
@@ -5098,6 +5113,8 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
 		goto err_bind;
 	}
 
+	dpaa2_dpseci_debugfs_init(priv);
+
 	/* register crypto algorithms the device supports */
 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
 		struct caam_skcipher_alg *t_alg = driver_algs + i;
@@ -5265,6 +5282,8 @@ static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
 	dev = &ls_dev->dev;
 	priv = dev_get_drvdata(dev);
 
+	dpaa2_dpseci_debugfs_exit(priv);
+
 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
 		struct caam_aead_alg *t_alg = driver_aeads + i;
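dpaa2_dpseci_debugfs_init()/dpaa2_dpseci_debugfs_exit() come from the new dpseci-debugfs.h header; their implementation is not part of this file's diff. A plausible minimal shape, assuming a dfs_root dentry hangs off dpaa2_caam_priv (sketch only, not the actual dpseci-debugfs.c):

    #include <linux/debugfs.h>

    void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv)
    {
            priv->dfs_root = debugfs_create_dir(dev_name(priv->dev), NULL);
    }

    void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv)
    {
            debugfs_remove_recursive(priv->dfs_root);
    }

Note the ordering in the driver: probe creates the directory only after the DPSECI object is fully bound, and remove tears it down first, so no debugfs file can outlive priv.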