Diffstat (limited to 'drivers/crypto/ccree')
-rw-r--r--  drivers/crypto/ccree/Makefile         |   2
-rw-r--r--  drivers/crypto/ccree/cc_aead.c        | 169
-rw-r--r--  drivers/crypto/ccree/cc_aead.h        |   3
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.c  |  21
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.h  |   2
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c      |  93
-rw-r--r--  drivers/crypto/ccree/cc_driver.c      |  40
-rw-r--r--  drivers/crypto/ccree/cc_driver.h      |  16
-rw-r--r--  drivers/crypto/ccree/cc_fips.c        |  33
-rw-r--r--  drivers/crypto/ccree/cc_hash.c        | 161
-rw-r--r--  drivers/crypto/ccree/cc_ivgen.c       | 276
-rw-r--r--  drivers/crypto/ccree/cc_ivgen.h       |  55
-rw-r--r--  drivers/crypto/ccree/cc_pm.c          |  41
-rw-r--r--  drivers/crypto/ccree/cc_pm.h          |  17
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.c | 142
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.h |   8
16 files changed, 270 insertions(+), 809 deletions(-)
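Aside (not part of the patch): the largest functional change in the cc_aead.c diff below swaps the driver-private cc_zero_sgl() scatterlist walker for the generic sg_zero_buffer() helper. A minimal sketch of the new error path, with a hypothetical helper name, assuming an aead_request whose destination scatterlist holds assoclen bytes of AAD followed by the decrypted payload:

/* Sketch only. On an ICV mismatch the driver must not reveal the
 * decrypted payload, so it wipes exactly req->cryptlen bytes of the
 * destination SGL, skipping over the AAD, before returning -EBADMSG.
 */
#include <linux/scatterlist.h>
#include <crypto/aead.h>

static int cc_wipe_plaintext_on_bad_icv(struct aead_request *req)
{
	/* zero cryptlen bytes after skipping assoclen bytes of AAD */
	sg_zero_buffer(req->dst, sg_nents(req->dst),
		       req->cryptlen, req->assoclen);
	return -EBADMSG;
}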
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index 145e50bdbf16..5cfda508ee41 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -2,7 +2,7 @@
 # Copyright (C) 2012-2019 ARM Limited (or its affiliates).
 
 obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
-ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_sram_mgr.o
 ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
 ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
 ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 7aa4cbe19a86..2fc0e0da790b 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -6,7 +6,7 @@
 #include <crypto/algapi.h>
 #include <crypto/internal/aead.h>
 #include <crypto/authenc.h>
-#include <crypto/des.h>
+#include <crypto/internal/des.h>
 #include <linux/rtnetlink.h>
 #include "cc_driver.h"
 #include "cc_buffer_mgr.h"
@@ -236,31 +236,17 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
 			/* In case of payload authentication failure, MUST NOT
 			 * revealed the decrypted message --> zero its memory.
 			 */
-			cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
+			sg_zero_buffer(areq->dst, sg_nents(areq->dst),
+				       areq->cryptlen, areq->assoclen);
 			err = -EBADMSG;
 		}
-	} else { /*ENCRYPT*/
-		if (areq_ctx->is_icv_fragmented) {
-			u32 skip = areq->cryptlen + areq_ctx->dst_offset;
-
-			cc_copy_sg_portion(dev, areq_ctx->mac_buf,
-					   areq_ctx->dst_sgl, skip,
-					   (skip + ctx->authsize),
-					   CC_SG_FROM_BUF);
-		}
+	/*ENCRYPT*/
+	} else if (areq_ctx->is_icv_fragmented) {
+		u32 skip = areq->cryptlen + areq_ctx->dst_offset;
 
-		/* If an IV was generated, copy it back to the user provided
-		 * buffer.
-		 */
-		if (areq_ctx->backup_giv) {
-			if (ctx->cipher_mode == DRV_CIPHER_CTR)
-				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
-				       CTR_RFC3686_NONCE_SIZE,
-				       CTR_RFC3686_IV_SIZE);
-			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
-				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
-				       CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
-		}
+		cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
+				   skip, (skip + ctx->authsize),
+				   CC_SG_FROM_BUF);
 	}
 done:
 	aead_request_complete(areq, err);
 }
@@ -307,7 +293,8 @@ static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
 	return 4;
 }
 
-static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
+static unsigned int hmac_setkey(struct cc_hw_desc *desc,
+				struct cc_aead_ctx *ctx)
 {
 	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
 	unsigned int digest_ofs = 0;
@@ -398,13 +385,13 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
 			return -EINVAL;
 		break;
 	default:
-		dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
+		dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
 		return -EINVAL;
 	}
 	/* Check cipher key size */
 	if (ctx->flow_mode == S_DIN_to_DES) {
 		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
-			dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
+			dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n",
 				ctx->enc_keylen);
 			return -EINVAL;
 		}
@@ -412,7 +399,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
 		if (ctx->enc_keylen != AES_KEYSIZE_128 &&
 		    ctx->enc_keylen != AES_KEYSIZE_192 &&
 		    ctx->enc_keylen != AES_KEYSIZE_256) {
-			dev_err(dev, "Invalid cipher(AES) key size: %u\n",
+			dev_dbg(dev, "Invalid cipher(AES) key size: %u\n",
 				ctx->enc_keylen);
 			return -EINVAL;
 		}
@@ -575,7 +562,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 
 		rc = crypto_authenc_extractkeys(&keys, key, keylen);
 		if (rc)
-			goto badkey;
+			return rc;
 		enckey = keys.enckey;
 		authkey = keys.authkey;
 		ctx->enc_keylen = keys.enckeylen;
@@ -583,10 +570,9 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 
 		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
 			/* the nonce is stored in bytes at end of key */
-			rc = -EINVAL;
 			if (ctx->enc_keylen <
 			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
-				goto badkey;
+				return -EINVAL;
 			/* Copy nonce from last 4 bytes in CTR key to
 			 * first 4 bytes in CTR IV
 			 */
@@ -604,7 +590,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 
 	rc = validate_keys_sizes(ctx);
 	if (rc)
-		goto badkey;
+		return rc;
 
 	/* STAT_PHASE_1: Copy key to ctx */
 
@@ -618,7 +604,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
 		rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
 		if (rc)
-			goto badkey;
+			return rc;
 	}
 
 	/* STAT_PHASE_2: Create sequence */
@@ -635,8 +621,7 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 		break; /* No auth. key setup */
 	default:
 		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
-		rc = -ENOTSUPP;
-		goto badkey;
+		return -ENOTSUPP;
 	}
 
 	/* STAT_PHASE_3: Submit sequence to HW */
@@ -645,51 +630,29 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 		rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
 		if (rc) {
 			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
-			goto setkey_error;
+			return rc;
 		}
 	}
 
 	/* Update STAT_PHASE_3 */
 	return rc;
-
-badkey:
-	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
-setkey_error:
-	return rc;
 }
 
 static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
 			       unsigned int keylen)
 {
 	struct crypto_authenc_keys keys;
-	u32 flags;
 	int err;
 
 	err = crypto_authenc_extractkeys(&keys, key, keylen);
 	if (unlikely(err))
-		goto badkey;
+		return err;
 
-	err = -EINVAL;
-	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
-		goto badkey;
-
-	flags = crypto_aead_get_flags(aead);
-	err = __des3_verify_key(&flags, keys.enckey);
-	if (unlikely(err)) {
-		crypto_aead_set_flags(aead, flags);
-		goto out;
-	}
+	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
+	      cc_aead_setkey(aead, key, keylen);
 
-	err = cc_aead_setkey(aead, key, keylen);
-
-out:
 	memzero_explicit(&keys, sizeof(keys));
 	return err;
-
-badkey:
-	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
-	goto out;
 }
 
 static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -1596,7 +1559,7 @@ static int config_ccm_adata(struct aead_request *req)
 	/* taken from crypto/ccm.c */
 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
 	if (l < 2 || l > 8) {
-		dev_err(dev, "illegal iv value %X\n", req->iv[0]);
+		dev_dbg(dev, "illegal iv value %X\n", req->iv[0]);
 		return -EINVAL;
 	}
 	memcpy(b0, req->iv, AES_BLOCK_SIZE);
@@ -1954,7 +1917,6 @@ static int cc_proc_aead(struct aead_request *req,
 	if (validate_data_size(ctx, direct, req)) {
 		dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
 			req->cryptlen, areq_ctx->assoclen);
-		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
 		return -EINVAL;
 	}
 
@@ -1975,9 +1937,8 @@ static int cc_proc_aead(struct aead_request *req,
 		 */
 		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
 		       CTR_RFC3686_NONCE_SIZE);
-		if (!areq_ctx->backup_giv) /*User none-generated IV*/
-			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
-			       req->iv, CTR_RFC3686_IV_SIZE);
+		memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+		       CTR_RFC3686_IV_SIZE);
 		/* Initialize counter portion of counter block */
 		*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
 			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
@@ -2023,40 +1984,6 @@ static int cc_proc_aead(struct aead_request *req,
 		goto exit;
 	}
 
-	/* do we need to generate IV? */
-	if (areq_ctx->backup_giv) {
-		/* set the DMA mapped IV address*/
-		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
-			cc_req.ivgen_dma_addr[0] =
-				areq_ctx->gen_ctx.iv_dma_addr +
-				CTR_RFC3686_NONCE_SIZE;
-			cc_req.ivgen_dma_addr_len = 1;
-		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
-			/* In ccm, the IV needs to exist both inside B0 and
-			 * inside the counter.It is also copied to iv_dma_addr
-			 * for other reasons (like returning it to the user).
-			 * So, using 3 (identical) IV outputs.
-			 */
-			cc_req.ivgen_dma_addr[0] =
-				areq_ctx->gen_ctx.iv_dma_addr +
-				CCM_BLOCK_IV_OFFSET;
-			cc_req.ivgen_dma_addr[1] =
-				sg_dma_address(&areq_ctx->ccm_adata_sg) +
-				CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
-			cc_req.ivgen_dma_addr[2] =
-				sg_dma_address(&areq_ctx->ccm_adata_sg) +
-				CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
-			cc_req.ivgen_dma_addr_len = 3;
-		} else {
-			cc_req.ivgen_dma_addr[0] =
-				areq_ctx->gen_ctx.iv_dma_addr;
-			cc_req.ivgen_dma_addr_len = 1;
-		}
-
-		/* set the IV size (8/16 B long)*/
-		cc_req.ivgen_size = crypto_aead_ivsize(tfm);
-	}
-
 	/* STAT_PHASE_2: Create sequence */
 
 	/* Load MLLI tables to SRAM if necessary */
@@ -2107,7 +2034,6 @@ static int cc_aead_encrypt(struct aead_request *req)
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
 	areq_ctx->is_gcm4543 = false;
 
 	areq_ctx->plaintext_authenticate_only = false;
@@ -2130,7 +2056,7 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
 	int rc = -EINVAL;
 
 	if (!valid_assoclen(req)) {
-		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+		dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
 		goto out;
 	}
 
@@ -2139,7 +2065,6 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
 	areq_ctx->is_gcm4543 = true;
 
 	cc_proc_rfc4309_ccm(req);
@@ -2161,7 +2086,6 @@ static int cc_aead_decrypt(struct aead_request *req)
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
 	areq_ctx->is_gcm4543 = false;
 
 	areq_ctx->plaintext_authenticate_only = false;
@@ -2182,7 +2106,7 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
 	int rc = -EINVAL;
 
 	if (!valid_assoclen(req)) {
-		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+		dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
 		goto out;
 	}
 
@@ -2191,7 +2115,6 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
 	areq_ctx->is_gcm4543 = true;
 
 	cc_proc_rfc4309_ccm(req);
@@ -2302,7 +2225,7 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
 	int rc = -EINVAL;
 
 	if (!valid_assoclen(req)) {
-		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+		dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
 		goto out;
 	}
 
@@ -2311,8 +2234,6 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
-
 	areq_ctx->plaintext_authenticate_only = false;
 
 	cc_proc_rfc4_gcm(req);
@@ -2328,9 +2249,16 @@ out:
 static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
 {
 	/* Very similar to cc_aead_encrypt() above. */
-
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	int rc;
+	int rc = -EINVAL;
+
+	if (!valid_assoclen(req)) {
+		dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
+		goto out;
+	}
 
 	memset(areq_ctx, 0, sizeof(*areq_ctx));
 
@@ -2340,7 +2268,6 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
 
 	cc_proc_rfc4_gcm(req);
 	areq_ctx->is_gcm4543 = true;
@@ -2348,7 +2275,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
 	if (rc != -EINPROGRESS && rc != -EBUSY)
 		req->iv = areq_ctx->backup_iv;
-
+out:
 	return rc;
 }
 
@@ -2363,7 +2290,7 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
 	int rc = -EINVAL;
 
 	if (!valid_assoclen(req)) {
-		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+		dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
 		goto out;
 	}
 
@@ -2372,8 +2299,6 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
-
 	areq_ctx->plaintext_authenticate_only = false;
 
 	cc_proc_rfc4_gcm(req);
@@ -2389,9 +2314,16 @@ out:
 static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
 {
 	/* Very similar to cc_aead_decrypt() above. */
-
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	int rc;
+	int rc = -EINVAL;
+
+	if (!valid_assoclen(req)) {
+		dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen);
+		goto out;
+	}
 
 	memset(areq_ctx, 0, sizeof(*areq_ctx));
 
@@ -2401,7 +2333,6 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->backup_giv = NULL;
 
 	cc_proc_rfc4_gcm(req);
 	areq_ctx->is_gcm4543 = true;
@@ -2409,7 +2340,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
 	if (rc != -EINPROGRESS && rc != -EBUSY)
 		req->iv = areq_ctx->backup_iv;
-
+out:
 	return rc;
 }
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
index e51724b96c56..f12169b57f9d 100644
--- a/drivers/crypto/ccree/cc_aead.h
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -65,8 +65,7 @@ struct aead_req_ctx {
 	unsigned int hw_iv_size ____cacheline_aligned;
 	/* used to prevent cache coherence problem */
 	u8 backup_mac[MAX_MAC_SIZE];
-	u8 *backup_iv; /*store iv for generated IV flow*/
-	u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
+	u8 *backup_iv; /* store orig iv */
 	u32 assoclen; /* internal assoclen */
 	dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
 	/* buffer for internal ccm configurations */
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index c81ad33f9115..a72586eccd81 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -100,27 +100,6 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
 }
 
 /**
- * cc_zero_sgl() - Zero scatter scatter list data.
- *
- * @sgl:
- */
-void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
-{
-	struct scatterlist *current_sg = sgl;
-	int sg_index = 0;
-
-	while (sg_index <= data_len) {
-		if (!current_sg) {
-			/* reached the end of the sgl --> just return back */
-			return;
-		}
-		memset(sg_virt(current_sg), 0, current_sg->length);
-		sg_index += current_sg->length;
-		current_sg = sg_next(current_sg);
-	}
-}
-
-/**
  * cc_copy_sg_portion() - Copy scatter list data,
  * from to_skip to end, to dest and vice versa
  *
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index a726016bdbc1..af434872c6ff 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -66,6 +66,4 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
 void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);
 
-void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);
-
 #endif /*__BUFFER_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 5b58226ea24d..7d6252d892d7 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -5,7 +5,7 @@
 #include <linux/module.h>
 #include <crypto/algapi.h>
 #include <crypto/internal/skcipher.h>
-#include <crypto/des.h>
+#include <crypto/internal/des.h>
 #include <crypto/xts.h>
 #include <crypto/sm4.h>
 #include <crypto/scatterwalk.h>
@@ -16,7 +16,7 @@
 #include "cc_cipher.h"
 #include "cc_request_mgr.h"
 
-#define MAX_ABLKCIPHER_SEQ_LEN 6
+#define MAX_SKCIPHER_SEQ_LEN 6
 
 #define template_skcipher template_u.skcipher
 
@@ -116,10 +116,6 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p,
 	case S_DIN_to_AES:
 		switch (ctx_p->cipher_mode) {
 		case DRV_CIPHER_XTS:
-			if (size >= AES_BLOCK_SIZE &&
-			    IS_ALIGNED(size, AES_BLOCK_SIZE))
-				return 0;
-			break;
 		case DRV_CIPHER_CBC_CTS:
 			if (size >= AES_BLOCK_SIZE)
 				return 0;
@@ -295,7 +291,6 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
 	/* This check the size of the protected key token */
 	if (keylen != sizeof(hki)) {
 		dev_err(dev, "Unsupported protected key size %d.\n", keylen);
-		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 
@@ -307,8 +302,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
 	keylen = hki.keylen;
 
 	if (validate_keys_sizes(ctx_p, keylen)) {
-		dev_err(dev, "Unsupported key size %d.\n", keylen);
-		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		dev_dbg(dev, "Unsupported key size %d.\n", keylen);
 		return -EINVAL;
 	}
 
@@ -398,8 +392,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
 	/* STAT_PHASE_0: Init and sanity checks */
 
 	if (validate_keys_sizes(ctx_p, keylen)) {
-		dev_err(dev, "Unsupported key size %d.\n", keylen);
-		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		dev_dbg(dev, "Unsupported key size %d.\n", keylen);
 		return -EINVAL;
 	}
 
@@ -411,16 +404,9 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
 	 * HW does the expansion on its own.
 	 */
 	if (ctx_p->flow_mode == S_DIN_to_DES) {
-		u32 tmp[DES3_EDE_EXPKEY_WORDS];
-		if (keylen == DES3_EDE_KEY_SIZE &&
-		    __des3_ede_setkey(tmp, &tfm->crt_flags, key,
-				      DES3_EDE_KEY_SIZE)) {
-			dev_dbg(dev, "weak 3DES key");
-			return -EINVAL;
-		} else if (!des_ekey(tmp, key) &&
-			   (crypto_tfm_get_flags(tfm) &
-			    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
-			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		if ((keylen == DES3_EDE_KEY_SIZE &&
+		     verify_skcipher_des3_key(sktfm, key)) ||
+		    verify_skcipher_des_key(sktfm, key)) {
 			dev_dbg(dev, "weak DES key");
 			return -EINVAL;
 		}
@@ -534,6 +520,7 @@ static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
 	}
 }
 
+
 static void cc_setup_state_desc(struct crypto_tfm *tfm,
 				struct cipher_req_ctx *req_ctx,
 				unsigned int ivsize, unsigned int nbytes,
@@ -545,8 +532,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
 	int cipher_mode = ctx_p->cipher_mode;
 	int flow_mode = ctx_p->flow_mode;
 	int direction = req_ctx->gen_ctx.op_type;
-	dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
-	unsigned int key_len = ctx_p->keylen;
 	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
 	unsigned int du_size = nbytes;
 
@@ -582,6 +567,47 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
 	case DRV_CIPHER_XTS:
 	case DRV_CIPHER_ESSIV:
 	case DRV_CIPHER_BITLOCKER:
+		break;
+	default:
+		dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+	}
+}
+
+
+static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
+				    struct cipher_req_ctx *req_ctx,
+				    unsigned int ivsize, unsigned int nbytes,
+				    struct cc_hw_desc desc[],
+				    unsigned int *seq_size)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	int cipher_mode = ctx_p->cipher_mode;
+	int flow_mode = ctx_p->flow_mode;
+	int direction = req_ctx->gen_ctx.op_type;
+	dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+	unsigned int key_len = ctx_p->keylen;
+	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+	unsigned int du_size = nbytes;
+
+	struct cc_crypto_alg *cc_alg =
+		container_of(tfm->__crt_alg, struct cc_crypto_alg,
+			     skcipher_alg.base);
+
+	if (cc_alg->data_unit)
+		du_size = cc_alg->data_unit;
+
+	switch (cipher_mode) {
+	case DRV_CIPHER_ECB:
+		break;
+	case DRV_CIPHER_CBC:
+	case DRV_CIPHER_CBC_CTS:
+	case DRV_CIPHER_CTR:
+	case DRV_CIPHER_OFB:
+		break;
+	case DRV_CIPHER_XTS:
+	case DRV_CIPHER_ESSIV:
+	case DRV_CIPHER_BITLOCKER:
 		/* load XEX key */
 		hw_desc_init(&desc[*seq_size]);
 		set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -833,7 +859,7 @@ static int cc_cipher_process(struct skcipher_request *req,
 	void *iv = req->iv;
 	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
-	struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
+	struct cc_hw_desc desc[MAX_SKCIPHER_SEQ_LEN];
 	struct cc_crypto_req cc_req = {};
 	int rc;
 	unsigned int seq_len = 0;
@@ -847,8 +873,7 @@ static int cc_cipher_process(struct skcipher_request *req,
 
 	/* TODO: check data length according to mode */
 	if (validate_data_size(ctx_p, nbytes)) {
-		dev_err(dev, "Unsupported data size %d.\n", nbytes);
-		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+		dev_dbg(dev, "Unsupported data size %d.\n", nbytes);
 		rc = -EINVAL;
 		goto exit_process;
 	}
@@ -892,12 +917,14 @@ static int cc_cipher_process(struct skcipher_request *req,
 
 	/* STAT_PHASE_2: Create sequence */
 
-	/* Setup IV and XEX key used */
+	/* Setup state (IV) */
 	cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
 	/* Setup MLLI line, if needed */
 	cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
 	/* Setup key */
 	cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
+	/* Setup state (IV and XEX key) */
+	cc_setup_xex_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
 	/* Data processing */
 	cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
 	/* Read next IV */
@@ -945,7 +972,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 	{
 		.name = "xts(paes)",
 		.driver_name = "xts-paes-ccree",
-		.blocksize = AES_BLOCK_SIZE,
+		.blocksize = 1,
 		.template_skcipher = {
 			.setkey = cc_cipher_sethkey,
 			.encrypt = cc_cipher_encrypt,
@@ -963,7 +990,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 	{
 		.name = "xts512(paes)",
 		.driver_name = "xts-paes-du512-ccree",
-		.blocksize = AES_BLOCK_SIZE,
+		.blocksize = 1,
 		.template_skcipher = {
 			.setkey = cc_cipher_sethkey,
 			.encrypt = cc_cipher_encrypt,
@@ -982,7 +1009,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 	{
 		.name = "xts4096(paes)",
 		.driver_name = "xts-paes-du4096-ccree",
-		.blocksize = AES_BLOCK_SIZE,
+		.blocksize = 1,
 		.template_skcipher = {
 			.setkey = cc_cipher_sethkey,
 			.encrypt = cc_cipher_encrypt,
@@ -1203,7 +1230,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 	{
 		.name = "xts(aes)",
 		.driver_name = "xts-aes-ccree",
-		.blocksize = AES_BLOCK_SIZE,
+		.blocksize = 1,
 		.template_skcipher = {
 			.setkey = cc_cipher_setkey,
 			.encrypt = cc_cipher_encrypt,
@@ -1220,7 +1247,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 	{
 		.name = "xts512(aes)",
 		.driver_name = "xts-aes-du512-ccree",
-		.blocksize = AES_BLOCK_SIZE,
+		.blocksize = 1,
 		.template_skcipher = {
 			.setkey = cc_cipher_setkey,
 			.encrypt = cc_cipher_encrypt,
@@ -1238,7 +1265,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 	{
 		.name = "xts4096(aes)",
 		.driver_name = "xts-aes-du4096-ccree",
-		.blocksize = AES_BLOCK_SIZE,
+		.blocksize = 1,
 		.template_skcipher = {
 			.setkey = cc_cipher_setkey,
 			.encrypt = cc_cipher_encrypt,
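Aside (not part of the patch): the cc_aead.c and cc_cipher.c setkey hunks above replace the open-coded des_ekey()/__des3_ede_setkey() weak-key checks with the crypto/internal/des.h helpers. A hedged sketch of the helper usage, with a hypothetical setkey function, not the driver's exact logic:

/* Sketch only. verify_skcipher_des_key()/verify_skcipher_des3_key()
 * honour CRYPTO_TFM_REQ_FORBID_WEAK_KEYS and return a negative errno
 * when a weak key must be rejected, replacing the hand-rolled checks.
 */
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

static int example_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int keylen)
{
	int err;

	err = keylen == DES3_EDE_KEY_SIZE ?
	      verify_skcipher_des3_key(tfm, key) :
	      verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	/* ... program the verified key into the hardware context ... */
	return 0;
}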
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 980aa04b655b..532bc95a8373 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -22,7 +22,6 @@
 #include "cc_cipher.h"
 #include "cc_aead.h"
 #include "cc_hash.h"
-#include "cc_ivgen.h"
 #include "cc_sram_mgr.h"
 #include "cc_pm.h"
 #include "cc_fips.h"
@@ -134,7 +133,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
 	u32 imr;
 
 	/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
-	/* if driver suspended return, probebly shared interrupt */
+	/* if driver suspended return, probably shared interrupt */
 	if (cc_pm_is_dev_suspended(dev))
 		return IRQ_NONE;
 
@@ -272,6 +271,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	const struct cc_hw_data *hw_rev;
 	const struct of_device_id *dev_id;
 	struct clk *clk;
+	int irq;
 	int rc = 0;
 
 	new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
@@ -338,11 +338,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
 		&req_mem_cc_regs->start, new_drvdata->cc_base);
 
 	/* Then IRQ */
-	new_drvdata->irq = platform_get_irq(plat_dev, 0);
-	if (new_drvdata->irq < 0) {
-		dev_err(dev, "Failed getting IRQ resource\n");
-		return new_drvdata->irq;
-	}
+	irq = platform_get_irq(plat_dev, 0);
+	if (irq < 0)
+		return irq;
 
 	init_completion(&new_drvdata->hw_queue_avail);
 
@@ -421,7 +419,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
 		}
 		break;
 	default:
-		dev_err(dev, "Unsupported engines configration.\n");
+		dev_err(dev, "Unsupported engines configuration.\n");
 		rc = -EINVAL;
 		goto post_clk_err;
 	}
@@ -445,14 +443,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
 		 hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
 	/* register the driver isr function */
-	rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
-			      IRQF_SHARED, "ccree", new_drvdata);
+	rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "ccree",
+			      new_drvdata);
 	if (rc) {
-		dev_err(dev, "Could not register to interrupt %d\n",
-			new_drvdata->irq);
+		dev_err(dev, "Could not register to interrupt %d\n", irq);
 		goto post_clk_err;
 	}
-	dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
+	dev_dbg(dev, "Registered to IRQ: %d\n", irq);
 
 	rc = init_cc_regs(new_drvdata, true);
 	if (rc) {
@@ -468,7 +465,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
 
 	rc = cc_fips_init(new_drvdata);
 	if (rc) {
-		dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
+		dev_err(dev, "cc_fips_init failed 0x%x\n", rc);
 		goto post_debugfs_err;
 	}
 	rc = cc_sram_mgr_init(new_drvdata);
@@ -493,19 +490,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
 
 	rc = cc_buffer_mgr_init(new_drvdata);
 	if (rc) {
-		dev_err(dev, "buffer_mgr_init failed\n");
+		dev_err(dev, "cc_buffer_mgr_init failed\n");
 		goto post_req_mgr_err;
 	}
 
 	rc = cc_pm_init(new_drvdata);
 	if (rc) {
-		dev_err(dev, "ssi_power_mgr_init failed\n");
-		goto post_buf_mgr_err;
-	}
-
-	rc = cc_ivgen_init(new_drvdata);
-	if (rc) {
-		dev_err(dev, "cc_ivgen_init failed\n");
+		dev_err(dev, "cc_pm_init failed\n");
 		goto post_buf_mgr_err;
 	}
 
@@ -513,7 +504,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	rc = cc_cipher_alloc(new_drvdata);
 	if (rc) {
 		dev_err(dev, "cc_cipher_alloc failed\n");
-		goto post_ivgen_err;
+		goto post_buf_mgr_err;
 	}
 
 	/* hash must be allocated before aead since hash exports APIs */
@@ -544,8 +535,6 @@ post_hash_err:
 	cc_hash_free(new_drvdata);
 post_cipher_err:
 	cc_cipher_free(new_drvdata);
-post_ivgen_err:
-	cc_ivgen_fini(new_drvdata);
 post_buf_mgr_err:
 	cc_buffer_mgr_fini(new_drvdata);
 post_req_mgr_err:
@@ -577,7 +566,6 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
 	cc_aead_free(drvdata);
 	cc_hash_free(drvdata);
 	cc_cipher_free(drvdata);
-	cc_ivgen_fini(drvdata);
 	cc_pm_fini(drvdata);
 	cc_buffer_mgr_fini(drvdata);
 	cc_req_mgr_fini(drvdata);
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 7cd99380bf1f..c227718ba992 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -28,7 +28,6 @@
 
 /* Registers definitions from shared/hw/ree_include */
 #include "cc_host_regs.h"
-#define CC_DEV_SHA_MAX 512
 #include "cc_crypto_ctx.h"
 #include "cc_hw_queue_defs.h"
 #include "cc_sram_mgr.h"
@@ -126,15 +125,6 @@ struct cc_cpp_req {
 struct cc_crypto_req {
 	void (*user_cb)(struct device *dev, void *req, int err);
 	void *user_arg;
-	dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
-	/* For the first 'ivgen_dma_addr_len' addresses of this array,
-	 * generated IV would be placed in it by send_request().
-	 * Same generated IV for all addresses!
-	 */
-	/* Amount of 'ivgen_dma_addr' elements to be filled. */
-	unsigned int ivgen_dma_addr_len;
-	/* The generated IV size required, 8/16 B allowed. */
-	unsigned int ivgen_size;
 	struct completion seq_compl; /* request completion */
 	struct cc_cpp_req cpp;
 };
@@ -142,13 +132,11 @@ struct cc_crypto_req {
 /**
  * struct cc_drvdata - driver private data context
  * @cc_base:	virt address of the CC registers
- * @irq:	device IRQ number
- * @irq_mask:	Interrupt mask shadow (1 for masked interrupts)
+ * @irq:	bitmap indicating source of last interrupt
 */
 struct cc_drvdata {
 	void __iomem *cc_base;
 	int irq;
-	u32 irq_mask;
 	struct completion hw_queue_avail; /* wait for HW queue availability */
 	struct platform_device *plat_dev;
 	cc_sram_addr_t mlli_sram_addr;
@@ -158,7 +146,6 @@ struct cc_drvdata {
 	void *aead_handle;
 	void *request_mgr_handle;
 	void *fips_handle;
-	void *ivgen_handle;
 	void *sram_mgr_handle;
 	void *debugfs;
 	struct clk *clk;
@@ -171,6 +158,7 @@ struct cc_drvdata {
 	int std_bodies;
 	bool sec_disabled;
 	u32 comp_mask;
+	bool pm_on;
 };
 
 struct cc_crypto_alg {
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index 5ad3ffb7acaa..702aefc21447 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -3,6 +3,7 @@
 
 #include <linux/kernel.h>
 #include <linux/fips.h>
+#include <linux/notifier.h>
 
 #include "cc_driver.h"
 #include "cc_fips.h"
@@ -11,6 +12,8 @@ static void fips_dsr(unsigned long devarg);
 
 struct cc_fips_handle {
 	struct tasklet_struct tasklet;
+	struct notifier_block nb;
+	struct cc_drvdata *drvdata;
 };
 
 /* The function called once at driver entry point to check
@@ -21,7 +24,13 @@ static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
 	u32 reg;
 
 	reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
-	return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
+	/* Did the TEE report status? */
+	if (reg & CC_FIPS_SYNC_TEE_STATUS)
+		/* Yes. Is it OK? */
+		return (reg & CC_FIPS_SYNC_MODULE_OK);
+
+	/* No. It's either not in use or will be reported later */
+	return true;
 }
 
 /*
@@ -40,6 +49,21 @@ void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status)
 	cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
 }
 
+/* Push REE side FIPS test failure to TEE side */
+static int cc_ree_fips_failure(struct notifier_block *nb, unsigned long unused1,
+			       void *unused2)
+{
+	struct cc_fips_handle *fips_h =
+			container_of(nb, struct cc_fips_handle, nb);
+	struct cc_drvdata *drvdata = fips_h->drvdata;
+	struct device *dev = drvdata_to_dev(drvdata);
+
+	cc_set_ree_fips_status(drvdata, false);
+	dev_info(dev, "Notifying TEE of FIPS test failure...\n");
+
+	return NOTIFY_OK;
+}
+
 void cc_fips_fini(struct cc_drvdata *drvdata)
 {
 	struct cc_fips_handle *fips_h = drvdata->fips_handle;
@@ -47,6 +71,8 @@ void cc_fips_fini(struct cc_drvdata *drvdata)
 	if (drvdata->hw_rev < CC_HW_REV_712 || !fips_h)
 		return;
 
+	atomic_notifier_chain_unregister(&fips_fail_notif_chain, &fips_h->nb);
+
 	/* Kill tasklet */
 	tasklet_kill(&fips_h->tasklet);
 	drvdata->fips_handle = NULL;
@@ -94,7 +120,7 @@ static void fips_dsr(unsigned long devarg)
 		cc_tee_handle_fips_error(drvdata);
 	}
 
-	/* after verifing that there is nothing to do,
+	/* after verifying that there is nothing to do,
 	 * unmask AXI completion interrupt.
 	 */
 	val = (CC_REG(HOST_IMR) & ~irq);
@@ -118,6 +144,9 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
 
 	dev_dbg(dev, "Initializing fips tasklet\n");
 	tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
+	fips_h->drvdata = p_drvdata;
+	fips_h->nb.notifier_call = cc_ree_fips_failure;
+	atomic_notifier_chain_register(&fips_fail_notif_chain, &fips_h->nb);
 
 	cc_tee_handle_fips_error(p_drvdata);
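Aside (not part of the patch): the cc_fips.c hunks above hook the driver into the kernel's FIPS failure notifier chain so a REE-side self-test failure is pushed to the TEE. A minimal sketch of that registration pattern, with hypothetical names, assuming fips_fail_notif_chain from <linux/fips.h> (available with CONFIG_CRYPTO_FIPS):

/* Sketch only. The callback runs in atomic notifier context, so it
 * should do no more than flag the failure to the other world.
 */
#include <linux/fips.h>
#include <linux/notifier.h>

static int example_fips_fail(struct notifier_block *nb, unsigned long act,
			     void *data)
{
	/* e.g. clear a "module OK" bit in a GPR that the TEE watches */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_fips_fail,
};

static int example_register(void)
{
	return atomic_notifier_chain_register(&fips_fail_notif_chain,
					      &example_nb);
}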
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index a6abe4e3bb0e..912e5ce5079d 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -25,27 +25,27 @@ struct cc_hash_handle {
 	struct list_head hash_list;
 };
 
-static const u32 digest_len_init[] = {
+static const u32 cc_digest_len_init[] = {
 	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
-static const u32 md5_init[] = {
+static const u32 cc_md5_init[] = {
 	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
-static const u32 sha1_init[] = {
+static const u32 cc_sha1_init[] = {
 	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
-static const u32 sha224_init[] = {
+static const u32 cc_sha224_init[] = {
 	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
 	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
-static const u32 sha256_init[] = {
+static const u32 cc_sha256_init[] = {
 	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
 	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
-static const u32 digest_len_sha512_init[] = {
+static const u32 cc_digest_len_sha512_init[] = {
 	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static u64 sha384_init[] = {
+static u64 cc_sha384_init[] = {
 	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
 	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
-static u64 sha512_init[] = {
+static u64 cc_sha512_init[] = {
 	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
 	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
-static const u32 sm3_init[] = {
+static const u32 cc_sm3_init[] = {
 	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
 	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
@@ -144,10 +144,11 @@ static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
 		if (ctx->hash_mode == DRV_HASH_SHA512 ||
 		    ctx->hash_mode == DRV_HASH_SHA384)
 			memcpy(state->digest_bytes_len,
-			       digest_len_sha512_init,
+			       cc_digest_len_sha512_init,
 			       ctx->hash_len);
 		else
-			memcpy(state->digest_bytes_len, digest_len_init,
+			memcpy(state->digest_bytes_len,
+			       cc_digest_len_init,
 			       ctx->hash_len);
 	}
 
@@ -898,9 +899,6 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
 	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 
 out:
-	if (rc)
-		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
 	if (ctx->key_params.key_dma_addr) {
 		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
 				 ctx->key_params.keylen, DMA_TO_DEVICE);
@@ -989,9 +987,6 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
 
 	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 
-	if (rc)
-		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-
 	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
 			 ctx->key_params.keylen, DMA_TO_DEVICE);
 	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -1873,26 +1868,26 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata)
 	int rc = 0;
 
 	/* Copy-to-sram digest-len */
-	cc_set_sram_desc(digest_len_init, sram_buff_ofs,
-			 ARRAY_SIZE(digest_len_init), larval_seq,
+	cc_set_sram_desc(cc_digest_len_init, sram_buff_ofs,
+			 ARRAY_SIZE(cc_digest_len_init), larval_seq,
 			 &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (rc)
 		goto init_digest_const_err;
 
-	sram_buff_ofs += sizeof(digest_len_init);
+	sram_buff_ofs += sizeof(cc_digest_len_init);
 	larval_seq_len = 0;
 
 	if (large_sha_supported) {
 		/* Copy-to-sram digest-len for sha384/512 */
-		cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
-				 ARRAY_SIZE(digest_len_sha512_init),
+		cc_set_sram_desc(cc_digest_len_sha512_init, sram_buff_ofs,
+				 ARRAY_SIZE(cc_digest_len_sha512_init),
 				 larval_seq, &larval_seq_len);
 		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 		if (rc)
 			goto init_digest_const_err;
 
-		sram_buff_ofs += sizeof(digest_len_sha512_init);
+		sram_buff_ofs += sizeof(cc_digest_len_sha512_init);
 		larval_seq_len = 0;
 	}
 
@@ -1900,64 +1895,64 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata)
 	hash_handle->larval_digest_sram_addr = sram_buff_ofs;
 
 	/* Copy-to-sram initial SHA* digests */
-	cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
+	cc_set_sram_desc(cc_md5_init, sram_buff_ofs, ARRAY_SIZE(cc_md5_init),
 			 larval_seq, &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (rc)
 		goto init_digest_const_err;
-	sram_buff_ofs += sizeof(md5_init);
+	sram_buff_ofs += sizeof(cc_md5_init);
 	larval_seq_len = 0;
 
-	cc_set_sram_desc(sha1_init, sram_buff_ofs,
-			 ARRAY_SIZE(sha1_init), larval_seq,
+	cc_set_sram_desc(cc_sha1_init, sram_buff_ofs,
+			 ARRAY_SIZE(cc_sha1_init), larval_seq,
 			 &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (rc)
 		goto init_digest_const_err;
-	sram_buff_ofs += sizeof(sha1_init);
+	sram_buff_ofs += sizeof(cc_sha1_init);
 	larval_seq_len = 0;
 
-	cc_set_sram_desc(sha224_init, sram_buff_ofs,
-			 ARRAY_SIZE(sha224_init), larval_seq,
+	cc_set_sram_desc(cc_sha224_init, sram_buff_ofs,
+			 ARRAY_SIZE(cc_sha224_init), larval_seq,
 			 &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (rc)
 		goto init_digest_const_err;
-	sram_buff_ofs += sizeof(sha224_init);
+	sram_buff_ofs += sizeof(cc_sha224_init);
 	larval_seq_len = 0;
 
-	cc_set_sram_desc(sha256_init, sram_buff_ofs,
-			 ARRAY_SIZE(sha256_init), larval_seq,
+	cc_set_sram_desc(cc_sha256_init, sram_buff_ofs,
+			 ARRAY_SIZE(cc_sha256_init), larval_seq,
 			 &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (rc)
 		goto init_digest_const_err;
-	sram_buff_ofs += sizeof(sha256_init);
+	sram_buff_ofs += sizeof(cc_sha256_init);
 	larval_seq_len = 0;
 
 	if (sm3_supported) {
-		cc_set_sram_desc(sm3_init, sram_buff_ofs,
-				 ARRAY_SIZE(sm3_init), larval_seq,
+		cc_set_sram_desc(cc_sm3_init, sram_buff_ofs,
+				 ARRAY_SIZE(cc_sm3_init), larval_seq,
 				 &larval_seq_len);
 		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 		if (rc)
 			goto init_digest_const_err;
-		sram_buff_ofs += sizeof(sm3_init);
+		sram_buff_ofs += sizeof(cc_sm3_init);
 		larval_seq_len = 0;
 	}
 
 	if (large_sha_supported) {
-		cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
-				 (ARRAY_SIZE(sha384_init) * 2), larval_seq,
+		cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs,
+				 (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq,
 				 &larval_seq_len);
 		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 		if (rc)
 			goto init_digest_const_err;
-		sram_buff_ofs += sizeof(sha384_init);
+		sram_buff_ofs += sizeof(cc_sha384_init);
 		larval_seq_len = 0;
 
-		cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
-				 (ARRAY_SIZE(sha512_init) * 2), larval_seq,
+		cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs,
+				 (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq,
 				 &larval_seq_len);
 		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 		if (rc)
@@ -1986,8 +1981,8 @@ static void __init cc_swap_dwords(u32 *buf, unsigned long size)
 */
 void __init cc_hash_global_init(void)
 {
-	cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
-	cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
+	cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2));
+	cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2));
 }
 
 int cc_hash_alloc(struct cc_drvdata *drvdata)
@@ -2006,18 +2001,18 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
 	INIT_LIST_HEAD(&hash_handle->hash_list);
 	drvdata->hash_handle = hash_handle;
 
-	sram_size_to_alloc = sizeof(digest_len_init) +
-			sizeof(md5_init) +
-			sizeof(sha1_init) +
-			sizeof(sha224_init) +
-			sizeof(sha256_init);
+	sram_size_to_alloc = sizeof(cc_digest_len_init) +
+			sizeof(cc_md5_init) +
+			sizeof(cc_sha1_init) +
+			sizeof(cc_sha224_init) +
+			sizeof(cc_sha256_init);
 
 	if (drvdata->hw_rev >= CC_HW_REV_713)
-		sram_size_to_alloc += sizeof(sm3_init);
+		sram_size_to_alloc += sizeof(cc_sm3_init);
 
 	if (drvdata->hw_rev >= CC_HW_REV_712)
-		sram_size_to_alloc += sizeof(digest_len_sha512_init) +
-			sizeof(sha384_init) + sizeof(sha512_init);
+		sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
+			sizeof(cc_sha384_init) + sizeof(cc_sha512_init);
 
 	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
 	if (sram_buff == NULL_SRAM_ADDR) {
@@ -2258,22 +2253,22 @@ static const void *cc_larval_digest(struct device *dev, u32 mode)
 {
 	switch (mode) {
 	case DRV_HASH_MD5:
-		return md5_init;
+		return cc_md5_init;
 	case DRV_HASH_SHA1:
-		return sha1_init;
+		return cc_sha1_init;
 	case DRV_HASH_SHA224:
-		return sha224_init;
+		return cc_sha224_init;
 	case DRV_HASH_SHA256:
-		return sha256_init;
+		return cc_sha256_init;
 	case DRV_HASH_SHA384:
-		return sha384_init;
+		return cc_sha384_init;
 	case DRV_HASH_SHA512:
-		return sha512_init;
+		return cc_sha512_init;
 	case DRV_HASH_SM3:
-		return sm3_init;
+		return cc_sm3_init;
 	default:
 		dev_err(dev, "Invalid hash mode (%d)\n", mode);
-		return md5_init;
+		return cc_md5_init;
 	}
 }
 
@@ -2301,40 +2296,40 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
 		return (hash_handle->larval_digest_sram_addr);
 	case DRV_HASH_SHA1:
 		return (hash_handle->larval_digest_sram_addr +
-			sizeof(md5_init));
+			sizeof(cc_md5_init));
 	case DRV_HASH_SHA224:
 		return (hash_handle->larval_digest_sram_addr +
-			sizeof(md5_init) +
-			sizeof(sha1_init));
+			sizeof(cc_md5_init) +
+			sizeof(cc_sha1_init));
 	case DRV_HASH_SHA256:
 		return (hash_handle->larval_digest_sram_addr +
-			sizeof(md5_init) +
-			sizeof(sha1_init) +
-			sizeof(sha224_init));
+			sizeof(cc_md5_init) +
+			sizeof(cc_sha1_init) +
+			sizeof(cc_sha224_init));
 	case DRV_HASH_SM3:
 		return (hash_handle->larval_digest_sram_addr +
-			sizeof(md5_init) +
-			sizeof(sha1_init) +
-			sizeof(sha224_init) +
-			sizeof(sha256_init));
+			sizeof(cc_md5_init) +
+			sizeof(cc_sha1_init) +
+			sizeof(cc_sha224_init) +
+			sizeof(cc_sha256_init));
 	case DRV_HASH_SHA384:
 		addr = (hash_handle->larval_digest_sram_addr +
-			sizeof(md5_init) +
-			sizeof(sha1_init) +
-			sizeof(sha224_init) +
-			sizeof(sha256_init));
+			sizeof(cc_md5_init) +
+			sizeof(cc_sha1_init) +
+			sizeof(cc_sha224_init) +
+			sizeof(cc_sha256_init));
 		if (sm3_supported)
-			addr += sizeof(sm3_init);
+			addr += sizeof(cc_sm3_init);
 		return addr;
 	case DRV_HASH_SHA512:
 		addr = (hash_handle->larval_digest_sram_addr +
-			sizeof(md5_init) +
-			sizeof(sha1_init) +
-			sizeof(sha224_init) +
-			sizeof(sha256_init) +
-			sizeof(sha384_init));
+			sizeof(cc_md5_init) +
+			sizeof(cc_sha1_init) +
+			sizeof(cc_sha224_init) +
+			sizeof(cc_sha256_init) +
+			sizeof(cc_sha384_init));
 		if (sm3_supported)
-			addr += sizeof(sm3_init);
+			addr += sizeof(cc_sm3_init);
 		return addr;
 	default:
 		dev_err(dev, "Invalid hash mode (%d)\n", mode);
@@ -2357,11 +2352,9 @@ cc_digest_len_addr(void *drvdata, u32 mode)
 	case DRV_HASH_SHA256:
 	case DRV_HASH_MD5:
 		return digest_len_addr;
-#if (CC_DEV_SHA_MAX > 256)
 	case DRV_HASH_SHA384:
 	case DRV_HASH_SHA512:
-		return digest_len_addr + sizeof(digest_len_init);
-#endif
+		return digest_len_addr + sizeof(cc_digest_len_init);
 	default:
 		return digest_len_addr; /*to avoid kernel crash*/
 	}
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
deleted file mode 100644
index 99dc69383e20..000000000000
--- a/drivers/crypto/ccree/cc_ivgen.c
+++ /dev/null
@@ -1,276 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
-
-#include <crypto/ctr.h>
-#include "cc_driver.h"
-#include "cc_ivgen.h"
-#include "cc_request_mgr.h"
-#include "cc_sram_mgr.h"
-#include "cc_buffer_mgr.h"
-
-/* The max. size of pool *MUST* be <= SRAM total size */
-#define CC_IVPOOL_SIZE 1024
-/* The first 32B fraction of pool are dedicated to the
- * next encryption "key" & "IV" for pool regeneration
- */
-#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
-#define CC_IVPOOL_GEN_SEQ_LEN 4
-
-/**
- * struct cc_ivgen_ctx -IV pool generation context
- * @pool:          the start address of the iv-pool resides in internal RAM
- * @ctr_key_dma:   address of pool's encryption key material in internal RAM
- * @ctr_iv_dma:    address of pool's counter iv in internal RAM
- * @next_iv_ofs:   the offset to the next available IV in pool
- * @pool_meta:     virt. address of the initial enc. key/IV
- * @pool_meta_dma: phys. address of the initial enc. key/IV
- */
-struct cc_ivgen_ctx {
-	cc_sram_addr_t pool;
-	cc_sram_addr_t ctr_key;
-	cc_sram_addr_t ctr_iv;
-	u32 next_iv_ofs;
-	u8 *pool_meta;
-	dma_addr_t pool_meta_dma;
-};
-
-/*!
- * Generates CC_IVPOOL_SIZE of random bytes by
- * encrypting 0's using AES128-CTR.
- *
- * \param ivgen iv-pool context
- * \param iv_seq IN/OUT array to the descriptors sequence
- * \param iv_seq_len IN/OUT pointer to the sequence length
- */
-static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx,
-			  struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
-{
-	unsigned int idx = *iv_seq_len;
-
-	if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) {
-		/* The sequence will be longer than allowed */
-		return -EINVAL;
-	}
-	/* Setup key */
-	hw_desc_init(&iv_seq[idx]);
-	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
-	set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0);
-	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
-	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
-	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
-	idx++;
-
-	/* Setup cipher state */
-	hw_desc_init(&iv_seq[idx]);
-	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
-	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
-	set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1);
-	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
-	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
-	idx++;
-
-	/* Perform dummy encrypt to skip first block */
-	hw_desc_init(&iv_seq[idx]);
-	set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE);
-	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
-	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
-	idx++;
-
-	/* Generate IV pool */
-	hw_desc_init(&iv_seq[idx]);
-	set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE);
-	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE);
-	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
-	idx++;
-
-	*iv_seq_len = idx; /* Update sequence length */
-
-	/* queue ordering assures pool readiness */
-	ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE;
-
-	return 0;
-}
-
-/*!
- * Generates the initial pool in SRAM.
- * This function should be invoked when resuming driver.
- *
- * \param drvdata
- *
- * \return int Zero for success, negative value otherwise.
- */
-int cc_init_iv_sram(struct cc_drvdata *drvdata)
-{
-	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
-	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
-	unsigned int iv_seq_len = 0;
-	int rc;
-
-	/* Generate initial enc. key/iv */
-	get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE);
-
-	/* The first 32B reserved for the enc. Key/IV */
-	ivgen_ctx->ctr_key = ivgen_ctx->pool;
-	ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;
-
-	/* Copy initial enc. key and IV to SRAM at a single descriptor */
-	hw_desc_init(&iv_seq[iv_seq_len]);
-	set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
-		     CC_IVPOOL_META_SIZE, NS_BIT);
-	set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
-		      CC_IVPOOL_META_SIZE);
-	set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
-	iv_seq_len++;
-
-	/* Generate initial pool */
-	rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len);
-	if (rc)
-		return rc;
-
-	/* Fire-and-forget */
-	return send_request_init(drvdata, iv_seq, iv_seq_len);
-}
-
-/*!
- * Free iv-pool and ivgen context.
- *
- * \param drvdata
- */
-void cc_ivgen_fini(struct cc_drvdata *drvdata)
-{
-	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
-	struct device *device = &drvdata->plat_dev->dev;
-
-	if (!ivgen_ctx)
-		return;
-
-	if (ivgen_ctx->pool_meta) {
-		memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE);
-		dma_free_coherent(device, CC_IVPOOL_META_SIZE,
-				  ivgen_ctx->pool_meta,
-				  ivgen_ctx->pool_meta_dma);
-	}
-
-	ivgen_ctx->pool = NULL_SRAM_ADDR;
-}
-
-/*!
- * Allocates iv-pool and maps resources.
- * This function generates the first IV pool.
- *
- * \param drvdata Driver's private context
- *
- * \return int Zero for success, negative value otherwise.
- */
-int cc_ivgen_init(struct cc_drvdata *drvdata)
-{
-	struct cc_ivgen_ctx *ivgen_ctx;
-	struct device *device = &drvdata->plat_dev->dev;
-	int rc;
-
-	/* Allocate "this" context */
-	ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL);
-	if (!ivgen_ctx)
-		return -ENOMEM;
-
-	drvdata->ivgen_handle = ivgen_ctx;
-
-	/* Allocate pool's header for initial enc. key/IV */
-	ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
-						  &ivgen_ctx->pool_meta_dma,
-						  GFP_KERNEL);
-	if (!ivgen_ctx->pool_meta) {
-		dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
-			CC_IVPOOL_META_SIZE);
-		rc = -ENOMEM;
-		goto out;
-	}
-	/* Allocate IV pool in SRAM */
-	ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE);
-	if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
-		dev_err(device, "SRAM pool exhausted\n");
-		rc = -ENOMEM;
-		goto out;
-	}
-
-	return cc_init_iv_sram(drvdata);
-
-out:
-	cc_ivgen_fini(drvdata);
-	return rc;
-}
-
-/*!
- * Acquires 16 Bytes IV from the iv-pool
- *
- * \param drvdata Driver private context
- * \param iv_out_dma Array of physical IV out addresses
- * \param iv_out_dma_len Length of iv_out_dma array (additional elements
- *                       of iv_out_dma array are ignore)
- * \param iv_out_size May be 8 or 16 bytes long
- * \param iv_seq IN/OUT array to the descriptors sequence
- * \param iv_seq_len IN/OUT pointer to the sequence length
- *
- * \return int Zero for success, negative value otherwise.
- */
-int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
-	      unsigned int iv_out_dma_len, unsigned int iv_out_size,
-	      struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
-{
-	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
-	unsigned int idx = *iv_seq_len;
-	struct device *dev = drvdata_to_dev(drvdata);
-	unsigned int t;
-
-	if (iv_out_size != CC_AES_IV_SIZE &&
-	    iv_out_size != CTR_RFC3686_IV_SIZE) {
-		return -EINVAL;
-	}
-	if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) {
-		/* The sequence will be longer than allowed */
-		return -EINVAL;
-	}
-
-	/* check that number of generated IV is limited to max dma address
-	 * iv buffer size
-	 */
-	if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) {
-		/* The sequence will be longer than allowed */
-		return -EINVAL;
-	}
-
-	for (t = 0; t < iv_out_dma_len; t++) {
-		/* Acquire IV from pool */
-		hw_desc_init(&iv_seq[idx]);
-		set_din_sram(&iv_seq[idx], (ivgen_ctx->pool +
-					    ivgen_ctx->next_iv_ofs),
-			     iv_out_size);
-		set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size,
-			      NS_BIT, 0);
-		set_flow_mode(&iv_seq[idx], BYPASS);
-		idx++;
-	}
-
-	/* Bypass operation is proceeded by crypto sequence, hence must
-	 *  assure bypass-write-transaction by a memory barrier
-	 */
-	hw_desc_init(&iv_seq[idx]);
-	set_din_no_dma(&iv_seq[idx], 0, 0xfffff0);
-	set_dout_no_dma(&iv_seq[idx], 0, 0, 1);
-	idx++;
-
-	*iv_seq_len = idx; /* update seq length */
-
-	/* Update iv index */
-	ivgen_ctx->next_iv_ofs += iv_out_size;
-
-	if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
-		dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
-		/* pool is drained -regenerate it! */
-		return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len);
-	}
-
-	return 0;
-}
diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h
deleted file mode 100644
index a9f5e8bba4f1..000000000000
--- a/drivers/crypto/ccree/cc_ivgen.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
-
-#ifndef __CC_IVGEN_H__
-#define __CC_IVGEN_H__
-
-#include "cc_hw_queue_defs.h"
-
-#define CC_IVPOOL_SEQ_LEN 8
-
-/*!
- * Allocates iv-pool and maps resources.
- * This function generates the first IV pool.
- *
- * \param drvdata Driver's private context
- *
- * \return int Zero for success, negative value otherwise.
- */
-int cc_ivgen_init(struct cc_drvdata *drvdata);
-
-/*!
- * Free iv-pool and ivgen context.
- *
- * \param drvdata
- */
-void cc_ivgen_fini(struct cc_drvdata *drvdata);
-
-/*!
- * Generates the initial pool in SRAM.
- * This function should be invoked when resuming DX driver.
- *
- * \param drvdata
- *
- * \return int Zero for success, negative value otherwise.
- */
-int cc_init_iv_sram(struct cc_drvdata *drvdata);
-
-/*!
- * Acquires 16 Bytes IV from the iv-pool
- *
- * \param drvdata Driver private context
- * \param iv_out_dma Array of physical IV out addresses
- * \param iv_out_dma_len Length of iv_out_dma array (additional elements of
- *                       iv_out_dma array are ignore)
- * \param iv_out_size May be 8 or 16 bytes long
- * \param iv_seq IN/OUT array to the descriptors sequence
- * \param iv_seq_len IN/OUT pointer to the sequence length
- *
- * \return int Zero for success, negative value otherwise.
- */
-int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
-	      unsigned int iv_out_dma_len, unsigned int iv_out_size,
-	      struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len);
-
-#endif /*__CC_IVGEN_H__*/
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 899a52f05b7a..24c368b866f6 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -8,7 +8,6 @@
 #include "cc_buffer_mgr.h"
 #include "cc_request_mgr.h"
 #include "cc_sram_mgr.h"
-#include "cc_ivgen.h"
 #include "cc_hash.h"
 #include "cc_pm.h"
 #include "cc_fips.h"
@@ -23,14 +22,8 @@ const struct dev_pm_ops ccree_pm = {
 int cc_pm_suspend(struct device *dev)
 {
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
-	int rc;
 
 	dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
-	rc = cc_suspend_req_queue(drvdata);
-	if (rc) {
-		dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
-		return rc;
-	}
 	fini_cc_regs(drvdata);
 	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
 	cc_clk_off(drvdata);
@@ -49,7 +42,7 @@ int cc_pm_resume(struct device *dev)
 		dev_err(dev, "failed getting clock back on. We're toast.\n");
 		return rc;
 	}
-	/* wait for Crytpcell reset completion */
+	/* wait for Cryptocell reset completion */
 	if (!cc_wait_for_reset_completion(drvdata)) {
 		dev_err(dev, "Cryptocell reset not completed");
 		return -EBUSY;
@@ -64,16 +57,8 @@ int cc_pm_resume(struct device *dev)
 	/* check if tee fips error occurred during power down */
 	cc_tee_handle_fips_error(drvdata);
 
-	rc = cc_resume_req_queue(drvdata);
-	if (rc) {
-		dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
-		return rc;
-	}
-
-	/* must be after the queue resuming as it uses the HW queue*/
 	cc_init_hash_sram(drvdata);
 
-	cc_init_iv_sram(drvdata);
 	return 0;
 }
 
@@ -82,28 +67,20 @@ int cc_pm_get(struct device *dev)
 	int rc = 0;
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 
-	if (cc_req_queue_suspended(drvdata))
+	if (drvdata->pm_on)
 		rc = pm_runtime_get_sync(dev);
-	else
-		pm_runtime_get_noresume(dev);
 
-	return rc;
+	return (rc == 1 ? 0 : rc);
 }
 
-int cc_pm_put_suspend(struct device *dev)
+void cc_pm_put_suspend(struct device *dev)
 {
-	int rc = 0;
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 
-	if (!cc_req_queue_suspended(drvdata)) {
+	if (drvdata->pm_on) {
 		pm_runtime_mark_last_busy(dev);
-		rc = pm_runtime_put_autosuspend(dev);
-	} else {
-		/* Something wrong happens*/
-		dev_err(dev, "request to suspend already suspended queue");
-		rc = -EBUSY;
+		pm_runtime_put_autosuspend(dev);
 	}
-	return rc;
 }
 
 bool cc_pm_is_dev_suspended(struct device *dev)
@@ -116,10 +93,10 @@ int cc_pm_init(struct cc_drvdata *drvdata)
 {
 	struct device *dev = drvdata_to_dev(drvdata);
 
-	/* must be before the enabling to avoid resdundent suspending */
+	/* must be before the enabling to avoid redundant suspending */
 	pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
 	pm_runtime_use_autosuspend(dev);
-	/* activate the PM module */
+	/* set us as active - note we won't do PM ops until cc_pm_go()! */
 	return pm_runtime_set_active(dev);
 }
 
@@ -127,9 +104,11 @@ int cc_pm_init(struct cc_drvdata *drvdata)
 void cc_pm_go(struct cc_drvdata *drvdata)
 {
 	pm_runtime_enable(drvdata_to_dev(drvdata));
+	drvdata->pm_on = true;
 }
 
 void cc_pm_fini(struct cc_drvdata *drvdata)
 {
 	pm_runtime_disable(drvdata_to_dev(drvdata));
+	drvdata->pm_on = false;
 }
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index a7d98a5da2e1..80a18e11cae4 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -21,7 +21,7 @@ void cc_pm_fini(struct cc_drvdata *drvdata);
 int cc_pm_suspend(struct device *dev);
 int cc_pm_resume(struct device *dev);
 int cc_pm_get(struct device *dev);
-int cc_pm_put_suspend(struct device *dev);
+void cc_pm_put_suspend(struct device *dev);
 bool cc_pm_is_dev_suspended(struct device *dev);
 
 #else
@@ -35,25 +35,12 @@ static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
 
 static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
 
-static inline int cc_pm_suspend(struct device *dev)
-{
-	return 0;
-}
-
-static inline int cc_pm_resume(struct device *dev)
-{
-	return 0;
-}
-
 static inline int cc_pm_get(struct device *dev)
 {
 	return 0;
 }
 
-static inline int cc_pm_put_suspend(struct device *dev)
-{
-	return 0;
-}
+static inline void cc_pm_put_suspend(struct device *dev) {}
 
 static inline bool cc_pm_is_dev_suspended(struct device *dev)
 {
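Aside (not part of the patch): the cc_pm.c hunks above gate runtime PM on the new drvdata->pm_on flag and fold pm_runtime_get_sync()'s return value of 1 (device already active) into 0. A hedged sketch of that get/put pattern, with hypothetical function names:

/* Sketch only. pm_runtime_get_sync() returns 1 if the device was
 * already active; callers here treat that the same as success.
 */
#include <linux/pm_runtime.h>

static int example_pm_get(struct device *dev, bool pm_on)
{
	int rc = 0;

	if (pm_on)
		rc = pm_runtime_get_sync(dev);

	return rc == 1 ? 0 : rc;
}

static void example_pm_put(struct device *dev, bool pm_on)
{
	if (pm_on) {
		/* defer the actual suspend via the autosuspend timer */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}
}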
&iv_seq_len); - - if (rc) { - dev_err(dev, "Failed to generate IV (rc=%d)\n", rc); - return rc; - } - - total_seq_len += iv_seq_len; - } used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & @@ -328,14 +301,12 @@ static int cc_do_send_request(struct cc_drvdata *drvdata, /* * We are about to push command to the HW via the command registers - * that may refernece hsot memory. We need to issue a memory barrier - * to make sure there are no outstnading memory writes + * that may reference host memory. We need to issue a memory barrier + * to make sure there are no outstanding memory writes */ wmb(); /* STAT_PHASE_4: Push sequence */ - if (ivgen) - enqueue_seq(drvdata, iv_seq, iv_seq_len); enqueue_seq(drvdata, desc, len); @@ -355,9 +326,6 @@ static int cc_do_send_request(struct cc_drvdata *drvdata, /* Update the free slots in HW queue */ req_mgr_h->q_free_slots -= total_seq_len; } - - /* Operation still in process */ - return -EINPROGRESS; } static void cc_enqueue_backlog(struct cc_drvdata *drvdata, @@ -380,8 +348,6 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata) struct cc_bl_item *bli; struct cc_crypto_req *creq; void *req; - bool ivgen; - unsigned int total_len; struct device *dev = drvdata_to_dev(drvdata); int rc; @@ -406,12 +372,9 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata) bli->notif = true; } - ivgen = !!creq->ivgen_dma_addr_len; - total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0); - spin_lock(&mgr->hw_lock); - rc = cc_queues_status(drvdata, mgr, total_len); + rc = cc_queues_status(drvdata, mgr, bli->len); if (rc) { /* * There is still not room in the FIFO for @@ -422,20 +385,15 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata) return; } - rc = cc_do_send_request(drvdata, &bli->creq, bli->desc, - bli->len, false, ivgen); - + cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len, + false); spin_unlock(&mgr->hw_lock); - if (rc != -EINPROGRESS) { - cc_pm_put_suspend(dev); - creq->user_cb(dev, req, rc); - } - /* Remove ourselves from the backlog list */ spin_lock(&mgr->bl_lock); list_del(&bli->list); --mgr->bl_len; + kfree(bli); } spin_unlock(&mgr->bl_lock); @@ -447,8 +405,6 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, { int rc; struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; - bool ivgen = !!cc_req->ivgen_dma_addr_len; - unsigned int total_len = len + (ivgen ? 
CC_IVPOOL_SEQ_LEN : 0); struct device *dev = drvdata_to_dev(drvdata); bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG; gfp_t flags = cc_gfp_flags(req); @@ -456,12 +412,12 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, rc = cc_pm_get(dev); if (rc) { - dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc); + dev_err(dev, "cc_pm_get returned %x\n", rc); return rc; } spin_lock_bh(&mgr->hw_lock); - rc = cc_queues_status(drvdata, mgr, total_len); + rc = cc_queues_status(drvdata, mgr, len); #ifdef CC_DEBUG_FORCE_BACKLOG if (backlog_ok) @@ -485,9 +441,10 @@ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, return -EBUSY; } - if (!rc) - rc = cc_do_send_request(drvdata, cc_req, desc, len, false, - ivgen); + if (!rc) { + cc_do_send_request(drvdata, cc_req, desc, len, false); + rc = -EINPROGRESS; + } spin_unlock_bh(&mgr->hw_lock); return rc; @@ -507,7 +464,7 @@ int cc_send_sync_request(struct cc_drvdata *drvdata, rc = cc_pm_get(dev); if (rc) { - dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc); + dev_err(dev, "cc_pm_get returned %x\n", rc); return rc; } @@ -527,14 +484,8 @@ int cc_send_sync_request(struct cc_drvdata *drvdata, reinit_completion(&drvdata->hw_queue_avail); } - rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false); + cc_do_send_request(drvdata, cc_req, desc, len, true); spin_unlock_bh(&mgr->hw_lock); - - if (rc != -EINPROGRESS) { - cc_pm_put_suspend(dev); - return rc; - } - wait_for_completion(&cc_req->seq_compl); return 0; } @@ -567,8 +518,8 @@ int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc, /* * We are about to push command to the HW via the command registers - * that may refernece hsot memory. We need to issue a memory barrier - * to make sure there are no outstnading memory writes + * that may reference host memory. We need to issue a memory barrier + * to make sure there are no outstanding memory writes */ wmb(); enqueue_seq(drvdata, desc, len); @@ -703,7 +654,7 @@ static void comp_handler(unsigned long devarg) request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata); } - /* after verifing that there is nothing to do, + /* after verifying that there is nothing to do, * unmask AXI completion interrupt */ cc_iowrite(drvdata, CC_REG(HOST_IMR), @@ -712,52 +663,3 @@ static void comp_handler(unsigned long devarg) cc_proc_backlog(drvdata); dev_dbg(dev, "Comp. handler done.\n"); } - -/* - * resume the queue configuration - no need to take the lock as this happens - * inside the spin lock protection - */ -#if defined(CONFIG_PM) -int cc_resume_req_queue(struct cc_drvdata *drvdata) -{ - struct cc_req_mgr_handle *request_mgr_handle = - drvdata->request_mgr_handle; - - spin_lock_bh(&request_mgr_handle->hw_lock); - request_mgr_handle->is_runtime_suspended = false; - spin_unlock_bh(&request_mgr_handle->hw_lock); - - return 0; -} - -/* - * suspend the queue configuration. Since it is used for the runtime suspend - * only verify that the queue can be suspended. 
- */ -int cc_suspend_req_queue(struct cc_drvdata *drvdata) -{ - struct cc_req_mgr_handle *request_mgr_handle = - drvdata->request_mgr_handle; - - /* lock the send_request */ - spin_lock_bh(&request_mgr_handle->hw_lock); - if (request_mgr_handle->req_queue_head != - request_mgr_handle->req_queue_tail) { - spin_unlock_bh(&request_mgr_handle->hw_lock); - return -EBUSY; - } - request_mgr_handle->is_runtime_suspended = true; - spin_unlock_bh(&request_mgr_handle->hw_lock); - - return 0; -} - -bool cc_req_queue_suspended(struct cc_drvdata *drvdata) -{ - struct cc_req_mgr_handle *request_mgr_handle = - drvdata->request_mgr_handle; - - return request_mgr_handle->is_runtime_suspended; -} - -#endif diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h index f46cf766fe4d..ff7746aaaf35 100644 --- a/drivers/crypto/ccree/cc_request_mgr.h +++ b/drivers/crypto/ccree/cc_request_mgr.h @@ -40,12 +40,4 @@ void complete_request(struct cc_drvdata *drvdata); void cc_req_mgr_fini(struct cc_drvdata *drvdata); -#if defined(CONFIG_PM) -int cc_resume_req_queue(struct cc_drvdata *drvdata); - -int cc_suspend_req_queue(struct cc_drvdata *drvdata); - -bool cc_req_queue_suspended(struct cc_drvdata *drvdata); -#endif - #endif /*__REQUEST_MGR_H__*/ |
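
Taken together, the hunks above leave a much simpler runtime-PM contract in the request path: cc_pm_get() hides pm_runtime_get_sync()'s "already active" return value of 1, cc_pm_put_suspend() can no longer fail, and the per-queue suspend bookkeeping (cc_suspend_req_queue() and friends) is replaced by the drvdata->pm_on flag. A minimal caller-side sketch, assuming only the post-patch signatures shown in this diff; the enclosing function is hypothetical and not part of the patch:

/*
 * Hypothetical example - not part of the patch. Only cc_pm_get(),
 * cc_pm_put_suspend() and the dev_err() message come from the driver.
 */
static int example_submit(struct device *dev)
{
	int rc;

	/* cc_pm_get() now returns 0 on success: the 1 that
	 * pm_runtime_get_sync() may return for an already-active device
	 * is folded into 0, so callers only test for failure.
	 */
	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "cc_pm_get returned %x\n", rc);
		return rc;
	}

	/* ... build and push the HW descriptor sequence here; the real
	 * request path instead returns -EINPROGRESS to the crypto API and
	 * drops the PM reference from its completion handler ...
	 */

	/* cc_pm_put_suspend() is void after the rework: it marks the
	 * device busy and schedules autosuspend, with no -EBUSY path
	 * left for callers to handle.
	 */
	cc_pm_put_suspend(dev);
	return 0;
}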