Diffstat (limited to 'drivers/crypto')
-rw-r--r--   drivers/crypto/ccp/ccp-crypto-aes-cmac.c         |    3
-rw-r--r--   drivers/crypto/ccp/ccp-crypto-sha.c              |    3
-rw-r--r--   drivers/crypto/ccp/ccp-dev.c                     |    6
-rw-r--r--   drivers/crypto/marvell/cesa.c                    |    2
-rw-r--r--   drivers/crypto/marvell/cesa.h                    |    3
-rw-r--r--   drivers/crypto/marvell/hash.c                    |  106
-rw-r--r--   drivers/crypto/qat/qat_common/adf_common_drv.h   |   11
-rw-r--r--   drivers/crypto/qat/qat_common/adf_ctl_drv.c      |    6
-rw-r--r--   drivers/crypto/qat/qat_common/adf_sriov.c        |   26
-rw-r--r--   drivers/crypto/talitos.c                         |   87
10 files changed, 140 insertions(+), 113 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 3d9acc53d247..60fc0fa26fd3 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
 	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
 	struct ccp_aes_cmac_exp_ctx state;
 
+	/* Don't let anything leak to 'out' */
+	memset(&state, 0, sizeof(state));
+
 	state.null_msg = rctx->null_msg;
 	memcpy(state.iv, rctx->iv, sizeof(state.iv));
 	state.buf_count = rctx->buf_count;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index b5ad72897dc2..8f36af62fe95 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
 	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
 	struct ccp_sha_exp_ctx state;
 
+	/* Don't let anything leak to 'out' */
+	memset(&state, 0, sizeof(state));
+
 	state.type = rctx->type;
 	state.msg_bits = rctx->msg_bits;
 	state.first = rctx->first;
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 336e5b780fcb..4dbc18727235 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -53,7 +53,7 @@ static DEFINE_RWLOCK(ccp_unit_lock);
 static LIST_HEAD(ccp_units);
 
 /* Round-robin counter */
-static DEFINE_RWLOCK(ccp_rr_lock);
+static DEFINE_SPINLOCK(ccp_rr_lock);
 static struct ccp_device *ccp_rr;
 
 /* Ever-increasing value to produce unique unit numbers */
@@ -128,14 +128,14 @@ static struct ccp_device *ccp_get_device(void)
 	 */
 	read_lock_irqsave(&ccp_unit_lock, flags);
 	if (!list_empty(&ccp_units)) {
-		write_lock_irqsave(&ccp_rr_lock, flags);
+		spin_lock(&ccp_rr_lock);
 		dp = ccp_rr;
 		if (list_is_last(&ccp_rr->entry, &ccp_units))
 			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
 						  entry);
 		else
 			ccp_rr = list_next_entry(ccp_rr, entry);
-		write_unlock_irqrestore(&ccp_rr_lock, flags);
+		spin_unlock(&ccp_rr_lock);
 	}
 	read_unlock_irqrestore(&ccp_unit_lock, flags);
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index c0656e7f37b5..80239ae69527 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -420,7 +420,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
 	cesa->regs = devm_ioremap_resource(dev, res);
 	if (IS_ERR(cesa->regs))
-		return -ENOMEM;
+		return PTR_ERR(cesa->regs);
 
 	ret = mv_cesa_dev_dma_init(cesa);
 	if (ret)
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index bd985e72520b..74071e45ada0 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -588,6 +588,7 @@ struct mv_cesa_ahash_dma_req {
 	struct mv_cesa_tdma_req base;
 	u8 *padding;
 	dma_addr_t padding_dma;
+	u8 *cache;
 	dma_addr_t cache_dma;
 };
 
@@ -609,7 +610,7 @@ struct mv_cesa_ahash_req {
 		struct mv_cesa_ahash_std_req std;
 	} req;
 	struct mv_cesa_op_ctx op_tmpl;
-	u8 *cache;
+	u8 cache[CESA_MAX_HASH_BLOCK_SIZE];
 	unsigned int cache_ptr;
 	u64 len;
 	int src_nents;
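
Both ccp export fixes above follow the same hardening pattern: the on-stack
export structure is zeroed in full before individual fields are copied in, so
compiler-inserted padding (and any field a given code path leaves unset)
cannot leak old stack bytes through the 'out' buffer. A minimal standalone
sketch of the pattern, with an illustrative struct layout rather than the
driver's real context:

    #include <string.h>

    /* Hypothetical export blob; 'count' is likely followed by padding. */
    struct exp_ctx {
        unsigned char iv[16];
        unsigned int count;
    };

    static void ctx_export(const unsigned char *iv, unsigned int count,
                           void *out)
    {
        struct exp_ctx state;

        /* Zero everything first: padding bytes and unset fields must
         * not leak stack contents to 'out'. */
        memset(&state, 0, sizeof(state));

        memcpy(state.iv, iv, sizeof(state.iv));
        state.count = count;
        memcpy(out, &state, sizeof(state));
    }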
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 683cca9ac3c4..7ca2e0f9dc2e 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -45,69 +45,25 @@ mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
 	return mv_cesa_req_dma_iter_next_op(&iter->base);
 }
 
-static inline int mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_req *creq,
-						gfp_t flags)
+static inline int
+mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
 {
-	struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma;
-
-	creq->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
-				     &dreq->cache_dma);
-	if (!creq->cache)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq,
-						gfp_t flags)
-{
-	creq->cache = kzalloc(CESA_MAX_HASH_BLOCK_SIZE, flags);
-	if (!creq->cache)
+	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
+				    &req->cache_dma);
+	if (!req->cache)
 		return -ENOMEM;
 
 	return 0;
 }
 
-static int mv_cesa_ahash_alloc_cache(struct ahash_request *req)
-{
-	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		      GFP_KERNEL : GFP_ATOMIC;
-	int ret;
-
-	if (creq->cache)
-		return 0;
-
-	if (creq->req.base.type == CESA_DMA_REQ)
-		ret = mv_cesa_ahash_dma_alloc_cache(creq, flags);
-	else
-		ret = mv_cesa_ahash_std_alloc_cache(creq, flags);
-
-	return ret;
-}
-
-static inline void mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_req *creq)
-{
-	dma_pool_free(cesa_dev->dma->cache_pool, creq->cache,
-		      creq->req.dma.cache_dma);
-}
-
-static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq)
-{
-	kfree(creq->cache);
-}
-
-static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq)
+static inline void
+mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
 {
-	if (!creq->cache)
+	if (!req->cache)
 		return;
 
-	if (creq->req.base.type == CESA_DMA_REQ)
-		mv_cesa_ahash_dma_free_cache(creq);
-	else
-		mv_cesa_ahash_std_free_cache(creq);
-
-	creq->cache = NULL;
+	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
+		      req->cache_dma);
 }
 
 static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
@@ -146,6 +102,7 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
 
 	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
+	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
 	mv_cesa_dma_cleanup(&creq->req.dma.base);
 }
 
@@ -161,8 +118,6 @@ static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
 {
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
 
-	mv_cesa_ahash_free_cache(creq);
-
 	if (creq->req.base.type == CESA_DMA_REQ)
 		mv_cesa_ahash_dma_last_cleanup(req);
 }
@@ -445,14 +400,6 @@ static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
 static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
 {
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
-	int ret;
-
-	if (((creq->cache_ptr + req->nbytes) & CESA_HASH_BLOCK_SIZE_MSK) &&
-	    !creq->last_req) {
-		ret = mv_cesa_ahash_alloc_cache(req);
-		if (ret)
-			return ret;
-	}
 
 	if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
 		*cached = true;
@@ -505,10 +452,17 @@ mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
 			    gfp_t flags)
 {
 	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
+	int ret;
 
 	if (!creq->cache_ptr)
 		return 0;
 
+	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
+	if (ret)
+		return ret;
+
+	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);
+
 	return mv_cesa_dma_add_data_transfer(chain,
 					     CESA_SA_DATA_SRAM_OFFSET,
 					     ahashdreq->cache_dma,
@@ -848,10 +802,6 @@ static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
 	if (!cache_ptr)
 		return 0;
 
-	ret = mv_cesa_ahash_alloc_cache(req);
-	if (ret)
-		return ret;
-
 	memcpy(creq->cache, cache, cache_ptr);
 	creq->cache_ptr = cache_ptr;
 
@@ -860,9 +810,14 @@ static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
 
 static int mv_cesa_md5_init(struct ahash_request *req)
 {
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
 	struct mv_cesa_op_ctx tmpl = { };
 
 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
+	creq->state[0] = MD5_H0;
+	creq->state[1] = MD5_H1;
+	creq->state[2] = MD5_H2;
+	creq->state[3] = MD5_H3;
 
 	mv_cesa_ahash_init(req, &tmpl, true);
 
@@ -923,9 +878,15 @@ struct ahash_alg mv_md5_alg = {
 
 static int mv_cesa_sha1_init(struct ahash_request *req)
 {
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
 	struct mv_cesa_op_ctx tmpl = { };
 
 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
+	creq->state[0] = SHA1_H0;
+	creq->state[1] = SHA1_H1;
+	creq->state[2] = SHA1_H2;
+	creq->state[3] = SHA1_H3;
+	creq->state[4] = SHA1_H4;
 
 	mv_cesa_ahash_init(req, &tmpl, false);
 
@@ -986,9 +947,18 @@ struct ahash_alg mv_sha1_alg = {
 
 static int mv_cesa_sha256_init(struct ahash_request *req)
 {
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
 	struct mv_cesa_op_ctx tmpl = { };
 
 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
+	creq->state[0] = SHA256_H0;
+	creq->state[1] = SHA256_H1;
+	creq->state[2] = SHA256_H2;
+	creq->state[3] = SHA256_H3;
+	creq->state[4] = SHA256_H4;
+	creq->state[5] = SHA256_H5;
+	creq->state[6] = SHA256_H6;
+	creq->state[7] = SHA256_H7;
 
 	mv_cesa_ahash_init(req, &tmpl, false);
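
The marvell/cesa hash rework above changes cache ownership: the request
context now embeds a fixed CESA_MAX_HASH_BLOCK_SIZE byte array instead of a
lazily allocated pointer, and a dma_pool copy of it is allocated only while a
DMA chain is being built, then released in the DMA cleanup path. A rough
sketch of that ownership model, with illustrative names and plain malloc/free
standing in for the dma_pool calls:

    #include <stdlib.h>
    #include <string.h>

    #define MAX_BLOCK_SIZE 64  /* CESA_MAX_HASH_BLOCK_SIZE in the driver */

    struct dma_part {              /* lives only for one DMA operation */
        unsigned char *cache;      /* dma_pool buffer in the driver */
    };

    struct hash_req {
        unsigned char cache[MAX_BLOCK_SIZE]; /* inline, never allocated */
        unsigned int cache_ptr;
        struct dma_part dma;
    };

    /* Building the DMA chain: snapshot the inline cache only when there
     * is cached data; the DMA cleanup path frees the snapshot again. */
    static int dma_add_cache(struct hash_req *req)
    {
        if (!req->cache_ptr)
            return 0;

        req->dma.cache = malloc(MAX_BLOCK_SIZE); /* dma_pool_alloc() */
        if (!req->dma.cache)
            return -1;                           /* -ENOMEM */

        memcpy(req->dma.cache, req->cache, req->cache_ptr);
        return 0;
    }

    static void dma_free_cache(struct hash_req *req)
    {
        free(req->dma.cache);                    /* dma_pool_free() */
        req->dma.cache = NULL;
    }

With the buffer always present, the allocation failure paths disappear from
the import and cache_req routines, and the md5/sha1/sha256 init functions can
seed creq->state with the standard initial hash values up front.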
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 0e82ce3c383e..976b01e58afb 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -236,6 +236,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
 				 uint32_t vf_mask);
 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
 #else
 static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 {
@@ -253,5 +255,14 @@ static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 {
 }
+
+static inline int adf_init_pf_wq(void)
+{
+	return 0;
+}
+
+static inline void adf_exit_pf_wq(void)
+{
+}
 #endif
 #endif
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 5c897e6e7994..3c3f948290ca 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -462,12 +462,17 @@ static int __init adf_register_ctl_device_driver(void)
 	if (adf_init_aer())
 		goto err_aer;
 
+	if (adf_init_pf_wq())
+		goto err_pf_wq;
+
 	if (qat_crypto_register())
 		goto err_crypto_register;
 
 	return 0;
 
 err_crypto_register:
+	adf_exit_pf_wq();
+err_pf_wq:
 	adf_exit_aer();
 err_aer:
 	adf_chr_drv_destroy();
@@ -480,6 +485,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
 {
 	adf_chr_drv_destroy();
 	adf_exit_aer();
+	adf_exit_pf_wq();
 	qat_crypto_unregister();
 	adf_clean_vf_map(false);
 	mutex_destroy(&adf_ctl_lock);
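
The adf_register_ctl_device_driver() change above slots the new workqueue
step into the usual goto unwind ladder: each init step gets a label, and a
failure jumps to the label that tears down everything initialized before it,
in reverse order. A generic, compilable sketch of the idiom, with stand-in
init/exit functions rather than the qat ones:

    #include <stdio.h>

    static int  init_a(void) { return 0; }  /* stand-ins for adf_init_aer(), */
    static void exit_a(void) { }            /* adf_init_pf_wq(), ...         */
    static int  init_b(void) { return 0; }
    static void exit_b(void) { }
    static int  init_c(void) { return -1; } /* force the unwind path */

    static int my_module_init(void)
    {
        int ret;

        if ((ret = init_a()))
            goto err_a;
        if ((ret = init_b()))
            goto err_b;
        if ((ret = init_c()))
            goto err_c;
        return 0;

    err_c:
        exit_b();   /* tear down in reverse order of setup */
    err_b:
        exit_a();
    err_a:
        return ret;
    }

    int main(void)
    {
        printf("init: %d\n", my_module_init()); /* prints init: -1 */
        return 0;
    }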
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 1117a8b58280..38a0415e767d 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
 	int i;
 	u32 reg;
 
-	/* Workqueue for PF2VF responses */
-	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
-	if (!pf2vf_resp_wq)
-		return -ENOMEM;
-
 	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
 	     i++, vf_info++) {
 		/* This ptr will be populated when VFs will be created */
@@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 
 	kfree(accel_dev->pf.vf_info);
 	accel_dev->pf.vf_info = NULL;
-
-	if (pf2vf_resp_wq) {
-		destroy_workqueue(pf2vf_resp_wq);
-		pf2vf_resp_wq = NULL;
-	}
 }
 EXPORT_SYMBOL_GPL(adf_disable_sriov);
 
@@ -304,3 +294,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 	return numvfs;
 }
 EXPORT_SYMBOL_GPL(adf_sriov_configure);
+
+int __init adf_init_pf_wq(void)
+{
+	/* Workqueue for PF2VF responses */
+	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+
+	return !pf2vf_resp_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_pf_wq(void)
+{
+	if (pf2vf_resp_wq) {
+		destroy_workqueue(pf2vf_resp_wq);
+		pf2vf_resp_wq = NULL;
+	}
+}
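
Moving create_workqueue() out of adf_enable_sriov() into adf_init_pf_wq()
ties the queue's lifetime to the module rather than to each SR-IOV
enable/disable cycle, and NULLing the pointer in adf_exit_pf_wq() keeps the
teardown idempotent. The matching header stubs in adf_common_drv.h follow the
kernel's config-gating idiom: real declarations in the SR-IOV build, empty
static inline stand-ins otherwise, so callers like adf_ctl_drv.c need no
#ifdefs. A reduced sketch of that idiom, with an illustrative config symbol
and function names:

    /* foo.h -- illustrative config-gated API */
    #ifdef CONFIG_FOO
    int foo_init(void);
    void foo_exit(void);
    #else
    static inline int foo_init(void) { return 0; }  /* always "succeeds" */
    static inline void foo_exit(void) { }           /* compiles away */
    #endif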
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a0d4a08313ae..aae05547b924 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
 	ptr->eptr = upper_32_bits(dma_addr);
 }
 
+static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
+			     struct talitos_ptr *src_ptr, bool is_sec1)
+{
+	dst_ptr->ptr = src_ptr->ptr;
+	if (!is_sec1)
+		dst_ptr->eptr = src_ptr->eptr;
+}
+
 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
 			       bool is_sec1)
 {
@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
 			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							 : DMA_TO_DEVICE);
-
 	/* hmac data */
 	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
 	if (sg_count > 1 &&
 	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
 					 areq->assoclen,
 					 &edesc->link_tbl[tbl_off])) > 1) {
-		tbl_off += ret;
-
 		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
 			       sizeof(struct talitos_ptr), 0);
 		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
 
 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 					   edesc->dma_len, DMA_BIDIRECTIONAL);
+
+		tbl_off += ret;
 	} else {
 		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
 		desc->ptr[1].j_extent = 0;
@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
 		sg_link_tbl_len += authsize;
 
-	if (sg_count > 1 &&
-	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
-					 sg_link_tbl_len,
-					 &edesc->link_tbl[tbl_off])) > 1) {
-		tbl_off += ret;
+	if (sg_count == 1) {
+		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
+			       areq->assoclen, 0);
+	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
+						areq->assoclen, sg_link_tbl_len,
+						&edesc->link_tbl[tbl_off])) >
+		   1) {
 		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
 		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + tbl_off *
@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 			       sizeof(struct talitos_ptr), 0);
 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 					   edesc->dma_len,
 					   DMA_BIDIRECTIONAL);
-	} else
-		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
+		tbl_off += ret;
+	} else {
+		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
+	}
 
 	/* cipher out */
 	desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 
 	edesc->icv_ool = false;
 
-	if (sg_count > 1 &&
-	    (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+	if (sg_count == 1) {
+		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
+			       areq->assoclen, 0);
+	} else if ((sg_count =
+		    sg_to_link_tbl_offset(areq->dst, sg_count,
 					  areq->assoclen, cryptlen,
-					  &edesc->link_tbl[tbl_off])) >
-	    1) {
+					  &edesc->link_tbl[tbl_off])) > 1) {
 		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
 
 		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 					   edesc->dma_len, DMA_BIDIRECTIONAL);
 
 		edesc->icv_ool = true;
-	} else
-		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
+	} else {
+		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
+	}
 
 	/* iv out */
 	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2629,21 +2643,11 @@ struct talitos_crypto_alg {
 	struct talitos_alg_template algt;
 };
 
-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_init_common(struct talitos_ctx *ctx,
+			       struct talitos_crypto_alg *talitos_alg)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct talitos_crypto_alg *talitos_alg;
-	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct talitos_private *priv;
 
-	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
-		talitos_alg = container_of(__crypto_ahash_alg(alg),
-					   struct talitos_crypto_alg,
-					   algt.alg.hash);
-	else
-		talitos_alg = container_of(alg, struct talitos_crypto_alg,
-					   algt.alg.crypto);
-
 	/* update context with ptr to dev */
 	ctx->dev = talitos_alg->dev;
 
@@ -2661,10 +2665,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }
 
+static int talitos_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct talitos_crypto_alg *talitos_alg;
+	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+		talitos_alg = container_of(__crypto_ahash_alg(alg),
+					   struct talitos_crypto_alg,
+					   algt.alg.hash);
+	else
+		talitos_alg = container_of(alg, struct talitos_crypto_alg,
+					   algt.alg.crypto);
+
+	return talitos_init_common(ctx, talitos_alg);
+}
+
 static int talitos_cra_init_aead(struct crypto_aead *tfm)
 {
-	talitos_cra_init(crypto_aead_tfm(tfm));
-	return 0;
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct talitos_crypto_alg *talitos_alg;
+	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+
+	talitos_alg = container_of(alg, struct talitos_crypto_alg,
+				   algt.alg.aead);
+
+	return talitos_init_common(ctx, talitos_alg);
 }
 
 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
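
The talitos refactor above factors the device-context setup into
talitos_init_common() and gives AEAD transforms their own init that resolves
the talitos_crypto_alg container from the aead_alg member directly, instead
of bouncing through the crypto_tfm-based path; copy_talitos_ptr() likewise
lets the new else branches reuse an already-built link-table entry, copying
the extended pointer only on non-SEC1 hardware. A reduced, compilable sketch
of the container_of() dispatch idea, with illustrative types standing in for
the crypto API's:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cipher_alg { int id; };
    struct aead_alg { int id; };

    /* A driver wrapper embeds whichever alg flavour was registered. */
    struct drv_alg {
        int dev;
        union {
            struct cipher_alg crypto;
            struct aead_alg aead;
        } alg;
    };

    static int init_common(struct drv_alg *d) { return d->dev; }

    /* Each entry point recovers the wrapper via the member it was
     * registered under, then shares the common init. */
    static int cra_init_aead(struct aead_alg *alg)
    {
        struct drv_alg *d = container_of(alg, struct drv_alg, alg.aead);
        return init_common(d);
    }

    int main(void)
    {
        struct drv_alg d = { .dev = 42 };
        printf("%d\n", cra_init_aead(&d.alg.aead)); /* prints 42 */
        return 0;
    }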