author	Gilad Ben-Yossef <gilad@benyossef.com>	2018-01-22 09:27:02 +0000
committer	Herbert Xu <herbert@gondor.apana.org.au>	2018-02-15 23:26:41 +0800
commit	63893811b0fcb52f6eaf9811cc08bddd46f81c3e (patch)
tree	37d1e9f5633d8955aa0d7da1417da02d152dba78 /drivers/crypto/ccree
parent	63ee04c8b491ee148489347e7da9fbfd982ca2bb (diff)
crypto: ccree - add ahash support
Add CryptoCell async. hash and HMAC support.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/ccree')
-rw-r--r--	drivers/crypto/ccree/Makefile	2
-rw-r--r--	drivers/crypto/ccree/cc_buffer_mgr.c	261
-rw-r--r--	drivers/crypto/ccree/cc_driver.c	13
-rw-r--r--	drivers/crypto/ccree/cc_driver.h	1
-rw-r--r--	drivers/crypto/ccree/cc_hash.c	2295
-rw-r--r--	drivers/crypto/ccree/cc_hash.h	113
-rw-r--r--	drivers/crypto/ccree/cc_pm.c	4
7 files changed, 2686 insertions(+), 3 deletions(-)
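The algorithms this patch registers are consumed through the kernel's generic ahash API. For orientation, a minimal kernel-side caller exercising the new HMAC support might look like the sketch below; the algorithm name, buffer handling and error paths are illustrative and not part of this patch.

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int demo_hmac_digest(const u8 *key, unsigned int keylen,
			    const u8 *msg, unsigned int msglen, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int rc;

	/* resolves to ccree when its "hmac(sha256)" has the best priority */
	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_ahash_setkey(tfm, key, keylen);
	if (rc)
		goto free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto free_tfm;
	}

	/* msg must be linear, DMA-able memory (e.g. kmalloc'd), not stack */
	sg_init_one(&sg, msg, msglen);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, msglen);

	/* digest() = init() + update() + final() in one async round trip */
	rc = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
free_tfm:
	crypto_free_ahash(tfm);
	return rc;
}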
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index a7fecadbf7cc..11094809c313 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
-ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_ivgen.o cc_sram_mgr.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_ivgen.o cc_sram_mgr.o
ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 46be101ede56..bb306b4efa4c 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -9,6 +9,7 @@
#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"
+#include "cc_hash.h"
enum dma_buffer_type {
DMA_NULL_TYPE = -1,
@@ -348,9 +349,33 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
return 0;
}
+static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
+ u8 *curr_buff, u32 curr_buff_cnt,
+ struct buffer_array *sg_data)
+{
+ dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
+ /* create sg for the current buffer */
+ sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
+ if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
+ dev_err(dev, "dma_map_sg() src buffer failed\n");
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
+ sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
+ areq_ctx->buff_sg->length);
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ areq_ctx->in_nents = 0;
+ /* prepare for case of MLLI */
+ cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
+ false, NULL);
+ return 0;
+}
+
void cc_unmap_cipher_request(struct device *dev, void *ctx,
- unsigned int ivsize, struct scatterlist *src,
- struct scatterlist *dst)
+ unsigned int ivsize, struct scatterlist *src,
+ struct scatterlist *dst)
{
struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
@@ -472,6 +497,238 @@ cipher_exit:
return rc;
}
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update, gfp_t flags)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ struct device *dev = drvdata_to_dev(drvdata);
+ u8 *curr_buff = cc_hash_buf(areq_ctx);
+ u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ struct buffer_array sg_data;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ u32 dummy = 0;
+ u32 mapped_nents = 0;
+
+ dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
+ curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
+ /* Init the type of the dma buffer */
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
+ mlli_params->curr_pool = NULL;
+ sg_data.num_of_buffers = 0;
+ areq_ctx->in_nents = 0;
+
+ if (nbytes == 0 && *curr_buff_cnt == 0) {
+ /* nothing to do */
+ return 0;
+ }
+
+	/* TODO: copy the data when the buffer alone is enough for the operation */
+ /* map the previous buffer */
+ if (*curr_buff_cnt) {
+ if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data)) {
+ return -ENOMEM;
+ }
+ }
+
+ if (src && nbytes > 0 && do_update) {
+ if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+ &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents)) {
+ goto unmap_curr_buff;
+ }
+ if (src && mapped_nents == 1 &&
+ areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+ memcpy(areq_ctx->buff_sg, src,
+ sizeof(struct scatterlist));
+ areq_ctx->buff_sg->length = nbytes;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+ } else {
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
+ }
+ }
+
+	/* build MLLI */
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
+ mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ /* add the src data to the sg_data */
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
+ 0, true, &areq_ctx->mlli_nents);
+ if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+ goto fail_unmap_din;
+ }
+ /* change the buffer index for the unmap function */
+ areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
+ dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
+ cc_dma_buf_type(areq_ctx->data_dma_buf_type));
+ return 0;
+
+fail_unmap_din:
+ dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+unmap_curr_buff:
+ if (*curr_buff_cnt)
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+
+ return -ENOMEM;
+}
+
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ unsigned int block_size, gfp_t flags)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ struct device *dev = drvdata_to_dev(drvdata);
+ u8 *curr_buff = cc_hash_buf(areq_ctx);
+ u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+ u8 *next_buff = cc_next_buf(areq_ctx);
+ u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ unsigned int update_data_len;
+ u32 total_in_len = nbytes + *curr_buff_cnt;
+ struct buffer_array sg_data;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ unsigned int swap_index = 0;
+ u32 dummy = 0;
+ u32 mapped_nents = 0;
+
+ dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
+ curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
+ /* Init the type of the dma buffer */
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
+ mlli_params->curr_pool = NULL;
+ areq_ctx->curr_sg = NULL;
+ sg_data.num_of_buffers = 0;
+ areq_ctx->in_nents = 0;
+
+ if (total_in_len < block_size) {
+ dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
+ curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
+ areq_ctx->in_nents =
+ cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+ sg_copy_to_buffer(src, areq_ctx->in_nents,
+ &curr_buff[*curr_buff_cnt], nbytes);
+ *curr_buff_cnt += nbytes;
+ return 1;
+ }
+
+	/* Calculate the residue size */
+ *next_buff_cnt = total_in_len & (block_size - 1);
+ /* update data len */
+ update_data_len = total_in_len - *next_buff_cnt;
+
+ dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
+ *next_buff_cnt, update_data_len);
+
+ /* Copy the new residue to next buffer */
+ if (*next_buff_cnt) {
+ dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
+ next_buff, (update_data_len - *curr_buff_cnt),
+ *next_buff_cnt);
+ cc_copy_sg_portion(dev, next_buff, src,
+ (update_data_len - *curr_buff_cnt),
+ nbytes, CC_SG_TO_BUF);
+ /* change the buffer index for next operation */
+ swap_index = 1;
+ }
+
+ if (*curr_buff_cnt) {
+ if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data)) {
+ return -ENOMEM;
+ }
+ /* change the buffer index for next operation */
+ swap_index = 1;
+ }
+
+ if (update_data_len > *curr_buff_cnt) {
+ if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+ DMA_TO_DEVICE, &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents)) {
+ goto unmap_curr_buff;
+ }
+ if (mapped_nents == 1 &&
+ areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+ /* only one entry in the SG and no previous data */
+ memcpy(areq_ctx->buff_sg, src,
+ sizeof(struct scatterlist));
+ areq_ctx->buff_sg->length = update_data_len;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ } else {
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
+ }
+ }
+
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
+ mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ /* add the src data to the sg_data */
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
+ (update_data_len - *curr_buff_cnt), 0, true,
+ &areq_ctx->mlli_nents);
+ if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+ goto fail_unmap_din;
+ }
+ areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
+
+ return 0;
+
+fail_unmap_din:
+ dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+unmap_curr_buff:
+ if (*curr_buff_cnt)
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+
+ return -ENOMEM;
+}
+
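To make the split above concrete: cc_map_hash_request_update hashes only whole blocks and carries the tail over in a side buffer. The arithmetic, restated as a standalone helper with one worked case (illustrative only, not a driver function; block_size is a power of two for every supported mode, so the mask is valid):

/* Example: 10 bytes buffered, 150 new, block_size = 64:
 *   total           = 10 + 150 = 160
 *   residue         = 160 & 63 = 32   -> copied aside for the next update
 *   update_data_len = 160 - 32 = 128  -> hashed now (10 buffered + 118 from src)
 */
static unsigned int split_for_update(unsigned int buffered, unsigned int nbytes,
				     unsigned int block_size,
				     unsigned int *residue)
{
	unsigned int total = buffered + nbytes;

	*residue = total & (block_size - 1);
	return total - *residue;	/* always a multiple of block_size */
}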
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+ struct scatterlist *src, bool do_revert)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ u32 *prev_len = cc_next_buf_cnt(areq_ctx);
+
+	/* In case a pool was set, a table was
+	 * allocated and should be released
+	 */
+ if (areq_ctx->mlli_params.curr_pool) {
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ &areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
+ dma_pool_free(areq_ctx->mlli_params.curr_pool,
+ areq_ctx->mlli_params.mlli_virt_addr,
+ areq_ctx->mlli_params.mlli_dma_addr);
+ }
+
+ if (src && areq_ctx->in_nents) {
+ dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+ sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
+ dma_unmap_sg(dev, src,
+ areq_ctx->in_nents, DMA_TO_DEVICE);
+ }
+
+ if (*prev_len) {
+ dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
+ sg_virt(areq_ctx->buff_sg),
+ &sg_dma_address(areq_ctx->buff_sg),
+ sg_dma_len(areq_ctx->buff_sg));
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+ if (!do_revert) {
+ /* clean the previous data length for update
+ * operation
+ */
+ *prev_len = 0;
+ } else {
+ areq_ctx->buff_index ^= 1;
+ }
+ }
+}
+
int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
struct buff_mgr_handle *buff_mgr_handle;
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 286d0e3e8561..6e32de90b38f 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -20,6 +20,7 @@
#include "cc_buffer_mgr.h"
#include "cc_debugfs.h"
#include "cc_cipher.h"
+#include "cc_hash.h"
#include "cc_ivgen.h"
#include "cc_sram_mgr.h"
#include "cc_pm.h"
@@ -286,8 +287,17 @@ static int init_cc_resources(struct platform_device *plat_dev)
goto post_ivgen_err;
}
+ /* hash must be allocated before aead since hash exports APIs */
+ rc = cc_hash_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_hash_alloc failed\n");
+ goto post_cipher_err;
+ }
+
return 0;
+post_cipher_err:
+ cc_cipher_free(new_drvdata);
post_ivgen_err:
cc_ivgen_fini(new_drvdata);
post_power_mgr_err:
@@ -318,6 +328,7 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
struct cc_drvdata *drvdata =
(struct cc_drvdata *)platform_get_drvdata(plat_dev);
+ cc_hash_free(drvdata);
cc_cipher_free(drvdata);
cc_ivgen_fini(drvdata);
cc_pm_fini(drvdata);
@@ -406,6 +417,8 @@ static int __init ccree_init(void)
{
int ret;
+ cc_hash_global_init();
+
ret = cc_debugfs_global_init();
if (ret)
return ret;
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index cd4f62b122c5..a7098f5fde40 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -113,6 +113,7 @@ struct cc_drvdata {
cc_sram_addr_t mlli_sram_addr;
void *buff_mgr_handle;
void *cipher_handle;
+ void *hash_handle;
void *request_mgr_handle;
void *ivgen_handle;
void *sram_mgr_handle;
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
new file mode 100644
index 000000000000..26bbbc6a4019
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -0,0 +1,2295 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/internal/hash.h>
+
+#include "cc_driver.h"
+#include "cc_request_mgr.h"
+#include "cc_buffer_mgr.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
+
+#define CC_MAX_HASH_SEQ_LEN 12
+#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
+
+struct cc_hash_handle {
+	cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
+ cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
+ struct list_head hash_list;
+};
+
+static const u32 digest_len_init[] = {
+ 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
+static const u32 md5_init[] = {
+ SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
+static const u32 sha1_init[] = {
+ SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
+static const u32 sha224_init[] = {
+ SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
+ SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
+static const u32 sha256_init[] = {
+ SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
+ SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
+#if (CC_DEV_SHA_MAX > 256)
+static const u32 digest_len_sha512_init[] = {
+ 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
+static u64 sha384_init[] = {
+ SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
+ SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
+static u64 sha512_init[] = {
+ SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
+ SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
+#endif
+
+static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size);
+
+static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size);
+
+static const void *cc_larval_digest(struct device *dev, u32 mode);
+
+struct cc_hash_alg {
+ struct list_head entry;
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ struct cc_drvdata *drvdata;
+ struct ahash_alg ahash_alg;
+};
+
+struct hash_key_req_ctx {
+ u32 keylen;
+ dma_addr_t key_dma_addr;
+};
+
+/* hash per-session context */
+struct cc_hash_ctx {
+ struct cc_drvdata *drvdata;
+	/* holds the original digest: the digest after "setkey" if HMAC,
+	 * the initial digest if plain HASH.
+	 */
+ u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;
+
+ dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
+ dma_addr_t digest_buff_dma_addr;
+	/* used for HMAC with a key larger than the mode's block size */
+ struct hash_key_req_ctx key_params;
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ struct completion setkey_comp;
+ bool is_hmac;
+};
+
+static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
+ unsigned int flow_mode, struct cc_hw_desc desc[],
+ bool is_not_last_data, unsigned int *seq_size);
+
+static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
+{
+ if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
+ mode == DRV_HASH_SHA512) {
+ set_bytes_swap(desc, 1);
+ } else {
+ set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ }
+}
+
+static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
+ unsigned int digestsize)
+{
+ state->digest_result_dma_addr =
+ dma_map_single(dev, state->digest_result_buff,
+ digestsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
+ dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
+ digestsize);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
+ digestsize, state->digest_result_buff,
+ &state->digest_result_dma_addr);
+
+ return 0;
+}
+
+static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
+{
+ bool is_hmac = ctx->is_hmac;
+
+ memset(state, 0, sizeof(*state));
+
+ if (is_hmac) {
+ if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
+ ctx->hw_mode != DRV_CIPHER_CMAC) {
+ dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+
+ memcpy(state->digest_buff, ctx->digest_buff,
+ ctx->inter_digestsize);
+#if (CC_DEV_SHA_MAX > 256)
+ if (ctx->hash_mode == DRV_HASH_SHA512 ||
+ ctx->hash_mode == DRV_HASH_SHA384)
+ memcpy(state->digest_bytes_len,
+ digest_len_sha512_init, HASH_LEN_SIZE);
+ else
+ memcpy(state->digest_bytes_len,
+ digest_len_init, HASH_LEN_SIZE);
+#else
+ memcpy(state->digest_bytes_len, digest_len_init,
+ HASH_LEN_SIZE);
+#endif
+ }
+
+ if (ctx->hash_mode != DRV_HASH_NULL) {
+ dma_sync_single_for_cpu(dev,
+ ctx->opad_tmp_keys_dma_addr,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+ memcpy(state->opad_digest_buff,
+ ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
+ }
+ } else { /*hash*/
+ /* Copy the initial digests if hash flow. */
+ const void *larval = cc_larval_digest(dev, ctx->hash_mode);
+
+ memcpy(state->digest_buff, larval, ctx->inter_digestsize);
+ }
+}
+
+static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
+{
+ bool is_hmac = ctx->is_hmac;
+
+ state->digest_buff_dma_addr =
+ dma_map_single(dev, state->digest_buff,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
+ dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize, state->digest_buff);
+ return -EINVAL;
+ }
+ dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->digest_buff,
+ &state->digest_buff_dma_addr);
+
+ if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
+ state->digest_bytes_len_dma_addr =
+ dma_map_single(dev, state->digest_bytes_len,
+ HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
+ dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
+ HASH_LEN_SIZE, state->digest_bytes_len);
+ goto unmap_digest_buf;
+ }
+ dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
+ HASH_LEN_SIZE, state->digest_bytes_len,
+ &state->digest_bytes_len_dma_addr);
+ }
+
+ if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
+ state->opad_digest_dma_addr =
+ dma_map_single(dev, state->opad_digest_buff,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
+ dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize,
+ state->opad_digest_buff);
+ goto unmap_digest_len;
+ }
+ dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->opad_digest_buff,
+ &state->opad_digest_dma_addr);
+ }
+
+ return 0;
+
+unmap_digest_len:
+ if (state->digest_bytes_len_dma_addr) {
+ dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
+ HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
+ state->digest_bytes_len_dma_addr = 0;
+ }
+unmap_digest_buf:
+ if (state->digest_buff_dma_addr) {
+ dma_unmap_single(dev, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ state->digest_buff_dma_addr = 0;
+ }
+
+ return -EINVAL;
+}
+
+static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
+{
+ if (state->digest_buff_dma_addr) {
+ dma_unmap_single(dev, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+ &state->digest_buff_dma_addr);
+ state->digest_buff_dma_addr = 0;
+ }
+ if (state->digest_bytes_len_dma_addr) {
+ dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
+ HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
+ &state->digest_bytes_len_dma_addr);
+ state->digest_bytes_len_dma_addr = 0;
+ }
+ if (state->opad_digest_dma_addr) {
+ dma_unmap_single(dev, state->opad_digest_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
+ &state->opad_digest_dma_addr);
+ state->opad_digest_dma_addr = 0;
+ }
+}
+
+static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
+ unsigned int digestsize, u8 *result)
+{
+ if (state->digest_result_dma_addr) {
+ dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
+ DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
+ state->digest_result_buff,
+ &state->digest_result_dma_addr, digestsize);
+ memcpy(result, state->digest_result_buff, digestsize);
+ }
+ state->digest_result_dma_addr = 0;
+}
+
+static void cc_update_complete(struct device *dev, void *cc_req, int err)
+{
+ struct ahash_request *req = (struct ahash_request *)cc_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ dev_dbg(dev, "req=%pK\n", req);
+
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
+}
+
+static void cc_digest_complete(struct device *dev, void *cc_req, int err)
+{
+ struct ahash_request *req = (struct ahash_request *)cc_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ dev_dbg(dev, "req=%pK\n", req);
+
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
+}
+
+static void cc_hash_complete(struct device *dev, void *cc_req, int err)
+{
+ struct ahash_request *req = (struct ahash_request *)cc_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ dev_dbg(dev, "req=%pK\n", req);
+
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
+}
+
+static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
+ int idx)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ /* TODO */
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
+ NS_BIT, 1);
+ set_queue_last_ind(&desc[idx]);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ idx++;
+
+ return idx;
+}
+
+static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
+ int idx)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ /* store the hash digest result in the context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
+ NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* Loading hash opad xor key state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_sram(&desc[idx],
+ cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
+ HASH_LEN_SIZE);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ return idx;
+}
+
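cc_fin_hmac is the hardware rendition of the outer HMAC pass of RFC 2104: it writes the inner digest out of the hash engine, reloads the engine with the precomputed opad state and the stored digest length, then runs the inner digest back through it. As an equation (a restatement for orientation, not new behavior):

	HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))

where K' is the key zero-padded (or first hashed, when longer than a block) to the block size; the (K' ^ ipad) and (K' ^ opad) chaining states were precomputed at setkey time and live in the context's digest_buff and opad_tmp_keys_buff respectively.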
+static int cc_hash_digest(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ bool is_hmac = ctx->is_hmac;
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ cc_sram_addr_t larval_digest_addr =
+ cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
+ int idx = 0;
+ int rc = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
+
+ cc_init_req(dev, state, ctx);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -ENOMEM;
+ }
+
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
+ flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_digest_complete;
+ cc_req.user_arg = req;
+
+ /* If HMAC then load hash IPAD xor key, if HASH then load initial
+ * digest
+ */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ if (is_hmac) {
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ } else {
+ set_din_sram(&desc[idx], larval_digest_addr,
+ ctx->inter_digestsize);
+ }
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+
+ if (is_hmac) {
+ set_din_type(&desc[idx], DMA_DLLI,
+ state->digest_bytes_len_dma_addr, HASH_LEN_SIZE,
+ NS_BIT);
+ } else {
+ set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
+ if (nbytes)
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ else
+ set_cipher_do(&desc[idx], DO_PAD);
+ }
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+ if (is_hmac) {
+ /* HW last hash block padding (aka. "DO_PAD") */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ HASH_LEN_SIZE, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ set_cipher_do(&desc[idx], DO_PAD);
+ idx++;
+
+ idx = cc_fin_hmac(desc, req, idx);
+ }
+
+ idx = cc_fin_result(desc, req, idx);
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
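Stepping back, the descriptor sequence cc_hash_digest assembles follows a fixed skeleton; the annotation below condenses the code above and is not an alternative flow:

/* 1. SETUP_LOAD_STATE0: initial digest -- larval value from SRAM for a
 *    plain hash, or the precomputed ipad state for HMAC
 * 2. SETUP_LOAD_KEY0:   running byte count (0, or the saved length for
 *    HMAC), with HW padding enabled or DO_PAD forced for empty input
 * 3. DIN_HASH:          stream the mapped data (DLLI or MLLI) through
 *    the hash engine
 * 4. HMAC only:         store the padded length, then the cc_fin_hmac
 *    outer pass over the inner digest
 * 5. SETUP_WRITE_STATE0: DMA the final, endianness-fixed digest to
 *    digest_result_dma_addr and signal queue completion
 */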
+static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
+ struct ahash_req_ctx *state, unsigned int idx)
+{
+ /* Restore hash digest */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Restore hash current length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
+ HASH_LEN_SIZE, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+ return idx;
+}
+
+static int cc_hash_update(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ u32 idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
+ "hmac" : "hash", nbytes);
+
+ if (nbytes == 0) {
+ /* no real updates required */
+ return 0;
+ }
+
+ rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
+ block_size, flags);
+ if (rc) {
+ if (rc == 1) {
+			dev_dbg(dev, " data size does not require HW update %x\n",
+ nbytes);
+ /* No hardware updates are required */
+ return 0;
+ }
+ dev_err(dev, "map_ahash_request_update() failed\n");
+ return -ENOMEM;
+ }
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ return -EINVAL;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_update_complete;
+ cc_req.user_arg = req;
+
+ idx = cc_restore_hash(desc, ctx, state, idx);
+
+ /* store the hash digest result in context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* store current hash length in context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
+ HASH_LEN_SIZE, NS_BIT, 1);
+ set_queue_last_ind(&desc[idx]);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_hash_finup(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ bool is_hmac = ctx->is_hmac;
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ unsigned int idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
+ flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
+
+ idx = cc_restore_hash(desc, ctx, state, idx);
+
+ if (is_hmac)
+ idx = cc_fin_hmac(desc, req, idx);
+
+ idx = cc_fin_result(desc, req, idx);
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_hash_final(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ bool is_hmac = ctx->is_hmac;
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ unsigned int idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
+ flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
+
+ idx = cc_restore_hash(desc, ctx, state, idx);
+
+ /* "DO-PAD" must be enabled only when writing current length to HW */
+ hw_desc_init(&desc[idx]);
+ set_cipher_do(&desc[idx], DO_PAD);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
+ HASH_LEN_SIZE, NS_BIT, 0);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ idx++;
+
+ if (is_hmac)
+ idx = cc_fin_hmac(desc, req, idx);
+
+ idx = cc_fin_result(desc, req, idx);
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_hash_init(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
+
+ cc_init_req(dev, state, ctx);
+
+ return 0;
+}
+
+static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+ struct cc_crypto_req cc_req = {};
+ struct cc_hash_ctx *ctx = NULL;
+ int blocksize = 0;
+ int digestsize = 0;
+ int i, idx = 0, rc = 0;
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ cc_sram_addr_t larval_addr;
+ struct device *dev;
+
+ ctx = crypto_ahash_ctx(ahash);
+ dev = drvdata_to_dev(ctx->drvdata);
+	dev_dbg(dev, "start keylen: %d\n", keylen);
+
+ blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ digestsize = crypto_ahash_digestsize(ahash);
+
+ larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
+
+	/* A ZERO keylen selects the plain HASH flow; any NON-ZERO
+	 * keylen selects the HMAC flow.
+	 */
+ ctx->key_params.keylen = keylen;
+ ctx->key_params.key_dma_addr = 0;
+ ctx->is_hmac = true;
+
+ if (keylen) {
+ ctx->key_params.key_dma_addr =
+ dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
+ if (keylen > blocksize) {
+ /* Load hash initial state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_sram(&desc[idx], larval_addr,
+ ctx->inter_digestsize);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ ctx->key_params.key_dma_addr, keylen,
+ NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get hashed key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+ digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, (blocksize - digestsize));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr +
+ digestsize),
+ (blocksize - digestsize), NS_BIT, 0);
+ idx++;
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ ctx->key_params.key_dma_addr, keylen,
+ NS_BIT);
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+ keylen, NS_BIT, 0);
+ idx++;
+
+ if ((blocksize - keylen)) {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0,
+ (blocksize - keylen));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr +
+ keylen), (blocksize - keylen),
+ NS_BIT, 0);
+ idx++;
+ }
+ }
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, blocksize);
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
+ blocksize, NS_BIT, 0);
+ idx++;
+ }
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+ if (rc) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ goto out;
+ }
+
+ /* calc derived HMAC key */
+ for (idx = 0, i = 0; i < 2; i++) {
+ /* Load hash initial state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Prepare ipad key */
+ hw_desc_init(&desc[idx]);
+ set_xor_val(&desc[idx], hmac_pad_const[i]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
+ blocksize, NS_BIT);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_xor_active(&desc[idx]);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
+ * of the first HASH "update" state)
+ */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ if (i > 0) /* Not first iteration */
+ set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 0);
+ else /* First iteration */
+ set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+ }
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+
+out:
+ if (rc)
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+ if (ctx->key_params.key_dma_addr) {
+ dma_unmap_single(dev, ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen, DMA_TO_DEVICE);
+ dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+ }
+ return rc;
+}
+
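The two-pass loop above is the setkey half of the standard HMAC precomputation: it folds the (padded or pre-hashed) key into two chaining states so per-request processing never touches the raw key again. In compression-function terms (Compress denotes one unpadded application of the hash compression function; notation only, not driver code):

	ipad_state = Compress(IV, K' ^ 0x36…36)   ->  ctx->digest_buff
	opad_state = Compress(IV, K' ^ 0x5c…5c)   ->  ctx->opad_tmp_keys_buff

Each iteration loads the larval IV and a zero length, XORs the stored key block with HMAC_IPAD_CONST or HMAC_OPAD_CONST on the way into the engine, and captures the resulting intermediate state with a WRITE_STATE0 descriptor.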
+static int cc_xcbc_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ struct cc_crypto_req cc_req = {};
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ int rc = 0;
+ unsigned int idx = 0;
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+
+ dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->key_params.keylen = keylen;
+
+ ctx->key_params.key_dma_addr =
+ dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
+ ctx->is_hmac = true;
+ /* 1. Load the AES key */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
+ keylen, NS_BIT);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_key_size_aes(&desc[idx], keylen);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+
+ if (rc)
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+ dma_unmap_single(dev, ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen, DMA_TO_DEVICE);
+ dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
+ return rc;
+}
+
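The three constant-input descriptors above implement the AES-XCBC-MAC subkey derivation of RFC 3566, encrypting fixed byte patterns under the user key:

	K1 = AES_K(0x01 repeated 16 times)
	K2 = AES_K(0x02 repeated 16 times)
	K3 = AES_K(0x03 repeated 16 times)

The results land at XCBC_MAC_K1/K2/K3_OFFSET inside opad_tmp_keys_buff: K1 keys the CBC-style chaining, while K2 or K3 is XORed into the final block depending on whether it is complete or padded.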
+static int cc_cmac_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
+
+ ctx->is_hmac = true;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->key_params.keylen = keylen;
+
+ /* STAT_PHASE_1: Copy key to ctx */
+
+ dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
+ keylen, DMA_TO_DEVICE);
+
+ memcpy(ctx->opad_tmp_keys_buff, key, keylen);
+ if (keylen == 24) {
+ memset(ctx->opad_tmp_keys_buff + 24, 0,
+ CC_AES_KEY_SIZE_MAX - 24);
+ }
+
+ dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
+ keylen, DMA_TO_DEVICE);
+
+ ctx->key_params.keylen = keylen;
+
+ return 0;
+}
+
+static void cc_free_ctx(struct cc_hash_ctx *ctx)
+{
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ if (ctx->digest_buff_dma_addr) {
+ dma_unmap_single(dev, ctx->digest_buff_dma_addr,
+ sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+ &ctx->digest_buff_dma_addr);
+ ctx->digest_buff_dma_addr = 0;
+ }
+ if (ctx->opad_tmp_keys_dma_addr) {
+ dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
+ sizeof(ctx->opad_tmp_keys_buff),
+ DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
+ &ctx->opad_tmp_keys_dma_addr);
+ ctx->opad_tmp_keys_dma_addr = 0;
+ }
+
+ ctx->key_params.keylen = 0;
+}
+
+static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
+{
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ ctx->key_params.keylen = 0;
+
+ ctx->digest_buff_dma_addr =
+ dma_map_single(dev, (void *)ctx->digest_buff,
+ sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
+ dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff);
+ goto fail;
+ }
+ dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff,
+ &ctx->digest_buff_dma_addr);
+
+ ctx->opad_tmp_keys_dma_addr =
+ dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
+ sizeof(ctx->opad_tmp_keys_buff),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
+ dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
+ sizeof(ctx->opad_tmp_keys_buff),
+ ctx->opad_tmp_keys_buff);
+ goto fail;
+ }
+ dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
+ sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
+ &ctx->opad_tmp_keys_dma_addr);
+
+ ctx->is_hmac = false;
+ return 0;
+
+fail:
+ cc_free_ctx(ctx);
+ return -ENOMEM;
+}
+
+static int cc_cra_init(struct crypto_tfm *tfm)
+{
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct hash_alg_common *hash_alg_common =
+ container_of(tfm->__crt_alg, struct hash_alg_common, base);
+ struct ahash_alg *ahash_alg =
+ container_of(hash_alg_common, struct ahash_alg, halg);
+ struct cc_hash_alg *cc_alg =
+ container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_req_ctx));
+
+ ctx->hash_mode = cc_alg->hash_mode;
+ ctx->hw_mode = cc_alg->hw_mode;
+ ctx->inter_digestsize = cc_alg->inter_digestsize;
+ ctx->drvdata = cc_alg->drvdata;
+
+ return cc_alloc_ctx(ctx);
+}
+
+static void cc_cra_exit(struct crypto_tfm *tfm)
+{
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+	dev_dbg(dev, "cc_cra_exit\n");
+ cc_free_ctx(ctx);
+}
+
+static int cc_mac_update(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ int rc;
+ u32 idx = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ if (req->nbytes == 0) {
+ /* no real updates required */
+ return 0;
+ }
+
+ state->xcbc_count++;
+
+ rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
+ req->nbytes, block_size, flags);
+ if (rc) {
+ if (rc == 1) {
+			dev_dbg(dev, " data size does not require HW update %x\n",
+ req->nbytes);
+ /* No hardware updates are required */
+ return 0;
+ }
+ dev_err(dev, "map_ahash_request_update() failed\n");
+ return -ENOMEM;
+ }
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
+ cc_setup_xcbc(req, desc, &idx);
+ else
+ cc_setup_cmac(req, desc, &idx);
+
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
+
+ /* store the hash digest result in context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 1);
+ set_queue_last_ind(&desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_update_complete;
+ cc_req.user_arg = (void *)req;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_mac_final(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ int idx = 0;
+ int rc = 0;
+ u32 key_size, key_len;
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ gfp_t flags = cc_gfp_flags(&req->base);
+ u32 rem_cnt = *cc_hash_buf_cnt(state);
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ key_size = CC_AES_128_BIT_KEY_SIZE;
+ key_len = CC_AES_128_BIT_KEY_SIZE;
+ } else {
+ key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
+ ctx->key_params.keylen;
+ key_len = ctx->key_params.keylen;
+ }
+
+	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 0, flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_hash_complete;
+ cc_req.user_arg = (void *)req;
+
+ if (state->xcbc_count && rem_cnt == 0) {
+ /* Load key for ECB decryption */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
+ key_size, NS_BIT);
+ set_key_size_aes(&desc[idx], key_len);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Initiate decryption of block state to previous
+ * block_state-XOR-M[n]
+ */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT, 0);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* Memory Barrier: wait for axi write to complete */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+ }
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
+ cc_setup_xcbc(req, desc, &idx);
+ else
+ cc_setup_cmac(req, desc, &idx);
+
+ if (state->xcbc_count == 0) {
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_key_size_aes(&desc[idx], key_len);
+ set_cmac_size0_mode(&desc[idx]);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else if (rem_cnt > 0) {
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+ }
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ /* TODO */
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+ digestsize, NS_BIT, 1);
+ set_queue_last_ind(&desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_mac_finup(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ int idx = 0;
+ int rc = 0;
+ u32 key_len = 0;
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
+ if (state->xcbc_count > 0 && req->nbytes == 0) {
+		dev_dbg(dev, "No data to update; deferring to cc_mac_final\n");
+ return cc_mac_final(req);
+ }
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1, flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_hash_complete;
+ cc_req.user_arg = (void *)req;
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ key_len = CC_AES_128_BIT_KEY_SIZE;
+ cc_setup_xcbc(req, desc, &idx);
+ } else {
+ key_len = ctx->key_params.keylen;
+ cc_setup_cmac(req, desc, &idx);
+ }
+
+ if (req->nbytes == 0) {
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_key_size_aes(&desc[idx], key_len);
+ set_cmac_size0_mode(&desc[idx]);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else {
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ }
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ /* TODO */
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+ digestsize, NS_BIT, 1);
+ set_queue_last_ind(&desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_mac_digest(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ u32 key_len;
+ unsigned int idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);
+
+ cc_init_req(dev, state, ctx);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -ENOMEM;
+ }
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1, flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_digest_complete;
+ cc_req.user_arg = (void *)req;
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ key_len = CC_AES_128_BIT_KEY_SIZE;
+ cc_setup_xcbc(req, desc, &idx);
+ } else {
+ key_len = ctx->key_params.keylen;
+ cc_setup_cmac(req, desc, &idx);
+ }
+
+ if (req->nbytes == 0) {
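+ /* Zero-length message: use the HW's size-0 CMAC mode */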
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_key_size_aes(&desc[idx], key_len);
+ set_cmac_size0_mode(&desc[idx]);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else {
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ }
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT, 1);
+ set_queue_last_ind(&desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_hash_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ u8 *curr_buff = cc_hash_buf(state);
+ u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
+ const u32 tmp = CC_EXPORT_MAGIC;
+
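+ /*
+ * Export blob layout: magic word, intermediate digest, running
+ * length counter, byte count of buffered data, then the buffered
+ * partial-block data itself.
+ */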
+ memcpy(out, &tmp, sizeof(u32));
+ out += sizeof(u32);
+
+ memcpy(out, state->digest_buff, ctx->inter_digestsize);
+ out += ctx->inter_digestsize;
+
+ memcpy(out, state->digest_bytes_len, HASH_LEN_SIZE);
+ out += HASH_LEN_SIZE;
+
+ memcpy(out, &curr_buff_cnt, sizeof(u32));
+ out += sizeof(u32);
+
+ memcpy(out, curr_buff, curr_buff_cnt);
+
+ return 0;
+}
+
+static int cc_hash_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ u32 tmp;
+
+ memcpy(&tmp, in, sizeof(u32));
+ if (tmp != CC_EXPORT_MAGIC)
+ return -EINVAL;
+ in += sizeof(u32);
+
+ cc_init_req(dev, state, ctx);
+
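+ /* Restore the fields in the order cc_hash_export() wrote them */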
+ memcpy(state->digest_buff, in, ctx->inter_digestsize);
+ in += ctx->inter_digestsize;
+
+ memcpy(state->digest_bytes_len, in, HASH_LEN_SIZE);
+ in += HASH_LEN_SIZE;
+
+ /* Sanity check the data as much as possible */
+ memcpy(&tmp, in, sizeof(u32));
+ if (tmp > CC_MAX_HASH_BLCK_SIZE)
+ return -EINVAL;
+ in += sizeof(u32);
+
+ state->buf_cnt[0] = tmp;
+ memcpy(state->buffers[0], in, tmp);
+
+ return 0;
+}
+
+struct cc_hash_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ char mac_name[CRYPTO_MAX_ALG_NAME];
+ char mac_driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ bool synchronize;
+ struct ahash_alg template_ahash;
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ struct cc_drvdata *drvdata;
+};
+
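+/*
+ * Exported state size: intermediate digest (_x), the running length
+ * counter, one block of buffered data, plus two u32 words for the
+ * export magic and the buffered-data byte count.
+ */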
+#define CC_STATE_SIZE(_x) \
+ ((_x) + HASH_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
+
+/* hash descriptors */
+static struct cc_hash_template driver_hash[] = {
+ /* Asynchronous hash templates */
+ {
+ .name = "sha1",
+ .driver_name = "sha1-ccree",
+ .mac_name = "hmac(sha1)",
+ .mac_driver_name = "hmac-sha1-ccree",
+ .blocksize = SHA1_BLOCK_SIZE,
+ .synchronize = false,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA1,
+ .hw_mode = DRV_HASH_HW_SHA1,
+ .inter_digestsize = SHA1_DIGEST_SIZE,
+ },
+ {
+ .name = "sha256",
+ .driver_name = "sha256-ccree",
+ .mac_name = "hmac(sha256)",
+ .mac_driver_name = "hmac-sha256-ccree",
+ .blocksize = SHA256_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
+ },
+ },
+ .hash_mode = DRV_HASH_SHA256,
+ .hw_mode = DRV_HASH_HW_SHA256,
+ .inter_digestsize = SHA256_DIGEST_SIZE,
+ },
+ {
+ .name = "sha224",
+ .driver_name = "sha224-ccree",
+ .mac_name = "hmac(sha224)",
+ .mac_driver_name = "hmac-sha224-ccree",
+ .blocksize = SHA224_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA224,
+ .hw_mode = DRV_HASH_HW_SHA256,
+ .inter_digestsize = SHA256_DIGEST_SIZE,
+ },
+#if (CC_DEV_SHA_MAX > 256)
+ {
+ .name = "sha384",
+ .driver_name = "sha384-ccree",
+ .mac_name = "hmac(sha384)",
+ .mac_driver_name = "hmac-sha384-ccree",
+ .blocksize = SHA384_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA384,
+ .hw_mode = DRV_HASH_HW_SHA512,
+ .inter_digestsize = SHA512_DIGEST_SIZE,
+ },
+ {
+ .name = "sha512",
+ .driver_name = "sha512-ccree",
+ .mac_name = "hmac(sha512)",
+ .mac_driver_name = "hmac-sha512-ccree",
+ .blocksize = SHA512_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA512,
+ .hw_mode = DRV_HASH_HW_SHA512,
+ .inter_digestsize = SHA512_DIGEST_SIZE,
+ },
+#endif
+ {
+ .name = "md5",
+ .driver_name = "md5-ccree",
+ .mac_name = "hmac(md5)",
+ .mac_driver_name = "hmac-md5-ccree",
+ .blocksize = MD5_HMAC_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_MD5,
+ .hw_mode = DRV_HASH_HW_MD5,
+ .inter_digestsize = MD5_DIGEST_SIZE,
+ },
+ {
+ .mac_name = "xcbc(aes)",
+ .mac_driver_name = "xcbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_mac_update,
+ .final = cc_mac_final,
+ .finup = cc_mac_finup,
+ .digest = cc_mac_digest,
+ .setkey = cc_xcbc_setkey,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_NULL,
+ .hw_mode = DRV_CIPHER_XCBC_MAC,
+ .inter_digestsize = AES_BLOCK_SIZE,
+ },
+ {
+ .mac_name = "cmac(aes)",
+ .mac_driver_name = "cmac-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_mac_update,
+ .final = cc_mac_final,
+ .finup = cc_mac_finup,
+ .digest = cc_mac_digest,
+ .setkey = cc_cmac_setkey,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_NULL,
+ .hw_mode = DRV_CIPHER_CMAC,
+ .inter_digestsize = AES_BLOCK_SIZE,
+ },
+};
+
+static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
+ struct device *dev, bool keyed)
+{
+ struct cc_hash_alg *t_crypto_alg;
+ struct crypto_alg *alg;
+ struct ahash_alg *halg;
+
+ t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
+ if (!t_crypto_alg)
+ return ERR_PTR(-ENOMEM);
+
+ t_crypto_alg->ahash_alg = template->template_ahash;
+ halg = &t_crypto_alg->ahash_alg;
+ alg = &halg->halg.base;
+
+ if (keyed) {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->mac_name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->mac_driver_name);
+ } else {
+ halg->setkey = NULL;
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
+ alg->cra_priority = CC_CRA_PRIO;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_exit = cc_cra_exit;
+
+ alg->cra_init = cc_cra_init;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->cra_type = &crypto_ahash_type;
+
+ t_crypto_alg->hash_mode = template->hash_mode;
+ t_crypto_alg->hw_mode = template->hw_mode;
+ t_crypto_alg->inter_digestsize = template->inter_digestsize;
+
+ return t_crypto_alg;
+}
+
+int cc_init_hash_sram(struct cc_drvdata *drvdata)
+{
+ struct cc_hash_handle *hash_handle = drvdata->hash_handle;
+ cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
+ unsigned int larval_seq_len = 0;
+ struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
+ int rc = 0;
+
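+ /*
+ * SRAM is filled in this order: digest length constants, then the
+ * larval digests for MD5, SHA1, SHA224, SHA256 and, when
+ * CC_DEV_SHA_MAX > 256, SHA384 and SHA512.
+ */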
+ /* Copy-to-sram digest-len */
+ cc_set_sram_desc(digest_len_init, sram_buff_ofs,
+ ARRAY_SIZE(digest_len_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+
+ sram_buff_ofs += sizeof(digest_len_init);
+ larval_seq_len = 0;
+
+#if (CC_DEV_SHA_MAX > 256)
+ /* Copy-to-sram digest-len for sha384/512 */
+ cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
+ ARRAY_SIZE(digest_len_sha512_init),
+ larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+
+ sram_buff_ofs += sizeof(digest_len_sha512_init);
+ larval_seq_len = 0;
+#endif
+
+ /* The initial digests offset */
+ hash_handle->larval_digest_sram_addr = sram_buff_ofs;
+
+ /* Copy-to-sram initial SHA* digests */
+ cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
+ larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(md5_init);
+ larval_seq_len = 0;
+
+ cc_set_sram_desc(sha1_init, sram_buff_ofs,
+ ARRAY_SIZE(sha1_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha1_init);
+ larval_seq_len = 0;
+
+ cc_set_sram_desc(sha224_init, sram_buff_ofs,
+ ARRAY_SIZE(sha224_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha224_init);
+ larval_seq_len = 0;
+
+ cc_set_sram_desc(sha256_init, sram_buff_ofs,
+ ARRAY_SIZE(sha256_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha256_init);
+ larval_seq_len = 0;
+
+#if (CC_DEV_SHA_MAX > 256)
+ cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
+ (ARRAY_SIZE(sha384_init) * 2), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha384_init);
+ larval_seq_len = 0;
+
+ cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
+ (ARRAY_SIZE(sha512_init) * 2), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+#endif
+
+init_digest_const_err:
+ return rc;
+}
+
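+/* Swap each pair of adjacent 32-bit words in place; size is in u32 units */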
+static void __init cc_swap_dwords(u32 *buf, unsigned long size)
+{
+ unsigned long i;
+ u32 tmp;
+
+ for (i = 0; i < size; i += 2) {
+ tmp = buf[i];
+ buf[i] = buf[i + 1];
+ buf[i + 1] = tmp;
+ }
+}
+
+/*
+ * Due to the way the HW works we need to swap every
+ * double word in the SHA384 and SHA512 larval hashes
+ */
+void __init cc_hash_global_init(void)
+{
+ cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
+ cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
+}
+
+int cc_hash_alloc(struct cc_drvdata *drvdata)
+{
+ struct cc_hash_handle *hash_handle;
+ cc_sram_addr_t sram_buff;
+ u32 sram_size_to_alloc;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc = 0;
+ int alg;
+
+ hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
+ if (!hash_handle)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&hash_handle->hash_list);
+ drvdata->hash_handle = hash_handle;
+
+ sram_size_to_alloc = sizeof(digest_len_init) +
+#if (CC_DEV_SHA_MAX > 256)
+ sizeof(digest_len_sha512_init) +
+ sizeof(sha384_init) +
+ sizeof(sha512_init) +
+#endif
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init) +
+ sizeof(sha256_init);
+
+ sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
+ if (sram_buff == NULL_SRAM_ADDR) {
+ dev_err(dev, "SRAM pool exhausted\n");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ /* The initial digest-len offset */
+ hash_handle->digest_len_sram_addr = sram_buff;
+
+ /* Must be set before algorithm registration, as it is used there */
+ rc = cc_init_hash_sram(drvdata);
+ if (rc) {
+ dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
+ goto fail;
+ }
+
+ /* ahash registration */
+ for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
+ struct cc_hash_alg *t_alg;
+ int hw_mode = driver_hash[alg].hw_mode;
+
+ /* register hmac version */
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+ t_alg->drvdata = drvdata;
+
+ rc = crypto_register_ahash(&t_alg->ahash_alg);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
+ kfree(t_alg);
+ goto fail;
+ }
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+
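+ /* XCBC and CMAC are MAC-only; skip the plain hash registration */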
+ if (hw_mode == DRV_CIPHER_XCBC_MAC ||
+ hw_mode == DRV_CIPHER_CMAC)
+ continue;
+
+ /* register hash version */
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+ t_alg->drvdata = drvdata;
+
+ rc = crypto_register_ahash(&t_alg->ahash_alg);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
+ kfree(t_alg);
+ goto fail;
+ }
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+ }
+
+ return 0;
+
+fail:
+ cc_hash_free(drvdata); /* also unregisters any algs already added */
+ return rc;
+}
+
+int cc_hash_free(struct cc_drvdata *drvdata)
+{
+ struct cc_hash_alg *t_hash_alg, *hash_n;
+ struct cc_hash_handle *hash_handle = drvdata->hash_handle;
+
+ if (hash_handle) {
+ list_for_each_entry_safe(t_hash_alg, hash_n,
+ &hash_handle->hash_list, entry) {
+ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+ list_del(&t_hash_alg->entry);
+ kfree(t_hash_alg);
+ }
+
+ kfree(hash_handle);
+ drvdata->hash_handle = NULL;
+ }
+ return 0;
+}
+
+static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct ahash_req_ctx *state = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ /* Setup XCBC MAC K1 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
+ XCBC_MAC_K1_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Setup XCBC MAC K2 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Setup XCBC MAC K3 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Loading MAC state */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ *seq_size = idx;
+}
+
+static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct ahash_req_ctx *state = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ /* Setup CMAC Key */
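+ /* (192-bit keys are stored zero-padded to the maximum AES key size) */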
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
+ ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
+ ctx->key_params.keylen), NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], ctx->key_params.keylen);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Load MAC state */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], ctx->key_params.keylen);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ *seq_size = idx;
+}
+
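+/*
+ * Build the data-in descriptor(s): a single DLLI descriptor for
+ * contiguous data, or a BYPASS copy of the MLLI table into SRAM
+ * followed by an MLLI-driven processing descriptor.
+ */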
+static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
+ struct cc_hash_ctx *ctx, unsigned int flow_mode,
+ struct cc_hw_desc desc[], bool is_not_last_data,
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ sg_dma_address(areq_ctx->curr_sg),
+ areq_ctx->curr_sg->length, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ idx++;
+ } else {
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+ dev_dbg(dev, " NULL mode\n");
+ /* nothing to build */
+ return;
+ }
+ /* bypass */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_len, NS_BIT);
+ set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
+ areq_ctx->mlli_params.mlli_len);
+ set_flow_mode(&desc[idx], BYPASS);
+ idx++;
+ /* process */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_MLLI,
+ ctx->drvdata->mlli_sram_addr,
+ areq_ctx->mlli_nents, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ idx++;
+ }
+ if (is_not_last_data)
+ set_din_not_last_indication(&desc[(idx - 1)]);
+ /* return updated desc sequence size */
+ *seq_size = idx;
+}
+
+static const void *cc_larval_digest(struct device *dev, u32 mode)
+{
+ switch (mode) {
+ case DRV_HASH_MD5:
+ return md5_init;
+ case DRV_HASH_SHA1:
+ return sha1_init;
+ case DRV_HASH_SHA224:
+ return sha224_init;
+ case DRV_HASH_SHA256:
+ return sha256_init;
+#if (CC_DEV_SHA_MAX > 256)
+ case DRV_HASH_SHA384:
+ return sha384_init;
+ case DRV_HASH_SHA512:
+ return sha512_init;
+#endif
+ default:
+ dev_err(dev, "Invalid hash mode (%d)\n", mode);
+ return md5_init; /* fall back to a valid pointer */
+ }
+}
+
+/*!
+ * Gets the address of the initial digest in SRAM
+ * according to the given hash mode
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 The address of the initial digest in SRAM
+ */
+cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
+{
+ struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
+ struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
+ struct device *dev = drvdata_to_dev(_drvdata);
+
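+ /* Offsets follow the back-to-back layout written by cc_init_hash_sram() */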
+ switch (mode) {
+ case DRV_HASH_NULL:
+ break; /*Ignore*/
+ case DRV_HASH_MD5:
+ return (hash_handle->larval_digest_sram_addr);
+ case DRV_HASH_SHA1:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init));
+ case DRV_HASH_SHA224:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init));
+ case DRV_HASH_SHA256:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init));
+#if (CC_DEV_SHA_MAX > 256)
+ case DRV_HASH_SHA384:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init) +
+ sizeof(sha256_init));
+ case DRV_HASH_SHA512:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init) +
+ sizeof(sha256_init) +
+ sizeof(sha384_init));
+#endif
+ default:
+ dev_err(dev, "Invalid hash mode (%d)\n", mode);
+ }
+
+ /* Fall back to a valid (if incorrect) address to avoid a crash */
+ return hash_handle->larval_digest_sram_addr;
+}
+
+cc_sram_addr_t
+cc_digest_len_addr(void *drvdata, u32 mode)
+{
+ struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
+ struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
+ cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
+
+ switch (mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA224:
+ case DRV_HASH_SHA256:
+ case DRV_HASH_MD5:
+ return digest_len_addr;
+#if (CC_DEV_SHA_MAX > 256)
+ case DRV_HASH_SHA384:
+ case DRV_HASH_SHA512:
+ return digest_len_addr + sizeof(digest_len_init);
+#endif
+ default:
+ return digest_len_addr; /* fall back to avoid a crash */
+ }
+}
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
new file mode 100644
index 000000000000..a444710d8f08
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_hash.h
+ * ARM CryptoCell Hash Crypto API
+ */
+
+#ifndef __CC_HASH_H__
+#define __CC_HASH_H__
+
+#include "cc_buffer_mgr.h"
+
+#define HMAC_IPAD_CONST 0x36363636
+#define HMAC_OPAD_CONST 0x5C5C5C5C
+#if (CC_DEV_SHA_MAX > 256)
+#define HASH_LEN_SIZE 16
+#define CC_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define CC_MAX_HASH_BLCK_SIZE SHA512_BLOCK_SIZE
+#else
+#define HASH_LEN_SIZE 8
+#define CC_MAX_HASH_DIGEST_SIZE SHA256_DIGEST_SIZE
+#define CC_MAX_HASH_BLCK_SIZE SHA256_BLOCK_SIZE
+#endif
+
+#define XCBC_MAC_K1_OFFSET 0
+#define XCBC_MAC_K2_OFFSET 16
+#define XCBC_MAC_K3_OFFSET 32
+
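+/* Magic prefix guarding state blobs produced by cc_hash_export() */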
+#define CC_EXPORT_MAGIC 0xC2EE1070U
+
+/* this struct was taken from drivers/crypto/nx/nx-aes-xcbc.c and it is used
+ * for xcbc/cmac statesize
+ */
+struct aeshash_state {
+ u8 state[AES_BLOCK_SIZE];
+ unsigned int count;
+ u8 buffer[AES_BLOCK_SIZE];
+};
+
+/* ahash state */
+struct ahash_req_ctx {
+ u8 buffers[2][CC_MAX_HASH_BLCK_SIZE] ____cacheline_aligned;
+ u8 digest_result_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 opad_digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 digest_bytes_len[HASH_LEN_SIZE] ____cacheline_aligned;
+ struct async_gen_req_ctx gen_ctx ____cacheline_aligned;
+ enum cc_req_dma_buf_type data_dma_buf_type;
+ dma_addr_t opad_digest_dma_addr;
+ dma_addr_t digest_buff_dma_addr;
+ dma_addr_t digest_bytes_len_dma_addr;
+ dma_addr_t digest_result_dma_addr;
+ u32 buf_cnt[2];
+ u32 buff_index;
+ u32 xcbc_count; /* count xcbc update operations */
+ struct scatterlist buff_sg[2];
+ struct scatterlist *curr_sg;
+ u32 in_nents;
+ u32 mlli_nents;
+ struct mlli_params mlli_params;
+};
+
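+/*
+ * Partial blocks are double-buffered: buff_index selects the active
+ * staging buffer, and XOR-ing it with 1 yields the other ("next") one.
+ */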
+static inline u32 *cc_hash_buf_cnt(struct ahash_req_ctx *state)
+{
+ return &state->buf_cnt[state->buff_index];
+}
+
+static inline u8 *cc_hash_buf(struct ahash_req_ctx *state)
+{
+ return state->buffers[state->buff_index];
+}
+
+static inline u32 *cc_next_buf_cnt(struct ahash_req_ctx *state)
+{
+ return &state->buf_cnt[state->buff_index ^ 1];
+}
+
+static inline u8 *cc_next_buf(struct ahash_req_ctx *state)
+{
+ return state->buffers[state->buff_index ^ 1];
+}
+
+int cc_hash_alloc(struct cc_drvdata *drvdata);
+int cc_init_hash_sram(struct cc_drvdata *drvdata);
+int cc_hash_free(struct cc_drvdata *drvdata);
+
+/*!
+ * Gets the address of the initial digest length in SRAM
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 The address of the initial digest length in SRAM
+ */
+cc_sram_addr_t
+cc_digest_len_addr(void *drvdata, u32 mode);
+
+/*!
+ * Gets the address of the initial digest in SRAM
+ * according to the given hash mode
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 The address of the initial digest in SRAM
+ */
+cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
+
+void cc_hash_global_init(void);
+
+#endif /*__CC_HASH_H__*/
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index b2d35e030b49..d990f472e89f 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -9,6 +9,7 @@
#include "cc_request_mgr.h"
#include "cc_sram_mgr.h"
#include "cc_ivgen.h"
+#include "cc_hash.h"
#include "cc_pm.h"
#define POWER_DOWN_ENABLE 0x01
@@ -61,6 +62,9 @@ int cc_pm_resume(struct device *dev)
return rc;
}
+ /* must be after the queue resuming as it uses the HW queue */
+ cc_init_hash_sram(drvdata);
+
cc_init_iv_sram(drvdata);
return 0;
}