Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/bfin_crc.c | 2
-rw-r--r--  drivers/crypto/caam/caamalg.c | 1904
-rw-r--r--  drivers/crypto/caam/compat.h | 1
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 2
-rw-r--r--  drivers/crypto/caam/error.c | 25
-rw-r--r--  drivers/crypto/caam/jr.c | 3
-rw-r--r--  drivers/crypto/nx/nx-aes-cbc.c | 12
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c | 61
-rw-r--r--  drivers/crypto/nx/nx-aes-ctr.c | 13
-rw-r--r--  drivers/crypto/nx/nx-aes-ecb.c | 12
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c | 66
-rw-r--r--  drivers/crypto/nx/nx-aes-xcbc.c | 81
-rw-r--r--  drivers/crypto/nx/nx-sha256.c | 208
-rw-r--r--  drivers/crypto/nx/nx-sha512.c | 222
-rw-r--r--  drivers/crypto/nx/nx.c | 127
-rw-r--r--  drivers/crypto/nx/nx.h | 8
-rw-r--r--  drivers/crypto/padlock-aes.c | 2
-rw-r--r--  drivers/crypto/padlock-sha.c | 8
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c | 3
-rw-r--r--  drivers/crypto/qat/qat_common/adf_dev_mgr.c | 6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.c | 15
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_access_macros.h | 9
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 37
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 3
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 2
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_isr.c | 12
-rw-r--r--  drivers/crypto/sahara.c | 794
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 6
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c | 10
31 files changed, 3074 insertions(+), 584 deletions(-)
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index b099e33cb073..e96eddc0e0b3 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -21,13 +21,13 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
-#include <linux/unaligned/access_ok.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <asm/unaligned.h>
#include <asm/dma.h>
#include <asm/portmux.h>
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index a80ea853701d..3187400daf31 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -60,6 +60,7 @@
#define CAAM_CRA_PRIORITY 3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
+ CTR_RFC3686_NONCE_SIZE + \
SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH 16
@@ -70,17 +71,34 @@
#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+/* Note: Nonce is counted in enckeylen */
+#define DESC_AEAD_CTR_RFC3686_LEN (6 * CAAM_CMD_SZ)
+
#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
+#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 19 * CAAM_CMD_SZ)
+
+#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
+#define DESC_RFC4106_GIVENC_LEN (DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)
+
+#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ)
+#define DESC_RFC4543_GIVENC_LEN (DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ)
+
#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
15 * CAAM_CMD_SZ)
-#define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \
+#define DESC_MAX_USED_BYTES (DESC_RFC4543_GIVENC_LEN + \
CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
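The DESC_*_LEN bookkeeping above exists because the shared descriptor, the job descriptor I/O commands and any inlined key material must all fit in the SEC's 64-word descriptor buffer (256 bytes with CAAM_CMD_SZ == 4). A minimal sketch of the "keys fit inline" test these totals feed into, assuming the constants above and CAAM_DESC_BYTES_MAX / DESC_JOB_IO_LEN from the driver headers (illustration only, not part of the patch):

/* Sketch: the keys_fit_inline checks in the *_set_sh_desc() routines
 * below all reduce to this comparison. */
static inline bool caam_keys_fit_inline(unsigned int sh_desc_bytes,
					unsigned int key_bytes)
{
	/* CAAM_DESC_BYTES_MAX == CAAM_CMD_SZ * 64 == 256 bytes */
	return sh_desc_bytes + DESC_JOB_IO_LEN + key_bytes <=
	       CAAM_DESC_BYTES_MAX;
}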
@@ -128,11 +146,13 @@ static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
/*
* For aead encrypt and decrypt, read iv for both classes
*/
-static inline void aead_append_ld_iv(u32 *desc, int ivsize)
+static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
{
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | ivsize);
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ivoffset << LDST_OFFSET_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+ (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
}
/*
@@ -178,35 +198,60 @@ struct caam_ctx {
};
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline)
+ int keys_fit_inline, bool is_rfc3686)
{
+ u32 *nonce;
+ unsigned int enckeylen = ctx->enckeylen;
+
+ /*
+ * RFC3686 specific:
+ * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
+ * | enckeylen = encryption key size + nonce size
+ */
+ if (is_rfc3686)
+ enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
if (keys_fit_inline) {
append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
ctx->split_key_len, CLASS_2 |
KEY_DEST_MDHA_SPLIT | KEY_ENC);
append_key_as_imm(desc, (void *)ctx->key +
- ctx->split_key_pad_len, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ ctx->split_key_pad_len, enckeylen,
+ enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
} else {
append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
KEY_DEST_MDHA_SPLIT | KEY_ENC);
append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ }
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686) {
+ nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
+ enckeylen);
+ append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc,
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
}
}
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline)
+ int keys_fit_inline, bool is_rfc3686)
{
u32 *key_jump_cmd;
- init_sh_desc(desc, HDR_SHARE_SERIAL);
+ /* Note: Context registers are saved. */
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- append_key_aead(desc, ctx, keys_fit_inline);
+ append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
set_jump_tgt_here(desc, key_jump_cmd);
}
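For reference, RFC 3686 defines the AES-CTR counter block as the 4-byte nonce, the 8-byte per-packet IV and a 4-byte block counter that starts at 1. That layout is why the code above parks the nonce at byte offset 16 of CONTEXT1 and why the descriptors later write an initial counter of 1 immediately after the IV. A host-side sketch of the same 16-byte block (illustration only, not driver code):

#include <stdint.h>
#include <string.h>

/* Build the RFC 3686 counter block: {nonce, IV, counter = 1}. */
static void rfc3686_ctr_block(uint8_t block[16], const uint8_t nonce[4],
			      const uint8_t iv[8])
{
	memcpy(block, nonce, 4);	/* CTR_RFC3686_NONCE_SIZE */
	memcpy(block + 4, iv, 8);	/* CTR_RFC3686_IV_SIZE */
	/* 32-bit big-endian block counter, initialised to 1 */
	block[12] = 0;
	block[13] = 0;
	block[14] = 0;
	block[15] = 1;
}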
@@ -406,10 +451,17 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
{
struct aead_tfm *tfm = &aead->base.crt_aead;
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
+ const char *alg_name = crypto_tfm_alg_name(ctfm);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
+ bool keys_fit_inline;
u32 geniv, moveiv;
+ u32 ctx1_iv_off = 0;
u32 *desc;
+ const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = (ctr_mode &&
+ (strstr(alg_name, "rfc3686") != NULL));
if (!ctx->authsize)
return 0;
@@ -419,18 +471,36 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
return aead_null_set_sh_desc(aead);
/*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ */
+ if (is_rfc3686)
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+
+ /*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
+ keys_fit_inline = false;
if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen <=
+ ctx->split_key_pad_len + ctx->enckeylen +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
/* Class 2 operation */
append_operation(desc, ctx->class2_alg_type |
@@ -448,7 +518,15 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* read assoc before reading payload */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
KEY_VLF);
- aead_append_ld_iv(desc, tfm->ivsize);
+ aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Class 1 operation */
append_operation(desc, ctx->class1_alg_type |
@@ -482,14 +560,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
*/
keys_fit_inline = false;
if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen <=
+ ctx->split_key_pad_len + ctx->enckeylen +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
/* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
/* Class 2 operation */
append_operation(desc, ctx->class2_alg_type |
@@ -506,9 +586,22 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
KEY_VLF);
- aead_append_ld_iv(desc, tfm->ivsize);
+ aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
- append_dec_op1(desc, ctx->class1_alg_type);
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctr_mode)
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, ctx->class1_alg_type);
/* Read and write cryptlen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
@@ -538,14 +631,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
*/
keys_fit_inline = false;
if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen <=
+ ctx->split_key_pad_len + ctx->enckeylen +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
/* aead_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
/* Generate IV */
geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
@@ -554,13 +649,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_SRC_INFIFO |
- MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
/* Copy IV to class 1 context */
- append_move(desc, MOVE_SRC_CLASS1CTX |
- MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
/* Return to encryption */
append_operation(desc, ctx->class2_alg_type |
@@ -576,7 +674,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
KEY_VLF);
- /* Copy iv from class 1 ctx to class 2 fifo*/
+ /* Copy iv from outfifo to class 2 fifo */
moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
@@ -584,6 +682,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
/* Class 1 operation */
append_operation(desc, ctx->class1_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
@@ -630,6 +736,912 @@ static int aead_setauthsize(struct crypto_aead *authenc,
return 0;
}
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *zero_payload_jump_cmd,
+ *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
+ u32 *desc;
+
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES GCM encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if they are loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD | JUMP_COND_SELF);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen + cryptlen = seqinlen - ivsize */
+ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);
+
+ /* if cryptlen is ZERO jump to zero-payload commands */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+ /* read IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* jump the zero-payload commands */
+ append_jump(desc, JUMP_TEST_ALL | 7);
+
+ /* zero-payload commands */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* if assoclen is ZERO, jump to IV reading - is the only input data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+ /* read IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+
+ /* jump to ICV writing */
+ append_jump(desc, JUMP_TEST_ALL | 2);
+
+ /* read IV - is the only input data */
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
+ FIFOLD_TYPE_LAST1);
+
+ /* write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_dec;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if they are loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD |
+ JUMP_COND_SELF);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+ ctx->authsize + tfm->ivsize);
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);
+
+ /* read IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+ /* if assoclen is ZERO, skip reading assoc data */
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /* store encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* jump the zero-payload commands */
+ append_jump(desc, JUMP_TEST_ALL | 4);
+
+ /* zero-payload command */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* if assoclen is ZERO, jump to ICV reading */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+
+ /* read ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
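The MATH commands in the two descriptors above recover the individual lengths purely from the sequence lengths supplied by the job descriptor: on encrypt the input is IV || AAD || plaintext and the output is ciphertext || ICV. A plain-C restatement of that arithmetic, mirroring the comments above (sketch only, not part of the patch):

struct gcm_enc_lens {
	unsigned int cryptlen;
	unsigned int assoclen;
};

/* Same derivation the encrypt descriptor performs in REG1/REG2/REG3. */
static struct gcm_enc_lens gcm_enc_split_lens(unsigned int seqinlen,
					      unsigned int seqoutlen,
					      unsigned int ivsize,
					      unsigned int authsize)
{
	struct gcm_enc_lens l;

	l.cryptlen = seqoutlen - authsize;		/* REG3 */
	l.assoclen = (seqinlen - ivsize) - l.cryptlen;	/* REG1 */
	return l;
}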
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ gcm_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
+ u32 *desc;
+ u32 geniv;
+
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * RFC4106 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* assoclen + cryptlen = seqinlen - ivsize */
+ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+
+ /* Read Salt */
+ append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
+ 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
+ /* Read AES-GCM-ESP IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Will read cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_dec;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+ ctx->authsize + tfm->ivsize);
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /* Will write cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read Salt */
+ append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
+ 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
+ /* Read AES-GCM-ESP IV */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Will read cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read encrypted data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* rfc4106_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy generated IV to OFIFO */
+ write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen = seqinlen - (ivsize + cryptlen) */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
+
+ /* Read Salt and generated IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
+ FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
+ /* Append Salt */
+ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+ set_move_tgt_here(desc, move_cmd);
+ set_move_tgt_here(desc, write_iv_cmd);
+ /* Blank commands. Will be overwritten by generated IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* No need to reload iv */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store generated IV and encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
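As the salt/IV loads above imply, rfc4106 key material carries a 4-byte salt after the AES key (see rfc4106_setkey() further down), and the 12-byte GCM nonce for each packet is that salt followed by the 8-byte explicit IV carried in the ESP payload. A sketch of the nonce construction (illustration only, not driver code):

#include <stdint.h>
#include <string.h>

/* RFC 4106: nonce = salt (from key material) || explicit per-packet IV. */
static void rfc4106_build_nonce(uint8_t nonce[12], const uint8_t salt[4],
				const uint8_t explicit_iv[8])
{
	memcpy(nonce, salt, 4);
	memcpy(nonce + 4, explicit_iv, 8);
}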
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4106_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd;
+ u32 *read_move_cmd, *write_move_cmd;
+ u32 *desc;
+ u32 geniv;
+
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * RFC4543 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Load AES-GMAC ESP IV into Math1 register */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
+ LDST_CLASS_DECO | tfm->ivsize);
+
+ /* Wait the DMA transaction to finish */
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* Overwrite blank immediate AES-GMAC ESP IV data */
+ write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Overwrite blank immediate AAD data */
+ write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen = (seqinlen - ivsize) - cryptlen */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* Read Salt and AES-GMAC ESP IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+ /* Append Salt */
+ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+ set_move_tgt_here(desc, write_iv_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD);
+
+ /* Will read cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Will write cryptlen bytes */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Authenticate AES-GMAC ESP IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_AAD | tfm->ivsize);
+ set_move_tgt_here(desc, write_aad_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_dec;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* Load AES-GMAC ESP IV into Math1 register */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
+ LDST_CLASS_DECO | tfm->ivsize);
+
+ /* Wait the DMA transaction to finish */
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize);
+
+ /* Overwrite blank immediate AES-GMAC ESP IV data */
+ write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Overwrite blank immediate AAD data */
+ write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read Salt and AES-GMAC ESP IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+ /* Append Salt */
+ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+ set_move_tgt_here(desc, write_iv_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD);
+
+ /* Will read cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /* Will write cryptlen bytes */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /* Authenticate AES-GMAC ESP IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_AAD | tfm->ivsize);
+ set_move_tgt_here(desc, write_aad_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* In-snoop cryptlen data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ keys_fit_inline = false;
+ if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN +
+ ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* rfc4543_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, ctx->key_dma, ctx->enckeylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move generated IV to Math1 register */
+ append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Overwrite blank immediate AES-GMAC IV data */
+ write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Overwrite blank immediate AAD data */
+ write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Copy generated IV to OFIFO */
+ append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO |
+ (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen = seqinlen - (ivsize + cryptlen) */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read Salt and AES-GMAC generated IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+ /* Append Salt */
+ append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+ set_move_tgt_here(desc, write_iv_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC generated IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* No need to reload iv */
+ append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Authenticate AES-GMAC IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_AAD | tfm->ivsize);
+ set_move_tgt_here(desc, write_aad_cmd);
+ /* Blank commands. Will be overwritten by AES-GMAC IV. */
+ append_cmd(desc, 0x00000000);
+ append_cmd(desc, 0x00000000);
+ /* End of blank commands */
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 givenc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4543_set_sh_desc(authenc);
+
+ return 0;
+}
+
static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
u32 authkeylen)
{
@@ -703,20 +1715,154 @@ badkey:
return -EINVAL;
}
+static int gcm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+ ctx->enckeylen = keylen;
+
+ ret = gcm_set_sh_desc(aead);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ }
+
+ return ret;
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->enckeylen = keylen - 4;
+
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+
+ ret = rfc4106_set_sh_desc(aead);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ }
+
+ return ret;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->enckeylen = keylen - 4;
+
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+
+ ret = rfc4543_set_sh_desc(aead);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+ DMA_TO_DEVICE);
+ }
+
+ return ret;
+}
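The ablkcipher_setkey() changes that follow rely on the standard rfc3686 key convention: the caller passes the raw AES key with the 4-byte nonce appended, so the code strips CTR_RFC3686_NONCE_SIZE off keylen and loads the trailing nonce into CONTEXT1 separately. A caller-side sketch for "rfc3686(ctr(aes))" with AES-128 (illustration only, not part of the patch; error handling omitted):

#include <linux/crypto.h>
#include <linux/string.h>
#include <crypto/ctr.h>

/* Hand a 16-byte AES key plus 4-byte nonce to the rfc3686 template. */
static int example_rfc3686_setkey(struct crypto_ablkcipher *tfm,
				  const u8 aes_key[16], const u8 nonce[4])
{
	u8 keymat[16 + CTR_RFC3686_NONCE_SIZE];	/* {KEY, NONCE} */

	memcpy(keymat, aes_key, 16);
	memcpy(keymat + 16, nonce, CTR_RFC3686_NONCE_SIZE);
	return crypto_ablkcipher_setkey(tfm, keymat, sizeof(keymat));
}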
+
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const u8 *key, unsigned int keylen)
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
+ struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
struct device *jrdev = ctx->jrdev;
int ret = 0;
u32 *key_jump_cmd;
u32 *desc;
+ u32 *nonce;
+ u32 geniv;
+ u32 ctx1_iv_off = 0;
+ const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = (ctr_mode &&
+ (strstr(alg_name, "rfc3686") != NULL));
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
memcpy(ctx->key, key, keylen);
ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
@@ -729,7 +1875,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_SERIAL);
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
@@ -739,11 +1885,31 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->enckeylen, CLASS_1 |
KEY_DEST_CLASS_REG);
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ nonce = (u32 *)(key + keylen);
+ append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
set_jump_tgt_here(desc, key_jump_cmd);
/* Load iv */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | tfm->ivsize);
+ append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Load operation */
append_operation(desc, ctx->class1_alg_type |
@@ -768,7 +1934,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- init_sh_desc(desc, HDR_SHARE_SERIAL);
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
@@ -778,14 +1944,38 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->enckeylen, CLASS_1 |
KEY_DEST_CLASS_REG);
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ nonce = (u32 *)(key + keylen);
+ append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
set_jump_tgt_here(desc, key_jump_cmd);
/* load IV */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | tfm->ivsize);
+ append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Choose operation */
- append_dec_op1(desc, ctx->class1_alg_type);
+ if (ctr_mode)
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, ctx->class1_alg_type);
/* Perform operation */
ablkcipher_append_src_dst(desc);
@@ -804,6 +1994,83 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
+ /* ablkcipher_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ /* Load Nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ nonce = (u32 *)(key + keylen);
+ append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_INFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (crt->ivsize << MOVE_LEN_SHIFT) |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy generated IV to memory */
+ append_seq_store(desc, crt->ivsize,
+ LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_u32(desc, (u32)1, LDST_IMM |
+ LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ if (ctx1_iv_off)
+ append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+ ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
return ret;
}
@@ -1088,6 +2355,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
u32 out_options = 0, in_options;
dma_addr_t dst_dma, src_dma;
int len, sec4_sg_index = 0;
+ bool is_gcm = false;
#ifdef DEBUG
debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1106,11 +2374,19 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
desc_bytes(sh_desc), 1);
#endif
+ if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_AES) &&
+ ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+ is_gcm = true;
+
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (all_contig) {
- src_dma = sg_dma_address(req->assoc);
+ if (is_gcm)
+ src_dma = edesc->iv_dma;
+ else
+ src_dma = sg_dma_address(req->assoc);
in_options = 0;
} else {
src_dma = edesc->sec4_sg_dma;
@@ -1164,6 +2440,7 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
u32 out_options = 0, in_options;
dma_addr_t dst_dma, src_dma;
int len, sec4_sg_index = 0;
+ bool is_gcm = false;
#ifdef DEBUG
debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1181,11 +2458,19 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
desc_bytes(sh_desc), 1);
#endif
+ if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_AES) &&
+ ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+ is_gcm = true;
+
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (contig & GIV_SRC_CONTIG) {
- src_dma = sg_dma_address(req->assoc);
+ if (is_gcm)
+ src_dma = edesc->iv_dma;
+ else
+ src_dma = sg_dma_address(req->assoc);
in_options = 0;
} else {
src_dma = edesc->sec4_sg_dma;
@@ -1200,7 +2485,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
} else {
if (likely(req->src == req->dst)) {
dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
- edesc->assoc_nents;
+ (edesc->assoc_nents +
+ (is_gcm ? 1 + edesc->src_nents : 0));
out_options = LDST_SGF;
} else {
dst_dma = edesc->sec4_sg_dma +
@@ -1272,6 +2558,54 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
}
/*
+ * Fill in ablkcipher givencrypt job descriptor
+ */
+static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req,
+ bool iv_contig)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ u32 *desc = edesc->hw_desc;
+ u32 out_options, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, sec4_sg_index = 0;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->nbytes, 1);
+#endif
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (!edesc->src_nents) {
+ src_dma = sg_dma_address(req->src);
+ in_options = 0;
+ } else {
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += edesc->src_nents;
+ in_options = LDST_SGF;
+ }
+ append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
+
+ if (iv_contig) {
+ dst_dma = edesc->iv_dma;
+ out_options = 0;
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
+ }
+ append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
+}
+
+/*
* allocate and map the aead extended descriptor
*/
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
@@ -1292,6 +2626,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
int ivsize = crypto_aead_ivsize(aead);
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
unsigned int authsize = ctx->authsize;
+ bool is_gcm = false;
assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
@@ -1326,15 +2661,31 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
return ERR_PTR(-ENOMEM);
}
- /* Check if data are contiguous */
- if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
- iv_dma || src_nents || iv_dma + ivsize !=
- sg_dma_address(req->src)) {
- all_contig = false;
+ if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_AES) &&
+ ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+ is_gcm = true;
+
+ /*
+ * Check if data are contiguous.
+ * GCM expected input sequence: IV, AAD, text
+ * All others - expected input sequence: AAD, IV, text
+ */
+ if (is_gcm)
+ all_contig = (!assoc_nents &&
+ iv_dma + ivsize == sg_dma_address(req->assoc) &&
+ !src_nents && sg_dma_address(req->assoc) +
+ req->assoclen == sg_dma_address(req->src));
+ else
+ all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
+ req->assoclen == iv_dma && !src_nents &&
+ iv_dma + ivsize == sg_dma_address(req->src));
+ if (!all_contig) {
assoc_nents = assoc_nents ? : 1;
src_nents = src_nents ? : 1;
sec4_sg_len = assoc_nents + 1 + src_nents;
}
+
sec4_sg_len += dst_nents;
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
@@ -1361,14 +2712,26 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_index = 0;
if (!all_contig) {
- sg_to_sec4_sg(req->assoc,
- (assoc_nents ? : 1),
- edesc->sec4_sg +
- sec4_sg_index, 0);
- sec4_sg_index += assoc_nents ? : 1;
+ if (!is_gcm) {
+ sg_to_sec4_sg(req->assoc,
+ (assoc_nents ? : 1),
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents ? : 1;
+ }
+
dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
iv_dma, ivsize, 0);
sec4_sg_index += 1;
+
+ if (is_gcm) {
+ sg_to_sec4_sg(req->assoc,
+ (assoc_nents ? : 1),
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents ? : 1;
+ }
+
sg_to_sec4_sg_last(req->src,
(src_nents ? : 1),
edesc->sec4_sg +
@@ -1490,6 +2853,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
int ivsize = crypto_aead_ivsize(aead);
bool assoc_chained = false, src_chained = false, dst_chained = false;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ bool is_gcm = false;
assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
src_nents = sg_count(req->src, req->cryptlen, &src_chained);
@@ -1516,24 +2880,53 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
return ERR_PTR(-ENOMEM);
}
- /* Check if data are contiguous */
- if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
- iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
- contig &= ~GIV_SRC_CONTIG;
+ if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_AES) &&
+ ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+ is_gcm = true;
+
+ /*
+ * Check if data are contiguous.
+ * GCM expected input sequence: IV, AAD, text
+ * All others - expected input sequence: AAD, IV, text
+ */
+
+ if (is_gcm) {
+ if (assoc_nents || iv_dma + ivsize !=
+ sg_dma_address(req->assoc) || src_nents ||
+ sg_dma_address(req->assoc) + req->assoclen !=
+ sg_dma_address(req->src))
+ contig &= ~GIV_SRC_CONTIG;
+ } else {
+ if (assoc_nents ||
+ sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
+ src_nents || iv_dma + ivsize != sg_dma_address(req->src))
+ contig &= ~GIV_SRC_CONTIG;
+ }
+
if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
contig &= ~GIV_DST_CONTIG;
- if (unlikely(req->src != req->dst)) {
- dst_nents = dst_nents ? : 1;
- sec4_sg_len += 1;
- }
+
if (!(contig & GIV_SRC_CONTIG)) {
assoc_nents = assoc_nents ? : 1;
src_nents = src_nents ? : 1;
sec4_sg_len += assoc_nents + 1 + src_nents;
- if (likely(req->src == req->dst))
+ if (req->src == req->dst &&
+ (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
contig &= ~GIV_DST_CONTIG;
}
- sec4_sg_len += dst_nents;
+
+ /*
+ * Add new sg entries for GCM output sequence.
+ * Expected output sequence: IV, encrypted text.
+ */
+ if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
+ sec4_sg_len += 1 + src_nents;
+
+ if (unlikely(req->src != req->dst)) {
+ dst_nents = dst_nents ? : 1;
+ sec4_sg_len += 1 + dst_nents;
+ }
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
@@ -1559,18 +2952,36 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sec4_sg_index = 0;
if (!(contig & GIV_SRC_CONTIG)) {
- sg_to_sec4_sg(req->assoc, assoc_nents,
- edesc->sec4_sg +
- sec4_sg_index, 0);
- sec4_sg_index += assoc_nents;
+ if (!is_gcm) {
+ sg_to_sec4_sg(req->assoc, assoc_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents;
+ }
+
dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
iv_dma, ivsize, 0);
sec4_sg_index += 1;
+
+ if (is_gcm) {
+ sg_to_sec4_sg(req->assoc, assoc_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents;
+ }
+
sg_to_sec4_sg_last(req->src, src_nents,
edesc->sec4_sg +
sec4_sg_index, 0);
sec4_sg_index += src_nents;
}
+
+ if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+ sec4_sg_index += 1;
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
+
if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
iv_dma, ivsize, 0);
@@ -1814,6 +3225,151 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
return ret;
}
+/*
+ * allocate and map the ablkcipher extended descriptor
+ * for ablkcipher givencrypt
+ */
+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ struct skcipher_givcrypt_request *greq,
+ int desc_bytes,
+ bool *iv_contig_out)
+{
+ struct ablkcipher_request *req = &greq->creq;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, dst_nents = 0, sec4_sg_bytes;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ bool iv_contig = false;
+ int sgc;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ bool src_chained = false, dst_chained = false;
+ int sec4_sg_index;
+
+ src_nents = sg_count(req->src, req->nbytes, &src_chained);
+
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
+ } else {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, src_chained);
+ sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE, dst_chained);
+ }
+
+ /*
+ * Check if iv can be contiguous with source and destination.
+ * If so, include it. If not, create a scatterlist.
+ */
+ iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
+ iv_contig = true;
+ else
+ dst_nents = dst_nents ? : 1;
+ sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(*edesc) + desc_bytes +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->src_chained = src_chained;
+ edesc->dst_nents = dst_nents;
+ edesc->dst_chained = dst_chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+ desc_bytes;
+
+ sec4_sg_index = 0;
+ if (src_nents) {
+ sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+ sec4_sg_index += src_nents;
+ }
+
+ if (!iv_contig) {
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+ sec4_sg_index += 1;
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+ sec4_sg_bytes, 1);
+#endif
+
+ *iv_contig_out = iv_contig;
+ return edesc;
+}
+
+static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+{
+ struct ablkcipher_request *req = &creq->creq;
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ bool iv_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &iv_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Create and submit job descriptor */
+ init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
+ edesc, req, iv_contig);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
#define template_aead template_u.aead
#define template_ablkcipher template_u.ablkcipher
struct caam_alg_template {
@@ -2309,17 +3865,188 @@ static struct caam_alg_template driver_algs[] = {
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
+ {
+ .name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "rfc4106(gcm(aes))",
+ .driver_name = "rfc4106-gcm-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ {
+ .name = "rfc4543(gcm(aes))",
+ .driver_name = "rfc4543-gcm-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = rfc4543_setkey,
+ .setauthsize = rfc4543_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ /* Galois Counter Mode */
+ {
+ .name = "gcm(aes)",
+ .driver_name = "gcm-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = NULL,
+ .geniv = "<built-in>",
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
/* ablkcipher descriptor */
{
.name = "cbc(aes)",
.driver_name = "cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
.template_ablkcipher = {
.setkey = ablkcipher_setkey,
.encrypt = ablkcipher_encrypt,
.decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -2330,12 +4057,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "cbc(des3_ede)",
.driver_name = "cbc-3des-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
.template_ablkcipher = {
.setkey = ablkcipher_setkey,
.encrypt = ablkcipher_encrypt,
.decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -2346,17 +4074,53 @@ static struct caam_alg_template driver_algs[] = {
.name = "cbc(des)",
.driver_name = "cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
.template_ablkcipher = {
.setkey = ablkcipher_setkey,
.encrypt = ablkcipher_encrypt,
.decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
},
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "chainiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .name = "rfc3686(ctr(aes))",
+ .driver_name = "rfc3686-ctr-aes-caam",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
}
};
@@ -2457,6 +4221,10 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
template->type;
switch (template->type) {
+ case CRYPTO_ALG_TYPE_GIVCIPHER:
+ alg->cra_type = &crypto_givcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ break;
case CRYPTO_ALG_TYPE_ABLKCIPHER:
alg->cra_type = &crypto_ablkcipher_type;
alg->cra_ablkcipher = template->template_ablkcipher;
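A note on the class1_alg_type test used throughout the caamalg.c hunks above: the same two-condition check precedes every is_gcm assignment. As a minimal sketch (not part of the patch; the helper name is hypothetical, the masks are the ones the diff already uses), the test amounts to:

static inline bool caam_alg_is_gcm(u32 class1_alg_type)
{
	return (class1_alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES &&
	       (class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM;
}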
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index f227922cea38..acd7743e2603 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -28,6 +28,7 @@
#include <crypto/algapi.h>
#include <crypto/null.h>
#include <crypto/aes.h>
+#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 7eec20bb3849..9f79fd7bd4d7 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -192,6 +192,8 @@ static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
PRINT_POS; \
append_cmd(desc, CMD_##op | len | options); \
}
+
+APPEND_CMD_LEN(seq_load, SEQ_LOAD)
APPEND_CMD_LEN(seq_store, SEQ_STORE)
APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
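For reference, the APPEND_CMD_LEN(seq_load, SEQ_LOAD) line added above expands, per the macro body visible in this hunk, to roughly the inline below (a sketch with PRINT_POS elided, not verbatim preprocessor output):

static inline void append_seq_load(u32 *desc, unsigned int len, u32 options)
{
	append_cmd(desc, CMD_SEQ_LOAD | len | options);
}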
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 6531054a44c8..66d73bf54166 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -213,27 +213,36 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
void (*report_ssed)(struct device *jrdev, const u32 status,
const char *error);
const char *error;
- } status_src[] = {
+ } status_src[16] = {
{ NULL, "No error" },
{ NULL, NULL },
{ report_ccb_status, "CCB" },
{ report_jump_status, "Jump" },
{ report_deco_status, "DECO" },
- { NULL, NULL },
+ { NULL, "Queue Manager Interface" },
{ report_jr_status, "Job Ring" },
{ report_cond_code_status, "Condition Code" },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
+ { NULL, NULL },
};
u32 ssrc = status >> JRSTA_SSRC_SHIFT;
const char *error = status_src[ssrc].error;
/*
- * If there is no further error handling function, just
- * print the error code, error string and exit. Otherwise
- * call the handler function.
+ * If there is an error handling function, call it to report the error.
+ * Otherwise print the error source name.
*/
- if (!status_src[ssrc].report_ssed)
- dev_err(jrdev, "%08x: %s: \n", status, status_src[ssrc].error);
- else
+ if (status_src[ssrc].report_ssed)
status_src[ssrc].report_ssed(jrdev, status, error);
+ else if (error)
+ dev_err(jrdev, "%d: %s\n", ssrc, error);
+ else
+ dev_err(jrdev, "%d: unknown error source\n", ssrc);
}
EXPORT_SYMBOL(caam_jr_strstatus);
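The explicit [16] sizing is the point of this hunk: ssrc is taken from the top bits of the 32-bit status word, so it can index any value from 0 to 15, and the previous 8-entry initializer left the upper half of that range out of bounds. A hypothetical compile-time guard (not in the patch, assuming the source field is 4 bits wide as the 16-entry table implies) would be:

	BUILD_BUG_ON(ARRAY_SIZE(status_src) < 16);	/* ssrc can be 0..15 */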
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 4d18e27ffa9e..9207c907a128 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -181,8 +181,6 @@ static void caam_jr_dequeue(unsigned long devarg)
for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
sw_idx = (tail + i) & (JOBR_DEPTH - 1);
- smp_read_barrier_depends();
-
if (jrp->outring[hw_idx].desc ==
jrp->entinfo[sw_idx].desc_addr_dma)
break; /* found */
@@ -218,7 +216,6 @@ static void caam_jr_dequeue(unsigned long devarg)
if (sw_idx == tail) {
do {
tail = (tail + 1) & (JOBR_DEPTH - 1);
- smp_read_barrier_depends();
} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
jrp->entinfo[tail].desc_addr_dma == 0);
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index cc00b52306ba..a066cc3450ae 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -72,27 +72,19 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
- u32 max_sg_len;
int rc;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
-
if (enc)
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
else
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
do {
- to_process = min_t(u64, nbytes - processed,
- nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
- to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+ to_process = nbytes - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
+ rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
processed, csbcpb->cpb.aes_cbc.iv);
if (rc)
goto out;
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 5ecd4c2414aa..67f80813a06f 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -181,6 +181,7 @@ static int generate_pat(u8 *iv,
unsigned int iauth_len = 0;
u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
int rc;
+ unsigned int max_sg_len;
/* zero the ctr value */
memset(iv + 15 - iv[0], 0, iv[0] + 1);
@@ -248,10 +249,19 @@ static int generate_pat(u8 *iv,
if (!req->assoclen) {
return rc;
} else if (req->assoclen <= 14) {
- nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen);
- nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16,
+ unsigned int len = 16;
+
+ nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);
+
+ if (len != 16)
+ return -EINVAL;
+
+ nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
nx_ctx->ap->sglen);
+ if (len != 16)
+ return -EINVAL;
+
/* inlen should be negative, indicating to phyp that it's a
* pointer to an sg list */
nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
@@ -273,21 +283,24 @@ static int generate_pat(u8 *iv,
atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
} else {
- u32 max_sg_len;
unsigned int processed = 0, to_process;
- /* page_limit: number of sg entries that fit on one page */
- max_sg_len = min_t(u32,
- nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
-
processed += iauth_len;
+ /* page_limit: number of sg entries that fit on one page */
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
do {
to_process = min_t(u32, req->assoclen - processed,
nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
+
+ nx_insg = nx_walk_and_build(nx_ctx->in_sg,
+ nx_ctx->ap->sglen,
+ req->assoc, processed,
+ &to_process);
if ((to_process + processed) < req->assoclen) {
NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
@@ -297,10 +310,6 @@ static int generate_pat(u8 *iv,
~NX_FDM_INTERMEDIATE;
}
- nx_insg = nx_walk_and_build(nx_ctx->in_sg,
- nx_ctx->ap->sglen,
- req->assoc, processed,
- to_process);
nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
sizeof(struct nx_sg);
@@ -343,7 +352,6 @@ static int ccm_nx_decrypt(struct aead_request *req,
struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
- u32 max_sg_len;
int rc = -1;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -360,19 +368,12 @@ static int ccm_nx_decrypt(struct aead_request *req,
if (rc)
goto out;
- /* page_limit: number of sg entries that fit on one page */
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
-
do {
/* to_process: the AES_BLOCK_SIZE data chunk to process in this
* update. This value is bound by sg list limits.
*/
- to_process = min_t(u64, nbytes - processed,
- nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = nbytes - processed;
if ((to_process + processed) < nbytes)
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
@@ -382,7 +383,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
- to_process, processed,
+ &to_process, processed,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
goto out;
@@ -427,7 +428,6 @@ static int ccm_nx_encrypt(struct aead_request *req,
unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
unsigned long irq_flags;
unsigned int processed = 0, to_process;
- u32 max_sg_len;
int rc = -1;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -437,18 +437,11 @@ static int ccm_nx_encrypt(struct aead_request *req,
if (rc)
goto out;
- /* page_limit: number of sg entries that fit on one page */
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
-
do {
/* to process: the AES_BLOCK_SIZE data chunk to process in this
* update. This value is bound by sg list limits.
*/
- to_process = min_t(u64, nbytes - processed,
- nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = nbytes - processed;
if ((to_process + processed) < nbytes)
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
@@ -458,7 +451,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
- to_process, processed,
+ &to_process, processed,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
goto out;
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index a37d009dc75c..2617cd4d54dd 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -90,22 +90,14 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
- u32 max_sg_len;
int rc;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
-
do {
- to_process = min_t(u64, nbytes - processed,
- nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
- to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+ to_process = nbytes - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
+ rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
processed, csbcpb->cpb.aes_ctr.iv);
if (rc)
goto out;
@@ -143,6 +135,7 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
memcpy(iv + CTR_RFC3686_NONCE_SIZE,
desc->info, CTR_RFC3686_IV_SIZE);
+ iv[12] = iv[13] = iv[14] = 0;
iv[15] = 1;
desc->info = nx_ctx->priv.ctr.iv;
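The three zeroed bytes together with iv[15] = 1 reinitialize the 32-bit block counter of the RFC 3686 counter block on every request. A self-contained sketch of the layout being assembled (offsets per RFC 3686; the helper is illustrative only, not driver code, and assumes memcpy from <linux/string.h>):

/* counter block = nonce (4) || per-request IV (8) || big-endian counter (4), starting at 1 */
static void rfc3686_init_counter_block(u8 cb[16], const u8 *nonce, const u8 *iv)
{
	memcpy(cb, nonce, CTR_RFC3686_NONCE_SIZE);			/* 4 bytes */
	memcpy(cb + CTR_RFC3686_NONCE_SIZE, iv, CTR_RFC3686_IV_SIZE);	/* 8 bytes */
	cb[12] = cb[13] = cb[14] = 0;					/* counter high bytes */
	cb[15] = 1;							/* initial counter value */
}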
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 85a8d23cf29d..cfdde8b8bc76 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -72,27 +72,19 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned long irq_flags;
unsigned int processed = 0, to_process;
- u32 max_sg_len;
int rc;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
-
if (enc)
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
else
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
do {
- to_process = min_t(u64, nbytes - processed,
- nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
- to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+ to_process = nbytes - processed;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
+ rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
processed, NULL);
if (rc)
goto out;
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 025d9a8d5b19..88c562434bc0 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -131,7 +131,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
struct nx_sg *nx_sg = nx_ctx->in_sg;
unsigned int nbytes = req->assoclen;
unsigned int processed = 0, to_process;
- u32 max_sg_len;
+ unsigned int max_sg_len;
if (nbytes <= AES_BLOCK_SIZE) {
scatterwalk_start(&walk, req->assoc);
@@ -143,8 +143,10 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;
/* page_limit: number of sg entries that fit on one page */
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
nx_ctx->ap->sglen);
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
do {
/*
@@ -156,13 +158,14 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
to_process = min_t(u64, to_process,
NX_PAGE_SIZE * (max_sg_len - 1));
+ nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
+ req->assoc, processed, &to_process);
+
if ((to_process + processed) < nbytes)
NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
else
NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;
- nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
- req->assoc, processed, to_process);
nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
* sizeof(struct nx_sg);
@@ -195,7 +198,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
struct nx_sg *nx_sg;
unsigned int nbytes = req->assoclen;
unsigned int processed = 0, to_process;
- u32 max_sg_len;
+ unsigned int max_sg_len;
/* Set GMAC mode */
csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;
@@ -203,8 +206,10 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
/* page_limit: number of sg entries that fit on one page */
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
nx_ctx->ap->sglen);
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* Copy IV */
memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
@@ -219,13 +224,14 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
to_process = min_t(u64, to_process,
NX_PAGE_SIZE * (max_sg_len - 1));
+ nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
+ req->assoc, processed, &to_process);
+
if ((to_process + processed) < nbytes)
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
else
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
- req->assoc, processed, to_process);
nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
* sizeof(struct nx_sg);
@@ -264,6 +270,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
char out[AES_BLOCK_SIZE];
struct nx_sg *in_sg, *out_sg;
+ int len;
/* For scenarios where the input message is zero length, AES CTR mode
* may be used. Set the source data to be a single block (16B) of all
@@ -279,11 +286,22 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
else
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+ len = AES_BLOCK_SIZE;
+
/* Encrypt the counter/IV */
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
- AES_BLOCK_SIZE, nx_ctx->ap->sglen);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, sizeof(out),
+ &len, nx_ctx->ap->sglen);
+
+ if (len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ len = sizeof(out);
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
nx_ctx->ap->sglen);
+
+ if (len != sizeof(out))
+ return -EINVAL;
+
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -317,7 +335,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
unsigned int nbytes = req->cryptlen;
unsigned int processed = 0, to_process;
unsigned long irq_flags;
- u32 max_sg_len;
int rc = -EINVAL;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -354,33 +371,24 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
}
- /* page_limit: number of sg entries that fit on one page */
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
-
do {
- /*
- * to_process: the data chunk to process in this update.
- * This value is bound by sg list limits.
- */
- to_process = min_t(u64, nbytes - processed,
- nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
-
- if ((to_process + processed) < nbytes)
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
- else
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ to_process = nbytes - processed;
csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
- req->src, to_process, processed,
+ req->src, &to_process, processed,
csbcpb->cpb.aes_gcm.iv_or_cnt);
+
if (rc)
goto out;
+ if ((to_process + processed) < nbytes)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+
rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 03c4bf57d066..8c2faffab4a3 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -75,6 +75,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
u8 keys[2][AES_BLOCK_SIZE];
u8 key[32];
int rc = 0;
+ int len;
/* Change to ECB mode */
csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
@@ -86,11 +87,20 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
memset(keys[0], 0x01, sizeof(keys[0]));
memset(keys[1], 0x03, sizeof(keys[1]));
+ len = sizeof(keys);
/* Generate K1 and K3 encrypting the patterns */
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, sizeof(keys),
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
nx_ctx->ap->sglen);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, sizeof(keys),
+
+ if (len != sizeof(keys))
+ return -EINVAL;
+
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
nx_ctx->ap->sglen);
+
+ if (len != sizeof(keys))
+ return -EINVAL;
+
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -103,12 +113,23 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
/* XOr K3 with the padding for a 0 length message */
keys[1][0] ^= 0x80;
+ len = sizeof(keys[1]);
+
/* Encrypt the final result */
memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], sizeof(keys[1]),
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
nx_ctx->ap->sglen);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
+
+ if (len != sizeof(keys[1]))
+ return -EINVAL;
+
+ len = AES_BLOCK_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
nx_ctx->ap->sglen);
+
+ if (len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -133,6 +154,7 @@ static int nx_xcbc_init(struct shash_desc *desc)
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *out_sg;
+ int len;
nx_ctx_init(nx_ctx, HCOP_FC_AES);
@@ -144,8 +166,13 @@ static int nx_xcbc_init(struct shash_desc *desc)
memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
+ len = AES_BLOCK_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- AES_BLOCK_SIZE, nx_ctx->ap->sglen);
+ &len, nx_ctx->ap->sglen);
+
+ if (len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
return 0;
@@ -159,10 +186,11 @@ static int nx_xcbc_update(struct shash_desc *desc,
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg;
- u32 to_process, leftover, total;
- u32 max_sg_len;
+ u32 to_process = 0, leftover, total;
+ unsigned int max_sg_len;
unsigned long irq_flags;
int rc = 0;
+ int data_len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -180,17 +208,15 @@ static int nx_xcbc_update(struct shash_desc *desc,
}
in_sg = nx_ctx->in_sg;
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
nx_ctx->ap->sglen);
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
do {
-
- /* to_process: the AES_BLOCK_SIZE data chunk to process in this
- * update */
- to_process = min_t(u64, total, nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = total - to_process;
to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+
leftover = total - to_process;
/* the hardware will not accept a 0 byte operation for this
@@ -204,15 +230,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
}
if (sctx->count) {
+ data_len = sctx->count;
in_sg = nx_build_sg_list(nx_ctx->in_sg,
(u8 *) sctx->buffer,
- sctx->count,
+ &data_len,
max_sg_len);
+ if (data_len != sctx->count)
+ return -EINVAL;
}
+
+ data_len = to_process - sctx->count;
in_sg = nx_build_sg_list(in_sg,
(u8 *) data,
- to_process - sctx->count,
+ &data_len,
max_sg_len);
+
+ if (data_len != to_process - sctx->count)
+ return -EINVAL;
+
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
@@ -263,6 +298,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
struct nx_sg *in_sg, *out_sg;
unsigned long irq_flags;
int rc = 0;
+ int len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -285,11 +321,20 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
* this is not an intermediate operation */
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ len = sctx->count;
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
- sctx->count, nx_ctx->ap->sglen);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
+ &len, nx_ctx->ap->sglen);
+
+ if (len != sctx->count)
+ return -EINVAL;
+
+ len = AES_BLOCK_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
nx_ctx->ap->sglen);
+ if (len != AES_BLOCK_SIZE)
+ return -EINVAL;
+
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
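All of the nx-aes-* and nx-sha* hunks in this series follow the same calling-convention change: nx_build_sg_list() now takes the length by pointer and writes back how many bytes it could actually map, and the caller must verify the result. The pattern, condensed from the hunks above (a sketch, not a complete function; buf and expected are placeholders):

	int len = expected;

	in_sg = nx_build_sg_list(nx_ctx->in_sg, buf, &len, nx_ctx->ap->sglen);
	if (len != expected)
		return -EINVAL;	/* sg list could not cover the whole buffer */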
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index da0b24a7633f..23621da624c3 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -23,6 +23,7 @@
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>
+#include <asm/byteorder.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -32,7 +33,8 @@ static int nx_sha256_init(struct shash_desc *desc)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_sg *out_sg;
+ int len;
+ int rc;
nx_ctx_init(nx_ctx, HCOP_FC_SHA);
@@ -41,10 +43,28 @@ static int nx_sha256_init(struct shash_desc *desc)
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- SHA256_DIGEST_SIZE, nx_ctx->ap->sglen);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ len = SHA256_DIGEST_SIZE;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+ &nx_ctx->op.outlen,
+ &len,
+ (u8 *) sctx->state,
+ NX_DS_SHA256);
+
+ if (rc)
+ goto out;
+
+ sctx->state[0] = __cpu_to_be32(SHA256_H0);
+ sctx->state[1] = __cpu_to_be32(SHA256_H1);
+ sctx->state[2] = __cpu_to_be32(SHA256_H2);
+ sctx->state[3] = __cpu_to_be32(SHA256_H3);
+ sctx->state[4] = __cpu_to_be32(SHA256_H4);
+ sctx->state[5] = __cpu_to_be32(SHA256_H5);
+ sctx->state[6] = __cpu_to_be32(SHA256_H6);
+ sctx->state[7] = __cpu_to_be32(SHA256_H7);
+ sctx->count = 0;
+
+out:
return 0;
}
@@ -54,11 +74,11 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
- u64 to_process, leftover, total;
- u32 max_sg_len;
+ u64 to_process = 0, leftover, total;
unsigned long irq_flags;
int rc = 0;
+ int data_len;
+ u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -66,16 +86,16 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
* 1: < SHA256_BLOCK_SIZE: copy into state, return 0
* 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
*/
- total = sctx->count + len;
+ total = (sctx->count % SHA256_BLOCK_SIZE) + len;
if (total < SHA256_BLOCK_SIZE) {
- memcpy(sctx->buf + sctx->count, data, len);
+ memcpy(sctx->buf + buf_len, data, len);
sctx->count += len;
goto out;
}
- in_sg = nx_ctx->in_sg;
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
+ memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
do {
/*
@@ -83,34 +103,42 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
* this update. This value is also restricted by the sg list
* limits.
*/
- to_process = min_t(u64, total, nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = total - to_process;
to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
- leftover = total - to_process;
- if (sctx->count) {
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
- (u8 *) sctx->buf,
- sctx->count, max_sg_len);
+ if (buf_len) {
+ data_len = buf_len;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+ &nx_ctx->op.inlen,
+ &data_len,
+ (u8 *) sctx->buf,
+ NX_DS_SHA256);
+
+ if (rc || data_len != buf_len)
+ goto out;
}
- in_sg = nx_build_sg_list(in_sg, (u8 *) data,
- to_process - sctx->count,
- max_sg_len);
- nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
- sizeof(struct nx_sg);
-
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- /*
- * we've hit the nx chip previously and we're updating
- * again, so copy over the partial digest.
- */
- memcpy(csbcpb->cpb.sha256.input_partial_digest,
+
+ data_len = to_process - buf_len;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+ &nx_ctx->op.inlen,
+ &data_len,
+ (u8 *) data,
+ NX_DS_SHA256);
+
+ if (rc)
+ goto out;
+
+ to_process = (data_len + buf_len);
+ leftover = total - to_process;
+
+ /*
+ * we've hit the nx chip previously and we're updating
+ * again, so copy over the partial digest.
+ */
+ memcpy(csbcpb->cpb.sha256.input_partial_digest,
csbcpb->cpb.sha256.message_digest,
SHA256_DIGEST_SIZE);
- }
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
@@ -122,22 +150,19 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
goto out;
atomic_inc(&(nx_ctx->stats->sha256_ops));
- csbcpb->cpb.sha256.message_bit_length += (u64)
- (csbcpb->cpb.sha256.spbc * 8);
-
- /* everything after the first update is continuation */
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
total -= to_process;
- data += to_process - sctx->count;
- sctx->count = 0;
- in_sg = nx_ctx->in_sg;
+ data += to_process - buf_len;
+ buf_len = 0;
+
} while (leftover >= SHA256_BLOCK_SIZE);
/* copy the leftover back into the state struct */
if (leftover)
memcpy(sctx->buf, data, leftover);
- sctx->count = leftover;
+
+ sctx->count += len;
+ memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
@@ -148,34 +173,46 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg, *out_sg;
- u32 max_sg_len;
unsigned long irq_flags;
int rc;
+ int len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
-
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ /* final is represented by continuing the operation and indicating that
+ * this is not an intermediate operation */
+ if (sctx->count >= SHA256_BLOCK_SIZE) {
/* we've hit the nx chip previously, now we're finalizing,
* so copy over the partial digest */
- memcpy(csbcpb->cpb.sha256.input_partial_digest,
- csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
+ memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ } else {
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
}
- /* final is represented by continuing the operation and indicating that
- * this is not an intermediate operation */
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
- csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8);
+ len = sctx->count & (SHA256_BLOCK_SIZE - 1);
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+ &nx_ctx->op.inlen,
+ &len,
+ (u8 *) sctx->buf,
+ NX_DS_SHA256);
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
- sctx->count, max_sg_len);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE,
- max_sg_len);
- nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1)))
+ goto out;
+
+ len = SHA256_DIGEST_SIZE;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+ &nx_ctx->op.outlen,
+ &len,
+ out,
+ NX_DS_SHA256);
+
+ if (rc || len != SHA256_DIGEST_SIZE)
+ goto out;
if (!nx_ctx->op.outlen) {
rc = -EINVAL;
@@ -189,8 +226,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
atomic_inc(&(nx_ctx->stats->sha256_ops));
- atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
- &(nx_ctx->stats->sha256_bytes));
+ atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
@@ -200,62 +236,18 @@ out:
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct sha256_state *octx = out;
- unsigned long irq_flags;
-
- spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- octx->count = sctx->count +
- (csbcpb->cpb.sha256.message_bit_length / 8);
- memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
-
- /* if no data has been processed yet, we need to export SHA256's
- * initial data, in case this context gets imported into a software
- * context */
- if (csbcpb->cpb.sha256.message_bit_length)
- memcpy(octx->state, csbcpb->cpb.sha256.message_digest,
- SHA256_DIGEST_SIZE);
- else {
- octx->state[0] = SHA256_H0;
- octx->state[1] = SHA256_H1;
- octx->state[2] = SHA256_H2;
- octx->state[3] = SHA256_H3;
- octx->state[4] = SHA256_H4;
- octx->state[5] = SHA256_H5;
- octx->state[6] = SHA256_H6;
- octx->state[7] = SHA256_H7;
- }
+ memcpy(out, sctx, sizeof(*sctx));
- spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return 0;
}
static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- const struct sha256_state *ictx = in;
- unsigned long irq_flags;
-
- spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ memcpy(sctx, in, sizeof(*sctx));
- sctx->count = ictx->count & 0x3f;
- csbcpb->cpb.sha256.message_bit_length = (ictx->count & ~0x3f) * 8;
-
- if (csbcpb->cpb.sha256.message_bit_length) {
- memcpy(csbcpb->cpb.sha256.message_digest, ictx->state,
- SHA256_DIGEST_SIZE);
-
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
- }
-
- spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return 0;
}
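The reworked SHA-256 path keeps the running message length in sctx->count and derives the partially filled buffer from it, instead of carrying hardware state in the csbcpb between calls. The buffering invariant, condensed from the update hunk above (sketch, not a complete function):

	u64 buf_len = sctx->count % SHA256_BLOCK_SIZE;	/* bytes already buffered */
	u64 total = buf_len + len;			/* bytes available this call */

	if (total < SHA256_BLOCK_SIZE) {
		memcpy(sctx->buf + buf_len, data, len);	/* stash; no hardware call */
		sctx->count += len;
		return 0;
	}
	/* otherwise hand whole SHA256_BLOCK_SIZE multiples to the NX unit and buffer the rest */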
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 4ae5b0f221d5..b3adf1022673 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -32,7 +32,8 @@ static int nx_sha512_init(struct shash_desc *desc)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_sg *out_sg;
+ int len;
+ int rc;
nx_ctx_init(nx_ctx, HCOP_FC_SHA);
@@ -41,10 +42,28 @@ static int nx_sha512_init(struct shash_desc *desc)
nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- SHA512_DIGEST_SIZE, nx_ctx->ap->sglen);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ len = SHA512_DIGEST_SIZE;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+ &nx_ctx->op.outlen,
+ &len,
+ (u8 *)sctx->state,
+ NX_DS_SHA512);
+
+ if (rc || len != SHA512_DIGEST_SIZE)
+ goto out;
+
+ sctx->state[0] = __cpu_to_be64(SHA512_H0);
+ sctx->state[1] = __cpu_to_be64(SHA512_H1);
+ sctx->state[2] = __cpu_to_be64(SHA512_H2);
+ sctx->state[3] = __cpu_to_be64(SHA512_H3);
+ sctx->state[4] = __cpu_to_be64(SHA512_H4);
+ sctx->state[5] = __cpu_to_be64(SHA512_H5);
+ sctx->state[6] = __cpu_to_be64(SHA512_H6);
+ sctx->state[7] = __cpu_to_be64(SHA512_H7);
+ sctx->count[0] = 0;
+
+out:
return 0;
}
@@ -54,11 +73,11 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
- u64 to_process, leftover, total, spbc_bits;
- u32 max_sg_len;
+ u64 to_process, leftover = 0, total;
unsigned long irq_flags;
int rc = 0;
+ int data_len;
+ u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -66,16 +85,16 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
* 1: < SHA512_BLOCK_SIZE: copy into state, return 0
* 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
*/
- total = sctx->count[0] + len;
+ total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
if (total < SHA512_BLOCK_SIZE) {
- memcpy(sctx->buf + sctx->count[0], data, len);
+ memcpy(sctx->buf + buf_len, data, len);
sctx->count[0] += len;
goto out;
}
- in_sg = nx_ctx->in_sg;
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
- nx_ctx->ap->sglen);
+ memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
do {
/*
@@ -83,34 +102,43 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
* this update. This value is also restricted by the sg list
* limits.
*/
- to_process = min_t(u64, total, nx_ctx->ap->databytelen);
- to_process = min_t(u64, to_process,
- NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = total - leftover;
to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
leftover = total - to_process;
- if (sctx->count[0]) {
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
- (u8 *) sctx->buf,
- sctx->count[0], max_sg_len);
+ if (buf_len) {
+ data_len = buf_len;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+ &nx_ctx->op.inlen,
+ &data_len,
+ (u8 *) sctx->buf,
+ NX_DS_SHA512);
+
+ if (rc || data_len != buf_len)
+ goto out;
}
- in_sg = nx_build_sg_list(in_sg, (u8 *) data,
- to_process - sctx->count[0],
- max_sg_len);
- nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
- sizeof(struct nx_sg);
-
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- /*
- * we've hit the nx chip previously and we're updating
- * again, so copy over the partial digest.
- */
- memcpy(csbcpb->cpb.sha512.input_partial_digest,
+
+ data_len = to_process - buf_len;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+ &nx_ctx->op.inlen,
+ &data_len,
+ (u8 *) data,
+ NX_DS_SHA512);
+
+ if (rc || data_len != (to_process - buf_len))
+ goto out;
+
+ to_process = (data_len + buf_len);
+ leftover = total - to_process;
+
+ /*
+ * we've hit the nx chip previously and we're updating
+ * again, so copy over the partial digest.
+ */
+ memcpy(csbcpb->cpb.sha512.input_partial_digest,
csbcpb->cpb.sha512.message_digest,
SHA512_DIGEST_SIZE);
- }
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
@@ -122,24 +150,18 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
goto out;
atomic_inc(&(nx_ctx->stats->sha512_ops));
- spbc_bits = csbcpb->cpb.sha512.spbc * 8;
- csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
- if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
- csbcpb->cpb.sha512.message_bit_length_hi++;
-
- /* everything after the first update is continuation */
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
total -= to_process;
- data += to_process - sctx->count[0];
- sctx->count[0] = 0;
- in_sg = nx_ctx->in_sg;
+ data += to_process - buf_len;
+ buf_len = 0;
+
} while (leftover >= SHA512_BLOCK_SIZE);
/* copy the leftover back into the state struct */
if (leftover)
memcpy(sctx->buf, data, leftover);
- sctx->count[0] = leftover;
+ sctx->count[0] += len;
+ memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
@@ -150,39 +172,52 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg, *out_sg;
- u32 max_sg_len;
u64 count0;
unsigned long irq_flags;
int rc;
+ int len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
-
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ /* final is represented by continuing the operation and indicating that
+ * this is not an intermediate operation */
+ if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
/* we've hit the nx chip previously, now we're finalizing,
* so copy over the partial digest */
- memcpy(csbcpb->cpb.sha512.input_partial_digest,
- csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
+ memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
+ SHA512_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ } else {
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
}
- /* final is represented by continuing the operation and indicating that
- * this is not an intermediate operation */
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
count0 = sctx->count[0] * 8;
- csbcpb->cpb.sha512.message_bit_length_lo += count0;
- if (csbcpb->cpb.sha512.message_bit_length_lo < count0)
- csbcpb->cpb.sha512.message_bit_length_hi++;
+ csbcpb->cpb.sha512.message_bit_length_lo = count0;
- in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0],
- max_sg_len);
- out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE,
- max_sg_len);
- nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+ &nx_ctx->op.inlen,
+ &len,
+ (u8 *)sctx->buf,
+ NX_DS_SHA512);
+
+ if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
+ goto out;
+
+ len = SHA512_DIGEST_SIZE;
+ rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+ &nx_ctx->op.outlen,
+ &len,
+ out,
+ NX_DS_SHA512);
+
+ if (rc)
+ goto out;
if (!nx_ctx->op.outlen) {
rc = -EINVAL;
@@ -195,8 +230,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
goto out;
atomic_inc(&(nx_ctx->stats->sha512_ops));
- atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
- &(nx_ctx->stats->sha512_bytes));
+ atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));
memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
@@ -207,74 +241,18 @@ out:
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct sha512_state *octx = out;
- unsigned long irq_flags;
- spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+ memcpy(out, sctx, sizeof(*sctx));
- /* move message_bit_length (128 bits) into count and convert its value
- * to bytes */
- octx->count[0] = csbcpb->cpb.sha512.message_bit_length_lo >> 3 |
- ((csbcpb->cpb.sha512.message_bit_length_hi & 7) << 61);
- octx->count[1] = csbcpb->cpb.sha512.message_bit_length_hi >> 3;
-
- octx->count[0] += sctx->count[0];
- if (octx->count[0] < sctx->count[0])
- octx->count[1]++;
-
- memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
-
- /* if no data has been processed yet, we need to export SHA512's
- * initial data, in case this context gets imported into a software
- * context */
- if (csbcpb->cpb.sha512.message_bit_length_hi ||
- csbcpb->cpb.sha512.message_bit_length_lo)
- memcpy(octx->state, csbcpb->cpb.sha512.message_digest,
- SHA512_DIGEST_SIZE);
- else {
- octx->state[0] = SHA512_H0;
- octx->state[1] = SHA512_H1;
- octx->state[2] = SHA512_H2;
- octx->state[3] = SHA512_H3;
- octx->state[4] = SHA512_H4;
- octx->state[5] = SHA512_H5;
- octx->state[6] = SHA512_H6;
- octx->state[7] = SHA512_H7;
- }
-
- spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return 0;
}
static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
- struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- const struct sha512_state *ictx = in;
- unsigned long irq_flags;
-
- spin_lock_irqsave(&nx_ctx->lock, irq_flags);
-
- memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
- sctx->count[0] = ictx->count[0] & 0x3f;
- csbcpb->cpb.sha512.message_bit_length_lo = (ictx->count[0] & ~0x3f)
- << 3;
- csbcpb->cpb.sha512.message_bit_length_hi = ictx->count[1] << 3 |
- ictx->count[0] >> 61;
-
- if (csbcpb->cpb.sha512.message_bit_length_hi ||
- csbcpb->cpb.sha512.message_bit_length_lo) {
- memcpy(csbcpb->cpb.sha512.message_digest, ictx->state,
- SHA512_DIGEST_SIZE);
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
- }
+ memcpy(sctx, in, sizeof(*sctx));
- spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return 0;
}
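
A minimal sketch (not part of the patch, assuming only SHA512_BLOCK_SIZE == 128) of the split the reworked update path relies on: whole blocks go to the NX unit, while the tail stays in sctx->buf until the next update or final call.

	/* Illustrative only: split buffered + new bytes into whole blocks and a tail. */
	static unsigned int split_update(unsigned int buf_len, unsigned int len,
					 unsigned int *leftover)
	{
		unsigned int total = buf_len + len;
		unsigned int to_process = total & ~(SHA512_BLOCK_SIZE - 1);

		*leftover = total - to_process;	/* e.g. 100 + 100 -> 128 processed, 72 kept */
		return to_process;
	}
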
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 5533fe31c90d..a392465d3e3f 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -90,7 +90,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
*/
struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
u8 *start_addr,
- unsigned int len,
+ unsigned int *len,
u32 sgmax)
{
unsigned int sg_len = 0;
@@ -106,7 +106,7 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
else
sg_addr = __pa(sg_addr);
- end_addr = sg_addr + len;
+ end_addr = sg_addr + *len;
/* each iteration will write one struct nx_sg element and add the
* length of data described by that element to sg_len. Once @len bytes
@@ -118,7 +118,7 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
* Also when using vmalloc'ed data, every time that a system page
* boundary is crossed the physical address needs to be re-calculated.
*/
- for (sg = sg_head; sg_len < len; sg++) {
+ for (sg = sg_head; sg_len < *len; sg++) {
u64 next_page;
sg->addr = sg_addr;
@@ -133,15 +133,17 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
is_vmalloc_addr(start_addr + sg_len)) {
sg_addr = page_to_phys(vmalloc_to_page(
start_addr + sg_len));
- end_addr = sg_addr + len - sg_len;
+ end_addr = sg_addr + *len - sg_len;
}
if ((sg - sg_head) == sgmax) {
pr_err("nx: scatter/gather list overflow, pid: %d\n",
current->pid);
- return NULL;
+ sg++;
+ break;
}
}
+ *len = sg_len;
/* return the moved sg_head pointer */
return sg;
@@ -160,11 +162,11 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
unsigned int sglen,
struct scatterlist *sg_src,
unsigned int start,
- unsigned int src_len)
+ unsigned int *src_len)
{
struct scatter_walk walk;
struct nx_sg *nx_sg = nx_dst;
- unsigned int n, offset = 0, len = src_len;
+ unsigned int n, offset = 0, len = *src_len;
char *dst;
/* we need to fast forward through @start bytes first */
@@ -182,27 +184,101 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
* element we're currently looking at */
scatterwalk_advance(&walk, start - offset);
- while (len && nx_sg) {
+ while (len && (nx_sg - nx_dst) < sglen) {
n = scatterwalk_clamp(&walk, len);
if (!n) {
- scatterwalk_start(&walk, sg_next(walk.sg));
+ /* In cases where we have a scatterlist chain,
+ * scatterwalk_sg_next handles it properly */
+ scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
n = scatterwalk_clamp(&walk, len);
}
dst = scatterwalk_map(&walk);
- nx_sg = nx_build_sg_list(nx_sg, dst, n, sglen);
+ nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
len -= n;
scatterwalk_unmap(dst);
scatterwalk_advance(&walk, n);
scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
}
+ /* update to_process */
+ *src_len -= len;
/* return the moved destination pointer */
return nx_sg;
}
/**
+ * trim_sg_list - ensure the sg list stays within bounds.
+ * @sg: sg list head
+ * @end: sg list end
+ * @delta: amount we need to crop in order to bound the list.
+ *
+ */
+static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta)
+{
+ while (delta && end > sg) {
+ struct nx_sg *last = end - 1;
+
+ if (last->len > delta) {
+ last->len -= delta;
+ delta = 0;
+ } else {
+ end--;
+ delta -= last->len;
+ }
+ }
+ return (sg - end) * sizeof(struct nx_sg);
+}
+
+/**
+ * nx_sha_build_sg_list - walk and build an sg list for the SHA modes,
+ * applying the right bounds and limits.
+ * @nx_ctx: NX crypto context for the lists we're building
+ * @nx_in_outsg: current in or out sg list
+ * @op_len: op length, set from the sg list that gets built
+ * @nbytes: number of bytes to be processed
+ * @offset: buf offset
+ * @mode: SHA256 or SHA512
+ */
+int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
+ struct nx_sg *nx_in_outsg,
+ s64 *op_len,
+ unsigned int *nbytes,
+ u8 *offset,
+ u32 mode)
+{
+ unsigned int delta = 0;
+ unsigned int total = *nbytes;
+ struct nx_sg *nx_insg = nx_in_outsg;
+ unsigned int max_sg_len;
+
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+ *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
+ nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);
+
+ switch (mode) {
+ case NX_DS_SHA256:
+ if (*nbytes < total)
+ delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
+ break;
+ case NX_DS_SHA512:
+ if (*nbytes < total)
+ delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
+ break;
+ default:
+ return -EINVAL;
+ }
+ *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);
+
+ return 0;
+}
+
+/**
* nx_build_sg_lists - walk the input scatterlists and build arrays of NX
* scatterlists based on them.
*
@@ -223,26 +299,39 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
- unsigned int nbytes,
+ unsigned int *nbytes,
unsigned int offset,
u8 *iv)
{
+ unsigned int delta = 0;
+ unsigned int total = *nbytes;
struct nx_sg *nx_insg = nx_ctx->in_sg;
struct nx_sg *nx_outsg = nx_ctx->out_sg;
+ unsigned int max_sg_len;
+
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
if (iv)
memcpy(iv, desc->info, AES_BLOCK_SIZE);
- nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src,
- offset, nbytes);
- nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst,
- offset, nbytes);
+ *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
+
+ nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
+ offset, nbytes);
+ nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
+ offset, nbytes);
+
+ if (*nbytes < total)
+ delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));
/* these lengths should be negative, which will indicate to phyp that
* the input and output parameters are scatterlists, not linear
* buffers */
- nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
- nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
+ nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta);
+ nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta);
return 0;
}
@@ -540,10 +629,10 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
/* we need an extra page for csbcpb_aead for these modes */
if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
- nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
+ nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
sizeof(struct nx_csbcpb);
else
- nx_ctx->kmem_len = (3 * NX_PAGE_SIZE) +
+ nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
sizeof(struct nx_csbcpb);
nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
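
A hedged illustration of the trimming done by trim_sg_list()/nx_build_sg_lists() above: when the sg list cannot describe everything that was asked for, the mapped length is rounded down to a whole block so the coprocessor never sees a partial AES/SHA block (block size 16 here is only an example).

	/* Illustrative only: round a partially mapped length down to a block boundary. */
	static unsigned int round_to_block(unsigned int mapped, unsigned int total,
					   unsigned int block_size)
	{
		unsigned int delta = 0;

		if (mapped < total)	/* the list was too small to map everything */
			delta = mapped - (mapped & ~(block_size - 1));

		return mapped - delta;	/* e.g. mapped 300, block 16 -> trim 12, keep 288 */
	}
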
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index befda07ca1da..6c9ecaaead52 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -153,13 +153,15 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
u32 may_sleep);
-struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32);
+int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *,
+ s64 *, unsigned int *, u8 *, u32);
+struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
- struct scatterlist *, struct scatterlist *, unsigned int,
+ struct scatterlist *, struct scatterlist *, unsigned int *,
unsigned int, u8 *);
struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
struct scatterlist *, unsigned int,
- unsigned int);
+ unsigned int *);
#ifdef CONFIG_DEBUG_FS
#define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv)
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 633ba945e153..c178ed8c3908 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -563,4 +563,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index bace885634f2..95f7d27ce491 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -593,7 +593,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");
-MODULE_ALIAS("sha1-all");
-MODULE_ALIAS("sha256-all");
-MODULE_ALIAS("sha1-padlock");
-MODULE_ALIAS("sha256-padlock");
+MODULE_ALIAS_CRYPTO("sha1-all");
+MODULE_ALIAS_CRYPTO("sha256-all");
+MODULE_ALIAS_CRYPTO("sha1-padlock");
+MODULE_ALIAS_CRYPTO("sha256-padlock");
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index fe7b3f06f6e6..2ed425664a16 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -56,8 +56,6 @@
#define PCI_VENDOR_ID_INTEL 0x8086
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
-#define ADF_DH895XCC_PMISC_BAR 1
-#define ADF_DH895XCC_ETR_BAR 2
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index c29d4c3926bf..10ce4a2854ab 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -90,7 +90,7 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
uint16_t ppdstat = 0, bridge_ctl = 0;
int pending = 0;
- pr_info("QAT: Reseting device qat_dev%d\n", accel_dev->accel_id);
+ pr_info("QAT: Resetting device qat_dev%d\n", accel_dev->accel_id);
pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
if (pending) {
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 244d73378f0e..7ee93f881db6 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -52,6 +52,7 @@
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
+#include <linux/crypto.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
@@ -487,4 +488,4 @@ module_exit(adf_unregister_ctl_device_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_ALIAS("intel_qat");
+MODULE_ALIAS_CRYPTO("intel_qat");
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index ae71555c0868..4a0a829d4500 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -129,12 +129,13 @@ struct adf_accel_dev *adf_devmgr_get_first(void)
* Function returns acceleration device associated with the given pci device.
* To be used by QAT device specific drivers.
*
- * Return: pinter to accel_dev or NULL if not found.
+ * Return: pointer to accel_dev or NULL if not found.
*/
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
{
struct list_head *itr;
+ mutex_lock(&table_lock);
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
@@ -144,6 +145,7 @@ struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
return ptr;
}
}
+ mutex_unlock(&table_lock);
return NULL;
}
EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
@@ -152,6 +154,7 @@ struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
{
struct list_head *itr;
+ mutex_lock(&table_lock);
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
@@ -161,6 +164,7 @@ struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
return ptr;
}
}
+ mutex_unlock(&table_lock);
return NULL;
}
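
A minimal sketch (made-up names) of the locking pattern these hunks introduce: the device-table walk is serialized with a mutex, and the lock is released on both the hit and the miss path.

	#include <linux/list.h>
	#include <linux/mutex.h>

	struct my_dev {				/* hypothetical element type */
		struct list_head list;
		int id;
	};

	static struct my_dev *my_dev_lookup(struct list_head *table,
					    struct mutex *lock, int id)
	{
		struct my_dev *ptr, *found = NULL;

		mutex_lock(lock);
		list_for_each_entry(ptr, table, list) {
			if (ptr->id == id) {
				found = ptr;
				break;
			}
		}
		mutex_unlock(lock);		/* dropped whether or not a match was found */

		return found;
	}
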
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 9dd2cb72a4e8..7dd54aaee9fa 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -376,8 +376,9 @@ static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
return 0;
}
-static void adf_enable_coalesc(struct adf_etr_bank_data *bank,
- const char *section, uint32_t bank_num_in_accel)
+static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
+ const char *section,
+ uint32_t bank_num_in_accel)
{
if (adf_get_cfg_int(bank->accel_dev, section,
ADF_ETRMGR_COALESCE_TIMER_FORMAT,
@@ -396,7 +397,7 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_etr_ring_data *ring;
struct adf_etr_ring_data *tx_ring;
- uint32_t i, coalesc_enabled;
+ uint32_t i, coalesc_enabled = 0;
memset(bank, 0, sizeof(*bank));
bank->bank_number = bank_num;
@@ -407,10 +408,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
/* Enable IRQ coalescing always. This will allow to use
* the optimised flag and coalesc register.
* If it is disabled in the config file just use min time value */
- if (adf_get_cfg_int(accel_dev, "Accelerator0",
- ADF_ETRMGR_COALESCING_ENABLED_FORMAT,
- bank_num, &coalesc_enabled) && coalesc_enabled)
- adf_enable_coalesc(bank, "Accelerator0", bank_num);
+ if ((adf_get_cfg_int(accel_dev, "Accelerator0",
+ ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
+ &coalesc_enabled) == 0) && coalesc_enabled)
+ adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
else
bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
index 91d88d676580..160c9a36c919 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -83,14 +83,14 @@
#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
-/* Valid internal msg size values internal */
+/* Valid internal msg size values */
#define ADF_MSG_SIZE_32 0x01
#define ADF_MSG_SIZE_64 0x02
#define ADF_MSG_SIZE_128 0x04
#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
-/* Size to bytes conversion macros for ring and msg values */
+/* Size to bytes conversion macros for ring and msg size values */
#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
@@ -100,8 +100,11 @@
#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
ADF_RING_SIZE_4K : SIZE)
#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+ SIZE) & ~0x4)
+/* Max outstanding requests */
#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
- ((((1 << (RING_SIZE - 1)) << 4) >> MSG_SIZE) - 1)
+ ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
#define BUILD_RING_CONFIG(size) \
((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
| (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
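
A worked example of the corrected in-flight calculation (the numeric codes are assumptions for illustration; the real values come from the size macros above): with a 16 KB ring and 128-byte messages the macro now yields ring_bytes / msg_bytes - 1.

	/* Illustrative only: ring size code 8 => 16 KB, msg size code 0x04 => 128 bytes. */
	unsigned int ring_size = 8, msg_size = 0x04;
	unsigned int ring_bytes = (1u << (ring_size - 1)) << 7;		/* 16384 */
	unsigned int msg_shift = (((msg_size & 0x4) >> 1) |
				  ((msg_size & 0x4) >> 2) | msg_size) & ~0x4;	/* 3 */
	unsigned int inflights = (((1u << (ring_size - 1)) << 3) >> msg_shift) - 1;	/* 127 */
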
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 9e9619cd4a79..19eea1c832ac 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -161,7 +161,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
__be64 *hash512_state_out;
int i, offset;
- memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
+ memzero_explicit(auth_state.data, MAX_AUTH_STATE_SIZE + 64);
shash->tfm = ctx->hash_tfm;
shash->flags = 0x0;
@@ -174,13 +174,13 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
memcpy(ipad, buff, digest_size);
memcpy(opad, buff, digest_size);
- memset(ipad + digest_size, 0, block_size - digest_size);
- memset(opad + digest_size, 0, block_size - digest_size);
+ memzero_explicit(ipad + digest_size, block_size - digest_size);
+ memzero_explicit(opad + digest_size, block_size - digest_size);
} else {
memcpy(ipad, auth_key, auth_keylen);
memcpy(opad, auth_key, auth_keylen);
- memset(ipad + auth_keylen, 0, block_size - auth_keylen);
- memset(opad + auth_keylen, 0, block_size - auth_keylen);
+ memzero_explicit(ipad + auth_keylen, block_size - auth_keylen);
+ memzero_explicit(opad + auth_keylen, block_size - auth_keylen);
}
for (i = 0; i < block_size; i++) {
@@ -254,6 +254,8 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
default:
return -EFAULT;
}
+ memzero_explicit(ipad, block_size);
+ memzero_explicit(opad, block_size);
return 0;
}
@@ -466,7 +468,6 @@ static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
break;
default:
goto bad_key;
- break;
}
if (qat_alg_init_enc_session(ctx, alg, &keys))
@@ -493,12 +494,12 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
if (ctx->enc_cd) {
/* rekeying */
dev = &GET_DEV(ctx->inst->accel_dev);
- memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
- memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
- memset(&ctx->enc_fw_req_tmpl, 0,
- sizeof(struct icp_qat_fw_la_bulk_req));
- memset(&ctx->dec_fw_req_tmpl, 0,
- sizeof(struct icp_qat_fw_la_bulk_req));
+ memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
+ memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
+ memzero_explicit(&ctx->enc_fw_req_tmpl,
+ sizeof(struct icp_qat_fw_la_bulk_req));
+ memzero_explicit(&ctx->dec_fw_req_tmpl,
+ sizeof(struct icp_qat_fw_la_bulk_req));
} else {
/* new key */
int node = get_current_node();
@@ -535,10 +536,12 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
return 0;
out_free_all:
+ memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
ctx->dec_cd = NULL;
out_free_enc:
+ memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
ctx->enc_cd = NULL;
@@ -836,7 +839,7 @@ static int qat_alg_init(struct crypto_tfm *tfm,
{
struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
- memset(ctx, '\0', sizeof(*ctx));
+ memzero_explicit(ctx, sizeof(*ctx));
ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(ctx->hash_tfm))
return -EFAULT;
@@ -876,12 +879,16 @@ static void qat_alg_exit(struct crypto_tfm *tfm)
return;
dev = &GET_DEV(inst->accel_dev);
- if (ctx->enc_cd)
+ if (ctx->enc_cd) {
+ memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
- if (ctx->dec_cd)
+ }
+ if (ctx->dec_cd) {
+ memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
+ }
qat_crypto_put_instance(inst);
}
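
The memset() -> memzero_explicit() conversions above are about wiping key material reliably: a plain memset() on a buffer that is about to be freed or go out of scope may be optimized away as a dead store. A hedged sketch of the intended usage (derive_something() is a hypothetical helper):

	#include <linux/string.h>
	#include <linux/types.h>

	static int use_key_material(const u8 *key, unsigned int keylen)
	{
		u8 tmp[64];		/* temporary key material on the stack */
		int ret;

		ret = derive_something(tmp, sizeof(tmp), key, keylen);

		/* memzero_explicit() cannot be elided by the compiler, unlike memset() */
		memzero_explicit(tmp, sizeof(tmp));
		return ret;
	}
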
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 9b8a31521ff3..b818c19713bf 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -679,7 +679,8 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
struct icp_qat_fw_loader_handle *handle;
struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *bar = &pci_info->pci_bars[ADF_DH895XCC_PMISC_BAR];
+ struct adf_bar *bar =
+ &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 65dd1ff93d3b..01e0be21e93a 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -48,6 +48,8 @@
#define ADF_DH895x_HW_DATA_H_
/* PCIe configuration space */
+#define ADF_DH895XCC_PMISC_BAR 1
+#define ADF_DH895XCC_ETR_BAR 2
#define ADF_DH895XCC_RX_RINGS_OFFSET 8
#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
#define ADF_DH895XCC_FUSECTL_OFFSET 0x40
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index d96ee21b9b77..fe8f89697ad8 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -186,10 +186,8 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
accel_dev->accel_pci_dev.msix_entries.names = names;
return 0;
err:
- for (i = 0; i < msix_num_entries; i++) {
- if (*(names + i))
- kfree(*(names + i));
- }
+ for (i = 0; i < msix_num_entries; i++)
+ kfree(*(names + i));
kfree(entries);
kfree(names);
return -ENOMEM;
@@ -203,10 +201,8 @@ static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
int i;
kfree(accel_dev->accel_pci_dev.msix_entries.entries);
- for (i = 0; i < msix_num_entries; i++) {
- if (*(names + i))
- kfree(*(names + i));
- }
+ for (i = 0; i < msix_num_entries; i++)
+ kfree(*(names + i));
kfree(names);
}
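
The simplification above relies on kfree(NULL) being a documented no-op, so the surrounding NULL check adds nothing:

	/* before: redundant check */
	if (name)
		kfree(name);

	/* after: kfree() already tolerates NULL */
	kfree(name);
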
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 164e1ec624e3..579f539e5975 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -3,6 +3,7 @@
*
* Support for SAHARA cryptographic accelerator.
*
+ * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
* Copyright (c) 2013 Vista Silicon S.L.
* Author: Javier Martin <javier.martin@vista-silicon.com>
*
@@ -15,6 +16,10 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
#include <linux/clk.h>
#include <linux/crypto.h>
@@ -22,12 +27,19 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
+#include <linux/kthread.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#define SHA_BUFFER_LEN PAGE_SIZE
+#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3 3
+#define SAHARA_VERSION_4 4
#define SAHARA_TIMEOUT_MS 1000
#define SAHARA_MAX_HW_DESC 2
#define SAHARA_MAX_HW_LINK 20
@@ -36,7 +48,6 @@
#define FLAGS_ENCRYPT BIT(0)
#define FLAGS_CBC BIT(1)
#define FLAGS_NEW_KEY BIT(3)
-#define FLAGS_BUSY 4
#define SAHARA_HDR_BASE 0x00800000
#define SAHARA_HDR_SKHA_ALG_AES 0
@@ -50,6 +61,23 @@
#define SAHARA_HDR_CHA_MDHA (2 << 28)
#define SAHARA_HDR_PARITY_BIT (1 << 31)
+#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
+#define SAHARA_HDR_MDHA_SET_MODE_HASH 0x208D0000
+#define SAHARA_HDR_MDHA_HASH 0xA0850000
+#define SAHARA_HDR_MDHA_STORE_DIGEST 0x20820000
+#define SAHARA_HDR_MDHA_ALG_SHA1 0
+#define SAHARA_HDR_MDHA_ALG_MD5 1
+#define SAHARA_HDR_MDHA_ALG_SHA256 2
+#define SAHARA_HDR_MDHA_ALG_SHA224 3
+#define SAHARA_HDR_MDHA_PDATA (1 << 2)
+#define SAHARA_HDR_MDHA_HMAC (1 << 3)
+#define SAHARA_HDR_MDHA_INIT (1 << 5)
+#define SAHARA_HDR_MDHA_IPAD (1 << 6)
+#define SAHARA_HDR_MDHA_OPAD (1 << 7)
+#define SAHARA_HDR_MDHA_SWAP (1 << 8)
+#define SAHARA_HDR_MDHA_MAC_FULL (1 << 9)
+#define SAHARA_HDR_MDHA_SSL (1 << 10)
+
/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH 1
@@ -117,31 +145,74 @@ struct sahara_hw_link {
};
struct sahara_ctx {
- struct sahara_dev *dev;
unsigned long flags;
+
+ /* AES-specific context */
int keylen;
u8 key[AES_KEYSIZE_128];
struct crypto_ablkcipher *fallback;
+
+ /* SHA-specific context */
+ struct crypto_shash *shash_fallback;
};
struct sahara_aes_reqctx {
unsigned long mode;
};
+/*
+ * struct sahara_sha_reqctx - private data per request
+ * @buf: holds data for requests smaller than block_size
+ * @rembuf: used to prepare one block_size-aligned request
+ * @context: hw-specific context for request. Digest is extracted from this
+ * @mode: specifies what type of hw-descriptor needs to be built
+ * @digest_size: length of digest for this request
+ * @context_size: length of hw-context for this request.
+ * Always digest_size + 4
+ * @buf_cnt: number of bytes saved in buf
+ * @sg_in_idx: number of hw links
+ * @in_sg: scatterlist for input data
+ * @in_sg_chain: scatterlists for chained input data
+ * @in_sg_chained: specifies if chained scatterlists are used or not
+ * @total: total number of bytes for transfer
+ * @last: is this the last block
+ * @first: is this the first block
+ * @active: inside a transfer
+ */
+struct sahara_sha_reqctx {
+ u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
+ u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
+ u8 context[SHA256_DIGEST_SIZE + 4];
+ struct mutex mutex;
+ unsigned int mode;
+ unsigned int digest_size;
+ unsigned int context_size;
+ unsigned int buf_cnt;
+ unsigned int sg_in_idx;
+ struct scatterlist *in_sg;
+ struct scatterlist in_sg_chain[2];
+ bool in_sg_chained;
+ size_t total;
+ unsigned int last;
+ unsigned int first;
+ unsigned int active;
+};
+
struct sahara_dev {
struct device *device;
+ unsigned int version;
void __iomem *regs_base;
struct clk *clk_ipg;
struct clk *clk_ahb;
+ struct mutex queue_mutex;
+ struct task_struct *kthread;
+ struct completion dma_completion;
struct sahara_ctx *ctx;
spinlock_t lock;
struct crypto_queue queue;
unsigned long flags;
- struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
-
struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC];
@@ -151,10 +222,12 @@ struct sahara_dev {
u8 *iv_base;
dma_addr_t iv_phys_base;
+ u8 *context_base;
+ dma_addr_t context_phys_base;
+
struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK];
dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK];
- struct ablkcipher_request *req;
size_t total;
struct scatterlist *in_sg;
unsigned int nb_in_sg;
@@ -162,7 +235,6 @@ struct sahara_dev {
unsigned int nb_out_sg;
u32 error;
- struct timer_list watchdog;
};
static struct sahara_dev *dev_ptr;
@@ -401,34 +473,6 @@ static void sahara_dump_links(struct sahara_dev *dev)
dev_dbg(dev->device, "\n");
}
-static void sahara_aes_done_task(unsigned long data)
-{
- struct sahara_dev *dev = (struct sahara_dev *)data;
-
- dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
- DMA_TO_DEVICE);
- dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
- DMA_FROM_DEVICE);
-
- spin_lock(&dev->lock);
- clear_bit(FLAGS_BUSY, &dev->flags);
- spin_unlock(&dev->lock);
-
- dev->req->base.complete(&dev->req->base, dev->error);
-}
-
-static void sahara_watchdog(unsigned long data)
-{
- struct sahara_dev *dev = (struct sahara_dev *)data;
- unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
- unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
-
- sahara_decode_status(dev, stat);
- sahara_decode_error(dev, err);
- dev->error = -ETIMEDOUT;
- sahara_aes_done_task(data);
-}
-
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
struct sahara_ctx *ctx = dev->ctx;
@@ -512,9 +556,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
sahara_dump_descriptors(dev);
sahara_dump_links(dev);
- /* Start processing descriptor chain. */
- mod_timer(&dev->watchdog,
- jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS));
sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
return 0;
@@ -529,37 +570,19 @@ unmap_in:
return -EINVAL;
}
-static void sahara_aes_queue_task(unsigned long data)
+static int sahara_aes_process(struct ablkcipher_request *req)
{
- struct sahara_dev *dev = (struct sahara_dev *)data;
- struct crypto_async_request *async_req, *backlog;
+ struct sahara_dev *dev = dev_ptr;
struct sahara_ctx *ctx;
struct sahara_aes_reqctx *rctx;
- struct ablkcipher_request *req;
int ret;
- spin_lock(&dev->lock);
- backlog = crypto_get_backlog(&dev->queue);
- async_req = crypto_dequeue_request(&dev->queue);
- if (!async_req)
- clear_bit(FLAGS_BUSY, &dev->flags);
- spin_unlock(&dev->lock);
-
- if (!async_req)
- return;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- req = ablkcipher_request_cast(async_req);
-
/* Request is ready to be dispatched by the device */
dev_dbg(dev->device,
"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
req->nbytes, req->src, req->dst);
/* assign new request to device */
- dev->req = req;
dev->total = req->nbytes;
dev->in_sg = req->src;
dev->out_sg = req->dst;
@@ -573,16 +596,25 @@ static void sahara_aes_queue_task(unsigned long data)
memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
/* assign new context to device */
- ctx->dev = dev;
dev->ctx = ctx;
+ reinit_completion(&dev->dma_completion);
+
ret = sahara_hw_descriptor_create(dev);
- if (ret < 0) {
- spin_lock(&dev->lock);
- clear_bit(FLAGS_BUSY, &dev->flags);
- spin_unlock(&dev->lock);
- dev->req->base.complete(&dev->req->base, ret);
+
+ ret = wait_for_completion_timeout(&dev->dma_completion,
+ msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+ if (!ret) {
+ dev_err(dev->device, "AES timeout\n");
+ return -ETIMEDOUT;
}
+
+ dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+ DMA_TO_DEVICE);
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_FROM_DEVICE);
+
+ return 0;
}
static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -624,12 +656,9 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
- struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
struct sahara_dev *dev = dev_ptr;
int err = 0;
- int busy;
dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
@@ -640,16 +669,13 @@ static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
return -EINVAL;
}
- ctx->dev = dev;
-
rctx->mode = mode;
- spin_lock_bh(&dev->lock);
+
+ mutex_lock(&dev->queue_mutex);
err = ablkcipher_enqueue_request(&dev->queue, req);
- busy = test_and_set_bit(FLAGS_BUSY, &dev->flags);
- spin_unlock_bh(&dev->lock);
+ mutex_unlock(&dev->queue_mutex);
- if (!busy)
- tasklet_schedule(&dev->queue_task);
+ wake_up_process(dev->kthread);
return err;
}
@@ -752,6 +778,484 @@ static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
ctx->fallback = NULL;
}
+static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
+ struct sahara_sha_reqctx *rctx)
+{
+ u32 hdr = 0;
+
+ hdr = rctx->mode;
+
+ if (rctx->first) {
+ hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
+ hdr |= SAHARA_HDR_MDHA_INIT;
+ } else {
+ hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
+ }
+
+ if (rctx->last)
+ hdr |= SAHARA_HDR_MDHA_PDATA;
+
+ if (hweight_long(hdr) % 2 == 0)
+ hdr |= SAHARA_HDR_PARITY_BIT;
+
+ return hdr;
+}
+
+static int sahara_sha_hw_links_create(struct sahara_dev *dev,
+ struct sahara_sha_reqctx *rctx,
+ int start)
+{
+ struct scatterlist *sg;
+ unsigned int i;
+ int ret;
+
+ dev->in_sg = rctx->in_sg;
+
+ dev->nb_in_sg = sahara_sg_length(dev->in_sg, rctx->total);
+ if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
+ dev_err(dev->device, "not enough hw links (%d)\n",
+ dev->nb_in_sg + dev->nb_out_sg);
+ return -EINVAL;
+ }
+
+ if (rctx->in_sg_chained) {
+ i = start;
+ sg = dev->in_sg;
+ while (sg) {
+ ret = dma_map_sg(dev->device, sg, 1,
+ DMA_TO_DEVICE);
+ if (!ret)
+ return -EFAULT;
+
+ dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->p = sg->dma_address;
+ dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+ sg = sg_next(sg);
+ i += 1;
+ }
+ dev->hw_link[i-1]->next = 0;
+ } else {
+ sg = dev->in_sg;
+ ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+ if (!ret)
+ return -EFAULT;
+
+ for (i = start; i < dev->nb_in_sg + start; i++) {
+ dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->p = sg->dma_address;
+ if (i == (dev->nb_in_sg + start - 1)) {
+ dev->hw_link[i]->next = 0;
+ } else {
+ dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+ sg = sg_next(sg);
+ }
+ }
+ }
+
+ return i;
+}
+
+static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
+ struct sahara_sha_reqctx *rctx,
+ struct ahash_request *req,
+ int index)
+{
+ unsigned result_len;
+ int i = index;
+
+ if (rctx->first)
+ /* Create initial descriptor: #8 */
+ dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
+ else
+ /* Create hash descriptor: #10. Must follow #6. */
+ dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
+
+ dev->hw_desc[index]->len1 = rctx->total;
+ if (dev->hw_desc[index]->len1 == 0) {
+ /* if len1 is 0, p1 must be 0, too */
+ dev->hw_desc[index]->p1 = 0;
+ rctx->sg_in_idx = 0;
+ } else {
+ /* Create input links */
+ dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
+ i = sahara_sha_hw_links_create(dev, rctx, index);
+
+ rctx->sg_in_idx = index;
+ if (i < 0)
+ return i;
+ }
+
+ dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
+
+ /* Save the context for the next operation */
+ result_len = rctx->context_size;
+ dev->hw_link[i]->p = dev->context_phys_base;
+
+ dev->hw_link[i]->len = result_len;
+ dev->hw_desc[index]->len2 = result_len;
+
+ dev->hw_link[i]->next = 0;
+
+ return 0;
+}
+
+/*
+ * Load descriptor aka #6
+ *
+ * To load a previously saved context back to the MDHA unit
+ *
+ * p1: Saved Context
+ * p2: NULL
+ *
+ */
+static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
+ struct sahara_sha_reqctx *rctx,
+ struct ahash_request *req,
+ int index)
+{
+ dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
+
+ dev->hw_desc[index]->len1 = rctx->context_size;
+ dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
+ dev->hw_desc[index]->len2 = 0;
+ dev->hw_desc[index]->p2 = 0;
+
+ dev->hw_link[index]->len = rctx->context_size;
+ dev->hw_link[index]->p = dev->context_phys_base;
+ dev->hw_link[index]->next = 0;
+
+ return 0;
+}
+
+static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
+{
+ if (!sg || !sg->length)
+ return nbytes;
+
+ while (nbytes && sg) {
+ if (nbytes <= sg->length) {
+ sg->length = nbytes;
+ sg_mark_end(sg);
+ break;
+ }
+ nbytes -= sg->length;
+ sg = scatterwalk_sg_next(sg);
+ }
+
+ return nbytes;
+}
+
+static int sahara_sha_prepare_request(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+ unsigned int hash_later;
+ unsigned int block_size;
+ unsigned int len;
+
+ block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+ /* append bytes from previous operation */
+ len = rctx->buf_cnt + req->nbytes;
+
+ /* only the last transfer can be padded in hardware */
+ if (!rctx->last && (len < block_size)) {
+ /* too little data, save it for the next operation */
+ scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
+ 0, req->nbytes, 0);
+ rctx->buf_cnt += req->nbytes;
+
+ return 0;
+ }
+
+ /* add data from previous operation first */
+ if (rctx->buf_cnt)
+ memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
+
+ /* data must always be a multiple of block_size */
+ hash_later = rctx->last ? 0 : len & (block_size - 1);
+ if (hash_later) {
+ unsigned int offset = req->nbytes - hash_later;
+ /* Save remaining bytes for later use */
+ scatterwalk_map_and_copy(rctx->buf, req->src, offset,
+ hash_later, 0);
+ }
+
+ /* nbytes should now be multiple of blocksize */
+ req->nbytes = req->nbytes - hash_later;
+
+ sahara_walk_and_recalc(req->src, req->nbytes);
+
+ /* have data from previous operation and current */
+ if (rctx->buf_cnt && req->nbytes) {
+ sg_init_table(rctx->in_sg_chain, 2);
+ sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
+
+ scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);
+
+ rctx->total = req->nbytes + rctx->buf_cnt;
+ rctx->in_sg = rctx->in_sg_chain;
+
+ rctx->in_sg_chained = true;
+ req->src = rctx->in_sg_chain;
+ /* only data from previous operation */
+ } else if (rctx->buf_cnt) {
+ if (req->src)
+ rctx->in_sg = req->src;
+ else
+ rctx->in_sg = rctx->in_sg_chain;
+ /* buf was copied into rembuf above */
+ sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
+ rctx->total = rctx->buf_cnt;
+ rctx->in_sg_chained = false;
+ /* no data from previous operation */
+ } else {
+ rctx->in_sg = req->src;
+ rctx->total = req->nbytes;
+ req->src = rctx->in_sg;
+ rctx->in_sg_chained = false;
+ }
+
+ /* on next call, we only have the remaining data in the buffer */
+ rctx->buf_cnt = hash_later;
+
+ return -EINPROGRESS;
+}
+
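
A worked example (illustrative numbers, SHA-256 block size 64) of the buffering rule sahara_sha_prepare_request() implements: only whole blocks are handed to the engine, the tail is carried in rctx->buf, and only the last transfer may be shorter because the hardware pads it.

	update(90),  buf_cnt = 0:   len = 90 -> hash 64 bytes, keep 26 in buf
	update(10),  buf_cnt = 26:  len = 36 -> below one block, keep all 36 in buf
	final(),     buf_cnt = 36:  last = 1 -> send the remaining 36, hardware pads
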
+static void sahara_sha_unmap_sg(struct sahara_dev *dev,
+ struct sahara_sha_reqctx *rctx)
+{
+ struct scatterlist *sg;
+
+ if (rctx->in_sg_chained) {
+ sg = dev->in_sg;
+ while (sg) {
+ dma_unmap_sg(dev->device, sg, 1, DMA_TO_DEVICE);
+ sg = sg_next(sg);
+ }
+ } else {
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+ }
+}
+
+static int sahara_sha_process(struct ahash_request *req)
+{
+ struct sahara_dev *dev = dev_ptr;
+ struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+ int ret = -EINPROGRESS;
+
+ ret = sahara_sha_prepare_request(req);
+ if (!ret)
+ return ret;
+
+ if (rctx->first) {
+ sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
+ dev->hw_desc[0]->next = 0;
+ rctx->first = 0;
+ } else {
+ memcpy(dev->context_base, rctx->context, rctx->context_size);
+
+ sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
+ dev->hw_desc[0]->next = dev->hw_phys_desc[1];
+ sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
+ dev->hw_desc[1]->next = 0;
+ }
+
+ sahara_dump_descriptors(dev);
+ sahara_dump_links(dev);
+
+ reinit_completion(&dev->dma_completion);
+
+ sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
+
+ ret = wait_for_completion_timeout(&dev->dma_completion,
+ msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+ if (!ret) {
+ dev_err(dev->device, "SHA timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ if (rctx->sg_in_idx)
+ sahara_sha_unmap_sg(dev, rctx);
+
+ memcpy(rctx->context, dev->context_base, rctx->context_size);
+
+ if (req->result)
+ memcpy(req->result, rctx->context, rctx->digest_size);
+
+ return 0;
+}
+
+static int sahara_queue_manage(void *data)
+{
+ struct sahara_dev *dev = (struct sahara_dev *)data;
+ struct crypto_async_request *async_req;
+ int ret = 0;
+
+ do {
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ mutex_lock(&dev->queue_mutex);
+ async_req = crypto_dequeue_request(&dev->queue);
+ mutex_unlock(&dev->queue_mutex);
+
+ if (async_req) {
+ if (crypto_tfm_alg_type(async_req->tfm) ==
+ CRYPTO_ALG_TYPE_AHASH) {
+ struct ahash_request *req =
+ ahash_request_cast(async_req);
+
+ ret = sahara_sha_process(req);
+ } else {
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(async_req);
+
+ ret = sahara_aes_process(req);
+ }
+
+ async_req->complete(async_req, ret);
+
+ continue;
+ }
+
+ schedule();
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static int sahara_sha_enqueue(struct ahash_request *req, int last)
+{
+ struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct sahara_dev *dev = dev_ptr;
+ int ret;
+
+ if (!req->nbytes && !last)
+ return 0;
+
+ mutex_lock(&rctx->mutex);
+ rctx->last = last;
+
+ if (!rctx->active) {
+ rctx->active = 1;
+ rctx->first = 1;
+ }
+
+ mutex_lock(&dev->queue_mutex);
+ ret = crypto_enqueue_request(&dev->queue, &req->base);
+ mutex_unlock(&dev->queue_mutex);
+
+ wake_up_process(dev->kthread);
+ mutex_unlock(&rctx->mutex);
+
+ return ret;
+}
+
+static int sahara_sha_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+
+ memset(rctx, 0, sizeof(*rctx));
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
+ rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
+ rctx->digest_size = SHA1_DIGEST_SIZE;
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
+ rctx->digest_size = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rctx->context_size = rctx->digest_size + 4;
+ rctx->active = 0;
+
+ mutex_init(&rctx->mutex);
+
+ return 0;
+}
+
+static int sahara_sha_update(struct ahash_request *req)
+{
+ return sahara_sha_enqueue(req, 0);
+}
+
+static int sahara_sha_final(struct ahash_request *req)
+{
+ req->nbytes = 0;
+ return sahara_sha_enqueue(req, 1);
+}
+
+static int sahara_sha_finup(struct ahash_request *req)
+{
+ return sahara_sha_enqueue(req, 1);
+}
+
+static int sahara_sha_digest(struct ahash_request *req)
+{
+ sahara_sha_init(req);
+
+ return sahara_sha_finup(req);
+}
+
+static int sahara_sha_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(out, ctx, sizeof(struct sahara_ctx));
+ memcpy(out + sizeof(struct sahara_ctx), rctx,
+ sizeof(struct sahara_sha_reqctx));
+
+ return 0;
+}
+
+static int sahara_sha_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(ctx, in, sizeof(struct sahara_ctx));
+ memcpy(rctx, in + sizeof(struct sahara_ctx),
+ sizeof(struct sahara_sha_reqctx));
+
+ return 0;
+}
+
+static int sahara_sha_cra_init(struct crypto_tfm *tfm)
+{
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->shash_fallback = crypto_alloc_shash(name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->shash_fallback)) {
+ pr_err("Error allocating fallback algo %s\n", name);
+ return PTR_ERR(ctx->shash_fallback);
+ }
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct sahara_sha_reqctx) +
+ SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+
+ return 0;
+}
+
+static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
+{
+ struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(ctx->shash_fallback);
+ ctx->shash_fallback = NULL;
+}
+
static struct crypto_alg aes_algs[] = {
{
.cra_name = "ecb(aes)",
@@ -797,14 +1301,66 @@ static struct crypto_alg aes_algs[] = {
}
};
+static struct ahash_alg sha_v3_algs[] = {
+{
+ .init = sahara_sha_init,
+ .update = sahara_sha_update,
+ .final = sahara_sha_final,
+ .finup = sahara_sha_finup,
+ .digest = sahara_sha_digest,
+ .export = sahara_sha_export,
+ .import = sahara_sha_import,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sahara-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sahara_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sahara_sha_cra_init,
+ .cra_exit = sahara_sha_cra_exit,
+ }
+},
+};
+
+static struct ahash_alg sha_v4_algs[] = {
+{
+ .init = sahara_sha_init,
+ .update = sahara_sha_update,
+ .final = sahara_sha_final,
+ .finup = sahara_sha_finup,
+ .digest = sahara_sha_digest,
+ .export = sahara_sha_export,
+ .import = sahara_sha_import,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sahara-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sahara_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sahara_sha_cra_init,
+ .cra_exit = sahara_sha_cra_exit,
+ }
+},
+};
+
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
struct sahara_dev *dev = (struct sahara_dev *)data;
unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
- del_timer(&dev->watchdog);
-
sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
SAHARA_REG_CMD);
@@ -819,7 +1375,7 @@ static irqreturn_t sahara_irq_handler(int irq, void *data)
dev->error = -EINVAL;
}
- tasklet_schedule(&dev->done_task);
+ complete(&dev->dma_completion);
return IRQ_HANDLED;
}
@@ -827,7 +1383,8 @@ static irqreturn_t sahara_irq_handler(int irq, void *data)
static int sahara_register_algs(struct sahara_dev *dev)
{
- int err, i, j;
+ int err;
+ unsigned int i, j, k, l;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
INIT_LIST_HEAD(&aes_algs[i].cra_list);
@@ -836,8 +1393,29 @@ static int sahara_register_algs(struct sahara_dev *dev)
goto err_aes_algs;
}
+ for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
+ err = crypto_register_ahash(&sha_v3_algs[k]);
+ if (err)
+ goto err_sha_v3_algs;
+ }
+
+ if (dev->version > SAHARA_VERSION_3)
+ for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
+ err = crypto_register_ahash(&sha_v4_algs[l]);
+ if (err)
+ goto err_sha_v4_algs;
+ }
+
return 0;
+err_sha_v4_algs:
+ for (j = 0; j < l; j++)
+ crypto_unregister_ahash(&sha_v4_algs[j]);
+
+err_sha_v3_algs:
+ for (j = 0; j < k; j++)
+ crypto_unregister_ahash(&sha_v3_algs[j]);
+
err_aes_algs:
for (j = 0; j < i; j++)
crypto_unregister_alg(&aes_algs[j]);
@@ -847,10 +1425,17 @@ err_aes_algs:
static void sahara_unregister_algs(struct sahara_dev *dev)
{
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_alg(&aes_algs[i]);
+
+ for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
+ crypto_unregister_ahash(&sha_v3_algs[i]);
+
+ if (dev->version > SAHARA_VERSION_3)
+ for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
+ crypto_unregister_ahash(&sha_v4_algs[i]);
}
static struct platform_device_id sahara_platform_ids[] = {
@@ -860,6 +1445,7 @@ static struct platform_device_id sahara_platform_ids[] = {
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
static struct of_device_id sahara_dt_ids[] = {
+ { .compatible = "fsl,imx53-sahara" },
{ .compatible = "fsl,imx27-sahara" },
{ /* sentinel */ }
};
@@ -939,6 +1525,16 @@ static int sahara_probe(struct platform_device *pdev)
dev->iv_base = dev->key_base + AES_KEYSIZE_128;
dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
+ /* Allocate space for context: largest digest + message length field */
+ dev->context_base = dma_alloc_coherent(&pdev->dev,
+ SHA256_DIGEST_SIZE + 4,
+ &dev->context_phys_base, GFP_KERNEL);
+ if (!dev->context_base) {
+ dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
+ err = -ENOMEM;
+ goto err_key;
+ }
+
/* Allocate space for HW links */
dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
@@ -956,28 +1552,40 @@ static int sahara_probe(struct platform_device *pdev)
crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
+ spin_lock_init(&dev->lock);
+ mutex_init(&dev->queue_mutex);
+
dev_ptr = dev;
- tasklet_init(&dev->queue_task, sahara_aes_queue_task,
- (unsigned long)dev);
- tasklet_init(&dev->done_task, sahara_aes_done_task,
- (unsigned long)dev);
+ dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
+ if (IS_ERR(dev->kthread)) {
+ err = PTR_ERR(dev->kthread);
+ goto err_link;
+ }
- init_timer(&dev->watchdog);
- dev->watchdog.function = &sahara_watchdog;
- dev->watchdog.data = (unsigned long)dev;
+ init_completion(&dev->dma_completion);
clk_prepare_enable(dev->clk_ipg);
clk_prepare_enable(dev->clk_ahb);
version = sahara_read(dev, SAHARA_REG_VERSION);
- if (version != SAHARA_VERSION_3) {
+ if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
+ if (version != SAHARA_VERSION_3)
+ err = -ENODEV;
+ } else if (of_device_is_compatible(pdev->dev.of_node,
+ "fsl,imx53-sahara")) {
+ if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
+ err = -ENODEV;
+ version = (version >> 8) & 0xff;
+ }
+ if (err == -ENODEV) {
dev_err(&pdev->dev, "SAHARA version %d not supported\n",
- version);
- err = -ENODEV;
+ version);
goto err_algs;
}
+ dev->version = version;
+
sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
SAHARA_REG_CMD);
sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
@@ -1000,11 +1608,15 @@ err_algs:
dev->hw_link[0], dev->hw_phys_link[0]);
clk_disable_unprepare(dev->clk_ipg);
clk_disable_unprepare(dev->clk_ahb);
+ kthread_stop(dev->kthread);
dev_ptr = NULL;
err_link:
dma_free_coherent(&pdev->dev,
2 * AES_KEYSIZE_128,
dev->key_base, dev->key_phys_base);
+ dma_free_coherent(&pdev->dev,
+ SHA256_DIGEST_SIZE + 4,
+ dev->context_base, dev->context_phys_base);
err_key:
dma_free_coherent(&pdev->dev,
SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
@@ -1027,8 +1639,7 @@ static int sahara_remove(struct platform_device *pdev)
SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
dev->hw_desc[0], dev->hw_phys_desc[0]);
- tasklet_kill(&dev->done_task);
- tasklet_kill(&dev->queue_task);
+ kthread_stop(dev->kthread);
sahara_unregister_algs(dev);
@@ -1055,4 +1666,5 @@ module_platform_driver(sahara_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
+MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 92105f3dc8e0..7c035de9055e 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1688,6 +1688,7 @@ static void ux500_cryp_shutdown(struct platform_device *pdev)
}
+#ifdef CONFIG_PM_SLEEP
static int ux500_cryp_suspend(struct device *dev)
{
int ret;
@@ -1768,6 +1769,7 @@ static int ux500_cryp_resume(struct device *dev)
return ret;
}
+#endif
static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
@@ -1810,7 +1812,7 @@ module_exit(ux500_cryp_mod_fini);
module_param(cryp_mode, int, 0);
MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
-MODULE_ALIAS("aes-all");
-MODULE_ALIAS("des-all");
+MODULE_ALIAS_CRYPTO("aes-all");
+MODULE_ALIAS_CRYPTO("des-all");
MODULE_LICENSE("GPL");
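
The new #ifdef CONFIG_PM_SLEEP guards follow the usual pattern for SIMPLE_DEV_PM_OPS users: the macro only references the callbacks when CONFIG_PM_SLEEP is enabled, so unguarded suspend/resume handlers produce "defined but not used" warnings in !PM_SLEEP builds. A minimal sketch (placeholder names):

	#include <linux/device.h>
	#include <linux/pm.h>

	#ifdef CONFIG_PM_SLEEP
	static int mydrv_suspend(struct device *dev)
	{
		/* quiesce the hardware */
		return 0;
	}

	static int mydrv_resume(struct device *dev)
	{
		/* restore the hardware */
		return 0;
	}
	#endif

	static SIMPLE_DEV_PM_OPS(mydrv_pm, mydrv_suspend, mydrv_resume);
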
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 1c73f4fbc252..76ecc8d143d0 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1881,6 +1881,7 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
__func__);
}
+#ifdef CONFIG_PM_SLEEP
/**
* ux500_hash_suspend - Function that suspends the hash device.
* @dev: Device to suspend.
@@ -1949,6 +1950,7 @@ static int ux500_hash_resume(struct device *dev)
return ret;
}
+#endif
static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
@@ -1995,7 +1997,7 @@ module_exit(ux500_hash_mod_fini);
MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("sha1-all");
-MODULE_ALIAS("sha256-all");
-MODULE_ALIAS("hmac-sha1-all");
-MODULE_ALIAS("hmac-sha256-all");
+MODULE_ALIAS_CRYPTO("sha1-all");
+MODULE_ALIAS_CRYPTO("sha256-all");
+MODULE_ALIAS_CRYPTO("hmac-sha1-all");
+MODULE_ALIAS_CRYPTO("hmac-sha256-all");