author		Linus Torvalds <torvalds@linux-foundation.org>	2011-07-24 09:05:32 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-24 09:05:32 -0700
commit		a23a334bd547e9462d9ca4a74608519a1e928848 (patch)
tree		e3d4f4423130f0d74f141c9bbd0c0874690e38b3 /drivers
parent		a642285014df03b8f320399d515bf3b779af07ac (diff)
parent		acdca31dba86c4f426460aa000d13930a00549b7 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (34 commits)
crypto: caam - ablkcipher support
crypto: caam - faster aead implementation
crypto: caam - structure renaming
crypto: caam - shorter names
crypto: talitos - don't bad_key in ablkcipher setkey
crypto: talitos - remove unused giv from ablkcipher methods
crypto: talitos - don't set done notification in hot path
crypto: talitos - ensure request ordering within a single tfm
crypto: gf128mul - fix call to memset()
crypto: s390 - support hardware accelerated SHA-224
crypto: algif_hash - Handle initial af_alg_make_sg error correctly
crypto: sha1_generic - use SHA1_BLOCK_SIZE
hwrng: ppc4xx - add support for ppc4xx TRNG
crypto: crypto4xx - Perform read/modify/write on device control register
crypto: caam - fix build warning when DEBUG_FS not configured
crypto: arc4 - Fixed coding style issues
crypto: crc32c - Fixed coding style issue
crypto: omap-sham - do not schedule tasklet if there is no active requests
crypto: omap-sham - clear device flags when finishing request
crypto: omap-sham - irq handler must not clear error code
...
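[ Note: "crypto: caam - ablkcipher support" above wires CAAM's job ring into the kernel's asynchronous block-cipher (ablkcipher) interface. For readers unfamiliar with that interface, here is a minimal, illustrative consumer sketch against the 3.0-era API; the demo_* names are hypothetical, and any registered "cbc(aes)" implementation, CAAM included, can service the request. ]

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/err.h>

struct demo_result {
	struct completion completion;
	int err;
};

/* Completion callback used when the provider processes the request asynchronously. */
static void demo_cipher_done(struct crypto_async_request *req, int err)
{
	struct demo_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

/* Encrypt one 16-byte block of @buf in place with AES-128 in CBC mode. */
static int demo_cbc_aes_encrypt(u8 *buf, const u8 *key, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	struct demo_result result;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ablkcipher(tfm);
		return -ENOMEM;
	}

	init_completion(&result.completion);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					demo_cipher_done, &result);

	ret = crypto_ablkcipher_setkey(tfm, key, 16);
	if (ret)
		goto out;

	sg_init_one(&sg, buf, 16);
	ablkcipher_request_set_crypt(req, &sg, &sg, 16, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&result.completion);
		ret = result.err;
	}
out:
	ablkcipher_request_free(req);
	crypto_free_ablkcipher(tfm);
	return ret;
}

[ Hardware drivers such as caamalg.c normally return -EINPROGRESS from the encrypt call and signal completion from their interrupt path, which is why the caller above waits on a completion rather than assuming a synchronous result. ]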
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/Kconfig           |   12
-rw-r--r--  drivers/char/hw_random/Makefile          |    1
-rw-r--r--  drivers/char/hw_random/nomadik-rng.c     |    3
-rw-r--r--  drivers/char/hw_random/omap-rng.c        |    6
-rw-r--r--  drivers/char/hw_random/ppc4xx-rng.c      |  156
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c  |    3
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c     |    5
-rw-r--r--  drivers/crypto/caam/caamalg.c            | 1832
-rw-r--r--  drivers/crypto/caam/compat.h             |    1
-rw-r--r--  drivers/crypto/caam/ctrl.c               |    4
-rw-r--r--  drivers/crypto/caam/desc_constr.h        |   58
-rw-r--r--  drivers/crypto/omap-sham.c               |  180
-rw-r--r--  drivers/crypto/talitos.c                 |   47
13 files changed, 1756 insertions(+), 552 deletions(-)
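
[ Note: most of the insertions land in drivers/crypto/caam/caamalg.c; the other sizable addition is the new PowerPC 4xx TRNG driver, drivers/char/hw_random/ppc4xx-rng.c. For orientation, here is a minimal sketch of the hwrng provider interface that driver implements. struct hwrng, its data_present/data_read callbacks, and hwrng_register()/hwrng_unregister() are the real API; the example_* names and register offsets are placeholders, and the real ones appear in the patch below. ]

#include <linux/hw_random.h>
#include <linux/module.h>
#include <asm/io.h>

/* Placeholder register layout; the real offsets live in ppc4xx-rng.c below. */
#define EXAMPLE_TRNG_STAT	0x04
#define EXAMPLE_TRNG_STAT_BUSY	0x01
#define EXAMPLE_TRNG_DATA	0x00

static int example_rng_data_present(struct hwrng *rng, int wait)
{
	void __iomem *base = (void __iomem *)rng->priv;

	/*
	 * Report data when the busy bit has cleared.  A real driver polls
	 * for a while when 'wait' is set, as ppc4xx_rng_data_present() does.
	 * in_le32() is the PowerPC little-endian MMIO read used by the driver.
	 */
	return !(in_le32(base + EXAMPLE_TRNG_STAT) & EXAMPLE_TRNG_STAT_BUSY);
}

static int example_rng_data_read(struct hwrng *rng, u32 *data)
{
	void __iomem *base = (void __iomem *)rng->priv;

	*data = in_le32(base + EXAMPLE_TRNG_DATA);
	return 4;	/* number of bytes placed in *data */
}

static struct hwrng example_rng = {
	.name		= "example-rng",
	.data_present	= example_rng_data_present,
	.data_read	= example_rng_data_read,
};

[ A probe routine ioremap()s the device, stashes the base address in .priv and calls hwrng_register(&example_rng); the remove path calls hwrng_unregister() and iounmap(), exactly the pattern ppc4xx_rng_probe()/ppc4xx_rng_remove() follow in the patch. ]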
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index a60043b3e409..1d2ebc7a4947 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -210,3 +210,15 @@ config HW_RANDOM_PICOXCELL module will be called picoxcell-rng. If unsure, say Y. + +config HW_RANDOM_PPC4XX + tristate "PowerPC 4xx generic true random number generator support" + depends on HW_RANDOM && PPC && 4xx + ---help--- + This driver provides the kernel-side support for the TRNG hardware + found in the security function of some PowerPC 4xx SoCs. + + To compile this driver as a module, choose M here: the + module will be called ppc4xx-rng. + + If unsure, say N. diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 3db4eb8b19c0..c88f244c8a71 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -20,3 +20,4 @@ obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o +obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index dd1d143eb8ea..52e08ca3ccd7 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -55,7 +55,7 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) ret = amba_request_regions(dev, dev->dev.init_name); if (ret) - return ret; + goto out_clk; ret = -ENOMEM; base = ioremap(dev->res.start, resource_size(&dev->res)); if (!base) @@ -70,6 +70,7 @@ out_unmap: iounmap(base); out_release: amba_release_regions(dev); +out_clk: clk_disable(rng_clk); clk_put(rng_clk); return ret; diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 2cc755a64302..b757fac3cd1f 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -113,8 +113,10 @@ static int __devinit omap_rng_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENOENT; + if (!res) { + ret = -ENOENT; + goto err_region; + } if (!request_mem_region(res->start, resource_size(res), pdev->name)) { ret = -EBUSY; diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c new file mode 100644 index 000000000000..b8afa6a4ff67 --- /dev/null +++ b/drivers/char/hw_random/ppc4xx-rng.c @@ -0,0 +1,156 @@ +/* + * Generic PowerPC 44x RNG driver + * + * Copyright 2011 IBM Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/hw_random.h> +#include <linux/delay.h> +#include <linux/of_platform.h> +#include <asm/io.h> + +#define PPC4XX_TRNG_DEV_CTRL 0x60080 + +#define PPC4XX_TRNGE 0x00020000 +#define PPC4XX_TRNG_CTRL 0x0008 +#define PPC4XX_TRNG_CTRL_DALM 0x20 +#define PPC4XX_TRNG_STAT 0x0004 +#define PPC4XX_TRNG_STAT_B 0x1 +#define PPC4XX_TRNG_DATA 0x0000 + +#define MODULE_NAME "ppc4xx_rng" + +static int ppc4xx_rng_data_present(struct hwrng *rng, int wait) +{ + void __iomem *rng_regs = (void __iomem *) rng->priv; + int busy, i, present = 0; + + for (i = 0; i < 20; i++) { + busy = (in_le32(rng_regs + PPC4XX_TRNG_STAT) & PPC4XX_TRNG_STAT_B); + if (!busy || !wait) { + present = 1; + break; + } + udelay(10); + } + return present; +} + +static int ppc4xx_rng_data_read(struct hwrng *rng, u32 *data) +{ + void __iomem *rng_regs = (void __iomem *) rng->priv; + *data = in_le32(rng_regs + PPC4XX_TRNG_DATA); + return 4; +} + +static int ppc4xx_rng_enable(int enable) +{ + struct device_node *ctrl; + void __iomem *ctrl_reg; + int err = 0; + u32 val; + + /* Find the main crypto device node and map it to turn the TRNG on */ + ctrl = of_find_compatible_node(NULL, NULL, "amcc,ppc4xx-crypto"); + if (!ctrl) + return -ENODEV; + + ctrl_reg = of_iomap(ctrl, 0); + if (!ctrl_reg) { + err = -ENODEV; + goto out; + } + + val = in_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL); + + if (enable) + val |= PPC4XX_TRNGE; + else + val = val & ~PPC4XX_TRNGE; + + out_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL, val); + iounmap(ctrl_reg); + +out: + of_node_put(ctrl); + + return err; +} + +static struct hwrng ppc4xx_rng = { + .name = MODULE_NAME, + .data_present = ppc4xx_rng_data_present, + .data_read = ppc4xx_rng_data_read, +}; + +static int __devinit ppc4xx_rng_probe(struct platform_device *dev) +{ + void __iomem *rng_regs; + int err = 0; + + rng_regs = of_iomap(dev->dev.of_node, 0); + if (!rng_regs) + return -ENODEV; + + err = ppc4xx_rng_enable(1); + if (err) + return err; + + out_le32(rng_regs + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM); + ppc4xx_rng.priv = (unsigned long) rng_regs; + + err = hwrng_register(&ppc4xx_rng); + + return err; +} + +static int __devexit ppc4xx_rng_remove(struct platform_device *dev) +{ + void __iomem *rng_regs = (void __iomem *) ppc4xx_rng.priv; + + hwrng_unregister(&ppc4xx_rng); + ppc4xx_rng_enable(0); + iounmap(rng_regs); + + return 0; +} + +static struct of_device_id ppc4xx_rng_match[] = { + { .compatible = "ppc4xx-rng", }, + { .compatible = "amcc,ppc460ex-rng", }, + { .compatible = "amcc,ppc440epx-rng", }, + {}, +}; + +static struct platform_driver ppc4xx_rng_driver = { + .driver = { + .name = MODULE_NAME, + .owner = THIS_MODULE, + .of_match_table = ppc4xx_rng_match, + }, + .probe = ppc4xx_rng_probe, + .remove = ppc4xx_rng_remove, +}; + +static int __init ppc4xx_rng_init(void) +{ + return platform_driver_register(&ppc4xx_rng_driver); +} +module_init(ppc4xx_rng_init); + +static void __exit ppc4xx_rng_exit(void) +{ + platform_driver_unregister(&ppc4xx_rng_driver); +} +module_exit(ppc4xx_rng_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Josh Boyer <jwboyer@linux.vnet.ibm.com>"); +MODULE_DESCRIPTION("HW RNG driver for PPC 4xx processors"); diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c index a94e930575f2..a8428e6f64a9 100644 --- a/drivers/char/hw_random/timeriomem-rng.c +++ b/drivers/char/hw_random/timeriomem-rng.c @@ -100,8 +100,7 @@ static int __devinit 
timeriomem_rng_probe(struct platform_device *pdev) timeriomem_rng_data = pdev->dev.platform_data; - timeriomem_rng_data->address = ioremap(res->start, - res->end - res->start + 1); + timeriomem_rng_data->address = ioremap(res->start, resource_size(res)); if (!timeriomem_rng_data->address) return -EIO; diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 18912521a7a5..1d103f997dc2 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -51,6 +51,7 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev) union ce_io_threshold io_threshold; u32 rand_num; union ce_pe_dma_cfg pe_dma_cfg; + u32 device_ctrl; writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG); /* setup pe dma, include reset sg, pdr and pe, then release reset */ @@ -84,7 +85,9 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev) writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE); ring_ctrl.w = 0; writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL); - writel(PPC4XX_DC_3DES_EN, dev->ce_base + CRYPTO4XX_DEVICE_CTRL); + device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL); + device_ctrl |= PPC4XX_DC_3DES_EN; + writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL); writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE); writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE); part_ring_size.w = 0; diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 676d957c22b0..4159265b453b 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -62,10 +62,22 @@ #define CAAM_MAX_IV_LENGTH 16 /* length of descriptors text */ -#define DESC_AEAD_SHARED_TEXT_LEN 4 -#define DESC_AEAD_ENCRYPT_TEXT_LEN 21 -#define DESC_AEAD_DECRYPT_TEXT_LEN 24 -#define DESC_AEAD_GIVENCRYPT_TEXT_LEN 27 +#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3) + +#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) +#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ) +#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ) +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) + +#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) +#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ + 20 * CAAM_CMD_SZ) +#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \ + 15 * CAAM_CMD_SZ) + +#define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \ + CAAM_MAX_KEY_SIZE) +#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) #ifdef DEBUG /* for print_hex_dumps with line references */ @@ -76,30 +88,366 @@ #define debug(format, arg...) 
#endif +/* Set DK bit in class 1 operation if shared */ +static inline void append_dec_op1(u32 *desc, u32 type) +{ + u32 *jump_cmd, *uncond_jump_cmd; + + jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); + append_operation(desc, type | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT); + uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); + set_jump_tgt_here(desc, jump_cmd); + append_operation(desc, type | OP_ALG_AS_INITFINAL | + OP_ALG_DECRYPT | OP_ALG_AAI_DK); + set_jump_tgt_here(desc, uncond_jump_cmd); +} + +/* + * Wait for completion of class 1 key loading before allowing + * error propagation + */ +static inline void append_dec_shr_done(u32 *desc) +{ + u32 *jump_cmd; + + jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL); + set_jump_tgt_here(desc, jump_cmd); + append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD); +} + +/* + * For aead functions, read payload and write payload, + * both of which are specified in req->src and req->dst + */ +static inline void aead_append_src_dst(u32 *desc, u32 msg_type) +{ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | + KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH); + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); +} + +/* + * For aead encrypt and decrypt, read iv for both classes + */ +static inline void aead_append_ld_iv(u32 *desc, int ivsize) +{ + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | ivsize); + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize); +} + +/* + * For ablkcipher encrypt and decrypt, read from req->src and + * write to req->dst + */ +static inline void ablkcipher_append_src_dst(u32 *desc) +{ + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); \ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); \ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | \ + KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); \ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); \ +} + +/* + * If all data, including src (with assoc and iv) or dst (with iv only) are + * contiguous + */ +#define GIV_SRC_CONTIG 1 +#define GIV_DST_CONTIG (1 << 1) + /* * per-session context */ struct caam_ctx { struct device *jrdev; - u32 *sh_desc; - dma_addr_t shared_desc_phys; + u32 sh_desc_enc[DESC_MAX_USED_LEN]; + u32 sh_desc_dec[DESC_MAX_USED_LEN]; + u32 sh_desc_givenc[DESC_MAX_USED_LEN]; + dma_addr_t sh_desc_enc_dma; + dma_addr_t sh_desc_dec_dma; + dma_addr_t sh_desc_givenc_dma; u32 class1_alg_type; u32 class2_alg_type; u32 alg_op; - u8 *key; - dma_addr_t key_phys; + u8 key[CAAM_MAX_KEY_SIZE]; + dma_addr_t key_dma; unsigned int enckeylen; unsigned int split_key_len; unsigned int split_key_pad_len; unsigned int authsize; }; -static int aead_authenc_setauthsize(struct crypto_aead *authenc, +static void append_key_aead(u32 *desc, struct caam_ctx *ctx, + int keys_fit_inline) +{ + if (keys_fit_inline) { + append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, + ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + append_key_as_imm(desc, (void *)ctx->key + + ctx->split_key_pad_len, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + } else { + append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + append_key(desc, ctx->key_dma + ctx->split_key_pad_len, + ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + } +} + +static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, + int keys_fit_inline) +{ + u32 *key_jump_cmd; + + init_sh_desc(desc, HDR_SHARE_WAIT); + + /* 
Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + append_key_aead(desc, ctx, keys_fit_inline); + + set_jump_tgt_here(desc, key_jump_cmd); + + /* Propagate errors from shared to job descriptor */ + append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD); +} + +static int aead_set_sh_desc(struct crypto_aead *aead) +{ + struct aead_tfm *tfm = &aead->base.crt_aead; + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool keys_fit_inline = 0; + u32 *key_jump_cmd, *jump_cmd; + u32 geniv, moveiv; + u32 *desc; + + if (!ctx->enckeylen || !ctx->authsize) + return 0; + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN + + ctx->split_key_pad_len + ctx->enckeylen <= + CAAM_DESC_BYTES_MAX) + keys_fit_inline = 1; + + /* aead_encrypt shared descriptor */ + desc = ctx->sh_desc_enc; + + init_sh_desc_key_aead(desc, ctx, keys_fit_inline); + + /* Class 2 operation */ + append_operation(desc, ctx->class2_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* cryptlen = seqoutlen - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + + /* assoclen + cryptlen = seqinlen - ivsize */ + append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); + + /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */ + append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); + + /* read assoc before reading payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | + KEY_VLF); + aead_append_ld_iv(desc, tfm->ivsize); + + /* Class 1 operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* Read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); + + /* Write ICV */ + append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN + + ctx->split_key_pad_len + ctx->enckeylen <= + CAAM_DESC_BYTES_MAX) + keys_fit_inline = 1; + + desc = ctx->sh_desc_dec; + + /* aead_decrypt shared descriptor */ + init_sh_desc(desc, HDR_SHARE_WAIT); + + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + append_key_aead(desc, ctx, keys_fit_inline); + + /* Only propagate error immediately if shared */ + jump_cmd = append_jump(desc, JUMP_TEST_ALL); + set_jump_tgt_here(desc, key_jump_cmd); + append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD); + set_jump_tgt_here(desc, jump_cmd); + + /* Class 2 operation */ + append_operation(desc, ctx->class2_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); + + /* assoclen + cryptlen = seqinlen - ivsize */ + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, + ctx->authsize + tfm->ivsize) + /* assoclen = (assoclen + cryptlen) - cryptlen */ + 
append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); + append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); + + /* read assoc before reading payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | + KEY_VLF); + + aead_append_ld_iv(desc, tfm->ivsize); + + append_dec_op1(desc, ctx->class1_alg_type); + + /* Read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); + aead_append_src_dst(desc, FIFOLD_TYPE_MSG); + + /* Load ICV */ + append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); + append_dec_shr_done(desc); + + ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN + + ctx->split_key_pad_len + ctx->enckeylen <= + CAAM_DESC_BYTES_MAX) + keys_fit_inline = 1; + + /* aead_givencrypt shared descriptor */ + desc = ctx->sh_desc_givenc; + + init_sh_desc_key_aead(desc, ctx, keys_fit_inline); + + /* Generate IV */ + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | + NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + append_move(desc, MOVE_SRC_INFIFO | + MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT)); + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); + + /* Copy IV to class 1 context */ + append_move(desc, MOVE_SRC_CLASS1CTX | + MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT)); + + /* Return to encryption */ + append_operation(desc, ctx->class2_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* ivsize + cryptlen = seqoutlen - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + + /* assoclen = seqinlen - (ivsize + cryptlen) */ + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); + + /* read assoc before reading payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | + KEY_VLF); + + /* Copy iv from class 1 ctx to class 2 fifo*/ + moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | + NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); + append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); + append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB | + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); + + /* Class 1 operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* Will write ivsize + cryptlen */ + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* Not need to reload iv */ + append_seq_fifo_load(desc, tfm->ivsize, + FIFOLD_CLASS_SKIP); + + /* Will read cryptlen */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); + + /* Write ICV */ + append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + ctx->sh_desc_givenc_dma = 
dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + return 0; +} + +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx(authenc); ctx->authsize = authsize; + aead_set_sh_desc(authenc); return 0; } @@ -117,6 +465,7 @@ static void split_key_done(struct device *dev, u32 *desc, u32 err, #ifdef DEBUG dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif + if (err) { char tmp[CAAM_ERROR_STR_MAX]; @@ -220,73 +569,7 @@ static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen) return ret; } -static int build_sh_desc_ipsec(struct caam_ctx *ctx) -{ - struct device *jrdev = ctx->jrdev; - u32 *sh_desc; - u32 *jump_cmd; - bool keys_fit_inline = 0; - - /* - * largest Job Descriptor and its Shared Descriptor - * must both fit into the 64-word Descriptor h/w Buffer - */ - if ((DESC_AEAD_GIVENCRYPT_TEXT_LEN + - DESC_AEAD_SHARED_TEXT_LEN) * CAAM_CMD_SZ + - ctx->split_key_pad_len + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) - keys_fit_inline = 1; - - /* build shared descriptor for this session */ - sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN + - (keys_fit_inline ? - ctx->split_key_pad_len + ctx->enckeylen : - CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL); - if (!sh_desc) { - dev_err(jrdev, "could not allocate shared descriptor\n"); - return -ENOMEM; - } - - init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL); - - jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL | - JUMP_COND_SHRD | JUMP_COND_SELF); - - /* - * process keys, starting with class 2/authentication. 
- */ - if (keys_fit_inline) { - append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len, - ctx->split_key_len, - CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); - - append_key_as_imm(sh_desc, (void *)ctx->key + - ctx->split_key_pad_len, ctx->enckeylen, - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); - } else { - append_key(sh_desc, ctx->key_phys, ctx->split_key_len, CLASS_2 | - KEY_DEST_MDHA_SPLIT | KEY_ENC); - append_key(sh_desc, ctx->key_phys + ctx->split_key_pad_len, - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); - } - - /* update jump cmd now that we are at the jump target */ - set_jump_tgt_here(sh_desc, jump_cmd); - - ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc, - desc_bytes(sh_desc), - DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) { - dev_err(jrdev, "unable to map shared descriptor\n"); - kfree(sh_desc); - return -ENOMEM; - } - - ctx->sh_desc = sh_desc; - - return 0; -} - -static int aead_authenc_setkey(struct crypto_aead *aead, +static int aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ @@ -326,27 +609,19 @@ static int aead_authenc_setkey(struct crypto_aead *aead, print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); #endif - ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen, - GFP_KERNEL | GFP_DMA); - if (!ctx->key) { - dev_err(jrdev, "could not allocate key output memory\n"); - return -ENOMEM; - } ret = gen_split_key(ctx, key, authkeylen); if (ret) { - kfree(ctx->key); goto badkey; } /* postpend encryption key to auth split key */ memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen); - ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len + + ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len + enckeylen, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, ctx->key_phys)) { + if (dma_mapping_error(jrdev, ctx->key_dma)) { dev_err(jrdev, "unable to map key i/o memory\n"); - kfree(ctx->key); return -ENOMEM; } #ifdef DEBUG @@ -357,11 +632,10 @@ static int aead_authenc_setkey(struct crypto_aead *aead, ctx->enckeylen = enckeylen; - ret = build_sh_desc_ipsec(ctx); + ret = aead_set_sh_desc(aead); if (ret) { - dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len + + dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len + enckeylen, DMA_TO_DEVICE); - kfree(ctx->key); } return ret; @@ -370,6 +644,119 @@ badkey: return -EINVAL; } +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher; + struct device *jrdev = ctx->jrdev; + int ret = 0; + u32 *key_jump_cmd, *jump_cmd; + u32 *desc; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +#endif + + memcpy(ctx->key, key, keylen); + ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->key_dma)) { + dev_err(jrdev, "unable to map key i/o memory\n"); + return -ENOMEM; + } + ctx->enckeylen = keylen; + + /* ablkcipher_encrypt shared descriptor */ + desc = ctx->sh_desc_enc; + init_sh_desc(desc, HDR_SHARE_WAIT); + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + /* Load class1 key only */ + append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, + 
ctx->enckeylen, CLASS_1 | + KEY_DEST_CLASS_REG); + + set_jump_tgt_here(desc, key_jump_cmd); + + /* Propagate errors from shared to job descriptor */ + append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD); + + /* Load iv */ + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | tfm->ivsize); + + /* Load operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* Perform operation */ + ablkcipher_append_src_dst(desc); + + ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + /* ablkcipher_decrypt shared descriptor */ + desc = ctx->sh_desc_dec; + + init_sh_desc(desc, HDR_SHARE_WAIT); + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + + /* Load class1 key only */ + append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | + KEY_DEST_CLASS_REG); + + /* For aead, only propagate error immediately if shared */ + jump_cmd = append_jump(desc, JUMP_TEST_ALL); + set_jump_tgt_here(desc, key_jump_cmd); + append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD); + set_jump_tgt_here(desc, jump_cmd); + + /* load IV */ + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | tfm->ivsize); + + /* Choose operation */ + append_dec_op1(desc, ctx->class1_alg_type); + + /* Perform operation */ + ablkcipher_append_src_dst(desc); + + /* Wait for key to load before allowing propagating error */ + append_dec_shr_done(desc); + + ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + return ret; +} + struct link_tbl_entry { u64 ptr; u32 len; @@ -379,64 +766,109 @@ struct link_tbl_entry { }; /* - * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor + * aead_edesc - s/w-extended aead descriptor + * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist * @src_nents: number of segments in input scatterlist * @dst_nents: number of segments in output scatterlist - * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist + * @iv_dma: dma address of iv for checking continuity and link table * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) * @link_tbl_bytes: length of dma mapped link_tbl space * @link_tbl_dma: bus physical mapped address of h/w link table * @hw_desc: the h/w job descriptor followed by any referenced link tables */ -struct ipsec_esp_edesc { +struct aead_edesc { int assoc_nents; int src_nents; int dst_nents; + dma_addr_t iv_dma; int link_tbl_bytes; dma_addr_t link_tbl_dma; struct link_tbl_entry *link_tbl; u32 hw_desc[0]; }; -static void ipsec_esp_unmap(struct device *dev, - struct ipsec_esp_edesc *edesc, - struct aead_request *areq) -{ - dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents, DMA_TO_DEVICE); +/* + * ablkcipher_edesc - s/w-extended ablkcipher descriptor + * @src_nents: number of segments in input scatterlist + * @dst_nents: 
number of segments in output scatterlist + * @iv_dma: dma address of iv for checking continuity and link table + * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) + * @link_tbl_bytes: length of dma mapped link_tbl space + * @link_tbl_dma: bus physical mapped address of h/w link table + * @hw_desc: the h/w job descriptor followed by any referenced link tables + */ +struct ablkcipher_edesc { + int src_nents; + int dst_nents; + dma_addr_t iv_dma; + int link_tbl_bytes; + dma_addr_t link_tbl_dma; + struct link_tbl_entry *link_tbl; + u32 hw_desc[0]; +}; - if (unlikely(areq->dst != areq->src)) { - dma_unmap_sg(dev, areq->src, edesc->src_nents, - DMA_TO_DEVICE); - dma_unmap_sg(dev, areq->dst, edesc->dst_nents, - DMA_FROM_DEVICE); +static void caam_unmap(struct device *dev, struct scatterlist *src, + struct scatterlist *dst, int src_nents, int dst_nents, + dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma, + int link_tbl_bytes) +{ + if (unlikely(dst != src)) { + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); } else { - dma_unmap_sg(dev, areq->src, edesc->src_nents, - DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); } - if (edesc->link_tbl_bytes) - dma_unmap_single(dev, edesc->link_tbl_dma, - edesc->link_tbl_bytes, + if (iv_dma) + dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); + if (link_tbl_bytes) + dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes, DMA_TO_DEVICE); } -/* - * ipsec_esp descriptor callbacks - */ -static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err, +static void aead_unmap(struct device *dev, + struct aead_edesc *edesc, + struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + int ivsize = crypto_aead_ivsize(aead); + + dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE); + + caam_unmap(dev, req->src, req->dst, + edesc->src_nents, edesc->dst_nents, + edesc->iv_dma, ivsize, edesc->link_tbl_dma, + edesc->link_tbl_bytes); +} + +static void ablkcipher_unmap(struct device *dev, + struct ablkcipher_edesc *edesc, + struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + + caam_unmap(dev, req->src, req->dst, + edesc->src_nents, edesc->dst_nents, + edesc->iv_dma, ivsize, edesc->link_tbl_dma, + edesc->link_tbl_bytes); +} + +static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, void *context) { - struct aead_request *areq = context; - struct ipsec_esp_edesc *edesc; + struct aead_request *req = context; + struct aead_edesc *edesc; #ifdef DEBUG - struct crypto_aead *aead = crypto_aead_reqtfm(areq); - int ivsize = crypto_aead_ivsize(aead); + struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); + int ivsize = crypto_aead_ivsize(aead); dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif - edesc = (struct ipsec_esp_edesc *)((char *)desc - - offsetof(struct ipsec_esp_edesc, hw_desc)); + + edesc = (struct aead_edesc *)((char *)desc - + offsetof(struct aead_edesc, hw_desc)); if (err) { char tmp[CAAM_ERROR_STR_MAX]; @@ -444,39 +876,50 @@ static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err, dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); } - ipsec_esp_unmap(jrdev, edesc, areq); + aead_unmap(jrdev, edesc, req); #ifdef DEBUG print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 
16, 4, sg_virt(areq->assoc), - areq->assoclen , 1); + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), + req->assoclen , 1); print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize, + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize, edesc->src_nents ? 100 : ivsize, 1); print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src), - edesc->src_nents ? 100 : areq->cryptlen + + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + edesc->src_nents ? 100 : req->cryptlen + ctx->authsize + 4, 1); #endif kfree(edesc); - aead_request_complete(areq, err); + aead_request_complete(req, err); } -static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err, +static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, void *context) { - struct aead_request *areq = context; - struct ipsec_esp_edesc *edesc; + struct aead_request *req = context; + struct aead_edesc *edesc; #ifdef DEBUG - struct crypto_aead *aead = crypto_aead_reqtfm(areq); + struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); + int ivsize = crypto_aead_ivsize(aead); dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif - edesc = (struct ipsec_esp_edesc *)((char *)desc - - offsetof(struct ipsec_esp_edesc, hw_desc)); + + edesc = (struct aead_edesc *)((char *)desc - + offsetof(struct aead_edesc, hw_desc)); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, + ivsize, 1); + print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), + req->cryptlen, 1); +#endif if (err) { char tmp[CAAM_ERROR_STR_MAX]; @@ -484,7 +927,7 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err, dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); } - ipsec_esp_unmap(jrdev, edesc, areq); + aead_unmap(jrdev, edesc, req); /* * verify hw auth check passed else return -EBADMSG @@ -495,255 +938,413 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err, #ifdef DEBUG print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, - ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)), - sizeof(struct iphdr) + areq->assoclen + - ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) + + ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)), + sizeof(struct iphdr) + req->assoclen + + ((req->cryptlen > 1500) ? 
1500 : req->cryptlen) + ctx->authsize + 36, 1); if (!err && edesc->link_tbl_bytes) { - struct scatterlist *sg = sg_last(areq->src, edesc->src_nents); + struct scatterlist *sg = sg_last(req->src, edesc->src_nents); print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), sg->length + ctx->authsize + 16, 1); } #endif + kfree(edesc); - aead_request_complete(areq, err); + aead_request_complete(req, err); +} + +static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct ablkcipher_request *req = context; + struct ablkcipher_edesc *edesc; +#ifdef DEBUG + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = (struct ablkcipher_edesc *)((char *)desc - + offsetof(struct ablkcipher_edesc, hw_desc)); + + if (err) { + char tmp[CAAM_ERROR_STR_MAX]; + + dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); + } + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->info, + edesc->src_nents > 1 ? 100 : ivsize, 1); + print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + edesc->dst_nents > 1 ? 100 : req->nbytes, 1); +#endif + + ablkcipher_unmap(jrdev, edesc, req); + kfree(edesc); + + ablkcipher_request_complete(req, err); +} + +static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct ablkcipher_request *req = context; + struct ablkcipher_edesc *edesc; +#ifdef DEBUG + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = (struct ablkcipher_edesc *)((char *)desc - + offsetof(struct ablkcipher_edesc, hw_desc)); + if (err) { + char tmp[CAAM_ERROR_STR_MAX]; + + dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); + } + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->info, + ivsize, 1); + print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + edesc->dst_nents > 1 ? 
100 : req->nbytes, 1); +#endif + + ablkcipher_unmap(jrdev, edesc, req); + kfree(edesc); + + ablkcipher_request_complete(req, err); +} + +static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr, + dma_addr_t dma, u32 len, u32 offset) +{ + link_tbl_ptr->ptr = dma; + link_tbl_ptr->len = len; + link_tbl_ptr->reserved = 0; + link_tbl_ptr->buf_pool_id = 0; + link_tbl_ptr->offset = offset; +#ifdef DEBUG + print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr, + sizeof(struct link_tbl_entry), 1); +#endif } /* * convert scatterlist to h/w link table format - * scatterlist must have been previously dma mapped + * but does not have final bit; instead, returns last entry */ -static void sg_to_link_tbl(struct scatterlist *sg, int sg_count, - struct link_tbl_entry *link_tbl_ptr, u32 offset) +static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg, + int sg_count, struct link_tbl_entry + *link_tbl_ptr, u32 offset) { while (sg_count) { - link_tbl_ptr->ptr = sg_dma_address(sg); - link_tbl_ptr->len = sg_dma_len(sg); - link_tbl_ptr->reserved = 0; - link_tbl_ptr->buf_pool_id = 0; - link_tbl_ptr->offset = offset; + sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg), + sg_dma_len(sg), offset); link_tbl_ptr++; sg = sg_next(sg); sg_count--; } + return link_tbl_ptr - 1; +} - /* set Final bit (marks end of link table) */ - link_tbl_ptr--; +/* + * convert scatterlist to h/w link table format + * scatterlist must have been previously dma mapped + */ +static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count, + struct link_tbl_entry *link_tbl_ptr, u32 offset) +{ + link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset); link_tbl_ptr->len |= 0x40000000; } /* - * fill in and submit ipsec_esp job descriptor + * Fill in aead job descriptor */ -static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, - u32 encrypt, - void (*callback) (struct device *dev, u32 *desc, - u32 err, void *context)) +static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, + struct aead_edesc *edesc, + struct aead_request *req, + bool all_contig, bool encrypt) { - struct crypto_aead *aead = crypto_aead_reqtfm(areq); + struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); - struct device *jrdev = ctx->jrdev; - u32 *desc = edesc->hw_desc, options; - int ret, sg_count, assoc_sg_count; int ivsize = crypto_aead_ivsize(aead); int authsize = ctx->authsize; - dma_addr_t ptr, dst_dma, src_dma; -#ifdef DEBUG - u32 *sh_desc = ctx->sh_desc; + u32 *desc = edesc->hw_desc; + u32 out_options = 0, in_options; + dma_addr_t dst_dma, src_dma; + int len, link_tbl_index = 0; +#ifdef DEBUG debug("assoclen %d cryptlen %d authsize %d\n", - areq->assoclen, areq->cryptlen, authsize); + req->assoclen, req->cryptlen, authsize); print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc), - areq->assoclen , 1); + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), + req->assoclen , 1); print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize, + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, edesc->src_nents ? 100 : ivsize, 1); print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src), - edesc->src_nents ? 100 : areq->cryptlen + authsize, 1); + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + edesc->src_nents ? 
100 : req->cryptlen, 1); print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, desc_bytes(sh_desc), 1); #endif - assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1, - DMA_TO_DEVICE); - if (areq->src == areq->dst) - sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1, - DMA_BIDIRECTIONAL); - else - sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1, - DMA_TO_DEVICE); - /* start auth operation */ - append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL | - (encrypt ? : OP_ALG_ICV_ON)); + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); - /* Load FIFO with data for Class 2 CHA */ - options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG; - if (!edesc->assoc_nents) { - ptr = sg_dma_address(areq->assoc); + if (all_contig) { + src_dma = sg_dma_address(req->assoc); + in_options = 0; } else { - sg_to_link_tbl(areq->assoc, edesc->assoc_nents, - edesc->link_tbl, 0); - ptr = edesc->link_tbl_dma; - options |= LDST_SGF; + src_dma = edesc->link_tbl_dma; + link_tbl_index += (edesc->assoc_nents ? : 1) + 1 + + (edesc->src_nents ? : 1); + in_options = LDST_SGF; } - append_fifo_load(desc, ptr, areq->assoclen, options); - - /* copy iv from cipher/class1 input context to class2 infifo */ - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize); - - if (!encrypt) { - u32 *jump_cmd, *uncond_jump_cmd; - - /* JUMP if shared */ - jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); + if (encrypt) + append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + + req->cryptlen - authsize, in_options); + else + append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + + req->cryptlen, in_options); - /* start class 1 (cipher) operation, non-shared version */ - append_operation(desc, ctx->class1_alg_type | - OP_ALG_AS_INITFINAL); + if (likely(req->src == req->dst)) { + if (all_contig) { + dst_dma = sg_dma_address(req->src); + } else { + dst_dma = src_dma + sizeof(struct link_tbl_entry) * + ((edesc->assoc_nents ? : 1) + 1); + out_options = LDST_SGF; + } + } else { + if (!edesc->dst_nents) { + dst_dma = sg_dma_address(req->dst); + } else { + dst_dma = edesc->link_tbl_dma + + link_tbl_index * + sizeof(struct link_tbl_entry); + out_options = LDST_SGF; + } + } + if (encrypt) + append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); + else + append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, + out_options); +} - uncond_jump_cmd = append_jump(desc, 0); +/* + * Fill in aead givencrypt job descriptor + */ +static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, + struct aead_edesc *edesc, + struct aead_request *req, + int contig) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + int ivsize = crypto_aead_ivsize(aead); + int authsize = ctx->authsize; + u32 *desc = edesc->hw_desc; + u32 out_options = 0, in_options; + dma_addr_t dst_dma, src_dma; + int len, link_tbl_index = 0; - set_jump_tgt_here(desc, jump_cmd); +#ifdef DEBUG + debug("assoclen %d cryptlen %d authsize %d\n", + req->assoclen, req->cryptlen, authsize); + print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), + req->assoclen , 1); + print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); + print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + edesc->src_nents > 1 ? 
100 : req->cryptlen, 1); + print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, + desc_bytes(sh_desc), 1); +#endif - /* start class 1 (cipher) operation, shared version */ - append_operation(desc, ctx->class1_alg_type | - OP_ALG_AS_INITFINAL | OP_ALG_AAI_DK); - set_jump_tgt_here(desc, uncond_jump_cmd); - } else - append_operation(desc, ctx->class1_alg_type | - OP_ALG_AS_INITFINAL | encrypt); + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); - /* load payload & instruct to class2 to snoop class 1 if encrypting */ - options = 0; - if (!edesc->src_nents) { - src_dma = sg_dma_address(areq->src); + if (contig & GIV_SRC_CONTIG) { + src_dma = sg_dma_address(req->assoc); + in_options = 0; } else { - sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl + - edesc->assoc_nents, 0); - src_dma = edesc->link_tbl_dma + edesc->assoc_nents * - sizeof(struct link_tbl_entry); - options |= LDST_SGF; + src_dma = edesc->link_tbl_dma; + link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents; + in_options = LDST_SGF; } - append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options); - append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH | - FIFOLD_TYPE_LASTBOTH | - (encrypt ? FIFOLD_TYPE_MSG1OUT2 - : FIFOLD_TYPE_MSG)); - - /* specify destination */ - if (areq->src == areq->dst) { - dst_dma = src_dma; + append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + + req->cryptlen - authsize, in_options); + + if (contig & GIV_DST_CONTIG) { + dst_dma = edesc->iv_dma; } else { - sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1, - DMA_FROM_DEVICE); - if (!edesc->dst_nents) { - dst_dma = sg_dma_address(areq->dst); - options = 0; + if (likely(req->src == req->dst)) { + dst_dma = src_dma + sizeof(struct link_tbl_entry) * + edesc->assoc_nents; + out_options = LDST_SGF; } else { - sg_to_link_tbl(areq->dst, edesc->dst_nents, - edesc->link_tbl + edesc->assoc_nents + - edesc->src_nents, 0); - dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents + - edesc->src_nents) * + dst_dma = edesc->link_tbl_dma + + link_tbl_index * sizeof(struct link_tbl_entry); - options = LDST_SGF; + out_options = LDST_SGF; } } - append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options); - append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA); - /* ICV */ - if (encrypt) - append_seq_store(desc, authsize, LDST_CLASS_2_CCB | - LDST_SRCDST_BYTE_CONTEXT); - else - append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 | - FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); + append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options); +} + +/* + * Fill in ablkcipher job descriptor + */ +static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, + struct ablkcipher_edesc *edesc, + struct ablkcipher_request *req, + bool iv_contig) +{ + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + u32 *desc = edesc->hw_desc; + u32 out_options = 0, in_options; + dma_addr_t dst_dma, src_dma; + int len, link_tbl_index = 0; #ifdef DEBUG - debug("job_desc_len %d\n", desc_len(desc)); - print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc) , 1); - print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl, - edesc->link_tbl_bytes, 1); + print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->info, + ivsize, 1); + 
print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + edesc->src_nents ? 100 : req->nbytes, 1); #endif - ret = caam_jr_enqueue(jrdev, desc, callback, areq); - if (!ret) - ret = -EINPROGRESS; - else { - ipsec_esp_unmap(jrdev, edesc, areq); - kfree(edesc); + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + + if (iv_contig) { + src_dma = edesc->iv_dma; + in_options = 0; + } else { + src_dma = edesc->link_tbl_dma; + link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents; + in_options = LDST_SGF; } + append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); - return ret; + if (likely(req->src == req->dst)) { + if (!edesc->src_nents && iv_contig) { + dst_dma = sg_dma_address(req->src); + } else { + dst_dma = edesc->link_tbl_dma + + sizeof(struct link_tbl_entry); + out_options = LDST_SGF; + } + } else { + if (!edesc->dst_nents) { + dst_dma = sg_dma_address(req->dst); + } else { + dst_dma = edesc->link_tbl_dma + + link_tbl_index * sizeof(struct link_tbl_entry); + out_options = LDST_SGF; + } + } + append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options); } /* * derive number of elements in scatterlist */ -static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) +static int sg_count(struct scatterlist *sg_list, int nbytes) { struct scatterlist *sg = sg_list; int sg_nents = 0; - *chained = 0; while (nbytes > 0) { sg_nents++; nbytes -= sg->length; if (!sg_is_last(sg) && (sg + 1)->length == 0) - *chained = 1; + BUG(); /* Not support chaining */ sg = scatterwalk_sg_next(sg); } + if (likely(sg_nents == 1)) + return 0; + return sg_nents; } /* - * allocate and map the ipsec_esp extended descriptor + * allocate and map the aead extended descriptor */ -static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, - int desc_bytes) +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, + int desc_bytes, bool *all_contig_ptr) { - struct crypto_aead *aead = crypto_aead_reqtfm(areq); + struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; - gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : - GFP_ATOMIC; - int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes; - struct ipsec_esp_edesc *edesc; - - assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained); - BUG_ON(chained); - if (likely(assoc_nents == 1)) - assoc_nents = 0; - - src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize, - &chained); - BUG_ON(chained); - if (src_nents == 1) - src_nents = 0; - - if (unlikely(areq->dst != areq->src)) { - dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize, - &chained); - BUG_ON(chained); - if (dst_nents == 1) - dst_nents = 0; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; + int assoc_nents, src_nents, dst_nents = 0; + struct aead_edesc *edesc; + dma_addr_t iv_dma = 0; + int sgc; + bool all_contig = true; + int ivsize = crypto_aead_ivsize(aead); + int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; + + assoc_nents = sg_count(req->assoc, req->assoclen); + src_nents = sg_count(req->src, req->cryptlen); + + if (unlikely(req->dst != req->src)) + dst_nents = sg_count(req->dst, req->cryptlen); + + sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, + DMA_BIDIRECTIONAL); + if (likely(req->src == req->dst)) { + sgc = dma_map_sg(jrdev, req->src, src_nents ? 
: 1, + DMA_BIDIRECTIONAL); + } else { + sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, + DMA_TO_DEVICE); + sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, + DMA_FROM_DEVICE); } - link_tbl_bytes = (assoc_nents + src_nents + dst_nents) * - sizeof(struct link_tbl_entry); - debug("link_tbl_bytes %d\n", link_tbl_bytes); + /* Check if data are contiguous */ + iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); + if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != + iv_dma || src_nents || iv_dma + ivsize != + sg_dma_address(req->src)) { + all_contig = false; + assoc_nents = assoc_nents ? : 1; + src_nents = src_nents ? : 1; + link_tbl_len = assoc_nents + 1 + src_nents; + } + link_tbl_len += dst_nents; + + link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes + + edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + link_tbl_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); @@ -753,142 +1354,450 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, edesc->assoc_nents = assoc_nents; edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; - edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) + + edesc->iv_dma = iv_dma; + edesc->link_tbl_bytes = link_tbl_bytes; + edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + desc_bytes; edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, link_tbl_bytes, DMA_TO_DEVICE); - edesc->link_tbl_bytes = link_tbl_bytes; + *all_contig_ptr = all_contig; + + link_tbl_index = 0; + if (!all_contig) { + sg_to_link_tbl(req->assoc, + (assoc_nents ? : 1), + edesc->link_tbl + + link_tbl_index, 0); + link_tbl_index += assoc_nents ? : 1; + sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, + iv_dma, ivsize, 0); + link_tbl_index += 1; + sg_to_link_tbl_last(req->src, + (src_nents ? : 1), + edesc->link_tbl + + link_tbl_index, 0); + link_tbl_index += src_nents ? 
: 1; + } + if (dst_nents) { + sg_to_link_tbl_last(req->dst, dst_nents, + edesc->link_tbl + link_tbl_index, 0); + } return edesc; } -static int aead_authenc_encrypt(struct aead_request *areq) +static int aead_encrypt(struct aead_request *req) { - struct ipsec_esp_edesc *edesc; - struct crypto_aead *aead = crypto_aead_reqtfm(areq); + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; - int ivsize = crypto_aead_ivsize(aead); + bool all_contig; u32 *desc; - dma_addr_t iv_dma; + int ret = 0; + + req->cryptlen += ctx->authsize; /* allocate extended descriptor */ - edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_ENCRYPT_TEXT_LEN * - CAAM_CMD_SZ); + edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &all_contig); if (IS_ERR(edesc)) return PTR_ERR(edesc); - desc = edesc->hw_desc; - - /* insert shared descriptor pointer */ - init_job_desc_shared(desc, ctx->shared_desc_phys, - desc_len(ctx->sh_desc), HDR_SHARE_DEFER); - - iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE); - /* check dma error */ + /* Create and submit job descriptor */ + init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, + all_contig, true); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif - append_load(desc, iv_dma, ivsize, - LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); + desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(jrdev, edesc, req); + kfree(edesc); + } - return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done); + return ret; } -static int aead_authenc_decrypt(struct aead_request *req) +static int aead_decrypt(struct aead_request *req) { + struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); - int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; - struct ipsec_esp_edesc *edesc; + bool all_contig; u32 *desc; - dma_addr_t iv_dma; - - req->cryptlen -= ctx->authsize; + int ret = 0; /* allocate extended descriptor */ - edesc = ipsec_esp_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN * - CAAM_CMD_SZ); + edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &all_contig); if (IS_ERR(edesc)) return PTR_ERR(edesc); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + req->cryptlen, 1); +#endif + + /* Create and submit job descriptor*/ + init_aead_job(ctx->sh_desc_dec, + ctx->sh_desc_dec_dma, edesc, req, all_contig, false); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(jrdev, edesc, req); + kfree(edesc); + } - /* insert shared descriptor pointer */ - init_job_desc_shared(desc, ctx->shared_desc_phys, - desc_len(ctx->sh_desc), HDR_SHARE_DEFER); + return ret; +} - iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); - /* check dma error */ +/* + * allocate and map the aead extended descriptor for aead givencrypt + */ +static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request + *greq, int desc_bytes, + u32 *contig_ptr) +{ 
+ struct aead_request *req = &greq->areq; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; + int assoc_nents, src_nents, dst_nents = 0; + struct aead_edesc *edesc; + dma_addr_t iv_dma = 0; + int sgc; + u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG; + int ivsize = crypto_aead_ivsize(aead); + int link_tbl_index, link_tbl_len = 0, link_tbl_bytes; + + assoc_nents = sg_count(req->assoc, req->assoclen); + src_nents = sg_count(req->src, req->cryptlen); - append_load(desc, iv_dma, ivsize, - LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); + if (unlikely(req->dst != req->src)) + dst_nents = sg_count(req->dst, req->cryptlen); - return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done); + sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1, + DMA_BIDIRECTIONAL); + if (likely(req->src == req->dst)) { + sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, + DMA_BIDIRECTIONAL); + } else { + sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, + DMA_TO_DEVICE); + sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, + DMA_FROM_DEVICE); + } + + /* Check if data are contiguous */ + iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); + if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != + iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src)) + contig &= ~GIV_SRC_CONTIG; + if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst)) + contig &= ~GIV_DST_CONTIG; + if (unlikely(req->src != req->dst)) { + dst_nents = dst_nents ? : 1; + link_tbl_len += 1; + } + if (!(contig & GIV_SRC_CONTIG)) { + assoc_nents = assoc_nents ? : 1; + src_nents = src_nents ? 
: 1; + link_tbl_len += assoc_nents + 1 + src_nents; + if (likely(req->src == req->dst)) + contig &= ~GIV_DST_CONTIG; + } + link_tbl_len += dst_nents; + + link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + + link_tbl_bytes, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); + return ERR_PTR(-ENOMEM); + } + + edesc->assoc_nents = assoc_nents; + edesc->src_nents = src_nents; + edesc->dst_nents = dst_nents; + edesc->iv_dma = iv_dma; + edesc->link_tbl_bytes = link_tbl_bytes; + edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) + + desc_bytes; + edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, + link_tbl_bytes, DMA_TO_DEVICE); + *contig_ptr = contig; + + link_tbl_index = 0; + if (!(contig & GIV_SRC_CONTIG)) { + sg_to_link_tbl(req->assoc, assoc_nents, + edesc->link_tbl + + link_tbl_index, 0); + link_tbl_index += assoc_nents; + sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, + iv_dma, ivsize, 0); + link_tbl_index += 1; + sg_to_link_tbl_last(req->src, src_nents, + edesc->link_tbl + + link_tbl_index, 0); + link_tbl_index += src_nents; + } + if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) { + sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index, + iv_dma, ivsize, 0); + link_tbl_index += 1; + sg_to_link_tbl_last(req->dst, dst_nents, + edesc->link_tbl + link_tbl_index, 0); + } + + return edesc; } -static int aead_authenc_givencrypt(struct aead_givcrypt_request *req) +static int aead_givencrypt(struct aead_givcrypt_request *areq) { - struct aead_request *areq = &req->areq; - struct ipsec_esp_edesc *edesc; - struct crypto_aead *aead = crypto_aead_reqtfm(areq); + struct aead_request *req = &areq->areq; + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; - int ivsize = crypto_aead_ivsize(aead); - dma_addr_t iv_dma; + u32 contig; u32 *desc; + int ret = 0; - iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE); - - debug("%s: giv %p\n", __func__, req->giv); + req->cryptlen += ctx->authsize; /* allocate extended descriptor */ - edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_GIVENCRYPT_TEXT_LEN * - CAAM_CMD_SZ); + edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &contig); + if (IS_ERR(edesc)) return PTR_ERR(edesc); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + req->cryptlen, 1); +#endif + + /* Create and submit job descriptor*/ + init_aead_giv_job(ctx->sh_desc_givenc, + ctx->sh_desc_givenc_dma, edesc, req, contig); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(jrdev, edesc, req); + kfree(edesc); + } - /* insert shared descriptor pointer */ - init_job_desc_shared(desc, ctx->shared_desc_phys, - desc_len(ctx->sh_desc), HDR_SHARE_DEFER); + return ret; +} - /* - * LOAD IMM Info FIFO - * to DECO, Last, Padding, Random, Message, 16 bytes - */ - append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 | - NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | - NFIFOENTRY_PTYPE_RND | ivsize, - 
LDST_SRCDST_WORD_INFO_FIFO); +/* + * allocate and map the ablkcipher extended descriptor for ablkcipher + */ +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request + *req, int desc_bytes, + bool *iv_contig_out) +{ + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? + GFP_KERNEL : GFP_ATOMIC; + int src_nents, dst_nents = 0, link_tbl_bytes; + struct ablkcipher_edesc *edesc; + dma_addr_t iv_dma = 0; + bool iv_contig = false; + int sgc; + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); + int link_tbl_index; + + src_nents = sg_count(req->src, req->nbytes); + + if (unlikely(req->dst != req->src)) + dst_nents = sg_count(req->dst, req->nbytes); + + if (likely(req->src == req->dst)) { + sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, + DMA_BIDIRECTIONAL); + } else { + sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, + DMA_TO_DEVICE); + sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, + DMA_FROM_DEVICE); + } /* - * disable info fifo entries since the above serves as the entry - * this way, the MOVE command won't generate an entry. - * Note that this isn't required in more recent versions of - * SEC as a MOVE that doesn't do info FIFO entries is available. + * Check if iv can be contiguous with source and destination. + * If so, include it. If not, create scatterlist. */ - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); + if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) + iv_contig = true; + else + src_nents = src_nents ? : 1; + link_tbl_bytes = ((iv_contig ? 
0 : 1) + src_nents + dst_nents) * + sizeof(struct link_tbl_entry); - /* MOVE DECO Alignment -> C1 Context 16 bytes */ - append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ivsize); + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + + link_tbl_bytes, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); + return ERR_PTR(-ENOMEM); + } - /* re-enable info fifo entries */ - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); + edesc->src_nents = src_nents; + edesc->dst_nents = dst_nents; + edesc->link_tbl_bytes = link_tbl_bytes; + edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) + + desc_bytes; + + link_tbl_index = 0; + if (!iv_contig) { + sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0); + sg_to_link_tbl_last(req->src, src_nents, + edesc->link_tbl + 1, 0); + link_tbl_index += 1 + src_nents; + } + + if (unlikely(dst_nents)) { + sg_to_link_tbl_last(req->dst, dst_nents, + edesc->link_tbl + link_tbl_index, 0); + } + + edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, + link_tbl_bytes, DMA_TO_DEVICE); + edesc->iv_dma = iv_dma; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl, + link_tbl_bytes, 1); +#endif + + *iv_contig_out = iv_contig; + return edesc; +} + +static int ablkcipher_encrypt(struct ablkcipher_request *req) +{ + struct ablkcipher_edesc *edesc; + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + struct device *jrdev = ctx->jrdev; + bool iv_contig; + u32 *desc; + int ret = 0; + + /* allocate extended descriptor */ + edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &iv_contig); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); - /* MOVE C1 Context -> OFIFO 16 bytes */ - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ivsize); + /* Create and submit job descriptor*/ + init_ablkcipher_job(ctx->sh_desc_enc, + ctx->sh_desc_enc_dma, edesc, req, iv_contig); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); - append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA); + if (!ret) { + ret = -EINPROGRESS; + } else { + ablkcipher_unmap(jrdev, edesc, req); + kfree(edesc); + } - return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done); + return ret; } +static int ablkcipher_decrypt(struct ablkcipher_request *req) +{ + struct ablkcipher_edesc *edesc; + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + struct device *jrdev = ctx->jrdev; + bool iv_contig; + u32 *desc; + int ret = 0; + + /* allocate extended descriptor */ + edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &iv_contig); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Create and submit job descriptor*/ + init_ablkcipher_job(ctx->sh_desc_dec, + ctx->sh_desc_dec_dma, edesc, req, iv_contig); + desc = edesc->hw_desc; +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + + ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req); + if 
(!ret) { + ret = -EINPROGRESS; + } else { + ablkcipher_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +#define template_aead template_u.aead +#define template_ablkcipher template_u.ablkcipher struct caam_alg_template { char name[CRYPTO_MAX_ALG_NAME]; char driver_name[CRYPTO_MAX_ALG_NAME]; unsigned int blocksize; - struct aead_alg aead; + u32 type; + union { + struct ablkcipher_alg ablkcipher; + struct aead_alg aead; + struct blkcipher_alg blkcipher; + struct cipher_alg cipher; + struct compress_alg compress; + struct rng_alg rng; + } template_u; u32 class1_alg_type; u32 class2_alg_type; u32 alg_op; @@ -900,12 +1809,13 @@ static struct caam_alg_template driver_algs[] = { .name = "authenc(hmac(sha1),cbc(aes))", .driver_name = "authenc-hmac-sha1-cbc-aes-caam", .blocksize = AES_BLOCK_SIZE, - .aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, .geniv = "<built-in>", .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -918,12 +1828,13 @@ static struct caam_alg_template driver_algs[] = { .name = "authenc(hmac(sha256),cbc(aes))", .driver_name = "authenc-hmac-sha256-cbc-aes-caam", .blocksize = AES_BLOCK_SIZE, - .aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, .geniv = "<built-in>", .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -937,12 +1848,13 @@ static struct caam_alg_template driver_algs[] = { .name = "authenc(hmac(sha512),cbc(aes))", .driver_name = "authenc-hmac-sha512-cbc-aes-caam", .blocksize = AES_BLOCK_SIZE, - .aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, .geniv = "<built-in>", .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -956,12 +1868,13 @@ static struct caam_alg_template driver_algs[] = { .name = "authenc(hmac(sha1),cbc(des3_ede))", .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam", .blocksize = DES3_EDE_BLOCK_SIZE, - .aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, .geniv = "<built-in>", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -974,12 +1887,13 @@ static struct caam_alg_template driver_algs[] = { .name = "authenc(hmac(sha256),cbc(des3_ede))", .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam", .blocksize = 
DES3_EDE_BLOCK_SIZE, - .aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, .geniv = "<built-in>", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -993,12 +1907,13 @@ static struct caam_alg_template driver_algs[] = { .name = "authenc(hmac(sha512),cbc(des3_ede))", .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam", .blocksize = DES3_EDE_BLOCK_SIZE, - .aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, .geniv = "<built-in>", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -1012,12 +1927,13 @@ static struct caam_alg_template driver_algs[] = { .name = "authenc(hmac(sha1),cbc(des))", .driver_name = "authenc-hmac-sha1-cbc-des-caam", .blocksize = DES_BLOCK_SIZE, - .aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -1030,12 +1946,13 @@ static struct caam_alg_template driver_algs[] = { .name = "authenc(hmac(sha256),cbc(des))", .driver_name = "authenc-hmac-sha256-cbc-des-caam", .blocksize = DES_BLOCK_SIZE, - .aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -1049,12 +1966,13 @@ static struct caam_alg_template driver_algs[] = { .name = "authenc(hmac(sha512),cbc(des))", .driver_name = "authenc-hmac-sha512-cbc-des-caam", .blocksize = DES_BLOCK_SIZE, - .aead = { - .setkey = aead_authenc_setkey, - .setauthsize = aead_authenc_setauthsize, - .encrypt = aead_authenc_encrypt, - .decrypt = aead_authenc_decrypt, - .givencrypt = aead_authenc_givencrypt, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_givencrypt, .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -1064,6 +1982,55 @@ static struct caam_alg_template driver_algs[] = { OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, }, + /* ablkcipher descriptor */ + { + .name = "cbc(aes)", + .driver_name = "cbc-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + 
.template_ablkcipher = { + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .geniv = "eseqiv", + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + }, + { + .name = "cbc(des3_ede)", + .driver_name = "cbc-3des-caam", + .blocksize = DES3_EDE_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .template_ablkcipher = { + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .geniv = "eseqiv", + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, + }, + { + .name = "cbc(des)", + .driver_name = "cbc-des-caam", + .blocksize = DES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .template_ablkcipher = { + .setkey = ablkcipher_setkey, + .encrypt = ablkcipher_encrypt, + .decrypt = ablkcipher_decrypt, + .geniv = "eseqiv", + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, + } }; struct caam_crypto_alg { @@ -1102,16 +2069,19 @@ static void caam_cra_exit(struct crypto_tfm *tfm) { struct caam_ctx *ctx = crypto_tfm_ctx(tfm); - if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys)) - dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys, - desc_bytes(ctx->sh_desc), DMA_TO_DEVICE); - kfree(ctx->sh_desc); - - if (!dma_mapping_error(ctx->jrdev, ctx->key_phys)) - dma_unmap_single(ctx->jrdev, ctx->key_phys, - ctx->split_key_pad_len + ctx->enckeylen, + if (ctx->sh_desc_enc_dma && + !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) + dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma, + desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE); + if (ctx->sh_desc_dec_dma && + !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma)) + dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma, + desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE); + if (ctx->sh_desc_givenc_dma && + !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma)) + dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, + desc_bytes(ctx->sh_desc_givenc), DMA_TO_DEVICE); - kfree(ctx->key); } static void __exit caam_algapi_exit(void) @@ -1175,12 +2145,20 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, alg->cra_init = caam_cra_init; alg->cra_exit = caam_cra_exit; alg->cra_priority = CAAM_CRA_PRIORITY; - alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; alg->cra_blocksize = template->blocksize; alg->cra_alignmask = 0; - alg->cra_type = &crypto_aead_type; alg->cra_ctxsize = sizeof(struct caam_ctx); - alg->cra_u.aead = template->aead; + alg->cra_flags = CRYPTO_ALG_ASYNC | template->type; + switch (template->type) { + case CRYPTO_ALG_TYPE_ABLKCIPHER: + alg->cra_type = &crypto_ablkcipher_type; + alg->cra_ablkcipher = template->template_ablkcipher; + break; + case CRYPTO_ALG_TYPE_AEAD: + alg->cra_type = &crypto_aead_type; + alg->cra_aead = template->template_aead; + break; + } t_alg->class1_alg_type = template->class1_alg_type; t_alg->class2_alg_type = template->class2_alg_type; diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index 950450346f70..d38f2afaa966 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h @@ -31,5 +31,6 @@ #include <crypto/aead.h> #include <crypto/authenc.h> #include <crypto/scatterwalk.h> +#include <crypto/internal/skcipher.h> #endif /* !defined(CAAM_COMPAT_H) */ 
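[Editor's note] The caamalg.c hunks above replace the AEAD-only template array with entries that carry a type tag plus a template_u union, and caam_alg_alloc() now switches on that tag when filling cra_type and the matching cra_u member. Below is a minimal, self-contained userspace sketch of that tagged-union registration pattern only; it is not the driver code, and every identifier in it is an illustrative stand-in rather than a kernel or crypto API symbol.

/*
 * Simplified userspace analogue of the dispatch added in caam_alg_alloc():
 * each template carries a `type` tag and a union of per-type operations,
 * and registration copies only the union member selected by the tag.
 * All names here are hypothetical stand-ins for illustration.
 */
#include <stdio.h>

enum alg_type { ALG_TYPE_AEAD, ALG_TYPE_ABLKCIPHER };

struct aead_ops       { int (*encrypt)(void); int (*decrypt)(void); };
struct ablkcipher_ops { int (*encrypt)(void); int (*decrypt)(void); };

struct alg_template {
	const char *name;
	enum alg_type type;
	union {				/* only the member matching `type` is valid */
		struct aead_ops aead;
		struct ablkcipher_ops ablkcipher;
	} u;
};

struct registered_alg {
	const char *name;
	enum alg_type type;
	union {
		struct aead_ops aead;
		struct ablkcipher_ops ablkcipher;
	} u;
};

/* Mirror of the type switch: copy the union member named by the tag. */
static void alg_from_template(struct registered_alg *alg,
			      const struct alg_template *tmpl)
{
	alg->name = tmpl->name;
	alg->type = tmpl->type;
	switch (tmpl->type) {
	case ALG_TYPE_AEAD:
		alg->u.aead = tmpl->u.aead;
		break;
	case ALG_TYPE_ABLKCIPHER:
		alg->u.ablkcipher = tmpl->u.ablkcipher;
		break;
	}
}

static int dummy_op(void) { return 0; }

int main(void)
{
	const struct alg_template tmpl = {
		.name = "cbc(aes)",
		.type = ALG_TYPE_ABLKCIPHER,
		.u.ablkcipher = { .encrypt = dummy_op, .decrypt = dummy_op },
	};
	struct registered_alg alg;

	alg_from_template(&alg, &tmpl);
	printf("registered %s (type %d)\n", alg.name, alg.type);
	return 0;
}

The same shape is what lets one driver_algs[] table hold both the authenc AEAD entries and the new cbc(aes)/cbc(des3_ede)/cbc(des) ablkcipher entries shown above.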
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 9009713a3c2e..fc2d9ed22470 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -52,9 +52,11 @@ static int caam_probe(struct platform_device *pdev) struct caam_ctrl __iomem *ctrl; struct caam_full __iomem *topregs; struct caam_drv_private *ctrlpriv; - struct caam_perfmon *perfmon; struct caam_deco **deco; u32 deconum; +#ifdef CONFIG_DEBUG_FS + struct caam_perfmon *perfmon; +#endif ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL); if (!ctrlpriv) diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 46915800c26f..0991323cf3fd 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h @@ -9,7 +9,7 @@ #define IMMEDIATE (1 << 23) #define CAAM_CMD_SZ sizeof(u32) #define CAAM_PTR_SZ sizeof(dma_addr_t) -#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * 64) +#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE) #ifdef DEBUG #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\ @@ -18,6 +18,9 @@ #define PRINT_POS #endif +#define SET_OK_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \ + LDST_SRCDST_WORD_DECOCTRL | \ + (LDOFF_CHG_SHARE_OK_PROP << LDST_OFFSET_SHIFT)) #define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \ LDST_SRCDST_WORD_DECOCTRL | \ (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) @@ -203,3 +206,56 @@ static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \ append_cmd(desc, immediate); \ } APPEND_CMD_RAW_IMM(load, LOAD, u32); + +/* + * Append math command. Only the last part of destination and source need to + * be specified + */ +#define APPEND_MATH(op, desc, dest, src_0, src_1, len) \ +append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \ + MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32) (len & MATH_LEN_MASK)); + +#define append_math_add(desc, dest, src0, src1, len) \ + APPEND_MATH(ADD, desc, dest, src0, src1, len) +#define append_math_sub(desc, dest, src0, src1, len) \ + APPEND_MATH(SUB, desc, dest, src0, src1, len) +#define append_math_add_c(desc, dest, src0, src1, len) \ + APPEND_MATH(ADDC, desc, dest, src0, src1, len) +#define append_math_sub_b(desc, dest, src0, src1, len) \ + APPEND_MATH(SUBB, desc, dest, src0, src1, len) +#define append_math_and(desc, dest, src0, src1, len) \ + APPEND_MATH(AND, desc, dest, src0, src1, len) +#define append_math_or(desc, dest, src0, src1, len) \ + APPEND_MATH(OR, desc, dest, src0, src1, len) +#define append_math_xor(desc, dest, src0, src1, len) \ + APPEND_MATH(XOR, desc, dest, src0, src1, len) +#define append_math_lshift(desc, dest, src0, src1, len) \ + APPEND_MATH(LSHIFT, desc, dest, src0, src1, len) +#define append_math_rshift(desc, dest, src0, src1, len) \ + APPEND_MATH(RSHIFT, desc, dest, src0, src1, len) + +/* Exactly one source is IMM. 
Data is passed in as u32 value */ +#define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \ +do { \ + APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \ + append_cmd(desc, data); \ +} while (0); + +#define append_math_add_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data) +#define append_math_sub_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data) +#define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data) +#define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data) +#define append_math_and_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data) +#define append_math_or_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data) +#define append_math_xor_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data) +#define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data) +#define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \ + APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index ba8f1ea84c5e..6399a8f1938a 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -72,17 +72,20 @@ #define DEFAULT_TIMEOUT_INTERVAL HZ -#define FLAGS_FINUP 0x0002 -#define FLAGS_FINAL 0x0004 -#define FLAGS_SG 0x0008 -#define FLAGS_SHA1 0x0010 -#define FLAGS_DMA_ACTIVE 0x0020 -#define FLAGS_OUTPUT_READY 0x0040 -#define FLAGS_INIT 0x0100 -#define FLAGS_CPU 0x0200 -#define FLAGS_HMAC 0x0400 -#define FLAGS_ERROR 0x0800 -#define FLAGS_BUSY 0x1000 +/* mostly device flags */ +#define FLAGS_BUSY 0 +#define FLAGS_FINAL 1 +#define FLAGS_DMA_ACTIVE 2 +#define FLAGS_OUTPUT_READY 3 +#define FLAGS_INIT 4 +#define FLAGS_CPU 5 +#define FLAGS_DMA_READY 6 +/* context flags */ +#define FLAGS_FINUP 16 +#define FLAGS_SG 17 +#define FLAGS_SHA1 18 +#define FLAGS_HMAC 19 +#define FLAGS_ERROR 20 #define OP_UPDATE 1 #define OP_FINAL 2 @@ -144,7 +147,6 @@ struct omap_sham_dev { int dma; int dma_lch; struct tasklet_struct done_task; - struct tasklet_struct queue_task; unsigned long flags; struct crypto_queue queue; @@ -223,7 +225,7 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req) if (!hash) return; - if (likely(ctx->flags & FLAGS_SHA1)) { + if (likely(ctx->flags & BIT(FLAGS_SHA1))) { /* SHA1 results are in big endian */ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) hash[i] = be32_to_cpu(in[i]); @@ -238,7 +240,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd) { clk_enable(dd->iclk); - if (!(dd->flags & FLAGS_INIT)) { + if (!test_bit(FLAGS_INIT, &dd->flags)) { omap_sham_write_mask(dd, SHA_REG_MASK, SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); @@ -246,7 +248,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd) SHA_REG_SYSSTATUS_RESETDONE)) return -ETIMEDOUT; - dd->flags |= FLAGS_INIT; + set_bit(FLAGS_INIT, &dd->flags); dd->err = 0; } @@ -269,7 +271,7 @@ static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, * Setting ALGO_CONST only for the first iteration * and CLOSE_HASH only for the last one. 
*/ - if (ctx->flags & FLAGS_SHA1) + if (ctx->flags & BIT(FLAGS_SHA1)) val |= SHA_REG_CTRL_ALGO; if (!ctx->digcnt) val |= SHA_REG_CTRL_ALGO_CONST; @@ -301,7 +303,9 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, return -ETIMEDOUT; if (final) - ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ + set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ + + set_bit(FLAGS_CPU, &dd->flags); len32 = DIV_ROUND_UP(length, sizeof(u32)); @@ -334,9 +338,9 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, ctx->digcnt += length; if (final) - ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ + set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ - dd->flags |= FLAGS_DMA_ACTIVE; + set_bit(FLAGS_DMA_ACTIVE, &dd->flags); omap_start_dma(dd->dma_lch); @@ -392,7 +396,7 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, return -EINVAL; } - ctx->flags &= ~FLAGS_SG; + ctx->flags &= ~BIT(FLAGS_SG); /* next call does not fail... so no unmap in the case of error */ return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final); @@ -406,7 +410,7 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) omap_sham_append_sg(ctx); - final = (ctx->flags & FLAGS_FINUP) && !ctx->total; + final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", ctx->bufcnt, ctx->digcnt, final); @@ -452,7 +456,7 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) length = min(ctx->total, sg->length); if (sg_is_last(sg)) { - if (!(ctx->flags & FLAGS_FINUP)) { + if (!(ctx->flags & BIT(FLAGS_FINUP))) { /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */ tail = length & (SHA1_MD5_BLOCK_SIZE - 1); /* without finup() we need one block to close hash */ @@ -467,12 +471,12 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd) return -EINVAL; } - ctx->flags |= FLAGS_SG; + ctx->flags |= BIT(FLAGS_SG); ctx->total -= length; ctx->offset = length; /* offset where to start slow */ - final = (ctx->flags & FLAGS_FINUP) && !ctx->total; + final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; /* next call does not fail... 
so no unmap in the case of error */ return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final); @@ -495,7 +499,7 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); omap_stop_dma(dd->dma_lch); - if (ctx->flags & FLAGS_SG) { + if (ctx->flags & BIT(FLAGS_SG)) { dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); if (ctx->sg->length == ctx->offset) { ctx->sg = sg_next(ctx->sg); @@ -537,18 +541,18 @@ static int omap_sham_init(struct ahash_request *req) crypto_ahash_digestsize(tfm)); if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE) - ctx->flags |= FLAGS_SHA1; + ctx->flags |= BIT(FLAGS_SHA1); ctx->bufcnt = 0; ctx->digcnt = 0; ctx->buflen = BUFLEN; - if (tctx->flags & FLAGS_HMAC) { + if (tctx->flags & BIT(FLAGS_HMAC)) { struct omap_sham_hmac_ctx *bctx = tctx->base; memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; - ctx->flags |= FLAGS_HMAC; + ctx->flags |= BIT(FLAGS_HMAC); } return 0; @@ -562,9 +566,9 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) int err; dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", - ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0); + ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0); - if (ctx->flags & FLAGS_CPU) + if (ctx->flags & BIT(FLAGS_CPU)) err = omap_sham_update_cpu(dd); else err = omap_sham_update_dma_start(dd); @@ -624,7 +628,7 @@ static int omap_sham_finish(struct ahash_request *req) if (ctx->digcnt) { omap_sham_copy_ready_hash(req); - if (ctx->flags & FLAGS_HMAC) + if (ctx->flags & BIT(FLAGS_HMAC)) err = omap_sham_finish_hmac(req); } @@ -639,18 +643,23 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) struct omap_sham_dev *dd = ctx->dd; if (!err) { - omap_sham_copy_hash(ctx->dd->req, 1); - if (ctx->flags & FLAGS_FINAL) + omap_sham_copy_hash(req, 1); + if (test_bit(FLAGS_FINAL, &dd->flags)) err = omap_sham_finish(req); } else { - ctx->flags |= FLAGS_ERROR; + ctx->flags |= BIT(FLAGS_ERROR); } + /* atomic operation is not needed here */ + dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | + BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY)); clk_disable(dd->iclk); - dd->flags &= ~FLAGS_BUSY; if (req->base.complete) req->base.complete(&req->base, err); + + /* handle new request */ + tasklet_schedule(&dd->done_task); } static int omap_sham_handle_queue(struct omap_sham_dev *dd, @@ -658,21 +667,20 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, { struct crypto_async_request *async_req, *backlog; struct omap_sham_reqctx *ctx; - struct ahash_request *prev_req; unsigned long flags; int err = 0, ret = 0; spin_lock_irqsave(&dd->lock, flags); if (req) ret = ahash_enqueue_request(&dd->queue, req); - if (dd->flags & FLAGS_BUSY) { + if (test_bit(FLAGS_BUSY, &dd->flags)) { spin_unlock_irqrestore(&dd->lock, flags); return ret; } backlog = crypto_get_backlog(&dd->queue); async_req = crypto_dequeue_request(&dd->queue); if (async_req) - dd->flags |= FLAGS_BUSY; + set_bit(FLAGS_BUSY, &dd->flags); spin_unlock_irqrestore(&dd->lock, flags); if (!async_req) @@ -682,16 +690,12 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, backlog->complete(backlog, -EINPROGRESS); req = ahash_request_cast(async_req); - - prev_req = dd->req; dd->req = req; - ctx = ahash_request_ctx(req); dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", ctx->op, req->nbytes); - err = omap_sham_hw_init(dd); if (err) goto err1; @@ -712,18 +716,16 @@ static int 
omap_sham_handle_queue(struct omap_sham_dev *dd, if (ctx->op == OP_UPDATE) { err = omap_sham_update_req(dd); - if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP)) + if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP))) /* no final() after finup() */ err = omap_sham_final_req(dd); } else if (ctx->op == OP_FINAL) { err = omap_sham_final_req(dd); } err1: - if (err != -EINPROGRESS) { + if (err != -EINPROGRESS) /* done_task will not finish it, so do it here */ omap_sham_finish_req(req, err); - tasklet_schedule(&dd->queue_task); - } dev_dbg(dd->dev, "exit, err: %d\n", err); @@ -752,7 +754,7 @@ static int omap_sham_update(struct ahash_request *req) ctx->sg = req->src; ctx->offset = 0; - if (ctx->flags & FLAGS_FINUP) { + if (ctx->flags & BIT(FLAGS_FINUP)) { if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) { /* * OMAP HW accel works only with buffers >= 9 @@ -765,7 +767,7 @@ static int omap_sham_update(struct ahash_request *req) /* * faster to use CPU for short transfers */ - ctx->flags |= FLAGS_CPU; + ctx->flags |= BIT(FLAGS_CPU); } } else if (ctx->bufcnt + ctx->total < ctx->buflen) { omap_sham_append_sg(ctx); @@ -802,9 +804,9 @@ static int omap_sham_final(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - ctx->flags |= FLAGS_FINUP; + ctx->flags |= BIT(FLAGS_FINUP); - if (ctx->flags & FLAGS_ERROR) + if (ctx->flags & BIT(FLAGS_ERROR)) return 0; /* uncompleted hash is not needed */ /* OMAP HW accel works only with buffers >= 9 */ @@ -823,7 +825,7 @@ static int omap_sham_finup(struct ahash_request *req) struct omap_sham_reqctx *ctx = ahash_request_ctx(req); int err1, err2; - ctx->flags |= FLAGS_FINUP; + ctx->flags |= BIT(FLAGS_FINUP); err1 = omap_sham_update(req); if (err1 == -EINPROGRESS || err1 == -EBUSY) @@ -895,7 +897,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) if (alg_base) { struct omap_sham_hmac_ctx *bctx = tctx->base; - tctx->flags |= FLAGS_HMAC; + tctx->flags |= BIT(FLAGS_HMAC); bctx->shash = crypto_alloc_shash(alg_base, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(bctx->shash)) { @@ -932,7 +934,7 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm) crypto_free_shash(tctx->fallback); tctx->fallback = NULL; - if (tctx->flags & FLAGS_HMAC) { + if (tctx->flags & BIT(FLAGS_HMAC)) { struct omap_sham_hmac_ctx *bctx = tctx->base; crypto_free_shash(bctx->shash); } @@ -1036,51 +1038,46 @@ static struct ahash_alg algs[] = { static void omap_sham_done_task(unsigned long data) { struct omap_sham_dev *dd = (struct omap_sham_dev *)data; - struct ahash_request *req = dd->req; - struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - int ready = 0, err = 0; + int err = 0; - if (ctx->flags & FLAGS_OUTPUT_READY) { - ctx->flags &= ~FLAGS_OUTPUT_READY; - ready = 1; + if (!test_bit(FLAGS_BUSY, &dd->flags)) { + omap_sham_handle_queue(dd, NULL); + return; } - if (dd->flags & FLAGS_DMA_ACTIVE) { - dd->flags &= ~FLAGS_DMA_ACTIVE; - omap_sham_update_dma_stop(dd); - if (!dd->err) + if (test_bit(FLAGS_CPU, &dd->flags)) { + if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) + goto finish; + } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { + if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { + omap_sham_update_dma_stop(dd); + if (dd->err) { + err = dd->err; + goto finish; + } + } + if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { + /* hash or semi-hash ready */ + clear_bit(FLAGS_DMA_READY, &dd->flags); err = omap_sham_update_dma_start(dd); + if (err != -EINPROGRESS) + goto finish; + } } - err = dd->err ? 
: err; - - if (err != -EINPROGRESS && (ready || err)) { - dev_dbg(dd->dev, "update done: err: %d\n", err); - /* finish curent request */ - omap_sham_finish_req(req, err); - /* start new request */ - omap_sham_handle_queue(dd, NULL); - } -} - -static void omap_sham_queue_task(unsigned long data) -{ - struct omap_sham_dev *dd = (struct omap_sham_dev *)data; + return; - omap_sham_handle_queue(dd, NULL); +finish: + dev_dbg(dd->dev, "update done: err: %d\n", err); + /* finish curent request */ + omap_sham_finish_req(dd->req, err); } static irqreturn_t omap_sham_irq(int irq, void *dev_id) { struct omap_sham_dev *dd = dev_id; - struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - - if (!ctx) { - dev_err(dd->dev, "unknown interrupt.\n"); - return IRQ_HANDLED; - } - if (unlikely(ctx->flags & FLAGS_FINAL)) + if (unlikely(test_bit(FLAGS_FINAL, &dd->flags))) /* final -> allow device to go to power-saving mode */ omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH); @@ -1088,8 +1085,12 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id) SHA_REG_CTRL_OUTPUT_READY); omap_sham_read(dd, SHA_REG_CTRL); - ctx->flags |= FLAGS_OUTPUT_READY; - dd->err = 0; + if (!test_bit(FLAGS_BUSY, &dd->flags)) { + dev_warn(dd->dev, "Interrupt when no active requests.\n"); + return IRQ_HANDLED; + } + + set_bit(FLAGS_OUTPUT_READY, &dd->flags); tasklet_schedule(&dd->done_task); return IRQ_HANDLED; @@ -1102,9 +1103,10 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data) if (ch_status != OMAP_DMA_BLOCK_IRQ) { pr_err("omap-sham DMA error status: 0x%hx\n", ch_status); dd->err = -EIO; - dd->flags &= ~FLAGS_INIT; /* request to re-initialize */ + clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */ } + set_bit(FLAGS_DMA_READY, &dd->flags); tasklet_schedule(&dd->done_task); } @@ -1151,7 +1153,6 @@ static int __devinit omap_sham_probe(struct platform_device *pdev) INIT_LIST_HEAD(&dd->list); spin_lock_init(&dd->lock); tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); - tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd); crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); dd->irq = -1; @@ -1260,7 +1261,6 @@ static int __devexit omap_sham_remove(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(algs); i++) crypto_unregister_ahash(&algs[i]); tasklet_kill(&dd->done_task); - tasklet_kill(&dd->queue_task); iounmap(dd->io_base); clk_put(dd->iclk); omap_sham_dma_cleanup(dd); diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 854e2632f9a6..8a0bb417aa11 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1,7 +1,7 @@ /* * talitos - Freescale Integrated Security Engine (SEC) device driver * - * Copyright (c) 2008-2010 Freescale Semiconductor, Inc. + * Copyright (c) 2008-2011 Freescale Semiconductor, Inc. * * Scatterlist Crypto API glue code copied from files with the following: * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au> @@ -282,6 +282,7 @@ static int init_device(struct device *dev) /** * talitos_submit - submits a descriptor to the device for processing * @dev: the SEC device to be used + * @ch: the SEC device channel to be used * @desc: the descriptor to be processed by the device * @callback: whom to call when processing is complete * @context: a handle for use by caller (optional) @@ -290,7 +291,7 @@ static int init_device(struct device *dev) * callback must check err and feedback in descriptor header * for device processing status. 
*/ -static int talitos_submit(struct device *dev, struct talitos_desc *desc, +static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, void (*callback)(struct device *dev, struct talitos_desc *desc, void *context, int error), @@ -298,15 +299,9 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc, { struct talitos_private *priv = dev_get_drvdata(dev); struct talitos_request *request; - unsigned long flags, ch; + unsigned long flags; int head; - /* select done notification */ - desc->hdr |= DESC_HDR_DONE_NOTIFY; - - /* emulate SEC's round-robin channel fifo polling scheme */ - ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1); - spin_lock_irqsave(&priv->chan[ch].head_lock, flags); if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) { @@ -706,6 +701,7 @@ static void talitos_unregister_rng(struct device *dev) struct talitos_ctx { struct device *dev; + int ch; __be32 desc_hdr_template; u8 key[TALITOS_MAX_KEY_SIZE]; u8 iv[TALITOS_MAX_IV_LENGTH]; @@ -1117,7 +1113,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0, DMA_FROM_DEVICE); - ret = talitos_submit(dev, desc, callback, areq); + ret = talitos_submit(dev, ctx->ch, desc, callback, areq); if (ret != -EINPROGRESS) { ipsec_esp_unmap(dev, edesc, areq); kfree(edesc); @@ -1382,22 +1378,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); - struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher); - - if (keylen > TALITOS_MAX_KEY_SIZE) - goto badkey; - - if (keylen < alg->min_keysize || keylen > alg->max_keysize) - goto badkey; memcpy(&ctx->key, key, keylen); ctx->keylen = keylen; return 0; - -badkey: - crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; } static void common_nonsnoop_unmap(struct device *dev, @@ -1433,7 +1418,6 @@ static void ablkcipher_done(struct device *dev, static int common_nonsnoop(struct talitos_edesc *edesc, struct ablkcipher_request *areq, - u8 *giv, void (*callback) (struct device *dev, struct talitos_desc *desc, void *context, int error)) @@ -1453,7 +1437,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, /* cipher iv */ ivsize = crypto_ablkcipher_ivsize(cipher); - map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0, + map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, areq->info, 0, DMA_TO_DEVICE); /* cipher key */ @@ -1524,7 +1508,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, to_talitos_ptr(&desc->ptr[6], 0); desc->ptr[6].j_extent = 0; - ret = talitos_submit(dev, desc, callback, areq); + ret = talitos_submit(dev, ctx->ch, desc, callback, areq); if (ret != -EINPROGRESS) { common_nonsnoop_unmap(dev, edesc, areq); kfree(edesc); @@ -1556,7 +1540,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq) /* set encrypt */ edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; - return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); + return common_nonsnoop(edesc, areq, ablkcipher_done); } static int ablkcipher_decrypt(struct ablkcipher_request *areq) @@ -1572,7 +1556,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq) edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; - return common_nonsnoop(edesc, areq, NULL, ablkcipher_done); + return common_nonsnoop(edesc, areq, ablkcipher_done); } static void 
common_nonsnoop_hash_unmap(struct device *dev, @@ -1703,7 +1687,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, /* last DWORD empty */ desc->ptr[6] = zero_entry; - ret = talitos_submit(dev, desc, callback, areq); + ret = talitos_submit(dev, ctx->ch, desc, callback, areq); if (ret != -EINPROGRESS) { common_nonsnoop_hash_unmap(dev, edesc, areq); kfree(edesc); @@ -2244,6 +2228,7 @@ static int talitos_cra_init(struct crypto_tfm *tfm) struct crypto_alg *alg = tfm->__crt_alg; struct talitos_crypto_alg *talitos_alg; struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); + struct talitos_private *priv; if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) talitos_alg = container_of(__crypto_ahash_alg(alg), @@ -2256,9 +2241,17 @@ static int talitos_cra_init(struct crypto_tfm *tfm) /* update context with ptr to dev */ ctx->dev = talitos_alg->dev; + /* assign SEC channel to tfm in round-robin fashion */ + priv = dev_get_drvdata(ctx->dev); + ctx->ch = atomic_inc_return(&priv->last_chan) & + (priv->num_channels - 1); + /* copy descriptor header template value */ ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template; + /* select done notification */ + ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY; + return 0; } |
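[Editor's note] The talitos hunks above move the round-robin channel choice out of talitos_submit() and into talitos_cra_init(), so a channel is picked once per tfm with atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1) and then reused for every request on that tfm. The sketch below is a small userspace illustration of that selection scheme under the same power-of-two assumption the original submit-path code already made; the names are illustrative, not driver symbols.

/*
 * Userspace sketch of per-tfm round-robin channel assignment:
 * bump an atomic counter once per new transform and mask it with
 * (NUM_CHANNELS - 1), which distributes tfms evenly only because
 * NUM_CHANNELS is a power of two.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NUM_CHANNELS 4		/* power of two, as the mask trick requires */

static atomic_int last_chan;	/* analogue of priv->last_chan */

/* Pick a channel for a newly created transform context. */
static int assign_channel(void)
{
	/* fetch_add + 1 mimics increment-then-return semantics */
	return (atomic_fetch_add(&last_chan, 1) + 1) & (NUM_CHANNELS - 1);
}

int main(void)
{
	/* Eight new "tfms" cycle through the four channels twice. */
	for (int i = 0; i < 8; i++)
		printf("tfm %d -> channel %d\n", i, assign_channel());
	return 0;
}

Doing the assignment at init time also lets the driver fold DESC_HDR_DONE_NOTIFY into the per-tfm descriptor header template, which is what removes both the header fixup and the channel computation from the hot submit path in the diff above.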