Diffstat (limited to 'drivers/crypto/qce')
-rw-r--r--  drivers/crypto/qce/Makefile                                       7
-rw-r--r--  drivers/crypto/qce/cipher.h                                       8
-rw-r--r--  drivers/crypto/qce/common.c                                     256
-rw-r--r--  drivers/crypto/qce/common.h                                       3
-rw-r--r--  drivers/crypto/qce/core.c                                        10
-rw-r--r--  drivers/crypto/qce/dma.c                                         10
-rw-r--r--  drivers/crypto/qce/dma.h                                          3
-rw-r--r--  drivers/crypto/qce/sha.c                                          4
-rw-r--r--  drivers/crypto/qce/skcipher.c (renamed from drivers/crypto/qce/ablkcipher.c)  246
9 files changed, 289 insertions(+), 258 deletions(-)
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 19a7f899acff..14ade8a7d664 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
qcrypto-objs := core.o \
common.o \
- dma.o \
- sha.o \
- ablkcipher.o
+ dma.o
+
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index 5cab8f0706a8..7770660bc853 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -45,12 +45,12 @@ struct qce_cipher_reqctx {
unsigned int cryptlen;
};
-static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
+static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- return container_of(alg, struct qce_alg_template, alg.crypto);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ return container_of(alg, struct qce_alg_template, alg.skcipher);
}
-extern const struct qce_algo_ops ablkcipher_ops;
+extern const struct qce_algo_ops skcipher_ops;
#endif /* _CIPHER_H_ */
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 3fb510164326..629e7f34dc09 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -45,52 +45,56 @@ qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
qce_write(qce, offset + i * sizeof(u32), 0);
}
-static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+static u32 qce_config_reg(struct qce_device *qce, int little)
{
- u32 cfg = 0;
+ u32 beats = (qce->burst_size >> 3) - 1;
+ u32 pipe_pair = qce->pipe_pair_id;
+ u32 config;
- if (IS_AES(flags)) {
- if (aes_key_size == AES_KEYSIZE_128)
- cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
- else if (aes_key_size == AES_KEYSIZE_256)
- cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
- }
+ config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+ config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+ BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+ config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+ config &= ~HIGH_SPD_EN_N_SHIFT;
- if (IS_AES(flags))
- cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
- else if (IS_DES(flags) || IS_3DES(flags))
- cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+ if (little)
+ config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
- if (IS_DES(flags))
- cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+ return config;
+}
- if (IS_3DES(flags))
- cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+ __be32 *d = dst;
+ const u8 *s = src;
+ unsigned int n;
- switch (flags & QCE_MODE_MASK) {
- case QCE_MODE_ECB:
- cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CBC:
- cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CTR:
- cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_XTS:
- cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
- break;
- case QCE_MODE_CCM:
- cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
- cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
- break;
- default:
- return ~0;
+ n = len / sizeof(u32);
+ for (; n > 0; n--) {
+ *d = cpu_to_be32p((const __u32 *) s);
+ s += sizeof(__u32);
+ d++;
}
+}
- return cfg;
+static void qce_setup_config(struct qce_device *qce)
+{
+ u32 config;
+
+ /* get big endianness */
+ config = qce_config_reg(qce, 0);
+
+ /* clear status */
+ qce_write(qce, REG_STATUS, 0);
+ qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+ qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
u32 cfg = 0;
@@ -137,88 +141,6 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
return cfg;
}
-static u32 qce_config_reg(struct qce_device *qce, int little)
-{
- u32 beats = (qce->burst_size >> 3) - 1;
- u32 pipe_pair = qce->pipe_pair_id;
- u32 config;
-
- config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
- config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
- BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
- config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
- config &= ~HIGH_SPD_EN_N_SHIFT;
-
- if (little)
- config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
-
- return config;
-}
-
-void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
-{
- __be32 *d = dst;
- const u8 *s = src;
- unsigned int n;
-
- n = len / sizeof(u32);
- for (; n > 0; n--) {
- *d = cpu_to_be32p((const __u32 *) s);
- s += sizeof(__u32);
- d++;
- }
-}
-
-static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
-{
- u8 swap[QCE_AES_IV_LENGTH];
- u32 i, j;
-
- if (ivsize > QCE_AES_IV_LENGTH)
- return;
-
- memset(swap, 0, QCE_AES_IV_LENGTH);
-
- for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
- i < QCE_AES_IV_LENGTH; i++, j--)
- swap[i] = src[j];
-
- qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
-}
-
-static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
- unsigned int enckeylen, unsigned int cryptlen)
-{
- u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
- unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
- unsigned int xtsdusize;
-
- qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
- enckeylen / 2);
- qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
-
- /* xts du size 512B */
- xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
- qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
-}
-
-static void qce_setup_config(struct qce_device *qce)
-{
- u32 config;
-
- /* get big endianness */
- config = qce_config_reg(qce, 0);
-
- /* clear status */
- qce_write(qce, REG_STATUS, 0);
- qce_write(qce, REG_CONFIG, config);
-}
-
-static inline void qce_crypto_go(struct qce_device *qce)
-{
- qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
-}
-
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
{
@@ -303,14 +225,95 @@ go_proc:
return 0;
}
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+ u32 cfg = 0;
+
+ if (IS_AES(flags)) {
+ if (aes_key_size == AES_KEYSIZE_128)
+ cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+ else if (aes_key_size == AES_KEYSIZE_256)
+ cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+ }
+
+ if (IS_AES(flags))
+ cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+ else if (IS_DES(flags) || IS_3DES(flags))
+ cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+
+ if (IS_DES(flags))
+ cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+
+ if (IS_3DES(flags))
+ cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+
+ switch (flags & QCE_MODE_MASK) {
+ case QCE_MODE_ECB:
+ cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CBC:
+ cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CTR:
+ cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_XTS:
+ cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+ break;
+ case QCE_MODE_CCM:
+ cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+ cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+ break;
+ default:
+ return ~0;
+ }
+
+ return cfg;
+}
+
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+ u8 swap[QCE_AES_IV_LENGTH];
+ u32 i, j;
+
+ if (ivsize > QCE_AES_IV_LENGTH)
+ return;
+
+ memset(swap, 0, QCE_AES_IV_LENGTH);
+
+ for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+ i < QCE_AES_IV_LENGTH; i++, j--)
+ swap[i] = src[j];
+
+ qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+ unsigned int enckeylen, unsigned int cryptlen)
+{
+ u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+ unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+ unsigned int xtsdusize;
+
+ qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+ enckeylen / 2);
+ qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+ /* xts du size 512B */
+ xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+ qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
-static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
@@ -384,15 +387,20 @@ static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
return 0;
}
+#endif
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
u32 offset)
{
switch (type) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- return qce_setup_regs_ablkcipher(async_req, totallen, offset);
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ return qce_setup_regs_skcipher(async_req, totallen, offset);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req, totallen, offset);
+#endif
default:
return -EINVAL;
}
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 47fb523357ac..282d4317470d 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
+#include <crypto/internal/skcipher.h>
/* key size in bytes */
#define QCE_SHA_HMAC_KEY_SIZE 64
@@ -79,7 +80,7 @@ struct qce_alg_template {
unsigned long alg_flags;
const u32 *std_iv;
union {
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg ahash;
} alg;
struct qce_device *qce;
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index ef1d74e8ddb2..cb6d61eb7302 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -22,8 +22,12 @@
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
- &ablkcipher_ops,
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+ &skcipher_ops,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
&ahash_ops,
+#endif
};
static void qce_unregister_algs(struct qce_device *qce)
@@ -167,7 +171,6 @@ static int qce_crypto_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct qce_device *qce;
- struct resource *res;
int ret;
qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
@@ -177,8 +180,7 @@ static int qce_crypto_probe(struct platform_device *pdev)
qce->dev = dev;
platform_set_drvdata(pdev, qce);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- qce->base = devm_ioremap_resource(&pdev->dev, res);
+ qce->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(qce->base))
return PTR_ERR(qce->base);
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 0984a719144d..7da893dc00e7 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -12,11 +12,11 @@ int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
int ret;
- dma->txchan = dma_request_slave_channel_reason(dev, "tx");
+ dma->txchan = dma_request_chan(dev, "tx");
if (IS_ERR(dma->txchan))
return PTR_ERR(dma->txchan);
- dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
+ dma->rxchan = dma_request_chan(dev, "rx");
if (IS_ERR(dma->rxchan)) {
ret = PTR_ERR(dma->rxchan);
goto error_rx;
@@ -47,7 +47,8 @@ void qce_dma_release(struct qce_dma_data *dma)
}
struct scatterlist *
-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
+ int max_ents)
{
struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
@@ -60,12 +61,13 @@ qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
if (!sg)
return ERR_PTR(-EINVAL);
- while (new_sgl && sg) {
+ while (new_sgl && sg && max_ents) {
sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
new_sgl->offset);
sg_last = sg;
sg = sg_next(sg);
new_sgl = sg_next(new_sgl);
+ max_ents--;
}
return sg_last;
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 1e25a9e0e6f8..ed25a0d9829e 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -42,6 +42,7 @@ int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
void qce_dma_issue_pending(struct qce_dma_data *dma);
int qce_dma_terminate_all(struct qce_dma_data *dma);
struct scatterlist *
-qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add,
+ int max_ents);
#endif /* _DMA_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 0853e74583ad..1ab62e7d5f3c 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -396,8 +396,6 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
- if (ret)
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
kfree(buf);
err_free_req:
@@ -495,7 +493,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base = &alg->halg.base;
base->cra_blocksize = def->blocksize;
base->cra_priority = 300;
- base->cra_flags = CRYPTO_ALG_ASYNC;
+ base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
base->cra_ctxsize = sizeof(struct qce_sha_ctx);
base->cra_alignmask = 0;
base->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/skcipher.c
index a976210ba41c..4217b745f124 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -7,20 +7,21 @@
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/aes.h>
-#include <crypto/des.h>
+#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "cipher.h"
-static LIST_HEAD(ablkcipher_algs);
+static LIST_HEAD(skcipher_algs);
-static void qce_ablkcipher_done(void *data)
+static void qce_skcipher_done(void *data)
{
struct crypto_async_request *async_req = data;
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
+ struct qce_result_dump *result_buf = qce->dma.result_buf;
enum dma_data_direction dir_src, dir_dst;
u32 status;
int error;
@@ -32,7 +33,7 @@ static void qce_ablkcipher_done(void *data)
error = qce_dma_terminate_all(&qce->dma);
if (error)
- dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
+ dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
error);
if (diff_dst)
@@ -43,18 +44,19 @@ static void qce_ablkcipher_done(void *data)
error = qce_check_status(qce, &status);
if (error < 0)
- dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
+ dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
+ memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
qce->async_req_done(tmpl->qce, error);
}
static int
-qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
+qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
enum dma_data_direction dir_src, dir_dst;
struct scatterlist *sg;
@@ -62,17 +64,17 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
gfp_t gfp;
int ret;
- rctx->iv = req->info;
- rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- rctx->cryptlen = req->nbytes;
+ rctx->iv = req->iv;
+ rctx->ivsize = crypto_skcipher_ivsize(skcipher);
+ rctx->cryptlen = req->cryptlen;
diff_dst = (req->src != req->dst) ? true : false;
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
- rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
+ rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (diff_dst)
- rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
else
rctx->dst_nents = rctx->src_nents;
if (rctx->src_nents < 0) {
@@ -95,13 +97,13 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
}
- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto error_free;
@@ -125,13 +127,13 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
rctx->dst_sg, rctx->dst_nents,
- qce_ablkcipher_done, async_req);
+ qce_skcipher_done, async_req);
if (ret)
goto error_unmap_src;
qce_dma_issue_pending(&qce->dma);
- ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
+ ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
if (ret)
goto error_terminate;
@@ -149,32 +151,23 @@ error_free:
return ret;
}
-static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
+ unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
int ret;
if (!key || !keylen)
return -EINVAL;
- if (IS_AES(flags)) {
- switch (keylen) {
- case AES_KEYSIZE_128:
- case AES_KEYSIZE_256:
- break;
- default:
- goto fallback;
- }
- } else if (IS_DES(flags)) {
- u32 tmp[DES_EXPKEY_WORDS];
-
- ret = des_ekey(tmp, key);
- if (!ret && (crypto_ablkcipher_get_flags(ablk) &
- CRYPTO_TFM_REQ_FORBID_WEAK_KEYS))
- goto weakkey;
+ switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ goto fallback;
}
ctx->enc_keylen = keylen;
@@ -185,51 +178,60 @@ fallback:
if (!ret)
ctx->enc_keylen = keylen;
return ret;
-weakkey:
- crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
- return -EINVAL;
}
-static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
+ unsigned int keylen)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+ int err;
+
+ err = verify_skcipher_des_key(ablk, key);
+ if (err)
+ return err;
+
+ ctx->enc_keylen = keylen;
+ memcpy(ctx->enc_key, key, keylen);
+ return 0;
+}
+
+static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
- struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
- u32 flags;
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
int err;
- flags = crypto_ablkcipher_get_flags(ablk);
- err = __des3_verify_key(&flags, key);
- if (unlikely(err)) {
- crypto_ablkcipher_set_flags(ablk, flags);
+ err = verify_skcipher_des3_key(ablk, key);
+ if (err)
return err;
- }
ctx->enc_keylen = keylen;
memcpy(ctx->enc_key, key, keylen);
return 0;
}
-static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
+static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ int keylen;
int ret;
rctx->flags = tmpl->alg_flags;
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+ keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
- if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
- ctx->enc_keylen != AES_KEYSIZE_256) {
+ if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_256) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
ret = encrypt ? crypto_skcipher_encrypt(subreq) :
crypto_skcipher_decrypt(subreq);
skcipher_request_zero(subreq);
@@ -239,46 +241,54 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}
-static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int qce_skcipher_encrypt(struct skcipher_request *req)
{
- return qce_ablkcipher_crypt(req, 1);
+ return qce_skcipher_crypt(req, 1);
}
-static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int qce_skcipher_decrypt(struct skcipher_request *req)
{
- return qce_ablkcipher_crypt(req, 0);
+ return qce_skcipher_crypt(req, 0);
}
-static int qce_ablkcipher_init(struct crypto_tfm *tfm)
+static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
memset(ctx, 0, sizeof(*ctx));
- tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
+ return 0;
+}
- ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
+static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
+{
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ qce_skcipher_init(tfm);
+ ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
0, CRYPTO_ALG_NEED_FALLBACK);
return PTR_ERR_OR_ZERO(ctx->fallback);
}
-static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
+static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_sync_skcipher(ctx->fallback);
}
-struct qce_ablkcipher_def {
+struct qce_skcipher_def {
unsigned long flags;
const char *name;
const char *drv_name;
unsigned int blocksize;
+ unsigned int chunksize;
unsigned int ivsize;
unsigned int min_keysize;
unsigned int max_keysize;
};
-static const struct qce_ablkcipher_def ablkcipher_def[] = {
+static const struct qce_skcipher_def skcipher_def[] = {
{
.flags = QCE_ALG_AES | QCE_MODE_ECB,
.name = "ecb(aes)",
@@ -301,7 +311,8 @@ static const struct qce_ablkcipher_def ablkcipher_def[] = {
.flags = QCE_ALG_AES | QCE_MODE_CTR,
.name = "ctr(aes)",
.drv_name = "ctr-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
+ .chunksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -312,8 +323,8 @@ static const struct qce_ablkcipher_def ablkcipher_def[] = {
.drv_name = "xts-aes-qce",
.blocksize = AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
},
{
.flags = QCE_ALG_DES | QCE_MODE_ECB,
@@ -353,89 +364,96 @@ static const struct qce_ablkcipher_def ablkcipher_def[] = {
},
};
-static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
+static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
struct qce_device *qce)
{
struct qce_alg_template *tmpl;
- struct crypto_alg *alg;
+ struct skcipher_alg *alg;
int ret;
tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
if (!tmpl)
return -ENOMEM;
- alg = &tmpl->alg.crypto;
+ alg = &tmpl->alg.skcipher;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
- alg->cra_blocksize = def->blocksize;
- alg->cra_ablkcipher.ivsize = def->ivsize;
- alg->cra_ablkcipher.min_keysize = def->min_keysize;
- alg->cra_ablkcipher.max_keysize = def->max_keysize;
- alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ?
- qce_des3_setkey : qce_ablkcipher_setkey;
- alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
- alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
-
- alg->cra_priority = 300;
- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK;
- alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
- alg->cra_alignmask = 0;
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_module = THIS_MODULE;
- alg->cra_init = qce_ablkcipher_init;
- alg->cra_exit = qce_ablkcipher_exit;
+ alg->base.cra_blocksize = def->blocksize;
+ alg->chunksize = def->chunksize;
+ alg->ivsize = def->ivsize;
+ alg->min_keysize = def->min_keysize;
+ alg->max_keysize = def->max_keysize;
+ alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
+ IS_DES(def->flags) ? qce_des_setkey :
+ qce_skcipher_setkey;
+ alg->encrypt = qce_skcipher_encrypt;
+ alg->decrypt = qce_skcipher_decrypt;
+
+ alg->base.cra_priority = 300;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
+ alg->base.cra_alignmask = 0;
+ alg->base.cra_module = THIS_MODULE;
+
+ if (IS_AES(def->flags)) {
+ alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
+ alg->init = qce_skcipher_init_fallback;
+ alg->exit = qce_skcipher_exit;
+ } else {
+ alg->init = qce_skcipher_init;
+ }
INIT_LIST_HEAD(&tmpl->entry);
- tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
+ tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
tmpl->alg_flags = def->flags;
tmpl->qce = qce;
- ret = crypto_register_alg(alg);
+ ret = crypto_register_skcipher(alg);
if (ret) {
kfree(tmpl);
- dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
+ dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
return ret;
}
- list_add_tail(&tmpl->entry, &ablkcipher_algs);
- dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
+ list_add_tail(&tmpl->entry, &skcipher_algs);
+ dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
return 0;
}
-static void qce_ablkcipher_unregister(struct qce_device *qce)
+static void qce_skcipher_unregister(struct qce_device *qce)
{
struct qce_alg_template *tmpl, *n;
- list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
- crypto_unregister_alg(&tmpl->alg.crypto);
+ list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
+ crypto_unregister_skcipher(&tmpl->alg.skcipher);
list_del(&tmpl->entry);
kfree(tmpl);
}
}
-static int qce_ablkcipher_register(struct qce_device *qce)
+static int qce_skcipher_register(struct qce_device *qce)
{
int ret, i;
- for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
- ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
+ for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
+ ret = qce_skcipher_register_one(&skcipher_def[i], qce);
if (ret)
goto err;
}
return 0;
err:
- qce_ablkcipher_unregister(qce);
+ qce_skcipher_unregister(qce);
return ret;
}
-const struct qce_algo_ops ablkcipher_ops = {
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .register_algs = qce_ablkcipher_register,
- .unregister_algs = qce_ablkcipher_unregister,
- .async_req_handle = qce_ablkcipher_async_req_handle,
+const struct qce_algo_ops skcipher_ops = {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .register_algs = qce_skcipher_register,
+ .unregister_algs = qce_skcipher_unregister,
+ .async_req_handle = qce_skcipher_async_req_handle,
};
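
For context beyond the patch itself, the block below is a minimal sketch (not part of this diff) of how a kernel caller would exercise one of the skcipher algorithms the driver now registers, for example "ctr(aes)" (registered above as "ctr-aes-qce"), through the standard asynchronous skcipher API. The function name and the abbreviated error handling are illustrative only.

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/*
 * Sketch: encrypt `len` bytes from `src` into `dst` with AES-CTR.
 * The crypto core may bind this to the qce implementation
 * ("ctr-aes-qce") when it is the highest-priority provider.
 */
static int example_ctr_aes_encrypt(const u8 *key, unsigned int keylen,
				   u8 *iv, struct scatterlist *src,
				   struct scatterlist *dst, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/*
	 * The qce implementation is asynchronous (CRYPTO_ALG_ASYNC);
	 * wait for completion synchronously via crypto_wait_req().
	 */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}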