Diffstat (limited to 'drivers/crypto/omap-aes.c')
-rw-r--r--	drivers/crypto/omap-aes.c	342
1 file changed, 161 insertions(+), 181 deletions(-)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 45a4647f7030..824ddf2a66ff 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -142,8 +142,8 @@ int omap_aes_write_ctrl(struct omap_aes_dev *dd)
__le32_to_cpu(dd->ctx->key[i]));
}
- if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
- omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
+ if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->iv)
+ omap_aes_write_n(dd, AES_REG_IV(dd, 0), (void *)dd->req->iv, 4);
if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv) {
rctx = aead_request_ctx(dd->aead_req);
@@ -269,13 +269,14 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
struct scatterlist *out_sg,
int in_sg_len, int out_sg_len)
{
- struct dma_async_tx_descriptor *tx_in, *tx_out;
+ struct dma_async_tx_descriptor *tx_in, *tx_out = NULL, *cb_desc;
struct dma_slave_config cfg;
int ret;
if (dd->pio_only) {
scatterwalk_start(&dd->in_walk, dd->in_sg);
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ if (out_sg_len)
+ scatterwalk_start(&dd->out_walk, dd->out_sg);
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -312,34 +313,45 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
/* No callback necessary */
tx_in->callback_param = dd;
+ tx_in->callback = NULL;
/* OUT */
- ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
- if (ret) {
- dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
- ret);
- return ret;
- }
+ if (out_sg_len) {
+ ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+ ret);
+ return ret;
+ }
- tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!tx_out) {
- dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
- return -EINVAL;
+ tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg,
+ out_sg_len,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx_out) {
+ dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
+ return -EINVAL;
+ }
+
+ cb_desc = tx_out;
+ } else {
+ cb_desc = tx_in;
}
if (dd->flags & FLAGS_GCM)
- tx_out->callback = omap_aes_gcm_dma_out_callback;
+ cb_desc->callback = omap_aes_gcm_dma_out_callback;
else
- tx_out->callback = omap_aes_dma_out_callback;
- tx_out->callback_param = dd;
+ cb_desc->callback = omap_aes_dma_out_callback;
+ cb_desc->callback_param = dd;
+
dmaengine_submit(tx_in);
- dmaengine_submit(tx_out);
+ if (tx_out)
+ dmaengine_submit(tx_out);
dma_async_issue_pending(dd->dma_lch_in);
- dma_async_issue_pending(dd->dma_lch_out);
+ if (out_sg_len)
+ dma_async_issue_pending(dd->dma_lch_out);
/* start DMA */
dd->pdata->trigger(dd, dd->total);
@@ -361,11 +373,13 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
return -EINVAL;
}
- err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
- DMA_FROM_DEVICE);
- if (!err) {
- dev_err(dd->dev, "dma_map_sg() error\n");
- return -EINVAL;
+ if (dd->out_sg_len) {
+ err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ return -EINVAL;
+ }
}
}
@@ -373,8 +387,9 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
dd->out_sg_len);
if (err && !dd->pio_only) {
dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
- dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
- DMA_FROM_DEVICE);
+ if (dd->out_sg_len)
+ dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+ DMA_FROM_DEVICE);
}
return err;
@@ -382,11 +397,11 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
- struct ablkcipher_request *req = dd->req;
+ struct skcipher_request *req = dd->req;
pr_debug("err: %d\n", err);
- crypto_finalize_ablkcipher_request(dd->engine, req, err);
+ crypto_finalize_skcipher_request(dd->engine, req, err);
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
@@ -403,10 +418,10 @@ int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
}
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
if (req)
- return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
+ return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
return 0;
}
@@ -414,10 +429,10 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
static int omap_aes_prepare_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
- struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
struct omap_aes_dev *dd = rctx->dd;
int ret;
u16 flags;
@@ -427,8 +442,8 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
/* assign new request to device */
dd->req = req;
- dd->total = req->nbytes;
- dd->total_save = req->nbytes;
+ dd->total = req->cryptlen;
+ dd->total_save = req->cryptlen;
dd->in_sg = req->src;
dd->out_sg = req->dst;
dd->orig_out = req->dst;
@@ -469,8 +484,8 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
static int omap_aes_crypt_req(struct crypto_engine *engine,
void *areq)
{
- struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
- struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
struct omap_aes_dev *dd = rctx->dd;
if (!dd)
@@ -479,6 +494,14 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
return omap_aes_crypt_dma_start(dd);
}
+static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ ((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
+}
+
static void omap_aes_done_task(unsigned long data)
{
struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
@@ -494,37 +517,44 @@ static void omap_aes_done_task(unsigned long data)
omap_aes_crypt_dma_stop(dd);
}
- omap_crypto_cleanup(dd->in_sgl, NULL, 0, dd->total_save,
+ omap_crypto_cleanup(dd->in_sg, NULL, 0, dd->total_save,
FLAGS_IN_DATA_ST_SHIFT, dd->flags);
- omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
+ omap_crypto_cleanup(dd->out_sg, dd->orig_out, 0, dd->total_save,
FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
+ /* Update IV output */
+ if (dd->flags & (FLAGS_CBC | FLAGS_CTR))
+ omap_aes_copy_ivout(dd, dd->req->iv);
+
omap_aes_finish_req(dd, 0);
pr_debug("exit\n");
}
-static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
- struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
- struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
+ crypto_skcipher_reqtfm(req));
+ struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
struct omap_aes_dev *dd;
int ret;
- pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
+ if ((req->cryptlen % AES_BLOCK_SIZE) && !(mode & FLAGS_CTR))
+ return -EINVAL;
+
+ pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen,
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (req->nbytes < aes_fallback_sz) {
+ if (req->cryptlen < aes_fallback_sz) {
SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL,
NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
+ req->cryptlen, req->iv);
if (mode & FLAGS_ENCRYPT)
ret = crypto_skcipher_encrypt(subreq);
@@ -545,10 +575,10 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
/* ********************** ALG API ************************************ */
-static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int omap_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
@@ -571,32 +601,32 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
-static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
+static int omap_aes_ecb_encrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_ENCRYPT);
}
-static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
+static int omap_aes_ecb_decrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, 0);
}
-static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
+static int omap_aes_cbc_encrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
-static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
+static int omap_aes_cbc_decrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_CBC);
}
-static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
+static int omap_aes_ctr_encrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}
-static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
+static int omap_aes_ctr_decrypt(struct skcipher_request *req)
{
return omap_aes_crypt(req, FLAGS_CTR);
}
@@ -606,10 +636,10 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
static int omap_aes_crypt_req(struct crypto_engine *engine,
void *req);
-static int omap_aes_cra_init(struct crypto_tfm *tfm)
+static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_sync_skcipher *blk;
blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -618,7 +648,7 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
ctx->fallback = blk;
- tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
ctx->enginectx.op.unprepare_request = NULL;
@@ -627,39 +657,9 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
-{
- struct omap_aes_dev *dd = NULL;
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
- int err;
-
- /* Find AES device, currently picks the first device */
- spin_lock_bh(&list_lock);
- list_for_each_entry(dd, &dev_list, list) {
- break;
- }
- spin_unlock_bh(&list_lock);
-
- err = pm_runtime_get_sync(dd->dev);
- if (err < 0) {
- dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
- __func__, err);
- return err;
- }
-
- tfm->reqsize = sizeof(struct omap_aes_reqctx);
- ctx->ctr = crypto_alloc_skcipher("ecb(aes)", 0, 0);
- if (IS_ERR(ctx->ctr)) {
- pr_warn("could not load aes driver for encrypting IV\n");
- return PTR_ERR(ctx->ctr);
- }
-
- return 0;
-}
-
-static void omap_aes_cra_exit(struct crypto_tfm *tfm)
+static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
{
- struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->fallback)
crypto_free_sync_skcipher(ctx->fallback);
@@ -667,90 +667,71 @@ static void omap_aes_cra_exit(struct crypto_tfm *tfm)
ctx->fallback = NULL;
}
-static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
-{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
-
- omap_aes_cra_exit(crypto_aead_tfm(tfm));
-
- if (ctx->ctr)
- crypto_free_skcipher(ctx->ctr);
-}
-
/* ********************** ALGS ************************************ */
-static struct crypto_alg algs_ecb_cbc[] = {
+static struct skcipher_alg algs_ecb_cbc[] = {
{
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_aes_cra_init,
- .cra_exit = omap_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_ecb_encrypt,
- .decrypt = omap_aes_ecb_decrypt,
- }
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_ecb_encrypt,
+ .decrypt = omap_aes_ecb_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
},
{
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_aes_cra_init,
- .cra_exit = omap_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_cbc_encrypt,
- .decrypt = omap_aes_cbc_decrypt,
- }
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_cbc_encrypt,
+ .decrypt = omap_aes_cbc_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
}
};
-static struct crypto_alg algs_ctr[] = {
+static struct skcipher_alg algs_ctr[] = {
{
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = omap_aes_cra_init,
- .cra_exit = omap_aes_cra_exit,
- .cra_u.ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_ctr_encrypt,
- .decrypt = omap_aes_ctr_decrypt,
- }
-} ,
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_ctr_encrypt,
+ .decrypt = omap_aes_ctr_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
+}
};
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
@@ -769,15 +750,15 @@ static struct aead_alg algs_aead_gcm[] = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.init = omap_aes_gcm_cra_init,
- .exit = omap_aes_gcm_cra_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = omap_aes_gcm_setkey,
+ .setauthsize = omap_aes_gcm_setauthsize,
.encrypt = omap_aes_gcm_encrypt,
.decrypt = omap_aes_gcm_decrypt,
},
@@ -789,15 +770,15 @@ static struct aead_alg algs_aead_gcm[] = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.init = omap_aes_gcm_cra_init,
- .exit = omap_aes_gcm_cra_exit,
.maxauthsize = AES_BLOCK_SIZE,
.ivsize = GCM_RFC4106_IV_SIZE,
.setkey = omap_aes_4106gcm_setkey,
+ .setauthsize = omap_aes_4106gcm_setauthsize,
.encrypt = omap_aes_4106gcm_encrypt,
.decrypt = omap_aes_4106gcm_decrypt,
},
@@ -1121,7 +1102,7 @@ static int omap_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_aes_dev *dd;
- struct crypto_alg *algp;
+ struct skcipher_alg *algp;
struct aead_alg *aalg;
struct resource res;
int err = -ENOMEM, i, j, irq = -1;
@@ -1180,7 +1161,6 @@ static int omap_aes_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "can't get IRQ resource\n");
err = irq;
goto err_irq;
}
@@ -1216,9 +1196,9 @@ static int omap_aes_probe(struct platform_device *pdev)
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
algp = &dd->pdata->algs_info[i].algs_list[j];
- pr_debug("reg alg: %s\n", algp->cra_name);
+ pr_debug("reg alg: %s\n", algp->base.cra_name);
- err = crypto_register_alg(algp);
+ err = crypto_register_skcipher(algp);
if (err)
goto err_algs;
@@ -1231,9 +1211,8 @@ static int omap_aes_probe(struct platform_device *pdev)
!dd->pdata->aead_algs_info->registered) {
for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
aalg = &dd->pdata->aead_algs_info->algs_list[i];
- algp = &aalg->base;
- pr_debug("reg alg: %s\n", algp->cra_name);
+ pr_debug("reg alg: %s\n", aalg->base.cra_name);
err = crypto_register_aead(aalg);
if (err)
@@ -1258,7 +1237,7 @@ err_aead_algs:
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_alg(
+ crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
err_engine:
@@ -1291,7 +1270,7 @@ static int omap_aes_remove(struct platform_device *pdev)
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_alg(
+ crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
@@ -1304,7 +1283,8 @@ static int omap_aes_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
- dd = NULL;
+
+ sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);
return 0;
}
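
For context, a minimal sketch of how a kernel caller might exercise the converted "cbc(aes)" skcipher once this driver registers it. This is not part of the patch: the function name, key/IV/buffer handling and GFP flags are illustrative placeholders, and only standard crypto API calls (crypto_alloc_skcipher, skcipher_request_set_crypt, crypto_wait_req, etc.) are used.

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical caller: one in-place CBC encryption through the skcipher
 * API that this driver now implements ("cbc(aes)" / "cbc-aes-omap").
 * Key, IV and buffer contents are placeholders supplied by the caller.
 */
static int example_cbc_encrypt(u8 *buf, unsigned int len,
			       const u8 key[AES_KEYSIZE_128],
			       u8 iv[AES_BLOCK_SIZE])
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	/* cryptlen and iv here are the fields the driver now reads in
	 * place of the old ablkcipher req->nbytes / req->info.
	 */
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

The skcipher_request fields set in this sketch (src/dst scatterlists, cryptlen, iv) are exactly the ones omap_aes_prepare_req() and omap_aes_write_ctrl() consume after the conversion, and the IV buffer passed in is what omap_aes_copy_ivout() updates on completion for CBC/CTR.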