Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig               |   2
-rw-r--r--  crypto/algif_hash.c          |  73
-rw-r--r--  crypto/async_tx/async_pq.c   |   8
-rw-r--r--  crypto/blkcipher.c           |   3
-rw-r--r--  crypto/crct10dif_generic.c   |   5
-rw-r--r--  crypto/cryptd.c              |  12
-rw-r--r--  crypto/crypto_engine.c       | 203
-rw-r--r--  crypto/drbg.c                |  31
-rw-r--r--  crypto/echainiv.c            | 115
-rw-r--r--  crypto/gcm.c                 |   2
-rw-r--r--  crypto/ghash-generic.c       |  13
-rw-r--r--  crypto/mcryptd.c             |   7
-rw-r--r--  crypto/rsa-pkcs1pad.c        |  41
-rw-r--r--  crypto/rsa_helper.c          |   4
-rw-r--r--  crypto/sha3_generic.c        |  16
-rw-r--r--  crypto/testmgr.c             |  24
-rw-r--r--  crypto/testmgr.h             |   4
-rw-r--r--  crypto/xor.c                 |  41
-rw-r--r--  crypto/xts.c                 |   2
19 files changed, 354 insertions, 252 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index a9377bef25e3..84d71482bf08 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -439,7 +439,7 @@ config CRYPTO_CRC32C_INTEL
config CRYPTO_CRC32C_VPMSUM
tristate "CRC32c CRC algorithm (powerpc64)"
- depends on PPC64
+ depends on PPC64 && ALTIVEC
select CRYPTO_HASH
select CRC32
help
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 68a5ceaa04c8..2d8466f9e49b 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -39,6 +39,37 @@ struct algif_hash_tfm {
bool has_key;
};
+static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
+{
+ unsigned ds;
+
+ if (ctx->result)
+ return 0;
+
+ ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+
+ ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
+ if (!ctx->result)
+ return -ENOMEM;
+
+ memset(ctx->result, 0, ds);
+
+ return 0;
+}
+
+static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
+{
+ unsigned ds;
+
+ if (!ctx->result)
+ return;
+
+ ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+
+ sock_kzfree_s(sk, ctx->result, ds);
+ ctx->result = NULL;
+}
+
static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
size_t ignored)
{
@@ -54,6 +85,9 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
lock_sock(sk);
if (!ctx->more) {
+ if ((msg->msg_flags & MSG_MORE))
+ hash_free_result(sk, ctx);
+
err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
&ctx->completion);
if (err)
@@ -90,6 +124,10 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
ctx->more = msg->msg_flags & MSG_MORE;
if (!ctx->more) {
+ err = hash_alloc_result(sk, ctx);
+ if (err)
+ goto unlock;
+
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
&ctx->completion);
@@ -116,6 +154,13 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
sg_init_table(ctx->sgl.sg, 1);
sg_set_page(ctx->sgl.sg, page, size, offset);
+ if (!(flags & MSG_MORE)) {
+ err = hash_alloc_result(sk, ctx);
+ if (err)
+ goto unlock;
+ } else if (!ctx->more)
+ hash_free_result(sk, ctx);
+
ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
if (!(flags & MSG_MORE)) {
@@ -153,6 +198,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+ bool result;
int err;
if (len > ds)
@@ -161,17 +207,29 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
msg->msg_flags |= MSG_TRUNC;
lock_sock(sk);
+ result = ctx->result;
+ err = hash_alloc_result(sk, ctx);
+ if (err)
+ goto unlock;
+
+ ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
+
if (ctx->more) {
ctx->more = 0;
- ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
&ctx->completion);
if (err)
goto unlock;
+ } else if (!result) {
+ err = af_alg_wait_for_completion(
+ crypto_ahash_digest(&ctx->req),
+ &ctx->completion);
}
err = memcpy_to_msg(msg, ctx->result, len);
+ hash_free_result(sk, ctx);
+
unlock:
release_sock(sk);
@@ -394,8 +452,7 @@ static void hash_sock_destruct(struct sock *sk)
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
- sock_kzfree_s(sk, ctx->result,
- crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
+ hash_free_result(sk, ctx);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
@@ -407,20 +464,12 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
struct algif_hash_tfm *tfm = private;
struct crypto_ahash *hash = tfm->hash;
unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
- unsigned ds = crypto_ahash_digestsize(hash);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
- if (!ctx->result) {
- sock_kfree_s(sk, ctx, len);
- return -ENOMEM;
- }
-
- memset(ctx->result, 0, ds);
-
+ ctx->result = NULL;
ctx->len = len;
ctx->more = 0;
af_alg_init_completion(&ctx->completion);
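
For context, the algif_hash code changed above is driven from userspace through an AF_ALG socket. A minimal sketch of that usage (error handling omitted; assumes a kernel with CONFIG_CRYPTO_USER_API_HASH and a "sha256" implementation available):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
    	struct sockaddr_alg sa = {
    		.salg_family = AF_ALG,
    		.salg_type   = "hash",
    		.salg_name   = "sha256",
    	};
    	unsigned char digest[32];
    	int tfmfd, opfd;

    	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
    	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
    	opfd = accept(tfmfd, NULL, 0);

    	/* MSG_MORE keeps ctx->more set; the final send without it is
    	 * the path where hash_alloc_result() now allocates the digest
    	 * buffer on demand. */
    	send(opfd, "hello ", 6, MSG_MORE);
    	send(opfd, "world", 5, 0);
    	read(opfd, digest, sizeof(digest));

    	for (unsigned i = 0; i < sizeof(digest); i++)
    		printf("%02x", digest[i]);
    	printf("\n");

    	close(opfd);
    	close(tfmfd);
    	return 0;
    }

With the lazy allocation above, a socket that only ever streams data with MSG_MORE never pays for a result buffer until a digest is actually requested.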
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 08b3ac68952b..f83de99d7d71 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -368,8 +368,6 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
-
- return tx;
} else {
struct page *p_src = P(blocks, disks);
struct page *q_src = Q(blocks, disks);
@@ -424,9 +422,11 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
submit->cb_param = cb_param_orig;
submit->flags = flags_orig;
async_tx_sync_epilog(submit);
-
- return NULL;
+ tx = NULL;
}
+ dmaengine_unmap_put(unmap);
+
+ return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 369999530108..a832426820e8 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -233,6 +233,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
return blkcipher_walk_done(desc, walk, -EINVAL);
}
+ bsize = min(walk->walk_blocksize, n);
+
walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
BLKCIPHER_WALK_DIFF);
if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
@@ -245,7 +247,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
}
}
- bsize = min(walk->walk_blocksize, n);
n = scatterwalk_clamp(&walk->in, n);
n = scatterwalk_clamp(&walk->out, n);
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index c1229614c7e3..8e94e29dc6fc 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -107,10 +107,7 @@ static struct shash_alg alg = {
static int __init crct10dif_mod_init(void)
{
- int ret;
-
- ret = crypto_register_shash(&alg);
- return ret;
+ return crypto_register_shash(&alg);
}
static void __exit crct10dif_mod_fini(void)
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index cf8037a87b2d..0c654e59f215 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -631,9 +631,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out)
static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct shash_desc *desc = cryptd_shash_desc(req);
+
+ desc->tfm = ctx->child;
+ desc->flags = req->base.flags;
- return crypto_shash_import(&rctx->desc, in);
+ return crypto_shash_import(desc, in);
}
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
@@ -733,13 +738,14 @@ static void cryptd_aead_crypt(struct aead_request *req,
rctx = aead_request_ctx(req);
compl = rctx->complete;
+ tfm = crypto_aead_reqtfm(req);
+
if (unlikely(err == -EINPROGRESS))
goto out;
aead_request_set_tfm(req, child);
	err = crypt(req);
out:
- tfm = crypto_aead_reqtfm(req);
ctx = crypto_aead_ctx(tfm);
refcnt = atomic_read(&ctx->refcnt);
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index a55c82dd48ef..6989ba0046df 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -14,13 +14,12 @@
#include <linux/err.h>
#include <linux/delay.h>
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
#include "internal.h"
#define CRYPTO_ENGINE_MAX_QLEN 10
-void crypto_finalize_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, int err);
-
/**
* crypto_pump_requests - dequeue one request from engine queue to process
* @engine: the hardware engine
@@ -34,10 +33,11 @@ static void crypto_pump_requests(struct crypto_engine *engine,
bool in_kthread)
{
struct crypto_async_request *async_req, *backlog;
- struct ablkcipher_request *req;
+ struct ahash_request *hreq;
+ struct ablkcipher_request *breq;
unsigned long flags;
bool was_busy = false;
- int ret;
+ int ret, rtype;
spin_lock_irqsave(&engine->queue_lock, flags);
@@ -47,7 +47,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
/* If another context is idling then defer */
if (engine->idling) {
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
goto out;
}
@@ -58,7 +58,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
/* Only do teardown in the thread */
if (!in_kthread) {
- queue_kthread_work(&engine->kworker,
+ kthread_queue_work(&engine->kworker,
&engine->pump_requests);
goto out;
}
@@ -82,9 +82,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
if (!async_req)
goto out;
- req = ablkcipher_request_cast(async_req);
-
- engine->cur_req = req;
+ engine->cur_req = async_req;
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -95,6 +93,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
spin_unlock_irqrestore(&engine->queue_lock, flags);
+ rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
/* Until here we get the request need to be encrypted successfully */
if (!was_busy && engine->prepare_crypt_hardware) {
ret = engine->prepare_crypt_hardware(engine);
@@ -104,24 +103,55 @@ static void crypto_pump_requests(struct crypto_engine *engine,
}
}
- if (engine->prepare_request) {
- ret = engine->prepare_request(engine, engine->cur_req);
+ switch (rtype) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ hreq = ahash_request_cast(engine->cur_req);
+ if (engine->prepare_hash_request) {
+ ret = engine->prepare_hash_request(engine, hreq);
+ if (ret) {
+ pr_err("failed to prepare request: %d\n", ret);
+ goto req_err;
+ }
+ engine->cur_req_prepared = true;
+ }
+ ret = engine->hash_one_request(engine, hreq);
if (ret) {
- pr_err("failed to prepare request: %d\n", ret);
+ pr_err("failed to hash one request from queue\n");
goto req_err;
}
- engine->cur_req_prepared = true;
- }
-
- ret = engine->crypt_one_request(engine, engine->cur_req);
- if (ret) {
- pr_err("failed to crypt one request from queue\n");
- goto req_err;
+ return;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ breq = ablkcipher_request_cast(engine->cur_req);
+ if (engine->prepare_cipher_request) {
+ ret = engine->prepare_cipher_request(engine, breq);
+ if (ret) {
+ pr_err("failed to prepare request: %d\n", ret);
+ goto req_err;
+ }
+ engine->cur_req_prepared = true;
+ }
+ ret = engine->cipher_one_request(engine, breq);
+ if (ret) {
+ pr_err("failed to cipher one request from queue\n");
+ goto req_err;
+ }
+ return;
+ default:
+ pr_err("failed to prepare request of unknown type\n");
+ return;
}
- return;
req_err:
- crypto_finalize_request(engine, engine->cur_req, ret);
+ switch (rtype) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ hreq = ahash_request_cast(engine->cur_req);
+ crypto_finalize_hash_request(engine, hreq, ret);
+ break;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ breq = ablkcipher_request_cast(engine->cur_req);
+ crypto_finalize_cipher_request(engine, breq, ret);
+ break;
+ }
return;
out:
@@ -137,12 +167,14 @@ static void crypto_pump_work(struct kthread_work *work)
}
/**
- * crypto_transfer_request - transfer the new request into the engine queue
+ * crypto_transfer_cipher_request - transfer the new request into the
+ * enginequeue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
*/
-int crypto_transfer_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, bool need_pump)
+int crypto_transfer_cipher_request(struct crypto_engine *engine,
+ struct ablkcipher_request *req,
+ bool need_pump)
{
unsigned long flags;
int ret;
@@ -157,51 +189,130 @@ int crypto_transfer_request(struct crypto_engine *engine,
ret = ablkcipher_enqueue_request(&engine->queue, req);
if (!engine->busy && need_pump)
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
+
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
+
+/**
+ * crypto_transfer_cipher_request_to_engine - transfer one request to list
+ * into the engine queue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ return crypto_transfer_cipher_request(engine, req, true);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
+
+/**
+ * crypto_transfer_hash_request - transfer the new request into the
+ * enginequeue
+ * @engine: the hardware engine
+ * @req: the request need to be listed into the engine queue
+ */
+int crypto_transfer_hash_request(struct crypto_engine *engine,
+ struct ahash_request *req, bool need_pump)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&engine->queue_lock, flags);
+
+ if (!engine->running) {
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ ret = ahash_enqueue_request(&engine->queue, req);
+
+ if (!engine->busy && need_pump)
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
}
-EXPORT_SYMBOL_GPL(crypto_transfer_request);
+EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);
/**
- * crypto_transfer_request_to_engine - transfer one request to list into the
- * engine queue
+ * crypto_transfer_hash_request_to_engine - transfer one request to list
+ * into the engine queue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
*/
-int crypto_transfer_request_to_engine(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
+ struct ahash_request *req)
{
- return crypto_transfer_request(engine, req, true);
+ return crypto_transfer_hash_request(engine, req, true);
}
-EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
+EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
/**
- * crypto_finalize_request - finalize one request if the request is done
+ * crypto_finalize_cipher_request - finalize one request if the request is done
* @engine: the hardware engine
* @req: the request need to be finalized
* @err: error number
*/
-void crypto_finalize_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, int err)
+void crypto_finalize_cipher_request(struct crypto_engine *engine,
+ struct ablkcipher_request *req, int err)
{
unsigned long flags;
bool finalize_cur_req = false;
int ret;
spin_lock_irqsave(&engine->queue_lock, flags);
- if (engine->cur_req == req)
+ if (engine->cur_req == &req->base)
finalize_cur_req = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
if (finalize_cur_req) {
- if (engine->cur_req_prepared && engine->unprepare_request) {
- ret = engine->unprepare_request(engine, req);
+ if (engine->cur_req_prepared &&
+ engine->unprepare_cipher_request) {
+ ret = engine->unprepare_cipher_request(engine, req);
if (ret)
pr_err("failed to unprepare request\n");
}
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ engine->cur_req = NULL;
+ engine->cur_req_prepared = false;
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+ }
+
+ req->base.complete(&req->base, err);
+
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
+
+/**
+ * crypto_finalize_hash_request - finalize one request if the request is done
+ * @engine: the hardware engine
+ * @req: the request need to be finalized
+ * @err: error number
+ */
+void crypto_finalize_hash_request(struct crypto_engine *engine,
+ struct ahash_request *req, int err)
+{
+ unsigned long flags;
+ bool finalize_cur_req = false;
+ int ret;
+ spin_lock_irqsave(&engine->queue_lock, flags);
+ if (engine->cur_req == &req->base)
+ finalize_cur_req = true;
+ spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+ if (finalize_cur_req) {
+ if (engine->cur_req_prepared &&
+ engine->unprepare_hash_request) {
+ ret = engine->unprepare_hash_request(engine, req);
+ if (ret)
+ pr_err("failed to unprepare request\n");
+ }
spin_lock_irqsave(&engine->queue_lock, flags);
engine->cur_req = NULL;
engine->cur_req_prepared = false;
@@ -210,9 +321,9 @@ void crypto_finalize_request(struct crypto_engine *engine,
req->base.complete(&req->base, err);
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
}
-EXPORT_SYMBOL_GPL(crypto_finalize_request);
+EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
* crypto_engine_start - start the hardware engine
@@ -234,7 +345,7 @@ int crypto_engine_start(struct crypto_engine *engine)
engine->running = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
return 0;
}
@@ -249,7 +360,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_start);
int crypto_engine_stop(struct crypto_engine *engine)
{
unsigned long flags;
- unsigned limit = 500;
+ unsigned int limit = 500;
int ret = 0;
spin_lock_irqsave(&engine->queue_lock, flags);
@@ -311,7 +422,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
spin_lock_init(&engine->queue_lock);
- init_kthread_worker(&engine->kworker);
+ kthread_init_worker(&engine->kworker);
engine->kworker_task = kthread_run(kthread_worker_fn,
&engine->kworker, "%s",
engine->name);
@@ -319,7 +430,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
dev_err(dev, "failed to create crypto request pump task\n");
return NULL;
}
- init_kthread_work(&engine->pump_requests, crypto_pump_work);
+ kthread_init_work(&engine->pump_requests, crypto_pump_work);
if (engine->rt) {
dev_info(dev, "will run requests pump with realtime priority\n");
@@ -344,7 +455,7 @@ int crypto_engine_exit(struct crypto_engine *engine)
if (ret)
return ret;
- flush_kthread_worker(&engine->kworker);
+ kthread_flush_worker(&engine->kworker);
kthread_stop(engine->kworker_task);
return 0;
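
A driver sitting on top of the reworked engine would now set the type-specific hooks instead of the old generic prepare_request/crypt_one_request pair. A rough sketch, with all "myhw" names invented for illustration:

    #include <crypto/engine.h>
    #include <crypto/internal/hash.h>

    struct myhw_dev {				/* hypothetical driver state */
    	struct crypto_engine *engine;
    };

    static int myhw_hash_one_request(struct crypto_engine *engine,
    				 struct ahash_request *req)
    {
    	/* ...program the hardware; on completion (e.g. from the IRQ
    	 * handler) report the result back to the engine: */
    	crypto_finalize_hash_request(engine, req, 0);
    	return 0;
    }

    static int myhw_register_engine(struct device *dev, struct myhw_dev *hw)
    {
    	hw->engine = crypto_engine_alloc_init(dev, true);
    	if (!hw->engine)
    		return -ENOMEM;

    	hw->engine->hash_one_request = myhw_hash_one_request;
    	return crypto_engine_start(hw->engine);
    }

    /* The ahash .digest/.final entry points then just enqueue:
     *	return crypto_transfer_hash_request_to_engine(hw->engine, req);
     * and the engine's pump dispatches on crypto_tfm_alg_type() as in
     * the crypto_pump_requests() hunk above. */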
diff --git a/crypto/drbg.c b/crypto/drbg.c
index f752da3a7c75..fb33f7d3b052 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1178,12 +1178,16 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
goto err;
drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
- if (!drbg->Vbuf)
+ if (!drbg->Vbuf) {
+ ret = -ENOMEM;
goto fini;
+ }
drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1);
drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
- if (!drbg->Cbuf)
+ if (!drbg->Cbuf) {
+ ret = -ENOMEM;
goto fini;
+ }
drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1);
/* scratchpad is only generated for CTR and Hash */
if (drbg->core->flags & DRBG_HMAC)
@@ -1199,8 +1203,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
if (0 < sb_size) {
drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL);
- if (!drbg->scratchpadbuf)
+ if (!drbg->scratchpadbuf) {
+ ret = -ENOMEM;
goto fini;
+ }
drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
}
@@ -1917,6 +1923,8 @@ static inline int __init drbg_healthcheck_sanity(void)
return -ENOMEM;
mutex_init(&drbg->drbg_mutex);
+ drbg->core = &drbg_cores[coreref];
+ drbg->reseed_threshold = drbg_max_requests(drbg);
/*
* if the following tests fail, it is likely that there is a buffer
@@ -1926,12 +1934,6 @@ static inline int __init drbg_healthcheck_sanity(void)
* grave bug.
*/
- /* get a valid instance of DRBG for following tests */
- ret = drbg_instantiate(drbg, NULL, coreref, pr);
- if (ret) {
- rc = ret;
- goto outbuf;
- }
max_addtllen = drbg_max_addtl(drbg);
max_request_bytes = drbg_max_request_bytes(drbg);
drbg_string_fill(&addtl, buf, max_addtllen + 1);
@@ -1941,10 +1943,9 @@ static inline int __init drbg_healthcheck_sanity(void)
/* overflow max_bits */
len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
BUG_ON(0 < len);
- drbg_uninstantiate(drbg);
/* overflow max addtllen with personalization string */
- ret = drbg_instantiate(drbg, &addtl, coreref, pr);
+ ret = drbg_seed(drbg, &addtl, false);
BUG_ON(0 == ret);
/* all tests passed */
rc = 0;
@@ -1952,9 +1953,7 @@ static inline int __init drbg_healthcheck_sanity(void)
pr_devel("DRBG: Sanity tests for failure code paths successfully "
"completed\n");
- drbg_uninstantiate(drbg);
-outbuf:
- kzfree(drbg);
+ kfree(drbg);
return rc;
}
@@ -2006,7 +2005,7 @@ static int __init drbg_init(void)
{
unsigned int i = 0; /* pointer to drbg_algs */
unsigned int j = 0; /* pointer to drbg_cores */
- int ret = -EFAULT;
+ int ret;
ret = drbg_healthcheck_sanity();
if (ret)
@@ -2016,7 +2015,7 @@ static int __init drbg_init(void)
pr_info("DRBG: Cannot register all DRBG types"
"(slots needed: %zu, slots available: %zu)\n",
ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs));
- return ret;
+ return -EFAULT;
}
/*
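
The drbg_alloc_state fix is an instance of a common error-path bug: ret still holds its previous value when a later allocation fails, so falling through to the cleanup label returns the wrong code. Distilled into a standalone kernel-style sketch (struct demo_state and DEMO_LEN are made up for illustration):

    #include <linux/slab.h>

    #define DEMO_LEN 64			/* illustrative size only */

    struct demo_state {			/* made-up container */
    	void *a;
    	void *b;
    };

    static int demo_alloc_state(struct demo_state *s)
    {
    	int ret = 0;

    	s->a = kmalloc(DEMO_LEN, GFP_KERNEL);
    	if (!s->a) {
    		ret = -ENOMEM;	/* without this, stale ret leaks out */
    		goto fini;
    	}

    	s->b = kmalloc(DEMO_LEN, GFP_KERNEL);
    	if (!s->b) {
    		ret = -ENOMEM;
    		goto fini;
    	}

    	return 0;

    fini:
    	kfree(s->a);
    	s->a = NULL;
    	return ret;
    }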
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 1b01fe98e91f..e3d889b122e0 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -1,8 +1,8 @@
/*
* echainiv: Encrypted Chain IV Generator
*
- * This generator generates an IV based on a sequence number by xoring it
- * with a salt and then encrypting it with the same key as used to encrypt
+ * This generator generates an IV based on a sequence number by multiplying
+ * it with a salt and then encrypting it with the same key as used to encrypt
* the plain text. This algorithm requires that the block size be equal
* to the IV size. It is mainly useful for CBC.
*
@@ -24,81 +24,17 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
+#include <linux/slab.h>
#include <linux/string.h>
-#define MAX_IV_SIZE 16
-
-static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
-
-/* We don't care if we get preempted and read/write IVs from the next CPU. */
-static void echainiv_read_iv(u8 *dst, unsigned size)
-{
- u32 *a = (u32 *)dst;
- u32 __percpu *b = echainiv_iv;
-
- for (; size >= 4; size -= 4) {
- *a++ = this_cpu_read(*b);
- b++;
- }
-}
-
-static void echainiv_write_iv(const u8 *src, unsigned size)
-{
- const u32 *a = (const u32 *)src;
- u32 __percpu *b = echainiv_iv;
-
- for (; size >= 4; size -= 4) {
- this_cpu_write(*b, *a);
- a++;
- b++;
- }
-}
-
-static void echainiv_encrypt_complete2(struct aead_request *req, int err)
-{
- struct aead_request *subreq = aead_request_ctx(req);
- struct crypto_aead *geniv;
- unsigned int ivsize;
-
- if (err == -EINPROGRESS)
- return;
-
- if (err)
- goto out;
-
- geniv = crypto_aead_reqtfm(req);
- ivsize = crypto_aead_ivsize(geniv);
-
- echainiv_write_iv(subreq->iv, ivsize);
-
- if (req->iv != subreq->iv)
- memcpy(req->iv, subreq->iv, ivsize);
-
-out:
- if (req->iv != subreq->iv)
- kzfree(subreq->iv);
-}
-
-static void echainiv_encrypt_complete(struct crypto_async_request *base,
- int err)
-{
- struct aead_request *req = base->data;
-
- echainiv_encrypt_complete2(req, err);
- aead_request_complete(req, err);
-}
-
static int echainiv_encrypt(struct aead_request *req)
{
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
struct aead_request *subreq = aead_request_ctx(req);
- crypto_completion_t compl;
- void *data;
+ __be64 nseqno;
+ u64 seqno;
u8 *info;
unsigned int ivsize = crypto_aead_ivsize(geniv);
int err;
@@ -108,8 +44,6 @@ static int echainiv_encrypt(struct aead_request *req)
aead_request_set_tfm(subreq, ctx->child);
- compl = echainiv_encrypt_complete;
- data = req;
info = req->iv;
if (req->src != req->dst) {
@@ -127,29 +61,30 @@ static int echainiv_encrypt(struct aead_request *req)
return err;
}
- if (unlikely(!IS_ALIGNED((unsigned long)info,
- crypto_aead_alignmask(geniv) + 1))) {
- info = kmalloc(ivsize, req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
- GFP_ATOMIC);
- if (!info)
- return -ENOMEM;
-
- memcpy(info, req->iv, ivsize);
- }
-
- aead_request_set_callback(subreq, req->base.flags, compl, data);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
aead_request_set_crypt(subreq, req->dst, req->dst,
req->cryptlen, info);
aead_request_set_ad(subreq, req->assoclen);
- crypto_xor(info, ctx->salt, ivsize);
+ memcpy(&nseqno, info + ivsize - 8, 8);
+ seqno = be64_to_cpu(nseqno);
+ memset(info, 0, ivsize);
+
scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
- echainiv_read_iv(info, ivsize);
- err = crypto_aead_encrypt(subreq);
- echainiv_encrypt_complete2(req, err);
- return err;
+ do {
+ u64 a;
+
+ memcpy(&a, ctx->salt + ivsize - 8, 8);
+
+ a |= 1;
+ a *= seqno;
+
+ memcpy(info + ivsize - 8, &a, 8);
+ } while ((ivsize -= 8));
+
+ return crypto_aead_encrypt(subreq);
}
static int echainiv_decrypt(struct aead_request *req)
@@ -196,8 +131,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
alg = crypto_spawn_aead_alg(spawn);
err = -EINVAL;
- if (inst->alg.ivsize & (sizeof(u32) - 1) ||
- inst->alg.ivsize > MAX_IV_SIZE)
+ if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
goto free_inst;
inst->alg.encrypt = echainiv_encrypt;
@@ -206,7 +140,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
inst->alg.init = aead_init_geniv;
inst->alg.exit = aead_exit_geniv;
- inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
inst->alg.base.cra_ctxsize += inst->alg.ivsize;
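
The rewritten generator no longer keeps per-CPU IV state; it derives the IV arithmetically from the sequence number and the salt, 64 bits at a time. A standalone restatement of the loop above (my reading of the patch, as a userspace function; ivsize must be a nonzero multiple of 8, as the create-time check now enforces):

    #include <stdint.h>
    #include <string.h>

    static void echainiv_style_iv(uint8_t *iv, unsigned int ivsize,
    			      const uint8_t *salt, uint64_t seqno)
    {
    	do {
    		uint64_t a;

    		memcpy(&a, salt + ivsize - 8, 8);
    		a |= 1;		/* odd multiplier => invertible mod 2^64 */
    		a *= seqno;
    		memcpy(iv + ivsize - 8, &a, 8);
    	} while ((ivsize -= 8));
    }

Forcing each salt word odd with a |= 1 makes the multiplication modulo 2^64 a bijection, so distinct sequence numbers cannot collide on the same IV word for a given salt.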
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 70a892e87ccb..f624ac98c94e 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -117,7 +117,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
struct crypto_skcipher *ctr = ctx->ctr;
struct {
be128 hash;
- u8 iv[8];
+ u8 iv[16];
struct crypto_gcm_setkey_result result;
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index bac70995e064..12ad3e3a84e3 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -14,24 +14,13 @@
#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
+#include <crypto/ghash.h>
#include <crypto/internal/hash.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#define GHASH_BLOCK_SIZE 16
-#define GHASH_DIGEST_SIZE 16
-
-struct ghash_ctx {
- struct gf128mul_4k *gf128;
-};
-
-struct ghash_desc_ctx {
- u8 buffer[GHASH_BLOCK_SIZE];
- u32 bytes;
-};
-
static int ghash_init(struct shash_desc *desc)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index 86fb59b109a9..94ee44acd465 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -612,12 +612,7 @@ EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
int ahash_mcryptd_digest(struct ahash_request *desc)
{
- int err;
-
- err = crypto_ahash_init(desc) ?:
- ahash_mcryptd_finup(desc);
-
- return err;
+ return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
}
int ahash_mcryptd_update(struct ahash_request *desc)
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 877019a6d3ea..8baab4307f7b 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -298,41 +298,48 @@ static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ unsigned int dst_len;
unsigned int pos;
-
- if (err == -EOVERFLOW)
- /* Decrypted value had no leading 0 byte */
- err = -EINVAL;
+ u8 *out_buf;
if (err)
goto done;
- if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
- err = -EINVAL;
+ err = -EINVAL;
+ dst_len = req_ctx->child_req.dst_len;
+ if (dst_len < ctx->key_size - 1)
goto done;
+
+ out_buf = req_ctx->out_buf;
+ if (dst_len == ctx->key_size) {
+ if (out_buf[0] != 0x00)
+ /* Decrypted value had no leading 0 byte */
+ goto done;
+
+ dst_len--;
+ out_buf++;
}
- if (req_ctx->out_buf[0] != 0x02) {
- err = -EINVAL;
+ if (out_buf[0] != 0x02)
goto done;
- }
- for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
- if (req_ctx->out_buf[pos] == 0x00)
+
+ for (pos = 1; pos < dst_len; pos++)
+ if (out_buf[pos] == 0x00)
break;
- if (pos < 9 || pos == req_ctx->child_req.dst_len) {
- err = -EINVAL;
+ if (pos < 9 || pos == dst_len)
goto done;
- }
pos++;
- if (req->dst_len < req_ctx->child_req.dst_len - pos)
+ err = 0;
+
+ if (req->dst_len < dst_len - pos)
err = -EOVERFLOW;
- req->dst_len = req_ctx->child_req.dst_len - pos;
+ req->dst_len = dst_len - pos;
if (!err)
sg_copy_from_buffer(req->dst,
sg_nents_for_len(req->dst, req->dst_len),
- req_ctx->out_buf + pos, req->dst_len);
+ out_buf + pos, req->dst_len);
done:
kzfree(req_ctx->out_buf);
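
The reworked completion handler is plain EME-PKCS1-v1_5 unpadding, now tolerant of the RSA primitive returning the value either with or without its leading zero byte. The same parse as a self-contained sketch (not the kernel function itself):

    #include <stddef.h>

    /* Parse 0x00 || 0x02 || PS (>= 8 nonzero bytes) || 0x00 || M, where
     * the leading 0x00 may already have been stripped. Returns a pointer
     * to M and stores its length, or NULL on malformed padding. */
    static const unsigned char *pkcs1_v15_unpad(const unsigned char *buf,
    					    size_t len, size_t key_size,
    					    size_t *msg_len)
    {
    	size_t pos;

    	if (len < key_size - 1)
    		return NULL;

    	if (len == key_size) {		/* leading zero still present */
    		if (buf[0] != 0x00)
    			return NULL;
    		buf++;
    		len--;
    	}

    	if (buf[0] != 0x02)
    		return NULL;

    	for (pos = 1; pos < len; pos++)
    		if (buf[pos] == 0x00)
    			break;

    	/* PS is bytes 1..pos-1, so pos < 9 means PS shorter than 8 */
    	if (pos < 9 || pos == len)
    		return NULL;

    	*msg_len = len - pos - 1;
    	return buf + pos + 1;
    }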
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index 4df6451e7543..0b66dc824606 100644
--- a/crypto/rsa_helper.c
+++ b/crypto/rsa_helper.c
@@ -35,8 +35,8 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
n_sz--;
}
- /* In FIPS mode only allow key size 2K & 3K */
- if (n_sz != 256 && n_sz != 384) {
+ /* In FIPS mode only allow key size 2K and higher */
+ if (n_sz < 256) {
pr_err("RSA: key size not allowed in FIPS mode\n");
return -EINVAL;
}
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 62264397a2d2..7e8ed96236ce 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -24,14 +24,14 @@
#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
static const u64 keccakf_rndc[24] = {
- 0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
- 0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
- 0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
- 0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
- 0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
- 0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
- 0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
- 0x8000000000008080, 0x0000000080000001, 0x8000000080008008
+ 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL,
+ 0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL,
+ 0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL,
+ 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
+ 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL,
+ 0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
+ 0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL,
+ 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
static const int keccakf_rotc[24] = {
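
The ULL suffixes are not cosmetic: on a 32-bit target with an older compiler in gnu89 mode, an unsuffixed constant wider than unsigned long draws a warning along the lines of "integer constant is too large for 'long' type" (exact wording varies by compiler). A two-line illustration:

    /* On 32-bit, the unsuffixed form may warn; the suffix makes the
     * 64-bit type explicit everywhere. */
    static const unsigned long long rc_bad  = 0x8000000080008000;    /* may warn */
    static const unsigned long long rc_good = 0x8000000080008000ULL; /* portable */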
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 5c9d5a5e7b65..62dffa0028ac 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -209,16 +209,19 @@ static int ahash_partial_update(struct ahash_request **preq,
char *state;
struct ahash_request *req;
int statesize, ret = -EINVAL;
+ const char guard[] = { 0x00, 0xba, 0xad, 0x00 };
req = *preq;
statesize = crypto_ahash_statesize(
crypto_ahash_reqtfm(req));
- state = kmalloc(statesize, GFP_KERNEL);
+ state = kmalloc(statesize + sizeof(guard), GFP_KERNEL);
if (!state) {
pr_err("alt: hash: Failed to alloc state for %s\n", algo);
goto out_nostate;
}
+ memcpy(state + statesize, guard, sizeof(guard));
ret = crypto_ahash_export(req, state);
+ WARN_ON(memcmp(state + statesize, guard, sizeof(guard)));
if (ret) {
pr_err("alt: hash: Failed to export() for %s\n", algo);
goto out;
@@ -665,7 +668,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
memcpy(key, template[i].key, template[i].klen);
ret = crypto_aead_setkey(tfm, key, template[i].klen);
- if (!ret == template[i].fail) {
+ if (template[i].fail == !ret) {
pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n",
d, j, algo, crypto_aead_get_flags(tfm));
goto out;
@@ -770,7 +773,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
memcpy(key, template[i].key, template[i].klen);
ret = crypto_aead_setkey(tfm, key, template[i].klen);
- if (!ret == template[i].fail) {
+ if (template[i].fail == !ret) {
pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n",
d, j, algo, crypto_aead_get_flags(tfm));
goto out;
@@ -1008,6 +1011,9 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
if (template[i].np)
continue;
+ if (fips_enabled && template[i].fips_skip)
+ continue;
+
j++;
ret = -EINVAL;
@@ -1023,7 +1029,7 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
ret = crypto_cipher_setkey(tfm, template[i].key,
template[i].klen);
- if (!ret == template[i].fail) {
+ if (template[i].fail == !ret) {
printk(KERN_ERR "alg: cipher: setkey failed "
"on test %d for %s: flags=%x\n", j,
algo, crypto_cipher_get_flags(tfm));
@@ -1112,6 +1118,9 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
if (template[i].np && !template[i].also_non_np)
continue;
+ if (fips_enabled && template[i].fips_skip)
+ continue;
+
if (template[i].iv)
memcpy(iv, template[i].iv, ivsize);
else
@@ -1133,7 +1142,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
ret = crypto_skcipher_setkey(tfm, template[i].key,
template[i].klen);
- if (!ret == template[i].fail) {
+ if (template[i].fail == !ret) {
pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
d, j, algo, crypto_skcipher_get_flags(tfm));
goto out;
@@ -1198,6 +1207,9 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
if (!template[i].np)
continue;
+ if (fips_enabled && template[i].fips_skip)
+ continue;
+
if (template[i].iv)
memcpy(iv, template[i].iv, ivsize);
else
@@ -1211,7 +1223,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
ret = crypto_skcipher_setkey(tfm, template[i].key,
template[i].klen);
- if (!ret == template[i].fail) {
+ if (template[i].fail == !ret) {
pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
d, j, algo, crypto_skcipher_get_flags(tfm));
goto out;
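
The guard bytes added around the exported hash state are a cheap canary for catching drivers whose export() writes past the statesize they advertise. The same pattern in isolation (a userspace sketch, not the kernel helper):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static const unsigned char guard[] = { 0x00, 0xba, 0xad, 0x00 };

    /* Call a state-exporting function with guard bytes appended to the
     * buffer, then verify the guard survived. */
    static int check_export(void (*export)(void *state), size_t statesize)
    {
    	unsigned char *state = malloc(statesize + sizeof(guard));
    	int bad;

    	if (!state)
    		return -1;

    	memcpy(state + statesize, guard, sizeof(guard));
    	export(state);
    	bad = memcmp(state + statesize, guard, sizeof(guard)) != 0;
    	if (bad)
    		fprintf(stderr, "export wrote past its state size\n");

    	free(state);
    	return bad ? -1 : 0;
    }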
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index acb6bbff781a..e64a4ef9d8ca 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -59,6 +59,7 @@ struct hash_testvec {
* @tap: How to distribute data in @np SGs
* @also_non_np: if set to 1, the test will be also done without
* splitting data in @np SGs
+ * @fips_skip: Skip the test vector in FIPS mode
*/
struct cipher_testvec {
@@ -75,6 +76,7 @@ struct cipher_testvec {
unsigned char klen;
unsigned short ilen;
unsigned short rlen;
+ bool fips_skip;
};
struct aead_testvec {
@@ -18224,6 +18226,7 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = {
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.klen = 32,
+ .fips_skip = 1,
.iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.input = "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -18566,6 +18569,7 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
"\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.klen = 32,
+ .fips_skip = 1,
.iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
.input = "\x91\x7c\xf6\x9e\xbd\x68\xb2\xec"
diff --git a/crypto/xor.c b/crypto/xor.c
index 35d6b3adf230..263af9fb45ea 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -24,6 +24,10 @@
#include <linux/preempt.h>
#include <asm/xor.h>
+#ifndef XOR_SELECT_TEMPLATE
+#define XOR_SELECT_TEMPLATE(x) (x)
+#endif
+
/* The xor routines to use. */
static struct xor_block_template *active_template;
@@ -109,6 +113,15 @@ calibrate_xor_blocks(void)
void *b1, *b2;
struct xor_block_template *f, *fastest;
+ fastest = XOR_SELECT_TEMPLATE(NULL);
+
+ if (fastest) {
+ printk(KERN_INFO "xor: automatically using best "
+ "checksumming function %-10s\n",
+ fastest->name);
+ goto out;
+ }
+
/*
* Note: Since the memory is not actually used for _anything_ but to
* test the XOR speed, we don't really want kmemcheck to warn about
@@ -126,36 +139,22 @@ calibrate_xor_blocks(void)
* all the possible functions, just test the best one
*/
- fastest = NULL;
-
-#ifdef XOR_SELECT_TEMPLATE
- fastest = XOR_SELECT_TEMPLATE(fastest);
-#endif
-
#define xor_speed(templ) do_xor_speed((templ), b1, b2)
- if (fastest) {
- printk(KERN_INFO "xor: automatically using best "
- "checksumming function:\n");
- xor_speed(fastest);
- goto out;
- } else {
- printk(KERN_INFO "xor: measuring software checksum speed\n");
- XOR_TRY_TEMPLATES;
- fastest = template_list;
- for (f = fastest; f; f = f->next)
- if (f->speed > fastest->speed)
- fastest = f;
- }
+ printk(KERN_INFO "xor: measuring software checksum speed\n");
+ XOR_TRY_TEMPLATES;
+ fastest = template_list;
+ for (f = fastest; f; f = f->next)
+ if (f->speed > fastest->speed)
+ fastest = f;
printk(KERN_INFO "xor: using function: %s (%d.%03d MB/sec)\n",
fastest->name, fastest->speed / 1000, fastest->speed % 1000);
#undef xor_speed
- out:
free_pages((unsigned long)b1, 2);
-
+out:
active_template = fastest;
return 0;
}
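
Defining an identity fallback for XOR_SELECT_TEMPLATE lets calibrate_xor_blocks() call it unconditionally instead of guarding the call site with #ifdef, which is what makes the early-exit rewrite above possible. The header idiom in miniature (names are illustrative; the real override lives in each arch's <asm/xor.h>):

    #include <stddef.h>

    struct xor_block_template;	/* opaque here; defined in <asm/xor.h> */

    #ifndef XOR_SELECT_TEMPLATE
    #define XOR_SELECT_TEMPLATE(x) (x)	/* identity: no arch preference */
    #endif

    static struct xor_block_template *pick_template(void)
    {
    	struct xor_block_template *fastest = XOR_SELECT_TEMPLATE(NULL);

    	if (fastest)
    		return fastest;	/* arch choice: skip benchmarking */

    	/* ...otherwise fall through to measuring every template, as
    	 * calibrate_xor_blocks() does above... */
    	return NULL;
    }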
diff --git a/crypto/xts.c b/crypto/xts.c
index 26ba5833b994..305343f22a02 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -5,7 +5,7 @@
*
* Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
*
- * Based om ecb.c
+ * Based on ecb.c
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it