author     Daniel Vetter <daniel.vetter@ffwll.ch>    2017-10-03 11:09:16 +0200
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2017-10-03 11:09:16 +0200
commit     0d3c24e936feefeca854073ccb40613cd6eba9a9 (patch)
tree       1f675397b924846740b0931b066ddce6f3d7eb3d /crypto
parent     1af0838de60e723cb02253ecc9b555c30f8f6a6f (diff)
parent     ebec44a2456fbe5fe18aae88f6010f6878f0cb4a (diff)
Merge airlied/drm-next into drm-misc-next
Just catching up with upstream.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig            |   2
-rw-r--r--  crypto/af_alg.c           | 691
-rw-r--r--  crypto/ahash.c            |  29
-rw-r--r--  crypto/algapi.c           |  25
-rw-r--r--  crypto/algif_aead.c       | 864
-rw-r--r--  crypto/algif_skcipher.c   | 829
-rw-r--r--  crypto/chacha20_generic.c |   9
-rw-r--r--  crypto/ctr.c              |   3
-rw-r--r--  crypto/drbg.c             |   8
-rw-r--r--  crypto/ecdh.c             |  51
-rw-r--r--  crypto/pcbc.c             |  12
-rw-r--r--  crypto/rng.c              |   6
-rw-r--r--  crypto/scompress.c        |  55
-rw-r--r--  crypto/serpent_generic.c  |  77
-rw-r--r--  crypto/tcrypt.c           |   8
-rw-r--r--  crypto/testmgr.h          |   7
16 files changed, 1246 insertions, 1430 deletions
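
For orientation, below is a minimal user-space sketch of the AF_ALG AEAD calling convention that the reworked algif_aead in this diff serves: the TX data (AAD || plaintext) is submitted via sendmsg() together with the operation type, AAD length and IV as CMSG entries, and the result (AAD || ciphertext || tag) is collected with read()/recvmsg() on the request socket. The constants come from linux/if_alg.h; the gcm(aes) parameters (16-byte key, 12-byte nonce, 16-byte tag), the zeroed demo buffers and all variable names are illustrative assumptions, and error handling is omitted.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",
	};
	unsigned char key[16] = { 0 };	/* demo key only */
	unsigned char iv[12]  = { 0 };	/* GCM nonce */
	unsigned char aad[8]  = { 0 };	/* associated data */
	unsigned char pt[16]  = { 0 };	/* plaintext */
	unsigned char out[8 + 16 + 16];	/* AAD || CT || tag */
	char cbuf[CMSG_SPACE(4) + CMSG_SPACE(4) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv))] = { 0 };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	struct af_alg_iv *alg_iv;
	struct iovec iov[2];
	int tfmfd, opfd;

	/* Bind a transform socket to the AEAD algorithm and set the key. */
	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);

	/* accept() yields the per-request socket used for sendmsg/recvmsg. */
	opfd = accept(tfmfd, NULL, 0);

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	/* ALG_SET_OP selects the cipher direction (see af_alg_sendmsg). */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	/* Length of the AAD prefix in the TX data. */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_AEAD_ASSOCLEN;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = sizeof(aad);

	/* IV; ivlen must match the cipher's IV size or sendmsg fails. */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + sizeof(iv));
	alg_iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	alg_iv->ivlen = sizeof(iv);
	memcpy(alg_iv->iv, iv, sizeof(iv));

	/* TX SGL: AAD || PT, tracked by the kernel until recvmsg. */
	iov[0].iov_base = aad;
	iov[0].iov_len  = sizeof(aad);
	iov[1].iov_base = pt;
	iov[1].iov_len  = sizeof(pt);
	msg.msg_iov = iov;
	msg.msg_iovlen = 2;
	sendmsg(opfd, &msg, 0);

	/* RX SGL: AAD || CT || tag is produced by the recvmsg path. */
	read(opfd, out, sizeof(out));

	close(opfd);
	close(tfmfd);
	return 0;
}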
diff --git a/crypto/Kconfig b/crypto/Kconfig index caa770e535a2..0a121f9ddf8e 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1753,6 +1753,8 @@ config CRYPTO_USER_API_AEAD tristate "User-space interface for AEAD cipher algorithms" depends on NET select CRYPTO_AEAD + select CRYPTO_BLKCIPHER + select CRYPTO_NULL select CRYPTO_USER_API help This option enables the user-spaces interface for AEAD diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 92a3d540d920..337cf382718e 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -21,6 +21,7 @@ #include <linux/module.h> #include <linux/net.h> #include <linux/rwsem.h> +#include <linux/sched/signal.h> #include <linux/security.h> struct alg_type_list { @@ -507,6 +508,696 @@ void af_alg_complete(struct crypto_async_request *req, int err) } EXPORT_SYMBOL_GPL(af_alg_complete); +/** + * af_alg_alloc_tsgl - allocate the TX SGL + * + * @sk socket of connection to user space + * @return: 0 upon success, < 0 upon error + */ +int af_alg_alloc_tsgl(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + struct af_alg_tsgl *sgl; + struct scatterlist *sg = NULL; + + sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list); + if (!list_empty(&ctx->tsgl_list)) + sg = sgl->sg; + + if (!sg || sgl->cur >= MAX_SGL_ENTS) { + sgl = sock_kmalloc(sk, sizeof(*sgl) + + sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), + GFP_KERNEL); + if (!sgl) + return -ENOMEM; + + sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); + sgl->cur = 0; + + if (sg) + sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); + + list_add_tail(&sgl->list, &ctx->tsgl_list); + } + + return 0; +} +EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl); + +/** + * aead_count_tsgl - Count number of TX SG entries + * + * The counting starts from the beginning of the SGL to @bytes. If + * an offset is provided, the counting of the SG entries starts at the offset. + * + * @sk socket of connection to user space + * @bytes Count the number of SG entries holding given number of bytes. + * @offset Start the counting of SG entries from the given offset. + * @return Number of TX SG entries found given the constraints + */ +unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + struct af_alg_tsgl *sgl, *tmp; + unsigned int i; + unsigned int sgl_count = 0; + + if (!bytes) + return 0; + + list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) { + struct scatterlist *sg = sgl->sg; + + for (i = 0; i < sgl->cur; i++) { + size_t bytes_count; + + /* Skip offset */ + if (offset >= sg[i].length) { + offset -= sg[i].length; + bytes -= sg[i].length; + continue; + } + + bytes_count = sg[i].length - offset; + + offset = 0; + sgl_count++; + + /* If we have seen requested number of bytes, stop */ + if (bytes_count >= bytes) + return sgl_count; + + bytes -= bytes_count; + } + } + + return sgl_count; +} +EXPORT_SYMBOL_GPL(af_alg_count_tsgl); + +/** + * aead_pull_tsgl - Release the specified buffers from TX SGL + * + * If @dst is non-null, reassign the pages to dst. The caller must release + * the pages. If @dst_offset is given only reassign the pages to @dst starting + * at the @dst_offset (byte). The caller must ensure that @dst is large + * enough (e.g. by using af_alg_count_tsgl with the same offset). + * + * @sk socket of connection to user space + * @used Number of bytes to pull from TX SGL + * @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The + * caller must release the buffers in dst. 
+ * @dst_offset Reassign the TX SGL from given offset. All buffers before + * reaching the offset is released. + */ +void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, + size_t dst_offset) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + struct af_alg_tsgl *sgl; + struct scatterlist *sg; + unsigned int i, j = 0; + + while (!list_empty(&ctx->tsgl_list)) { + sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, + list); + sg = sgl->sg; + + for (i = 0; i < sgl->cur; i++) { + size_t plen = min_t(size_t, used, sg[i].length); + struct page *page = sg_page(sg + i); + + if (!page) + continue; + + /* + * Assumption: caller created af_alg_count_tsgl(len) + * SG entries in dst. + */ + if (dst) { + if (dst_offset >= plen) { + /* discard page before offset */ + dst_offset -= plen; + } else { + /* reassign page to dst after offset */ + get_page(page); + sg_set_page(dst + j, page, + plen - dst_offset, + sg[i].offset + dst_offset); + dst_offset = 0; + j++; + } + } + + sg[i].length -= plen; + sg[i].offset += plen; + + used -= plen; + ctx->used -= plen; + + if (sg[i].length) + return; + + put_page(page); + sg_assign_page(sg + i, NULL); + } + + list_del(&sgl->list); + sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) * + (MAX_SGL_ENTS + 1)); + } + + if (!ctx->used) + ctx->merge = 0; +} +EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); + +/** + * af_alg_free_areq_sgls - Release TX and RX SGLs of the request + * + * @areq Request holding the TX and RX SGL + */ +void af_alg_free_areq_sgls(struct af_alg_async_req *areq) +{ + struct sock *sk = areq->sk; + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + struct af_alg_rsgl *rsgl, *tmp; + struct scatterlist *tsgl; + struct scatterlist *sg; + unsigned int i; + + list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) { + ctx->rcvused -= rsgl->sg_num_bytes; + af_alg_free_sg(&rsgl->sgl); + list_del(&rsgl->list); + if (rsgl != &areq->first_rsgl) + sock_kfree_s(sk, rsgl, sizeof(*rsgl)); + } + + tsgl = areq->tsgl; + for_each_sg(tsgl, sg, areq->tsgl_entries, i) { + if (!sg_page(sg)) + continue; + put_page(sg_page(sg)); + } + + if (areq->tsgl && areq->tsgl_entries) + sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); +} +EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls); + +/** + * af_alg_wait_for_wmem - wait for availability of writable memory + * + * @sk socket of connection to user space + * @flags If MSG_DONTWAIT is set, then only report if function would sleep + * @return 0 when writable memory is available, < 0 upon error + */ +int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + int err = -ERESTARTSYS; + long timeout; + + if (flags & MSG_DONTWAIT) + return -EAGAIN; + + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); + + add_wait_queue(sk_sleep(sk), &wait); + for (;;) { + if (signal_pending(current)) + break; + timeout = MAX_SCHEDULE_TIMEOUT; + if (sk_wait_event(sk, &timeout, af_alg_writable(sk), &wait)) { + err = 0; + break; + } + } + remove_wait_queue(sk_sleep(sk), &wait); + + return err; +} +EXPORT_SYMBOL_GPL(af_alg_wait_for_wmem); + +/** + * af_alg_wmem_wakeup - wakeup caller when writable memory is available + * + * @sk socket of connection to user space + */ +void af_alg_wmem_wakeup(struct sock *sk) +{ + struct socket_wq *wq; + + if (!af_alg_writable(sk)) + return; + + rcu_read_lock(); + wq = rcu_dereference(sk->sk_wq); + if (skwq_has_sleeper(wq)) + wake_up_interruptible_sync_poll(&wq->wait, POLLIN | + POLLRDNORM | + 
POLLRDBAND); + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); + +/** + * af_alg_wait_for_data - wait for availability of TX data + * + * @sk socket of connection to user space + * @flags If MSG_DONTWAIT is set, then only report if function would sleep + * @return 0 when writable memory is available, < 0 upon error + */ +int af_alg_wait_for_data(struct sock *sk, unsigned flags) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + long timeout; + int err = -ERESTARTSYS; + + if (flags & MSG_DONTWAIT) + return -EAGAIN; + + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); + + add_wait_queue(sk_sleep(sk), &wait); + for (;;) { + if (signal_pending(current)) + break; + timeout = MAX_SCHEDULE_TIMEOUT; + if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more), + &wait)) { + err = 0; + break; + } + } + remove_wait_queue(sk_sleep(sk), &wait); + + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); + + return err; +} +EXPORT_SYMBOL_GPL(af_alg_wait_for_data); + +/** + * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel + * + * @sk socket of connection to user space + */ + +void af_alg_data_wakeup(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + struct socket_wq *wq; + + if (!ctx->used) + return; + + rcu_read_lock(); + wq = rcu_dereference(sk->sk_wq); + if (skwq_has_sleeper(wq)) + wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | + POLLRDNORM | + POLLRDBAND); + sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(af_alg_data_wakeup); + +/** + * af_alg_sendmsg - implementation of sendmsg system call handler + * + * The sendmsg system call handler obtains the user data and stores it + * in ctx->tsgl_list. This implies allocation of the required numbers of + * struct af_alg_tsgl. + * + * In addition, the ctx is filled with the information sent via CMSG. 
+ * + * @sock socket of connection to user space + * @msg message from user space + * @size size of message from user space + * @ivsize the size of the IV for the cipher operation to verify that the + * user-space-provided IV has the right size + * @return the number of copied data upon success, < 0 upon error + */ +int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, + unsigned int ivsize) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + struct af_alg_tsgl *sgl; + struct af_alg_control con = {}; + long copied = 0; + bool enc = 0; + bool init = 0; + int err = 0; + + if (msg->msg_controllen) { + err = af_alg_cmsg_send(msg, &con); + if (err) + return err; + + init = 1; + switch (con.op) { + case ALG_OP_ENCRYPT: + enc = 1; + break; + case ALG_OP_DECRYPT: + enc = 0; + break; + default: + return -EINVAL; + } + + if (con.iv && con.iv->ivlen != ivsize) + return -EINVAL; + } + + lock_sock(sk); + if (!ctx->more && ctx->used) { + err = -EINVAL; + goto unlock; + } + + if (init) { + ctx->enc = enc; + if (con.iv) + memcpy(ctx->iv, con.iv->iv, ivsize); + + ctx->aead_assoclen = con.aead_assoclen; + } + + while (size) { + struct scatterlist *sg; + size_t len = size; + size_t plen; + + /* use the existing memory in an allocated page */ + if (ctx->merge) { + sgl = list_entry(ctx->tsgl_list.prev, + struct af_alg_tsgl, list); + sg = sgl->sg + sgl->cur - 1; + len = min_t(size_t, len, + PAGE_SIZE - sg->offset - sg->length); + + err = memcpy_from_msg(page_address(sg_page(sg)) + + sg->offset + sg->length, + msg, len); + if (err) + goto unlock; + + sg->length += len; + ctx->merge = (sg->offset + sg->length) & + (PAGE_SIZE - 1); + + ctx->used += len; + copied += len; + size -= len; + continue; + } + + if (!af_alg_writable(sk)) { + err = af_alg_wait_for_wmem(sk, msg->msg_flags); + if (err) + goto unlock; + } + + /* allocate a new page */ + len = min_t(unsigned long, len, af_alg_sndbuf(sk)); + + err = af_alg_alloc_tsgl(sk); + if (err) + goto unlock; + + sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, + list); + sg = sgl->sg; + if (sgl->cur) + sg_unmark_end(sg + sgl->cur - 1); + + do { + unsigned int i = sgl->cur; + + plen = min_t(size_t, len, PAGE_SIZE); + + sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); + if (!sg_page(sg + i)) { + err = -ENOMEM; + goto unlock; + } + + err = memcpy_from_msg(page_address(sg_page(sg + i)), + msg, plen); + if (err) { + __free_page(sg_page(sg + i)); + sg_assign_page(sg + i, NULL); + goto unlock; + } + + sg[i].length = plen; + len -= plen; + ctx->used += plen; + copied += plen; + size -= plen; + sgl->cur++; + } while (len && sgl->cur < MAX_SGL_ENTS); + + if (!size) + sg_mark_end(sg + sgl->cur - 1); + + ctx->merge = plen & (PAGE_SIZE - 1); + } + + err = 0; + + ctx->more = msg->msg_flags & MSG_MORE; + +unlock: + af_alg_data_wakeup(sk); + release_sock(sk); + + return copied ?: err; +} +EXPORT_SYMBOL_GPL(af_alg_sendmsg); + +/** + * af_alg_sendpage - sendpage system call handler + * + * This is a generic implementation of sendpage to fill ctx->tsgl_list. 
+ */ +ssize_t af_alg_sendpage(struct socket *sock, struct page *page, + int offset, size_t size, int flags) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + struct af_alg_tsgl *sgl; + int err = -EINVAL; + + if (flags & MSG_SENDPAGE_NOTLAST) + flags |= MSG_MORE; + + lock_sock(sk); + if (!ctx->more && ctx->used) + goto unlock; + + if (!size) + goto done; + + if (!af_alg_writable(sk)) { + err = af_alg_wait_for_wmem(sk, flags); + if (err) + goto unlock; + } + + err = af_alg_alloc_tsgl(sk); + if (err) + goto unlock; + + ctx->merge = 0; + sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list); + + if (sgl->cur) + sg_unmark_end(sgl->sg + sgl->cur - 1); + + sg_mark_end(sgl->sg + sgl->cur); + + get_page(page); + sg_set_page(sgl->sg + sgl->cur, page, size, offset); + sgl->cur++; + ctx->used += size; + +done: + ctx->more = flags & MSG_MORE; + +unlock: + af_alg_data_wakeup(sk); + release_sock(sk); + + return err ?: size; +} +EXPORT_SYMBOL_GPL(af_alg_sendpage); + +/** + * af_alg_async_cb - AIO callback handler + * + * This handler cleans up the struct af_alg_async_req upon completion of the + * AIO operation. + * + * The number of bytes to be generated with the AIO operation must be set + * in areq->outlen before the AIO callback handler is invoked. + */ +void af_alg_async_cb(struct crypto_async_request *_req, int err) +{ + struct af_alg_async_req *areq = _req->data; + struct sock *sk = areq->sk; + struct kiocb *iocb = areq->iocb; + unsigned int resultlen; + + lock_sock(sk); + + /* Buffer size written by crypto operation. */ + resultlen = areq->outlen; + + af_alg_free_areq_sgls(areq); + sock_kfree_s(sk, areq, areq->areqlen); + __sock_put(sk); + + iocb->ki_complete(iocb, err ? err : resultlen, 0); + + release_sock(sk); +} +EXPORT_SYMBOL_GPL(af_alg_async_cb); + +/** + * af_alg_poll - poll system call handler + */ +unsigned int af_alg_poll(struct file *file, struct socket *sock, + poll_table *wait) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + unsigned int mask; + + sock_poll_wait(file, sk_sleep(sk), wait); + mask = 0; + + if (!ctx->more || ctx->used) + mask |= POLLIN | POLLRDNORM; + + if (af_alg_writable(sk)) + mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + + return mask; +} +EXPORT_SYMBOL_GPL(af_alg_poll); + +/** + * af_alg_alloc_areq - allocate struct af_alg_async_req + * + * @sk socket of connection to user space + * @areqlen size of struct af_alg_async_req + crypto_*_reqsize + * @return allocated data structure or ERR_PTR upon error + */ +struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, + unsigned int areqlen) +{ + struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); + + if (unlikely(!areq)) + return ERR_PTR(-ENOMEM); + + areq->areqlen = areqlen; + areq->sk = sk; + areq->last_rsgl = NULL; + INIT_LIST_HEAD(&areq->rsgl_list); + areq->tsgl = NULL; + areq->tsgl_entries = 0; + + return areq; +} +EXPORT_SYMBOL_GPL(af_alg_alloc_areq); + +/** + * af_alg_get_rsgl - create the RX SGL for the output data from the crypto + * operation + * + * @sk socket of connection to user space + * @msg user space message + * @flags flags used to invoke recvmsg with + * @areq instance of the cryptographic request that will hold the RX SGL + * @maxsize maximum number of bytes to be pulled from user space + * @outlen number of bytes in the RX SGL + * @return 0 on success, < 0 upon error + */ +int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, + 
struct af_alg_async_req *areq, size_t maxsize, + size_t *outlen) +{ + struct alg_sock *ask = alg_sk(sk); + struct af_alg_ctx *ctx = ask->private; + size_t len = 0; + + while (maxsize > len && msg_data_left(msg)) { + struct af_alg_rsgl *rsgl; + size_t seglen; + int err; + + /* limit the amount of readable buffers */ + if (!af_alg_readable(sk)) + break; + + if (!ctx->used) { + err = af_alg_wait_for_data(sk, flags); + if (err) + return err; + } + + seglen = min_t(size_t, (maxsize - len), + msg_data_left(msg)); + + if (list_empty(&areq->rsgl_list)) { + rsgl = &areq->first_rsgl; + } else { + rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL); + if (unlikely(!rsgl)) + return -ENOMEM; + } + + rsgl->sgl.npages = 0; + list_add_tail(&rsgl->list, &areq->rsgl_list); + + /* make one iovec available as scatterlist */ + err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); + if (err < 0) + return err; + + /* chain the new scatterlist with previous one */ + if (areq->last_rsgl) + af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl); + + areq->last_rsgl = rsgl; + len += err; + ctx->rcvused += err; + rsgl->sg_num_bytes = err; + iov_iter_advance(&msg->msg_iter, err); + } + + *outlen = len; + return 0; +} +EXPORT_SYMBOL_GPL(af_alg_get_rsgl); + static int __init af_alg_init(void) { int err = proto_register(&alg_proto, 0); diff --git a/crypto/ahash.c b/crypto/ahash.c index 826cd7ab4d4a..5e8666e6ccae 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -588,6 +588,35 @@ int crypto_unregister_ahash(struct ahash_alg *alg) } EXPORT_SYMBOL_GPL(crypto_unregister_ahash); +int crypto_register_ahashes(struct ahash_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_register_ahash(&algs[i]); + if (ret) + goto err; + } + + return 0; + +err: + for (--i; i >= 0; --i) + crypto_unregister_ahash(&algs[i]); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_register_ahashes); + +void crypto_unregister_ahashes(struct ahash_alg *algs, int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_unregister_ahash(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_unregister_ahashes); + int ahash_register_instance(struct crypto_template *tmpl, struct ahash_instance *inst) { diff --git a/crypto/algapi.c b/crypto/algapi.c index e4cc7615a139..aa699ff6c876 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -975,13 +975,15 @@ void crypto_inc(u8 *a, unsigned int size) } EXPORT_SYMBOL_GPL(crypto_inc); -void __crypto_xor(u8 *dst, const u8 *src, unsigned int len) +void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len) { int relalign = 0; if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) { int size = sizeof(unsigned long); - int d = ((unsigned long)dst ^ (unsigned long)src) & (size - 1); + int d = (((unsigned long)dst ^ (unsigned long)src1) | + ((unsigned long)dst ^ (unsigned long)src2)) & + (size - 1); relalign = d ? 1 << __ffs(d) : size; @@ -992,34 +994,37 @@ void __crypto_xor(u8 *dst, const u8 *src, unsigned int len) * process the remainder of the input using optimal strides. 
*/ while (((unsigned long)dst & (relalign - 1)) && len > 0) { - *dst++ ^= *src++; + *dst++ = *src1++ ^ *src2++; len--; } } while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) { - *(u64 *)dst ^= *(u64 *)src; + *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2; dst += 8; - src += 8; + src1 += 8; + src2 += 8; len -= 8; } while (len >= 4 && !(relalign & 3)) { - *(u32 *)dst ^= *(u32 *)src; + *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2; dst += 4; - src += 4; + src1 += 4; + src2 += 4; len -= 4; } while (len >= 2 && !(relalign & 1)) { - *(u16 *)dst ^= *(u16 *)src; + *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2; dst += 2; - src += 2; + src1 += 2; + src2 += 2; len -= 2; } while (len--) - *dst++ ^= *src++; + *dst++ = *src1++ ^ *src2++; } EXPORT_SYMBOL_GPL(__crypto_xor); diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index be117495eb43..516b38c3a169 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -5,88 +5,56 @@ * * This file provides the user-space API for AEAD ciphers. * - * This file is derived from algif_skcipher.c. - * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. + * + * The following concept of the memory management is used: + * + * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is + * filled by user space with the data submitted via sendpage/sendmsg. Filling + * up the TX SGL does not cause a crypto operation -- the data will only be + * tracked by the kernel. Upon receipt of one recvmsg call, the caller must + * provide a buffer which is tracked with the RX SGL. + * + * During the processing of the recvmsg operation, the cipher request is + * allocated and prepared. As part of the recvmsg operation, the processed + * TX buffers are extracted from the TX SGL into a separate SGL. + * + * After the completion of the crypto operation, the RX SGL and the cipher + * request is released. The extracted TX SGL parts are released together with + * the RX SGL release. 
*/ #include <crypto/internal/aead.h> #include <crypto/scatterwalk.h> #include <crypto/if_alg.h> +#include <crypto/skcipher.h> +#include <crypto/null.h> #include <linux/init.h> #include <linux/list.h> #include <linux/kernel.h> -#include <linux/sched/signal.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/net.h> #include <net/sock.h> -struct aead_sg_list { - unsigned int cur; - struct scatterlist sg[ALG_MAX_PAGES]; -}; - -struct aead_async_rsgl { - struct af_alg_sgl sgl; - struct list_head list; -}; - -struct aead_async_req { - struct scatterlist *tsgl; - struct aead_async_rsgl first_rsgl; - struct list_head list; - struct kiocb *iocb; - struct sock *sk; - unsigned int tsgls; - char iv[]; -}; - struct aead_tfm { struct crypto_aead *aead; bool has_key; + struct crypto_skcipher *null_tfm; }; -struct aead_ctx { - struct aead_sg_list tsgl; - struct aead_async_rsgl first_rsgl; - struct list_head list; - - void *iv; - - struct af_alg_completion completion; - - unsigned long used; - - unsigned int len; - bool more; - bool merge; - bool enc; - - size_t aead_assoclen; - struct aead_request aead_req; -}; - -static inline int aead_sndbuf(struct sock *sk) +static inline bool aead_sufficient_data(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - - return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - - ctx->used, 0); -} - -static inline bool aead_writable(struct sock *sk) -{ - return PAGE_SIZE <= aead_sndbuf(sk); -} - -static inline bool aead_sufficient_data(struct aead_ctx *ctx) -{ - unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req)); + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); + struct af_alg_ctx *ctx = ask->private; + struct aead_tfm *aeadc = pask->private; + struct crypto_aead *tfm = aeadc->aead; + unsigned int as = crypto_aead_authsize(tfm); /* * The minimum amount of memory needed for an AEAD cipher is @@ -95,484 +63,58 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx) return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 
0 : as); } -static void aead_reset_ctx(struct aead_ctx *ctx) -{ - struct aead_sg_list *sgl = &ctx->tsgl; - - sg_init_table(sgl->sg, ALG_MAX_PAGES); - sgl->cur = 0; - ctx->used = 0; - ctx->more = 0; - ctx->merge = 0; -} - -static void aead_put_sgl(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - struct aead_sg_list *sgl = &ctx->tsgl; - struct scatterlist *sg = sgl->sg; - unsigned int i; - - for (i = 0; i < sgl->cur; i++) { - if (!sg_page(sg + i)) - continue; - - put_page(sg_page(sg + i)); - sg_assign_page(sg + i, NULL); - } - aead_reset_ctx(ctx); -} - -static void aead_wmem_wakeup(struct sock *sk) -{ - struct socket_wq *wq; - - if (!aead_writable(sk)) - return; - - rcu_read_lock(); - wq = rcu_dereference(sk->sk_wq); - if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | - POLLRDNORM | - POLLRDBAND); - sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); - rcu_read_unlock(); -} - -static int aead_wait_for_data(struct sock *sk, unsigned flags) -{ - DEFINE_WAIT_FUNC(wait, woken_wake_function); - struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - long timeout; - int err = -ERESTARTSYS; - - if (flags & MSG_DONTWAIT) - return -EAGAIN; - - sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); - add_wait_queue(sk_sleep(sk), &wait); - for (;;) { - if (signal_pending(current)) - break; - timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) { - err = 0; - break; - } - } - remove_wait_queue(sk_sleep(sk), &wait); - - sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); - - return err; -} - -static void aead_data_wakeup(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - struct socket_wq *wq; - - if (ctx->more) - return; - if (!ctx->used) - return; - - rcu_read_lock(); - wq = rcu_dereference(sk->sk_wq); - if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | - POLLRDNORM | - POLLRDBAND); - sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); - rcu_read_unlock(); -} - static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - unsigned ivsize = - crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req)); - struct aead_sg_list *sgl = &ctx->tsgl; - struct af_alg_control con = {}; - long copied = 0; - bool enc = 0; - bool init = 0; - int err = -EINVAL; - - if (msg->msg_controllen) { - err = af_alg_cmsg_send(msg, &con); - if (err) - return err; - - init = 1; - switch (con.op) { - case ALG_OP_ENCRYPT: - enc = 1; - break; - case ALG_OP_DECRYPT: - enc = 0; - break; - default: - return -EINVAL; - } - - if (con.iv && con.iv->ivlen != ivsize) - return -EINVAL; - } - - lock_sock(sk); - if (!ctx->more && ctx->used) - goto unlock; - - if (init) { - ctx->enc = enc; - if (con.iv) - memcpy(ctx->iv, con.iv->iv, ivsize); - - ctx->aead_assoclen = con.aead_assoclen; - } - - while (size) { - size_t len = size; - struct scatterlist *sg = NULL; - - /* use the existing memory in an allocated page */ - if (ctx->merge) { - sg = sgl->sg + sgl->cur - 1; - len = min_t(unsigned long, len, - PAGE_SIZE - sg->offset - sg->length); - err = memcpy_from_msg(page_address(sg_page(sg)) + - sg->offset + sg->length, - msg, len); - if (err) - goto unlock; - - sg->length += len; - ctx->merge = (sg->offset + sg->length) & - (PAGE_SIZE - 1); - - ctx->used += len; - copied += len; - size -= len; - continue; - } - - if (!aead_writable(sk)) { - /* user space sent too much 
data */ - aead_put_sgl(sk); - err = -EMSGSIZE; - goto unlock; - } - - /* allocate a new page */ - len = min_t(unsigned long, size, aead_sndbuf(sk)); - while (len) { - size_t plen = 0; - - if (sgl->cur >= ALG_MAX_PAGES) { - aead_put_sgl(sk); - err = -E2BIG; - goto unlock; - } - - sg = sgl->sg + sgl->cur; - plen = min_t(size_t, len, PAGE_SIZE); - - sg_assign_page(sg, alloc_page(GFP_KERNEL)); - err = -ENOMEM; - if (!sg_page(sg)) - goto unlock; - - err = memcpy_from_msg(page_address(sg_page(sg)), - msg, plen); - if (err) { - __free_page(sg_page(sg)); - sg_assign_page(sg, NULL); - goto unlock; - } - - sg->offset = 0; - sg->length = plen; - len -= plen; - ctx->used += plen; - copied += plen; - sgl->cur++; - size -= plen; - ctx->merge = plen & (PAGE_SIZE - 1); - } - } - - err = 0; - - ctx->more = msg->msg_flags & MSG_MORE; - if (!ctx->more && !aead_sufficient_data(ctx)) { - aead_put_sgl(sk); - err = -EMSGSIZE; - } - -unlock: - aead_data_wakeup(sk); - release_sock(sk); - - return err ?: copied; -} - -static ssize_t aead_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, int flags) -{ - struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - struct aead_sg_list *sgl = &ctx->tsgl; - int err = -EINVAL; - - if (flags & MSG_SENDPAGE_NOTLAST) - flags |= MSG_MORE; - - if (sgl->cur >= ALG_MAX_PAGES) - return -E2BIG; - - lock_sock(sk); - if (!ctx->more && ctx->used) - goto unlock; - - if (!size) - goto done; - - if (!aead_writable(sk)) { - /* user space sent too much data */ - aead_put_sgl(sk); - err = -EMSGSIZE; - goto unlock; - } - - ctx->merge = 0; - - get_page(page); - sg_set_page(sgl->sg + sgl->cur, page, size, offset); - sgl->cur++; - ctx->used += size; - - err = 0; - -done: - ctx->more = flags & MSG_MORE; - if (!ctx->more && !aead_sufficient_data(ctx)) { - aead_put_sgl(sk); - err = -EMSGSIZE; - } + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); + struct aead_tfm *aeadc = pask->private; + struct crypto_aead *tfm = aeadc->aead; + unsigned int ivsize = crypto_aead_ivsize(tfm); -unlock: - aead_data_wakeup(sk); - release_sock(sk); - - return err ?: size; + return af_alg_sendmsg(sock, msg, size, ivsize); } -#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \ - ((char *)req + sizeof(struct aead_request) + \ - crypto_aead_reqsize(tfm)) - - #define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \ - crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \ - sizeof(struct aead_request) - -static void aead_async_cb(struct crypto_async_request *_req, int err) +static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm, + struct scatterlist *src, + struct scatterlist *dst, unsigned int len) { - struct aead_request *req = _req->data; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct aead_async_req *areq = GET_ASYM_REQ(req, tfm); - struct sock *sk = areq->sk; - struct scatterlist *sg = areq->tsgl; - struct aead_async_rsgl *rsgl; - struct kiocb *iocb = areq->iocb; - unsigned int i, reqlen = GET_REQ_SIZE(tfm); - - list_for_each_entry(rsgl, &areq->list, list) { - af_alg_free_sg(&rsgl->sgl); - if (rsgl != &areq->first_rsgl) - sock_kfree_s(sk, rsgl, sizeof(*rsgl)); - } + SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm); - for (i = 0; i < areq->tsgls; i++) - put_page(sg_page(sg + i)); + skcipher_request_set_tfm(skreq, null_tfm); + skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG, + NULL, NULL); + skcipher_request_set_crypt(skreq, src, dst, len, NULL); - sock_kfree_s(sk, areq->tsgl, 
sizeof(*areq->tsgl) * areq->tsgls); - sock_kfree_s(sk, req, reqlen); - __sock_put(sk); - iocb->ki_complete(iocb, err, err); + return crypto_skcipher_encrypt(skreq); } -static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg, - int flags) +static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, + size_t ignored, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req); - struct aead_async_req *areq; - struct aead_request *req = NULL; - struct aead_sg_list *sgl = &ctx->tsgl; - struct aead_async_rsgl *last_rsgl = NULL, *rsgl; + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); + struct af_alg_ctx *ctx = ask->private; + struct aead_tfm *aeadc = pask->private; + struct crypto_aead *tfm = aeadc->aead; + struct crypto_skcipher *null_tfm = aeadc->null_tfm; unsigned int as = crypto_aead_authsize(tfm); - unsigned int i, reqlen = GET_REQ_SIZE(tfm); - int err = -ENOMEM; - unsigned long used; - size_t outlen = 0; - size_t usedpages = 0; - - lock_sock(sk); - if (ctx->more) { - err = aead_wait_for_data(sk, flags); - if (err) - goto unlock; - } - - if (!aead_sufficient_data(ctx)) - goto unlock; - - used = ctx->used; - if (ctx->enc) - outlen = used + as; - else - outlen = used - as; - - req = sock_kmalloc(sk, reqlen, GFP_KERNEL); - if (unlikely(!req)) - goto unlock; - - areq = GET_ASYM_REQ(req, tfm); - memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl)); - INIT_LIST_HEAD(&areq->list); - areq->iocb = msg->msg_iocb; - areq->sk = sk; - memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm)); - aead_request_set_tfm(req, tfm); - aead_request_set_ad(req, ctx->aead_assoclen); - aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - aead_async_cb, req); - used -= ctx->aead_assoclen; - - /* take over all tx sgls from ctx */ - areq->tsgl = sock_kmalloc(sk, - sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1), - GFP_KERNEL); - if (unlikely(!areq->tsgl)) - goto free; - - sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1)); - for (i = 0; i < sgl->cur; i++) - sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]), - sgl->sg[i].length, sgl->sg[i].offset); - - areq->tsgls = sgl->cur; - - /* create rx sgls */ - while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) { - size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter), - (outlen - usedpages)); - - if (list_empty(&areq->list)) { - rsgl = &areq->first_rsgl; - - } else { - rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL); - if (unlikely(!rsgl)) { - err = -ENOMEM; - goto free; - } - } - rsgl->sgl.npages = 0; - list_add_tail(&rsgl->list, &areq->list); - - /* make one iovec available as scatterlist */ - err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); - if (err < 0) - goto free; - - usedpages += err; - - /* chain the new scatterlist with previous one */ - if (last_rsgl) - af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl); - - last_rsgl = rsgl; - - iov_iter_advance(&msg->msg_iter, err); - } - - /* ensure output buffer is sufficiently large */ - if (usedpages < outlen) { - err = -EINVAL; - goto unlock; - } - - aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used, - areq->iv); - err = ctx->enc ? 
crypto_aead_encrypt(req) : crypto_aead_decrypt(req); - if (err) { - if (err == -EINPROGRESS) { - sock_hold(sk); - err = -EIOCBQUEUED; - aead_reset_ctx(ctx); - goto unlock; - } else if (err == -EBADMSG) { - aead_put_sgl(sk); - } - goto free; - } - aead_put_sgl(sk); - -free: - list_for_each_entry(rsgl, &areq->list, list) { - af_alg_free_sg(&rsgl->sgl); - if (rsgl != &areq->first_rsgl) - sock_kfree_s(sk, rsgl, sizeof(*rsgl)); - } - if (areq->tsgl) - sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls); - if (req) - sock_kfree_s(sk, req, reqlen); -unlock: - aead_wmem_wakeup(sk); - release_sock(sk); - return err ? err : outlen; -} - -static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) -{ - struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req)); - struct aead_sg_list *sgl = &ctx->tsgl; - struct aead_async_rsgl *last_rsgl = NULL; - struct aead_async_rsgl *rsgl, *tmp; - int err = -EINVAL; - unsigned long used = 0; - size_t outlen = 0; - size_t usedpages = 0; - - lock_sock(sk); + struct af_alg_async_req *areq; + struct af_alg_tsgl *tsgl; + struct scatterlist *src; + int err = 0; + size_t used = 0; /* [in] TX bufs to be en/decrypted */ + size_t outlen = 0; /* [out] RX bufs produced by kernel */ + size_t usedpages = 0; /* [in] RX bufs to be used from user */ + size_t processed = 0; /* [in] TX bufs to be consumed */ /* - * Please see documentation of aead_request_set_crypt for the - * description of the AEAD memory structure expected from the caller. + * Data length provided by caller via sendmsg/sendpage that has not + * yet been processed. */ - - if (ctx->more) { - err = aead_wait_for_data(sk, flags); - if (err) - goto unlock; - } - - /* data length provided by caller via sendmsg/sendpage */ used = ctx->used; /* @@ -584,8 +126,8 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) * the error message in sendmsg/sendpage and still call recvmsg. This * check here protects the kernel integrity. */ - if (!aead_sufficient_data(ctx)) - goto unlock; + if (!aead_sufficient_data(sk)) + return -EINVAL; /* * Calculate the minimum output buffer size holding the result of the @@ -606,104 +148,191 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) */ used -= ctx->aead_assoclen; - /* convert iovecs of output buffers into scatterlists */ - while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) { - size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter), - (outlen - usedpages)); - - if (list_empty(&ctx->list)) { - rsgl = &ctx->first_rsgl; - } else { - rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL); - if (unlikely(!rsgl)) { - err = -ENOMEM; - goto unlock; - } - } - rsgl->sgl.npages = 0; - list_add_tail(&rsgl->list, &ctx->list); + /* Allocate cipher request for current operation. 
*/ + areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) + + crypto_aead_reqsize(tfm)); + if (IS_ERR(areq)) + return PTR_ERR(areq); - /* make one iovec available as scatterlist */ - err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); - if (err < 0) - goto unlock; - usedpages += err; - /* chain the new scatterlist with previous one */ - if (last_rsgl) - af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl); + /* convert iovecs of output buffers into RX SGL */ + err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages); + if (err) + goto free; - last_rsgl = rsgl; + /* + * Ensure output buffer is sufficiently large. If the caller provides + * less buffer space, only use the relative required input size. This + * allows AIO operation where the caller sent all data to be processed + * and the AIO operation performs the operation on the different chunks + * of the input data. + */ + if (usedpages < outlen) { + size_t less = outlen - usedpages; - iov_iter_advance(&msg->msg_iter, err); + if (used < less) { + err = -EINVAL; + goto free; + } + used -= less; + outlen -= less; } - /* ensure output buffer is sufficiently large */ - if (usedpages < outlen) { - err = -EINVAL; - goto unlock; - } + processed = used + ctx->aead_assoclen; + tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list); - sg_mark_end(sgl->sg + sgl->cur - 1); - aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg, - used, ctx->iv); - aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen); + /* + * Copy of AAD from source to destination + * + * The AAD is copied to the destination buffer without change. Even + * when user space uses an in-place cipher operation, the kernel + * will copy the data as it does not see whether such in-place operation + * is initiated. + * + * To ensure efficiency, the following implementation ensure that the + * ciphers are invoked to perform a crypto operation in-place. This + * is achieved by memory management specified as follows. + */ - err = af_alg_wait_for_completion(ctx->enc ? - crypto_aead_encrypt(&ctx->aead_req) : - crypto_aead_decrypt(&ctx->aead_req), - &ctx->completion); + /* Use the RX SGL as source (and destination) for crypto op. */ + src = areq->first_rsgl.sgl.sg; + + if (ctx->enc) { + /* + * Encryption operation - The in-place cipher operation is + * achieved by the following operation: + * + * TX SGL: AAD || PT + * | | + * | copy | + * v v + * RX SGL: AAD || PT || Tag + */ + err = crypto_aead_copy_sgl(null_tfm, tsgl->sg, + areq->first_rsgl.sgl.sg, processed); + if (err) + goto free; + af_alg_pull_tsgl(sk, processed, NULL, 0); + } else { + /* + * Decryption operation - To achieve an in-place cipher + * operation, the following SGL structure is used: + * + * TX SGL: AAD || CT || Tag + * | | ^ + * | copy | | Create SGL link. + * v v | + * RX SGL: AAD || CT ----+ + */ + + /* Copy AAD || CT to RX SGL buffer for in-place operation. */ + err = crypto_aead_copy_sgl(null_tfm, tsgl->sg, + areq->first_rsgl.sgl.sg, outlen); + if (err) + goto free; - if (err) { - /* EBADMSG implies a valid cipher operation took place */ - if (err == -EBADMSG) - aead_put_sgl(sk); + /* Create TX SGL for tag and chain it to RX SGL. 
*/ + areq->tsgl_entries = af_alg_count_tsgl(sk, processed, + processed - as); + if (!areq->tsgl_entries) + areq->tsgl_entries = 1; + areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * + areq->tsgl_entries, + GFP_KERNEL); + if (!areq->tsgl) { + err = -ENOMEM; + goto free; + } + sg_init_table(areq->tsgl, areq->tsgl_entries); + + /* Release TX SGL, except for tag data and reassign tag data. */ + af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as); + + /* chain the areq TX SGL holding the tag with RX SGL */ + if (usedpages) { + /* RX SGL present */ + struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl; + + sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1); + sg_chain(sgl_prev->sg, sgl_prev->npages + 1, + areq->tsgl); + } else + /* no RX SGL present (e.g. authentication only) */ + src = areq->tsgl; + } - goto unlock; + /* Initialize the crypto operation */ + aead_request_set_crypt(&areq->cra_u.aead_req, src, + areq->first_rsgl.sgl.sg, used, ctx->iv); + aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen); + aead_request_set_tfm(&areq->cra_u.aead_req, tfm); + + if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) { + /* AIO operation */ + areq->iocb = msg->msg_iocb; + aead_request_set_callback(&areq->cra_u.aead_req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + af_alg_async_cb, areq); + err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) : + crypto_aead_decrypt(&areq->cra_u.aead_req); + } else { + /* Synchronous operation */ + aead_request_set_callback(&areq->cra_u.aead_req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + af_alg_complete, &ctx->completion); + err = af_alg_wait_for_completion(ctx->enc ? + crypto_aead_encrypt(&areq->cra_u.aead_req) : + crypto_aead_decrypt(&areq->cra_u.aead_req), + &ctx->completion); } - aead_put_sgl(sk); - err = 0; + /* AIO operation in progress */ + if (err == -EINPROGRESS) { + sock_hold(sk); -unlock: - list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) { - af_alg_free_sg(&rsgl->sgl); - list_del(&rsgl->list); - if (rsgl != &ctx->first_rsgl) - sock_kfree_s(sk, rsgl, sizeof(*rsgl)); + /* Remember output size that will be generated. */ + areq->outlen = outlen; + + return -EIOCBQUEUED; } - INIT_LIST_HEAD(&ctx->list); - aead_wmem_wakeup(sk); - release_sock(sk); - return err ? err : outlen; -} +free: + af_alg_free_areq_sgls(areq); + sock_kfree_s(sk, areq, areq->areqlen); -static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, - int flags) -{ - return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ? - aead_recvmsg_async(sock, msg, flags) : - aead_recvmsg_sync(sock, msg, flags); + return err ? err : outlen; } -static unsigned int aead_poll(struct file *file, struct socket *sock, - poll_table *wait) +static int aead_recvmsg(struct socket *sock, struct msghdr *msg, + size_t ignored, int flags) { struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - unsigned int mask; - - sock_poll_wait(file, sk_sleep(sk), wait); - mask = 0; + int ret = 0; - if (!ctx->more) - mask |= POLLIN | POLLRDNORM; + lock_sock(sk); + while (msg_data_left(msg)) { + int err = _aead_recvmsg(sock, msg, ignored, flags); + + /* + * This error covers -EIOCBQUEUED which implies that we can + * only handle one AIO request. If the caller wants to have + * multiple AIO requests in parallel, he must make multiple + * separate AIO calls. + * + * Also return the error if no data has been processed so far. 
+ */ + if (err <= 0) { + if (err == -EIOCBQUEUED || err == -EBADMSG || !ret) + ret = err; + goto out; + } - if (aead_writable(sk)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + ret += err; + } - return mask; +out: + af_alg_wmem_wakeup(sk); + release_sock(sk); + return ret; } static struct proto_ops algif_aead_ops = { @@ -723,9 +352,9 @@ static struct proto_ops algif_aead_ops = { .release = af_alg_release, .sendmsg = aead_sendmsg, - .sendpage = aead_sendpage, + .sendpage = af_alg_sendpage, .recvmsg = aead_recvmsg, - .poll = aead_poll, + .poll = af_alg_poll, }; static int aead_check_key(struct socket *sock) @@ -787,7 +416,7 @@ static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page, if (err) return err; - return aead_sendpage(sock, page, offset, size, flags); + return af_alg_sendpage(sock, page, offset, size, flags); } static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg, @@ -821,13 +450,14 @@ static struct proto_ops algif_aead_ops_nokey = { .sendmsg = aead_sendmsg_nokey, .sendpage = aead_sendpage_nokey, .recvmsg = aead_recvmsg_nokey, - .poll = aead_poll, + .poll = af_alg_poll, }; static void *aead_bind(const char *name, u32 type, u32 mask) { struct aead_tfm *tfm; struct crypto_aead *aead; + struct crypto_skcipher *null_tfm; tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); if (!tfm) @@ -839,7 +469,15 @@ static void *aead_bind(const char *name, u32 type, u32 mask) return ERR_CAST(aead); } + null_tfm = crypto_get_default_null_skcipher2(); + if (IS_ERR(null_tfm)) { + crypto_free_aead(aead); + kfree(tfm); + return ERR_CAST(null_tfm); + } + tfm->aead = aead; + tfm->null_tfm = null_tfm; return tfm; } @@ -873,12 +511,15 @@ static int aead_setkey(void *private, const u8 *key, unsigned int keylen) static void aead_sock_destruct(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); - struct aead_ctx *ctx = ask->private; - unsigned int ivlen = crypto_aead_ivsize( - crypto_aead_reqtfm(&ctx->aead_req)); - - WARN_ON(refcount_read(&sk->sk_refcnt) != 0); - aead_put_sgl(sk); + struct af_alg_ctx *ctx = ask->private; + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); + struct aead_tfm *aeadc = pask->private; + struct crypto_aead *tfm = aeadc->aead; + unsigned int ivlen = crypto_aead_ivsize(tfm); + + af_alg_pull_tsgl(sk, ctx->used, NULL, 0); + crypto_put_default_null_skcipher2(); sock_kzfree_s(sk, ctx->iv, ivlen); sock_kfree_s(sk, ctx, ctx->len); af_alg_release_parent(sk); @@ -886,11 +527,11 @@ static void aead_sock_destruct(struct sock *sk) static int aead_accept_parent_nokey(void *private, struct sock *sk) { - struct aead_ctx *ctx; + struct af_alg_ctx *ctx; struct alg_sock *ask = alg_sk(sk); struct aead_tfm *tfm = private; struct crypto_aead *aead = tfm->aead; - unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead); + unsigned int len = sizeof(*ctx); unsigned int ivlen = crypto_aead_ivsize(aead); ctx = sock_kmalloc(sk, len, GFP_KERNEL); @@ -905,23 +546,18 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) } memset(ctx->iv, 0, ivlen); + INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; ctx->used = 0; + ctx->rcvused = 0; ctx->more = 0; ctx->merge = 0; ctx->enc = 0; - ctx->tsgl.cur = 0; ctx->aead_assoclen = 0; af_alg_init_completion(&ctx->completion); - sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES); - INIT_LIST_HEAD(&ctx->list); ask->private = ctx; - aead_request_set_tfm(&ctx->aead_req, aead); - aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, - af_alg_complete, &ctx->completion); - sk->sk_destruct = aead_sock_destruct; 
return 0; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 43839b00fe6c..8ae4170aaeb4 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -10,6 +10,21 @@ * Software Foundation; either version 2 of the License, or (at your option) * any later version. * + * The following concept of the memory management is used: + * + * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is + * filled by user space with the data submitted via sendpage/sendmsg. Filling + * up the TX SGL does not cause a crypto operation -- the data will only be + * tracked by the kernel. Upon receipt of one recvmsg call, the caller must + * provide a buffer which is tracked with the RX SGL. + * + * During the processing of the recvmsg operation, the cipher request is + * allocated and prepared. As part of the recvmsg operation, the processed + * TX buffers are extracted from the TX SGL into a separate SGL. + * + * After the completion of the crypto operation, the RX SGL and the cipher + * request is released. The extracted TX SGL parts are released together with + * the RX SGL release. */ #include <crypto/scatterwalk.h> @@ -18,279 +33,16 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/kernel.h> -#include <linux/sched/signal.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/net.h> #include <net/sock.h> -struct skcipher_sg_list { - struct list_head list; - - int cur; - - struct scatterlist sg[0]; -}; - struct skcipher_tfm { struct crypto_skcipher *skcipher; bool has_key; }; -struct skcipher_ctx { - struct list_head tsgl; - struct af_alg_sgl rsgl; - - void *iv; - - struct af_alg_completion completion; - - atomic_t inflight; - size_t used; - - unsigned int len; - bool more; - bool merge; - bool enc; - - struct skcipher_request req; -}; - -struct skcipher_async_rsgl { - struct af_alg_sgl sgl; - struct list_head list; -}; - -struct skcipher_async_req { - struct kiocb *iocb; - struct skcipher_async_rsgl first_sgl; - struct list_head list; - struct scatterlist *tsg; - atomic_t *inflight; - struct skcipher_request req; -}; - -#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \ - sizeof(struct scatterlist) - 1) - -static void skcipher_free_async_sgls(struct skcipher_async_req *sreq) -{ - struct skcipher_async_rsgl *rsgl, *tmp; - struct scatterlist *sgl; - struct scatterlist *sg; - int i, n; - - list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) { - af_alg_free_sg(&rsgl->sgl); - if (rsgl != &sreq->first_sgl) - kfree(rsgl); - } - sgl = sreq->tsg; - n = sg_nents(sgl); - for_each_sg(sgl, sg, n, i) - put_page(sg_page(sg)); - - kfree(sreq->tsg); -} - -static void skcipher_async_cb(struct crypto_async_request *req, int err) -{ - struct skcipher_async_req *sreq = req->data; - struct kiocb *iocb = sreq->iocb; - - atomic_dec(sreq->inflight); - skcipher_free_async_sgls(sreq); - kzfree(sreq); - iocb->ki_complete(iocb, err, err); -} - -static inline int skcipher_sndbuf(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - - return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - - ctx->used, 0); -} - -static inline bool skcipher_writable(struct sock *sk) -{ - return PAGE_SIZE <= skcipher_sndbuf(sk); -} - -static int skcipher_alloc_sgl(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - struct skcipher_sg_list *sgl; - struct scatterlist *sg = NULL; - - sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); - if 
(!list_empty(&ctx->tsgl)) - sg = sgl->sg; - - if (!sg || sgl->cur >= MAX_SGL_ENTS) { - sgl = sock_kmalloc(sk, sizeof(*sgl) + - sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), - GFP_KERNEL); - if (!sgl) - return -ENOMEM; - - sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); - sgl->cur = 0; - - if (sg) - sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); - - list_add_tail(&sgl->list, &ctx->tsgl); - } - - return 0; -} - -static void skcipher_pull_sgl(struct sock *sk, size_t used, int put) -{ - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - struct skcipher_sg_list *sgl; - struct scatterlist *sg; - int i; - - while (!list_empty(&ctx->tsgl)) { - sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list, - list); - sg = sgl->sg; - - for (i = 0; i < sgl->cur; i++) { - size_t plen = min_t(size_t, used, sg[i].length); - - if (!sg_page(sg + i)) - continue; - - sg[i].length -= plen; - sg[i].offset += plen; - - used -= plen; - ctx->used -= plen; - - if (sg[i].length) - return; - if (put) - put_page(sg_page(sg + i)); - sg_assign_page(sg + i, NULL); - } - - list_del(&sgl->list); - sock_kfree_s(sk, sgl, - sizeof(*sgl) + sizeof(sgl->sg[0]) * - (MAX_SGL_ENTS + 1)); - } - - if (!ctx->used) - ctx->merge = 0; -} - -static void skcipher_free_sgl(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - - skcipher_pull_sgl(sk, ctx->used, 1); -} - -static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags) -{ - DEFINE_WAIT_FUNC(wait, woken_wake_function); - int err = -ERESTARTSYS; - long timeout; - - if (flags & MSG_DONTWAIT) - return -EAGAIN; - - sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); - - add_wait_queue(sk_sleep(sk), &wait); - for (;;) { - if (signal_pending(current)) - break; - timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) { - err = 0; - break; - } - } - remove_wait_queue(sk_sleep(sk), &wait); - - return err; -} - -static void skcipher_wmem_wakeup(struct sock *sk) -{ - struct socket_wq *wq; - - if (!skcipher_writable(sk)) - return; - - rcu_read_lock(); - wq = rcu_dereference(sk->sk_wq); - if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | - POLLRDNORM | - POLLRDBAND); - sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); - rcu_read_unlock(); -} - -static int skcipher_wait_for_data(struct sock *sk, unsigned flags) -{ - DEFINE_WAIT_FUNC(wait, woken_wake_function); - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - long timeout; - int err = -ERESTARTSYS; - - if (flags & MSG_DONTWAIT) { - return -EAGAIN; - } - - sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); - - add_wait_queue(sk_sleep(sk), &wait); - for (;;) { - if (signal_pending(current)) - break; - timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, ctx->used, &wait)) { - err = 0; - break; - } - } - remove_wait_queue(sk_sleep(sk), &wait); - - sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); - - return err; -} - -static void skcipher_data_wakeup(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - struct socket_wq *wq; - - if (!ctx->used) - return; - - rcu_read_lock(); - wq = rcu_dereference(sk->sk_wq); - if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | - POLLRDNORM | - POLLRDBAND); - sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); - rcu_read_unlock(); -} - static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { @@ -298,445 +50,143 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr 
*msg, struct alg_sock *ask = alg_sk(sk); struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); - struct skcipher_ctx *ctx = ask->private; struct skcipher_tfm *skc = pask->private; struct crypto_skcipher *tfm = skc->skcipher; unsigned ivsize = crypto_skcipher_ivsize(tfm); - struct skcipher_sg_list *sgl; - struct af_alg_control con = {}; - long copied = 0; - bool enc = 0; - bool init = 0; - int err; - int i; - - if (msg->msg_controllen) { - err = af_alg_cmsg_send(msg, &con); - if (err) - return err; - - init = 1; - switch (con.op) { - case ALG_OP_ENCRYPT: - enc = 1; - break; - case ALG_OP_DECRYPT: - enc = 0; - break; - default: - return -EINVAL; - } - - if (con.iv && con.iv->ivlen != ivsize) - return -EINVAL; - } - - err = -EINVAL; - - lock_sock(sk); - if (!ctx->more && ctx->used) - goto unlock; - - if (init) { - ctx->enc = enc; - if (con.iv) - memcpy(ctx->iv, con.iv->iv, ivsize); - } - - while (size) { - struct scatterlist *sg; - unsigned long len = size; - size_t plen; - - if (ctx->merge) { - sgl = list_entry(ctx->tsgl.prev, - struct skcipher_sg_list, list); - sg = sgl->sg + sgl->cur - 1; - len = min_t(unsigned long, len, - PAGE_SIZE - sg->offset - sg->length); - - err = memcpy_from_msg(page_address(sg_page(sg)) + - sg->offset + sg->length, - msg, len); - if (err) - goto unlock; - - sg->length += len; - ctx->merge = (sg->offset + sg->length) & - (PAGE_SIZE - 1); - - ctx->used += len; - copied += len; - size -= len; - continue; - } - if (!skcipher_writable(sk)) { - err = skcipher_wait_for_wmem(sk, msg->msg_flags); - if (err) - goto unlock; - } - - len = min_t(unsigned long, len, skcipher_sndbuf(sk)); - - err = skcipher_alloc_sgl(sk); - if (err) - goto unlock; - - sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); - sg = sgl->sg; - if (sgl->cur) - sg_unmark_end(sg + sgl->cur - 1); - do { - i = sgl->cur; - plen = min_t(size_t, len, PAGE_SIZE); - - sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); - err = -ENOMEM; - if (!sg_page(sg + i)) - goto unlock; - - err = memcpy_from_msg(page_address(sg_page(sg + i)), - msg, plen); - if (err) { - __free_page(sg_page(sg + i)); - sg_assign_page(sg + i, NULL); - goto unlock; - } - - sg[i].length = plen; - len -= plen; - ctx->used += plen; - copied += plen; - size -= plen; - sgl->cur++; - } while (len && sgl->cur < MAX_SGL_ENTS); - - if (!size) - sg_mark_end(sg + sgl->cur - 1); - - ctx->merge = plen & (PAGE_SIZE - 1); - } - - err = 0; - - ctx->more = msg->msg_flags & MSG_MORE; - -unlock: - skcipher_data_wakeup(sk); - release_sock(sk); - - return copied ?: err; + return af_alg_sendmsg(sock, msg, size, ivsize); } -static ssize_t skcipher_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, int flags) -{ - struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - struct skcipher_sg_list *sgl; - int err = -EINVAL; - - if (flags & MSG_SENDPAGE_NOTLAST) - flags |= MSG_MORE; - - lock_sock(sk); - if (!ctx->more && ctx->used) - goto unlock; - - if (!size) - goto done; - - if (!skcipher_writable(sk)) { - err = skcipher_wait_for_wmem(sk, flags); - if (err) - goto unlock; - } - - err = skcipher_alloc_sgl(sk); - if (err) - goto unlock; - - ctx->merge = 0; - sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); - - if (sgl->cur) - sg_unmark_end(sgl->sg + sgl->cur - 1); - - sg_mark_end(sgl->sg + sgl->cur); - get_page(page); - sg_set_page(sgl->sg + sgl->cur, page, size, offset); - sgl->cur++; - ctx->used += size; - -done: - ctx->more = flags & MSG_MORE; - 
-unlock: - skcipher_data_wakeup(sk); - release_sock(sk); - - return err ?: size; -} - -static int skcipher_all_sg_nents(struct skcipher_ctx *ctx) -{ - struct skcipher_sg_list *sgl; - struct scatterlist *sg; - int nents = 0; - - list_for_each_entry(sgl, &ctx->tsgl, list) { - sg = sgl->sg; - - while (!sg->length) - sg++; - - nents += sg_nents(sg); - } - return nents; -} - -static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, - int flags) +static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, + size_t ignored, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct sock *psk = ask->parent; struct alg_sock *pask = alg_sk(psk); - struct skcipher_ctx *ctx = ask->private; + struct af_alg_ctx *ctx = ask->private; struct skcipher_tfm *skc = pask->private; struct crypto_skcipher *tfm = skc->skcipher; - struct skcipher_sg_list *sgl; - struct scatterlist *sg; - struct skcipher_async_req *sreq; - struct skcipher_request *req; - struct skcipher_async_rsgl *last_rsgl = NULL; - unsigned int txbufs = 0, len = 0, tx_nents; - unsigned int reqsize = crypto_skcipher_reqsize(tfm); - unsigned int ivsize = crypto_skcipher_ivsize(tfm); - int err = -ENOMEM; - bool mark = false; - char *iv; - - sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL); - if (unlikely(!sreq)) - goto out; - - req = &sreq->req; - iv = (char *)(req + 1) + reqsize; - sreq->iocb = msg->msg_iocb; - INIT_LIST_HEAD(&sreq->list); - sreq->inflight = &ctx->inflight; + unsigned int bs = crypto_skcipher_blocksize(tfm); + struct af_alg_async_req *areq; + int err = 0; + size_t len = 0; - lock_sock(sk); - tx_nents = skcipher_all_sg_nents(ctx); - sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL); - if (unlikely(!sreq->tsg)) - goto unlock; - sg_init_table(sreq->tsg, tx_nents); - memcpy(iv, ctx->iv, ivsize); - skcipher_request_set_tfm(req, tfm); - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, - skcipher_async_cb, sreq); - - while (iov_iter_count(&msg->msg_iter)) { - struct skcipher_async_rsgl *rsgl; - int used; - - if (!ctx->used) { - err = skcipher_wait_for_data(sk, flags); - if (err) - goto free; - } - sgl = list_first_entry(&ctx->tsgl, - struct skcipher_sg_list, list); - sg = sgl->sg; - - while (!sg->length) - sg++; - - used = min_t(unsigned long, ctx->used, - iov_iter_count(&msg->msg_iter)); - used = min_t(unsigned long, used, sg->length); - - if (txbufs == tx_nents) { - struct scatterlist *tmp; - int x; - /* Ran out of tx slots in async request - * need to expand */ - tmp = kcalloc(tx_nents * 2, sizeof(*tmp), - GFP_KERNEL); - if (!tmp) { - err = -ENOMEM; - goto free; - } - - sg_init_table(tmp, tx_nents * 2); - for (x = 0; x < tx_nents; x++) - sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]), - sreq->tsg[x].length, - sreq->tsg[x].offset); - kfree(sreq->tsg); - sreq->tsg = tmp; - tx_nents *= 2; - mark = true; - } - /* Need to take over the tx sgl from ctx - * to the asynch req - these sgls will be freed later */ - sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length, - sg->offset); - - if (list_empty(&sreq->list)) { - rsgl = &sreq->first_sgl; - list_add_tail(&rsgl->list, &sreq->list); - } else { - rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL); - if (!rsgl) { - err = -ENOMEM; - goto free; - } - list_add_tail(&rsgl->list, &sreq->list); - } + /* Allocate cipher request for current operation. 
*/ + areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) + + crypto_skcipher_reqsize(tfm)); + if (IS_ERR(areq)) + return PTR_ERR(areq); - used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used); - err = used; - if (used < 0) - goto free; - if (last_rsgl) - af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl); - - last_rsgl = rsgl; - len += used; - skcipher_pull_sgl(sk, used, 0); - iov_iter_advance(&msg->msg_iter, used); + /* convert iovecs of output buffers into RX SGL */ + err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len); + if (err) + goto free; + + /* Process only as much RX buffers for which we have TX data */ + if (len > ctx->used) + len = ctx->used; + + /* + * If more buffers are to be expected to be processed, process only + * full block size buffers. + */ + if (ctx->more || len < ctx->used) + len -= len % bs; + + /* + * Create a per request TX SGL for this request which tracks the + * SG entries from the global TX SGL. + */ + areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0); + if (!areq->tsgl_entries) + areq->tsgl_entries = 1; + areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries, + GFP_KERNEL); + if (!areq->tsgl) { + err = -ENOMEM; + goto free; + } + sg_init_table(areq->tsgl, areq->tsgl_entries); + af_alg_pull_tsgl(sk, len, areq->tsgl, 0); + + /* Initialize the crypto operation */ + skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm); + skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl, + areq->first_rsgl.sgl.sg, len, ctx->iv); + + if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) { + /* AIO operation */ + areq->iocb = msg->msg_iocb; + skcipher_request_set_callback(&areq->cra_u.skcipher_req, + CRYPTO_TFM_REQ_MAY_SLEEP, + af_alg_async_cb, areq); + err = ctx->enc ? + crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : + crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); + } else { + /* Synchronous operation */ + skcipher_request_set_callback(&areq->cra_u.skcipher_req, + CRYPTO_TFM_REQ_MAY_SLEEP | + CRYPTO_TFM_REQ_MAY_BACKLOG, + af_alg_complete, + &ctx->completion); + err = af_alg_wait_for_completion(ctx->enc ? + crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : + crypto_skcipher_decrypt(&areq->cra_u.skcipher_req), + &ctx->completion); } - if (mark) - sg_mark_end(sreq->tsg + txbufs - 1); - - skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, - len, iv); - err = ctx->enc ? crypto_skcipher_encrypt(req) : - crypto_skcipher_decrypt(req); + /* AIO operation in progress */ if (err == -EINPROGRESS) { - atomic_inc(&ctx->inflight); - err = -EIOCBQUEUED; - sreq = NULL; - goto unlock; + sock_hold(sk); + + /* Remember output size that will be generated. */ + areq->outlen = len; + + return -EIOCBQUEUED; } + free: - skcipher_free_async_sgls(sreq); -unlock: - skcipher_wmem_wakeup(sk); - release_sock(sk); - kzfree(sreq); -out: - return err; + af_alg_free_areq_sgls(areq); + sock_kfree_s(sk, areq, areq->areqlen); + + return err ? 
err : len; } -static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg, - int flags) +static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg, + size_t ignored, int flags) { struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct sock *psk = ask->parent; - struct alg_sock *pask = alg_sk(psk); - struct skcipher_ctx *ctx = ask->private; - struct skcipher_tfm *skc = pask->private; - struct crypto_skcipher *tfm = skc->skcipher; - unsigned bs = crypto_skcipher_blocksize(tfm); - struct skcipher_sg_list *sgl; - struct scatterlist *sg; - int err = -EAGAIN; - int used; - long copied = 0; + int ret = 0; lock_sock(sk); while (msg_data_left(msg)) { - if (!ctx->used) { - err = skcipher_wait_for_data(sk, flags); - if (err) - goto unlock; + int err = _skcipher_recvmsg(sock, msg, ignored, flags); + + /* + * This error covers -EIOCBQUEUED which implies that we can + * only handle one AIO request. If the caller wants to have + * multiple AIO requests in parallel, he must make multiple + * separate AIO calls. + * + * Also return the error if no data has been processed so far. + */ + if (err <= 0) { + if (err == -EIOCBQUEUED || !ret) + ret = err; + goto out; } - used = min_t(unsigned long, ctx->used, msg_data_left(msg)); - - used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used); - err = used; - if (err < 0) - goto unlock; - - if (ctx->more || used < ctx->used) - used -= used % bs; - - err = -EINVAL; - if (!used) - goto free; - - sgl = list_first_entry(&ctx->tsgl, - struct skcipher_sg_list, list); - sg = sgl->sg; - - while (!sg->length) - sg++; - - skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used, - ctx->iv); - - err = af_alg_wait_for_completion( - ctx->enc ? - crypto_skcipher_encrypt(&ctx->req) : - crypto_skcipher_decrypt(&ctx->req), - &ctx->completion); - -free: - af_alg_free_sg(&ctx->rsgl); - - if (err) - goto unlock; - - copied += used; - skcipher_pull_sgl(sk, used, 1); - iov_iter_advance(&msg->msg_iter, used); + ret += err; } - err = 0; - -unlock: - skcipher_wmem_wakeup(sk); +out: + af_alg_wmem_wakeup(sk); release_sock(sk); - - return copied ?: err; -} - -static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg, - size_t ignored, int flags) -{ - return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ? 
- skcipher_recvmsg_async(sock, msg, flags) : - skcipher_recvmsg_sync(sock, msg, flags); + return ret; } -static unsigned int skcipher_poll(struct file *file, struct socket *sock, - poll_table *wait) -{ - struct sock *sk = sock->sk; - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - unsigned int mask; - - sock_poll_wait(file, sk_sleep(sk), wait); - mask = 0; - - if (ctx->used) - mask |= POLLIN | POLLRDNORM; - - if (skcipher_writable(sk)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; - - return mask; -} static struct proto_ops algif_skcipher_ops = { .family = PF_ALG, @@ -755,9 +205,9 @@ static struct proto_ops algif_skcipher_ops = { .release = af_alg_release, .sendmsg = skcipher_sendmsg, - .sendpage = skcipher_sendpage, + .sendpage = af_alg_sendpage, .recvmsg = skcipher_recvmsg, - .poll = skcipher_poll, + .poll = af_alg_poll, }; static int skcipher_check_key(struct socket *sock) @@ -819,7 +269,7 @@ static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page, if (err) return err; - return skcipher_sendpage(sock, page, offset, size, flags); + return af_alg_sendpage(sock, page, offset, size, flags); } static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg, @@ -853,7 +303,7 @@ static struct proto_ops algif_skcipher_ops_nokey = { .sendmsg = skcipher_sendmsg_nokey, .sendpage = skcipher_sendpage_nokey, .recvmsg = skcipher_recvmsg_nokey, - .poll = skcipher_poll, + .poll = af_alg_poll, }; static void *skcipher_bind(const char *name, u32 type, u32 mask) @@ -895,26 +345,16 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) return err; } -static void skcipher_wait(struct sock *sk) -{ - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - int ctr = 0; - - while (atomic_read(&ctx->inflight) && ctr++ < 100) - msleep(100); -} - static void skcipher_sock_destruct(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req); - - if (atomic_read(&ctx->inflight)) - skcipher_wait(sk); + struct af_alg_ctx *ctx = ask->private; + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); + struct skcipher_tfm *skc = pask->private; + struct crypto_skcipher *tfm = skc->skcipher; - skcipher_free_sgl(sk); + af_alg_pull_tsgl(sk, ctx->used, NULL, 0); sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); sock_kfree_s(sk, ctx, ctx->len); af_alg_release_parent(sk); @@ -922,11 +362,11 @@ static void skcipher_sock_destruct(struct sock *sk) static int skcipher_accept_parent_nokey(void *private, struct sock *sk) { - struct skcipher_ctx *ctx; + struct af_alg_ctx *ctx; struct alg_sock *ask = alg_sk(sk); struct skcipher_tfm *tfm = private; struct crypto_skcipher *skcipher = tfm->skcipher; - unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher); + unsigned int len = sizeof(*ctx); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) @@ -941,22 +381,17 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher)); - INIT_LIST_HEAD(&ctx->tsgl); + INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; ctx->used = 0; + ctx->rcvused = 0; ctx->more = 0; ctx->merge = 0; ctx->enc = 0; - atomic_set(&ctx->inflight, 0); af_alg_init_completion(&ctx->completion); ask->private = ctx; - skcipher_request_set_tfm(&ctx->req, skcipher); - skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP | - CRYPTO_TFM_REQ_MAY_BACKLOG, - 
af_alg_complete, &ctx->completion); - sk->sk_destruct = skcipher_sock_destruct; return 0; diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c index 8b3c04d625c3..4a45fa4890c0 100644 --- a/crypto/chacha20_generic.c +++ b/crypto/chacha20_generic.c @@ -91,9 +91,14 @@ int crypto_chacha20_crypt(struct skcipher_request *req) crypto_chacha20_init(state, ctx, walk.iv); while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); + chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, - walk.nbytes); - err = skcipher_walk_done(&walk, 0); + nbytes); + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); } return err; diff --git a/crypto/ctr.c b/crypto/ctr.c index 477d9226ccaa..854d924f9d8e 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -65,8 +65,7 @@ static void crypto_ctr_crypt_final(struct blkcipher_walk *walk, unsigned int nbytes = walk->nbytes; crypto_cipher_encrypt_one(tfm, keystream, ctrblk); - crypto_xor(keystream, src, nbytes); - memcpy(dst, keystream, nbytes); + crypto_xor_cpy(dst, keystream, src, nbytes); crypto_inc(ctrblk, bsize); } diff --git a/crypto/drbg.c b/crypto/drbg.c index 633a88e93ab0..70018397e59a 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1133,10 +1133,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) { if (!drbg) return; - kzfree(drbg->V); - drbg->Vbuf = NULL; - kzfree(drbg->C); - drbg->Cbuf = NULL; + kzfree(drbg->Vbuf); + drbg->V = NULL; + kzfree(drbg->Cbuf); + drbg->C = NULL; kzfree(drbg->scratchpadbuf); drbg->scratchpadbuf = NULL; drbg->reseed_ctr = 0; diff --git a/crypto/ecdh.c b/crypto/ecdh.c index 61c7708905d0..4271fc77d261 100644 --- a/crypto/ecdh.c +++ b/crypto/ecdh.c @@ -20,8 +20,6 @@ struct ecdh_ctx { unsigned int curve_id; unsigned int ndigits; u64 private_key[ECC_MAX_DIGITS]; - u64 public_key[2 * ECC_MAX_DIGITS]; - u64 shared_secret[ECC_MAX_DIGITS]; }; static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm) @@ -70,41 +68,58 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, static int ecdh_compute_value(struct kpp_request *req) { - int ret = 0; struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); - size_t copied, nbytes; + u64 *public_key; + u64 *shared_secret = NULL; void *buf; + size_t copied, nbytes, public_key_sz; + int ret = -ENOMEM; nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT; + /* Public part is a point thus it has both coordinates */ + public_key_sz = 2 * nbytes; + + public_key = kmalloc(public_key_sz, GFP_KERNEL); + if (!public_key) + return -ENOMEM; if (req->src) { - copied = sg_copy_to_buffer(req->src, 1, ctx->public_key, - 2 * nbytes); - if (copied != 2 * nbytes) - return -EINVAL; + shared_secret = kmalloc(nbytes, GFP_KERNEL); + if (!shared_secret) + goto free_pubkey; + + copied = sg_copy_to_buffer(req->src, 1, public_key, + public_key_sz); + if (copied != public_key_sz) { + ret = -EINVAL; + goto free_all; + } ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits, - ctx->private_key, - ctx->public_key, - ctx->shared_secret); + ctx->private_key, public_key, + shared_secret); - buf = ctx->shared_secret; + buf = shared_secret; } else { ret = ecc_make_pub_key(ctx->curve_id, ctx->ndigits, - ctx->private_key, ctx->public_key); - buf = ctx->public_key; - /* Public part is a point thus it has both coordinates */ - nbytes *= 2; + ctx->private_key, public_key); + buf = public_key; + nbytes = public_key_sz; } if (ret < 0) - return ret; + goto free_all; 
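
A side note on the crypto_xor_cpy() conversions in the ctr.c hunk above and the pcbc.c hunks below: the helper XORs two source buffers straight into a destination, so the old memcpy() plus in-place crypto_xor() pairs collapse into a single pass over the data. A byte-wise sketch of the semantics follows; the kernel helper itself is optimized to use word-sized accesses where alignment allows.

/*
 * Illustrative only: crypto_xor_cpy(dst, src1, src2, size) behaves like
 * dst[i] = src1[i] ^ src2[i] for i in [0, size). The real helper is
 * declared in include/crypto/algapi.h; this sketch just shows intent.
 */
static void xor_cpy_sketch(unsigned char *dst, const unsigned char *src1,
			   const unsigned char *src2, unsigned int size)
{
	while (size--)
		*dst++ = *src1++ ^ *src2++;
}

In crypto_ctr_crypt_final(), for example, crypto_xor_cpy(dst, keystream, src, nbytes) now replaces the crypto_xor(keystream, src, nbytes) plus memcpy(dst, keystream, nbytes) sequence with identical results.
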
copied = sg_copy_from_buffer(req->dst, 1, buf, nbytes); if (copied != nbytes) - return -EINVAL; + ret = -EINVAL; + /* fall through */ +free_all: + kzfree(shared_secret); +free_pubkey: + kfree(public_key); return ret; } diff --git a/crypto/pcbc.c b/crypto/pcbc.c index 29dd2b4a3b85..d9e45a958720 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -55,8 +55,7 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req, do { crypto_xor(iv, src, bsize); crypto_cipher_encrypt_one(tfm, dst, iv); - memcpy(iv, dst, bsize); - crypto_xor(iv, src, bsize); + crypto_xor_cpy(iv, dst, src, bsize); src += bsize; dst += bsize; @@ -79,8 +78,7 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req, memcpy(tmpbuf, src, bsize); crypto_xor(iv, src, bsize); crypto_cipher_encrypt_one(tfm, src, iv); - memcpy(iv, tmpbuf, bsize); - crypto_xor(iv, src, bsize); + crypto_xor_cpy(iv, tmpbuf, src, bsize); src += bsize; } while ((nbytes -= bsize) >= bsize); @@ -127,8 +125,7 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req, do { crypto_cipher_decrypt_one(tfm, dst, src); crypto_xor(dst, iv, bsize); - memcpy(iv, src, bsize); - crypto_xor(iv, dst, bsize); + crypto_xor_cpy(iv, dst, src, bsize); src += bsize; dst += bsize; @@ -153,8 +150,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req, memcpy(tmpbuf, src, bsize); crypto_cipher_decrypt_one(tfm, src, src); crypto_xor(src, iv, bsize); - memcpy(iv, tmpbuf, bsize); - crypto_xor(iv, src, bsize); + crypto_xor_cpy(iv, src, tmpbuf, bsize); src += bsize; } while ((nbytes -= bsize) >= bsize); diff --git a/crypto/rng.c b/crypto/rng.c index 5e8469244960..b4a618668161 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -43,12 +43,14 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) if (!buf) return -ENOMEM; - get_random_bytes(buf, slen); + err = get_random_bytes_wait(buf, slen); + if (err) + goto out; seed = buf; } err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); - +out: kzfree(buf); return err; } diff --git a/crypto/scompress.c b/crypto/scompress.c index ae1d3cf209e4..2075e2c4e7df 100644 --- a/crypto/scompress.c +++ b/crypto/scompress.c @@ -65,11 +65,6 @@ static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg) seq_puts(m, "type : scomp\n"); } -static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) -{ - return 0; -} - static void crypto_scomp_free_scratches(void * __percpu *scratches) { int i; @@ -125,12 +120,26 @@ static int crypto_scomp_alloc_all_scratches(void) if (!scomp_src_scratches) return -ENOMEM; scomp_dst_scratches = crypto_scomp_alloc_scratches(); - if (!scomp_dst_scratches) + if (!scomp_dst_scratches) { + crypto_scomp_free_scratches(scomp_src_scratches); + scomp_src_scratches = NULL; return -ENOMEM; + } } return 0; } +static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) +{ + int ret; + + mutex_lock(&scomp_lock); + ret = crypto_scomp_alloc_all_scratches(); + mutex_unlock(&scomp_lock); + + return ret; +} + static void crypto_scomp_sg_free(struct scatterlist *sgl) { int i, n; @@ -211,9 +220,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) scratch_dst, &req->dlen, *ctx); if (!ret) { if (!req->dst) { - req->dst = crypto_scomp_sg_alloc(req->dlen, - req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
- GFP_KERNEL : GFP_ATOMIC); + req->dst = crypto_scomp_sg_alloc(req->dlen, GFP_ATOMIC); if (!req->dst) goto out; } @@ -240,6 +247,10 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm) struct crypto_scomp **ctx = crypto_tfm_ctx(tfm); crypto_free_scomp(*ctx); + + mutex_lock(&scomp_lock); + crypto_scomp_free_all_scratches(); + mutex_unlock(&scomp_lock); } int crypto_init_scomp_ops_async(struct crypto_tfm *tfm) @@ -316,40 +327,18 @@ static const struct crypto_type crypto_scomp_type = { int crypto_register_scomp(struct scomp_alg *alg) { struct crypto_alg *base = &alg->base; - int ret = -ENOMEM; - - mutex_lock(&scomp_lock); - if (crypto_scomp_alloc_all_scratches()) - goto error; base->cra_type = &crypto_scomp_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS; - ret = crypto_register_alg(base); - if (ret) - goto error; - - mutex_unlock(&scomp_lock); - return ret; - -error: - crypto_scomp_free_all_scratches(); - mutex_unlock(&scomp_lock); - return ret; + return crypto_register_alg(base); } EXPORT_SYMBOL_GPL(crypto_register_scomp); int crypto_unregister_scomp(struct scomp_alg *alg) { - int ret; - - mutex_lock(&scomp_lock); - ret = crypto_unregister_alg(&alg->base); - crypto_scomp_free_all_scratches(); - mutex_unlock(&scomp_lock); - - return ret; + return crypto_unregister_alg(&alg->base); } EXPORT_SYMBOL_GPL(crypto_unregister_scomp); diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index 94970a794975..7c3382facc82 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -229,6 +229,46 @@ x4 ^= x2; \ }) +static void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, u32 r3, u32 r4, u32 *k) +{ + k += 100; + S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24); + S4(r1, r2, r4, r3, r0); store_and_load_keys(r2, r4, r3, r0, 24, 20); + S5(r2, r4, r3, r0, r1); store_and_load_keys(r1, r2, r4, r0, 20, 16); + S6(r1, r2, r4, r0, r3); store_and_load_keys(r4, r3, r2, r0, 16, 12); + S7(r4, r3, r2, r0, r1); store_and_load_keys(r1, r2, r0, r4, 12, 8); + S0(r1, r2, r0, r4, r3); store_and_load_keys(r0, r2, r4, r1, 8, 4); + S1(r0, r2, r4, r1, r3); store_and_load_keys(r3, r4, r1, r0, 4, 0); + S2(r3, r4, r1, r0, r2); store_and_load_keys(r2, r4, r3, r0, 0, -4); + S3(r2, r4, r3, r0, r1); store_and_load_keys(r0, r1, r4, r2, -4, -8); + S4(r0, r1, r4, r2, r3); store_and_load_keys(r1, r4, r2, r3, -8, -12); + S5(r1, r4, r2, r3, r0); store_and_load_keys(r0, r1, r4, r3, -12, -16); + S6(r0, r1, r4, r3, r2); store_and_load_keys(r4, r2, r1, r3, -16, -20); + S7(r4, r2, r1, r3, r0); store_and_load_keys(r0, r1, r3, r4, -20, -24); + S0(r0, r1, r3, r4, r2); store_and_load_keys(r3, r1, r4, r0, -24, -28); + k -= 50; + S1(r3, r1, r4, r0, r2); store_and_load_keys(r2, r4, r0, r3, 22, 18); + S2(r2, r4, r0, r3, r1); store_and_load_keys(r1, r4, r2, r3, 18, 14); + S3(r1, r4, r2, r3, r0); store_and_load_keys(r3, r0, r4, r1, 14, 10); + S4(r3, r0, r4, r1, r2); store_and_load_keys(r0, r4, r1, r2, 10, 6); + S5(r0, r4, r1, r2, r3); store_and_load_keys(r3, r0, r4, r2, 6, 2); + S6(r3, r0, r4, r2, r1); store_and_load_keys(r4, r1, r0, r2, 2, -2); + S7(r4, r1, r0, r2, r3); store_and_load_keys(r3, r0, r2, r4, -2, -6); + S0(r3, r0, r2, r4, r1); store_and_load_keys(r2, r0, r4, r3, -6, -10); + S1(r2, r0, r4, r3, r1); store_and_load_keys(r1, r4, r3, r2, -10, -14); + S2(r1, r4, r3, r2, r0); store_and_load_keys(r0, r4, r1, r2, -14, -18); + S3(r0, r4, r1, r2, r3); store_and_load_keys(r2, r3, r4, r0, -18, -22); + k -= 50; + S4(r2, r3, r4, r0, r1); 
store_and_load_keys(r3, r4, r0, r1, 28, 24); + S5(r3, r4, r0, r1, r2); store_and_load_keys(r2, r3, r4, r1, 24, 20); + S6(r2, r3, r4, r1, r0); store_and_load_keys(r4, r0, r3, r1, 20, 16); + S7(r4, r0, r3, r1, r2); store_and_load_keys(r2, r3, r1, r4, 16, 12); + S0(r2, r3, r1, r4, r0); store_and_load_keys(r1, r3, r4, r2, 12, 8); + S1(r1, r3, r4, r2, r0); store_and_load_keys(r0, r4, r2, r1, 8, 4); + S2(r0, r4, r2, r1, r3); store_and_load_keys(r3, r4, r0, r1, 4, 0); + S3(r3, r4, r0, r1, r2); storekeys(r1, r2, r4, r3, 0); +} + int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, unsigned int keylen) { @@ -395,42 +435,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, keyiter(k[23], r1, r0, r3, 131, 31); /* Apply S-boxes */ - - S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24); - S4(r1, r2, r4, r3, r0); store_and_load_keys(r2, r4, r3, r0, 24, 20); - S5(r2, r4, r3, r0, r1); store_and_load_keys(r1, r2, r4, r0, 20, 16); - S6(r1, r2, r4, r0, r3); store_and_load_keys(r4, r3, r2, r0, 16, 12); - S7(r4, r3, r2, r0, r1); store_and_load_keys(r1, r2, r0, r4, 12, 8); - S0(r1, r2, r0, r4, r3); store_and_load_keys(r0, r2, r4, r1, 8, 4); - S1(r0, r2, r4, r1, r3); store_and_load_keys(r3, r4, r1, r0, 4, 0); - S2(r3, r4, r1, r0, r2); store_and_load_keys(r2, r4, r3, r0, 0, -4); - S3(r2, r4, r3, r0, r1); store_and_load_keys(r0, r1, r4, r2, -4, -8); - S4(r0, r1, r4, r2, r3); store_and_load_keys(r1, r4, r2, r3, -8, -12); - S5(r1, r4, r2, r3, r0); store_and_load_keys(r0, r1, r4, r3, -12, -16); - S6(r0, r1, r4, r3, r2); store_and_load_keys(r4, r2, r1, r3, -16, -20); - S7(r4, r2, r1, r3, r0); store_and_load_keys(r0, r1, r3, r4, -20, -24); - S0(r0, r1, r3, r4, r2); store_and_load_keys(r3, r1, r4, r0, -24, -28); - k -= 50; - S1(r3, r1, r4, r0, r2); store_and_load_keys(r2, r4, r0, r3, 22, 18); - S2(r2, r4, r0, r3, r1); store_and_load_keys(r1, r4, r2, r3, 18, 14); - S3(r1, r4, r2, r3, r0); store_and_load_keys(r3, r0, r4, r1, 14, 10); - S4(r3, r0, r4, r1, r2); store_and_load_keys(r0, r4, r1, r2, 10, 6); - S5(r0, r4, r1, r2, r3); store_and_load_keys(r3, r0, r4, r2, 6, 2); - S6(r3, r0, r4, r2, r1); store_and_load_keys(r4, r1, r0, r2, 2, -2); - S7(r4, r1, r0, r2, r3); store_and_load_keys(r3, r0, r2, r4, -2, -6); - S0(r3, r0, r2, r4, r1); store_and_load_keys(r2, r0, r4, r3, -6, -10); - S1(r2, r0, r4, r3, r1); store_and_load_keys(r1, r4, r3, r2, -10, -14); - S2(r1, r4, r3, r2, r0); store_and_load_keys(r0, r4, r1, r2, -14, -18); - S3(r0, r4, r1, r2, r3); store_and_load_keys(r2, r3, r4, r0, -18, -22); - k -= 50; - S4(r2, r3, r4, r0, r1); store_and_load_keys(r3, r4, r0, r1, 28, 24); - S5(r3, r4, r0, r1, r2); store_and_load_keys(r2, r3, r4, r1, 24, 20); - S6(r2, r3, r4, r1, r0); store_and_load_keys(r4, r0, r3, r1, 20, 16); - S7(r4, r0, r3, r1, r2); store_and_load_keys(r2, r3, r1, r4, 16, 12); - S0(r2, r3, r1, r4, r0); store_and_load_keys(r1, r3, r4, r2, 12, 8); - S1(r1, r3, r4, r2, r0); store_and_load_keys(r0, r4, r2, r1, 8, 4); - S2(r0, r4, r2, r1, r3); store_and_load_keys(r3, r4, r0, r1, 4, 0); - S3(r3, r4, r0, r1, r2); storekeys(r1, r2, r4, r3, 0); + __serpent_setkey_sbox(r0, r1, r2, r3, r4, ctx->expkey); return 0; } diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 0dd6a432d6ca..0022a18d36ee 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1404,9 +1404,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0, speed_template_32_40_48); test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0, - speed_template_32_48_64); + 
speed_template_32_64);
 		test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
-				speed_template_32_48_64);
+				speed_template_32_64);
 		test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
 				speed_template_16_24_32);
 		test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
@@ -1837,9 +1837,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
 		test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
 				speed_template_32_40_48);
 		test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
-				speed_template_32_48_64);
+				speed_template_32_64);
 		test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
-				speed_template_32_48_64);
+				speed_template_32_64);
 		test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
 				speed_template_16_24_32);
 		test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 6ceb0e2758bb..d54971d2d1c8 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -32675,6 +32675,10 @@ static const struct cipher_testvec chacha20_enc_tv_template[] = {
 			  "\x5b\x86\x2f\x37\x30\xe3\x7c\xfd"
 			  "\xc4\xfd\x80\x6c\x22\xf2\x21",
 		.rlen	= 375,
+		.also_non_np = 1,
+		.np	= 3,
+		.tap	= { 375 - 20, 4, 16 },
+
 	}, { /* RFC7539 A.2. Test Vector #3 */
 		.key	= "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
 			  "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
@@ -33049,6 +33053,9 @@ static const struct cipher_testvec chacha20_enc_tv_template[] = {
 			  "\xa1\xed\xad\xd5\x76\xfa\x24\x8f"
 			  "\x98",
 		.rlen	= 1281,
+		.also_non_np = 1,
+		.np	= 3,
+		.tap	= { 1200, 1, 80 },
 	},
 };
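
The new .also_non_np/.np/.tap entries in testmgr.h ask the tester to additionally run these ChaCha20 vectors with the input split across three scatterlist segments, which exercises the partial-block handling added to crypto_chacha20_crypt() earlier in this diff; the segment lengths in .tap are expected to add up to the vector's .rlen. A stand-alone arithmetic check of the two new splits (illustrative only, not kernel code):

#include <assert.h>

/* Verify the .tap splits cover the whole test vector (.rlen). */
int main(void)
{
	unsigned int tap_375[]  = { 375 - 20, 4, 16 };	/* .rlen = 375  */
	unsigned int tap_1281[] = { 1200, 1, 80 };	/* .rlen = 1281 */
	unsigned int i, sum375 = 0, sum1281 = 0;

	for (i = 0; i < 3; i++) {
		sum375  += tap_375[i];
		sum1281 += tap_1281[i];
	}
	assert(sum375 == 375);
	assert(sum1281 == 1281);
	return 0;
}
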