Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig               |  34
-rw-r--r--  crypto/Makefile              |  19
-rw-r--r--  crypto/ablkcipher.c          |   3
-rw-r--r--  crypto/af_alg.c              | 483
-rw-r--r--  crypto/algif_hash.c          | 319
-rw-r--r--  crypto/algif_skcipher.c      | 632
-rw-r--r--  crypto/ansi_cprng.c          |   2
-rw-r--r--  crypto/async_tx/async_xor.c  |   2
-rw-r--r--  crypto/authenc.c             |  22
-rw-r--r--  crypto/authencesn.c          | 835
-rw-r--r--  crypto/cast5.c               |  74
-rw-r--r--  crypto/crypto_wq.c           |   3
-rw-r--r--  crypto/deflate.c             |   4
-rw-r--r--  crypto/eseqiv.c              |  18
-rw-r--r--  crypto/gcm.c                 |  19
-rw-r--r--  crypto/gf128mul.c            |   2
-rw-r--r--  crypto/pcrypt.c              |   3
-rw-r--r--  crypto/rmd128.c              |   3
-rw-r--r--  crypto/rmd160.c              |   3
-rw-r--r--  crypto/rmd256.c              |   3
-rw-r--r--  crypto/rmd320.c              |   3
-rw-r--r--  crypto/shash.c               |   8
-rw-r--r--  crypto/tcrypt.c              |  14
-rw-r--r--  crypto/testmgr.c             |  26
-rw-r--r--  crypto/testmgr.h             | 391
-rw-r--r--  crypto/vmac.c                |   2
-rw-r--r--  crypto/xts.c                 |   2
-rw-r--r--  crypto/zlib.c                |  21
28 files changed, 2808 insertions(+), 142 deletions(-)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index e4bac29a32e7..4b7cb0e691cd 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -110,7 +110,6 @@ config CRYPTO_MANAGER_DISABLE_TESTS
config CRYPTO_GF128MUL
tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
- depends on EXPERIMENTAL
help
Efficient table driven implementation of multiplications in the
field GF(2^128). This is needed by some cipher modes. This
@@ -539,8 +538,9 @@ config CRYPTO_AES_X86_64
config CRYPTO_AES_NI_INTEL
tristate "AES cipher algorithms (AES-NI)"
- depends on (X86 || UML_X86) && 64BIT
- select CRYPTO_AES_X86_64
+ depends on (X86 || UML_X86)
+ select CRYPTO_AES_X86_64 if 64BIT
+ select CRYPTO_AES_586 if !64BIT
select CRYPTO_CRYPTD
select CRYPTO_ALGAPI
select CRYPTO_FPU
@@ -563,9 +563,10 @@ config CRYPTO_AES_NI_INTEL
See <http://csrc.nist.gov/encryption/aes/> for more information.
- In addition to AES cipher algorithm support, the
- acceleration for some popular block cipher mode is supported
- too, including ECB, CBC, CTR, LRW, PCBC, XTS.
+ In addition to AES cipher algorithm support, acceleration for
+ some popular block cipher modes is also supported, including
+ ECB, CBC, LRW, PCBC, and XTS. The 64-bit version has additional
+ acceleration for CTR.
config CRYPTO_ANUBIS
tristate "Anubis cipher algorithm"
@@ -841,6 +842,27 @@ config CRYPTO_ANSI_CPRNG
ANSI X9.31 A.2.4. Note that this option must be enabled if
CRYPTO_FIPS is selected
+config CRYPTO_USER_API
+ tristate
+
+config CRYPTO_USER_API_HASH
+ tristate "User-space interface for hash algorithms"
+ depends on NET
+ select CRYPTO_HASH
+ select CRYPTO_USER_API
+ help
+ This option enables the user-space interface for hash
+ algorithms.
+
+config CRYPTO_USER_API_SKCIPHER
+ tristate "User-space interface for symmetric key cipher algorithms"
+ depends on NET
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_USER_API
+ help
+ This option enables the user-space interface for symmetric
+ key cipher algorithms.
+
source "drivers/crypto/Kconfig"
endif # if CRYPTO
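
The two new options expose kernel transforms through a new socket family. A minimal user-space sketch of the flow they enable, error handling mostly elided; AF_ALG (value 38) and struct sockaddr_alg come from the if_alg.h header added elsewhere in this series, so they are declared locally here:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_ALG
#define AF_ALG 38	/* not yet in older libc headers */
#endif

/* Mirrors struct sockaddr_alg from the new <linux/if_alg.h>. */
struct sockaddr_alg {
	unsigned short	salg_family;
	unsigned char	salg_type[14];
	unsigned int	salg_feat;
	unsigned int	salg_mask;
	unsigned char	salg_name[64];
};

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",	/* matched against af_alg_type.name */
		.salg_name   = "sha1",	/* any algorithm the kernel knows */
	};
	int tfmfd, opfd;

	/* The parent socket selects the transform (alg_bind below). */
	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa))) {
		perror("bind");
		return 1;
	}

	/* Each accept() yields an operation socket (af_alg_accept below). */
	opfd = accept(tfmfd, NULL, 0);
	if (opfd < 0) {
		perror("accept");
		return 1;
	}

	close(opfd);
	close(tfmfd);
	return 0;
}
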
diff --git a/crypto/Makefile b/crypto/Makefile
index 423b7de61f93..ce5a813d3639 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -3,32 +3,32 @@
#
obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-objs := api.o cipher.o compress.o
+crypto-y := api.o cipher.o compress.o
obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
obj-$(CONFIG_CRYPTO_FIPS) += fips.o
crypto_algapi-$(CONFIG_PROC_FS) += proc.o
-crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y)
+crypto_algapi-y := algapi.o scatterwalk.o $(crypto_algapi-y)
obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o
obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
-crypto_blkcipher-objs := ablkcipher.o
-crypto_blkcipher-objs += blkcipher.o
+crypto_blkcipher-y := ablkcipher.o
+crypto_blkcipher-y += blkcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
-crypto_hash-objs += ahash.o
-crypto_hash-objs += shash.o
+crypto_hash-y += ahash.o
+crypto_hash-y += shash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
-cryptomgr-objs := algboss.o testmgr.o
+cryptomgr-y := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
@@ -78,13 +78,16 @@ obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
-obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
+obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_RNG2) += krng.o
obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
+obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
+obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
+obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
#
# generic algorithms and the async_tx api
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index a854df2a5a4b..fdc67d38660b 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -141,8 +141,7 @@ err:
if (walk->iv != req->info)
memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
- if (walk->iv_buffer)
- kfree(walk->iv_buffer);
+ kfree(walk->iv_buffer);
return err;
}
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
new file mode 100644
index 000000000000..940d70cb5c25
--- /dev/null
+++ b/crypto/af_alg.c
@@ -0,0 +1,483 @@
+/*
+ * af_alg: User-space algorithm interface
+ *
+ * This file provides the user-space API for algorithms.
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <asm/atomic.h>
+#include <crypto/if_alg.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/rwsem.h>
+
+struct alg_type_list {
+ const struct af_alg_type *type;
+ struct list_head list;
+};
+
+static atomic_long_t alg_memory_allocated;
+
+static struct proto alg_proto = {
+ .name = "ALG",
+ .owner = THIS_MODULE,
+ .memory_allocated = &alg_memory_allocated,
+ .obj_size = sizeof(struct alg_sock),
+};
+
+static LIST_HEAD(alg_types);
+static DECLARE_RWSEM(alg_types_sem);
+
+static const struct af_alg_type *alg_get_type(const char *name)
+{
+ const struct af_alg_type *type = ERR_PTR(-ENOENT);
+ struct alg_type_list *node;
+
+ down_read(&alg_types_sem);
+ list_for_each_entry(node, &alg_types, list) {
+ if (strcmp(node->type->name, name))
+ continue;
+
+ if (try_module_get(node->type->owner))
+ type = node->type;
+ break;
+ }
+ up_read(&alg_types_sem);
+
+ return type;
+}
+
+int af_alg_register_type(const struct af_alg_type *type)
+{
+ struct alg_type_list *node;
+ int err = -EEXIST;
+
+ down_write(&alg_types_sem);
+ list_for_each_entry(node, &alg_types, list) {
+ if (!strcmp(node->type->name, type->name))
+ goto unlock;
+ }
+
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!node)
+ goto unlock;
+
+ type->ops->owner = THIS_MODULE;
+ node->type = type;
+ list_add(&node->list, &alg_types);
+ err = 0;
+
+unlock:
+ up_write(&alg_types_sem);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_register_type);
+
+int af_alg_unregister_type(const struct af_alg_type *type)
+{
+ struct alg_type_list *node;
+ int err = -ENOENT;
+
+ down_write(&alg_types_sem);
+ list_for_each_entry(node, &alg_types, list) {
+ if (strcmp(node->type->name, type->name))
+ continue;
+
+ list_del(&node->list);
+ kfree(node);
+ err = 0;
+ break;
+ }
+ up_write(&alg_types_sem);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_unregister_type);
+
+static void alg_do_release(const struct af_alg_type *type, void *private)
+{
+ if (!type)
+ return;
+
+ type->release(private);
+ module_put(type->owner);
+}
+
+int af_alg_release(struct socket *sock)
+{
+ if (sock->sk)
+ sock_put(sock->sk);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(af_alg_release);
+
+static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct sockaddr_alg *sa = (void *)uaddr;
+ const struct af_alg_type *type;
+ void *private;
+
+ if (sock->state == SS_CONNECTED)
+ return -EINVAL;
+
+ if (addr_len != sizeof(*sa))
+ return -EINVAL;
+
+ sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
+ sa->salg_name[sizeof(sa->salg_name) - 1] = 0;
+
+ type = alg_get_type(sa->salg_type);
+ if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) {
+ request_module("algif-%s", sa->salg_type);
+ type = alg_get_type(sa->salg_type);
+ }
+
+ if (IS_ERR(type))
+ return PTR_ERR(type);
+
+ private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
+ if (IS_ERR(private)) {
+ module_put(type->owner);
+ return PTR_ERR(private);
+ }
+
+ lock_sock(sk);
+
+ swap(ask->type, type);
+ swap(ask->private, private);
+
+ release_sock(sk);
+
+ alg_do_release(type, private);
+
+ return 0;
+}
+
+static int alg_setkey(struct sock *sk, char __user *ukey,
+ unsigned int keylen)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ const struct af_alg_type *type = ask->type;
+ u8 *key;
+ int err;
+
+ key = sock_kmalloc(sk, keylen, GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
+ err = -EFAULT;
+ if (copy_from_user(key, ukey, keylen))
+ goto out;
+
+ err = type->setkey(ask->private, key, keylen);
+
+out:
+ sock_kfree_s(sk, key, keylen);
+
+ return err;
+}
+
+static int alg_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ const struct af_alg_type *type;
+ int err = -ENOPROTOOPT;
+
+ lock_sock(sk);
+ type = ask->type;
+
+ if (level != SOL_ALG || !type)
+ goto unlock;
+
+ switch (optname) {
+ case ALG_SET_KEY:
+ if (sock->state == SS_CONNECTED)
+ goto unlock;
+ if (!type->setkey)
+ goto unlock;
+
+ err = alg_setkey(sk, optval, optlen);
+ }
+
+unlock:
+ release_sock(sk);
+
+ return err;
+}
+
+int af_alg_accept(struct sock *sk, struct socket *newsock)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ const struct af_alg_type *type;
+ struct sock *sk2;
+ int err;
+
+ lock_sock(sk);
+ type = ask->type;
+
+ err = -EINVAL;
+ if (!type)
+ goto unlock;
+
+ sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto);
+ err = -ENOMEM;
+ if (!sk2)
+ goto unlock;
+
+ sock_init_data(newsock, sk2);
+ sock_graft(sk2, newsock);
+
+ err = type->accept(ask->private, sk2);
+ if (err) {
+ sk_free(sk2);
+ goto unlock;
+ }
+
+ sk2->sk_family = PF_ALG;
+
+ sock_hold(sk);
+ alg_sk(sk2)->parent = sk;
+ alg_sk(sk2)->type = type;
+
+ newsock->ops = type->ops;
+ newsock->state = SS_CONNECTED;
+
+ err = 0;
+
+unlock:
+ release_sock(sk);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_accept);
+
+static int alg_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+ return af_alg_accept(sock->sk, newsock);
+}
+
+static const struct proto_ops alg_proto_ops = {
+ .family = PF_ALG,
+ .owner = THIS_MODULE,
+
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .getsockopt = sock_no_getsockopt,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+ .sendmsg = sock_no_sendmsg,
+ .recvmsg = sock_no_recvmsg,
+ .poll = sock_no_poll,
+
+ .bind = alg_bind,
+ .release = af_alg_release,
+ .setsockopt = alg_setsockopt,
+ .accept = alg_accept,
+};
+
+static void alg_sock_destruct(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+
+ alg_do_release(ask->type, ask->private);
+}
+
+static int alg_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+{
+ struct sock *sk;
+ int err;
+
+ if (sock->type != SOCK_SEQPACKET)
+ return -ESOCKTNOSUPPORT;
+ if (protocol != 0)
+ return -EPROTONOSUPPORT;
+
+ err = -ENOMEM;
+ sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto);
+ if (!sk)
+ goto out;
+
+ sock->ops = &alg_proto_ops;
+ sock_init_data(sock, sk);
+
+ sk->sk_family = PF_ALG;
+ sk->sk_destruct = alg_sock_destruct;
+
+ return 0;
+out:
+ return err;
+}
+
+static const struct net_proto_family alg_family = {
+ .family = PF_ALG,
+ .create = alg_create,
+ .owner = THIS_MODULE,
+};
+
+int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
+ int write)
+{
+ unsigned long from = (unsigned long)addr;
+ unsigned long npages;
+ unsigned off;
+ int err;
+ int i;
+
+ err = -EFAULT;
+ if (!access_ok(write ? VERIFY_READ : VERIFY_WRITE, addr, len))
+ goto out;
+
+ off = from & ~PAGE_MASK;
+ npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (npages > ALG_MAX_PAGES)
+ npages = ALG_MAX_PAGES;
+
+ err = get_user_pages_fast(from, npages, write, sgl->pages);
+ if (err < 0)
+ goto out;
+
+ npages = err;
+ err = -EINVAL;
+ if (WARN_ON(npages == 0))
+ goto out;
+
+ err = 0;
+
+ sg_init_table(sgl->sg, npages);
+
+ for (i = 0; i < npages; i++) {
+ int plen = min_t(int, len, PAGE_SIZE - off);
+
+ sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
+
+ off = 0;
+ len -= plen;
+ err += plen;
+ }
+
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_make_sg);
+
+void af_alg_free_sg(struct af_alg_sgl *sgl)
+{
+ int i;
+
+ i = 0;
+ do {
+ put_page(sgl->pages[i]);
+ } while (!sg_is_last(sgl->sg + (i++)));
+}
+EXPORT_SYMBOL_GPL(af_alg_free_sg);
+
+int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
+{
+ struct cmsghdr *cmsg;
+
+ for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ if (!CMSG_OK(msg, cmsg))
+ return -EINVAL;
+ if (cmsg->cmsg_level != SOL_ALG)
+ continue;
+
+ switch(cmsg->cmsg_type) {
+ case ALG_SET_IV:
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv)))
+ return -EINVAL;
+ con->iv = (void *)CMSG_DATA(cmsg);
+ if (cmsg->cmsg_len < CMSG_LEN(con->iv->ivlen +
+ sizeof(*con->iv)))
+ return -EINVAL;
+ break;
+
+ case ALG_SET_OP:
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
+ return -EINVAL;
+ con->op = *(u32 *)CMSG_DATA(cmsg);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
+
+int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)
+{
+ switch (err) {
+ case -EINPROGRESS:
+ case -EBUSY:
+ wait_for_completion(&completion->completion);
+ INIT_COMPLETION(completion->completion);
+ err = completion->err;
+ break;
+ };
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_wait_for_completion);
+
+void af_alg_complete(struct crypto_async_request *req, int err)
+{
+ struct af_alg_completion *completion = req->data;
+
+ completion->err = err;
+ complete(&completion->completion);
+}
+EXPORT_SYMBOL_GPL(af_alg_complete);
+
+static int __init af_alg_init(void)
+{
+ int err = proto_register(&alg_proto, 0);
+
+ if (err)
+ goto out;
+
+ err = sock_register(&alg_family);
+ if (err != 0)
+ goto out_unregister_proto;
+
+out:
+ return err;
+
+out_unregister_proto:
+ proto_unregister(&alg_proto);
+ goto out;
+}
+
+static void __exit af_alg_exit(void)
+{
+ sock_unregister(PF_ALG);
+ proto_unregister(&alg_proto);
+}
+
+module_init(af_alg_init);
+module_exit(af_alg_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(AF_ALG);
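
af_alg_cmsg_send() above defines the control-message protocol that type-specific front ends consume. A sketch of the matching sender side, with the SOL_ALG (value 279), ALG_SET_* and struct af_alg_iv definitions copied here from the new if_alg.h; the helper name alg_fill_control is this sketch's own and is reused by the skcipher example after algif_skcipher.c below:

#include <string.h>
#include <sys/socket.h>

#ifndef SOL_ALG
#define SOL_ALG		279
#endif
#define ALG_SET_IV	2
#define ALG_SET_OP	3
#define ALG_OP_ENCRYPT	1

/* Mirrors struct af_alg_iv from the new <linux/if_alg.h>. */
struct af_alg_iv {
	unsigned int	ivlen;
	unsigned char	iv[0];
};

/* Attach ALG_SET_OP and ALG_SET_IV cmsgs to msg; cbuf must hold
 * CMSG_SPACE(sizeof(unsigned int)) +
 * CMSG_SPACE(sizeof(struct af_alg_iv) + ivlen) bytes. */
static void alg_fill_control(struct msghdr *msg, void *cbuf, size_t cbuflen,
			     const unsigned char *iv, unsigned int ivlen)
{
	struct cmsghdr *cmsg;
	struct af_alg_iv *aiv;

	memset(cbuf, 0, cbuflen);
	msg->msg_control = cbuf;
	msg->msg_controllen = cbuflen;

	/* Operation first: af_alg_cmsg_send() reads a single u32. */
	cmsg = CMSG_FIRSTHDR(msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned int));
	*(unsigned int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	/* Then the IV, length-prefixed exactly as the kernel checks it. */
	cmsg = CMSG_NXTHDR(msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(*aiv) + ivlen);
	aiv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	aiv->ivlen = ivlen;
	memcpy(aiv->iv, iv, ivlen);
}
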
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
new file mode 100644
index 000000000000..62122a1a2f7a
--- /dev/null
+++ b/crypto/algif_hash.c
@@ -0,0 +1,319 @@
+/*
+ * algif_hash: User-space interface for hash algorithms
+ *
+ * This file provides the user-space API for hash algorithms.
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/hash.h>
+#include <crypto/if_alg.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <net/sock.h>
+
+struct hash_ctx {
+ struct af_alg_sgl sgl;
+
+ u8 *result;
+
+ struct af_alg_completion completion;
+
+ unsigned int len;
+ bool more;
+
+ struct ahash_request req;
+};
+
+static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
+ struct msghdr *msg, size_t ignored)
+{
+ int limit = ALG_MAX_PAGES * PAGE_SIZE;
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct hash_ctx *ctx = ask->private;
+ unsigned long iovlen;
+ struct iovec *iov;
+ long copied = 0;
+ int err;
+
+ if (limit > sk->sk_sndbuf)
+ limit = sk->sk_sndbuf;
+
+ lock_sock(sk);
+ if (!ctx->more) {
+ err = crypto_ahash_init(&ctx->req);
+ if (err)
+ goto unlock;
+ }
+
+ ctx->more = 0;
+
+ for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+ iovlen--, iov++) {
+ unsigned long seglen = iov->iov_len;
+ char __user *from = iov->iov_base;
+
+ while (seglen) {
+ int len = min_t(unsigned long, seglen, limit);
+ int newlen;
+
+ newlen = af_alg_make_sg(&ctx->sgl, from, len, 0);
+ if (newlen < 0)
+ goto unlock;
+
+ ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL,
+ newlen);
+
+ err = af_alg_wait_for_completion(
+ crypto_ahash_update(&ctx->req),
+ &ctx->completion);
+
+ af_alg_free_sg(&ctx->sgl);
+
+ if (err)
+ goto unlock;
+
+ seglen -= newlen;
+ from += newlen;
+ copied += newlen;
+ }
+ }
+
+ err = 0;
+
+ ctx->more = msg->msg_flags & MSG_MORE;
+ if (!ctx->more) {
+ ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
+ err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
+ &ctx->completion);
+ }
+
+unlock:
+ release_sock(sk);
+
+ return err ?: copied;
+}
+
+static ssize_t hash_sendpage(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct hash_ctx *ctx = ask->private;
+ int err;
+
+ lock_sock(sk);
+ sg_init_table(ctx->sgl.sg, 1);
+ sg_set_page(ctx->sgl.sg, page, size, offset);
+
+ ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
+
+ if (!(flags & MSG_MORE)) {
+ if (ctx->more)
+ err = crypto_ahash_finup(&ctx->req);
+ else
+ err = crypto_ahash_digest(&ctx->req);
+ } else {
+ if (!ctx->more) {
+ err = crypto_ahash_init(&ctx->req);
+ if (err)
+ goto unlock;
+ }
+
+ err = crypto_ahash_update(&ctx->req);
+ }
+
+ err = af_alg_wait_for_completion(err, &ctx->completion);
+ if (err)
+ goto unlock;
+
+ ctx->more = flags & MSG_MORE;
+
+unlock:
+ release_sock(sk);
+
+ return err ?: size;
+}
+
+static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
+ struct msghdr *msg, size_t len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct hash_ctx *ctx = ask->private;
+ unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+ int err;
+
+ if (len > ds)
+ len = ds;
+ else if (len < ds)
+ msg->msg_flags |= MSG_TRUNC;
+
+ lock_sock(sk);
+ if (ctx->more) {
+ ctx->more = 0;
+ ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
+ err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
+ &ctx->completion);
+ if (err)
+ goto unlock;
+ }
+
+ err = memcpy_toiovec(msg->msg_iov, ctx->result, len);
+
+unlock:
+ release_sock(sk);
+
+ return err ?: len;
+}
+
+static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct hash_ctx *ctx = ask->private;
+ struct ahash_request *req = &ctx->req;
+ char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
+ struct sock *sk2;
+ struct alg_sock *ask2;
+ struct hash_ctx *ctx2;
+ int err;
+
+ err = crypto_ahash_export(req, state);
+ if (err)
+ return err;
+
+ err = af_alg_accept(ask->parent, newsock);
+ if (err)
+ return err;
+
+ sk2 = newsock->sk;
+ ask2 = alg_sk(sk2);
+ ctx2 = ask2->private;
+ ctx2->more = 1;
+
+ err = crypto_ahash_import(&ctx2->req, state);
+ if (err) {
+ sock_orphan(sk2);
+ sock_put(sk2);
+ }
+
+ return err;
+}
+
+static struct proto_ops algif_hash_ops = {
+ .family = PF_ALG,
+
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .getsockopt = sock_no_getsockopt,
+ .mmap = sock_no_mmap,
+ .bind = sock_no_bind,
+ .setsockopt = sock_no_setsockopt,
+ .poll = sock_no_poll,
+
+ .release = af_alg_release,
+ .sendmsg = hash_sendmsg,
+ .sendpage = hash_sendpage,
+ .recvmsg = hash_recvmsg,
+ .accept = hash_accept,
+};
+
+static void *hash_bind(const char *name, u32 type, u32 mask)
+{
+ return crypto_alloc_ahash(name, type, mask);
+}
+
+static void hash_release(void *private)
+{
+ crypto_free_ahash(private);
+}
+
+static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
+{
+ return crypto_ahash_setkey(private, key, keylen);
+}
+
+static void hash_sock_destruct(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct hash_ctx *ctx = ask->private;
+
+ sock_kfree_s(sk, ctx->result,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
+ sock_kfree_s(sk, ctx, ctx->len);
+ af_alg_release_parent(sk);
+}
+
+static int hash_accept_parent(void *private, struct sock *sk)
+{
+ struct hash_ctx *ctx;
+ struct alg_sock *ask = alg_sk(sk);
+ unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
+ unsigned ds = crypto_ahash_digestsize(private);
+
+ ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
+ if (!ctx->result) {
+ sock_kfree_s(sk, ctx, len);
+ return -ENOMEM;
+ }
+
+ memset(ctx->result, 0, ds);
+
+ ctx->len = len;
+ ctx->more = 0;
+ af_alg_init_completion(&ctx->completion);
+
+ ask->private = ctx;
+
+ ahash_request_set_tfm(&ctx->req, private);
+ ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ af_alg_complete, &ctx->completion);
+
+ sk->sk_destruct = hash_sock_destruct;
+
+ return 0;
+}
+
+static const struct af_alg_type algif_type_hash = {
+ .bind = hash_bind,
+ .release = hash_release,
+ .setkey = hash_setkey,
+ .accept = hash_accept_parent,
+ .ops = &algif_hash_ops,
+ .name = "hash",
+ .owner = THIS_MODULE
+};
+
+static int __init algif_hash_init(void)
+{
+ return af_alg_register_type(&algif_type_hash);
+}
+
+static void __exit algif_hash_exit(void)
+{
+ int err = af_alg_unregister_type(&algif_type_hash);
+ BUG_ON(err);
+}
+
+module_init(algif_hash_init);
+module_exit(algif_hash_exit);
+MODULE_LICENSE("GPL");
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
new file mode 100644
index 000000000000..6a6dfc062d2a
--- /dev/null
+++ b/crypto/algif_skcipher.c
@@ -0,0 +1,632 @@
+/*
+ * algif_skcipher: User-space interface for skcipher algorithms
+ *
+ * This file provides the user-space API for symmetric key ciphers.
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <crypto/if_alg.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <net/sock.h>
+
+struct skcipher_sg_list {
+ struct list_head list;
+
+ int cur;
+
+ struct scatterlist sg[0];
+};
+
+struct skcipher_ctx {
+ struct list_head tsgl;
+ struct af_alg_sgl rsgl;
+
+ void *iv;
+
+ struct af_alg_completion completion;
+
+ unsigned used;
+
+ unsigned int len;
+ bool more;
+ bool merge;
+ bool enc;
+
+ struct ablkcipher_request req;
+};
+
+#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
+ sizeof(struct scatterlist) - 1)
+
+static inline int skcipher_sndbuf(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+
+ return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
+ ctx->used, 0);
+}
+
+static inline bool skcipher_writable(struct sock *sk)
+{
+ return PAGE_SIZE <= skcipher_sndbuf(sk);
+}
+
+static int skcipher_alloc_sgl(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+ struct skcipher_sg_list *sgl;
+ struct scatterlist *sg = NULL;
+
+ sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+ if (!list_empty(&ctx->tsgl))
+ sg = sgl->sg;
+
+ if (!sg || sgl->cur >= MAX_SGL_ENTS) {
+ sgl = sock_kmalloc(sk, sizeof(*sgl) +
+ sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
+ GFP_KERNEL);
+ if (!sgl)
+ return -ENOMEM;
+
+ sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+ sgl->cur = 0;
+
+ if (sg)
+ scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+
+ list_add_tail(&sgl->list, &ctx->tsgl);
+ }
+
+ return 0;
+}
+
+static void skcipher_pull_sgl(struct sock *sk, int used)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+ struct skcipher_sg_list *sgl;
+ struct scatterlist *sg;
+ int i;
+
+ while (!list_empty(&ctx->tsgl)) {
+ sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
+ list);
+ sg = sgl->sg;
+
+ for (i = 0; i < sgl->cur; i++) {
+ int plen = min_t(int, used, sg[i].length);
+
+ if (!sg_page(sg + i))
+ continue;
+
+ sg[i].length -= plen;
+ sg[i].offset += plen;
+
+ used -= plen;
+ ctx->used -= plen;
+
+ if (sg[i].length)
+ return;
+
+ put_page(sg_page(sg + i));
+ sg_assign_page(sg + i, NULL);
+ }
+
+ list_del(&sgl->list);
+ sock_kfree_s(sk, sgl,
+ sizeof(*sgl) + sizeof(sgl->sg[0]) *
+ (MAX_SGL_ENTS + 1));
+ }
+
+ if (!ctx->used)
+ ctx->merge = 0;
+}
+
+static void skcipher_free_sgl(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+
+ skcipher_pull_sgl(sk, ctx->used);
+}
+
+static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
+{
+ long timeout;
+ DEFINE_WAIT(wait);
+ int err = -ERESTARTSYS;
+
+ if (flags & MSG_DONTWAIT)
+ return -EAGAIN;
+
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+
+ for (;;) {
+ if (signal_pending(current))
+ break;
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
+ err = 0;
+ break;
+ }
+ }
+ finish_wait(sk_sleep(sk), &wait);
+
+ return err;
+}
+
+static void skcipher_wmem_wakeup(struct sock *sk)
+{
+ struct socket_wq *wq;
+
+ if (!skcipher_writable(sk))
+ return;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+ POLLRDNORM |
+ POLLRDBAND);
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ rcu_read_unlock();
+}
+
+static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+ long timeout;
+ DEFINE_WAIT(wait);
+ int err = -ERESTARTSYS;
+
+ if (flags & MSG_DONTWAIT) {
+ return -EAGAIN;
+ }
+
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+
+ for (;;) {
+ if (signal_pending(current))
+ break;
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (sk_wait_event(sk, &timeout, ctx->used)) {
+ err = 0;
+ break;
+ }
+ }
+ finish_wait(sk_sleep(sk), &wait);
+
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+
+ return err;
+}
+
+static void skcipher_data_wakeup(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+ struct socket_wq *wq;
+
+ if (!ctx->used)
+ return;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
+ POLLRDNORM |
+ POLLRDBAND);
+ sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ rcu_read_unlock();
+}
+
+static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
+ struct msghdr *msg, size_t size)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
+ unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+ struct skcipher_sg_list *sgl;
+ struct af_alg_control con = {};
+ long copied = 0;
+ bool enc = 0;
+ int err;
+ int i;
+
+ if (msg->msg_controllen) {
+ err = af_alg_cmsg_send(msg, &con);
+ if (err)
+ return err;
+
+ switch (con.op) {
+ case ALG_OP_ENCRYPT:
+ enc = 1;
+ break;
+ case ALG_OP_DECRYPT:
+ enc = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (con.iv && con.iv->ivlen != ivsize)
+ return -EINVAL;
+ }
+
+ err = -EINVAL;
+
+ lock_sock(sk);
+ if (!ctx->more && ctx->used)
+ goto unlock;
+
+ if (!ctx->used) {
+ ctx->enc = enc;
+ if (con.iv)
+ memcpy(ctx->iv, con.iv->iv, ivsize);
+ }
+
+ while (size) {
+ struct scatterlist *sg;
+ unsigned long len = size;
+ int plen;
+
+ if (ctx->merge) {
+ sgl = list_entry(ctx->tsgl.prev,
+ struct skcipher_sg_list, list);
+ sg = sgl->sg + sgl->cur - 1;
+ len = min_t(unsigned long, len,
+ PAGE_SIZE - sg->offset - sg->length);
+
+ err = memcpy_fromiovec(page_address(sg_page(sg)) +
+ sg->offset + sg->length,
+ msg->msg_iov, len);
+ if (err)
+ goto unlock;
+
+ sg->length += len;
+ ctx->merge = (sg->offset + sg->length) &
+ (PAGE_SIZE - 1);
+
+ ctx->used += len;
+ copied += len;
+ size -= len;
+ continue;
+ }
+
+ if (!skcipher_writable(sk)) {
+ err = skcipher_wait_for_wmem(sk, msg->msg_flags);
+ if (err)
+ goto unlock;
+ }
+
+ len = min_t(unsigned long, len, skcipher_sndbuf(sk));
+
+ err = skcipher_alloc_sgl(sk);
+ if (err)
+ goto unlock;
+
+ sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+ sg = sgl->sg;
+ do {
+ i = sgl->cur;
+ plen = min_t(int, len, PAGE_SIZE);
+
+ sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
+ err = -ENOMEM;
+ if (!sg_page(sg + i))
+ goto unlock;
+
+ err = memcpy_fromiovec(page_address(sg_page(sg + i)),
+ msg->msg_iov, plen);
+ if (err) {
+ __free_page(sg_page(sg + i));
+ sg_assign_page(sg + i, NULL);
+ goto unlock;
+ }
+
+ sg[i].length = plen;
+ len -= plen;
+ ctx->used += plen;
+ copied += plen;
+ size -= plen;
+ sgl->cur++;
+ } while (len && sgl->cur < MAX_SGL_ENTS);
+
+ ctx->merge = plen & (PAGE_SIZE - 1);
+ }
+
+ err = 0;
+
+ ctx->more = msg->msg_flags & MSG_MORE;
+ if (!ctx->more && !list_empty(&ctx->tsgl))
+ sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+
+unlock:
+ skcipher_data_wakeup(sk);
+ release_sock(sk);
+
+ return copied ?: err;
+}
+
+static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+ struct skcipher_sg_list *sgl;
+ int err = -EINVAL;
+
+ lock_sock(sk);
+ if (!ctx->more && ctx->used)
+ goto unlock;
+
+ if (!size)
+ goto done;
+
+ if (!skcipher_writable(sk)) {
+ err = skcipher_wait_for_wmem(sk, flags);
+ if (err)
+ goto unlock;
+ }
+
+ err = skcipher_alloc_sgl(sk);
+ if (err)
+ goto unlock;
+
+ ctx->merge = 0;
+ sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+
+ get_page(page);
+ sg_set_page(sgl->sg + sgl->cur, page, size, offset);
+ sgl->cur++;
+ ctx->used += size;
+
+done:
+ ctx->more = flags & MSG_MORE;
+ if (!ctx->more && !list_empty(&ctx->tsgl))
+ sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+
+unlock:
+ skcipher_data_wakeup(sk);
+ release_sock(sk);
+
+ return err ?: size;
+}
+
+static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
+ struct msghdr *msg, size_t ignored, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+ unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
+ &ctx->req));
+ struct skcipher_sg_list *sgl;
+ struct scatterlist *sg;
+ unsigned long iovlen;
+ struct iovec *iov;
+ int err = -EAGAIN;
+ int used;
+ long copied = 0;
+
+ lock_sock(sk);
+ for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+ iovlen--, iov++) {
+ unsigned long seglen = iov->iov_len;
+ char __user *from = iov->iov_base;
+
+ while (seglen) {
+ sgl = list_first_entry(&ctx->tsgl,
+ struct skcipher_sg_list, list);
+ sg = sgl->sg;
+
+ while (!sg->length)
+ sg++;
+
+ used = ctx->used;
+ if (!used) {
+ err = skcipher_wait_for_data(sk, flags);
+ if (err)
+ goto unlock;
+ }
+
+ used = min_t(unsigned long, used, seglen);
+
+ used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
+ err = used;
+ if (err < 0)
+ goto unlock;
+
+ if (ctx->more || used < ctx->used)
+ used -= used % bs;
+
+ err = -EINVAL;
+ if (!used)
+ goto free;
+
+ ablkcipher_request_set_crypt(&ctx->req, sg,
+ ctx->rsgl.sg, used,
+ ctx->iv);
+
+ err = af_alg_wait_for_completion(
+ ctx->enc ?
+ crypto_ablkcipher_encrypt(&ctx->req) :
+ crypto_ablkcipher_decrypt(&ctx->req),
+ &ctx->completion);
+
+free:
+ af_alg_free_sg(&ctx->rsgl);
+
+ if (err)
+ goto unlock;
+
+ copied += used;
+ from += used;
+ seglen -= used;
+ skcipher_pull_sgl(sk, used);
+ }
+ }
+
+ err = 0;
+
+unlock:
+ skcipher_wmem_wakeup(sk);
+ release_sock(sk);
+
+ return copied ?: err;
+}
+
+
+static unsigned int skcipher_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+ unsigned int mask;
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
+
+ if (ctx->used)
+ mask |= POLLIN | POLLRDNORM;
+
+ if (skcipher_writable(sk))
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+
+ return mask;
+}
+
+static struct proto_ops algif_skcipher_ops = {
+ .family = PF_ALG,
+
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .getsockopt = sock_no_getsockopt,
+ .mmap = sock_no_mmap,
+ .bind = sock_no_bind,
+ .accept = sock_no_accept,
+ .setsockopt = sock_no_setsockopt,
+
+ .release = af_alg_release,
+ .sendmsg = skcipher_sendmsg,
+ .sendpage = skcipher_sendpage,
+ .recvmsg = skcipher_recvmsg,
+ .poll = skcipher_poll,
+};
+
+static void *skcipher_bind(const char *name, u32 type, u32 mask)
+{
+ return crypto_alloc_ablkcipher(name, type, mask);
+}
+
+static void skcipher_release(void *private)
+{
+ crypto_free_ablkcipher(private);
+}
+
+static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
+{
+ return crypto_ablkcipher_setkey(private, key, keylen);
+}
+
+static void skcipher_sock_destruct(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
+
+ skcipher_free_sgl(sk);
+ sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
+ sock_kfree_s(sk, ctx, ctx->len);
+ af_alg_release_parent(sk);
+}
+
+static int skcipher_accept_parent(void *private, struct sock *sk)
+{
+ struct skcipher_ctx *ctx;
+ struct alg_sock *ask = alg_sk(sk);
+ unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
+
+ ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
+ GFP_KERNEL);
+ if (!ctx->iv) {
+ sock_kfree_s(sk, ctx, len);
+ return -ENOMEM;
+ }
+
+ memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
+
+ INIT_LIST_HEAD(&ctx->tsgl);
+ ctx->len = len;
+ ctx->used = 0;
+ ctx->more = 0;
+ ctx->merge = 0;
+ ctx->enc = 0;
+ af_alg_init_completion(&ctx->completion);
+
+ ask->private = ctx;
+
+ ablkcipher_request_set_tfm(&ctx->req, private);
+ ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ af_alg_complete, &ctx->completion);
+
+ sk->sk_destruct = skcipher_sock_destruct;
+
+ return 0;
+}
+
+static const struct af_alg_type algif_type_skcipher = {
+ .bind = skcipher_bind,
+ .release = skcipher_release,
+ .setkey = skcipher_setkey,
+ .accept = skcipher_accept_parent,
+ .ops = &algif_skcipher_ops,
+ .name = "skcipher",
+ .owner = THIS_MODULE
+};
+
+static int __init algif_skcipher_init(void)
+{
+ return af_alg_register_type(&algif_type_skcipher);
+}
+
+static void __exit algif_skcipher_exit(void)
+{
+ int err = af_alg_unregister_type(&algif_type_skcipher);
+ BUG_ON(err);
+}
+
+module_init(algif_skcipher_init);
+module_exit(algif_skcipher_exit);
+MODULE_LICENSE("GPL");
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 2bc332142849..ffa0245e2abc 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -83,7 +83,7 @@ static void xor_vectors(unsigned char *in1, unsigned char *in2,
}
/*
* Returns DEFAULT_BLK_SZ bytes of random data per call
- * returns 0 if generation succeded, <0 if something went wrong
+ * returns 0 if generation succeeded, <0 if something went wrong
*/
static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test)
{
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 079ae8ca590b..bc28337fded2 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -94,7 +94,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
if (unlikely(!tx))
async_tx_quiesce(&submit->depend_tx);
- /* spin wait for the preceeding transactions to complete */
+ /* spin wait for the preceding transactions to complete */
while (unlikely(!tx)) {
dma_async_issue_pending(chan);
tx = dma->device_prep_dma_xor(chan, dma_dest,
diff --git a/crypto/authenc.c b/crypto/authenc.c
index a5a22cfcd07b..5ef7ba6b6a76 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -107,20 +107,6 @@ badkey:
goto out;
}
-static void authenc_chain(struct scatterlist *head, struct scatterlist *sg,
- int chain)
-{
- if (chain) {
- head->length += sg->length;
- sg = scatterwalk_sg_next(sg);
- }
-
- if (sg)
- scatterwalk_sg_chain(head, 2, sg);
- else
- sg_mark_end(head);
-}
-
static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
int err)
{
@@ -345,7 +331,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
if (ivsize) {
sg_init_table(cipher, 2);
sg_set_buf(cipher, iv, ivsize);
- authenc_chain(cipher, dst, vdst == iv + ivsize);
+ scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
dst = cipher;
cryptlen += ivsize;
}
@@ -354,7 +340,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
authenc_ahash_fn = crypto_authenc_ahash;
sg_init_table(asg, 2);
sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
- authenc_chain(asg, dst, 0);
+ scatterwalk_crypto_chain(asg, dst, 0, 2);
dst = asg;
cryptlen += req->assoclen;
}
@@ -499,7 +485,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
if (ivsize) {
sg_init_table(cipher, 2);
sg_set_buf(cipher, iv, ivsize);
- authenc_chain(cipher, src, vsrc == iv + ivsize);
+ scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
src = cipher;
cryptlen += ivsize;
}
@@ -508,7 +494,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
authenc_ahash_fn = crypto_authenc_ahash;
sg_init_table(asg, 2);
sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
- authenc_chain(asg, src, 0);
+ scatterwalk_crypto_chain(asg, src, 0, 2);
src = asg;
cryptlen += req->assoclen;
}
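
The authenc_chain() helper removed above was generalized and moved into the scatterwalk API so that authencesn.c below can share it. A sketch of the equivalent logic behind scatterwalk_crypto_chain() (defined in crypto/scatterwalk.h), inferred from the removed code, with the num parameter generalizing the previously hard-coded 2:

static inline void scatterwalk_crypto_chain(struct scatterlist *head,
					    struct scatterlist *sg,
					    int chain, int num)
{
	if (chain) {
		/* Fold the first entry into head, then chain the rest. */
		head->length += sg->length;
		sg = scatterwalk_sg_next(sg);
	}

	if (sg)
		scatterwalk_sg_chain(head, num, sg);
	else
		sg_mark_end(head);
}
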
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
new file mode 100644
index 000000000000..136b68b9d8d4
--- /dev/null
+++ b/crypto/authencesn.c
@@ -0,0 +1,835 @@
+/*
+ * authencesn.c - AEAD wrapper for IPsec with extended sequence numbers,
+ * derived from authenc.c
+ *
+ * Copyright (C) 2010 secunet Security Networks AG
+ * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct authenc_esn_instance_ctx {
+ struct crypto_ahash_spawn auth;
+ struct crypto_skcipher_spawn enc;
+};
+
+struct crypto_authenc_esn_ctx {
+ unsigned int reqoff;
+ struct crypto_ahash *auth;
+ struct crypto_ablkcipher *enc;
+};
+
+struct authenc_esn_request_ctx {
+ unsigned int cryptlen;
+ unsigned int headlen;
+ unsigned int trailen;
+ struct scatterlist *sg;
+ struct scatterlist hsg[2];
+ struct scatterlist tsg[1];
+ struct scatterlist cipher[2];
+ crypto_completion_t complete;
+ crypto_completion_t update_complete;
+ crypto_completion_t update_complete2;
+ char tail[];
+};
+
+static void authenc_esn_request_complete(struct aead_request *req, int err)
+{
+ if (err != -EINPROGRESS)
+ aead_request_complete(req, err);
+}
+
+static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
+ unsigned int keylen)
+{
+ unsigned int authkeylen;
+ unsigned int enckeylen;
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct crypto_ahash *auth = ctx->auth;
+ struct crypto_ablkcipher *enc = ctx->enc;
+ struct rtattr *rta = (void *)key;
+ struct crypto_authenc_key_param *param;
+ int err = -EINVAL;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ if (keylen < enckeylen)
+ goto badkey;
+
+ authkeylen = keylen - enckeylen;
+
+ crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_ahash_setkey(auth, key, authkeylen);
+ crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &
+ CRYPTO_TFM_RES_MASK);
+
+ if (err)
+ goto out;
+
+ crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
+ crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
+ crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
+ CRYPTO_TFM_RES_MASK);
+
+out:
+ return err;
+
+badkey:
+ crypto_aead_set_flags(authenc_esn, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
+static void authenc_esn_geniv_ahash_update_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
+ areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->update_complete2, req);
+
+ err = crypto_ahash_update(ahreq);
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
+ areq_ctx->trailen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ goto out;
+
+ scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+ areq_ctx->cryptlen,
+ crypto_aead_authsize(authenc_esn), 1);
+
+out:
+ authenc_esn_request_complete(req, err);
+}
+
+static void authenc_esn_geniv_ahash_update_done2(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
+ areq_ctx->trailen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ goto out;
+
+ scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+ areq_ctx->cryptlen,
+ crypto_aead_authsize(authenc_esn), 1);
+
+out:
+ authenc_esn_request_complete(req, err);
+}
+
+
+static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+ areq_ctx->cryptlen,
+ crypto_aead_authsize(authenc_esn), 1);
+
+out:
+ aead_request_complete(req, err);
+}
+
+
+static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq,
+ int err)
+{
+ u8 *ihash;
+ unsigned int authsize;
+ struct ablkcipher_request *abreq;
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ unsigned int cryptlen = req->cryptlen;
+
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
+ areq_ctx->cryptlen);
+
+ ahash_request_set_callback(ahreq,
+ aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->update_complete2, req);
+
+ err = crypto_ahash_update(ahreq);
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
+ areq_ctx->trailen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ goto out;
+
+ authsize = crypto_aead_authsize(authenc_esn);
+ cryptlen -= authsize;
+ ihash = ahreq->result + authsize;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ if (err)
+ goto out;
+
+ abreq = aead_request_ctx(req);
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+ cryptlen, req->iv);
+
+ err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+ authenc_esn_request_complete(req, err);
+}
+
+static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *areq,
+ int err)
+{
+ u8 *ihash;
+ unsigned int authsize;
+ struct ablkcipher_request *abreq;
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ unsigned int cryptlen = req->cryptlen;
+
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
+ areq_ctx->trailen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ goto out;
+
+ authsize = crypto_aead_authsize(authenc_esn);
+ cryptlen -= authsize;
+ ihash = ahreq->result + authsize;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ if (err)
+ goto out;
+
+ abreq = aead_request_ctx(req);
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+ cryptlen, req->iv);
+
+ err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+ authenc_esn_request_complete(req, err);
+}
+
+
+static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
+ int err)
+{
+ u8 *ihash;
+ unsigned int authsize;
+ struct ablkcipher_request *abreq;
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ unsigned int cryptlen = req->cryptlen;
+
+ if (err)
+ goto out;
+
+ authsize = crypto_aead_authsize(authenc_esn);
+ cryptlen -= authsize;
+ ihash = ahreq->result + authsize;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ if (err)
+ goto out;
+
+ abreq = aead_request_ctx(req);
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+ cryptlen, req->iv);
+
+ err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+ authenc_esn_request_complete(req, err);
+}
+
+static u8 *crypto_authenc_esn_ahash(struct aead_request *req,
+ unsigned int flags)
+{
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct crypto_ahash *auth = ctx->auth;
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ u8 *hash = areq_ctx->tail;
+ int err;
+
+ hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1);
+
+ ahash_request_set_tfm(ahreq, auth);
+
+ err = crypto_ahash_init(ahreq);
+ if (err)
+ return ERR_PTR(err);
+
+ ahash_request_set_crypt(ahreq, areq_ctx->hsg, hash, areq_ctx->headlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->update_complete, req);
+
+ err = crypto_ahash_update(ahreq);
+ if (err)
+ return ERR_PTR(err);
+
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->update_complete2, req);
+
+ err = crypto_ahash_update(ahreq);
+ if (err)
+ return ERR_PTR(err);
+
+ ahash_request_set_crypt(ahreq, areq_ctx->tsg, hash,
+ areq_ctx->trailen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ return ERR_PTR(err);
+
+ return hash;
+}
+
+static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
+ unsigned int flags)
+{
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *assoc = req->assoc;
+ struct scatterlist *cipher = areq_ctx->cipher;
+ struct scatterlist *hsg = areq_ctx->hsg;
+ struct scatterlist *tsg = areq_ctx->tsg;
+ struct scatterlist *assoc1;
+ struct scatterlist *assoc2;
+ unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
+ unsigned int cryptlen = req->cryptlen;
+ struct page *dstp;
+ u8 *vdst;
+ u8 *hash;
+
+ dstp = sg_page(dst);
+ vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
+
+ if (ivsize) {
+ sg_init_table(cipher, 2);
+ sg_set_buf(cipher, iv, ivsize);
+ scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
+ dst = cipher;
+ cryptlen += ivsize;
+ }
+
+ if (sg_is_last(assoc))
+ return -EINVAL;
+
+ assoc1 = assoc + 1;
+ if (sg_is_last(assoc1))
+ return -EINVAL;
+
+ assoc2 = assoc + 2;
+ if (!sg_is_last(assoc2))
+ return -EINVAL;
+
+ sg_init_table(hsg, 2);
+ sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
+ sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+
+ sg_init_table(tsg, 1);
+ sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+
+ areq_ctx->cryptlen = cryptlen;
+ areq_ctx->headlen = assoc->length + assoc2->length;
+ areq_ctx->trailen = assoc1->length;
+ areq_ctx->sg = dst;
+
+ areq_ctx->complete = authenc_esn_geniv_ahash_done;
+ areq_ctx->update_complete = authenc_esn_geniv_ahash_update_done;
+ areq_ctx->update_complete2 = authenc_esn_geniv_ahash_update_done2;
+
+ hash = crypto_authenc_esn_ahash(req, flags);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ scatterwalk_map_and_copy(hash, dst, cryptlen,
+ crypto_aead_authsize(authenc_esn), 1);
+ return 0;
+}
+
+
+static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
+ int err)
+{
+ struct aead_request *areq = req->data;
+
+ if (!err) {
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(areq);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct ablkcipher_request *abreq = aead_request_ctx(areq);
+ u8 *iv = (u8 *)(abreq + 1) +
+ crypto_ablkcipher_reqsize(ctx->enc);
+
+ err = crypto_authenc_esn_genicv(areq, iv, 0);
+ }
+
+ authenc_esn_request_complete(areq, err);
+}
+
+static int crypto_authenc_esn_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct crypto_ablkcipher *enc = ctx->enc;
+ struct scatterlist *dst = req->dst;
+ unsigned int cryptlen = req->cryptlen;
+ struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+ + ctx->reqoff);
+ u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
+ int err;
+
+ ablkcipher_request_set_tfm(abreq, enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ crypto_authenc_esn_encrypt_done, req);
+ ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
+
+ memcpy(iv, req->iv, crypto_aead_ivsize(authenc_esn));
+
+ err = crypto_ablkcipher_encrypt(abreq);
+ if (err)
+ return err;
+
+ return crypto_authenc_esn_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+}
+
+static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req,
+ int err)
+{
+ struct aead_request *areq = req->data;
+
+ if (!err) {
+ struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
+
+ err = crypto_authenc_esn_genicv(areq, greq->giv, 0);
+ }
+
+ authenc_esn_request_complete(areq, err);
+}
+
+static int crypto_authenc_esn_givencrypt(struct aead_givcrypt_request *req)
+{
+ struct crypto_aead *authenc_esn = aead_givcrypt_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct aead_request *areq = &req->areq;
+ struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
+ u8 *iv = req->giv;
+ int err;
+
+ skcipher_givcrypt_set_tfm(greq, ctx->enc);
+ skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
+ crypto_authenc_esn_givencrypt_done, areq);
+ skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
+ areq->iv);
+ skcipher_givcrypt_set_giv(greq, iv, req->seq);
+
+ err = crypto_skcipher_givencrypt(greq);
+ if (err)
+ return err;
+
+ return crypto_authenc_esn_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+}
+
+static int crypto_authenc_esn_verify(struct aead_request *req)
+{
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ u8 *ohash;
+ u8 *ihash;
+ unsigned int authsize;
+
+ areq_ctx->complete = authenc_esn_verify_ahash_done;
+ areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
+
+ ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (IS_ERR(ohash))
+ return PTR_ERR(ohash);
+
+ authsize = crypto_aead_authsize(authenc_esn);
+ ihash = ohash + authsize;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+ return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
+}
+
+static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
+ unsigned int cryptlen)
+{
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct scatterlist *src = req->src;
+ struct scatterlist *assoc = req->assoc;
+ struct scatterlist *cipher = areq_ctx->cipher;
+ struct scatterlist *hsg = areq_ctx->hsg;
+ struct scatterlist *tsg = areq_ctx->tsg;
+ struct scatterlist *assoc1;
+ struct scatterlist *assoc2;
+ unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
+ struct page *srcp;
+ u8 *vsrc;
+
+ srcp = sg_page(src);
+ vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
+
+ if (ivsize) {
+ sg_init_table(cipher, 2);
+ sg_set_buf(cipher, iv, ivsize);
+ scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
+ src = cipher;
+ cryptlen += ivsize;
+ }
+
+ if (sg_is_last(assoc))
+ return -EINVAL;
+
+ assoc1 = assoc + 1;
+ if (sg_is_last(assoc1))
+ return -EINVAL;
+
+ assoc2 = assoc + 2;
+ if (!sg_is_last(assoc2))
+ return -EINVAL;
+
+ sg_init_table(hsg, 2);
+ sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
+ sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+
+ sg_init_table(tsg, 1);
+ sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+
+ areq_ctx->cryptlen = cryptlen;
+ areq_ctx->headlen = assoc->length + assoc2->length;
+ areq_ctx->trailen = assoc1->length;
+ areq_ctx->sg = src;
+
+ areq_ctx->complete = authenc_esn_verify_ahash_done;
+ areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
+ areq_ctx->update_complete2 = authenc_esn_verify_ahash_update_done2;
+
+ return crypto_authenc_esn_verify(req);
+}
+
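+/*
+ * Decrypt: verify the ICV over the received ciphertext first; only
+ * then run the inner cipher, reusing the caller's completion directly.
+ */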
+static int crypto_authenc_esn_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+ struct ablkcipher_request *abreq = aead_request_ctx(req);
+ unsigned int cryptlen = req->cryptlen;
+ unsigned int authsize = crypto_aead_authsize(authenc_esn);
+ u8 *iv = req->iv;
+ int err;
+
+ if (cryptlen < authsize)
+ return -EINVAL;
+ cryptlen -= authsize;
+
+ err = crypto_authenc_esn_iverify(req, iv, cryptlen);
+ if (err)
+ return err;
+
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
+
+ return crypto_ablkcipher_decrypt(abreq);
+}
+
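+/*
+ * reqsize layout: two hash digests (plus alignment padding) and the
+ * cipher IV sit in front of the sub-request, which is either an ahash
+ * request or a givcrypt request, whichever is larger.
+ */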
+static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+ struct authenc_esn_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_ahash *auth;
+ struct crypto_ablkcipher *enc;
+ int err;
+
+ auth = crypto_spawn_ahash(&ictx->auth);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ enc = crypto_spawn_skcipher(&ictx->enc);
+ err = PTR_ERR(enc);
+ if (IS_ERR(enc))
+ goto err_free_ahash;
+
+ ctx->auth = auth;
+ ctx->enc = enc;
+
+ ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
+ crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1) +
+ crypto_ablkcipher_ivsize(enc);
+
+ tfm->crt_aead.reqsize = sizeof(struct authenc_esn_request_ctx) +
+ ctx->reqoff +
+ max_t(unsigned int,
+ crypto_ahash_reqsize(auth) +
+ sizeof(struct ahash_request),
+ sizeof(struct skcipher_givcrypt_request) +
+ crypto_ablkcipher_reqsize(enc));
+
+ return 0;
+
+err_free_ahash:
+ crypto_free_ahash(auth);
+ return err;
+}
+
+static void crypto_authenc_esn_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(ctx->auth);
+ crypto_free_ablkcipher(ctx->enc);
+}
+
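+/*
+ * Template instantiation: "authencesn(auth,enc)" grabs an ahash spawn
+ * and an skcipher spawn, inherits block size, alignmask and the ASYNC
+ * flag from the cipher, and caps maxauthsize at the hash digest size.
+ */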
+static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
+{
+ struct crypto_attr_type *algt;
+ struct crypto_instance *inst;
+ struct hash_alg_common *auth;
+ struct crypto_alg *auth_base;
+ struct crypto_alg *enc;
+ struct authenc_esn_instance_ctx *ctx;
+ const char *enc_name;
+ int err;
+
+ algt = crypto_get_attr_type(tb);
+ err = PTR_ERR(algt);
+ if (IS_ERR(algt))
+ return ERR_PTR(err);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
+ return ERR_PTR(-EINVAL);
+
+ auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
+ CRYPTO_ALG_TYPE_AHASH_MASK);
+ if (IS_ERR(auth))
+ return ERR_CAST(auth);
+
+ auth_base = &auth->base;
+
+ enc_name = crypto_attr_alg_name(tb[2]);
+ err = PTR_ERR(enc_name);
+ if (IS_ERR(enc_name))
+ goto out_put_auth;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!inst)
+ goto out_put_auth;
+
+ ctx = crypto_instance_ctx(inst);
+
+ err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
+ if (err)
+ goto err_free_inst;
+
+ crypto_set_skcipher_spawn(&ctx->enc, inst);
+ err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
+ if (err)
+ goto err_drop_auth;
+
+ enc = crypto_skcipher_spawn_alg(&ctx->enc);
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+ "authencesn(%s,%s)", auth_base->cra_name, enc->cra_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto err_drop_enc;
+
+ if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "authencesn(%s,%s)", auth_base->cra_driver_name,
+ enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_drop_enc;
+
+ inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
+ inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.cra_priority = enc->cra_priority *
+ 10 + auth_base->cra_priority;
+ inst->alg.cra_blocksize = enc->cra_blocksize;
+ inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
+ inst->alg.cra_type = &crypto_aead_type;
+
+ inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
+ inst->alg.cra_aead.maxauthsize = auth->digestsize;
+
+ inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
+
+ inst->alg.cra_init = crypto_authenc_esn_init_tfm;
+ inst->alg.cra_exit = crypto_authenc_esn_exit_tfm;
+
+ inst->alg.cra_aead.setkey = crypto_authenc_esn_setkey;
+ inst->alg.cra_aead.encrypt = crypto_authenc_esn_encrypt;
+ inst->alg.cra_aead.decrypt = crypto_authenc_esn_decrypt;
+ inst->alg.cra_aead.givencrypt = crypto_authenc_esn_givencrypt;
+
+out:
+ crypto_mod_put(auth_base);
+ return inst;
+
+err_drop_enc:
+ crypto_drop_skcipher(&ctx->enc);
+err_drop_auth:
+ crypto_drop_ahash(&ctx->auth);
+err_free_inst:
+ kfree(inst);
+out_put_auth:
+ inst = ERR_PTR(err);
+ goto out;
+}
+
+static void crypto_authenc_esn_free(struct crypto_instance *inst)
+{
+ struct authenc_esn_instance_ctx *ctx = crypto_instance_ctx(inst);
+
+ crypto_drop_skcipher(&ctx->enc);
+ crypto_drop_ahash(&ctx->auth);
+ kfree(inst);
+}
+
+static struct crypto_template crypto_authenc_esn_tmpl = {
+ .name = "authencesn",
+ .alloc = crypto_authenc_esn_alloc,
+ .free = crypto_authenc_esn_free,
+ .module = THIS_MODULE,
+};
+
+static int __init crypto_authenc_esn_module_init(void)
+{
+ return crypto_register_template(&crypto_authenc_esn_tmpl);
+}
+
+static void __exit crypto_authenc_esn_module_exit(void)
+{
+ crypto_unregister_template(&crypto_authenc_esn_tmpl);
+}
+
+module_init(crypto_authenc_esn_module_init);
+module_exit(crypto_authenc_esn_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
+MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
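
The template above composes an ahash and an ablkcipher into a single AEAD. As a rough userspace illustration of the resulting encrypt-then-MAC ordering (header, IV, ciphertext, then the ESN trailer), here is a minimal sketch using OpenSSL's EVP/HMAC interfaces; the buffer names and sizes are illustrative and this is not the kernel API:

/* Sketch: encrypt-then-MAC with a trailing ESN word, userspace
 * analogue only.  Build: cc esn_sketch.c -lcrypto */
#include <stdio.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

int main(void)
{
	unsigned char ckey[16]  = "0123456789abcdef";    /* cipher key */
	unsigned char akey[20]  = "authentication-key!"; /* hmac key */
	unsigned char iv[16]    = { 0 };                 /* per-packet IV */
	unsigned char hdr[8]    = "SPI+SEQ";             /* assoc header */
	unsigned char esn_hi[4] = { 0, 0, 0, 1 };        /* assoc trailer */
	unsigned char pt[16]    = "hello, authenc!";
	unsigned char ct[32], icv[EVP_MAX_MD_SIZE];
	int clen = 0, tmp = 0;
	unsigned int ilen = 0;

	/* 1. encrypt the payload */
	EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
	EVP_EncryptInit_ex(c, EVP_aes_128_cbc(), NULL, ckey, iv);
	EVP_EncryptUpdate(c, ct, &clen, pt, sizeof(pt));
	EVP_EncryptFinal_ex(c, ct + clen, &tmp);
	clen += tmp;
	EVP_CIPHER_CTX_free(c);

	/* 2. authenticate header || IV || ciphertext || ESN trailer */
	HMAC_CTX *h = HMAC_CTX_new();
	HMAC_Init_ex(h, akey, sizeof(akey), EVP_sha1(), NULL);
	HMAC_Update(h, hdr, sizeof(hdr));
	HMAC_Update(h, iv, sizeof(iv));
	HMAC_Update(h, ct, clen);
	HMAC_Update(h, esn_hi, sizeof(esn_hi));   /* trailer hashed last */
	HMAC_Final(h, icv, &ilen);
	HMAC_CTX_free(h);

	printf("ciphertext %d bytes, ICV %u bytes\n", clen, ilen);
	return 0;
}
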
diff --git a/crypto/cast5.c b/crypto/cast5.c
index a1d2294b50ad..4a230ddec877 100644
--- a/crypto/cast5.c
+++ b/crypto/cast5.c
@@ -604,36 +604,23 @@ static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
* Rounds 3, 6, 9, 12, and 15 use f function Type 3.
*/
+ t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
+ t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
+ t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
+ t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
+ t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
+ t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
+ t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
+ t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
+ t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
+ t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
+ t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
+ t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
if (!(c->rr)) {
- t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
- t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
- t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
- t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
- t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
- t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
- t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
- t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
- t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
- t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
- t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
- t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]);
t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]);
t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]);
t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]);
- } else {
- t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
- t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
- t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
- t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
- t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
- t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
- t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
- t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
- t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
- t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
- t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
- t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
}
/* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and
@@ -663,32 +650,19 @@ static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]);
t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]);
t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]);
- t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
- t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
- t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
- t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
- t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
- t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
- t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
- t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
- t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
- t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
- t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
- t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
- } else {
- t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
- t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
- t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
- t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
- t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
- t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
- t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
- t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
- t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
- t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
- t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
- t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
}
+ t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
+ t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
+ t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
+ t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
+ t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
+ t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
+ t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
+ t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
+ t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
+ t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
+ t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
+ t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
dst[0] = cpu_to_be32(r);
dst[1] = cpu_to_be32(l);
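
The cast5.c change is a pure deduplication: the first twelve Feistel rounds are identical for both key sizes, so they are hoisted out of the if/else and only the last four rounds remain conditional on the reduced-rounds flag. A toy Feistel network showing the same hoisting (F() is a dummy round function, not CAST5's F1/F2/F3):

/* Toy Feistel sketch of the cast5.c deduplication: twelve shared
 * rounds run unconditionally, the last four only for full-round keys. */
#include <stdint.h>
#include <stdio.h>

static uint32_t F(uint32_t r, uint32_t k)  /* stand-in round function */
{
	return (r + k) ^ (r << 3) ^ (r >> 5);
}

static void feistel(uint32_t *l, uint32_t *r, const uint32_t *km,
		    int reduced_rounds)
{
	uint32_t t;
	int i;

	for (i = 0; i < 12; i++) {             /* rounds shared by both */
		t = *l; *l = *r; *r = t ^ F(*r, km[i]);
	}
	if (!reduced_rounds)
		for (i = 12; i < 16; i++) {    /* full 16-round variant */
			t = *l; *l = *r; *r = t ^ F(*r, km[i]);
		}
}

int main(void)
{
	uint32_t km[16], l = 0x01234567, r = 0x89abcdef;
	int i;

	for (i = 0; i < 16; i++)
		km[i] = 0x9e3779b9u * (i + 1);  /* arbitrary subkeys */
	feistel(&l, &r, km, 0);
	printf("%08x %08x\n", r, l);            /* halves swapped out */
	return 0;
}
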
diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c
index fdcf6248f152..b980ee1af459 100644
--- a/crypto/crypto_wq.c
+++ b/crypto/crypto_wq.c
@@ -20,7 +20,8 @@ EXPORT_SYMBOL_GPL(kcrypto_wq);
static int __init crypto_wq_init(void)
{
- kcrypto_wq = create_workqueue("crypto");
+ kcrypto_wq = alloc_workqueue("crypto",
+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
if (unlikely(!kcrypto_wq))
return -ENOMEM;
return 0;
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 463dc859aa05..b5ccae29be74 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -48,12 +48,12 @@ static int deflate_comp_init(struct deflate_ctx *ctx)
int ret = 0;
struct z_stream_s *stream = &ctx->comp_stream;
- stream->workspace = vmalloc(zlib_deflate_workspacesize());
+ stream->workspace = vzalloc(zlib_deflate_workspacesize(
+ -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL));
if (!stream->workspace) {
ret = -ENOMEM;
goto out;
}
- memset(stream->workspace, 0, zlib_deflate_workspacesize());
ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
-DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
Z_DEFAULT_STRATEGY);
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
index 3ca3b669d5d5..42ce9f570aec 100644
--- a/crypto/eseqiv.c
+++ b/crypto/eseqiv.c
@@ -62,20 +62,6 @@ out:
skcipher_givcrypt_complete(req, err);
}
-static void eseqiv_chain(struct scatterlist *head, struct scatterlist *sg,
- int chain)
-{
- if (chain) {
- head->length += sg->length;
- sg = scatterwalk_sg_next(sg);
- }
-
- if (sg)
- scatterwalk_sg_chain(head, 2, sg);
- else
- sg_mark_end(head);
-}
-
static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
{
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
@@ -124,13 +110,13 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
sg_init_table(reqctx->src, 2);
sg_set_buf(reqctx->src, giv, ivsize);
- eseqiv_chain(reqctx->src, osrc, vsrc == giv + ivsize);
+ scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2);
dst = reqctx->src;
if (osrc != odst) {
sg_init_table(reqctx->dst, 2);
sg_set_buf(reqctx->dst, giv, ivsize);
- eseqiv_chain(reqctx->dst, odst, vdst == giv + ivsize);
+ scatterwalk_crypto_chain(reqctx->dst, odst, vdst == giv + ivsize, 2);
dst = reqctx->dst;
}
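
Both this hunk and the gcm.c one below replace identical private helpers with the shared scatterwalk_crypto_chain(). The helper either merges the head entry with a physically contiguous follower or links the two lists; a linked-list sketch of that merge-or-link decision (plain structs, not the kernel scatterlist API):

/* Conceptual sketch of scatterwalk_crypto_chain()'s merge-or-link
 * decision, using a plain linked list instead of scatterlists. */
#include <stddef.h>
#include <stdio.h>

struct ent {
	const char *buf;
	size_t len;
	struct ent *next;
};

/* If 'contiguous', the head buffer sits directly in front of sg's
 * first chunk in memory: extend head over it and skip that chunk.
 * Otherwise just link sg behind head (NULL next == list end). */
static void chain(struct ent *head, struct ent *sg, int contiguous)
{
	if (contiguous) {
		head->len += sg->len;
		sg = sg->next;
	}
	head->next = sg;
}

int main(void)
{
	struct ent data = { "payload", 7, NULL };
	struct ent head = { "iv", 2, NULL };

	chain(&head, &data, 1);   /* contiguous: lengths merge */
	printf("head covers %zu bytes, next=%p\n", head.len,
	       (void *)head.next);
	return 0;
}
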
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 2f5fbba6576c..1a252639ef91 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -1102,21 +1102,6 @@ static int crypto_rfc4543_setauthsize(struct crypto_aead *parent,
return crypto_aead_setauthsize(ctx->child, authsize);
}
-/* this is the same as crypto_authenc_chain */
-static void crypto_rfc4543_chain(struct scatterlist *head,
- struct scatterlist *sg, int chain)
-{
- if (chain) {
- head->length += sg->length;
- sg = scatterwalk_sg_next(sg);
- }
-
- if (sg)
- scatterwalk_sg_chain(head, 2, sg);
- else
- sg_mark_end(head);
-}
-
static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
int enc)
{
@@ -1154,13 +1139,13 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
sg_init_table(payload, 2);
sg_set_buf(payload, req->iv, 8);
- crypto_rfc4543_chain(payload, dst, vdst == req->iv + 8);
+ scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
sg_init_table(assoc, 2);
sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
req->assoc->offset);
- crypto_rfc4543_chain(assoc, payload, 0);
+ scatterwalk_crypto_chain(assoc, payload, 0, 2);
aead_request_set_tfm(subreq, ctx->child);
aead_request_set_callback(subreq, req->base.flags, req->base.complete,
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index a90d260528d4..df35e4ccd07e 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -89,7 +89,7 @@
}
/* Given the value i in 0..255 as the byte overflow when a field element
- in GHASH is multipled by x^8, this function will return the values that
+ in GHASH is multiplied by x^8, this function will return the values that
are generated in the lo 16-bit word of the field value by applying the
modular polynomial. The values lo_byte and hi_byte are returned via the
macro xp_fun(lo_byte, hi_byte) so that the values can be assembled into
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 75586f1f86e7..29a89dad68b6 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -455,7 +455,8 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
get_online_cpus();
- pcrypt->wq = create_workqueue(name);
+ pcrypt->wq = alloc_workqueue(name,
+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
if (!pcrypt->wq)
goto err;
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 1ceb6735aa53..8a0f68b7f257 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -5,7 +5,7 @@
*
* Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
*
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -325,4 +325,5 @@ module_init(rmd128_mod_init);
module_exit(rmd128_mod_fini);
MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 472261fc913f..525d7bb752cf 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -5,7 +5,7 @@
*
* Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
*
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -369,4 +369,5 @@ module_init(rmd160_mod_init);
module_exit(rmd160_mod_fini);
MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index 72eafa8d2e7b..69293d9b56e0 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -5,7 +5,7 @@
*
* Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
*
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -344,4 +344,5 @@ module_init(rmd256_mod_init);
module_exit(rmd256_mod_fini);
MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index 86becaba2f05..09f97dfdfbba 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -5,7 +5,7 @@
*
* Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
*
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -393,4 +393,5 @@ module_init(rmd320_mod_init);
module_exit(rmd320_mod_fini);
MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
diff --git a/crypto/shash.c b/crypto/shash.c
index 22fd9433141f..76f74b963151 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -310,7 +310,13 @@ static int shash_async_export(struct ahash_request *req, void *out)
static int shash_async_import(struct ahash_request *req, const void *in)
{
- return crypto_shash_import(ahash_request_ctx(req), in);
+ struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct shash_desc *desc = ahash_request_ctx(req);
+
+ desc->tfm = *ctx;
+ desc->flags = req->base.flags;
+
+ return crypto_shash_import(desc, in);
}
static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
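
The shash fix matters because an imported hash state is just serialized bytes: the descriptor's tfm pointer and flags are not part of the exported blob and must be re-established before the state is usable. A generic save/restore sketch of the same pattern (illustrative types, not the crypto API):

/* Export/import sketch: only the pure state is serialized; the live
 * context (here an ops pointer) must be rebound on import, which is
 * what the shash_async_import() fix does for desc->tfm and flags. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct hash_ops { const char *name; };

struct desc {
	const struct hash_ops *ops;  /* live binding, never exported */
	uint64_t count;              /* pure state */
	uint8_t  buf[64];            /* pure state */
};

static size_t export_state(const struct desc *d, uint8_t *out)
{
	memcpy(out, &d->count, sizeof(d->count));
	memcpy(out + sizeof(d->count), d->buf, sizeof(d->buf));
	return sizeof(d->count) + sizeof(d->buf);
}

static void import_state(struct desc *d, const struct hash_ops *ops,
			 const uint8_t *in)
{
	d->ops = ops;                /* rebind the live context first */
	memcpy(&d->count, in, sizeof(d->count));
	memcpy(d->buf, in + sizeof(d->count), sizeof(d->buf));
}

int main(void)
{
	static const struct hash_ops sha1_ops = { "sha1" };
	struct desc a = { &sha1_ops, 42, { 1, 2, 3 } }, b;
	uint8_t blob[72];            /* 8 + 64 bytes of pure state */

	export_state(&a, blob);
	import_state(&b, &sha1_ops, blob);
	printf("restored %s state, count=%llu\n", b.ops->name,
	       (unsigned long long)b.count);
	return 0;
}
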
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 3ca68f9fc14d..e912ea5def3d 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -8,6 +8,13 @@
* Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) 2007 Nokia Siemens Networks
*
+ * Updated RFC4106 AES-GCM testing.
+ * Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
+ * Adrian Hoban <adrian.hoban@intel.com>
+ * Gabriele Paoloni <gabriele.paoloni@intel.com>
+ * Tadeusz Struk (tadeusz.struk@intel.com)
+ * Copyright (c) 2010, Intel Corporation.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
@@ -139,7 +146,8 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
unsigned int tcount, u8 *keysize)
{
unsigned int ret, i, j, iv_len;
- const char *key, iv[128];
+ const char *key;
+ char iv[128];
struct crypto_blkcipher *tfm;
struct blkcipher_desc desc;
const char *e;
@@ -980,6 +988,10 @@ static int do_test(int m)
ret += tcrypt_test("ansi_cprng");
break;
+ case 151:
+ ret += tcrypt_test("rfc4106(gcm(aes))");
+ break;
+
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
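
The tcrypt hunk also fixes a classic declarator pitfall: in "const char *key, iv[128];" the qualifier binds to both declarators, so iv was an array of const char and writes to it were rejected by the compiler. A minimal demonstration:

/* Declarator pitfall fixed in tcrypt.c: one declaration, two very
 * different types.  The broken variant is left commented out because
 * it does not compile once iv is written to. */
#include <string.h>
#include <stdio.h>

int main(void)
{
	/* const char *key, iv[128];   -- here iv is const char[128],
	 *                                so memset(iv, ...) is an error */
	const char *key = "sixteen-byte-key";
	char iv[128];                 /* separate, writable declaration */

	memset(iv, 0xff, sizeof(iv));
	printf("key=%s iv[0]=%d\n", key, iv[0]);
	return 0;
}
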
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index fa8c8f78c8d4..2854865f2434 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -6,6 +6,13 @@
* Copyright (c) 2007 Nokia Siemens Networks
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
*
+ * Updated RFC4106 AES-GCM testing.
+ * Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
+ * Adrian Hoban <adrian.hoban@intel.com>
+ * Gabriele Paoloni <gabriele.paoloni@intel.com>
+ * Tadeusz Struk (tadeusz.struk@intel.com)
+ * Copyright (c) 2010, Intel Corporation.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
@@ -2070,6 +2077,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "ghash",
.test = alg_test_hash,
+ .fips_allowed = 1,
.suite = {
.hash = {
.vecs = ghash_tv_template,
@@ -2242,6 +2250,23 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "rfc4106(gcm(aes))",
+ .test = alg_test_aead,
+ .suite = {
+ .aead = {
+ .enc = {
+ .vecs = aes_gcm_rfc4106_enc_tv_template,
+ .count = AES_GCM_4106_ENC_TEST_VECTORS
+ },
+ .dec = {
+ .vecs = aes_gcm_rfc4106_dec_tv_template,
+ .count = AES_GCM_4106_DEC_TEST_VECTORS
+ }
+ }
+ }
+ }, {
.alg = "rfc4309(ccm(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
@@ -2429,6 +2454,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "xts(aes)",
.test = alg_test_skcipher,
+ .fips_allowed = 1,
.suite = {
.cipher = {
.enc = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 74e35377fd30..aa6dac05f843 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -6,6 +6,15 @@
* Copyright (c) 2007 Nokia Siemens Networks
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
*
+ * Updated RFC4106 AES-GCM testing. Some test vectors were taken from
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/
+ * gcm/gcm-test-vectors.tar.gz
+ * Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
+ * Adrian Hoban <adrian.hoban@intel.com>
+ * Gabriele Paoloni <gabriele.paoloni@intel.com>
+ * Tadeusz Struk (tadeusz.struk@intel.com)
+ * Copyright (c) 2010, Intel Corporation.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
@@ -442,8 +451,9 @@ static struct hash_testvec rmd320_tv_template[] = {
/*
* SHA1 test vectors from from FIPS PUB 180-1
+ * Long vector from CAVS 5.0
*/
-#define SHA1_TEST_VECTORS 2
+#define SHA1_TEST_VECTORS 3
static struct hash_testvec sha1_tv_template[] = {
{
@@ -458,6 +468,33 @@ static struct hash_testvec sha1_tv_template[] = {
"\x4a\xa1\xf9\x51\x29\xe5\xe5\x46\x70\xf1",
.np = 2,
.tap = { 28, 28 }
+ }, {
+ .plaintext = "\xec\x29\x56\x12\x44\xed\xe7\x06"
+ "\xb6\xeb\x30\xa1\xc3\x71\xd7\x44"
+ "\x50\xa1\x05\xc3\xf9\x73\x5f\x7f"
+ "\xa9\xfe\x38\xcf\x67\xf3\x04\xa5"
+ "\x73\x6a\x10\x6e\x92\xe1\x71\x39"
+ "\xa6\x81\x3b\x1c\x81\xa4\xf3\xd3"
+ "\xfb\x95\x46\xab\x42\x96\xfa\x9f"
+ "\x72\x28\x26\xc0\x66\x86\x9e\xda"
+ "\xcd\x73\xb2\x54\x80\x35\x18\x58"
+ "\x13\xe2\x26\x34\xa9\xda\x44\x00"
+ "\x0d\x95\xa2\x81\xff\x9f\x26\x4e"
+ "\xcc\xe0\xa9\x31\x22\x21\x62\xd0"
+ "\x21\xcc\xa2\x8d\xb5\xf3\xc2\xaa"
+ "\x24\x94\x5a\xb1\xe3\x1c\xb4\x13"
+ "\xae\x29\x81\x0f\xd7\x94\xca\xd5"
+ "\xdf\xaf\x29\xec\x43\xcb\x38\xd1"
+ "\x98\xfe\x4a\xe1\xda\x23\x59\x78"
+ "\x02\x21\x40\x5b\xd6\x71\x2a\x53"
+ "\x05\xda\x4b\x1b\x73\x7f\xce\x7c"
+ "\xd2\x1c\x0e\xb7\x72\x8d\x08\x23"
+ "\x5a\x90\x11",
+ .psize = 163,
+ .digest = "\x97\x01\x11\xc4\xe7\x7b\xcc\x88\xcc\x20"
+ "\x45\x9c\x02\xb6\x9b\x4a\xa8\xf5\x82\x17",
+ .np = 4,
+ .tap = { 63, 64, 31, 5 }
}
};
@@ -2947,6 +2984,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
#define AES_CTR_3686_DEC_TEST_VECTORS 6
#define AES_GCM_ENC_TEST_VECTORS 9
#define AES_GCM_DEC_TEST_VECTORS 8
+#define AES_GCM_4106_ENC_TEST_VECTORS 7
+#define AES_GCM_4106_DEC_TEST_VECTORS 7
#define AES_CCM_ENC_TEST_VECTORS 7
#define AES_CCM_DEC_TEST_VECTORS 7
#define AES_CCM_4309_ENC_TEST_VECTORS 7
@@ -5829,6 +5868,356 @@ static struct aead_testvec aes_gcm_dec_tv_template[] = {
}
};
+static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
+ { /* Generated using Crypto++ */
+ .key = zeroed_string,
+ .klen = 20,
+ .iv = zeroed_string,
+ .input = zeroed_string,
+ .ilen = 16,
+ .assoc = zeroed_string,
+ .alen = 8,
+ .result = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
+ "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+ "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+ "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+ .rlen = 32,
+	}, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\x00\x00\x00\x00",
+ .klen = 20,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x01"
+ "\x00\x00\x00\x00",
+ .input = zeroed_string,
+ .ilen = 16,
+ .assoc = zeroed_string,
+ .alen = 8,
+ .result = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
+ "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+ "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+ "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+ .rlen = 32,
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\x00\x00\x00\x00",
+ .klen = 20,
+ .iv = zeroed_string,
+ .input = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01",
+ .ilen = 16,
+ .assoc = zeroed_string,
+ .alen = 8,
+ .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+ "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+ "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+ "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+ .rlen = 32,
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\x00\x00\x00\x00",
+ .klen = 20,
+ .iv = zeroed_string,
+ .input = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01",
+ .ilen = 16,
+ .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01",
+ .alen = 8,
+ .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+ "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+ "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
+ "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
+ .rlen = 32,
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\x00\x00\x00\x00",
+ .klen = 20,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x01"
+ "\x00\x00\x00\x00",
+ .input = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01",
+ .ilen = 16,
+ .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01",
+ .alen = 8,
+ .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+ "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+ "\x64\x50\xF9\x32\x13\xFB\x74\x61"
+ "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
+ .rlen = 32,
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\x00\x00\x00\x00",
+ .klen = 20,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x01"
+ "\x00\x00\x00\x00",
+ .input = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01",
+ .ilen = 64,
+ .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01",
+ .alen = 8,
+ .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+ "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+ "\x98\x14\xA1\x42\x37\x80\xFD\x90"
+ "\x68\x12\x01\xA8\x91\x89\xB9\x83"
+ "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
+ "\x94\x5F\x18\x12\xBA\x27\x09\x39"
+ "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
+ "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
+ "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
+ "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
+ .rlen = 80,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x00\x00\x00\x00",
+ .klen = 20,
+ .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef"
+ "\x00\x00\x00\x00",
+ .input = "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .ilen = 192,
+ .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa",
+ .alen = 12,
+ .result = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
+ "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
+ "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
+ "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82"
+ "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44"
+ "\x41\xA9\x82\x6F\x22\xA1\x23\x1A"
+ "\xA8\xE3\x16\xFD\x31\x5C\x27\x31"
+ "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1"
+ "\xCF\x07\x57\x41\x67\xD0\xC4\x42"
+ "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F"
+ "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B"
+ "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE"
+ "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C"
+ "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF"
+ "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59"
+ "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA"
+ "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25"
+ "\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B"
+ "\x7E\x13\x06\x82\x08\x17\xA4\x35"
+ "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F"
+ "\xA3\x05\x38\x95\x20\x1A\x47\x04"
+ "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35"
+ "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E"
+ "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B"
+ "\x37\x08\x1C\xCF\xBA\x5D\x71\x46"
+ "\x80\x72\xB0\x4C\x82\x0D\x60\x3C",
+ .rlen = 208,
+ }
+};
+
+static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
+ { /* Generated using Crypto++ */
+ .key = zeroed_string,
+ .klen = 20,
+ .iv = zeroed_string,
+ .input = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
+ "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+ "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+ "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+ .ilen = 32,
+ .assoc = zeroed_string,
+ .alen = 8,
+ .result = zeroed_string,
+ .rlen = 16,
+	}, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\x00\x00\x00\x00",
+ .klen = 20,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x01"
+ "\x00\x00\x00\x00",
+ .input = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
+ "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+ "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+ "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+ .ilen = 32,
+ .assoc = zeroed_string,
+ .alen = 8,
+ .result = zeroed_string,
+ .rlen = 16,
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\x00\x00\x00\x00",
+ .klen = 20,
+ .iv = zeroed_string,
+ .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+ "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+ "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+ "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+ .ilen = 32,
+ .assoc = zeroed_string,
+ .alen = 8,
+ .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01",
+ .rlen = 16,
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\x00\x00\x00\x00",
+ .klen = 20,
+ .iv = zeroed_string,
+ .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+ "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+ "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
+ "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
+ .ilen = 32,
+ .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01",
+ .alen = 8,
+ .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01",
+ .rlen = 16,
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\x00\x00\x00\x00",
+ .klen = 20,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x01"
+ "\x00\x00\x00\x00",
+ .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+ "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+ "\x64\x50\xF9\x32\x13\xFB\x74\x61"
+ "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
+ .ilen = 32,
+ .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01",
+ .alen = 8,
+ .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01",
+ .rlen = 16,
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\x00\x00\x00\x00",
+ .klen = 20,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x01"
+ "\x00\x00\x00\x00",
+ .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+ "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+ "\x98\x14\xA1\x42\x37\x80\xFD\x90"
+ "\x68\x12\x01\xA8\x91\x89\xB9\x83"
+ "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
+ "\x94\x5F\x18\x12\xBA\x27\x09\x39"
+ "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
+ "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
+ "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
+ "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
+ .ilen = 80,
+ .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01",
+ .alen = 8,
+ .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01"
+ "\x01\x01\x01\x01\x01\x01\x01\x01",
+ .rlen = 64,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x00\x00\x00\x00",
+ .klen = 20,
+ .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef"
+ "\x00\x00\x00\x00",
+ .input = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
+ "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
+ "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
+ "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82"
+ "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44"
+ "\x41\xA9\x82\x6F\x22\xA1\x23\x1A"
+ "\xA8\xE3\x16\xFD\x31\x5C\x27\x31"
+ "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1"
+ "\xCF\x07\x57\x41\x67\xD0\xC4\x42"
+ "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F"
+ "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B"
+ "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE"
+ "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C"
+ "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF"
+ "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59"
+ "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA"
+ "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25"
+ "\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B"
+ "\x7E\x13\x06\x82\x08\x17\xA4\x35"
+ "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F"
+ "\xA3\x05\x38\x95\x20\x1A\x47\x04"
+ "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35"
+ "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E"
+ "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B"
+ "\x37\x08\x1C\xCF\xBA\x5D\x71\x46"
+ "\x80\x72\xB0\x4C\x82\x0D\x60\x3C",
+ .ilen = 208,
+ .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xaa\xaa\xaa\xaa",
+ .alen = 12,
+ .result = "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .rlen = 192,
+ }
+};
+
static struct aead_testvec aes_ccm_enc_tv_template[] = {
{ /* From RFC 3610 */
.key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
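
In the vectors above, klen is 20 because an RFC 4106 key blob carries a 4-byte salt after the 16-byte AES key; the 12-byte GCM nonce is then the salt concatenated with the 8-byte per-packet IV. A small sketch of that nonce assembly (buffer names illustrative):

/* RFC 4106 nonce layout sketch: 12-byte GCM nonce = 4-byte salt from
 * the tail of the key blob || 8-byte per-packet IV. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define AES_KEYLEN 16
#define SALT_LEN    4
#define IV_LEN      8

static void rfc4106_nonce(const uint8_t *keyblob, size_t klen,
			  const uint8_t *iv, uint8_t nonce[12])
{
	const uint8_t *salt = keyblob + klen - SALT_LEN; /* trailing salt */

	memcpy(nonce, salt, SALT_LEN);
	memcpy(nonce + SALT_LEN, iv, IV_LEN);
}

int main(void)
{
	uint8_t keyblob[AES_KEYLEN + SALT_LEN] = { 0 };  /* klen == 20 */
	uint8_t iv[IV_LEN] = { 0, 0, 0, 0, 0, 0, 0, 1 };
	uint8_t nonce[12];
	int i;

	rfc4106_nonce(keyblob, sizeof(keyblob), iv, nonce);
	for (i = 0; i < 12; i++)
		printf("%02x", nonce[i]);
	putchar('\n');
	return 0;
}
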
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 0999274a27ac..f35ff8a3926e 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -95,7 +95,7 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
/*
* For highest performance the L1 NH and L2 polynomial hashes should be
- * carefully implemented to take advantage of one's target architechture.
+ * carefully implemented to take advantage of one's target architecture.
* Here these two hash functions are defined multiple time; once for
* 64-bit architectures, once for 32-bit SSE2 architectures, and once
* for the rest (32-bit) architectures.
diff --git a/crypto/xts.c b/crypto/xts.c
index 555ecaab1e54..851705446c82 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -45,7 +45,7 @@ static int setkey(struct crypto_tfm *parent, const u8 *key,
return -EINVAL;
}
- /* we need two cipher instances: one to compute the inital 'tweak'
+ /* we need two cipher instances: one to compute the initial 'tweak'
* by encrypting the IV (usually the 'plain' iv) and the other
* one to encrypt and decrypt the data */
diff --git a/crypto/zlib.c b/crypto/zlib.c
index c3015733c990..d11d761a5e41 100644
--- a/crypto/zlib.c
+++ b/crypto/zlib.c
@@ -85,6 +85,7 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
struct z_stream_s *stream = &ctx->comp_stream;
struct nlattr *tb[ZLIB_COMP_MAX + 1];
+ int window_bits, mem_level;
size_t workspacesize;
int ret;
@@ -94,12 +95,18 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
zlib_comp_exit(ctx);
- workspacesize = zlib_deflate_workspacesize();
- stream->workspace = vmalloc(workspacesize);
+ window_bits = tb[ZLIB_COMP_WINDOWBITS]
+ ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
+ : MAX_WBITS;
+ mem_level = tb[ZLIB_COMP_MEMLEVEL]
+ ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
+ : DEF_MEM_LEVEL;
+
+ workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
+ stream->workspace = vzalloc(workspacesize);
if (!stream->workspace)
return -ENOMEM;
- memset(stream->workspace, 0, workspacesize);
ret = zlib_deflateInit2(stream,
tb[ZLIB_COMP_LEVEL]
? nla_get_u32(tb[ZLIB_COMP_LEVEL])
@@ -107,12 +114,8 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
tb[ZLIB_COMP_METHOD]
? nla_get_u32(tb[ZLIB_COMP_METHOD])
: Z_DEFLATED,
- tb[ZLIB_COMP_WINDOWBITS]
- ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
- : MAX_WBITS,
- tb[ZLIB_COMP_MEMLEVEL]
- ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
- : DEF_MEM_LEVEL,
+ window_bits,
+ mem_level,
tb[ZLIB_COMP_STRATEGY]
? nla_get_u32(tb[ZLIB_COMP_STRATEGY])
: Z_DEFAULT_STRATEGY);
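
Both the deflate.c and zlib.c hunks exist because zlib_deflate_workspacesize() now takes the window and memory-level parameters, letting the workspace be sized for what deflateInit2() will actually use. A userspace analogue of those two knobs (standard zlib; the negative windowBits selects a raw deflate stream, as in the kernel code):

/* Userspace zlib sketch of the windowBits/memLevel knobs that the
 * kernel's zlib_deflate_workspacesize() now receives.  Build with
 * -lz. */
#include <string.h>
#include <stdio.h>
#include <zlib.h>

int main(void)
{
	unsigned char in[] = "compress me, compress me, compress me";
	unsigned char out[256];
	z_stream s;

	memset(&s, 0, sizeof(s));
	/* level 6, raw deflate with an 11-bit window, memLevel 8 */
	if (deflateInit2(&s, 6, Z_DEFLATED, -11, 8,
			 Z_DEFAULT_STRATEGY) != Z_OK)
		return 1;

	s.next_in = in;
	s.avail_in = sizeof(in);
	s.next_out = out;
	s.avail_out = sizeof(out);
	if (deflate(&s, Z_FINISH) != Z_STREAM_END)
		return 1;

	printf("%u -> %lu bytes\n", (unsigned)sizeof(in), s.total_out);
	deflateEnd(&s);
	return 0;
}
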