| author | Herbert Xu <herbert@gondor.apana.org.au> | 2016-06-21 16:55:14 +0800 |
|---|---|---|
| committer | Herbert Xu <herbert@gondor.apana.org.au> | 2016-06-23 18:29:52 +0800 |
| commit | 38b2f68b426429c06cdf2ae5c8a89371524db203 (patch) | |
| tree | bceabceea3214feb3419f23128cb59346cf50a47 /arch/x86/crypto/aesni-intel_glue.c | |
| parent | 81760ea6a95ad4c41273a71052f61b9f087b5753 (diff) | |
crypto: aesni - Fix cryptd reordering problem on gcm
This patch fixes an old bug where gcm requests can be reordered
because some are processed by cryptd while others are processed
directly in softirq context.
The fix is to always defer the request to cryptd if there are
requests from the same tfm still outstanding on its queue.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
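
To make the dispatch rule concrete, here is a minimal user-space sketch of the decision the patch introduces. The helpers and field names (can_use_fpu_here, in_atomic_context, queued_requests) are illustrative stand-ins for irq_fpu_usable(), in_atomic() and cryptd_aead_queued(); the sketch only models the boolean logic, not the kernel environment.

```c
/*
 * Minimal sketch of the dispatch rule added by this patch.  The helpers
 * below are illustrative stand-ins, not kernel APIs: can_use_fpu_here()
 * models irq_fpu_usable(), in_atomic_context() models in_atomic(), and
 * queued_requests models cryptd_aead_queued().
 *
 * Old behaviour: process inline whenever the FPU is usable, even if
 * earlier requests from the same tfm are still sitting on the cryptd
 * queue -- the inline request can then complete ahead of them.
 *
 * New behaviour: process inline only when the FPU is usable and either
 * we are not in atomic context or nothing from this tfm is queued, so a
 * request can no longer overtake ones already deferred to cryptd.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_tfm {
	int queued_requests;		/* cryptd_aead_queued() stand-in */
};

static bool can_use_fpu_here(void)  { return true; }	/* irq_fpu_usable() */
static bool in_atomic_context(void) { return true; }	/* in_atomic() */

/* Old rule: only the FPU check decides. */
static bool inline_old(const struct fake_tfm *tfm)
{
	(void)tfm;
	return can_use_fpu_here();
}

/* New rule: also defer when the cryptd queue for this tfm is non-empty. */
static bool inline_new(const struct fake_tfm *tfm)
{
	return can_use_fpu_here() &&
	       (!in_atomic_context() || tfm->queued_requests == 0);
}

int main(void)
{
	struct fake_tfm tfm = { .queued_requests = 2 };

	/* Two earlier requests are still queued on cryptd. */
	printf("old rule: inline=%d (may overtake queued requests)\n",
	       inline_old(&tfm));
	printf("new rule: inline=%d (joins the back of the queue)\n",
	       inline_new(&tfm));
	return 0;
}
```
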
Diffstat (limited to 'arch/x86/crypto/aesni-intel_glue.c')
| -rw-r--r-- | arch/x86/crypto/aesni-intel_glue.c | 18 |
1 file changed, 12 insertions(+), 6 deletions(-)
```diff
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 5b7fa1471007..9e15572ef06d 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1098,9 +1098,12 @@ static int rfc4106_encrypt(struct aead_request *req)
 	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
 	struct cryptd_aead *cryptd_tfm = *ctx;
 
-	aead_request_set_tfm(req, irq_fpu_usable() ?
-				  cryptd_aead_child(cryptd_tfm) :
-				  &cryptd_tfm->base);
+	tfm = &cryptd_tfm->base;
+	if (irq_fpu_usable() && (!in_atomic() ||
+				 !cryptd_aead_queued(cryptd_tfm)))
+		tfm = cryptd_aead_child(cryptd_tfm);
+
+	aead_request_set_tfm(req, tfm);
 
 	return crypto_aead_encrypt(req);
 }
@@ -1111,9 +1114,12 @@ static int rfc4106_decrypt(struct aead_request *req)
 	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
 	struct cryptd_aead *cryptd_tfm = *ctx;
 
-	aead_request_set_tfm(req, irq_fpu_usable() ?
-				  cryptd_aead_child(cryptd_tfm) :
-				  &cryptd_tfm->base);
+	tfm = &cryptd_tfm->base;
+	if (irq_fpu_usable() && (!in_atomic() ||
+				 !cryptd_aead_queued(cryptd_tfm)))
+		tfm = cryptd_aead_child(cryptd_tfm);
+
+	aead_request_set_tfm(req, tfm);
 
 	return crypto_aead_decrypt(req);
 }
```
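
For context on the caller side: the aesni rfc4106 glue sits behind the generic AEAD API, so users of the transform never see the cryptd indirection, which is why reordering between the inline and cryptd paths is user-visible and has to be fixed inside this glue code. The sketch below is a hedged illustration of how kernel code typically drives such an AEAD transform; it uses plain "gcm(aes)" and the crypto_wait_req() synchronous-wait helper for brevity, and all parameter names and buffer sizes are invented for the example rather than taken from this patch.

```c
/*
 * Illustrative kernel-code sketch of driving an AEAD transform through
 * the generic crypto API.  The allocation of "gcm(aes)", the in-place
 * scatterlist layout and the synchronous wait are assumptions made for
 * this example; the patch above only changes how the aesni glue
 * dispatches such requests internally.
 */
#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int gcm_encrypt_example(const u8 *key, unsigned int keylen,
			       u8 *buf, unsigned int assoclen,
			       unsigned int ptlen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* The caller only asks for an AEAD; it may resolve to aesni + cryptd. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* buf holds AD || plaintext and has room for the 16-byte tag. */
	sg_init_one(&sg, buf, assoclen + ptlen + 16);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				       CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);	/* 12-byte IV */
	aead_request_set_ad(req, assoclen);

	/* May complete synchronously or asynchronously via cryptd. */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}
```
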