Diffstat (limited to 'drivers/crypto')
-rw-r--r--   drivers/crypto/caam/caamalg.c            | 90
-rw-r--r--   drivers/crypto/caam/caamhash.c           |  1
-rw-r--r--   drivers/crypto/marvell/cesa.c            |  7
-rw-r--r--   drivers/crypto/marvell/cipher.c          | 25
-rw-r--r--   drivers/crypto/marvell/hash.c            | 12
-rw-r--r--   drivers/crypto/qat/qat_common/qat_algs.c |  4
-rw-r--r--   drivers/crypto/vmx/Kconfig               |  2
-rw-r--r--   drivers/crypto/vmx/aes_xts.c             |  2
-rw-r--r--   drivers/crypto/vmx/vmx.c                 |  6
9 files changed, 69 insertions, 80 deletions
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ea8189f4b021..b3044219772c 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -441,6 +441,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 			       OP_ALG_AAI_CTR_MOD128);
 	const bool is_rfc3686 = alg->caam.rfc3686;
 
+	if (!ctx->authsize)
+		return 0;
+
 	/* NULL encryption / decryption */
 	if (!ctx->enckeylen)
 		return aead_null_set_sh_desc(aead);
@@ -553,7 +556,10 @@ skip_enc:
 
 	/* Read and write assoclen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+	if (alg->caam.geniv)
+		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+	else
+		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
 	/* Skip assoc data */
 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -562,6 +568,14 @@ skip_enc:
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
 			     KEY_VLF);
 
+	if (alg->caam.geniv) {
+		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+				LDST_SRCDST_BYTE_CONTEXT |
+				(ctx1_iv_off << LDST_OFFSET_SHIFT));
+		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+	}
+
 	/* Load Counter into CONTEXT1 reg */
 	if (is_rfc3686)
 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -614,7 +628,7 @@ skip_enc:
 		keys_fit_inline = true;
 
 	/* aead_givencrypt shared descriptor */
-	desc = ctx->sh_desc_givenc;
+	desc = ctx->sh_desc_enc;
 
 	/* Note: Context registers are saved. */
 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
@@ -645,13 +659,13 @@ copy_iv:
 
 	append_operation(desc, ctx->class2_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 
-	/* ivsize + cryptlen = seqoutlen - authsize */
-	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
 	/* Read and write assoclen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
+	/* ivsize + cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
 	/* Skip assoc data */
 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -697,7 +711,7 @@ copy_iv:
 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
@@ -2147,7 +2161,7 @@ static void init_authenc_job(struct aead_request *req,
 
 	init_aead_job(req, edesc, all_contig, encrypt);
 
-	if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
 		append_load_as_imm(desc, req->iv, ivsize,
 				   LDST_CLASS_1_CCB |
 				   LDST_SRCDST_BYTE_CONTEXT |
@@ -2534,20 +2548,6 @@ static int aead_decrypt(struct aead_request *req)
 	return ret;
 }
 
-static int aead_givdecrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	unsigned int ivsize = crypto_aead_ivsize(aead);
-
-	if (req->cryptlen < ivsize)
-		return -EINVAL;
-
-	req->cryptlen -= ivsize;
-	req->assoclen += ivsize;
-
-	return aead_decrypt(req);
-}
-
 /*
  * allocate and map the ablkcipher extended descriptor for ablkcipher
  */
@@ -3207,7 +3207,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
 		},
@@ -3253,7 +3253,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
@@ -3299,7 +3299,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
 		},
@@ -3345,7 +3345,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
@@ -3391,7 +3391,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
@@ -3437,7 +3437,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = AES_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
@@ -3483,7 +3483,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
 		},
@@ -3531,7 +3531,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
@@ -3579,7 +3579,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
 		},
@@ -3627,7 +3627,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
@@ -3675,7 +3675,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
@@ -3723,7 +3723,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
@@ -3769,7 +3769,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
 		},
@@ -3815,7 +3815,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
@@ -3861,7 +3861,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
 		},
@@ -3907,7 +3907,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
@@ -3953,7 +3953,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
@@ -3999,7 +3999,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = DES_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
@@ -4048,7 +4048,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
 		},
@@ -4099,7 +4099,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
@@ -4150,7 +4150,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
 		},
@@ -4201,7 +4201,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
@@ -4252,7 +4252,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
@@ -4303,7 +4303,7 @@ static struct caam_aead_alg driver_aeads[] = {
 			.setkey = aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
-			.decrypt = aead_givdecrypt,
+			.decrypt = aead_decrypt,
 			.ivsize = CTR_RFC3686_IV_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index f1ecc8df8d41..36365b3efdfd 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1898,6 +1898,7 @@ caam_hash_alloc(struct caam_hash_template *template,
 			 template->name);
 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
 			 template->driver_name);
+		t_alg->ahash_alg.setkey = NULL;
 	}
 	alg->cra_module = THIS_MODULE;
 	alg->cra_init = caam_hash_cra_init;
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index e373cc6557c6..d64af8625d7e 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -180,10 +180,11 @@ int mv_cesa_queue_req(struct crypto_async_request *req,
 	struct mv_cesa_engine *engine = creq->engine;
 
 	spin_lock_bh(&engine->lock);
-	if (mv_cesa_req_get_type(creq) == CESA_DMA_REQ)
-		mv_cesa_tdma_chain(engine, creq);
-
 	ret = crypto_enqueue_request(&engine->queue, req);
+	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
+	    (ret == -EINPROGRESS ||
+	    (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		mv_cesa_tdma_chain(engine, creq);
 	spin_unlock_bh(&engine->lock);
 
 	if (ret != -EINPROGRESS)
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 48df03a06066..d19dc9614e6e 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -139,20 +139,11 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
 	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
 	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
 	struct mv_cesa_req *basereq = &creq->base;
-	unsigned int ivsize;
-	int ret;
 
 	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
 		return mv_cesa_ablkcipher_std_process(ablkreq, status);
 
-	ret = mv_cesa_dma_process(basereq, status);
-	if (ret)
-		return ret;
-
-	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
-	memcpy_fromio(ablkreq->info, basereq->chain.last->data, ivsize);
-
-	return 0;
+	return mv_cesa_dma_process(basereq, status);
 }
 
 static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
@@ -320,7 +311,6 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
 		      GFP_KERNEL : GFP_ATOMIC;
 	struct mv_cesa_req *basereq = &creq->base;
 	struct mv_cesa_ablkcipher_dma_iter iter;
-	struct mv_cesa_tdma_chain chain;
 	bool skip_ctx = false;
 	int ret;
 	unsigned int ivsize;
@@ -347,13 +337,13 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
 		return -ENOMEM;
 	}
 
-	mv_cesa_tdma_desc_iter_init(&chain);
+	mv_cesa_tdma_desc_iter_init(&basereq->chain);
 	mv_cesa_ablkcipher_req_iter_init(&iter, req);
 
 	do {
 		struct mv_cesa_op_ctx *op;
 
-		op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags);
+		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx, flags);
 		if (IS_ERR(op)) {
 			ret = PTR_ERR(op);
 			goto err_free_tdma;
@@ -363,18 +353,18 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
 		mv_cesa_set_crypt_op_len(op, iter.base.op_len);
 
 		/* Add input transfers */
-		ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
+		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
 						   &iter.src, flags);
 		if (ret)
 			goto err_free_tdma;
 
 		/* Add dummy desc to launch the crypto operation */
-		ret = mv_cesa_dma_add_dummy_launch(&chain, flags);
+		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
 		if (ret)
 			goto err_free_tdma;
 
 		/* Add output transfers */
-		ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
+		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
 						   &iter.dst, flags);
 		if (ret)
 			goto err_free_tdma;
@@ -383,13 +373,12 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
 
 	/* Add output data for IV */
 	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
-	ret = mv_cesa_dma_add_iv_op(&chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
+	ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
 				    ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
 
 	if (ret)
 		goto err_free_tdma;
 
-	basereq->chain = chain;
 	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
 
 	return 0;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index c35912b4fffb..82e0f4e6eb1c 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -315,12 +315,6 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
 	for (i = 0; i < digsize / 4; i++)
 		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
 
-	if (creq->cache_ptr)
-		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
-				   creq->cache,
-				   creq->cache_ptr,
-				   ahashreq->nbytes - creq->cache_ptr);
-
 	if (creq->last_req) {
 		/*
 		 * Hardware's MD5 digest is in little endian format, but
@@ -365,6 +359,12 @@ static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
 		mv_cesa_ahash_last_cleanup(ahashreq);
 
 	mv_cesa_ahash_cleanup(ahashreq);
+
+	if (creq->cache_ptr)
+		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
+				   creq->cache,
+				   creq->cache_ptr,
+				   ahashreq->nbytes - creq->cache_ptr);
 }
 
 static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 769148dbaeb3..20f35df8a01f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -1260,8 +1260,8 @@ static struct crypto_alg qat_algs[] = { {
 			.setkey = qat_alg_ablkcipher_xts_setkey,
 			.decrypt = qat_alg_ablkcipher_decrypt,
 			.encrypt = qat_alg_ablkcipher_encrypt,
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
 		},
 	},
diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig
index 89d8208d9851..a83ead109d5f 100644
--- a/drivers/crypto/vmx/Kconfig
+++ b/drivers/crypto/vmx/Kconfig
@@ -1,7 +1,7 @@
 config CRYPTO_DEV_VMX_ENCRYPT
 	tristate "Encryption acceleration support on P8 CPU"
 	depends on CRYPTO_DEV_VMX
-	default y
+	default m
 	help
 	  Support for VMX cryptographic acceleration instructions on Power8 CPU.
 	  This module supports acceleration for AES and GHASH in hardware. If you
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index cfb25413917c..24353ec336c5 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -129,8 +129,8 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
 
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 
-		iv = (u8 *)walk.iv;
 		ret = blkcipher_walk_virt(desc, &walk);
+		iv = walk.iv;
 		memset(tweak, 0, AES_BLOCK_SIZE);
 
 		aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index f688c32fbcc7..31a98dc6f849 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -23,6 +23,7 @@
 #include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/err.h>
+#include <linux/cpufeature.h>
 #include <linux/crypto.h>
 #include <asm/cputable.h>
 #include <crypto/internal/hash.h>
@@ -45,9 +46,6 @@ int __init p8_init(void)
 	int ret = 0;
 	struct crypto_alg **alg_it;
 
-	if (!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO))
-		return -ENODEV;
-
 	for (alg_it = algs; *alg_it; alg_it++) {
 		ret = crypto_register_alg(*alg_it);
 		printk(KERN_INFO "crypto_register_alg '%s' = %d\n",
@@ -80,7 +78,7 @@ void __exit p8_exit(void)
 	crypto_unregister_shash(&p8_ghash_alg);
 }
 
-module_init(p8_init);
+module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, p8_init);
 module_exit(p8_exit);
 
 MODULE_AUTHOR("Marcelo Cerri<mhcerri@br.ibm.com>");
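Context for the qat_algs.c hunk above: an XTS key is two full AES keys concatenated back to back (a data key and a tweak key), which is why the advertised min/max keysizes are doubled. The following is a minimal standalone C sketch of that key-splitting convention; it is not part of this commit, and the struct and function names are illustrative only, not kernel or QAT APIs.

	#include <stddef.h>

	/* Illustrative only: a combined XTS key is two AES keys back to back. */
	struct xts_key_halves {
		const unsigned char *data_key;  /* first half: encrypts the data blocks */
		const unsigned char *tweak_key; /* second half: encrypts the tweak      */
		size_t half_len;                /* 16, 24 or 32 bytes per half          */
	};

	/* Reject odd-length keys and split the buffer in two; 0 on success. */
	static int xts_split_key(const unsigned char *key, size_t keylen,
				 struct xts_key_halves *out)
	{
		if (keylen % 2 != 0)
			return -1;
		out->half_len  = keylen / 2;
		out->data_key  = key;
		out->tweak_key = key + out->half_len;
		return 0;
	}

So a 64-byte buffer advertises AES-256-XTS (2 * AES_MAX_KEY_SIZE), even though each AES key schedule only ever sees 32 bytes.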