Diffstat (limited to 'drivers/crypto/chelsio')

 -rw-r--r--  drivers/crypto/chelsio/Kconfig             |  31
 -rw-r--r--  drivers/crypto/chelsio/chcr_algo.c         | 421
 -rw-r--r--  drivers/crypto/chelsio/chcr_algo.h         |  22
 -rw-r--r--  drivers/crypto/chelsio/chcr_core.c         |  10
 -rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h       |  17
 -rw-r--r--  drivers/crypto/chelsio/chcr_ipsec.c        |  46
 -rw-r--r--  drivers/crypto/chelsio/chtls/chtls.h       |  12
 -rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c    |  61
 -rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.h    |  21
 -rw-r--r--  drivers/crypto/chelsio/chtls/chtls_hw.c    |  85
 -rw-r--r--  drivers/crypto/chelsio/chtls/chtls_io.c    |  28
 -rw-r--r--  drivers/crypto/chelsio/chtls/chtls_main.c  |  54

 12 files changed, 401 insertions, 407 deletions
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 4b9b37a130d3..f078b2686418 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -2,6 +2,7 @@
 config CRYPTO_DEV_CHELSIO
 	tristate "Chelsio Crypto Co-processor Driver"
 	depends on CHELSIO_T4
+	select CRYPTO_LIB_AES
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
 	select CRYPTO_SHA512
@@ -22,22 +23,22 @@ config CRYPTO_DEV_CHELSIO
 	  will be called chcr.

 config CHELSIO_IPSEC_INLINE
-        bool "Chelsio IPSec XFRM Tx crypto offload"
-        depends on CHELSIO_T4
+	bool "Chelsio IPSec XFRM Tx crypto offload"
+	depends on CHELSIO_T4
         depends on CRYPTO_DEV_CHELSIO
-        depends on XFRM_OFFLOAD
-        depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
-        default n
-        ---help---
-          Enable support for IPSec Tx Inline.
+	depends on XFRM_OFFLOAD
+	depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+	default n
+	---help---
+	  Enable support for IPSec Tx Inline.

 config CRYPTO_DEV_CHELSIO_TLS
-        tristate "Chelsio Crypto Inline TLS Driver"
-        depends on CHELSIO_T4
-        depends on TLS
-        select CRYPTO_DEV_CHELSIO
-        ---help---
-          Support Chelsio Inline TLS with Chelsio crypto accelerator.
+	tristate "Chelsio Crypto Inline TLS Driver"
+	depends on CHELSIO_T4
+	depends on TLS_TOE
+	select CRYPTO_DEV_CHELSIO
+	---help---
+	  Support Chelsio Inline TLS with Chelsio crypto accelerator.

-        To compile this driver as a module, choose M here: the module
-        will be called chtls.
+	  To compile this driver as a module, choose M here: the module
+	  will be called chtls.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 177f572b9589..b4b9b22125d1 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -93,7 +93,7 @@ static u32 round_constant[11] = {
 	0x1B000000, 0x36000000, 0x6C000000
 };

-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
 				   unsigned char *input, int err);

 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
@@ -568,11 +568,11 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
 	}
 }

-static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
+static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 	struct chcr_alg_template *chcr_crypto_alg =
-		container_of(alg, struct chcr_alg_template, alg.crypto);
+		container_of(alg, struct chcr_alg_template, alg.skcipher);

 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 }
@@ -757,14 +757,14 @@ static inline void create_wreq(struct chcr_context *ctx,
  */
 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
 	struct sk_buff *skb = NULL;
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct ulptx_sgl *ulptx;
-	struct chcr_blkcipher_req_ctx *reqctx =
-		ablkcipher_request_ctx(wrparam->req);
+	struct chcr_skcipher_req_ctx *reqctx =
+		skcipher_request_ctx(wrparam->req);
 	unsigned int temp = 0, transhdr_len, dst_size;
 	int error;
 	int nents;
@@ -807,9 +807,9 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
 	if ((reqctx->op == CHCR_DECRYPT_OP) &&
-	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	    (!(get_cryptoalg_subtype(tfm) ==
 	    CRYPTO_ALG_SUB_TYPE_CTR)) &&
-	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	    (!(get_cryptoalg_subtype(tfm) ==
 	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
 	} else {
@@ -843,7 +843,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 	if (reqctx->op && (ablkctx->ciph_mode ==
 			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
 		sg_pcopy_to_buffer(wrparam->req->src,
-			sg_nents(wrparam->req->src), wrparam->req->info, 16,
+			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
 			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

 	return skb;
@@ -866,27 +866,20 @@ static inline int chcr_keyctx_ck_size(unsigned int keylen)
 	return ck_size;
 }

-static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
 				       const u8 *key,
 				       unsigned int keylen)
 {
-	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
-	int err = 0;

 	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
 				CRYPTO_TFM_REQ_MASK);
 	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
 				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-	err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
-	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
-	tfm->crt_flags |=
-		crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
-		CRYPTO_TFM_RES_MASK;
-	return err;
+	return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
 }

-static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
 			       const u8 *key,
 			       unsigned int keylen)
 {
@@ -912,13 +905,12 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
 	return 0;
 badkey_err:
-	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	ablkctx->enckey_len = 0;

 	return err;
 }

-static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
 			       const u8 *key,
 			       unsigned int keylen)
 {
@@ -943,13 +935,12 @@ static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
 	return 0;
 badkey_err:
-	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	ablkctx->enckey_len = 0;

 	return err;
 }

-static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
 				   const u8 *key,
 				   unsigned int keylen)
 {
@@ -981,7 +972,6 @@ static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
 	return 0;
 badkey_err:
-	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	ablkctx->enckey_len = 0;

 	return err;
@@ -1017,28 +1007,27 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
 	return bytes;
 }

-static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
+static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
 			     u32 isfinal)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
-	struct crypto_cipher *cipher;
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+	struct crypto_aes_ctx aes;
 	int ret, i;
 	u8 *key;
 	unsigned int keylen;
 	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
 	int round8 = round / 8;

-	cipher = ablkctx->aes_generic;
 	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
 	keylen = ablkctx->enckey_len / 2;
 	key = ablkctx->key + keylen;
-	ret = crypto_cipher_setkey(cipher, key, keylen);
+	ret = aes_expandkey(&aes, key, keylen);
 	if (ret)
-		goto out;
-	crypto_cipher_encrypt_one(cipher, iv, iv);
+		return ret;
+	aes_encrypt(&aes, iv, iv);

 	for (i = 0; i < round8; i++)
 		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
@@ -1046,21 +1035,22 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
 		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

 	if (!isfinal)
-		crypto_cipher_decrypt_one(cipher, iv, iv);
-out:
-	return ret;
+		aes_decrypt(&aes, iv, iv);
+
+	memzero_explicit(&aes, sizeof(aes));
+	return 0;
 }
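The hunk above drops the per-tfm "aes-generic" crypto_cipher and recomputes the XTS tweak with the stateless AES library instead: encrypt the saved IV with the second half of the key, double it once per processed block in GF(2^128), and (when more data follows) aes_decrypt() it back so the next work request can re-encrypt it. A minimal sketch of the same computation, assuming a caller that tracks how many AES blocks were processed; the helper name and arguments are illustrative, not driver code:

	#include <crypto/aes.h>		/* aes_expandkey(), aes_encrypt() */
	#include <crypto/gf128mul.h>	/* gf128mul_x_ble() */
	#include <linux/string.h>	/* memzero_explicit() */

	/* Illustrative only: advance an XTS tweak by 'nblocks' AES blocks. */
	static int xts_advance_tweak(const u8 *tweak_key, unsigned int keylen,
				     u8 iv[AES_BLOCK_SIZE], unsigned int nblocks)
	{
		struct crypto_aes_ctx aes;
		unsigned int i;
		int ret;

		ret = aes_expandkey(&aes, tweak_key, keylen);
		if (ret)
			return ret;

		aes_encrypt(&aes, iv, iv);		/* T = AES-K2(IV) */
		for (i = 0; i < nblocks; i++)		/* T = T * x^nblocks */
			gf128mul_x_ble((le128 *)iv, (le128 *)iv);

		memzero_explicit(&aes, sizeof(aes));	/* scrub round keys */
		return 0;
	}

Because the expanded key lives on the stack and is wiped with memzero_explicit(), no long-lived transform object has to be allocated and freed per tfm, which is the point of the conversion.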
-static int chcr_update_cipher_iv(struct ablkcipher_request *req,
+static int chcr_update_cipher_iv(struct skcipher_request *req,
 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
-	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+	int subtype = get_cryptoalg_subtype(tfm);
 	int ret = 0;

 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
-		ctr_add_iv(iv, req->info, (reqctx->processed /
+		ctr_add_iv(iv, req->iv, (reqctx->processed /
 			   AES_BLOCK_SIZE));
 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
@@ -1071,7 +1061,7 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
 		if (reqctx->op)
 			/*Updated before sending last WR*/
-			memcpy(iv, req->info, AES_BLOCK_SIZE);
+			memcpy(iv, req->iv, AES_BLOCK_SIZE);
 		else
 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
 	}
@@ -1085,16 +1075,16 @@
  * for subsequent update requests
  */

-static int chcr_final_cipher_iv(struct ablkcipher_request *req,
+static int chcr_final_cipher_iv(struct skcipher_request *req,
 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
-	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+	int subtype = get_cryptoalg_subtype(tfm);
 	int ret = 0;

 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
-		ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
+		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
 						       AES_BLOCK_SIZE));
 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
 		ret = chcr_update_tweak(req, iv, 1);
@@ -1108,25 +1098,25 @@
 }

-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
 				   unsigned char *input, int err)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
 	struct sk_buff *skb;
 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct cipher_wr_param wrparam;
 	struct chcr_dev *dev = c_ctx(tfm)->dev;
 	int bytes;

 	if (err)
 		goto unmap;
-	if (req->nbytes == reqctx->processed) {
+	if (req->cryptlen == reqctx->processed) {
 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
 				      req);
-		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
+		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
 		goto complete;
 	}

@@ -1134,13 +1124,13 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
 					  reqctx->src_ofst, reqctx->dst_ofst);
-		if ((bytes + reqctx->processed) >= req->nbytes)
-			bytes  = req->nbytes - reqctx->processed;
+		if ((bytes + reqctx->processed) >= req->cryptlen)
+			bytes  = req->cryptlen - reqctx->processed;
 		else
 			bytes = rounddown(bytes, 16);
 	} else {
 		/*CTR mode counter overfloa*/
-		bytes  = req->nbytes - reqctx->processed;
+		bytes  = req->cryptlen - reqctx->processed;
 	}
 	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
 	if (err)
@@ -1153,13 +1143,13 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 					   req->base.flags,
 					   req->src,
 					   req->dst,
-					   req->nbytes,
-					   req->info,
+					   req->cryptlen,
+					   req->iv,
 					   reqctx->op);
 		goto complete;
 	}

-	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	if (get_cryptoalg_subtype(tfm) ==
 	    CRYPTO_ALG_SUB_TYPE_CTR)
 		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
 	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
@@ -1185,33 +1175,33 @@ complete:
 	return err;
 }

-static int process_cipher(struct ablkcipher_request *req,
+static int process_cipher(struct skcipher_request *req,
 				  unsigned short qid,
 				  struct sk_buff **skb,
 				  unsigned short op_type)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
 	struct cipher_wr_param wrparam;
 	int bytes, err = -EINVAL;

 	reqctx->processed = 0;

-	if (!req->info)
+	if (!req->iv)
 		goto error;

 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
-	    (req->nbytes == 0) ||
-	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
+	    (req->cryptlen == 0) ||
+	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
-		       ablkctx->enckey_len, req->nbytes, ivsize);
+		       ablkctx->enckey_len, req->cryptlen, ivsize);
 		goto error;
 	}

 	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
 	if (err)
 		goto error;
-	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
+	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
 					    AES_MIN_KEY_SIZE +
 					    sizeof(struct cpl_rx_phys_dsgl) +
 					/*Min dsgl size*/
@@ -1219,14 +1209,14 @@ static int process_cipher(struct ablkcipher_request *req,
 					/* Can be sent as Imm*/
 		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

-		dnents = sg_nents_xlen(req->dst, req->nbytes,
+		dnents = sg_nents_xlen(req->dst, req->cryptlen,
 				       CHCR_DST_SG_SIZE, 0);
 		phys_dsgl = get_space_for_phys_dsgl(dnents);
 		kctx_len = roundup(ablkctx->enckey_len, 16);
 		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
-		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
+		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
 			SGE_MAX_WR_LEN;
-		bytes = IV + req->nbytes;
+		bytes = IV + req->cryptlen;

 	} else {
 		reqctx->imm = 0;
 	}
@@ -1236,21 +1226,21 @@ static int process_cipher(struct ablkcipher_request *req,
 		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
 					  0, 0);
-		if ((bytes + reqctx->processed) >= req->nbytes)
-			bytes  = req->nbytes - reqctx->processed;
+		if ((bytes + reqctx->processed) >= req->cryptlen)
+			bytes  = req->cryptlen - reqctx->processed;
 		else
 			bytes = rounddown(bytes, 16);
 	} else {
-		bytes = req->nbytes;
+		bytes = req->cryptlen;
 	}
-	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	if (get_cryptoalg_subtype(tfm) ==
 	    CRYPTO_ALG_SUB_TYPE_CTR) {
-		bytes = adjust_ctr_overflow(req->info, bytes);
+		bytes = adjust_ctr_overflow(req->iv, bytes);
 	}
-	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	if (get_cryptoalg_subtype(tfm) ==
 	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
-		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
+		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
 			CTR_RFC3686_IV_SIZE);

 		/* initialize counter portion of counter block */
@@ -1259,7 +1249,7 @@ static int process_cipher(struct ablkcipher_request *req,

 	} else {
-		memcpy(reqctx->iv, req->info, IV);
+		memcpy(reqctx->iv, req->iv, IV);
 	}
 	if (unlikely(bytes == 0)) {
 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
@@ -1268,7 +1258,7 @@ static int process_cipher(struct ablkcipher_request *req,
 					   req->base.flags,
 					   req->src,
 					   req->dst,
-					   req->nbytes,
+					   req->cryptlen,
 					   reqctx->iv,
 					   op_type);
 		goto error;
 	}
@@ -1296,9 +1286,9 @@ error:
 	return err;
 }

-static int chcr_aes_encrypt(struct ablkcipher_request *req)
+static int chcr_aes_encrypt(struct skcipher_request *req)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chcr_dev *dev = c_ctx(tfm)->dev;
 	struct sk_buff *skb = NULL;
 	int err, isfull = 0;
@@ -1329,9 +1319,9 @@ error:
 	return err;
 }

-static int chcr_aes_decrypt(struct ablkcipher_request *req)
+static int chcr_aes_decrypt(struct skcipher_request *req)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
 	struct chcr_dev *dev = c_ctx(tfm)->dev;
 	struct sk_buff *skb = NULL;
@@ -1379,7 +1369,8 @@ static int chcr_device_init(struct chcr_context *ctx)
 	txq_perchan = ntxq / u_ctx->lldi.nchan;
 	spin_lock(&ctx->dev->lock_chcr_dev);
 	ctx->tx_chan_id = ctx->dev->tx_channel_id;
-	ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
+	ctx->dev->tx_channel_id =
+		(ctx->dev->tx_channel_id + 1) % u_ctx->lldi.nchan;
 	spin_unlock(&ctx->dev->lock_chcr_dev);
 	rxq_idx = ctx->tx_chan_id * rxq_perchan;
 	rxq_idx += id % rxq_perchan;
@@ -1398,37 +1389,28 @@ out:
 	return err;
 }

-static int chcr_cra_init(struct crypto_tfm *tfm)
+static int chcr_init_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

-	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
+	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
 				CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(ablkctx->sw_cipher)) {
-		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
 		return PTR_ERR(ablkctx->sw_cipher);
 	}
-	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
-		/* To update tweak*/
-		ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
-		if (IS_ERR(ablkctx->aes_generic)) {
pr_err("failed to allocate aes cipher for tweak\n"); - return PTR_ERR(ablkctx->aes_generic); - } - } else - ablkctx->aes_generic = NULL; + crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx)); - tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); - return chcr_device_init(crypto_tfm_ctx(tfm)); + return chcr_device_init(ctx); } -static int chcr_rfc3686_init(struct crypto_tfm *tfm) +static int chcr_rfc3686_init(struct crypto_skcipher *tfm) { - struct crypto_alg *alg = tfm->__crt_alg; - struct chcr_context *ctx = crypto_tfm_ctx(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct chcr_context *ctx = crypto_skcipher_ctx(tfm); struct ablk_ctx *ablkctx = ABLK_CTX(ctx); /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes)) @@ -1437,22 +1419,20 @@ static int chcr_rfc3686_init(struct crypto_tfm *tfm) ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ablkctx->sw_cipher)) { - pr_err("failed to allocate fallback for %s\n", alg->cra_name); + pr_err("failed to allocate fallback for %s\n", alg->base.cra_name); return PTR_ERR(ablkctx->sw_cipher); } - tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); - return chcr_device_init(crypto_tfm_ctx(tfm)); + crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx)); + return chcr_device_init(ctx); } -static void chcr_cra_exit(struct crypto_tfm *tfm) +static void chcr_exit_tfm(struct crypto_skcipher *tfm) { - struct chcr_context *ctx = crypto_tfm_ctx(tfm); + struct chcr_context *ctx = crypto_skcipher_ctx(tfm); struct ablk_ctx *ablkctx = ABLK_CTX(ctx); crypto_free_sync_skcipher(ablkctx->sw_cipher); - if (ablkctx->aes_generic) - crypto_free_cipher(ablkctx->aes_generic); } static int get_alg_config(struct algo_param *params, @@ -2068,8 +2048,8 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, err = chcr_handle_aead_resp(aead_request_cast(req), input, err); break; - case CRYPTO_ALG_TYPE_ABLKCIPHER: - chcr_handle_cipher_resp(ablkcipher_request_cast(req), + case CRYPTO_ALG_TYPE_SKCIPHER: + chcr_handle_cipher_resp(skcipher_request_cast(req), input, err); break; case CRYPTO_ALG_TYPE_AHASH: @@ -2160,7 +2140,7 @@ out: return err; } -static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, +static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int key_len) { struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); @@ -2184,7 +2164,6 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; return 0; badkey_err: - crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ablkctx->enckey_len = 0; return err; @@ -2588,12 +2567,12 @@ void chcr_add_aead_dst_ent(struct aead_request *req, dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id); } -void chcr_add_cipher_src_ent(struct ablkcipher_request *req, +void chcr_add_cipher_src_ent(struct skcipher_request *req, void *ulptx, struct cipher_wr_param *wrparam) { struct ulptx_walk ulp_walk; - struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); + struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); u8 *buf = ulptx; memcpy(buf, reqctx->iv, IV); @@ -2611,13 +2590,13 @@ void chcr_add_cipher_src_ent(struct ablkcipher_request *req, } } -void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, +void chcr_add_cipher_dst_ent(struct skcipher_request *req, struct cpl_rx_phys_dsgl *phys_cpl, struct cipher_wr_param 
 static int get_alg_config(struct algo_param *params,
@@ -2068,8 +2048,8 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
 		break;

-	case CRYPTO_ALG_TYPE_ABLKCIPHER:
-		chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+	case CRYPTO_ALG_TYPE_SKCIPHER:
+		 chcr_handle_cipher_resp(skcipher_request_cast(req),
 					input, err);
 		break;
 	case CRYPTO_ALG_TYPE_AHASH:
@@ -2160,7 +2140,7 @@ out:
 	return err;
 }

-static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
 			       unsigned int key_len)
 {
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
@@ -2184,7 +2164,6 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
 	return 0;
 badkey_err:
-	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	ablkctx->enckey_len = 0;

 	return err;
@@ -2588,12 +2567,12 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }

-void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_src_ent(struct skcipher_request *req,
 			     void *ulptx,
 			     struct cipher_wr_param *wrparam)
 {
 	struct ulptx_walk ulp_walk;
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	u8 *buf = ulptx;

 	memcpy(buf, reqctx->iv, IV);
@@ -2611,13 +2590,13 @@ void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
 	}
 }

-void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_dst_ent(struct skcipher_request *req,
 			     struct cpl_rx_phys_dsgl *phys_cpl,
 			     struct cipher_wr_param *wrparam,
 			     unsigned short qid)
 {
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
 	struct chcr_context *ctx = c_ctx(tfm);
 	struct dsgl_walk dsgl_walk;
@@ -2692,7 +2671,7 @@ void chcr_hash_dma_unmap(struct device *dev,
 }

 int chcr_cipher_dma_map(struct device *dev,
-			struct ablkcipher_request *req)
+			struct skcipher_request *req)
 {
 	int error;
@@ -2721,7 +2700,7 @@ err:
 }

 void chcr_cipher_dma_unmap(struct device *dev,
-			   struct ablkcipher_request *req)
+			   struct skcipher_request *req)
 {
 	if (req->src == req->dst) {
 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
@@ -3206,9 +3185,6 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 		aeadctx->mayverify = VERIFY_SW;
 		break;
 	default:
-
-		  crypto_tfm_set_flags((struct crypto_tfm *) tfm,
-			CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3233,8 +3209,6 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
 		aeadctx->mayverify = VERIFY_HW;
 		break;
 	default:
-		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
-			CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3275,8 +3249,6 @@ static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
 		aeadctx->mayverify = VERIFY_HW;
 		break;
 	default:
-		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
-			CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
@@ -3301,8 +3273,6 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead,
 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
 	} else {
-		crypto_tfm_set_flags((struct crypto_tfm *)aead,
-				     CRYPTO_TFM_RES_BAD_KEY_LEN);
 		aeadctx->enckey_len = 0;
 		return	-EINVAL;
 	}
@@ -3325,9 +3295,6 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
 			      CRYPTO_TFM_REQ_MASK);
 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
-	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
-	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
-			      CRYPTO_TFM_RES_MASK);
 	if (error)
 		return error;
 	return chcr_ccm_common_setkey(aead, key, keylen);
@@ -3340,8 +3307,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
 	int error;

 	if (keylen < 3) {
-		crypto_tfm_set_flags((struct crypto_tfm *)aead,
-				     CRYPTO_TFM_RES_BAD_KEY_LEN);
 		aeadctx->enckey_len = 0;
 		return	-EINVAL;
 	}
@@ -3349,9 +3314,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
 			      CRYPTO_TFM_REQ_MASK);
 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
-	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
-	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
-			      CRYPTO_TFM_RES_MASK);
 	if (error)
 		return error;
 	keylen -= 3;
@@ -3364,18 +3326,15 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 {
 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
-	struct crypto_cipher *cipher;
 	unsigned int ck_size;
 	int ret = 0, key_ctx_size = 0;
+	struct crypto_aes_ctx aes;

 	aeadctx->enckey_len = 0;
 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
 			      & CRYPTO_TFM_REQ_MASK);
 	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
-	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
-	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
-			      CRYPTO_TFM_RES_MASK);
 	if (ret)
 		goto out;
@@ -3391,8 +3350,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	} else if (keylen == AES_KEYSIZE_256) {
 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
 	} else {
-		crypto_tfm_set_flags((struct crypto_tfm *)aead,
-				     CRYPTO_TFM_RES_BAD_KEY_LEN);
 		pr_err("GCM: Invalid key length %d\n", keylen);
 		ret = -EINVAL;
 		goto out;
 	}
@@ -3409,23 +3366,15 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	/* Calculate the H = CIPH(K, 0 repeated 16 times).
 	 * It will go in key context
 	 */
-	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
-	if (IS_ERR(cipher)) {
-		aeadctx->enckey_len = 0;
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	ret = crypto_cipher_setkey(cipher, key, keylen);
+	ret = aes_expandkey(&aes, key, keylen);
 	if (ret) {
 		aeadctx->enckey_len = 0;
-		goto out1;
+		goto out;
 	}
 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
-	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
+	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
+	memzero_explicit(&aes, sizeof(aes));

-out1:
-	crypto_free_cipher(cipher);
 out:
 	return ret;
 }
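The GCM hash subkey is simply the block cipher applied to an all-zero block, so the allocated "aes-generic" transform above can be replaced by one stack-local key expansion. In isolation the derivation looks like this (sketch, not driver code; the helper name is illustrative):

	#include <crypto/aes.h>
	#include <linux/string.h>	/* memzero_explicit() */

	/* Illustrative only: derive the GHASH subkey H = AES-K(0^16). */
	static int gcm_derive_h(const u8 *key, unsigned int keylen,
				u8 ghash_h[AES_BLOCK_SIZE])
	{
		struct crypto_aes_ctx aes;
		int ret;

		ret = aes_expandkey(&aes, key, keylen);
		if (ret)
			return ret;

		memset(ghash_h, 0, AES_BLOCK_SIZE);
		aes_encrypt(&aes, ghash_h, ghash_h);	/* in place: 0^16 -> H */
		memzero_explicit(&aes, sizeof(aes));
		return 0;
	}

The same pattern recurs below in chcr_ipsec_setkey() and chtls_key_info(); in all three places the error paths get simpler because there is no transform left to free.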
@@ -3451,16 +3400,11 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
 			      & CRYPTO_TFM_REQ_MASK);
 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
-	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
-	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
-			      & CRYPTO_TFM_RES_MASK);
 	if (err)
 		goto out;

-	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
-		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 		goto out;
-	}

 	if (get_alg_config(&param, max_authsize)) {
 		pr_err("chcr : Unsupported digest size\n");
@@ -3581,16 +3525,12 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
 			      & CRYPTO_TFM_REQ_MASK);
 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
-	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
-	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
-			      & CRYPTO_TFM_RES_MASK);
 	if (err)
 		goto out;

-	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
-		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 		goto out;
-	}
+
 	subtype = get_aead_subtype(authenc);
 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
@@ -3732,82 +3672,76 @@ static int chcr_aead_decrypt(struct aead_request *req)
 static struct chcr_alg_template driver_algs[] = {
 	/* AES-CBC */
 	{
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
+		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
 		.is_registered = 0,
-		.alg.crypto = {
-			.cra_name		= "cbc(aes)",
-			.cra_driver_name	= "cbc-aes-chcr",
-			.cra_blocksize		= AES_BLOCK_SIZE,
-			.cra_init		= chcr_cra_init,
-			.cra_exit		= chcr_cra_exit,
-			.cra_u.ablkcipher	= {
-				.min_keysize	= AES_MIN_KEY_SIZE,
-				.max_keysize	= AES_MAX_KEY_SIZE,
-				.ivsize		= AES_BLOCK_SIZE,
-				.setkey		= chcr_aes_cbc_setkey,
-				.encrypt	= chcr_aes_encrypt,
-				.decrypt	= chcr_aes_decrypt,
+		.alg.skcipher = {
+			.base.cra_name		= "cbc(aes)",
+			.base.cra_driver_name	= "cbc-aes-chcr",
+			.base.cra_blocksize	= AES_BLOCK_SIZE,
+
+			.init			= chcr_init_tfm,
+			.exit			= chcr_exit_tfm,
+			.min_keysize		= AES_MIN_KEY_SIZE,
+			.max_keysize		= AES_MAX_KEY_SIZE,
+			.ivsize			= AES_BLOCK_SIZE,
+			.setkey			= chcr_aes_cbc_setkey,
+			.encrypt		= chcr_aes_encrypt,
+			.decrypt		= chcr_aes_decrypt,
 			}
-		}
 	},
 	{
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
+		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
 		.is_registered = 0,
-		.alg.crypto = {
-			.cra_name		= "xts(aes)",
-			.cra_driver_name	= "xts-aes-chcr",
-			.cra_blocksize		= AES_BLOCK_SIZE,
-			.cra_init		= chcr_cra_init,
-			.cra_exit		= NULL,
-			.cra_u .ablkcipher	= {
-					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-					.ivsize		= AES_BLOCK_SIZE,
-					.setkey		= chcr_aes_xts_setkey,
-					.encrypt	= chcr_aes_encrypt,
-					.decrypt	= chcr_aes_decrypt,
-				}
+		.alg.skcipher = {
+			.base.cra_name		= "xts(aes)",
+			.base.cra_driver_name	= "xts-aes-chcr",
+			.base.cra_blocksize	= AES_BLOCK_SIZE,
+
+			.init			= chcr_init_tfm,
+			.exit			= chcr_exit_tfm,
+			.min_keysize		= 2 * AES_MIN_KEY_SIZE,
+			.max_keysize		= 2 * AES_MAX_KEY_SIZE,
+			.ivsize			= AES_BLOCK_SIZE,
+			.setkey			= chcr_aes_xts_setkey,
+			.encrypt		= chcr_aes_encrypt,
+			.decrypt		= chcr_aes_decrypt,
 			}
 	},
 	{
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
+		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
 		.is_registered = 0,
-		.alg.crypto = {
-			.cra_name		= "ctr(aes)",
-			.cra_driver_name	= "ctr-aes-chcr",
-			.cra_blocksize		= 1,
-			.cra_init		= chcr_cra_init,
-			.cra_exit		= chcr_cra_exit,
-			.cra_u.ablkcipher	= {
-				.min_keysize	= AES_MIN_KEY_SIZE,
-				.max_keysize	= AES_MAX_KEY_SIZE,
-				.ivsize		= AES_BLOCK_SIZE,
-				.setkey		= chcr_aes_ctr_setkey,
-				.encrypt	= chcr_aes_encrypt,
-				.decrypt	= chcr_aes_decrypt,
-			}
+		.alg.skcipher = {
+			.base.cra_name		= "ctr(aes)",
+			.base.cra_driver_name	= "ctr-aes-chcr",
+			.base.cra_blocksize	= 1,
+
+			.init			= chcr_init_tfm,
+			.exit			= chcr_exit_tfm,
+			.min_keysize		= AES_MIN_KEY_SIZE,
+			.max_keysize		= AES_MAX_KEY_SIZE,
+			.ivsize			= AES_BLOCK_SIZE,
+			.setkey			= chcr_aes_ctr_setkey,
+			.encrypt		= chcr_aes_encrypt,
+			.decrypt		= chcr_aes_decrypt,
 		}
 	},
 	{
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
+		.type = CRYPTO_ALG_TYPE_SKCIPHER |
 			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
 		.is_registered = 0,
-		.alg.crypto = {
-			.cra_name		= "rfc3686(ctr(aes))",
-			.cra_driver_name	= "rfc3686-ctr-aes-chcr",
-			.cra_blocksize		= 1,
-			.cra_init		= chcr_rfc3686_init,
-			.cra_exit		= chcr_cra_exit,
-			.cra_u.ablkcipher	= {
-				.min_keysize	= AES_MIN_KEY_SIZE +
-					CTR_RFC3686_NONCE_SIZE,
-				.max_keysize	= AES_MAX_KEY_SIZE +
-					CTR_RFC3686_NONCE_SIZE,
-				.ivsize		= CTR_RFC3686_IV_SIZE,
-				.setkey		= chcr_aes_rfc3686_setkey,
-				.encrypt	= chcr_aes_encrypt,
-				.decrypt	= chcr_aes_decrypt,
-			}
+		.alg.skcipher = {
+			.base.cra_name		= "rfc3686(ctr(aes))",
+			.base.cra_driver_name	= "rfc3686-ctr-aes-chcr",
+			.base.cra_blocksize	= 1,
+
+			.init			= chcr_rfc3686_init,
+			.exit			= chcr_exit_tfm,
+			.min_keysize	= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+			.ivsize		= CTR_RFC3686_IV_SIZE,
+			.setkey		= chcr_aes_rfc3686_setkey,
+			.encrypt	= chcr_aes_encrypt,
+			.decrypt	= chcr_aes_decrypt,
 		}
 	},
 	/* SHA */
@@ -4274,10 +4208,10 @@ static int chcr_unregister_alg(void)
 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
-		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		case CRYPTO_ALG_TYPE_SKCIPHER:
 			if (driver_algs[i].is_registered)
-				crypto_unregister_alg(
+				crypto_unregister_skcipher(
&driver_algs[i].alg.crypto); + crypto_unregister_skcipher( + &driver_algs[i].alg.skcipher); break; case CRYPTO_ALG_TYPE_AEAD: if (driver_algs[i].is_registered) @@ -4313,21 +4247,20 @@ static int chcr_register_alg(void) if (driver_algs[i].is_registered) continue; switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { - case CRYPTO_ALG_TYPE_ABLKCIPHER: - driver_algs[i].alg.crypto.cra_priority = + case CRYPTO_ALG_TYPE_SKCIPHER: + driver_algs[i].alg.skcipher.base.cra_priority = CHCR_CRA_PRIORITY; - driver_algs[i].alg.crypto.cra_module = THIS_MODULE; - driver_algs[i].alg.crypto.cra_flags = - CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | + driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE; + driver_algs[i].alg.skcipher.base.cra_flags = + CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK; - driver_algs[i].alg.crypto.cra_ctxsize = + driver_algs[i].alg.skcipher.base.cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct ablk_ctx); - driver_algs[i].alg.crypto.cra_alignmask = 0; - driver_algs[i].alg.crypto.cra_type = - &crypto_ablkcipher_type; - err = crypto_register_alg(&driver_algs[i].alg.crypto); - name = driver_algs[i].alg.crypto.cra_driver_name; + driver_algs[i].alg.skcipher.base.cra_alignmask = 0; + + err = crypto_register_skcipher(&driver_algs[i].alg.skcipher); + name = driver_algs[i].alg.skcipher.base.cra_driver_name; break; case CRYPTO_ALG_TYPE_AEAD: driver_algs[i].alg.aead.base.cra_flags = diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h index ee20dd899e83..f58c2b5c7fc5 100644 --- a/drivers/crypto/chelsio/chcr_algo.h +++ b/drivers/crypto/chelsio/chcr_algo.h @@ -287,7 +287,7 @@ struct hash_wr_param { }; struct cipher_wr_param { - struct ablkcipher_request *req; + struct skcipher_request *req; char *iv; int bytes; unsigned short qid; @@ -333,26 +333,26 @@ struct phys_sge_pairs { }; -static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { +static const u32 chcr_sha1_init[SHA1_DIGEST_SIZE / 4] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, }; -static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { +static const u32 chcr_sha224_init[SHA256_DIGEST_SIZE / 4] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, }; -static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { +static const u32 chcr_sha256_init[SHA256_DIGEST_SIZE / 4] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, }; -static const u64 sha384_init[SHA512_DIGEST_SIZE / 8] = { +static const u64 chcr_sha384_init[SHA512_DIGEST_SIZE / 8] = { SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3, SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7, }; -static const u64 sha512_init[SHA512_DIGEST_SIZE / 8] = { +static const u64 chcr_sha512_init[SHA512_DIGEST_SIZE / 8] = { SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3, SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7, }; @@ -362,21 +362,21 @@ static inline void copy_hash_init_values(char *key, int digestsize) u8 i; __be32 *dkey = (__be32 *)key; u64 *ldkey = (u64 *)key; - __be64 *sha384 = (__be64 *)sha384_init; - __be64 *sha512 = (__be64 *)sha512_init; + __be64 *sha384 = (__be64 *)chcr_sha384_init; + __be64 *sha512 = (__be64 *)chcr_sha512_init; switch (digestsize) { case SHA1_DIGEST_SIZE: for (i = 0; i < SHA1_INIT_STATE; i++) - dkey[i] = cpu_to_be32(sha1_init[i]); + dkey[i] = cpu_to_be32(chcr_sha1_init[i]); break; case SHA224_DIGEST_SIZE: for (i = 0; i < SHA224_INIT_STATE; i++) - dkey[i] = cpu_to_be32(sha224_init[i]); + dkey[i] = cpu_to_be32(chcr_sha224_init[i]); break; case 
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index ee20dd899e83..f58c2b5c7fc5 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -287,7 +287,7 @@ struct hash_wr_param {
 };

 struct cipher_wr_param {
-	struct ablkcipher_request *req;
+	struct skcipher_request *req;
 	char *iv;
 	int bytes;
 	unsigned short qid;
@@ -333,26 +333,26 @@ struct phys_sge_pairs {
 };


-static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
+static const u32 chcr_sha1_init[SHA1_DIGEST_SIZE / 4] = {
 	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
 };

-static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
+static const u32 chcr_sha224_init[SHA256_DIGEST_SIZE / 4] = {
 	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
 	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
 };

-static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
+static const u32 chcr_sha256_init[SHA256_DIGEST_SIZE / 4] = {
 	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
 	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
 };

-static const u64 sha384_init[SHA512_DIGEST_SIZE / 8] = {
+static const u64 chcr_sha384_init[SHA512_DIGEST_SIZE / 8] = {
 	SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3,
 	SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7,
 };

-static const u64 sha512_init[SHA512_DIGEST_SIZE / 8] = {
+static const u64 chcr_sha512_init[SHA512_DIGEST_SIZE / 8] = {
 	SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3,
 	SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7,
 };
@@ -362,21 +362,21 @@ static inline void copy_hash_init_values(char *key, int digestsize)
 	u8 i;
 	__be32 *dkey = (__be32 *)key;
 	u64 *ldkey = (u64 *)key;
-	__be64 *sha384 = (__be64 *)sha384_init;
-	__be64 *sha512 = (__be64 *)sha512_init;
+	__be64 *sha384 = (__be64 *)chcr_sha384_init;
+	__be64 *sha512 = (__be64 *)chcr_sha512_init;

 	switch (digestsize) {
 	case SHA1_DIGEST_SIZE:
 		for (i = 0; i < SHA1_INIT_STATE; i++)
-			dkey[i] = cpu_to_be32(sha1_init[i]);
+			dkey[i] = cpu_to_be32(chcr_sha1_init[i]);
 		break;
 	case SHA224_DIGEST_SIZE:
 		for (i = 0; i < SHA224_INIT_STATE; i++)
-			dkey[i] = cpu_to_be32(sha224_init[i]);
+			dkey[i] = cpu_to_be32(chcr_sha224_init[i]);
 		break;
 	case SHA256_DIGEST_SIZE:
 		for (i = 0; i < SHA256_INIT_STATE; i++)
-			dkey[i] = cpu_to_be32(sha256_init[i]);
+			dkey[i] = cpu_to_be32(chcr_sha256_init[i]);
 		break;
 	case SHA384_DIGEST_SIZE:
 		for (i = 0; i < SHA384_INIT_STATE; i++)
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 029a7354f541..e937605670ac 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -132,8 +132,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx)

 static int chcr_dev_move(struct uld_ctx *u_ctx)
 {
-	struct adapter *adap;
-
 	mutex_lock(&drv_data.drv_mutex);
 	if (drv_data.last_dev == u_ctx) {
 		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
@@ -146,8 +144,6 @@ static int chcr_dev_move(struct uld_ctx *u_ctx)
 	list_move(&u_ctx->entry, &drv_data.inact_dev);
 	if (list_empty(&drv_data.act_dev))
 		drv_data.last_dev = NULL;
-	adap = padap(&u_ctx->dev);
-	memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
 	atomic_dec(&drv_data.dev_count);
 	mutex_unlock(&drv_data.drv_mutex);
@@ -299,17 +295,21 @@ static int __init chcr_crypto_init(void)
 static void __exit chcr_crypto_exit(void)
 {
 	struct uld_ctx *u_ctx, *tmp;
+	struct adapter *adap;

 	stop_crypto();
-
 	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
 	/* Remove all devices from list */
 	mutex_lock(&drv_data.drv_mutex);
 	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+		adap = padap(&u_ctx->dev);
+		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
 		list_del(&u_ctx->entry);
 		kfree(u_ctx);
 	}
 	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+		adap = padap(&u_ctx->dev);
+		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
 		list_del(&u_ctx->entry);
 		kfree(u_ctx);
 	}
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 655606f2e4d0..6db2df8c8a05 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -160,9 +160,9 @@ static inline struct chcr_context *a_ctx(struct crypto_aead *tfm)
 	return crypto_aead_ctx(tfm);
 }

-static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm)
+static inline struct chcr_context *c_ctx(struct crypto_skcipher *tfm)
 {
-	return crypto_ablkcipher_ctx(tfm);
+	return crypto_skcipher_ctx(tfm);
 }

 static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
@@ -172,7 +172,6 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)

 struct ablk_ctx {
 	struct crypto_sync_skcipher *sw_cipher;
-	struct crypto_cipher *aes_generic;
 	__be32 key_ctx_hdr;
 	unsigned int enckey_len;
 	unsigned char ciph_mode;
@@ -286,7 +285,7 @@ struct chcr_ahash_req_ctx {
 	u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
 };

-struct chcr_blkcipher_req_ctx {
+struct chcr_skcipher_req_ctx {
 	struct sk_buff *skb;
 	struct scatterlist *dstsg;
 	unsigned int processed;
@@ -303,7 +302,7 @@ struct chcr_alg_template {
 	u32 type;
 	u32 is_registered;
 	union {
-		struct crypto_alg crypto;
+		struct skcipher_alg skcipher;
 		struct ahash_alg hash;
 		struct aead_alg aead;
 	} alg;
@@ -322,12 +321,12 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 			   struct cpl_rx_phys_dsgl *phys_cpl,
 			   unsigned short qid);
 void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx);
-void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_src_ent(struct skcipher_request *req,
 			     void *ulptx,
 			     struct cipher_wr_param *wrparam);
-int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req);
-void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req);
-void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+int chcr_cipher_dma_map(struct device *dev, struct skcipher_request *req);
+void chcr_cipher_dma_unmap(struct device *dev, struct skcipher_request *req);
+void chcr_add_cipher_dst_ent(struct skcipher_request *req,
 			     struct cpl_rx_phys_dsgl *phys_cpl,
 			     struct cipher_wr_param *wrparam,
 			     unsigned short qid);
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index f429aae72542..9da0f93a330b 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -132,11 +132,11 @@ static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
 static inline int chcr_ipsec_setkey(struct xfrm_state *x,
 				    struct ipsec_sa_entry *sa_entry)
 {
-	struct crypto_cipher *cipher;
 	int keylen = (x->aead->alg_key_len + 7) / 8;
 	unsigned char *key = x->aead->alg_key;
 	int ck_size, key_ctx_size = 0;
 	unsigned char ghash_h[AEAD_H_SIZE];
+	struct crypto_aes_ctx aes;
 	int ret = 0;

 	if (keylen > 3) {
@@ -170,26 +170,19 @@ static inline int chcr_ipsec_setkey(struct xfrm_state *x,
 	/* Calculate the H = CIPH(K, 0 repeated 16 times).
 	 * It will go in key context
 	 */
-	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
-	if (IS_ERR(cipher)) {
-		sa_entry->enckey_len = 0;
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	ret = crypto_cipher_setkey(cipher, key, keylen);
+	ret = aes_expandkey(&aes, key, keylen);
 	if (ret) {
 		sa_entry->enckey_len = 0;
-		goto out1;
+		goto out;
 	}
 	memset(ghash_h, 0, AEAD_H_SIZE);
-	crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+	aes_encrypt(&aes, ghash_h, ghash_h);
+	memzero_explicit(&aes, sizeof(aes));
+
 	memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) * 16),
 	       ghash_h, AEAD_H_SIZE);
 	sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
 			      AEAD_H_SIZE;
-out1:
-	crypto_free_cipher(cipher);
 out:
 	return ret;
 }
@@ -680,16 +673,16 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
 int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xfrm_state *x = xfrm_input_state(skb);
+	unsigned int last_desc, ndesc, flits = 0;
 	struct ipsec_sa_entry *sa_entry;
 	u64 *pos, *end, *before, *sgl;
+	struct tx_sw_desc *sgl_sdesc;
 	int qidx, left, credits;
-	unsigned int flits = 0, ndesc;
-	struct adapter *adap;
+	bool immediate = false;
 	struct sge_eth_txq *q;
+	struct adapter *adap;
 	struct port_info *pi;
-	dma_addr_t addr[MAX_SKB_FRAGS + 1];
 	struct sec_path *sp;
-	bool immediate = false;

 	if (!x->xso.offload_handle)
 		return NETDEV_TX_BUSY;
@@ -722,8 +715,14 @@ out_free:	dev_kfree_skb_any(skb);
 		return NETDEV_TX_BUSY;
 	}

+	last_desc = q->q.pidx + ndesc - 1;
+	if (last_desc >= q->q.size)
+		last_desc -= q->q.size;
+	sgl_sdesc = &q->q.sdesc[last_desc];
+
 	if (!immediate &&
-	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
 		q->mapping_err++;
 		goto out_free;
 	}
@@ -749,17 +748,10 @@ out_free:	dev_kfree_skb_any(skb);
 		cxgb4_inline_tx_skb(skb, &q->q, sgl);
 		dev_consume_skb_any(skb);
 	} else {
-		int last_desc;
-
 		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
-				0, addr);
+				0, sgl_sdesc->addr);
 		skb_orphan(skb);
-
-		last_desc = q->q.pidx + ndesc - 1;
-		if (last_desc >= q->q.size)
-			last_desc -= q->q.size;
-		q->q.sdesc[last_desc].skb = skb;
-		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+		sgl_sdesc->skb = skb;
 	}
 	txq_advance(&q->q, ndesc);
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index 025c831d0899..459442704eb1 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -21,6 +21,7 @@
 #include <crypto/internal/hash.h>
 #include <linux/tls.h>
 #include <net/tls.h>
+#include <net/tls_toe.h>

 #include "t4fw_api.h"
 #include "t4_msg.h"
@@ -118,7 +119,7 @@ struct tls_scmd {
 };

 struct chtls_dev {
-	struct tls_device tlsdev;
+	struct tls_toe_device tlsdev;
 	struct list_head list;
 	struct cxgb4_lld_info *lldi;
 	struct pci_dev *pdev;
@@ -178,7 +179,10 @@ struct chtls_hws {
 	u32 copied_seq;
 	u64 tx_seq_no;
 	struct tls_scmd scmd;
-	struct tls12_crypto_info_aes_gcm_128 crypto_info;
+	union {
+		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
+	} crypto_info;
 };

 struct chtls_sock {
@@ -362,7 +366,7 @@ enum {
 #define TCP_PAGE(sk)   (sk->sk_frag.page)
 #define TCP_OFF(sk)    (sk->sk_frag.offset)

-static inline struct chtls_dev *to_chtls_dev(struct tls_device *tlsdev)
+static inline struct chtls_dev *to_chtls_dev(struct tls_toe_device *tlsdev)
 {
 	return container_of(tlsdev, struct chtls_dev, tlsdev);
 }
@@ -481,7 +485,7 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
 void chtls_tcp_push(struct sock *sk, int flags);
 int chtls_push_frames(struct chtls_sock *csk, int comp);
 int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
-int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode);
+int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type);
 void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
 unsigned int keyid_to_addr(int start_addr, int keyid);
 void free_tls_keyid(struct sock *sk);
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 774d991d7cca..9b2745ad9e38 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -727,6 +727,14 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
 	return 0;
 }

+static void chtls_purge_wr_queue(struct sock *sk)
+{
+	struct sk_buff *skb;
+
+	while ((skb = dequeue_wr(sk)) != NULL)
+		kfree_skb(skb);
+}
+
 static void chtls_release_resources(struct sock *sk)
 {
 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
@@ -741,6 +749,11 @@ static void chtls_release_resources(struct sock *sk)
 	kfree_skb(csk->txdata_skb_cache);
 	csk->txdata_skb_cache = NULL;

+	if (csk->wr_credits != csk->wr_max_credits) {
+		chtls_purge_wr_queue(sk);
+		chtls_reset_wr_list(csk);
+	}
+
 	if (csk->l2t_entry) {
 		cxgb4_l2t_release(csk->l2t_entry);
 		csk->l2t_entry = NULL;
@@ -1273,7 +1286,7 @@ static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
 	ctx = (struct listen_ctx *)data;
 	lsk = ctx->lsk;

-	if (unlikely(tid >= cdev->tids->ntids)) {
+	if (unlikely(tid_out_of_range(cdev->tids, tid))) {
 		pr_info("passive open TID %u too large\n", tid);
 		return 1;
 	}
@@ -1297,7 +1310,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
 	tp->write_seq = snd_isn;
 	tp->snd_nxt = snd_isn;
 	tp->snd_una = snd_isn;
-	inet_sk(sk)->inet_id = tp->write_seq ^ jiffies;
+	inet_sk(sk)->inet_id = prandom_u32();
 	assign_rxopt(sk, opt);

 	if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
@@ -1735,6 +1748,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
 		else
 			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
 	}
+	kfree_skb(skb);
 }

 static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
@@ -1815,6 +1829,20 @@ static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
 	kfree_skb(skb);
 }
+/*
+ * Add an skb to the deferred skb queue for processing from process context.
+ */
+static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
+			   defer_handler_t handler)
+{
+	DEFERRED_SKB_CB(skb)->handler = handler;
+	spin_lock_bh(&cdev->deferq.lock);
+	__skb_queue_tail(&cdev->deferq, skb);
+	if (skb_queue_len(&cdev->deferq) == 1)
+		schedule_work(&cdev->deferq_task);
+	spin_unlock_bh(&cdev->deferq.lock);
+}
+
 static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
 			   struct chtls_dev *cdev, int status, int queue)
 {
@@ -1829,7 +1857,7 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,

 	if (!reply_skb) {
 		req->status = (queue << 1);
-		send_defer_abort_rpl(cdev, skb);
+		t4_defer_reply(skb, cdev, send_defer_abort_rpl);
 		return;
 	}
@@ -1848,20 +1876,6 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
 	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
 }

-/*
- * Add an skb to the deferred skb queue for processing from process context.
- */
-static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
-			   defer_handler_t handler)
-{
-	DEFERRED_SKB_CB(skb)->handler = handler;
-	spin_lock_bh(&cdev->deferq.lock);
-	__skb_queue_tail(&cdev->deferq, skb);
-	if (skb_queue_len(&cdev->deferq) == 1)
-		schedule_work(&cdev->deferq_task);
-	spin_unlock_bh(&cdev->deferq.lock);
-}
-
 static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
 				 struct chtls_dev *cdev,
 				 int status, int queue)
@@ -2062,19 +2076,6 @@ rel_skb:
 	return 0;
 }

-static struct sk_buff *dequeue_wr(struct sock *sk)
-{
-	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
-	struct sk_buff *skb = csk->wr_skb_head;
-
-	if (likely(skb)) {
-	/* Don't bother clearing the tail */
-		csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
-		WR_SKB_CB(skb)->next_wr = NULL;
-	}
-	return skb;
-}
-
 static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
 {
 	struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
index 129d7ac649a9..3fac0c74a41f 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
@@ -185,6 +185,12 @@ static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
 	kfree_skb(skb);
 }

+static inline void chtls_reset_wr_list(struct chtls_sock *csk)
+{
+	csk->wr_skb_head = NULL;
+	csk->wr_skb_tail = NULL;
+}
+
 static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
 {
 	WR_SKB_CB(skb)->next_wr = NULL;
@@ -197,4 +203,19 @@ static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
 		WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
 	csk->wr_skb_tail = skb;
 }
+
+static inline struct sk_buff *dequeue_wr(struct sock *sk)
+{
+	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+	struct sk_buff *skb = NULL;
+
+	skb = csk->wr_skb_head;
+
+	if (likely(skb)) {
+	/* Don't bother clearing the tail */
+		csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
+		WR_SKB_CB(skb)->next_wr = NULL;
+	}
+	return skb;
+}
 #endif
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index f2424f4c5f78..f1820aca0d33 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -208,44 +208,64 @@ static void chtls_rxkey_ivauth(struct _key_ctx *kctx)

 static int chtls_key_info(struct chtls_sock *csk,
 			  struct _key_ctx *kctx,
-			  u32 keylen, u32 optname)
+			  u32 keylen, u32 optname,
+			  int cipher_type)
 {
-	unsigned char key[AES_KEYSIZE_128];
-	struct tls12_crypto_info_aes_gcm_128 *gcm_ctx;
+	unsigned char key[AES_MAX_KEY_SIZE];
+	unsigned char *key_p, *salt;
 	unsigned char ghash_h[AEAD_H_SIZE];
-	struct crypto_cipher *cipher;
-	int ck_size, key_ctx_size;
+	int ck_size, key_ctx_size, kctx_mackey_size, salt_size;
+	struct crypto_aes_ctx aes;
 	int ret;

-	gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *)
-		  &csk->tlshws.crypto_info;
-
 	key_ctx_size = sizeof(struct _key_ctx) +
 		       roundup(keylen, 16) + AEAD_H_SIZE;

-	if (keylen == AES_KEYSIZE_128) {
-		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
-	} else {
+	/* GCM mode of AES supports 128 and 256 bit encryption, so
+	 * prepare key context base on GCM cipher type
+	 */
+	switch (cipher_type) {
+	case TLS_CIPHER_AES_GCM_128: {
+		struct tls12_crypto_info_aes_gcm_128 *gcm_ctx_128 =
+			(struct tls12_crypto_info_aes_gcm_128 *)
+					&csk->tlshws.crypto_info;
+		memcpy(key, gcm_ctx_128->key, keylen);
+
+		key_p            = gcm_ctx_128->key;
+		salt             = gcm_ctx_128->salt;
+		ck_size          = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+		salt_size        = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
+		kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+		break;
+	}
+	case TLS_CIPHER_AES_GCM_256: {
+		struct tls12_crypto_info_aes_gcm_256 *gcm_ctx_256 =
+			(struct tls12_crypto_info_aes_gcm_256 *)
+					&csk->tlshws.crypto_info;
+		memcpy(key, gcm_ctx_256->key, keylen);
+
+		key_p            = gcm_ctx_256->key;
+		salt             = gcm_ctx_256->salt;
+		ck_size          = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+		salt_size        = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
+		kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+		break;
+	}
+	default:
 		pr_err("GCM: Invalid key length %d\n", keylen);
 		return -EINVAL;
 	}
-	memcpy(key, gcm_ctx->key, keylen);

 	/* Calculate the H = CIPH(K, 0 repeated 16 times).
 	 * It will go in key context
 	 */
-	cipher = crypto_alloc_cipher("aes", 0, 0);
-	if (IS_ERR(cipher)) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	ret = crypto_cipher_setkey(cipher, key, keylen);
+	ret = aes_expandkey(&aes, key, keylen);
 	if (ret)
-		goto out1;
+		return ret;

 	memset(ghash_h, 0, AEAD_H_SIZE);
-	crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+	aes_encrypt(&aes, ghash_h, ghash_h);
+	memzero_explicit(&aes, sizeof(aes));
 	csk->tlshws.keylen = key_ctx_size;

 	/* Copy the Key context */
@@ -254,25 +274,22 @@ static int chtls_key_info(struct chtls_sock *csk,
 		key_ctx = ((key_ctx_size >> 4) << 3);
 		kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size,
-						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
+						 kctx_mackey_size,
 						 0, 0, key_ctx);
 		chtls_rxkey_ivauth(kctx);
 	} else {
 		kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
-						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
+						 kctx_mackey_size,
 						 0, 0, key_ctx_size >> 4);
 	}

-	memcpy(kctx->salt, gcm_ctx->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
-	memcpy(kctx->key, gcm_ctx->key, keylen);
+	memcpy(kctx->salt, salt, salt_size);
+	memcpy(kctx->key, key_p, keylen);
 	memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE);
 	/* erase key info from driver */
-	memset(gcm_ctx->key, 0, keylen);
+	memset(key_p, 0, keylen);

-out1:
-	crypto_free_cipher(cipher);
-out:
-	return ret;
+	return 0;
 }
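The switch above is what actually adds AES-256-GCM support to chtls: each TLS cipher type carries its own key, salt and MAC-key-size constants, and everything downstream is driven off the values picked here. The mapping reduces to a small table; the following is only an illustrative refactoring sketch, using the TLS_CIPHER_* constants from the uapi <linux/tls.h> and the CHCR_KEYCTX_* macros already used in the hunk:

	/* Illustrative only: the per-cipher constants chtls_key_info() selects. */
	struct gcm_params {
		u32 keylen;
		u32 salt_size;
		u32 ck_size;
		u32 mk_size;
	};

	static int gcm_pick_params(int cipher_type, struct gcm_params *p)
	{
		switch (cipher_type) {
		case TLS_CIPHER_AES_GCM_128:
			*p = (struct gcm_params) {
				.keylen    = TLS_CIPHER_AES_GCM_128_KEY_SIZE,
				.salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE,
				.ck_size   = CHCR_KEYCTX_CIPHER_KEY_SIZE_128,
				.mk_size   = CHCR_KEYCTX_MAC_KEY_SIZE_128,
			};
			return 0;
		case TLS_CIPHER_AES_GCM_256:
			*p = (struct gcm_params) {
				.keylen    = TLS_CIPHER_AES_GCM_256_KEY_SIZE,
				.salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE,
				.ck_size   = CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
				.mk_size   = CHCR_KEYCTX_MAC_KEY_SIZE_256,
			};
			return 0;
		default:
			return -EINVAL;	/* unsupported cipher type */
		}
	}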
 static void chtls_set_scmd(struct chtls_sock *csk)
@@ -296,7 +313,8 @@ static void chtls_set_scmd(struct chtls_sock *csk)
 			SCMD_TLS_FRAG_ENABLE_V(1);
 }

-int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
+int chtls_setkey(struct chtls_sock *csk, u32 keylen,
+		 u32 optname, int cipher_type)
 {
 	struct tls_key_req *kwr;
 	struct chtls_dev *cdev;
@@ -358,9 +376,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
 	kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
 	kwr->sc_imm.len = cpu_to_be32(klen);

+	lock_sock(sk);
 	/* key info */
 	kctx = (struct _key_ctx *)(kwr + 1);
-	ret = chtls_key_info(csk, kctx, keylen, optname);
+	ret = chtls_key_info(csk, kctx, keylen, optname, cipher_type);
 	if (ret)
 		goto out_notcb;
@@ -396,8 +415,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
 		csk->tlshws.txkey = keyid;
 	}

+	release_sock(sk);
 	return ret;
 out_notcb:
+	release_sock(sk);
 	free_tls_keyid(sk);
 out_nokey:
 	kfree_skb(skb);
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 551bca6fef24..5cf9b021220b 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -97,7 +97,7 @@ static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
 	if (!skb)
 		return NULL;

-	memcpy(__skb_put(skb, flowclen), flowc, flowclen);
+	__skb_put_data(skb, flowc, flowclen);
 	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);

 	return skb;
@@ -1078,7 +1078,7 @@ new_buf:
 			bool merge;

 			if (page)
-				pg_size <<= compound_order(page);
+				pg_size = page_size(page);
 			if (off < pg_size &&
 			    skb_can_coalesce(skb, i, page, off)) {
 				merge = 1;
@@ -1105,8 +1105,7 @@ new_buf:
 							   __GFP_NORETRY,
 							   order);
 					if (page)
-						pg_size <<=
-							compound_order(page);
+						pg_size <<= order;
 				}
 				if (!page) {
 					page = alloc_page(gfp);
@@ -1134,7 +1133,9 @@ copy:
 			}
 			/* Update the skb. */
 			if (merge) {
-				skb_shinfo(skb)->frags[i - 1].size += copy;
+				skb_frag_size_add(
+						&skb_shinfo(skb)->frags[i - 1],
+						copy);
 			} else {
 				skb_fill_page_desc(skb, i, page, off, copy);
 				if (off + copy < pg_size) {
@@ -1247,7 +1248,7 @@ new_buf:

 		i = skb_shinfo(skb)->nr_frags;
 		if (skb_can_coalesce(skb, i, page, offset)) {
-			skb_shinfo(skb)->frags[i - 1].size += copy;
+			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
 		} else if (i < MAX_SKB_FRAGS) {
 			get_page(page);
 			skb_fill_page_desc(skb, i, page, offset, copy);
@@ -1436,7 +1437,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 						  csk->wr_max_credits))
 			sk->sk_write_space(sk);

-		if (copied >= target && !sk->sk_backlog.tail)
+		if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
 			break;

 		if (copied) {
@@ -1469,7 +1470,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 				break;
 			}
 		}
-		if (sk->sk_backlog.tail) {
+		if (READ_ONCE(sk->sk_backlog.tail)) {
 			release_sock(sk);
 			lock_sock(sk);
 			chtls_cleanup_rbuf(sk, copied);
@@ -1614,7 +1615,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
 			break;
 		}

-		if (sk->sk_backlog.tail) {
+		if (READ_ONCE(sk->sk_backlog.tail)) {
 			/* Do not sleep, just process backlog. */
 			release_sock(sk);
 			lock_sock(sk);
@@ -1701,7 +1702,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 		return peekmsg(sk, msg, len, nonblock, flags);

 	if (sk_can_busy_loop(sk) &&
-	    skb_queue_empty(&sk->sk_receive_queue) &&
+	    skb_queue_empty_lockless(&sk->sk_receive_queue) &&
 	    sk->sk_state == TCP_ESTABLISHED)
 		sk_busy_loop(sk, nonblock);

@@ -1742,7 +1743,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 						  csk->wr_max_credits))
 			sk->sk_write_space(sk);

-		if (copied >= target && !sk->sk_backlog.tail)
+		if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
 			break;

 		if (copied) {
@@ -1773,7 +1774,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 			}
 		}

-		if (sk->sk_backlog.tail) {
+		if (READ_ONCE(sk->sk_backlog.tail)) {
 			release_sock(sk);
 			lock_sock(sk);
 			chtls_cleanup_rbuf(sk, copied);
@@ -1840,8 +1841,7 @@ skip_copy:
 			tp->urg_data = 0;

 		if (avail + offset >= skb->len) {
-			if (likely(skb))
-				chtls_free_skb(sk, skb);
+			chtls_free_skb(sk, skb);
 			buffers_freed++;

 			if (copied >= target &&
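The sk_backlog.tail reads converted above are peeks at a field that other CPUs update while this thread may not be the lock owner, so they are annotated with READ_ONCE() to make the lockless access explicit and to keep the compiler from caching or tearing the load (same motivation as the skb_queue_empty_lockless() change). The pattern in isolation, with the implicit behavior spelled out in comments (sketch, mirroring the recvmsg loops above):

	/* Peek without owning the write side of the field: READ_ONCE()
	 * forces a fresh, single load on every loop iteration.
	 */
	if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
		break;		/* nothing was queued behind us; stop */

	if (READ_ONCE(sk->sk_backlog.tail)) {
		release_sock(sk);	/* release_sock() drains the backlog */
		lock_sock(sk);		/* retake the lock and re-check state */
	}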
&cdev->tlsdev; + struct tls_toe_device *tlsdev = &cdev->tlsdev; - strlcpy(tlsdev->name, "chtls", TLS_DEVICE_NAME_MAX); + strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX); strlcat(tlsdev->name, cdev->lldi->ports[0]->name, - TLS_DEVICE_NAME_MAX); + TLS_TOE_DEVICE_NAME_MAX); tlsdev->feature = chtls_inline_feature; tlsdev->hash = chtls_create_hash; tlsdev->unhash = chtls_destroy_hash; tlsdev->release = chtls_dev_release; kref_init(&tlsdev->kref); - tls_register_device(tlsdev); + tls_toe_register_device(tlsdev); cdev->cdev_state = CHTLS_CDEV_STATE_UP; } @@ -474,7 +473,8 @@ static int chtls_getsockopt(struct sock *sk, int level, int optname, struct tls_context *ctx = tls_get_ctx(sk); if (level != SOL_TLS) - return ctx->getsockopt(sk, level, optname, optval, optlen); + return ctx->sk_proto->getsockopt(sk, level, + optname, optval, optlen); return do_chtls_getsockopt(sk, optval, optlen); } @@ -485,6 +485,7 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, struct tls_crypto_info *crypto_info, tmp_crypto_info; struct chtls_sock *csk; int keylen; + int cipher_type; int rc = 0; csk = rcu_dereference_sk_user_data(sk); @@ -508,6 +509,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info; + /* GCM mode of AES supports 128 and 256 bit encryption, so + * copy keys from user based on GCM cipher type. + */ switch (tmp_crypto_info.cipher_type) { case TLS_CIPHER_AES_GCM_128: { /* Obtain version and type from previous copy */ @@ -524,13 +528,30 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, } keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE; - rc = chtls_setkey(csk, keylen, optname); + cipher_type = TLS_CIPHER_AES_GCM_128; + break; + } + case TLS_CIPHER_AES_GCM_256: { + crypto_info[0] = tmp_crypto_info; + rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info), + optval + sizeof(*crypto_info), + sizeof(struct tls12_crypto_info_aes_gcm_256) + - sizeof(*crypto_info)); + + if (rc) { + rc = -EFAULT; + goto out; + } + + keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE; + cipher_type = TLS_CIPHER_AES_GCM_256; break; } default: rc = -EINVAL; goto out; } + rc = chtls_setkey(csk, keylen, optname, cipher_type); out: return rc; } @@ -541,7 +562,8 @@ static int chtls_setsockopt(struct sock *sk, int level, int optname, struct tls_context *ctx = tls_get_ctx(sk); if (level != SOL_TLS) - return ctx->setsockopt(sk, level, optname, optval, optlen); + return ctx->sk_proto->setsockopt(sk, level, + optname, optval, optlen); return do_chtls_setsockopt(sk, optname, optval, optlen); } |