author     Tadeusz Struk <tadeusz.struk@intel.com>   2015-07-15 15:28:38 -0700
committer  Herbert Xu <herbert@gondor.apana.org.au>  2015-07-17 21:20:18 +0800
commit     a990532023b903b10cf14736241cdd138e4bc92c (patch)
tree       5be733fe663603053c8b0ca8829f31326b6248a7 /drivers/crypto/qat
parent     28cfaf67e5c1f5b6b0d549eea398f8401a40e566 (diff)
crypto: qat - Add support for RSA algorithm
Add RSA support to the QAT driver.
Remove the unused RNG rings.
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
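For context only (not part of the patch): a minimal caller-side sketch of how the new "rsa" akcipher could be exercised from kernel code. It assumes the akcipher interface as it stood at this point in time (plain src/dst buffers rather than the later scatterlist API, and a single setkey taking the BER-encoded RsaKey { n, e, d } blob defined by qat_rsakey.asn1). The helper name example_rsa_encrypt and its buffers are hypothetical, and a real asynchronous caller would also register a completion callback and wait when -EINPROGRESS is returned.

#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/akcipher.h>

static int example_rsa_encrypt(const void *ber_key, unsigned int keylen,
			       void *src, unsigned int src_len,
			       void *dst, unsigned int dst_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	int ret;

	/* "rsa" resolves to "qat-rsa" (cra_priority 1000) when QAT is loaded */
	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* key blob is a BER-encoded RsaKey SEQUENCE { n, e, d }, cf. qat_rsakey.asn1 */
	ret = crypto_akcipher_setkey(tfm, ber_key, keylen);
	if (ret)
		goto out_free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* plain buffers in this API generation; dst_len must be >= the key size */
	akcipher_request_set_crypt(req, src, dst, src_len, dst_len);

	/* QAT completes asynchronously: -EINPROGRESS means "wait for the callback" */
	ret = crypto_akcipher_encrypt(req);

	akcipher_request_free(req);
out_free_tfm:
	crypto_free_akcipher(tfm);
	return ret;
}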
Diffstat (limited to 'drivers/crypto/qat')
 -rw-r--r--  drivers/crypto/qat/Kconfig                      |   2
 -rw-r--r--  drivers/crypto/qat/qat_common/.gitignore        |   1
 -rw-r--r--  drivers/crypto/qat/qat_common/Makefile          |   5
 -rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h  |  10
 -rw-r--r--  drivers/crypto/qat/qat_common/adf_init.c        |   4
 -rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw.h      |   2
 -rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw_pke.h  | 112
 -rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c        |   5
 -rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c   | 639
 -rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.c      |  19
 -rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.h      |   2
 -rw-r--r--  drivers/crypto/qat/qat_common/qat_rsakey.asn1   |   5
 -rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c       |  12
13 files changed, 779 insertions(+), 39 deletions(-)
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index 6fdb9e8b22a7..d8c3d595d98b 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig @@ -3,11 +3,13 @@ config CRYPTO_DEV_QAT select CRYPTO_AEAD select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER + select CRYPTO_AKCIPHER select CRYPTO_HMAC select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 select FW_LOADER + select ASN1 config CRYPTO_DEV_QAT_DH895xCC tristate "Support for Intel(R) DH895xCC" diff --git a/drivers/crypto/qat/qat_common/.gitignore b/drivers/crypto/qat/qat_common/.gitignore new file mode 100644 index 000000000000..ee328374dba8 --- /dev/null +++ b/drivers/crypto/qat/qat_common/.gitignore @@ -0,0 +1 @@ +*-asn1.[ch] diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile index e0424dc382fe..184605f76bea 100644 --- a/drivers/crypto/qat/qat_common/Makefile +++ b/drivers/crypto/qat/qat_common/Makefile @@ -1,3 +1,6 @@ +$(obj)/qat_rsakey-asn1.o: $(obj)/qat_rsakey-asn1.c $(obj)/qat_rsakey-asn1.h +clean-files += qat_rsakey-asn1.c qat_rsakey-asn1.h + obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o intel_qat-objs := adf_cfg.o \ adf_ctl_drv.o \ @@ -8,6 +11,8 @@ intel_qat-objs := adf_cfg.o \ adf_transport.o \ qat_crypto.o \ qat_algs.o \ + qat_rsakey-asn1.o \ + qat_asym_algs.o \ qat_uclo.o \ qat_hal.o diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 1e82ad4bf43d..3c33feefee67 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -55,7 +55,7 @@ #define ADF_MAJOR_VERSION 0 #define ADF_MINOR_VERSION 1 -#define ADF_BUILD_VERSION 3 +#define ADF_BUILD_VERSION 4 #define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \ __stringify(ADF_MINOR_VERSION) "." 
\ __stringify(ADF_BUILD_VERSION) @@ -94,6 +94,11 @@ struct service_hndl { int admin; }; +static inline int get_current_node(void) +{ + return cpu_data(current_thread_info()->cpu).phys_proc_id; +} + int adf_service_register(struct service_hndl *service); int adf_service_unregister(struct service_hndl *service); @@ -141,10 +146,13 @@ int qat_crypto_unregister(void); struct qat_crypto_instance *qat_crypto_get_instance_node(int node); void qat_crypto_put_instance(struct qat_crypto_instance *inst); void qat_alg_callback(void *resp); +void qat_alg_asym_callback(void *resp); int qat_algs_init(void); void qat_algs_exit(void); int qat_algs_register(void); int qat_algs_unregister(void); +int qat_asym_algs_register(void); +void qat_asym_algs_unregister(void); int qat_hal_init(struct adf_accel_dev *accel_dev); void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index 245f43237a2d..7f9dffad7931 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -257,7 +257,7 @@ int adf_dev_start(struct adf_accel_dev *accel_dev) clear_bit(ADF_STATUS_STARTING, &accel_dev->status); set_bit(ADF_STATUS_STARTED, &accel_dev->status); - if (qat_algs_register()) { + if (qat_algs_register() || qat_asym_algs_register()) { dev_err(&GET_DEV(accel_dev), "Failed to register crypto algs\n"); set_bit(ADF_STATUS_STARTING, &accel_dev->status); @@ -296,6 +296,8 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev) dev_err(&GET_DEV(accel_dev), "Failed to unregister crypto algs\n"); + qat_asym_algs_unregister(); + list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->admin) diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h index f1e30e24a419..46747f01b1d1 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h @@ -249,6 +249,8 @@ struct icp_qat_fw_comn_resp { #define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 #define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 +#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6 +#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1 #define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 #define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 #define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h new file mode 100644 index 000000000000..0d7a9b51ce9f --- /dev/null +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h @@ -0,0 +1,112 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. 
+ Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef _ICP_QAT_FW_PKE_ +#define _ICP_QAT_FW_PKE_ + +#include "icp_qat_fw.h" + +struct icp_qat_fw_req_hdr_pke_cd_pars { + u64 content_desc_addr; + u32 content_desc_resrvd; + u32 func_id; +}; + +struct icp_qat_fw_req_pke_mid { + u64 opaque; + u64 src_data_addr; + u64 dest_data_addr; +}; + +struct icp_qat_fw_req_pke_hdr { + u8 resrvd1; + u8 resrvd2; + u8 service_type; + u8 hdr_flags; + u16 comn_req_flags; + u16 resrvd4; + struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars; +}; + +struct icp_qat_fw_pke_request { + struct icp_qat_fw_req_pke_hdr pke_hdr; + struct icp_qat_fw_req_pke_mid pke_mid; + u8 output_param_count; + u8 input_param_count; + u16 resrvd1; + u32 resrvd2; + u64 next_req_adr; +}; + +struct icp_qat_fw_resp_pke_hdr { + u8 resrvd1; + u8 resrvd2; + u8 response_type; + u8 hdr_flags; + u16 comn_resp_flags; + u16 resrvd4; +}; + +struct icp_qat_fw_pke_resp { + struct icp_qat_fw_resp_pke_hdr pke_resp_hdr; + u64 opaque; + u64 src_data_addr; + u64 dest_data_addr; +}; + +#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7 +#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK 0x1 +#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \ + QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \ + ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \ + QAT_COMN_RESP_PKE_STATUS_BITPOS, \ + QAT_COMN_RESP_PKE_STATUS_MASK) + +#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \ + QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ + ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \ + ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK) +#endif diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 067402c7c2a9..31abfd07379a 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -129,11 +129,6 @@ struct qat_alg_ablkcipher_ctx { spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */ }; -static int get_current_node(void) -{ - return cpu_data(current_thread_info()->cpu).phys_proc_id; -} - static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) { switch (qat_hash_alg) { diff --git 
a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c new file mode 100644 index 000000000000..13a76a0325ed --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -0,0 +1,639 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include <linux/module.h> +#include <crypto/internal/rsa.h> +#include <crypto/internal/akcipher.h> +#include <crypto/akcipher.h> +#include <linux/dma-mapping.h> +#include <linux/fips.h> +#include "qat_rsakey-asn1.h" +#include "icp_qat_fw_pke.h" +#include "adf_accel_devices.h" +#include "adf_transport.h" +#include "adf_common_drv.h" +#include "qat_crypto.h" + +struct qat_rsa_input_params { + union { + struct { + dma_addr_t m; + dma_addr_t e; + dma_addr_t n; + } enc; + struct { + dma_addr_t c; + dma_addr_t d; + dma_addr_t n; + } dec; + u64 in_tab[8]; + }; +} __packed __aligned(64); + +struct qat_rsa_output_params { + union { + struct { + dma_addr_t c; + } enc; + struct { + dma_addr_t m; + } dec; + u64 out_tab[8]; + }; +} __packed __aligned(64); + +struct qat_rsa_ctx { + char *n; + char *e; + char *d; + dma_addr_t dma_n; + dma_addr_t dma_e; + dma_addr_t dma_d; + unsigned int key_sz; + struct qat_crypto_instance *inst; +} __packed __aligned(64); + +struct qat_rsa_request { + struct qat_rsa_input_params in; + struct qat_rsa_output_params out; + dma_addr_t phy_in; + dma_addr_t phy_out; + char *src_align; + struct icp_qat_fw_pke_request req; + struct qat_rsa_ctx *ctx; + int err; +} __aligned(64); + +static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp) +{ + struct akcipher_request *areq = (void *)(__force long)resp->opaque; + struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64); + struct device *dev = &GET_DEV(req->ctx->inst->accel_dev); + int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET( + resp->pke_resp_hdr.comn_resp_flags); + char *ptr = areq->dst; + + err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL; + + if (req->src_align) + dma_free_coherent(dev, req->ctx->key_sz, req->src_align, + req->in.enc.m); + else + dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz, + DMA_TO_DEVICE); + + dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz, + DMA_FROM_DEVICE); + dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params), + DMA_TO_DEVICE); + dma_unmap_single(dev, req->phy_out, + sizeof(struct qat_rsa_output_params), + DMA_TO_DEVICE); + + areq->dst_len = req->ctx->key_sz; + /* Need to set the corect length of the output */ + while (!(*ptr) && areq->dst_len) { + areq->dst_len--; + ptr++; + } + + if (areq->dst_len != req->ctx->key_sz) + memcpy(areq->dst, ptr, areq->dst_len); + + akcipher_request_complete(areq, err); +} + +void qat_alg_asym_callback(void *_resp) +{ + struct icp_qat_fw_pke_resp *resp = _resp; + + qat_rsa_cb(resp); +} + +#define PKE_RSA_EP_512 0x1c161b21 +#define PKE_RSA_EP_1024 0x35111bf7 +#define PKE_RSA_EP_1536 0x4d111cdc +#define PKE_RSA_EP_2048 0x6e111dba +#define PKE_RSA_EP_3072 0x7d111ea3 +#define PKE_RSA_EP_4096 0xa5101f7e + +static unsigned long qat_rsa_enc_fn_id(unsigned int len) +{ + unsigned int bitslen = len << 3; + + switch (bitslen) { + case 512: + return PKE_RSA_EP_512; + case 1024: + return PKE_RSA_EP_1024; + case 1536: + return PKE_RSA_EP_1536; + case 2048: + return PKE_RSA_EP_2048; + case 3072: + return PKE_RSA_EP_3072; + case 4096: + return PKE_RSA_EP_4096; + default: + return 0; + }; +} + +#define PKE_RSA_DP1_512 0x1c161b3c +#define PKE_RSA_DP1_1024 0x35111c12 +#define PKE_RSA_DP1_1536 0x4d111cf7 +#define PKE_RSA_DP1_2048 0x6e111dda +#define PKE_RSA_DP1_3072 0x7d111ebe +#define PKE_RSA_DP1_4096 0xa5101f98 + +static unsigned long qat_rsa_dec_fn_id(unsigned int len) +{ + unsigned int bitslen = len << 3; + + switch (bitslen) { + case 512: + return PKE_RSA_DP1_512; + case 1024: + return PKE_RSA_DP1_1024; + case 1536: + 
return PKE_RSA_DP1_1536; + case 2048: + return PKE_RSA_DP1_2048; + case 3072: + return PKE_RSA_DP1_3072; + case 4096: + return PKE_RSA_DP1_4096; + default: + return 0; + }; +} + +static int qat_rsa_enc(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct qat_crypto_instance *inst = ctx->inst; + struct device *dev = &GET_DEV(inst->accel_dev); + struct qat_rsa_request *qat_req = + PTR_ALIGN(akcipher_request_ctx(req), 64); + struct icp_qat_fw_pke_request *msg = &qat_req->req; + int ret, ctr = 0; + + if (unlikely(!ctx->n || !ctx->e)) + return -EINVAL; + + if (req->dst_len < ctx->key_sz) { + req->dst_len = ctx->key_sz; + return -EOVERFLOW; + } + memset(msg, '\0', sizeof(*msg)); + ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, + ICP_QAT_FW_COMN_REQ_FLAG_SET); + msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz); + if (unlikely(!msg->pke_hdr.cd_pars.func_id)) + return -EINVAL; + + qat_req->ctx = ctx; + msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; + msg->pke_hdr.comn_req_flags = + ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, + QAT_COMN_CD_FLD_TYPE_64BIT_ADR); + + qat_req->in.enc.e = ctx->dma_e; + qat_req->in.enc.n = ctx->dma_n; + ret = -ENOMEM; + + /* + * src can be of any size in valid range, but HW expects it to be the + * same as modulo n so in case it is different we need to allocate a + * new buf and copy src data. + * In other case we just need to map the user provided buffer. + */ + if (req->src_len < ctx->key_sz) { + int shift = ctx->key_sz - req->src_len; + + qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, + &qat_req->in.enc.m, + GFP_KERNEL); + if (unlikely(!qat_req->src_align)) + return ret; + + memcpy(qat_req->src_align + shift, req->src, req->src_len); + } else { + qat_req->src_align = NULL; + qat_req->in.enc.m = dma_map_single(dev, req->src, req->src_len, + DMA_TO_DEVICE); + } + qat_req->in.in_tab[3] = 0; + qat_req->out.enc.c = dma_map_single(dev, req->dst, req->dst_len, + DMA_FROM_DEVICE); + qat_req->out.out_tab[1] = 0; + qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m, + sizeof(struct qat_rsa_input_params), + DMA_TO_DEVICE); + qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c, + sizeof(struct qat_rsa_output_params), + DMA_TO_DEVICE); + + if (unlikely((!qat_req->src_align && + dma_mapping_error(dev, qat_req->in.enc.m)) || + dma_mapping_error(dev, qat_req->out.enc.c) || + dma_mapping_error(dev, qat_req->phy_in) || + dma_mapping_error(dev, qat_req->phy_out))) + goto unmap; + + msg->pke_mid.src_data_addr = qat_req->phy_in; + msg->pke_mid.dest_data_addr = qat_req->phy_out; + msg->pke_mid.opaque = (uint64_t)(__force long)req; + msg->input_param_count = 3; + msg->output_param_count = 1; + do { + ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); + } while (ret == -EBUSY && ctr++ < 100); + + if (!ret) + return -EINPROGRESS; +unmap: + if (qat_req->src_align) + dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, + qat_req->in.enc.m); + else + if (!dma_mapping_error(dev, qat_req->in.enc.m)) + dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz, + DMA_TO_DEVICE); + if (!dma_mapping_error(dev, qat_req->out.enc.c)) + dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz, + DMA_FROM_DEVICE); + if (!dma_mapping_error(dev, qat_req->phy_in)) + dma_unmap_single(dev, qat_req->phy_in, + sizeof(struct qat_rsa_input_params), + DMA_TO_DEVICE); + if (!dma_mapping_error(dev, qat_req->phy_out)) + dma_unmap_single(dev, 
qat_req->phy_out, + sizeof(struct qat_rsa_output_params), + DMA_TO_DEVICE); + return ret; +} + +static int qat_rsa_dec(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct qat_crypto_instance *inst = ctx->inst; + struct device *dev = &GET_DEV(inst->accel_dev); + struct qat_rsa_request *qat_req = + PTR_ALIGN(akcipher_request_ctx(req), 64); + struct icp_qat_fw_pke_request *msg = &qat_req->req; + int ret, ctr = 0; + + if (unlikely(!ctx->n || !ctx->d)) + return -EINVAL; + + if (req->dst_len < ctx->key_sz) { + req->dst_len = ctx->key_sz; + return -EOVERFLOW; + } + memset(msg, '\0', sizeof(*msg)); + ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, + ICP_QAT_FW_COMN_REQ_FLAG_SET); + msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz); + if (unlikely(!msg->pke_hdr.cd_pars.func_id)) + return -EINVAL; + + qat_req->ctx = ctx; + msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; + msg->pke_hdr.comn_req_flags = + ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, + QAT_COMN_CD_FLD_TYPE_64BIT_ADR); + + qat_req->in.dec.d = ctx->dma_d; + qat_req->in.dec.n = ctx->dma_n; + ret = -ENOMEM; + + /* + * src can be of any size in valid range, but HW expects it to be the + * same as modulo n so in case it is different we need to allocate a + * new buf and copy src data. + * In other case we just need to map the user provided buffer. + */ + if (req->src_len < ctx->key_sz) { + int shift = ctx->key_sz - req->src_len; + + qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, + &qat_req->in.dec.c, + GFP_KERNEL); + if (unlikely(!qat_req->src_align)) + return ret; + + memcpy(qat_req->src_align + shift, req->src, req->src_len); + } else { + qat_req->src_align = NULL; + qat_req->in.dec.c = dma_map_single(dev, req->src, req->src_len, + DMA_TO_DEVICE); + } + qat_req->in.in_tab[3] = 0; + qat_req->out.dec.m = dma_map_single(dev, req->dst, req->dst_len, + DMA_FROM_DEVICE); + qat_req->out.out_tab[1] = 0; + qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c, + sizeof(struct qat_rsa_input_params), + DMA_TO_DEVICE); + qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m, + sizeof(struct qat_rsa_output_params), + DMA_TO_DEVICE); + + if (unlikely((!qat_req->src_align && + dma_mapping_error(dev, qat_req->in.dec.c)) || + dma_mapping_error(dev, qat_req->out.dec.m) || + dma_mapping_error(dev, qat_req->phy_in) || + dma_mapping_error(dev, qat_req->phy_out))) + goto unmap; + + msg->pke_mid.src_data_addr = qat_req->phy_in; + msg->pke_mid.dest_data_addr = qat_req->phy_out; + msg->pke_mid.opaque = (uint64_t)(__force long)req; + msg->input_param_count = 3; + msg->output_param_count = 1; + do { + ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); + } while (ret == -EBUSY && ctr++ < 100); + + if (!ret) + return -EINPROGRESS; +unmap: + if (qat_req->src_align) + dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, + qat_req->in.dec.c); + else + if (!dma_mapping_error(dev, qat_req->in.dec.c)) + dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz, + DMA_TO_DEVICE); + if (!dma_mapping_error(dev, qat_req->out.dec.m)) + dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz, + DMA_FROM_DEVICE); + if (!dma_mapping_error(dev, qat_req->phy_in)) + dma_unmap_single(dev, qat_req->phy_in, + sizeof(struct qat_rsa_input_params), + DMA_TO_DEVICE); + if (!dma_mapping_error(dev, qat_req->phy_out)) + dma_unmap_single(dev, qat_req->phy_out, + sizeof(struct qat_rsa_output_params), + DMA_TO_DEVICE); + return ret; +} + +int 
qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct qat_rsa_ctx *ctx = context; + struct qat_crypto_instance *inst = ctx->inst; + struct device *dev = &GET_DEV(inst->accel_dev); + const char *ptr = value; + int ret; + + while (!*ptr && vlen) { + ptr++; + vlen--; + } + + ctx->key_sz = vlen; + ret = -EINVAL; + /* In FIPS mode only allow key size 2K & 3K */ + if (fips_enabled && (ctx->key_sz != 256 || ctx->key_sz != 384)) { + pr_err("QAT: RSA: key size not allowed in FIPS mode\n"); + goto err; + } + /* invalid key size provided */ + if (!qat_rsa_enc_fn_id(ctx->key_sz)) + goto err; + + ret = -ENOMEM; + ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); + if (!ctx->n) + goto err; + + memcpy(ctx->n, ptr, ctx->key_sz); + return 0; +err: + ctx->key_sz = 0; + ctx->n = NULL; + return ret; +} + +int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct qat_rsa_ctx *ctx = context; + struct qat_crypto_instance *inst = ctx->inst; + struct device *dev = &GET_DEV(inst->accel_dev); + const char *ptr = value; + + while (!*ptr && vlen) { + ptr++; + vlen--; + } + + if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) { + ctx->e = NULL; + return -EINVAL; + } + + ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); + if (!ctx->e) { + ctx->e = NULL; + return -ENOMEM; + } + memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen); + return 0; +} + +int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct qat_rsa_ctx *ctx = context; + struct qat_crypto_instance *inst = ctx->inst; + struct device *dev = &GET_DEV(inst->accel_dev); + const char *ptr = value; + int ret; + + while (!*ptr && vlen) { + ptr++; + vlen--; + } + + ret = -EINVAL; + if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) + goto err; + + /* In FIPS mode only allow key size 2K & 3K */ + if (fips_enabled && (vlen != 256 || vlen != 384)) { + pr_err("QAT: RSA: key size not allowed in FIPS mode\n"); + goto err; + } + + ret = -ENOMEM; + ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); + if (!ctx->n) + goto err; + + memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen); + return 0; +err: + ctx->d = NULL; + return ret; +} + +static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct device *dev = &GET_DEV(ctx->inst->accel_dev); + int ret; + + /* Free the old key if any */ + if (ctx->n) + dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); + if (ctx->e) + dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); + if (ctx->d) { + memset(ctx->d, '\0', ctx->key_sz); + dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); + } + + ctx->n = NULL; + ctx->e = NULL; + ctx->d = NULL; + ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen); + if (ret < 0) + goto free; + + if (!ctx->n || !ctx->e) { + /* invalid key provided */ + ret = -EINVAL; + goto free; + } + + return 0; +free: + if (ctx->d) { + memset(ctx->d, '\0', ctx->key_sz); + dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); + ctx->d = NULL; + } + if (ctx->e) { + dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); + ctx->e = NULL; + } + if (ctx->n) { + dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); + ctx->n = NULL; + ctx->key_sz = 0; + } + return ret; +} + +static int qat_rsa_init_tfm(struct crypto_akcipher *tfm) +{ + struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 
+ struct qat_crypto_instance *inst = + qat_crypto_get_instance_node(get_current_node()); + + if (!inst) + return -EINVAL; + + ctx->key_sz = 0; + ctx->inst = inst; + return 0; +} + +static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm) +{ + struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); + struct device *dev = &GET_DEV(ctx->inst->accel_dev); + + if (ctx->n) + dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); + if (ctx->e) + dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); + if (ctx->d) { + memset(ctx->d, '\0', ctx->key_sz); + dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); + } + qat_crypto_put_instance(ctx->inst); + ctx->n = NULL; + ctx->d = NULL; + ctx->d = NULL; +} + +static struct akcipher_alg rsa = { + .encrypt = qat_rsa_enc, + .decrypt = qat_rsa_dec, + .sign = qat_rsa_dec, + .verify = qat_rsa_enc, + .setkey = qat_rsa_setkey, + .init = qat_rsa_init_tfm, + .exit = qat_rsa_exit_tfm, + .reqsize = sizeof(struct qat_rsa_request) + 64, + .base = { + .cra_name = "rsa", + .cra_driver_name = "qat-rsa", + .cra_priority = 1000, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct qat_rsa_ctx), + }, +}; + +int qat_asym_algs_register(void) +{ + rsa.base.cra_flags = 0; + return crypto_register_akcipher(&rsa); +} + +void qat_asym_algs_unregister(void) +{ + crypto_unregister_akcipher(&rsa); +} diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index 3bd705ca5973..e23ce6fe074b 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -88,12 +88,6 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) if (inst->pke_rx) adf_remove_ring(inst->pke_rx); - if (inst->rnd_tx) - adf_remove_ring(inst->rnd_tx); - - if (inst->rnd_rx) - adf_remove_ring(inst->rnd_rx); - list_del(list_ptr); kfree(inst); } @@ -202,11 +196,6 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) msg_size, key, NULL, 0, &inst->sym_tx)) goto err; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); - if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, - msg_size, key, NULL, 0, &inst->rnd_tx)) - goto err; - msg_size = msg_size >> 1; snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, @@ -220,15 +209,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) &inst->sym_rx)) goto err; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); - if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, - msg_size, key, qat_alg_callback, 0, - &inst->rnd_rx)) - goto err; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym, - msg_size, key, qat_alg_callback, 0, + msg_size, key, qat_alg_asym_callback, 0, &inst->pke_rx)) goto err; } diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h index d503007b49e6..dc0273fe3620 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.h +++ b/drivers/crypto/qat/qat_common/qat_crypto.h @@ -57,8 +57,6 @@ struct qat_crypto_instance { struct adf_etr_ring_data *sym_rx; struct adf_etr_ring_data *pke_tx; struct adf_etr_ring_data *pke_rx; - struct adf_etr_ring_data *rnd_tx; - struct adf_etr_ring_data *rnd_rx; struct adf_accel_dev *accel_dev; struct list_head list; unsigned long state; diff --git a/drivers/crypto/qat/qat_common/qat_rsakey.asn1 b/drivers/crypto/qat/qat_common/qat_rsakey.asn1 new file mode 100644 index 000000000000..97b0e02b600a 
--- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_rsakey.asn1 @@ -0,0 +1,5 @@ +RsaKey ::= SEQUENCE { + n INTEGER ({ qat_rsa_get_n }), + e INTEGER ({ qat_rsa_get_e }), + d INTEGER ({ qat_rsa_get_d }) +} diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index 1bde45b7a3c5..d241037f8728 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -167,12 +167,6 @@ static int adf_dev_configure(struct adf_accel_dev *accel_dev) key, (void *)&val, ADF_DEC)) goto err; - val = 4; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - val = 8; snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, @@ -185,12 +179,6 @@ static int adf_dev_configure(struct adf_accel_dev *accel_dev) key, (void *)&val, ADF_DEC)) goto err; - val = 12; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - val = ADF_COALESCING_DEF_TIME; snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", |