Diffstat (limited to 'drivers/crypto/qat/qat_common')
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c        |  15
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c       |  45
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c  |   8
3 files changed, 29 insertions, 39 deletions
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index 2839fccdd84b..d3e25c37dc33 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -109,20 +109,7 @@ EXPORT_SYMBOL_GPL(adf_reset_sbr);
void adf_reset_flr(struct adf_accel_dev *accel_dev)
{
- struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
- u16 control = 0;
- int pos = 0;
-
- dev_info(&GET_DEV(accel_dev), "Function level reset\n");
- pos = pci_pcie_cap(pdev);
- if (!pos) {
- dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
- return;
- }
- pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &control);
- control |= PCI_EXP_DEVCTL_BCR_FLR;
- pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, control);
- msleep(100);
+ pcie_flr(accel_to_pci_dev(accel_dev));
}
EXPORT_SYMBOL_GPL(adf_reset_flr);
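
The adf_aer.c change above drops the hand-rolled function level reset (PCIe capability lookup, setting PCI_EXP_DEVCTL_BCR_FLR, a fixed 100 ms sleep) and calls the PCI core's pcie_flr() helper instead. As an illustrative sketch only (the name example_manual_flr is made up, and the real pcie_flr() additionally waits for pending transactions before asserting the reset), the removed sequence amounts to:

#include <linux/delay.h>
#include <linux/pci.h>

/* Sketch: issue a Function Level Reset by hand.  The PCI core's
 * pcie_flr() now does this (and more) on behalf of the driver. */
static void example_manual_flr(struct pci_dev *pdev)
{
	/* Set the Initiate FLR bit in the PCIe Device Control register. */
	pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);

	/* The function may take up to 100 ms to become usable again. */
	msleep(100);
}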
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 20f35df8a01f..baffae817259 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -51,6 +51,7 @@
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
+#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
@@ -178,8 +179,8 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
for (i = 0; i < block_size; i++) {
char *ipad_ptr = ipad + i;
char *opad_ptr = opad + i;
- *ipad_ptr ^= 0x36;
- *opad_ptr ^= 0x5C;
+ *ipad_ptr ^= HMAC_IPAD_VALUE;
+ *opad_ptr ^= HMAC_OPAD_VALUE;
}
if (crypto_shash_init(shash))
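
The 0x36/0x5C bytes replaced in this hunk are the standard HMAC inner/outer pad constants from RFC 2104; the new <crypto/hmac.h> include names them HMAC_IPAD_VALUE and HMAC_OPAD_VALUE. A minimal sketch of the same padding step, with made-up names (example_hmac_pads; the key is assumed to be already hashed or zero-padded to the block size):

#include <crypto/hmac.h>	/* HMAC_IPAD_VALUE, HMAC_OPAD_VALUE */
#include <linux/string.h>
#include <linux/types.h>

/* Sketch: derive the HMAC inner and outer pads from a block-sized key. */
static void example_hmac_pads(const u8 *key, u8 *ipad, u8 *opad,
			      unsigned int block_size)
{
	unsigned int i;

	memcpy(ipad, key, block_size);
	memcpy(opad, key, block_size);

	for (i = 0; i < block_size; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;	/* 0x36 */
		opad[i] ^= HMAC_OPAD_VALUE;	/* 0x5c */
	}
}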
@@ -685,7 +686,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, blp)))
- goto err;
+ goto err_in;
for_each_sg(sgl, sg, n, i) {
int y = sg_nctr;
@@ -698,7 +699,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
DMA_BIDIRECTIONAL);
bufl->bufers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
- goto err;
+ goto err_in;
sg_nctr++;
}
bufl->num_bufs = sg_nctr;
@@ -716,10 +717,10 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
buflout = kzalloc_node(sz_out, GFP_ATOMIC,
dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!buflout))
- goto err;
+ goto err_in;
bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, bloutp)))
- goto err;
+ goto err_out;
bufers = buflout->bufers;
for_each_sg(sglout, sg, n, i) {
int y = sg_nctr;
@@ -731,7 +732,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
sg->length,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
- goto err;
+ goto err_out;
bufers[y].len = sg->length;
sg_nctr++;
}
@@ -746,9 +747,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
qat_req->buf.sz_out = 0;
}
return 0;
-err:
- dev_err(dev, "Failed to map buf for dma\n");
- sg_nctr = 0;
+
+err_out:
+ n = sg_nents(sglout);
+ for (i = 0; i < n; i++)
+ if (!dma_mapping_error(dev, buflout->bufers[i].addr))
+ dma_unmap_single(dev, buflout->bufers[i].addr,
+ buflout->bufers[i].len,
+ DMA_BIDIRECTIONAL);
+ if (!dma_mapping_error(dev, bloutp))
+ dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+ kfree(buflout);
+
+err_in:
+ n = sg_nents(sgl);
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
dma_unmap_single(dev, bufl->bufers[i].addr,
@@ -758,17 +770,8 @@ err:
if (!dma_mapping_error(dev, blp))
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
kfree(bufl);
- if (sgl != sglout && buflout) {
- n = sg_nents(sglout);
- for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, buflout->bufers[i].addr))
- dma_unmap_single(dev, buflout->bufers[i].addr,
- buflout->bufers[i].len,
- DMA_BIDIRECTIONAL);
- if (!dma_mapping_error(dev, bloutp))
- dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
- kfree(buflout);
- }
+
+ dev_err(dev, "Failed to map buf for dma\n");
return -ENOMEM;
}
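
The qat_algs.c error-path rework above replaces the single err: label, which had to test whether the output list existed before unwinding it, with layered labels: a failure while mapping the output list jumps to err_out, which releases the output side and then falls through into err_in for the input side, while earlier failures jump straight to err_in. A generic sketch of that layered-unwind pattern, with hypothetical names (example_map_in_out, in_buf, out_buf):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Sketch of the layered goto unwind: each later label undoes its own stage
 * and then falls through into the earlier labels. */
static int example_map_in_out(struct device *dev, size_t in_sz, size_t out_sz,
			      void **in_buf, void **out_buf,
			      dma_addr_t *in_dma, dma_addr_t *out_dma)
{
	*in_buf = kzalloc(in_sz, GFP_ATOMIC);
	if (unlikely(!*in_buf))
		return -ENOMEM;

	*in_dma = dma_map_single(dev, *in_buf, in_sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, *in_dma)))
		goto err_in;

	*out_buf = kzalloc(out_sz, GFP_ATOMIC);
	if (unlikely(!*out_buf))
		goto err_in_unmap;

	*out_dma = dma_map_single(dev, *out_buf, out_sz, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, *out_dma)))
		goto err_out;

	return 0;

err_out:
	/* Undo the output side, then fall through into the input side. */
	kfree(*out_buf);
err_in_unmap:
	dma_unmap_single(dev, *in_dma, in_sz, DMA_TO_DEVICE);
err_in:
	kfree(*in_buf);
	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}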
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 2aab80bc241f..6f5dd68449c6 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -521,11 +521,11 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
return 0;
}
-static int qat_dh_max_size(struct crypto_kpp *tfm)
+static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
{
struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
- return ctx->p ? ctx->p_size : -EINVAL;
+ return ctx->p_size;
}
static int qat_dh_init_tfm(struct crypto_kpp *tfm)
@@ -1256,11 +1256,11 @@ static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
return qat_rsa_setkey(tfm, key, keylen, true);
}
-static int qat_rsa_max_size(struct crypto_akcipher *tfm)
+static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- return (ctx->n) ? ctx->key_sz : -EINVAL;
+ return ctx->key_sz;
}
static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
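
The two qat_asym_algs.c hunks convert qat_dh_max_size() and qat_rsa_max_size() to the unsigned int return type used by the kpp and akcipher ->max_size() callbacks; an unsigned return cannot carry -EINVAL, so the functions now simply report the stored size (0 when no key has been set). A minimal sketch of the kpp-side callback shape, with hypothetical names (example_dh_ctx, example_dh_max_size):

#include <crypto/kpp.h>

/* Hypothetical context: only the field needed by max_size is shown. */
struct example_dh_ctx {
	unsigned int p_size;	/* size of the DH prime p in bytes, 0 if unset */
};

static unsigned int example_dh_max_size(struct crypto_kpp *tfm)
{
	struct example_dh_ctx *ctx = kpp_tfm_ctx(tfm);

	/* Unsigned return: 0 means "no key set", never an error code. */
	return ctx->p_size;
}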