Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c  |   7
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c    |  17
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c    |  34
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h         |   2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c  |   2
-rw-r--r--  drivers/crypto/atmel-sha204a.c                       |   6
-rw-r--r--  drivers/crypto/ccp/sp-pci.c                          |   1
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_main.c           | 140
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_drv.c         |   8
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_drv.c          |   8
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c         |   8
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/adf_drv.c          |   8
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c      |   8
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.c                   |   2
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.h                   |   9
-rw-r--r--  drivers/crypto/marvell/cesa/cipher.c                 |   3
-rw-r--r--  drivers/crypto/marvell/cesa/hash.c                   |   2
-rw-r--r--  drivers/crypto/marvell/cesa/tdma.c                   |  53
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c |   7
-rw-r--r--  drivers/crypto/mxs-dcp.c                             |   8
-rw-r--r--  drivers/crypto/xilinx/zynqmp-sha.c                   |  18
21 files changed, 153 insertions(+), 198 deletions(-)
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 19b7fb4a93e8..05f67661553c 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -275,13 +275,16 @@ theend_sgs:
} else {
if (nr_sgs > 0)
dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
+
+ if (nr_sgd > 0)
+ dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
}
theend_iv:
if (areq->iv && ivsize > 0) {
- if (rctx->addr_iv)
+ if (!dma_mapping_error(ce->dev, rctx->addr_iv))
dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
memcpy(areq->iv, chan->backup_iv, ivsize);
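[Annotation] The two fixes above share one rule: only undo a DMA mapping that actually succeeded. The dst list is now unmapped only when nr_sgd > 0, and the IV mapping is tested with dma_mapping_error() rather than by treating the dma_addr_t as a boolean, since 0 can be a valid DMA address and the error cookie is whatever dma_map_single() returns on failure. A minimal sketch of the pattern, with illustrative names:

        dma_addr_t iv_dma;
        int err = 0;

        iv_dma = dma_map_single(dev, iv_buf, ivlen, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, iv_dma)) {
                err = -EFAULT;
                goto theend;            /* reached with the IV unmapped */
        }
        /* ... program and run the request ... */
theend:
        if (!dma_mapping_error(dev, iv_dma))
                dma_unmap_single(dev, iv_dma, ivlen, DMA_TO_DEVICE);
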
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index e55e58e164db..fcc6832a065c 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -832,13 +832,12 @@ static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce)
err = pm_runtime_set_suspended(ce->dev);
if (err)
return err;
- pm_runtime_enable(ce->dev);
- return err;
-}
-static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce)
-{
- pm_runtime_disable(ce->dev);
+ err = devm_pm_runtime_enable(ce->dev);
+ if (err)
+ return err;
+
+ return 0;
}
static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
@@ -1041,7 +1040,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
"sun8i-ce-ns", ce);
if (err) {
dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err);
- goto error_irq;
+ goto error_pm;
}
err = sun8i_ce_register_algs(ce);
@@ -1082,8 +1081,6 @@ static int sun8i_ce_probe(struct platform_device *pdev)
return 0;
error_alg:
sun8i_ce_unregister_algs(ce);
-error_irq:
- sun8i_ce_pm_exit(ce);
error_pm:
sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
return err;
@@ -1104,8 +1101,6 @@ static void sun8i_ce_remove(struct platform_device *pdev)
#endif
sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
-
- sun8i_ce_pm_exit(ce);
}
static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
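[Annotation] devm_pm_runtime_enable() registers the matching pm_runtime_disable() as a devres action, which is why sun8i_ce_pm_exit() disappears and both the probe error path and remove() shrink: devres runs the disable automatically on probe failure and on unbind. A hedged sketch of the resulting init helper, assuming an ordinary platform device (names illustrative):

        static int foo_pm_init(struct device *dev)
        {
                int err;

                err = pm_runtime_set_suspended(dev);
                if (err)
                        return err;

                /* undone automatically by devres on probe failure/unbind */
                return devm_pm_runtime_enable(dev);
        }
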
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 6072dd9f390b..3f9d79ea01aa 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -343,9 +343,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
u32 common;
u64 byte_count;
__le32 *bf;
- void *buf = NULL;
+ void *buf, *result;
int j, i, todo;
- void *result = NULL;
u64 bs;
int digestsize;
dma_addr_t addr_res, addr_pad;
@@ -365,14 +364,14 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
if (!buf) {
err = -ENOMEM;
- goto theend;
+ goto err_out;
}
bf = (__le32 *)buf;
result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
if (!result) {
err = -ENOMEM;
- goto theend;
+ goto err_free_buf;
}
flow = rctx->flow;
@@ -398,7 +397,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
- goto theend;
+ goto err_free_result;
}
len = areq->nbytes;
@@ -411,7 +410,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (len > 0) {
dev_err(ce->dev, "remaining len %d\n", len);
err = -EINVAL;
- goto theend;
+ goto err_unmap_src;
}
addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
@@ -419,7 +418,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (dma_mapping_error(ce->dev, addr_res)) {
dev_err(ce->dev, "DMA map dest\n");
err = -EINVAL;
- goto theend;
+ goto err_unmap_src;
}
byte_count = areq->nbytes;
@@ -441,7 +440,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
}
if (!j) {
err = -EINVAL;
- goto theend;
+ goto err_unmap_result;
}
addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
@@ -450,7 +449,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (dma_mapping_error(ce->dev, addr_pad)) {
dev_err(ce->dev, "DMA error on padding SG\n");
err = -EINVAL;
- goto theend;
+ goto err_unmap_result;
}
if (ce->variant->hash_t_dlen_in_bits)
@@ -463,16 +462,25 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+
+err_unmap_result:
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+ if (!err)
+ memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
+err_unmap_src:
+ dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
- memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
-theend:
- kfree(buf);
+err_free_result:
kfree(result);
+
+err_free_buf:
+ kfree(buf);
+
+err_out:
local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
local_bh_enable();
+
return 0;
}
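[Annotation] The single catch-all theend label becomes a ladder of labels, so each failure point unwinds exactly the resources set up before it, and the digest is copied to areq->result only when err == 0. A reduced sketch of the unwind-ladder idiom (sizes and names are illustrative):

        static int run_one(void)
        {
                void *buf, *result;
                int err = 0;

                buf = kzalloc(128, GFP_KERNEL | GFP_DMA);
                if (!buf) {
                        err = -ENOMEM;
                        goto err_out;
                }

                result = kzalloc(64, GFP_KERNEL | GFP_DMA);
                if (!result) {
                        err = -ENOMEM;
                        goto err_free_buf;
                }

                /*
                 * ... map src, map result, run the job; each later failure
                 * jumps to the deepest label whose resource is already live,
                 * and the success path falls through the whole ladder ...
                 */

                kfree(result);
        err_free_buf:
                kfree(buf);
        err_out:
                return err;
        }
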
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 3b5c2af013d0..83df4d719053 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -308,8 +308,8 @@ struct sun8i_ce_hash_tfm_ctx {
* @flow: the flow to use for this request
*/
struct sun8i_ce_hash_reqctx {
- struct ahash_request fallback_req;
int flow;
+ struct ahash_request fallback_req; // keep at the end
};
/*
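[Annotation] The member swap is a layout requirement rather than style: the fallback driver's request context is laid out in memory directly behind the struct ahash_request header, so any member placed after fallback_req would be clobbered by the fallback's context bytes. Sketched, with an illustrative struct name:

        struct my_hash_reqctx {
                int flow;                          /* fixed-size state first */
                struct ahash_request fallback_req; /* keep at the end: the
                                                    * fallback tfm's reqsize
                                                    * bytes live right behind
                                                    * this member */
        };
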
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 9b9605ce8ee6..8831bcb230c2 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -141,7 +141,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
/* we need to copy all IVs from source in case DMA is bi-directionnal */
while (sg && len) {
- if (sg_dma_len(sg) == 0) {
+ if (sg->length == 0) {
sg = sg_next(sg);
continue;
}
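[Annotation] sg_dma_len() returns the mapped DMA length and is only meaningful after dma_map_sg(); this IV walk runs before mapping, so it must read the CPU-side sg->length. The pre-mapping traversal, sketched with illustrative names:

        struct scatterlist *sg = areq->src;
        unsigned int todo, len = areq->cryptlen;

        while (sg && len) {
                if (sg->length == 0) {
                        sg = sg_next(sg);       /* skip empty entries */
                        continue;
                }
                todo = min(len, sg->length);
                /* ... save the per-block IVs covered by this entry ... */
                len -= todo;
                sg = sg_next(sg);
        }
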
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index a02d496f4c41..b9658e38cb03 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -163,6 +163,12 @@ static int atmel_sha204a_probe(struct i2c_client *client)
i2c_priv->hwrng.name = dev_name(&client->dev);
i2c_priv->hwrng.read = atmel_sha204a_rng_read;
+ /*
+ * According to review by Bill Cox [1], this HWRNG has very low entropy.
+ * [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html
+ */
+ i2c_priv->hwrng.quality = 1;
+
ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
if (ret)
dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
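[Annotation] struct hwrng's quality field is the entropy credited per 1024 bits of data read, so 1 keeps the device available as an input while crediting it almost no entropy. A hedged sketch of such a registration (the read callback name is illustrative):

        priv->hwrng.name    = dev_name(&client->dev);
        priv->hwrng.read    = my_rng_read;
        priv->hwrng.quality = 1;        /* ~1 bit of entropy per 1024 bits */

        ret = devm_hwrng_register(&client->dev, &priv->hwrng);
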
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 157f9a9ed636..2ebc878da160 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -532,6 +532,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
+ { PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
/* Last entry must be zero */
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index e1f60f0f507c..df2728cccf8b 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -1126,8 +1126,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
dma_addr_t dst_addr, unsigned int *dlen,
- u32 *compression_crc,
- bool disable_async)
+ u32 *compression_crc)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1170,7 +1169,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
desc->src2_size = sizeof(struct aecs_comp_table_record);
desc->completion_addr = idxd_desc->compl_dma;
- if (ctx->use_irq && !disable_async) {
+ if (ctx->use_irq) {
desc->flags |= IDXD_OP_FLAG_RCI;
idxd_desc->crypto.req = req;
@@ -1183,8 +1182,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
" src_addr %llx, dst_addr %llx\n", __func__,
active_compression_mode->name,
src_addr, dst_addr);
- } else if (ctx->async_mode && !disable_async)
- req->base.data = idxd_desc;
+ }
dev_dbg(dev, "%s: compression mode %s,"
" desc->src1_addr %llx, desc->src1_size %d,"
@@ -1204,7 +1202,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
update_total_comp_calls();
update_wq_comp_calls(wq);
- if (ctx->async_mode && !disable_async) {
+ if (ctx->async_mode) {
ret = -EINPROGRESS;
dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
goto out;
@@ -1224,7 +1222,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
*compression_crc = idxd_desc->iax_completion->crc;
- if (!ctx->async_mode || disable_async)
+ if (!ctx->async_mode)
idxd_free_desc(wq, idxd_desc);
out:
return ret;
@@ -1421,8 +1419,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
" src_addr %llx, dst_addr %llx\n", __func__,
active_compression_mode->name,
src_addr, dst_addr);
- } else if (ctx->async_mode && !disable_async)
- req->base.data = idxd_desc;
+ }
dev_dbg(dev, "%s: decompression mode %s,"
" desc->src1_addr %llx, desc->src1_size %d,"
@@ -1490,13 +1487,11 @@ static int iaa_comp_acompress(struct acomp_req *req)
struct iaa_compression_ctx *compression_ctx;
struct crypto_tfm *tfm = req->base.tfm;
dma_addr_t src_addr, dst_addr;
- bool disable_async = false;
int nr_sgs, cpu, ret = 0;
struct iaa_wq *iaa_wq;
u32 compression_crc;
struct idxd_wq *wq;
struct device *dev;
- int order = -1;
compression_ctx = crypto_tfm_ctx(tfm);
@@ -1526,21 +1521,6 @@ static int iaa_comp_acompress(struct acomp_req *req)
iaa_wq = idxd_wq_get_private(wq);
- if (!req->dst) {
- gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
-
- /* incompressible data will always be < 2 * slen */
- req->dlen = 2 * req->slen;
- order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
- req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
- if (!req->dst) {
- ret = -ENOMEM;
- order = -1;
- goto out;
- }
- disable_async = true;
- }
-
dev = &wq->idxd->pdev->dev;
nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
@@ -1570,7 +1550,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
req->dst, req->dlen, sg_dma_len(req->dst));
ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
- &req->dlen, &compression_crc, disable_async);
+ &req->dlen, &compression_crc);
if (ret == -EINPROGRESS)
return ret;
@@ -1601,100 +1581,6 @@ err_map_dst:
out:
iaa_wq_put(wq);
- if (order >= 0)
- sgl_free_order(req->dst, order);
-
- return ret;
-}
-
-static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
-{
- gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
- GFP_KERNEL : GFP_ATOMIC;
- struct crypto_tfm *tfm = req->base.tfm;
- dma_addr_t src_addr, dst_addr;
- int nr_sgs, cpu, ret = 0;
- struct iaa_wq *iaa_wq;
- struct device *dev;
- struct idxd_wq *wq;
- int order = -1;
-
- cpu = get_cpu();
- wq = wq_table_next_wq(cpu);
- put_cpu();
- if (!wq) {
- pr_debug("no wq configured for cpu=%d\n", cpu);
- return -ENODEV;
- }
-
- ret = iaa_wq_get(wq);
- if (ret) {
- pr_debug("no wq available for cpu=%d\n", cpu);
- return -ENODEV;
- }
-
- iaa_wq = idxd_wq_get_private(wq);
-
- dev = &wq->idxd->pdev->dev;
-
- nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
- if (nr_sgs <= 0 || nr_sgs > 1) {
- dev_dbg(dev, "couldn't map src sg for iaa device %d,"
- " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
- iaa_wq->wq->id, ret);
- ret = -EIO;
- goto out;
- }
- src_addr = sg_dma_address(req->src);
- dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
- " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
- req->src, req->slen, sg_dma_len(req->src));
-
- req->dlen = 4 * req->slen; /* start with ~avg comp rato */
-alloc_dest:
- order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
- req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
- if (!req->dst) {
- ret = -ENOMEM;
- order = -1;
- goto out;
- }
-
- nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
- if (nr_sgs <= 0 || nr_sgs > 1) {
- dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
- " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
- iaa_wq->wq->id, ret);
- ret = -EIO;
- goto err_map_dst;
- }
-
- dst_addr = sg_dma_address(req->dst);
- dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
- " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
- req->dst, req->dlen, sg_dma_len(req->dst));
- ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
- dst_addr, &req->dlen, true);
- if (ret == -EOVERFLOW) {
- dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
- req->dlen *= 2;
- if (req->dlen > CRYPTO_ACOMP_DST_MAX)
- goto err_map_dst;
- goto alloc_dest;
- }
-
- if (ret != 0)
- dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);
-
- dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
-err_map_dst:
- dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
-out:
- iaa_wq_put(wq);
-
- if (order >= 0)
- sgl_free_order(req->dst, order);
-
return ret;
}
@@ -1717,9 +1603,6 @@ static int iaa_comp_adecompress(struct acomp_req *req)
return -EINVAL;
}
- if (!req->dst)
- return iaa_comp_adecompress_alloc_dest(req);
-
cpu = get_cpu();
wq = wq_table_next_wq(cpu);
put_cpu();
@@ -1800,19 +1683,10 @@ static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
return 0;
}
-static void dst_free(struct scatterlist *sgl)
-{
- /*
- * Called for req->dst = NULL cases but we free elsewhere
- * using sgl_free_order().
- */
-}
-
static struct acomp_alg iaa_acomp_fixed_deflate = {
.init = iaa_comp_init_fixed,
.compress = iaa_comp_acompress,
.decompress = iaa_comp_adecompress,
- .dst_free = dst_free,
.base = {
.cra_name = "deflate",
.cra_driver_name = "deflate-iaa",
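[Annotation] The bulk of this removal follows an acomp API change: the crypto core now guarantees req->dst is supplied by the caller, so the driver's NULL-dst paths (the sgl_alloc_order() destination, the grow-and-retry loop on -EOVERFLOW, the disable_async plumbing and the empty dst_free stub) are dead code. A hedged sketch of the caller-side contract the driver now relies on, with illustrative names:

        struct acomp_req *req;
        int ret;

        req = acomp_request_alloc(tfm);
        if (!req)
                return -ENOMEM;

        /* src_sgl and dst_sgl are both caller-provided scatterlists */
        acomp_request_set_params(req, src_sgl, dst_sgl, slen, dlen);
        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   my_complete, my_ctx);

        ret = crypto_acomp_compress(req);
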
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
index f49818a13013..41420e349572 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
@@ -181,11 +181,19 @@ static void adf_remove(struct pci_dev *pdev)
adf_cleanup_accel(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_420XX_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
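[Annotation] The same .shutdown handler is added to all five QAT PF drivers in this series. A PCI driver's .shutdown callback runs at reboot and kexec (not at unbind); quiescing the accelerator there stops in-flight DMA and interrupts before the next kernel takes over the memory. The generic shape, with illustrative names:

        static void my_shutdown(struct pci_dev *pdev)
        {
                /* stop DMA, mask interrupts, bring the device to a safe state */
        }

        static struct pci_driver my_driver = {
                .name     = "my-accel",
                .id_table = my_pci_tbl,
                .probe    = my_probe,
                .remove   = my_remove,
                .shutdown = my_shutdown,
        };
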
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 659905e45950..01b34eda83e9 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -183,11 +183,19 @@ static void adf_remove(struct pci_dev *pdev)
adf_cleanup_accel(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_4XXX_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index 4d18057745d4..b776f7ea0dfb 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -19,6 +19,13 @@
#include <adf_dbgfs.h>
#include "adf_c3xxx_hw_data.h"
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static const struct pci_device_id adf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
{ }
@@ -33,6 +40,7 @@ static struct pci_driver adf_driver = {
.name = ADF_C3XXX_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index e6b5de55434e..5310149c311e 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -19,6 +19,13 @@
#include <adf_dbgfs.h>
#include "adf_c62x_hw_data.h"
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static const struct pci_device_id adf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
{ }
@@ -33,6 +40,7 @@ static struct pci_driver adf_driver = {
.name = ADF_C62X_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index 2a50cce41515..5ddf567ffcad 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -19,6 +19,13 @@
#include <adf_dbgfs.h>
#include "adf_dh895xcc_hw_data.h"
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static const struct pci_device_id adf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
{ }
@@ -33,6 +40,7 @@ static struct pci_driver adf_driver = {
.name = ADF_DH895XCC_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 5fd31ba715c2..24273cb082ba 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -94,7 +94,7 @@ static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
- if (engine->chain.first && engine->chain.last)
+ if (engine->chain_hw.first && engine->chain_hw.last)
return mv_cesa_tdma_process(engine, status);
return mv_cesa_std_process(engine, status);
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index d215a6bed6bc..50ca1039fdaa 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -440,8 +440,10 @@ struct mv_cesa_dev {
* SRAM
* @queue: fifo of the pending crypto requests
* @load: engine load counter, useful for load balancing
- * @chain: list of the current tdma descriptors being processed
- * by this engine.
+ * @chain_hw: list of the current tdma descriptors being processed
+ * by the hardware.
+ * @chain_sw: list of the current tdma descriptors that will be
+ * submitted to the hardware.
* @complete_queue: fifo of the processed requests by the engine
*
* Structure storing CESA engine information.
@@ -463,7 +465,8 @@ struct mv_cesa_engine {
struct gen_pool *pool;
struct crypto_queue queue;
atomic_t load;
- struct mv_cesa_tdma_chain chain;
+ struct mv_cesa_tdma_chain chain_hw;
+ struct mv_cesa_tdma_chain chain_sw;
struct list_head complete_queue;
int irq;
};
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index 0f37dfd42d85..3876e3ce822f 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -459,6 +459,9 @@ static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_engine *engine;
+ if (!req->cryptlen)
+ return 0;
+
ret = mv_cesa_skcipher_req_init(req, tmpl);
if (ret)
return ret;
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index f150861ceaf6..6815eddc9068 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -663,7 +663,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (ret)
goto err_free_tdma;
- if (iter.src.sg) {
+ if (iter.base.len > iter.src.op_offset) {
/*
* Add all the new data, inserting an operation block and
* launch command between each full SRAM block-worth of
diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index 388a06e180d6..243305354420 100644
--- a/drivers/crypto/marvell/cesa/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -38,6 +38,15 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
struct mv_cesa_engine *engine = dreq->engine;
+ spin_lock_bh(&engine->lock);
+ if (engine->chain_sw.first == dreq->chain.first) {
+ engine->chain_sw.first = NULL;
+ engine->chain_sw.last = NULL;
+ }
+ engine->chain_hw.first = dreq->chain.first;
+ engine->chain_hw.last = dreq->chain.last;
+ spin_unlock_bh(&engine->lock);
+
writel_relaxed(0, engine->regs + CESA_SA_CFG);
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
@@ -96,25 +105,27 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
struct mv_cesa_req *dreq)
{
- if (engine->chain.first == NULL && engine->chain.last == NULL) {
- engine->chain.first = dreq->chain.first;
- engine->chain.last = dreq->chain.last;
- } else {
- struct mv_cesa_tdma_desc *last;
+ struct mv_cesa_tdma_desc *last = engine->chain_sw.last;
- last = engine->chain.last;
+ /*
+ * Break the DMA chain if the request being queued needs the IV
+	 * regs to be set before launching the request.
+ */
+ if (!last || dreq->chain.first->flags & CESA_TDMA_SET_STATE)
+ engine->chain_sw.first = dreq->chain.first;
+ else {
last->next = dreq->chain.first;
- engine->chain.last = dreq->chain.last;
-
- /*
- * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
- * the last element of the current chain, or if the request
- * being queued needs the IV regs to be set before lauching
- * the request.
- */
- if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
- !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
- last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
+ last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
+ }
+ last = dreq->chain.last;
+ engine->chain_sw.last = last;
+ /*
+ * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
+ * the last element of the current chain.
+ */
+ if (last->flags & CESA_TDMA_BREAK_CHAIN) {
+ engine->chain_sw.first = NULL;
+ engine->chain_sw.last = NULL;
}
}
@@ -127,7 +138,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
- for (tdma = engine->chain.first; tdma; tdma = next) {
+ for (tdma = engine->chain_hw.first; tdma; tdma = next) {
spin_lock_bh(&engine->lock);
next = tdma->next;
spin_unlock_bh(&engine->lock);
@@ -149,12 +160,12 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
&backlog);
/* Re-chaining to the next request */
- engine->chain.first = tdma->next;
+ engine->chain_hw.first = tdma->next;
tdma->next = NULL;
/* If this is the last request, clear the chain */
- if (engine->chain.first == NULL)
- engine->chain.last = NULL;
+ if (engine->chain_hw.first == NULL)
+ engine->chain_hw.last = NULL;
spin_unlock_bh(&engine->lock);
ctx = crypto_tfm_ctx(req->tfm);
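[Annotation] The rework splits the engine's single descriptor chain in two: chain_sw collects descriptors queued by software but not yet started, while chain_hw is the list the interrupt handler walks. mv_cesa_dma_step() publishes a request's chain from the software side to the hardware side under the engine lock, closing the window where the IRQ path could walk a chain the submit path was still appending to. The hand-off, reduced to its shape (field names follow the patch):

        spin_lock_bh(&engine->lock);
        /* the request leaves the software queue ... */
        if (engine->chain_sw.first == dreq->chain.first) {
                engine->chain_sw.first = NULL;
                engine->chain_sw.last  = NULL;
        }
        /* ... and becomes the chain owned by the hardware/IRQ side */
        engine->chain_hw.first = dreq->chain.first;
        engine->chain_hw.last  = dreq->chain.last;
        spin_unlock_bh(&engine->lock);
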
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
index 5387c68f3c9d..426244107037 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -264,9 +264,10 @@ static int cpt_process_ccode(struct otx2_cptlfs_info *lfs,
break;
}
- dev_err(&pdev->dev,
- "Request failed with software error code 0x%x\n",
- cpt_status->s.uc_compcode);
+ pr_debug("Request failed with software error code 0x%x: algo = %s driver = %s\n",
+ cpt_status->s.uc_compcode,
+ info->req->areq->tfm->__crt_alg->cra_name,
+ info->req->areq->tfm->__crt_alg->cra_driver_name);
otx2_cpt_dump_sg_list(pdev, info->req);
break;
}
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 77a6301f37f0..29c0c69d5905 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -265,12 +265,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
MXS_DCP_CONTROL0_INTERRUPT |
MXS_DCP_CONTROL0_ENABLE_CIPHER;
- if (key_referenced)
- /* Set OTP key bit to select the key via KEY_SELECT. */
- desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
- else
+ if (!key_referenced)
/* Payload contains the key. */
desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
+ else if (actx->key[0] == DCP_PAES_KEY_OTP)
+ /* Set OTP key bit to select the key via KEY_SELECT. */
+ desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
if (rctx->enc)
desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
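[Annotation] The reordering fixes referenced-key selection: previously any referenced key set the OTP bit, which broke use of the DCP's unique key; now plain keys travel in the payload, the OTP bit is set only when the paes key blob names the OTP slot, and the unique key is selected by leaving both bits clear. The resulting decision, summarized with the driver's own names:

        if (!key_referenced)                            /* plain key */
                desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
        else if (actx->key[0] == DCP_PAES_KEY_OTP)      /* paes: OTP slot */
                desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
        /* else: paes unique key, neither bit set (hardware default) */
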
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 1bcec6f46c9c..9b5345068604 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -3,18 +3,19 @@
* Xilinx ZynqMP SHA Driver.
* Copyright (c) 2022 Xilinx Inc.
*/
-#include <linux/cacheflush.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha3.h>
-#include <linux/crypto.h>
+#include <linux/cacheflush.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/firmware/xlnx-zynqmp.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/spinlock.h>
#include <linux/platform_device.h>
#define ZYNQMP_DMA_BIT_MASK 32U
@@ -43,6 +44,8 @@ struct zynqmp_sha_desc_ctx {
static dma_addr_t update_dma_addr, final_dma_addr;
static char *ubuf, *fbuf;
+static DEFINE_SPINLOCK(zynqmp_sha_lock);
+
static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
{
const char *fallback_driver_name = crypto_shash_alg_name(hash);
@@ -124,7 +127,8 @@ static int zynqmp_sha_export(struct shash_desc *desc, void *out)
return crypto_shash_export(&dctx->fbk_req, out);
}
-static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+static int __zynqmp_sha_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
unsigned int remaining_len = len;
int update_size;
@@ -159,6 +163,12 @@ static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned i
return ret;
}
+static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+{
+ scoped_guard(spinlock_bh, &zynqmp_sha_lock)
+ return __zynqmp_sha_digest(desc, data, len, out);
+}
+
static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
.sha3_384 = {
.init = zynqmp_sha_init,
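[Annotation] The digest path stages data through the driver's shared static ubuf/fbuf DMA buffers, so concurrent digest calls would trample each other; the new spinlock serializes them. scoped_guard() from <linux/cleanup.h> releases the lock when the guarded statement's scope is left, including via the return. The wrapper idiom, sketched with illustrative names:

        static DEFINE_SPINLOCK(my_lock);

        static int my_digest(struct shash_desc *desc, const u8 *data,
                             unsigned int len, u8 *out)
        {
                /* lock dropped automatically when the scope exits */
                scoped_guard(spinlock_bh, &my_lock)
                        return __my_digest(desc, data, len, out);
        }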