Diffstat (limited to 'drivers/crypto')
53 files changed, 563 insertions, 608 deletions
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 19b7fb4a93e8..63e66a85477e 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -265,8 +265,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
     }
     chan->timeout = areq->cryptlen;
-    rctx->nr_sgs = nr_sgs;
-    rctx->nr_sgd = nr_sgd;
+    rctx->nr_sgs = ns;
+    rctx->nr_sgd = nd;
     return 0;

theend_sgs:
@@ -275,13 +275,16 @@ theend_sgs:
     } else {
         if (nr_sgs > 0)
             dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
-        dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
+
+        if (nr_sgd > 0)
+            dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
     }

theend_iv:
     if (areq->iv && ivsize > 0) {
-        if (rctx->addr_iv)
+        if (!dma_mapping_error(ce->dev, rctx->addr_iv))
             dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+
         offset = areq->cryptlen - ivsize;
         if (rctx->op_dir & CE_DECRYPTION) {
             memcpy(areq->iv, chan->backup_iv, ivsize);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index e55e58e164db..fcc6832a065c 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -832,13 +832,12 @@ static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce)
     err = pm_runtime_set_suspended(ce->dev);
     if (err)
         return err;
-    pm_runtime_enable(ce->dev);
-    return err;
-}

-static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce)
-{
-    pm_runtime_disable(ce->dev);
+    err = devm_pm_runtime_enable(ce->dev);
+    if (err)
+        return err;
+
+    return 0;
 }

 static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
@@ -1041,7 +1040,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
                    "sun8i-ce-ns", ce);
     if (err) {
         dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err);
-        goto error_irq;
+        goto error_pm;
     }

     err = sun8i_ce_register_algs(ce);
@@ -1082,8 +1081,6 @@ static int sun8i_ce_probe(struct platform_device *pdev)
     return 0;
error_alg:
     sun8i_ce_unregister_algs(ce);
-error_irq:
-    sun8i_ce_pm_exit(ce);
error_pm:
     sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
     return err;
@@ -1104,8 +1101,6 @@ static void sun8i_ce_remove(struct platform_device *pdev)
 #endif

     sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
-
-    sun8i_ce_pm_exit(ce);
 }

 static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
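The IV-unmap fix above replaces a truthiness test on the DMA handle with dma_mapping_error(). A minimal standalone sketch of why that matters; the type and sentinel value here are illustrative stand-ins, not the kernel's:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t dma_handle_t;
    #define MAPPING_ERROR ((dma_handle_t)~0ull)  /* stand-in for DMA_MAPPING_ERROR */

    /* A bus address of 0 can be a perfectly valid mapping, so "if (handle)"
     * may skip a required unmap; only the error sentinel means "not mapped". */
    static bool should_unmap(dma_handle_t handle)
    {
        return handle != MAPPING_ERROR;
    }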
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 6072dd9f390b..3f9d79ea01aa 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -343,9 +343,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
     u32 common;
     u64 byte_count;
     __le32 *bf;
-    void *buf = NULL;
+    void *buf, *result;
     int j, i, todo;
-    void *result = NULL;
     u64 bs;
     int digestsize;
     dma_addr_t addr_res, addr_pad;
@@ -365,14 +364,14 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
     buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
     if (!buf) {
         err = -ENOMEM;
-        goto theend;
+        goto err_out;
     }
     bf = (__le32 *)buf;

     result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
     if (!result) {
         err = -ENOMEM;
-        goto theend;
+        goto err_free_buf;
     }

     flow = rctx->flow;
@@ -398,7 +397,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
     if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
         dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
         err = -EINVAL;
-        goto theend;
+        goto err_free_result;
     }

     len = areq->nbytes;
@@ -411,7 +410,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
     if (len > 0) {
         dev_err(ce->dev, "remaining len %d\n", len);
         err = -EINVAL;
-        goto theend;
+        goto err_unmap_src;
     }
     addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
     cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
@@ -419,7 +418,7 @@
     if (dma_mapping_error(ce->dev, addr_res)) {
         dev_err(ce->dev, "DMA map dest\n");
         err = -EINVAL;
-        goto theend;
+        goto err_unmap_src;
     }

     byte_count = areq->nbytes;
@@ -441,7 +440,7 @@
     }
     if (!j) {
         err = -EINVAL;
-        goto theend;
+        goto err_unmap_result;
     }

     addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
@@ -450,7 +449,7 @@
     if (dma_mapping_error(ce->dev, addr_pad)) {
         dev_err(ce->dev, "DMA error on padding SG\n");
         err = -EINVAL;
-        goto theend;
+        goto err_unmap_result;
     }

     if (ce->variant->hash_t_dlen_in_bits)
@@ -463,16 +462,25 @@
     err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));

     dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
-    dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+
+err_unmap_result:
     dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+    if (!err)
+        memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
+err_unmap_src:
+    dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);

-    memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
-theend:
-    kfree(buf);
+err_free_result:
     kfree(result);
+
+err_free_buf:
+    kfree(buf);
+
+err_out:
     local_bh_disable();
     crypto_finalize_hash_request(engine, breq, err);
     local_bh_enable();
+
     return 0;
 }
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 3b5c2af013d0..83df4d719053 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -308,8 +308,8 @@ struct sun8i_ce_hash_tfm_ctx {
  * @flow: the flow to use for this request
  */
 struct sun8i_ce_hash_reqctx {
-    struct ahash_request fallback_req;
     int flow;
+    struct ahash_request fallback_req; // keep at the end
 };

diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 9b9605ce8ee6..8831bcb230c2 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -141,7 +141,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)

     /* we need to copy all IVs from source in case DMA is bi-directionnal */
     while (sg && len) {
-        if (sg_dma_len(sg) == 0) {
+        if (sg->length == 0) {
             sg = sg_next(sg);
             continue;
         }
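The sun8i-ce-hash rework above replaces a single catch-all "theend:" label with a ladder of labels that unwind in reverse order of acquisition, so a failure at step N never touches resources from later steps. A compilable sketch of the shape, with placeholder resources and a stubbed job:

    #include <stdlib.h>

    static int run_job(void) { return 0; } /* placeholder for the DMA work */

    static int hash_request(void)
    {
        int err = -1;
        void *buf, *result;

        buf = malloc(64);
        if (!buf)
            goto err_out;
        result = malloc(32);
        if (!result)
            goto err_free_buf;

        err = run_job();
        /* success and failure share the same unwind from here down */
        free(result);
    err_free_buf:
        free(buf);
    err_out:
        return err;
    }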
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index a02d496f4c41..b9658e38cb03 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -163,6 +163,12 @@ static int atmel_sha204a_probe(struct i2c_client *client)
     i2c_priv->hwrng.name = dev_name(&client->dev);
     i2c_priv->hwrng.read = atmel_sha204a_rng_read;

+    /*
+     * According to review by Bill Cox [1], this HWRNG has very low entropy.
+     * [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html
+     */
+    i2c_priv->hwrng.quality = 1;
+
     ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
     if (ret)
         dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
index 87781c1534ee..079a22cc9f02 100644
--- a/drivers/crypto/caam/blob_gen.c
+++ b/drivers/crypto/caam/blob_gen.c
@@ -2,6 +2,7 @@
 /*
  * Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
  * Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ * Copyright 2024 NXP
  */

 #define pr_fmt(fmt) "caam blob_gen: " fmt
@@ -104,7 +105,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
     }

     ctrlpriv = dev_get_drvdata(jrdev->parent);
-    moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status));
+    moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
     if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
         dev_warn(jrdev,
              "using insecure test key, enable HAB to use unique device key!\n");
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 8ed2bb01a619..448606300500 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -122,12 +122,12 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
     qm_fd_addr_set64(&fd, addr);

     do {
+        refcount_inc(&req->drv_ctx->refcnt);
         ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
-        if (likely(!ret)) {
-            refcount_inc(&req->drv_ctx->refcnt);
+        if (likely(!ret))
             return 0;
-        }

+        refcount_dec(&req->drv_ctx->refcnt);
         if (ret != -EBUSY)
             break;
         num_retries++;
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index a1055554b47a..dc26bc22c91d 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -319,5 +319,8 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)

 void ccp5_debugfs_destroy(void)
 {
+    mutex_lock(&ccp_debugfs_lock);
     debugfs_remove_recursive(ccp_debugfs_dir);
+    ccp_debugfs_dir = NULL;
+    mutex_unlock(&ccp_debugfs_lock);
 }
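The caam/qi change above takes the context reference before the enqueue rather than after it succeeds, then drops it again on failure; otherwise the completion could run, and release the context, before the increment lands. A standalone sketch of the ordering using C11 atomics (names illustrative):

    #include <stdatomic.h>

    struct drv_ctx { atomic_int refcnt; };

    static int enqueue_fd(struct drv_ctx *ctx) { (void)ctx; return 0; } /* stub */

    static int submit(struct drv_ctx *ctx)
    {
        atomic_fetch_add(&ctx->refcnt, 1);      /* pin before publishing */
        if (enqueue_fd(ctx) == 0)
            return 0;
        atomic_fetch_sub(&ctx->refcnt, 1);      /* publish failed: unpin */
        return -1;
    }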
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index af018afd9cd7..4d072c084d7b 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -424,7 +424,7 @@ cleanup:
     return rc;
 }

-static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
+static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order, bool locked)
 {
     unsigned long npages = 1ul << order, paddr;
     struct sev_device *sev;
@@ -443,7 +443,7 @@ static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
         return page;

     paddr = __pa((unsigned long)page_address(page));
-    if (rmp_mark_pages_firmware(paddr, npages, false))
+    if (rmp_mark_pages_firmware(paddr, npages, locked))
         return NULL;

     return page;
@@ -453,7 +453,7 @@ void *snp_alloc_firmware_page(gfp_t gfp_mask)
 {
     struct page *page;

-    page = __snp_alloc_firmware_pages(gfp_mask, 0);
+    page = __snp_alloc_firmware_pages(gfp_mask, 0, false);

     return page ? page_address(page) : NULL;
 }

@@ -488,7 +488,7 @@ static void *sev_fw_alloc(unsigned long len)
 {
     struct page *page;

-    page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len));
+    page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len), true);
     if (!page)
         return NULL;
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 248d98fd8c48..224edaaa737b 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -189,14 +189,17 @@ static bool sp_pci_is_master(struct sp_device *sp)
     pdev_new = to_pci_dev(dev_new);
     pdev_cur = to_pci_dev(dev_cur);

-    if (pdev_new->bus->number < pdev_cur->bus->number)
-        return true;
+    if (pci_domain_nr(pdev_new->bus) != pci_domain_nr(pdev_cur->bus))
+        return pci_domain_nr(pdev_new->bus) < pci_domain_nr(pdev_cur->bus);

-    if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn))
-        return true;
+    if (pdev_new->bus->number != pdev_cur->bus->number)
+        return pdev_new->bus->number < pdev_cur->bus->number;

-    if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn))
-        return true;
+    if (PCI_SLOT(pdev_new->devfn) != PCI_SLOT(pdev_cur->devfn))
+        return PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn);
+
+    if (PCI_FUNC(pdev_new->devfn) != PCI_FUNC(pdev_cur->devfn))
+        return PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn);

     return false;
 }
@@ -448,6 +451,7 @@ static const struct psp_vdata pspv6 = {
     .cmdresp_reg        = 0x10944,  /* C2PMSG_17 */
     .cmdbuff_addr_lo_reg    = 0x10948,  /* C2PMSG_18 */
     .cmdbuff_addr_hi_reg    = 0x1094c,  /* C2PMSG_19 */
+    .bootloader_info_reg    = 0x109ec,  /* C2PMSG_59 */
     .feature_reg        = 0x109fc,  /* C2PMSG_63 */
     .inten_reg      = 0x10510,  /* P2CMSG_INTEN */
     .intsts_reg     = 0x10514,  /* P2CMSG_INTSTS */
@@ -529,6 +533,7 @@ static const struct pci_device_id sp_pci_table[] = {
     { PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
     { PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
     { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
+    { PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] },
     { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
     { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
     /* Last entry must be zero */
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index c167dbd6c7d6..e71f1e459764 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1487,11 +1487,13 @@ static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
     if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
         atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

+    /* Do unmap before data processing */
+    hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+
     p = sg_virt(areq->dst);
     memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
     memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);

-    hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
     kpp_request_complete(areq, ret);

     atomic64_inc(&dfx[HPRE_RECV_CNT].value);
@@ -1801,9 +1803,11 @@ static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
     if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
         atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

+    /* Do unmap before data processing */
+    hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+
     hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);

-    hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
     kpp_request_complete(areq, ret);

     atomic64_inc(&dfx[HPRE_RECV_CNT].value);
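The sp-pci hunk above turns three independent "<" checks into a proper lexicographic comparison over (domain, bus, slot, function); the old form could declare 01:00.1 "before" 02:00.0 because the function number compared lower. A standalone model of the corrected comparator:

    #include <stdbool.h>

    struct pci_addr { int domain, bus, slot, func; };

    static bool pci_addr_less(const struct pci_addr *a, const struct pci_addr *b)
    {
        if (a->domain != b->domain)
            return a->domain < b->domain;
        if (a->bus != b->bus)
            return a->bus < b->bus;
        if (a->slot != b->slot)
            return a->slot < b->slot;
        if (a->func != b->func)
            return a->func < b->func;
        return false; /* equal addresses */
    }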
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 410c83712e28..2fc04e210bc4 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -90,9 +90,7 @@ struct sec_auth_ctx {
     dma_addr_t a_key_dma;
     u8 *a_key;
     u8 a_key_len;
-    u8 mac_len;
     u8 a_alg;
-    bool fallback;
     struct crypto_shash *hash_tfm;
     struct crypto_aead *fallback_aead_tfm;
 };
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 0558f98e221f..8605cb3cae92 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -57,7 +57,6 @@
 #define SEC_TYPE_MASK       0x0F
 #define SEC_DONE_MASK       0x0001
 #define SEC_ICV_MASK        0x000E
-#define SEC_SQE_LEN_RATE_MASK   0x3

 #define SEC_TOTAL_IV_SZ(depth)  (SEC_IV_SIZE * (depth))
 #define SEC_SGL_SGE_NR      128
@@ -80,16 +79,16 @@
 #define SEC_TOTAL_PBUF_SZ(depth)    (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
                      SEC_PBUF_LEFT_SZ(depth))

-#define SEC_SQE_LEN_RATE    4
 #define SEC_SQE_CFLAG       2
 #define SEC_SQE_AEAD_FLAG   3
 #define SEC_SQE_DONE        0x1
 #define SEC_ICV_ERR     0x2
-#define MIN_MAC_LEN     4
 #define MAC_LEN_MASK        0x1U
 #define MAX_INPUT_DATA_LEN  0xFFFE00
 #define BITS_MASK       0xFF
+#define WORD_MASK       0x3
 #define BYTE_BITS       0x8
+#define BYTES_TO_WORDS(bcount)  ((bcount) >> 2)
 #define SEC_XTS_NAME_SZ     0x3
 #define IV_CM_CAL_NUM       2
 #define IV_CL_MASK      0x7
@@ -691,14 +690,10 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)

     c_ctx->fallback = false;

-    /* Currently, only XTS mode need fallback tfm when using 192bit key */
-    if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
-        return 0;
-
     c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
                           CRYPTO_ALG_NEED_FALLBACK);
     if (IS_ERR(c_ctx->fbtfm)) {
-        pr_err("failed to alloc xts mode fallback tfm!\n");
+        pr_err("failed to alloc fallback tfm for %s!\n", alg);
         return PTR_ERR(c_ctx->fbtfm);
     }
@@ -858,7 +853,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
     }

     memcpy(c_ctx->c_key, key, keylen);
-    if (c_ctx->fallback && c_ctx->fbtfm) {
+    if (c_ctx->fbtfm) {
         ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
         if (ret) {
             dev_err(dev, "failed to set fallback skcipher key!\n");
@@ -948,15 +943,14 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
     struct aead_request *aead_req = req->aead_req;
     struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
     size_t authsize = crypto_aead_authsize(tfm);
-    u8 *mac_out = req->out_mac;
     struct scatterlist *sgl = aead_req->src;
+    u8 *mac_out = req->out_mac;
     size_t copy_size;
     off_t skip_size;

     /* Copy input mac */
     skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
-    copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
-                       authsize, skip_size);
+    copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
     if (unlikely(copy_size != authsize))
         return -EINVAL;
@@ -1091,11 +1085,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
     struct crypto_shash *hash_tfm = ctx->hash_tfm;
     int blocksize, digestsize, ret;

-    if (!keys->authkeylen) {
-        pr_err("hisi_sec2: aead auth key error!\n");
-        return -EINVAL;
-    }
-
     blocksize = crypto_shash_blocksize(hash_tfm);
     digestsize = crypto_shash_digestsize(hash_tfm);
     if (keys->authkeylen > blocksize) {
@@ -1107,7 +1096,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
         }
         ctx->a_key_len = digestsize;
     } else {
-        memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
+        if (keys->authkeylen)
+            memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
         ctx->a_key_len = keys->authkeylen;
     }

@@ -1120,10 +1110,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
     struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
     struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

-    if (unlikely(a_ctx->fallback_aead_tfm))
-        return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
-
-    return 0;
+    return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
 }

 static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
@@ -1139,7 +1126,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
 static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                const u32 keylen, const enum sec_hash_alg a_alg,
                const enum sec_calg c_alg,
-               const enum sec_mac_len mac_len,
                const enum sec_cmode c_mode)
 {
     struct sec_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1151,7 +1137,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,

     ctx->a_ctx.a_alg = a_alg;
     ctx->c_ctx.c_alg = c_alg;
-    ctx->a_ctx.mac_len = mac_len;
     c_ctx->c_mode = c_mode;

     if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
@@ -1162,18 +1147,14 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
         }
         memcpy(c_ctx->c_key, key, keylen);

-        if (unlikely(a_ctx->fallback_aead_tfm)) {
-            ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
-            if (ret)
-                return ret;
-        }
-
-        return 0;
+        return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
     }

     ret = crypto_authenc_extractkeys(&keys, key, keylen);
-    if (ret)
+    if (ret) {
+        dev_err(dev, "sec extract aead keys err!\n");
         goto bad_key;
+    }

     ret = sec_aead_aes_set_key(c_ctx, &keys);
     if (ret) {
@@ -1187,10 +1168,9 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
         goto bad_key;
     }

-    if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
-        (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
-        ret = -EINVAL;
-        dev_err(dev, "MAC or AUTH key length error!\n");
+    ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+    if (ret) {
+        dev_err(dev, "set sec fallback key err!\n");
         goto bad_key;
     }

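The setkey changes above stop rejecting zero-length auth keys, which is consistent with the HMAC definition: keys longer than the block size are digested first, anything shorter (including empty) is zero-padded by the construction itself. A sketch of that rule, with digest() as a placeholder for the hash primitive:

    #include <stddef.h>
    #include <string.h>

    static void digest(const unsigned char *in, size_t len, unsigned char *out);

    /* Returns the effective key length stored in 'out' (capacity >= digestsize). */
    static size_t hmac_normalize_key(const unsigned char *key, size_t keylen,
                                     size_t blocksize, size_t digestsize,
                                     unsigned char *out)
    {
        if (keylen > blocksize) {
            digest(key, keylen, out);
            return digestsize;
        }
        if (keylen)
            memcpy(out, key, keylen);
        return keylen; /* may legitimately be 0 */
    }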
@@ -1202,27 +1182,19 @@ bad_key:
 }

-#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)  \
-static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,   \
-                 u32 keylen)                \
-{                                   \
-    return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
-}
-
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
-             SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
-             SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
-             SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
-             SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
-             SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
-             SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
-             SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
+#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode)           \
+static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \
+{                                       \
+    return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode);        \
+}
+
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)

 static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
 {
@@ -1470,9 +1442,10 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
 static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
 {
     struct aead_request *aead_req = req->aead_req.aead_req;
-    struct sec_cipher_req *c_req = &req->c_req;
+    struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+    size_t authsize = crypto_aead_authsize(tfm);
     struct sec_aead_req *a_req = &req->aead_req;
-    size_t authsize = ctx->a_ctx.mac_len;
+    struct sec_cipher_req *c_req = &req->c_req;
     u32 data_size = aead_req->cryptlen;
     u8 flage = 0;
     u8 cm, cl;
@@ -1513,10 +1486,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
 static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
 {
     struct aead_request *aead_req = req->aead_req.aead_req;
-    struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
-    size_t authsize = crypto_aead_authsize(tfm);
-    struct sec_cipher_req *c_req = &req->c_req;
     struct sec_aead_req *a_req = &req->aead_req;
+    struct sec_cipher_req *c_req = &req->c_req;

     memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);

@@ -1524,15 +1495,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
         /*
          * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
          * the counter must set to 0x01
+         * CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length}
          */
-        ctx->a_ctx.mac_len = authsize;
-        /* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
         set_aead_auth_iv(ctx, req);
-    }
-
-    /* GCM 12Byte Cipher_IV == Auth_IV */
-    if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
-        ctx->a_ctx.mac_len = authsize;
+    } else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
+        /* GCM 12Byte Cipher_IV == Auth_IV */
         memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
     }
 }
@@ -1542,9 +1509,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
 {
     struct sec_aead_req *a_req = &req->aead_req;
     struct aead_request *aq = a_req->aead_req;
+    struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+    size_t authsize = crypto_aead_authsize(tfm);

     /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
-    sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
+    sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);

     /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
     sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
@@ -1568,9 +1537,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
 {
     struct sec_aead_req *a_req = &req->aead_req;
     struct aead_request *aq = a_req->aead_req;
+    struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+    size_t authsize = crypto_aead_authsize(tfm);

     /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
-    sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
+    sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);

     /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
     sqe3->a_key_addr = sqe3->c_key_addr;
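These hunks drop the cached a_ctx->mac_len in favor of reading the authentication size from the transform at request time, and convert bytes to the SQE's 32-bit-word units with a shift. A small sketch of the field packing; the bit offsets below are illustrative, not the hardware's:

    #include <stdint.h>

    #define BYTES_TO_WORDS(bcount) ((bcount) >> 2)

    static uint32_t pack_mac_key_alg(uint32_t authsize_bytes, uint32_t akey_bytes,
                                     uint32_t a_alg)
    {
        uint32_t v = BYTES_TO_WORDS(authsize_bytes);   /* MAC length, in words */

        v |= BYTES_TO_WORDS(akey_bytes) << 6;          /* AKEY offset: illustrative */
        v |= a_alg << 11;                              /* ALG offset: illustrative */
        return v;
    }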
@@ -1594,15 +1565,15 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
     struct sec_aead_req *a_req = &req->aead_req;
     struct sec_cipher_req *c_req = &req->c_req;
     struct aead_request *aq = a_req->aead_req;
+    struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+    size_t authsize = crypto_aead_authsize(tfm);

     sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

-    sec_sqe->type2.mac_key_alg =
-        cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
+    sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize));

     sec_sqe->type2.mac_key_alg |=
-        cpu_to_le32((u32)((ctx->a_key_len) /
-                  SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
+        cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET);

     sec_sqe->type2.mac_key_alg |=
         cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
@@ -1648,16 +1619,16 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
     struct sec_aead_req *a_req = &req->aead_req;
     struct sec_cipher_req *c_req = &req->c_req;
     struct aead_request *aq = a_req->aead_req;
+    struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+    size_t authsize = crypto_aead_authsize(tfm);

     sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);

     sqe3->auth_mac_key |=
-        cpu_to_le32((u32)(ctx->mac_len /
-                  SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
+        cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3);

     sqe3->auth_mac_key |=
-        cpu_to_le32((u32)(ctx->a_key_len /
-                  SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);
+        cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3);

     sqe3->auth_mac_key |=
         cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);
@@ -1703,9 +1674,9 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
 {
     struct aead_request *a_req = req->aead_req.aead_req;
     struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+    size_t authsize = crypto_aead_authsize(tfm);
     struct sec_aead_req *aead_req = &req->aead_req;
     struct sec_cipher_req *c_req = &req->c_req;
-    size_t authsize = crypto_aead_authsize(tfm);
     struct sec_qp_ctx *qp_ctx = req->qp_ctx;
     struct aead_request *backlog_aead_req;
     struct sec_req *backlog_req;
@@ -1718,10 +1689,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
     if (!err && c_req->encrypt) {
         struct scatterlist *sgl = a_req->dst;

-        sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
-                      aead_req->out_mac,
-                      authsize, a_req->cryptlen +
-                      a_req->assoclen);
+        sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
+                      authsize, a_req->cryptlen + a_req->assoclen);
         if (unlikely(sz != authsize)) {
             dev_err(c->dev, "copy out mac err!\n");
             err = -EINVAL;
@@ -1929,8 +1898,10 @@ static void sec_aead_exit(struct crypto_aead *tfm)

 static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
 {
+    struct aead_alg *alg = crypto_aead_alg(tfm);
     struct sec_ctx *ctx = crypto_aead_ctx(tfm);
-    struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+    struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+    const char *aead_name = alg->base.cra_name;
     int ret;

     ret = sec_aead_init(tfm);
@@ -1939,11 +1910,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
         return ret;
     }

-    auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
-    if (IS_ERR(auth_ctx->hash_tfm)) {
+    a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+    if (IS_ERR(a_ctx->hash_tfm)) {
         dev_err(ctx->dev, "aead alloc shash error!\n");
         sec_aead_exit(tfm);
-        return PTR_ERR(auth_ctx->hash_tfm);
+        return PTR_ERR(a_ctx->hash_tfm);
+    }
+
+    a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
+                             CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
+    if (IS_ERR(a_ctx->fallback_aead_tfm)) {
+        dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
+        crypto_free_shash(ctx->a_ctx.hash_tfm);
+        sec_aead_exit(tfm);
+        return PTR_ERR(a_ctx->fallback_aead_tfm);
     }

     return 0;
@@ -1953,6 +1933,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
 {
     struct sec_ctx *ctx = crypto_aead_ctx(tfm);

+    crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
     crypto_free_shash(ctx->a_ctx.hash_tfm);
     sec_aead_exit(tfm);
 }
@@ -1979,7 +1960,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
         sec_aead_exit(tfm);
         return PTR_ERR(a_ctx->fallback_aead_tfm);
     }
-    a_ctx->fallback = false;

     return 0;
 }
@@ -2007,8 +1987,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
     return sec_aead_ctx_init(tfm, "sha512");
 }

-static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
-                       struct sec_req *sreq)
+static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq)
 {
     u32 cryptlen = sreq->c_req.sk_req->cryptlen;
     struct device *dev = ctx->dev;
@@ -2030,10 +2009,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
         }
         break;
     case SEC_CMODE_CTR:
-        if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
-            dev_err(dev, "skcipher HW version error!\n");
-            ret = -EINVAL;
-        }
         break;
     default:
         ret = -EINVAL;
@@ -2042,17 +2017,21 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
     return ret;
 }

-static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+static int sec_skcipher_param_check(struct sec_ctx *ctx,
+                    struct sec_req *sreq, bool *need_fallback)
 {
     struct skcipher_request *sk_req = sreq->c_req.sk_req;
     struct device *dev = ctx->dev;
     u8 c_alg = ctx->c_ctx.c_alg;

-    if (unlikely(!sk_req->src || !sk_req->dst ||
-             sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
+    if (unlikely(!sk_req->src || !sk_req->dst)) {
         dev_err(dev, "skcipher input param error!\n");
         return -EINVAL;
     }
+
+    if (sk_req->cryptlen > MAX_INPUT_DATA_LEN)
+        *need_fallback = true;
+
     sreq->c_req.c_len = sk_req->cryptlen;

     if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
@@ -2110,6 +2089,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
     struct sec_req *req = skcipher_request_ctx(sk_req);
     struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+    bool need_fallback = false;
     int ret;

     if (!sk_req->cryptlen) {
@@ -2123,11 +2103,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
     req->c_req.encrypt = encrypt;
     req->ctx = ctx;

-    ret = sec_skcipher_param_check(ctx, req);
+    ret = sec_skcipher_param_check(ctx, req, &need_fallback);
     if (unlikely(ret))
         return -EINVAL;

-    if (unlikely(ctx->c_ctx.fallback))
+    if (unlikely(ctx->c_ctx.fallback || need_fallback))
         return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);

     return ctx->req_op->process(ctx, req);
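The param-check rework above distinguishes invalid requests (rejected outright) from valid ones the engine cannot handle (routed to the software fallback) via an out-parameter, instead of a flag stashed in the shared context. A compact standalone model of that control flow:

    #include <stdbool.h>

    #define MAX_HW_LEN 0xFFFE00u /* mirrors MAX_INPUT_DATA_LEN in the diff */

    static int soft_crypto(void) { return 0; }  /* stub fallback path */
    static int hw_crypto(void)   { return 0; }  /* stub hardware path */

    static int do_crypt(unsigned int cryptlen, const void *src, const void *dst)
    {
        bool need_fallback = false;

        if (!src || !dst)
            return -1;                  /* truly invalid: reject */
        if (cryptlen > MAX_HW_LEN)
            need_fallback = true;       /* valid, just too big for the engine */

        return need_fallback ? soft_crypto() : hw_crypto();
    }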
@@ -2233,55 +2213,37 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
 {
     struct aead_request *req = sreq->aead_req.aead_req;
     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-    size_t authsize = crypto_aead_authsize(tfm);
+    size_t sz = crypto_aead_authsize(tfm);
     u8 c_mode = ctx->c_ctx.c_mode;
-    struct device *dev = ctx->dev;
     int ret;

-    if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
-        req->assoclen > SEC_MAX_AAD_LEN)) {
-        dev_err(dev, "aead input spec error!\n");
+    if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len))
         return -EINVAL;
-    }

-    if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
-        (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
-        authsize & MAC_LEN_MASK)))) {
-        dev_err(dev, "aead input mac length error!\n");
+    if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+             req->assoclen > SEC_MAX_AAD_LEN))
         return -EINVAL;
-    }

     if (c_mode == SEC_CMODE_CCM) {
-        if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
-            dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
+        if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN))
             return -EINVAL;
-        }
-        ret = aead_iv_demension_check(req);
-        if (ret) {
-            dev_err(dev, "aead input iv param error!\n");
-            return ret;
-        }
-    }

-    if (sreq->c_req.encrypt)
-        sreq->c_req.c_len = req->cryptlen;
-    else
-        sreq->c_req.c_len = req->cryptlen - authsize;
-    if (c_mode == SEC_CMODE_CBC) {
-        if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
-            dev_err(dev, "aead crypto length error!\n");
+        ret = aead_iv_demension_check(req);
+        if (unlikely(ret))
+            return -EINVAL;
+    } else if (c_mode == SEC_CMODE_CBC) {
+        if (unlikely(sz & WORD_MASK))
+            return -EINVAL;
+        if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
             return -EINVAL;
-        }
     }

     return 0;
 }

-static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback)
 {
     struct aead_request *req = sreq->aead_req.aead_req;
-    struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-    size_t authsize = crypto_aead_authsize(tfm);
     struct device *dev = ctx->dev;
     u8 c_alg = ctx->c_ctx.c_alg;

@@ -2290,12 +2252,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
         return -EINVAL;
     }

-    if (ctx->sec->qm.ver == QM_HW_V2) {
-        if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
-            req->cryptlen <= authsize))) {
-            ctx->a_ctx.fallback = true;
-            return -EINVAL;
-        }
+    if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC &&
+             sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+        dev_err(dev, "aead cbc mode input data length error!\n");
+        return -EINVAL;
     }

     /* Support AES or SM4 */
@@ -2304,8 +2264,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
         return -EINVAL;
     }

-    if (unlikely(sec_aead_spec_check(ctx, sreq)))
+    if (unlikely(sec_aead_spec_check(ctx, sreq))) {
+        *need_fallback = true;
         return -EINVAL;
+    }

     if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
         SEC_PBUF_SZ)
@@ -2321,16 +2283,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
                 bool encrypt)
 {
     struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
-    struct device *dev = ctx->dev;
     struct aead_request *subreq;
     int ret;

-    /* Kunpeng920 aead mode not support input 0 size */
-    if (!a_ctx->fallback_aead_tfm) {
-        dev_err(dev, "aead fallback tfm is NULL!\n");
-        return -EINVAL;
-    }
-
     subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
     if (!subreq)
         return -ENOMEM;
@@ -2356,16 +2311,19 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
     struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
     struct sec_req *req = aead_request_ctx(a_req);
     struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+    size_t sz = crypto_aead_authsize(tfm);
+    bool need_fallback = false;
     int ret;

     req->flag = a_req->base.flags;
     req->aead_req.aead_req = a_req;
     req->c_req.encrypt = encrypt;
     req->ctx = ctx;
+    req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);

-    ret = sec_aead_param_check(ctx, req);
+    ret = sec_aead_param_check(ctx, req, &need_fallback);
     if (unlikely(ret)) {
-        if (ctx->a_ctx.fallback)
+        if (need_fallback)
             return sec_aead_soft_crypto(ctx, a_req, encrypt);
         return -EINVAL;
     }
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 27a0ee5ad913..04725b514382 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -23,17 +23,6 @@ enum sec_hash_alg {
     SEC_A_HMAC_SHA512 = 0x15,
 };

-enum sec_mac_len {
-    SEC_HMAC_CCM_MAC = 16,
-    SEC_HMAC_GCM_MAC = 16,
-    SEC_SM3_MAC = 32,
-    SEC_HMAC_SM3_MAC = 32,
-    SEC_HMAC_MD5_MAC = 16,
-    SEC_HMAC_SHA1_MAC = 20,
-    SEC_HMAC_SHA256_MAC = 32,
-    SEC_HMAC_SHA512_MAC = 64,
-};
-
 enum sec_cmode {
     SEC_CMODE_ECB = 0x0,
     SEC_CMODE_CBC = 0x1,
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 7e93159c3b6b..d5df3d2da50c 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -436,7 +436,7 @@ static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
     struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

     if (ctx->flags & DRIVER_FLAGS_SG)
-        dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
+        dma_unmap_sg(hdev->dev, ctx->sg, 1, DMA_TO_DEVICE);

     return 0;
 }
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index f44c08f5f5ec..af4b978189e5 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -249,7 +249,9 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
     safexcel_complete(priv, ring);

     if (sreq->nents) {
-        dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+        dma_unmap_sg(priv->dev, areq->src,
+                 sg_nents_for_len(areq->src, areq->nbytes),
+                 DMA_TO_DEVICE);
         sreq->nents = 0;
     }

@@ -497,7 +499,9 @@ unmap_result:
              DMA_FROM_DEVICE);
unmap_sg:
     if (req->nents) {
-        dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+        dma_unmap_sg(priv->dev, areq->src,
+                 sg_nents_for_len(areq->src, areq->nbytes),
+                 DMA_TO_DEVICE);
         req->nents = 0;
     }
cdesc_rollback:
diff --git a/drivers/crypto/intel/iaa/Makefile b/drivers/crypto/intel/iaa/Makefile
index b64b208d2344..55bda7770fac 100644
--- a/drivers/crypto/intel/iaa/Makefile
+++ b/drivers/crypto/intel/iaa/Makefile
@@ -3,7 +3,7 @@
 # Makefile for IAA crypto device drivers
 #

-ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE='"IDXD"'

 obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) := iaa_crypto.o
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 237f87000070..df2728cccf8b 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -173,7 +173,7 @@ static int set_iaa_sync_mode(const char *name)
         async_mode = false;
         use_irq = false;
     } else if (sysfs_streq(name, "async")) {
-        async_mode = true;
+        async_mode = false;
         use_irq = false;
     } else if (sysfs_streq(name, "async_irq")) {
         async_mode = true;
@@ -1126,8 +1126,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
             struct idxd_wq *wq,
             dma_addr_t src_addr, unsigned int slen,
             dma_addr_t dst_addr, unsigned int *dlen,
-            u32 *compression_crc,
-            bool disable_async)
+            u32 *compression_crc)
 {
     struct iaa_device_compression_mode *active_compression_mode;
     struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
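The two Makefile tweaks in this series (iaa above, qat_common below) track the kernel's 6.13 switch of symbol namespaces from plain identifiers to string literals, which is why a namespace injected via the preprocessor now has to expand to a quoted string. A minimal sketch of both sides of that contract, with a dummy exported symbol:

    #include <linux/module.h>

    int iaa_example_counter;                        /* dummy exported symbol */
    EXPORT_SYMBOL_NS(iaa_example_counter, "IDXD");  /* provider: quoted namespace */

    MODULE_IMPORT_NS("IDXD");                       /* consumer side */
    MODULE_LICENSE("GPL");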
@@ -1170,7 +1169,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
     desc->src2_size = sizeof(struct aecs_comp_table_record);
     desc->completion_addr = idxd_desc->compl_dma;

-    if (ctx->use_irq && !disable_async) {
+    if (ctx->use_irq) {
         desc->flags |= IDXD_OP_FLAG_RCI;

         idxd_desc->crypto.req = req;
@@ -1183,8 +1182,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
             " src_addr %llx, dst_addr %llx\n", __func__,
             active_compression_mode->name,
             src_addr, dst_addr);
-    } else if (ctx->async_mode && !disable_async)
-        req->base.data = idxd_desc;
+    }

     dev_dbg(dev, "%s: compression mode %s,"
         " desc->src1_addr %llx, desc->src1_size %d,"
@@ -1204,7 +1202,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
     update_total_comp_calls();
     update_wq_comp_calls(wq);

-    if (ctx->async_mode && !disable_async) {
+    if (ctx->async_mode) {
         ret = -EINPROGRESS;
         dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
         goto out;
@@ -1224,7 +1222,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,

     *compression_crc = idxd_desc->iax_completion->crc;

-    if (!ctx->async_mode || disable_async)
+    if (!ctx->async_mode)
         idxd_free_desc(wq, idxd_desc);
out:
     return ret;
@@ -1421,8 +1419,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
             " src_addr %llx, dst_addr %llx\n", __func__,
             active_compression_mode->name,
             src_addr, dst_addr);
-    } else if (ctx->async_mode && !disable_async)
-        req->base.data = idxd_desc;
+    }

     dev_dbg(dev, "%s: decompression mode %s,"
         " desc->src1_addr %llx, desc->src1_size %d,"
@@ -1490,13 +1487,11 @@ static int iaa_comp_acompress(struct acomp_req *req)
     struct iaa_compression_ctx *compression_ctx;
     struct crypto_tfm *tfm = req->base.tfm;
     dma_addr_t src_addr, dst_addr;
-    bool disable_async = false;
     int nr_sgs, cpu, ret = 0;
     struct iaa_wq *iaa_wq;
     u32 compression_crc;
     struct idxd_wq *wq;
     struct device *dev;
-    int order = -1;

     compression_ctx = crypto_tfm_ctx(tfm);

@@ -1526,21 +1521,6 @@ static int iaa_comp_acompress(struct acomp_req *req)

     iaa_wq = idxd_wq_get_private(wq);

-    if (!req->dst) {
-        gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
-
-        /* incompressible data will always be < 2 * slen */
-        req->dlen = 2 * req->slen;
-        order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
-        req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
-        if (!req->dst) {
-            ret = -ENOMEM;
-            order = -1;
-            goto out;
-        }
-        disable_async = true;
-    }
-
     dev = &wq->idxd->pdev->dev;

     nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
@@ -1570,7 +1550,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
         req->dst, req->dlen, sg_dma_len(req->dst));

     ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
-               &req->dlen, &compression_crc, disable_async);
+               &req->dlen, &compression_crc);
     if (ret == -EINPROGRESS)
         return ret;

@@ -1601,100 +1581,6 @@ err_map_dst:
out:
     iaa_wq_put(wq);

-    if (order >= 0)
-        sgl_free_order(req->dst, order);
-
-    return ret;
-}
-
-static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
-{
-    gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
-              GFP_KERNEL : GFP_ATOMIC;
-    struct crypto_tfm *tfm = req->base.tfm;
-    dma_addr_t src_addr, dst_addr;
-    int nr_sgs, cpu, ret = 0;
-    struct iaa_wq *iaa_wq;
-    struct device *dev;
-    struct idxd_wq *wq;
-    int order = -1;
-
-    cpu = get_cpu();
-    wq = wq_table_next_wq(cpu);
-    put_cpu();
-    if (!wq) {
-        pr_debug("no wq configured for cpu=%d\n", cpu);
-        return -ENODEV;
-    }
-
-    ret = iaa_wq_get(wq);
-    if (ret) {
-        pr_debug("no wq available for cpu=%d\n", cpu);
-        return -ENODEV;
-    }
-
-    iaa_wq = idxd_wq_get_private(wq);
-
-    dev = &wq->idxd->pdev->dev;
-
-    nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
-    if (nr_sgs <= 0 || nr_sgs > 1) {
-        dev_dbg(dev, "couldn't map src sg for iaa device %d,"
-            " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
-            iaa_wq->wq->id, ret);
-        ret = -EIO;
-        goto out;
-    }
-    src_addr = sg_dma_address(req->src);
-    dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
-        " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
-        req->src, req->slen, sg_dma_len(req->src));
-
-    req->dlen = 4 * req->slen; /* start with ~avg comp rato */
-alloc_dest:
-    order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
-    req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
-    if (!req->dst) {
-        ret = -ENOMEM;
-        order = -1;
-        goto out;
-    }
-
-    nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
-    if (nr_sgs <= 0 || nr_sgs > 1) {
-        dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
-            " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
-            iaa_wq->wq->id, ret);
-        ret = -EIO;
-        goto err_map_dst;
-    }
-
-    dst_addr = sg_dma_address(req->dst);
-    dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
-        " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
-        req->dst, req->dlen, sg_dma_len(req->dst));
-
-    ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
-                 dst_addr, &req->dlen, true);
-    if (ret == -EOVERFLOW) {
-        dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
-        req->dlen *= 2;
-        if (req->dlen > CRYPTO_ACOMP_DST_MAX)
-            goto err_map_dst;
-        goto alloc_dest;
-    }
-
-    if (ret != 0)
-        dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);
-
-    dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
-err_map_dst:
-    dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
-out:
-    iaa_wq_put(wq);
-
-    if (order >= 0)
-        sgl_free_order(req->dst, order);
-
     return ret;
 }

@@ -1717,9 +1603,6 @@ static int iaa_comp_adecompress(struct acomp_req *req)
         return -EINVAL;
     }

-    if (!req->dst)
-        return iaa_comp_adecompress_alloc_dest(req);
-
     cpu = get_cpu();
     wq = wq_table_next_wq(cpu);
     put_cpu();
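With iaa_comp_adecompress_alloc_dest() gone, the driver no longer fabricates a destination buffer when req->dst is NULL; callers are expected to supply one, which appears to track the acomp API dropping its NULL-destination convention. A minimal sketch of the caller-side usage this assumes (kernel acomp interface, synchronous-style, error handling trimmed):

    #include <crypto/acompress.h>

    static int compress_once(struct crypto_acomp *tfm,
                             struct scatterlist *src, unsigned int slen,
                             struct scatterlist *dst, unsigned int dlen)
    {
        struct acomp_req *req = acomp_request_alloc(tfm);
        int ret;

        if (!req)
            return -ENOMEM;
        acomp_request_set_params(req, src, dst, slen, dlen); /* dst always provided */
        ret = crypto_acomp_compress(req);
        acomp_request_free(req);
        return ret;
    }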
@@ -1800,19 +1683,10 @@ static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)

     return 0;
 }

-static void dst_free(struct scatterlist *sgl)
-{
-    /*
-     * Called for req->dst = NULL cases but we free elsewhere
-     * using sgl_free_order().
-     */
-}
-
 static struct acomp_alg iaa_acomp_fixed_deflate = {
     .init           = iaa_comp_init_fixed,
     .compress       = iaa_comp_acompress,
     .decompress     = iaa_comp_adecompress,
-    .dst_free       = dst_free,
     .base           = {
         .cra_name       = "deflate",
         .cra_driver_name    = "deflate-iaa",
diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
index f8a77bff8844..e43361392c83 100644
--- a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -471,6 +471,7 @@ static int init_ixp_crypto(struct device *dev)
             return -ENODEV;
         }
         npe_id = npe_spec.args[0];
+        of_node_put(npe_spec.np);

         ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
                                &queue_spec);
@@ -479,6 +480,7 @@ static int init_ixp_crypto(struct device *dev)
             return -ENODEV;
         }
         recv_qid = queue_spec.args[0];
+        of_node_put(queue_spec.np);

         ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
                                &queue_spec);
@@ -487,6 +489,7 @@ static int init_ixp_crypto(struct device *dev)
             return -ENODEV;
         }
         send_qid = queue_spec.args[0];
+        of_node_put(queue_spec.np);
     } else {
         /*
          * Hardcoded engine when using platform data, this goes away
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index e54c79890d44..fdeca861933c 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -68,6 +68,7 @@ struct ocs_hcu_ctx {
  * @sg_data_total:  Total data in the SG list at any time.
  * @sg_data_offset: Offset into the data of the current individual SG node.
  * @sg_dma_nents:   Number of sg entries mapped in dma_list.
+ * @nents:          Number of entries in the scatterlist.
  */
 struct ocs_hcu_rctx {
     struct ocs_hcu_dev *hcu_dev;
@@ -91,6 +92,7 @@ struct ocs_hcu_rctx {
     unsigned int sg_data_total;
     unsigned int sg_data_offset;
     unsigned int sg_dma_nents;
+    unsigned int nents;
 };

 /**
@@ -199,7 +201,7 @@ static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,

     /* Unmap req->src (if mapped). */
     if (rctx->sg_dma_nents) {
-        dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
+        dma_unmap_sg(dev, req->src, rctx->nents, DMA_TO_DEVICE);
         rctx->sg_dma_nents = 0;
     }

@@ -260,6 +262,10 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
             rc = -ENOMEM;
             goto cleanup;
         }
+
+        /* Save the value of nents to pass to dma_unmap_sg. */
+        rctx->nents = nents;
+
         /*
          * The value returned by dma_map_sg() can be < nents; so update
          * nents accordingly.
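The ixp4xx hunks above plug device-node leaks: of_parse_phandle_with_fixed_args() returns args.np with its refcount raised, and the caller must drop it once the argument has been copied out. A sketch of the balanced pattern using the kernel OF API:

    #include <linux/of.h>

    static int read_fixed_arg(struct device_node *np, const char *list, u32 *out)
    {
        struct of_phandle_args spec;
        int ret;

        ret = of_parse_phandle_with_fixed_args(np, list, 1, 0, &spec);
        if (ret)
            return ret;

        *out = spec.args[0];
        of_node_put(spec.np);   /* balance the reference taken by the parser */
        return 0;
    }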
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 9faef33e54bd..ef5f03be4190 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -199,7 +199,6 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
               ICP_ACCEL_CAPABILITIES_SM4 |
               ICP_ACCEL_CAPABILITIES_AES_V2 |
               ICP_ACCEL_CAPABILITIES_ZUC |
-              ICP_ACCEL_CAPABILITIES_ZUC_256 |
               ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT |
               ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;

@@ -231,17 +230,11 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)

     if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) {
         capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
-        capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
         capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT;
     }

-    if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) {
+    if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE)
         capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
-        capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
-    }
-
-    if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE)
-        capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;

     capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
               ICP_ACCEL_CAPABILITIES_SM2 |
@@ -420,6 +413,7 @@ static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask)
     dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK;
     dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK;
     dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK;
+    dev_err_mask->parerr_wat_wcp_mask = ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK;
     dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK;
 }
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
index f49818a13013..41420e349572 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
@@ -181,11 +181,19 @@ static void adf_remove(struct pci_dev *pdev)
     adf_cleanup_accel(accel_dev);
 }

+static void adf_shutdown(struct pci_dev *pdev)
+{
+    struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+    adf_dev_down(accel_dev);
+}
+
 static struct pci_driver adf_driver = {
     .id_table = adf_pci_tbl,
     .name = ADF_420XX_DEVICE_NAME,
     .probe = adf_probe,
     .remove = adf_remove,
+    .shutdown = adf_shutdown,
     .sriov_configure = adf_sriov_configure,
     .err_handler = &adf_err_handler,
 };
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 659905e45950..01b34eda83e9 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -183,11 +183,19 @@ static void adf_remove(struct pci_dev *pdev)
     adf_cleanup_accel(accel_dev);
 }

+static void adf_shutdown(struct pci_dev *pdev)
+{
+    struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+    adf_dev_down(accel_dev);
+}
+
 static struct pci_driver adf_driver = {
     .id_table = adf_pci_tbl,
     .name = ADF_4XXX_DEVICE_NAME,
     .probe = adf_probe,
     .remove = adf_remove,
+    .shutdown = adf_shutdown,
     .sriov_configure = adf_sriov_configure,
     .err_handler = &adf_err_handler,
 };
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index 4d18057745d4..b776f7ea0dfb 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -19,6 +19,13 @@
 #include <adf_dbgfs.h>
 #include "adf_c3xxx_hw_data.h"

+static void adf_shutdown(struct pci_dev *pdev)
+{
+    struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+    adf_dev_down(accel_dev);
+}
+
 static const struct pci_device_id adf_pci_tbl[] = {
     { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
     { }
@@ -33,6 +40,7 @@ static struct pci_driver adf_driver = {
     .name = ADF_C3XXX_DEVICE_NAME,
     .probe = adf_probe,
     .remove = adf_remove,
+    .shutdown = adf_shutdown,
     .sriov_configure = adf_sriov_configure,
     .err_handler = &adf_err_handler,
 };
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index e6b5de55434e..5310149c311e 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -19,6 +19,13 @@
 #include <adf_dbgfs.h>
 #include "adf_c62x_hw_data.h"

+static void adf_shutdown(struct pci_dev *pdev)
+{
+    struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+    adf_dev_down(accel_dev);
+}
+
 static const struct pci_device_id adf_pci_tbl[] = {
     { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
     { }
@@ -33,6 +40,7 @@ static struct pci_driver adf_driver = {
     .name = ADF_C62X_DEVICE_NAME,
     .probe = adf_probe,
     .remove = adf_remove,
+    .shutdown = adf_shutdown,
     .sriov_configure = adf_sriov_configure,
     .err_handler = &adf_err_handler,
 };
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index eac73cbfdd38..7acf9c576149 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CRYPTO_QAT
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"'
 intel_qat-objs := adf_cfg.o \
     adf_isr.o \
     adf_ctl_drv.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 41a0979e68c1..e70adb90e5e4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -585,6 +585,28 @@ static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
     ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
     ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
     ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
+
+    /*
+     * Verify whether any exceptions were raised during the bank save process.
+     * If exceptions occurred, the status and exception registers cannot
+     * be directly restored. Consequently, further restoration is not
+     * feasible, and the current state of the ring should be maintained.
+     */
+    val = state->ringexpstat;
+    if (val) {
+        pr_info("QAT: Bank %u state not fully restored due to exception in saved state (%#x)\n",
+            bank, val);
+        return 0;
+    }
+
+    /* Ensure that the restoration process completed without exceptions */
+    tmp_val = ops->read_csr_exp_stat(base, bank);
+    if (tmp_val) {
+        pr_err("QAT: Bank %u restored with exception: %#x\n",
+               bank, tmp_val);
+        return -EFAULT;
+    }
+
     ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);

     /* Check that all ring statuses match the saved state. */
@@ -618,13 +640,6 @@ static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
     if (ret)
         return ret;

-    tmp_val = ops->read_csr_exp_stat(base, bank);
-    val = state->ringexpstat;
-    if (tmp_val && !val) {
-        pr_err("QAT: Bank was restored with exception: 0x%x\n", val);
-        return -EINVAL;
-    }
-
     return 0;
 }
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
index 2dd3772bf58a..0f7f00a19e7d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
@@ -695,7 +695,7 @@ static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev,
     if (err_mask->parerr_wat_wcp_mask)
         adf_poll_slicehang_csr(accel_dev, csr,
                        ADF_GEN4_SLICEHANGSTATUS_WAT_WCP,
-                       "ath_cph");
+                       "wat_wcp");

     return false;
 }
@@ -1043,63 +1043,16 @@ static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev,
     return reset_required;
 }

-static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
+static void adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
                    void __iomem *csr, u32 iastatssm)
 {
-    struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
-    u32 reg;
-
     if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT))
-        return false;
-
-    reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC);
-    reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT;
-    if (reg) {
-        ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
-        ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg);
-    }
-
-    reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH);
-    reg &= err_mask->parerr_ath_cph_mask;
-    if (reg) {
-        ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
-        ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg);
-    }
-
-    reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT);
-    reg &= err_mask->parerr_cpr_xlt_mask;
-    if (reg) {
-        ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
-        ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg);
-    }
-
-    reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS);
-    reg &= err_mask->parerr_dcpr_ucs_mask;
-    if (reg) {
-        ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
-        ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg);
-    }
-
-    reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE);
-    reg &= err_mask->parerr_pke_mask;
-    if (reg) {
-        ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
-        ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg);
-    }
-
-    if (err_mask->parerr_wat_wcp_mask) {
-        reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP);
-        reg &= err_mask->parerr_wat_wcp_mask;
-        if (reg) {
-            ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
-            ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP,
-                   reg);
-        }
-    }
+        return;

+    ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
     dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported");
-
-    return false;
+    return;
 }

 static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev,
@@ -1171,8 +1124,8 @@ static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev,
     reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm);
     reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm);
     reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm);
-    reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm);
     reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm);
+    adf_handle_rf_parr_err(accel_dev, csr, iastatssm);

     ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index c75d0b6cb0ad..31d1ef0cb1f5 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -155,7 +155,6 @@ static int adf_do_enable_sriov(struct adf_accel_dev *accel_dev)
     if (!device_iommu_mapped(&GET_DEV(accel_dev))) {
         dev_warn(&GET_DEV(accel_dev),
              "IOMMU should be enabled for SR-IOV to work correctly\n");
-        return -EINVAL;
     }

     if (adf_dev_started(accel_dev)) {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
index e2dd568b87b5..621b5d3dfcef 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
@@ -31,8 +31,10 @@ static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
     struct adf_etr_ring_data *ring = sfile->private;

     if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
-             ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+             ADF_MSG_SIZE_TO_BYTES(ring->msg_size))) {
+        (*pos)++;
         return NULL;
+    }

     return ring->base_addr +
         (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
index 338acf29c487..5a7b43f9150d 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c
@@ -38,7 +38,7 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
         for (i = 0; i < blout->num_mapped_bufs; i++) {
             dma_unmap_single(dev, blout->buffers[i].addr,
                      blout->buffers[i].len,
-                     DMA_FROM_DEVICE);
+                     DMA_BIDIRECTIONAL);
         }
         dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
@@ -162,7 +162,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
             }
             buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
                              sg->length - left,
-                             DMA_FROM_DEVICE);
+                             DMA_BIDIRECTIONAL);
             if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
                 goto err_out;
             buffers[y].len = sg->length;
@@ -204,7 +204,7 @@ err_out:
             if (!dma_mapping_error(dev, buflout->buffers[i].addr))
                 dma_unmap_single(dev, buflout->buffers[i].addr,
                          buflout->buffers[i].len,
-                         DMA_FROM_DEVICE);
+                         DMA_BIDIRECTIONAL);
     }

     if (!buf->sgl_dst_valid)
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index 7842a9f22178..cf94ba3011d5 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -197,7 +197,7 @@ static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
     struct adf_dc_data *dc_data = NULL;
     u8 *obuff = NULL;

-    dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
+    dc_data = kzalloc_node(sizeof(*dc_data), GFP_KERNEL, dev_to_node(dev));
     if (!dc_data)
         goto err;

@@ -205,7 +205,7 @@ static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
     if (!obuff)
         goto err;

-    obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
+    obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_BIDIRECTIONAL);
     if (unlikely(dma_mapping_error(dev, obuff_p)))
         goto err;

@@ -233,9 +233,9 @@ static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
         return;

     dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
-             DMA_FROM_DEVICE);
+             DMA_BIDIRECTIONAL);
     kfree_sensitive(dc_data->ovf_buff);
-    devm_kfree(dev, dc_data);
+    kfree(dc_data);
     accel_dev->dc_data = NULL;
 }
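The adf_ring_next() fix follows the seq_file rule that a ->next handler must advance *pos even when it has nothing more to yield; returning NULL at an unchanged position can make seq_read() spin. An illustrative handler over a flat message ring (the ring type here is a stand-in):

    #include <linux/seq_file.h>

    struct msg_ring { char *base; loff_t nr_msgs; size_t msg_size; }; /* illustrative */

    static void *ring_next(struct seq_file *sfile, void *v, loff_t *pos)
    {
        struct msg_ring *ring = sfile->private;

        if (*pos >= ring->nr_msgs) {
            (*pos)++;               /* still move past the end */
            return NULL;
        }
        return ring->base + ring->msg_size * (*pos)++;
    }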
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index 2a50cce41515..5ddf567ffcad 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -19,6 +19,13 @@
 #include <adf_dbgfs.h>
 #include "adf_dh895xcc_hw_data.h"
 
+static void adf_shutdown(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	adf_dev_down(accel_dev);
+}
+
 static const struct pci_device_id adf_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
 	{ }
@@ -33,6 +40,7 @@ static struct pci_driver adf_driver = {
 	.name = ADF_DH895XCC_DEVICE_NAME,
 	.probe = adf_probe,
 	.remove = adf_remove,
+	.shutdown = adf_shutdown,
 	.sriov_configure = adf_sriov_configure,
 	.err_handler = &adf_err_handler,
 };
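The adf_drv.c hunk above wires a .shutdown handler into the PCI driver. Unlike .remove, .shutdown also runs on reboot and kexec paths, so quiescing the accelerator there stops in-flight DMA and interrupts before the next kernel takes over. A condensed sketch of the shape (the .name string is illustrative):

	static void adf_shutdown(struct pci_dev *pdev)
	{
		struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

		adf_dev_down(accel_dev);
	}

	static struct pci_driver adf_driver = {
		.name     = "qat_dh895xcc",
		.probe    = adf_probe,
		.remove   = adf_remove,
		.shutdown = adf_shutdown,	/* runs on reboot and kexec */
	};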
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 5fd31ba715c2..24273cb082ba 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -94,7 +94,7 @@ static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
 
 static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
 {
-	if (engine->chain.first && engine->chain.last)
+	if (engine->chain_hw.first && engine->chain_hw.last)
 		return mv_cesa_tdma_process(engine, status);
 
 	return mv_cesa_std_process(engine, status);
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index d215a6bed6bc..50ca1039fdaa 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -440,8 +440,10 @@ struct mv_cesa_dev {
  *			SRAM
  * @queue:		fifo of the pending crypto requests
  * @load:		engine load counter, useful for load balancing
- * @chain:		list of the current tdma descriptors being processed
- *			by this engine.
+ * @chain_hw:		list of the current tdma descriptors being processed
+ *			by the hardware.
+ * @chain_sw:		list of the current tdma descriptors that will be
+ *			submitted to the hardware.
 * @complete_queue:	fifo of the processed requests by the engine
 *
 * Structure storing CESA engine information.
@@ -463,7 +465,8 @@ struct mv_cesa_engine {
 	struct gen_pool *pool;
 	struct crypto_queue queue;
 	atomic_t load;
-	struct mv_cesa_tdma_chain chain;
+	struct mv_cesa_tdma_chain chain_hw;
+	struct mv_cesa_tdma_chain chain_sw;
 	struct list_head complete_queue;
 	int irq;
 };
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index 0f37dfd42d85..eabed9d977df 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -75,9 +75,12 @@ mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
 static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
 {
 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	struct mv_cesa_engine *engine = creq->base.engine;
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
 		mv_cesa_skcipher_dma_cleanup(req);
+
+	atomic_sub(req->cryptlen, &engine->load);
 }
 
 static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
@@ -212,7 +215,6 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
 	struct mv_cesa_engine *engine = creq->base.engine;
 	unsigned int ivsize;
 
-	atomic_sub(skreq->cryptlen, &engine->load);
 	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
@@ -459,6 +461,9 @@ static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
 	struct mv_cesa_engine *engine;
 
+	if (!req->cryptlen)
+		return 0;
+
 	ret = mv_cesa_skcipher_req_init(req, tmpl);
 	if (ret)
 		return ret;
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index f150861ceaf6..e339ce7ad533 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -110,9 +110,12 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
 static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
 {
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_engine *engine = creq->base.engine;
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
 		mv_cesa_ahash_dma_cleanup(req);
+
+	atomic_sub(req->nbytes, &engine->load);
 }
 
 static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
@@ -395,8 +398,6 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
 			}
 		}
 	}
-
-	atomic_sub(ahashreq->nbytes, &engine->load);
 }
 
 static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
@@ -663,7 +664,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
 	if (ret)
 		goto err_free_tdma;
 
-	if (iter.src.sg) {
+	if (iter.base.len > iter.src.op_offset) {
 		/*
 		 * Add all the new data, inserting an operation block and
 		 * launch command between each full SRAM block-worth of
diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index 388a06e180d6..243305354420 100644
--- a/drivers/crypto/marvell/cesa/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -38,6 +38,15 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
 {
 	struct mv_cesa_engine *engine = dreq->engine;
 
+	spin_lock_bh(&engine->lock);
+	if (engine->chain_sw.first == dreq->chain.first) {
+		engine->chain_sw.first = NULL;
+		engine->chain_sw.last = NULL;
+	}
+	engine->chain_hw.first = dreq->chain.first;
+	engine->chain_hw.last = dreq->chain.last;
+	spin_unlock_bh(&engine->lock);
+
 	writel_relaxed(0, engine->regs + CESA_SA_CFG);
 	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
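The cesa hunks above split the engine's single descriptor chain into chain_sw (requests chained while queued) and chain_hw (descriptors the engine is actually executing); mv_cesa_dma_step() performs the handover under the engine lock, so the interrupt path in mv_cesa_tdma_process() only ever walks descriptors the hardware owns. A condensed sketch of the handover (field names as in the hunks, control flow simplified):

	/* Hand the request's descriptor chain from the software list to
	 * the hardware list before kicking the engine.
	 */
	spin_lock_bh(&engine->lock);
	if (engine->chain_sw.first == dreq->chain.first) {
		/* the request leaves the software (to-be-submitted) chain... */
		engine->chain_sw.first = NULL;
		engine->chain_sw.last = NULL;
	}
	/* ...and becomes the chain the hardware is processing */
	engine->chain_hw.first = dreq->chain.first;
	engine->chain_hw.last = dreq->chain.last;
	spin_unlock_bh(&engine->lock);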
@@ -96,25 +105,27 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
 void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
 			struct mv_cesa_req *dreq)
 {
-	if (engine->chain.first == NULL && engine->chain.last == NULL) {
-		engine->chain.first = dreq->chain.first;
-		engine->chain.last = dreq->chain.last;
-	} else {
-		struct mv_cesa_tdma_desc *last;
+	struct mv_cesa_tdma_desc *last = engine->chain_sw.last;
 
-		last = engine->chain.last;
+	/*
+	 * Break the DMA chain if the request being queued needs the IV
+	 * regs to be set before launching the request.
+	 */
+	if (!last || dreq->chain.first->flags & CESA_TDMA_SET_STATE)
+		engine->chain_sw.first = dreq->chain.first;
+	else {
 		last->next = dreq->chain.first;
-		engine->chain.last = dreq->chain.last;
-
-		/*
-		 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
-		 * the last element of the current chain, or if the request
-		 * being queued needs the IV regs to be set before lauching
-		 * the request.
-		 */
-		if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
-		    !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
-			last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
+		last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
+	}
+	last = dreq->chain.last;
+	engine->chain_sw.last = last;
+	/*
+	 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
+	 * the last element of the current chain.
+	 */
+	if (last->flags & CESA_TDMA_BREAK_CHAIN) {
+		engine->chain_sw.first = NULL;
+		engine->chain_sw.last = NULL;
+	}
 }
@@ -127,7 +138,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
 
 	tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
 
-	for (tdma = engine->chain.first; tdma; tdma = next) {
+	for (tdma = engine->chain_hw.first; tdma; tdma = next) {
 		spin_lock_bh(&engine->lock);
 		next = tdma->next;
 		spin_unlock_bh(&engine->lock);
@@ -149,12 +160,12 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
 						 &backlog);
 
 			/* Re-chaining to the next request */
-			engine->chain.first = tdma->next;
+			engine->chain_hw.first = tdma->next;
 			tdma->next = NULL;
 
 			/* If this is the last request, clear the chain */
-			if (engine->chain.first == NULL)
-				engine->chain.last = NULL;
+			if (engine->chain_hw.first == NULL)
+				engine->chain_hw.last = NULL;
 			spin_unlock_bh(&engine->lock);
 
 			ctx = crypto_tfm_ctx(req->tfm);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 5c9484646172..357a7c6ac837 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -1493,6 +1493,7 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
 	dma_addr_t rptr_baddr;
 	struct pci_dev *pdev;
 	u32 len, compl_rlen;
+	int timeout = 10000;
 	int ret, etype;
 	void *rptr;
 
@@ -1555,16 +1556,27 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
 				etype);
 		otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
 		lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
+		timeout = 10000;
 
 		while (lfs->ops->cpt_get_compcode(result) ==
-		       OTX2_CPT_COMPLETION_CODE_INIT)
+		       OTX2_CPT_COMPLETION_CODE_INIT) {
 			cpu_relax();
+			udelay(1);
+			timeout--;
+			if (!timeout) {
+				ret = -ENODEV;
+				cptpf->is_eng_caps_discovered = false;
+				dev_warn(&pdev->dev, "Timeout on CPT load_fvc completion poll\n");
+				goto error_no_response;
+			}
+		}
 
 		cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
 	}
-	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
 	cptpf->is_eng_caps_discovered = true;
 
+error_no_response:
+	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
 free_result:
 	kfree(result);
 lf_cleanup:
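The otx2_cptpf_ucode.c hunk above bounds what used to be an open-ended busy-wait on the microcode completion code: with udelay(1) per iteration, the 10000-iteration budget caps the poll at roughly 10 ms before the capability probe gives up with -ENODEV. The generic shape, assuming a device-updated status word (the status pointer and constant are illustrative; <linux/iopoll.h> offers read_poll_timeout() for the same job):

	int timeout = 10000;

	while (READ_ONCE(*status) == STATUS_INIT) {	/* hypothetical status/constant */
		cpu_relax();
		udelay(1);
		if (!--timeout)
			return -ENODEV;	/* device never completed the command */
	}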
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
index 5387c68f3c9d..426244107037 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -264,9 +264,10 @@ static int cpt_process_ccode(struct otx2_cptlfs_info *lfs,
 			break;
 		}
 
-		dev_err(&pdev->dev,
-			"Request failed with software error code 0x%x\n",
-			cpt_status->s.uc_compcode);
+		pr_debug("Request failed with software error code 0x%x: algo = %s driver = %s\n",
+			 cpt_status->s.uc_compcode,
+			 info->req->areq->tfm->__crt_alg->cra_name,
+			 info->req->areq->tfm->__crt_alg->cra_driver_name);
 		otx2_cpt_dump_sg_list(pdev, info->req);
 		break;
 	}
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 77a6301f37f0..29c0c69d5905 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -265,12 +265,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
 		    MXS_DCP_CONTROL0_INTERRUPT |
 		    MXS_DCP_CONTROL0_ENABLE_CIPHER;
 
-	if (key_referenced)
-		/* Set OTP key bit to select the key via KEY_SELECT. */
-		desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
-	else
+	if (!key_referenced)
 		/* Payload contains the key. */
 		desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
+	else if (actx->key[0] == DCP_PAES_KEY_OTP)
+		/* Set OTP key bit to select the key via KEY_SELECT. */
+		desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
 
 	if (rctx->enc)
 		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index 35f2d0d8507e..7e98f174f69b 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -1144,6 +1144,7 @@ static void __init nxcop_get_capabilities(void)
 {
 	struct hv_vas_all_caps *hv_caps;
 	struct hv_nx_cop_caps *hv_nxc;
+	u64 feat;
 	int rc;
 
 	hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
@@ -1154,27 +1155,26 @@ static void __init nxcop_get_capabilities(void)
 	 */
 	rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0,
 				      (u64)virt_to_phys(hv_caps));
+	if (!rc)
+		feat = be64_to_cpu(hv_caps->feat_type);
+	kfree(hv_caps);
 	if (rc)
-		goto out;
+		return;
+	if (!(feat & VAS_NX_GZIP_FEAT_BIT))
+		return;
 
-	caps_feat = be64_to_cpu(hv_caps->feat_type);
 	/*
 	 * NX-GZIP feature available
 	 */
-	if (caps_feat & VAS_NX_GZIP_FEAT_BIT) {
-		hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
-		if (!hv_nxc)
-			goto out;
-		/*
-		 * Get capabilities for NX-GZIP feature
-		 */
-		rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
-					      VAS_NX_GZIP_FEAT,
-					      (u64)virt_to_phys(hv_nxc));
-	} else {
-		pr_err("NX-GZIP feature is not available\n");
-		rc = -EINVAL;
-	}
+	hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
+	if (!hv_nxc)
+		return;
+	/*
+	 * Get capabilities for NX-GZIP feature
+	 */
+	rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
+				      VAS_NX_GZIP_FEAT,
+				      (u64)virt_to_phys(hv_nxc));
 
 	if (!rc) {
 		nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor);
@@ -1184,13 +1184,10 @@ static void __init nxcop_get_capabilities(void)
 			be64_to_cpu(hv_nxc->min_compress_len);
 		nx_cop_caps.min_decompress_len =
 			be64_to_cpu(hv_nxc->min_decompress_len);
-	} else {
-		caps_feat = 0;
+		caps_feat = feat;
 	}
 
 	kfree(hv_nxc);
-out:
-	kfree(hv_caps);
 }
 
 static const struct vio_device_id nx842_vio_driver_ids[] = {
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 7d811728f047..97b56e92ea33 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -786,7 +786,7 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi
 	alg->init = qce_aead_init;
 	alg->exit = qce_aead_exit;
 
-	alg->base.cra_priority = 300;
+	alg->base.cra_priority = 275;
 	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
 			      CRYPTO_ALG_ALLOCATES_MEMORY |
 			      CRYPTO_ALG_KERN_DRIVER_ONLY |
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 28b5fd823827..3ec8297ed3ff 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -51,16 +51,19 @@ static void qce_unregister_algs(struct qce_device *qce)
 static int qce_register_algs(struct qce_device *qce)
 {
 	const struct qce_algo_ops *ops;
-	int i, ret = -ENODEV;
+	int i, j, ret = -ENODEV;
 
 	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
 		ops = qce_ops[i];
 		ret = ops->register_algs(qce);
-		if (ret)
-			break;
+		if (ret) {
+			for (j = i - 1; j >= 0; j--)
+				ops->unregister_algs(qce);
+			return ret;
+		}
 	}
 
-	return ret;
+	return 0;
 }
 
 static int qce_handle_request(struct crypto_async_request *async_req)
@@ -247,7 +250,7 @@ static int qce_crypto_probe(struct platform_device *pdev)
 
 	ret = qce_check_version(qce);
 	if (ret)
-		goto err_clks;
+		goto err_dma;
 
 	spin_lock_init(&qce->lock);
 	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index fc72af8aa9a7..71b748183cfa 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -482,7 +482,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
 
 	base = &alg->halg.base;
 	base->cra_blocksize = def->blocksize;
-	base->cra_priority = 300;
+	base->cra_priority = 175;
 	base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
 	base->cra_ctxsize = sizeof(struct qce_sha_ctx);
 	base->cra_alignmask = 0;
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 5b493fdc1e74..ffb334eb5b34 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -461,7 +461,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
 	alg->encrypt = qce_skcipher_encrypt;
 	alg->decrypt = qce_skcipher_decrypt;
 
-	alg->base.cra_priority = 300;
+	alg->base.cra_priority = 275;
 	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
 			      CRYPTO_ALG_ALLOCATES_MEMORY |
 			      CRYPTO_ALG_KERN_DRIVER_ONLY;
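The four qce hunks above all lower cra_priority from 300. When several implementations register under the same algorithm name ("sha256", "cbc(aes)", ...), the crypto API resolves an allocation to the highest-priority one, so reducing the driver's priority lets competing implementations win by default while the hardware path stays reachable by driver name. Illustrative use of the resolver (the driver-name string is indicative only):

	/* Resolves to the registered "cbc(aes)" implementation with the
	 * largest ->cra_priority ...
	 */
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);

	/* ... while an explicit driver name still pins a specific one. */
	struct crypto_skcipher *hw = crypto_alloc_skcipher("cbc-aes-qce", 0, 0);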
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
index ae7a0f8435fc..cd52807e76af 100644
--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -263,13 +263,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
 	unsigned int cmdlen;
 	int ret;
 
-	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN,
-					      &rctx->datbuf.addr, GFP_KERNEL);
-	if (!rctx->datbuf.buf)
-		return -ENOMEM;
-
-	rctx->datbuf.size = SE_AES_BUFLEN;
-	rctx->iv = (u32 *)req->iv;
+	rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv;
 	rctx->len = req->cryptlen;
 
 	/* Pad input to AES Block size */
@@ -278,18 +272,24 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
 		rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
 	}
 
+	rctx->datbuf.size = rctx->len;
+	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+					      &rctx->datbuf.addr, GFP_KERNEL);
+	if (!rctx->datbuf.buf)
+		return -ENOMEM;
+
 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
 
 	/* Prepare the command and submit for execution */
 	cmdlen = tegra_aes_prep_cmd(ctx, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 
 	/* Copy the result */
 	tegra_aes_update_iv(req, ctx);
 	scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
 
 	/* Free the buffer */
-	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
 			  rctx->datbuf.buf, rctx->datbuf.addr);
 
 	crypto_finalize_skcipher_request(se->engine, req, ret);
@@ -719,7 +719,7 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct
 
 	cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
 
-	return tegra_se_host1x_submit(se, cmdlen);
+	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 }
 
 static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
@@ -736,7 +736,7 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
 
 	/* Prepare command and submit */
 	cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 	if (ret)
 		return ret;
@@ -759,7 +759,7 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
 
 	/* Prepare command and submit */
 	cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 	if (ret)
 		return ret;
@@ -891,7 +891,7 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req
 	/* Prepare command and submit */
 	cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
 
-	return tegra_se_host1x_submit(se, cmdlen);
+	return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 }
 
 static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
@@ -1098,7 +1098,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
 
 	/* Prepare command and submit */
 	cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 	if (ret)
 		return ret;
@@ -1117,6 +1117,11 @@ static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
 	rctx->assoclen = req->assoclen;
 	rctx->authsize = crypto_aead_authsize(tfm);
 
+	if (rctx->encrypt)
+		rctx->cryptlen = req->cryptlen;
+	else
+		rctx->cryptlen = req->cryptlen - rctx->authsize;
+
 	memcpy(iv, req->iv, 16);
 
 	ret = tegra_ccm_check_iv(iv);
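The tegra-se-aes.c hunks above (and the tegra-se-hash.c ones further down) replace the fixed SE_AES_BUFLEN/SE_SHA_BUFLEN bounce buffers with allocations sized from the request itself: cryptlen rounded up to the AES block size for skciphers, and assoclen + authsize + cryptlen (plus slack for CCM formatting) for AEAD. Besides saving memory, this removes the implicit cap a fixed buffer placed on request sizes. The recurring pattern, condensed (names follow the hunks):

	rctx->datbuf.size = ALIGN(req->cryptlen, AES_BLOCK_SIZE);
	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
					      &rctx->datbuf.addr, GFP_KERNEL);
	if (!rctx->datbuf.buf)
		return -ENOMEM;

	/* ... submit the command, copy out the result ... */

	dma_free_coherent(se->dev, rctx->datbuf.size,
			  rctx->datbuf.buf, rctx->datbuf.addr);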
@@ -1145,30 +1150,26 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
 	struct tegra_se *se = ctx->se;
 	int ret;
 
+	ret = tegra_ccm_crypt_init(req, se, rctx);
+	if (ret)
+		return ret;
+
 	/* Allocate buffers required */
-	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+	rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
+	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
 					     &rctx->inbuf.addr, GFP_KERNEL);
 	if (!rctx->inbuf.buf)
 		return -ENOMEM;
 
-	rctx->inbuf.size = SE_AES_BUFLEN;
-
-	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+	rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
+	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
 					      &rctx->outbuf.addr, GFP_KERNEL);
 	if (!rctx->outbuf.buf) {
 		ret = -ENOMEM;
 		goto outbuf_err;
 	}
 
-	rctx->outbuf.size = SE_AES_BUFLEN;
-
-	ret = tegra_ccm_crypt_init(req, se, rctx);
-	if (ret)
-		goto out;
-
 	if (rctx->encrypt) {
-		rctx->cryptlen = req->cryptlen;
-
 		/* CBC MAC Operation */
 		ret = tegra_ccm_compute_auth(ctx, rctx);
 		if (ret)
@@ -1179,10 +1180,6 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
 		if (ret)
 			goto out;
 	} else {
-		rctx->cryptlen = req->cryptlen - ctx->authsize;
-		if (ret)
-			goto out;
-
 		/* CTR operation */
 		ret = tegra_ccm_do_ctr(ctx, rctx);
 		if (ret)
@@ -1195,11 +1192,11 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
 	}
 
 out:
-	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
 			  rctx->outbuf.buf, rctx->outbuf.addr);
 
 outbuf_err:
-	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+	dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
 			  rctx->inbuf.buf, rctx->inbuf.addr);
 
 	crypto_finalize_aead_request(ctx->se->engine, req, ret);
@@ -1215,23 +1212,6 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
 	struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
 	int ret;
 
-	/* Allocate buffers required */
-	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
-					     &rctx->inbuf.addr, GFP_KERNEL);
-	if (!rctx->inbuf.buf)
-		return -ENOMEM;
-
-	rctx->inbuf.size = SE_AES_BUFLEN;
-
-	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
-					      &rctx->outbuf.addr, GFP_KERNEL);
-	if (!rctx->outbuf.buf) {
-		ret = -ENOMEM;
-		goto outbuf_err;
-	}
-
-	rctx->outbuf.size = SE_AES_BUFLEN;
-
 	rctx->src_sg = req->src;
 	rctx->dst_sg = req->dst;
 	rctx->assoclen = req->assoclen;
@@ -1245,6 +1225,21 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
 	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
 	rctx->iv[3] = (1 << 24);
 
+	/* Allocate buffers required */
+	rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
+	rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
+					     &rctx->inbuf.addr, GFP_KERNEL);
+	if (!rctx->inbuf.buf)
+		return -ENOMEM;
+
+	rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
+	rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
+					      &rctx->outbuf.addr, GFP_KERNEL);
+	if (!rctx->outbuf.buf) {
+		ret = -ENOMEM;
+		goto outbuf_err;
+	}
+
 	/* If there is associated data perform GMAC operation */
 	if (rctx->assoclen) {
 		ret = tegra_gcm_do_gmac(ctx, rctx);
@@ -1268,11 +1263,11 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
 		ret = tegra_gcm_do_verify(ctx->se, rctx);
 
 out:
-	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+	dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
 			  rctx->outbuf.buf, rctx->outbuf.addr);
 
 outbuf_err:
-	dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+	dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
 			  rctx->inbuf.buf, rctx->inbuf.addr);
 
 	/* Finalize the request if there are no errors */
@@ -1499,6 +1494,11 @@ static int tegra_cmac_do_update(struct ahash_request *req)
 		return 0;
 	}
 
+	rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+					      &rctx->datbuf.addr, GFP_KERNEL);
+	if (!rctx->datbuf.buf)
+		return -ENOMEM;
+
 	/* Copy the previous residue first */
 	if (rctx->residue.size)
 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -1513,23 +1513,19 @@
 	rctx->residue.size = nresidue;
 
 	/*
-	 * If this is not the first 'update' call, paste the previous copied
+	 * If this is not the first task, paste the previous copied
 	 * intermediate results to the registers so that it gets picked up.
-	 * This is to support the import/export functionality.
 	 */
 	if (!(rctx->task & SHA_FIRST))
 		tegra_cmac_paste_result(ctx->se, rctx);
 
 	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 
-	ret = tegra_se_host1x_submit(se, cmdlen);
-	/*
-	 * If this is not the final update, copy the intermediate results
-	 * from the registers so that it can be used in the next 'update'
-	 * call. This is to support the import/export functionality.
-	 */
-	if (!(rctx->task & SHA_FINAL))
-		tegra_cmac_copy_result(ctx->se, rctx);
+	tegra_cmac_copy_result(ctx->se, rctx);
+
+	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
+			  rctx->datbuf.buf, rctx->datbuf.addr);
 
 	return ret;
 }
@@ -1545,17 +1541,34 @@ static int tegra_cmac_do_final(struct ahash_request *req)
 
 	if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
 		return crypto_shash_tfm_digest(ctx->fallback_tfm,
-					       rctx->datbuf.buf, 0, req->result);
+					       NULL, 0, req->result);
+	}
+
+	if (rctx->residue.size) {
+		rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
+						      &rctx->datbuf.addr, GFP_KERNEL);
+		if (!rctx->datbuf.buf) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
 	}
 
-	memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
 	rctx->datbuf.size = rctx->residue.size;
 	rctx->total_len += rctx->residue.size;
 	rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
 
+	/*
+	 * If this is not the first task, paste the previous copied
+	 * intermediate results to the registers so that it gets picked up.
+	 */
+	if (!(rctx->task & SHA_FIRST))
+		tegra_cmac_paste_result(ctx->se, rctx);
+
 	/* Prepare command and submit */
 	cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
-	ret = tegra_se_host1x_submit(se, cmdlen);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
 	if (ret)
 		goto out;
@@ -1567,8 +1580,10 @@ static int tegra_cmac_do_final(struct ahash_request *req)
 		writel(0, se->base + se->hw->regs->result + (i * 4));
 
 out:
-	dma_free_coherent(se->dev, SE_SHA_BUFLEN,
-			  rctx->datbuf.buf, rctx->datbuf.addr);
+	if (rctx->residue.size)
+		dma_free_coherent(se->dev, rctx->datbuf.size,
+				  rctx->datbuf.buf, rctx->datbuf.addr);
+out_free:
 	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
 			  rctx->residue.buf, rctx->residue.addr);
 	return ret;
@@ -1581,18 +1596,24 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct tegra_se *se = ctx->se;
-	int ret;
+	int ret = 0;
 
 	if (rctx->task & SHA_UPDATE) {
 		ret = tegra_cmac_do_update(req);
+		if (ret)
+			goto out;
+
 		rctx->task &= ~SHA_UPDATE;
 	}
 
 	if (rctx->task & SHA_FINAL) {
 		ret = tegra_cmac_do_final(req);
+		if (ret)
+			goto out;
+
 		rctx->task &= ~SHA_FINAL;
 	}
-
+out:
 	crypto_finalize_hash_request(se->engine, req, ret);
 
 	return 0;
@@ -1674,28 +1695,15 @@ static int tegra_cmac_init(struct ahash_request *req)
 	rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
 					       &rctx->residue.addr, GFP_KERNEL);
 	if (!rctx->residue.buf)
-		goto resbuf_fail;
+		return -ENOMEM;
 
 	rctx->residue.size = 0;
 
-	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
-					      &rctx->datbuf.addr, GFP_KERNEL);
-	if (!rctx->datbuf.buf)
-		goto datbuf_fail;
-
-	rctx->datbuf.size = 0;
-
 	/* Clear any previous result */
 	for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
 		writel(0, se->base + se->hw->regs->result + (i * 4));
 
 	return 0;
-
-datbuf_fail:
-	dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
-			  rctx->residue.addr);
-resbuf_fail:
-	return -ENOMEM;
 }
 
 static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
@@ -1752,10 +1760,13 @@ static int tegra_cmac_digest(struct ahash_request *req)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+	int ret;
 
-	tegra_cmac_init(req);
-	rctx->task |= SHA_UPDATE | SHA_FINAL;
+	ret = tegra_cmac_init(req);
+	if (ret)
+		return ret;
 
+	rctx->task |= SHA_UPDATE | SHA_FINAL;
 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
 }
 
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 4d4bd727f498..451b8eaab16a 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -300,8 +300,9 @@ static int tegra_sha_do_update(struct ahash_request *req)
 {
 	struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
 	struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct tegra_se *se = ctx->se;
 	unsigned int nblks, nresidue, size, ret;
-	u32 *cpuvaddr = ctx->se->cmdbuf->addr;
+	u32 *cpuvaddr = se->cmdbuf->addr;
 
 	nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
 	nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
@@ -331,6 +332,11 @@ static int tegra_sha_do_update(struct ahash_request *req)
 		return 0;
 	}
 
+	rctx->datbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->datbuf.size,
+					      &rctx->datbuf.addr, GFP_KERNEL);
+	if (!rctx->datbuf.buf)
+		return -ENOMEM;
+
 	/* Copy the previous residue first */
 	if (rctx->residue.size)
 		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -353,11 +359,11 @@
 	 * This is to support the import/export functionality.
 	 */
 	if (!(rctx->task & SHA_FIRST))
-		tegra_sha_paste_hash_result(ctx->se, rctx);
+		tegra_sha_paste_hash_result(se, rctx);
 
-	size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);
+	size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
 
-	ret = tegra_se_host1x_submit(ctx->se, size);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
 
 	/*
 	 * If this is not the final update, copy the intermediate results
@@ -365,7 +371,10 @@
 	 * call. This is to support the import/export functionality.
 	 */
 	if (!(rctx->task & SHA_FINAL))
-		tegra_sha_copy_hash_result(ctx->se, rctx);
+		tegra_sha_copy_hash_result(se, rctx);
+
+	dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
+			  rctx->datbuf.buf, rctx->datbuf.addr);
 
 	return ret;
 }
@@ -379,7 +388,17 @@
 	u32 *cpuvaddr = se->cmdbuf->addr;
 	int size, ret = 0;
 
-	memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+	if (rctx->residue.size) {
+		rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
+						      &rctx->datbuf.addr, GFP_KERNEL);
+		if (!rctx->datbuf.buf) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+	}
+
 	rctx->datbuf.size = rctx->residue.size;
 	rctx->total_len += rctx->residue.size;
 
@@ -388,7 +407,7 @@
 	size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
 
-	ret = tegra_se_host1x_submit(se, size);
+	ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
 	if (ret)
 		goto out;
@@ -396,8 +415,10 @@
 	memcpy(req->result, rctx->digest.buf, rctx->digest.size);
 
 out:
-	dma_free_coherent(se->dev, SE_SHA_BUFLEN,
-			  rctx->datbuf.buf, rctx->datbuf.addr);
+	if (rctx->residue.size)
+		dma_free_coherent(se->dev, rctx->datbuf.size,
+				  rctx->datbuf.buf, rctx->datbuf.addr);
+out_free:
 	dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm),
 			  rctx->residue.buf, rctx->residue.addr);
 	dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
@@ -416,14 +437,21 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq)
 
 	if (rctx->task & SHA_UPDATE) {
 		ret = tegra_sha_do_update(req);
+		if (ret)
+			goto out;
+
 		rctx->task &= ~SHA_UPDATE;
 	}
 
 	if (rctx->task & SHA_FINAL) {
 		ret = tegra_sha_do_final(req);
+		if (ret)
+			goto out;
+
 		rctx->task &= ~SHA_FINAL;
 	}
 
+out:
 	crypto_finalize_hash_request(se->engine, req, ret);
 
 	return 0;
@@ -526,19 +554,11 @@ static int tegra_sha_init(struct ahash_request *req)
 	if (!rctx->residue.buf)
 		goto resbuf_fail;
 
-	rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
-					      &rctx->datbuf.addr, GFP_KERNEL);
-	if (!rctx->datbuf.buf)
-		goto datbuf_fail;
-
 	return 0;
 
-datbuf_fail:
-	dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
-			  rctx->residue.addr);
 resbuf_fail:
-	dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf,
-			  rctx->datbuf.addr);
+	dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
+			  rctx->digest.addr);
 digbuf_fail:
 	return -ENOMEM;
 }
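Throughout the tegra-se-hash.c hunks above, the driver keeps a residue buffer of partial input between update() calls: each update hashes only whole blocks and carries the tail into the next call, and do_final() flushes whatever residue is left. The bookkeeping, condensed (illustrative, mirroring the hunks):

	unsigned int total    = req->nbytes + rctx->residue.size;
	unsigned int nresidue = total % rctx->blk_size;	/* bytes to carry over */
	unsigned int nblks    = total / rctx->blk_size;	/* whole blocks to hash */

	if (!nblks) {
		/* less than one block so far: just grow the residue buffer */
		scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
					 req->src, 0, req->nbytes, 0);
		rctx->residue.size += req->nbytes;
		return 0;
	}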
@@ -559,13 +579,18 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 			     unsigned int keylen)
 {
 	struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ret;
 
 	if (aes_check_keylen(keylen))
 		return tegra_hmac_fallback_setkey(ctx, key, keylen);
 
+	ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	if (ret)
+		return tegra_hmac_fallback_setkey(ctx, key, keylen);
+
 	ctx->fallback = false;
 
-	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+	return 0;
 }
 
 static int tegra_sha_update(struct ahash_request *req)
@@ -615,13 +640,16 @@ static int tegra_sha_digest(struct ahash_request *req)
 	struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+	int ret;
 
 	if (ctx->fallback)
 		return tegra_sha_fallback_digest(req);
 
-	tegra_sha_init(req);
-	rctx->task |= SHA_UPDATE | SHA_FINAL;
+	ret = tegra_sha_init(req);
+	if (ret)
+		return ret;
 
+	rctx->task |= SHA_UPDATE | SHA_FINAL;
 	return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
 }
 
diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c
index ac14678dbd30..276b261fb6df 100644
--- a/drivers/crypto/tegra/tegra-se-key.c
+++ b/drivers/crypto/tegra/tegra-se-key.c
@@ -115,11 +115,17 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key,
 			    u32 keylen, u16 slot, u32 alg)
 {
 	const u32 *keyval = (u32 *)key;
-	u32 *addr = se->cmdbuf->addr, size;
+	u32 *addr = se->keybuf->addr, size;
+	int ret;
+
+	mutex_lock(&kslt_lock);
 
 	size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
+	ret = tegra_se_host1x_submit(se, se->keybuf, size);
 
-	return tegra_se_host1x_submit(se, size);
+	mutex_unlock(&kslt_lock);
+
+	return ret;
 }
 
 void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c
index f94c0331b148..55690b044e41 100644
--- a/drivers/crypto/tegra/tegra-se-main.c
+++ b/drivers/crypto/tegra/tegra-se-main.c
@@ -141,7 +141,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi
 	return cmdbuf;
 }
 
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size)
 {
 	struct host1x_job *job;
 	int ret;
@@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
 	job->engine_fallback_streamid = se->stream_id;
 	job->engine_streamid_offset = SE_STREAM_ID;
 
-	se->cmdbuf->words = size;
+	cmdbuf->words = size;
 
-	host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);
+	host1x_job_add_gather(job, &cmdbuf->bo, size, 0);
 
 	ret = host1x_job_pin(job, se->dev);
 	if (ret) {
@@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client)
 		goto syncpt_put;
 	}
 
+	se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
+	if (!se->keybuf) {
+		ret = -ENOMEM;
+		goto cmdbuf_put;
+	}
+
 	ret = se->hw->init_alg(se);
 	if (ret) {
 		dev_err(se->dev, "failed to register algorithms\n");
-		goto cmdbuf_put;
+		goto keybuf_put;
 	}
 
 	return 0;
 
+keybuf_put:
+	tegra_se_cmdbuf_put(&se->keybuf->bo);
 cmdbuf_put:
 	tegra_se_cmdbuf_put(&se->cmdbuf->bo);
 syncpt_put:
diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h
index b9dd7ceb8783..e196a90eedb9 100644
--- a/drivers/crypto/tegra/tegra-se.h
+++ b/drivers/crypto/tegra/tegra-se.h
@@ -340,8 +340,6 @@
 #define SE_CRYPTO_CTR_REG_COUNT		4
 #define SE_MAX_KEYSLOT			15
 #define SE_MAX_MEM_ALLOC		SZ_4M
-#define SE_AES_BUFLEN			0x8000
-#define SE_SHA_BUFLEN			0x2000
 
 #define SHA_FIRST	BIT(0)
 #define SHA_UPDATE	BIT(1)
@@ -420,6 +418,7 @@ struct tegra_se {
 	struct host1x_client client;
 	struct host1x_channel *channel;
 	struct tegra_se_cmdbuf *cmdbuf;
+	struct tegra_se_cmdbuf *keybuf;
 	struct crypto_engine *engine;
 	struct host1x_syncpt *syncpt;
 	struct device *dev;
@@ -502,7 +501,7 @@ void tegra_deinit_hash(struct tegra_se *se);
 int tegra_key_submit(struct tegra_se *se, const u8 *key,
 		     u32 keylen, u32 alg, u32 *keyid);
 void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size);
 
 /* HOST1x OPCODES */
 static inline u32 host1x_opcode_setpayload(unsigned int payload)
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 1bcec6f46c9c..9b5345068604 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -3,18 +3,19 @@
  * Xilinx ZynqMP SHA Driver.
  * Copyright (c) 2022 Xilinx Inc.
  */
-#include <linux/cacheflush.h>
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
 #include <crypto/sha3.h>
-#include <linux/crypto.h>
+#include <linux/cacheflush.h>
+#include <linux/cleanup.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/err.h>
 #include <linux/firmware/xlnx-zynqmp.h>
-#include <linux/init.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <linux/platform_device.h>
 
 #define ZYNQMP_DMA_BIT_MASK	32U
@@ -43,6 +44,8 @@ struct zynqmp_sha_desc_ctx {
 static dma_addr_t update_dma_addr, final_dma_addr;
 static char *ubuf, *fbuf;
 
+static DEFINE_SPINLOCK(zynqmp_sha_lock);
+
 static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
 {
 	const char *fallback_driver_name = crypto_shash_alg_name(hash);
@@ -124,7 +127,8 @@ static int zynqmp_sha_export(struct shash_desc *desc, void *out)
 	return crypto_shash_export(&dctx->fbk_req, out);
 }
 
-static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+static int __zynqmp_sha_digest(struct shash_desc *desc, const u8 *data,
+			       unsigned int len, u8 *out)
 {
 	unsigned int remaining_len = len;
 	int update_size;
@@ -159,6 +163,12 @@ static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned i
 	return ret;
 }
 
+static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+{
+	scoped_guard(spinlock_bh, &zynqmp_sha_lock)
+		return __zynqmp_sha_digest(desc, data, len, out);
+}
+
 static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
 	.sha3_384 = {
 		.init = zynqmp_sha_init,
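The zynqmp-sha.c hunk above serializes digest() with a spinlock because the firmware-facing bounce buffers (ubuf/fbuf) and their DMA addresses are globals shared by all transforms. scoped_guard(), from <linux/cleanup.h>, takes the lock on entry to the statement scope and releases it on any exit, early return included. A self-contained sketch of the construct (names here are illustrative):

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);
	static int shared_value;

	static int set_shared_value(int v)
	{
		scoped_guard(spinlock_bh, &example_lock) {
			if (v < 0)
				return -EINVAL;	/* lock dropped on this return too */
			shared_value = v;
		}

		return 0;
	}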