author	Linus Torvalds <torvalds@linux-foundation.org>	2026-04-16 01:22:26 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2026-04-16 01:22:26 +0300
commit	aec2f682d47c54ef434b2d440992626d80b1ebdc (patch)
tree	fc71dfb5db214bc8567b10c5b0a6327751f95abf /drivers
parent	8801e23b5b0dcf7d9c2291cc0901628dc1006145 (diff)
parent	8879a3c110cb8ca5a69c937643f226697aa551d9 (diff)
Merge tag 'v7.1-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "API:
   - Replace crypto_get_default_rng with crypto_stdrng_get_bytes
   - Remove simd skcipher support
   - Allow algorithm types to be disabled when CRYPTO_SELFTESTS is off

  Algorithms:
   - Remove CPU-based des/3des acceleration
   - Add test vectors for authenc(hmac(md5),cbc({aes,des})) and
     authenc(hmac({md5,sha1,sha224,sha256,sha384,sha512}),rfc3686(ctr(aes)))
   - Replace spin lock with mutex in jitterentropy

  Drivers:
   - Add authenc algorithms to safexcel
   - Add support for zstd in qat
   - Add wireless mode support for QAT GEN6
   - Add anti-rollback support for QAT GEN6
   - Add support for ctr(aes), gcm(aes), and ccm(aes) in dthev2"

* tag 'v7.1-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (129 commits)
  crypto: af_alg - use sock_kmemdup in alg_setkey_by_key_serial
  crypto: vmx - remove CRYPTO_DEV_VMX from Kconfig
  crypto: omap - convert reqctx buffer to fixed-size array
  crypto: atmel-sha204a - add Thorsten Blum as maintainer
  crypto: atmel-ecc - add Thorsten Blum as maintainer
  crypto: qat - fix IRQ cleanup on 6xxx probe failure
  crypto: geniv - Remove unused spinlock from struct aead_geniv_ctx
  crypto: qce - simplify qce_xts_swapiv()
  crypto: hisilicon - Fix dma_unmap_single() direction
  crypto: talitos - rename first/last to first_desc/last_desc
  crypto: talitos - fix SEC1 32k ahash request limitation
  crypto: jitterentropy - replace long-held spinlock with mutex
  crypto: hisilicon - remove unused and non-public APIs for qm and sec
  crypto: hisilicon/qm - drop redundant variable initialization
  crypto: hisilicon/qm - remove else after return
  crypto: hisilicon/qm - add const qualifier to info_name in struct qm_cmd_dump_item
  crypto: hisilicon - fix the format string type error
  crypto: ccree - fix a memory leak in cc_mac_digest()
  crypto: qat - add support for zstd
  crypto: qat - use swab32 macro
  ...
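
The crypto_get_default_rng replacement called out above collapses a three-step sequence into a single call; the hpre hunk later in this diff is one instance. A minimal before/after sketch (names condensed from that hunk, error handling trimmed):

	/* Before: acquire the default RNG, draw bytes, release it. */
	ret = crypto_get_default_rng();
	if (ret)
		return ret;
	ret = crypto_rng_get_bytes(crypto_default_rng, key, key_size);
	crypto_put_default_rng();

	/* After: one call, with acquire/release handled internally. */
	ret = crypto_stdrng_get_bytes(key, key_size);
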
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/crypto/Kconfig	| 13
-rw-r--r--	drivers/crypto/Makefile	| 1
-rw-r--r--	drivers/crypto/allwinner/Kconfig	| 2
-rw-r--r--	drivers/crypto/aspeed/aspeed-hace-hash.c	| 3
-rw-r--r--	drivers/crypto/atmel-aes.c	| 8
-rw-r--r--	drivers/crypto/atmel-ecc.c	| 1
-rw-r--r--	drivers/crypto/atmel-i2c.c	| 6
-rw-r--r--	drivers/crypto/atmel-sha.c	| 17
-rw-r--r--	drivers/crypto/atmel-sha204a.c	| 41
-rw-r--r--	drivers/crypto/atmel-tdes.c	| 8
-rw-r--r--	drivers/crypto/axis/artpec6_crypto.c	| 9
-rw-r--r--	drivers/crypto/caam/caamalg_qi2.c	| 17
-rw-r--r--	drivers/crypto/caam/caamhash.c	| 16
-rw-r--r--	drivers/crypto/ccp/ccp-crypto-aes-galois.c	| 6
-rw-r--r--	drivers/crypto/ccp/ccp-crypto-aes-xts.c	| 6
-rw-r--r--	drivers/crypto/ccp/ccp-crypto-aes.c	| 5
-rw-r--r--	drivers/crypto/ccp/ccp-crypto-des3.c	| 5
-rw-r--r--	drivers/crypto/ccp/ccp-crypto-rsa.c	| 6
-rw-r--r--	drivers/crypto/ccp/ccp-crypto-sha.c	| 5
-rw-r--r--	drivers/crypto/ccp/sev-dev.c	| 27
-rw-r--r--	drivers/crypto/ccree/cc_hash.c	| 1
-rw-r--r--	drivers/crypto/hifn_795x.c	| 6
-rw-r--r--	drivers/crypto/hisilicon/debugfs.c	| 76
-rw-r--r--	drivers/crypto/hisilicon/hpre/hpre_crypto.c	| 12
-rw-r--r--	drivers/crypto/hisilicon/hpre/hpre_main.c	| 18
-rw-r--r--	drivers/crypto/hisilicon/qm.c	| 16
-rw-r--r--	drivers/crypto/hisilicon/sec/sec_algs.c	| 2
-rw-r--r--	drivers/crypto/hisilicon/sec2/sec.h	| 2
-rw-r--r--	drivers/crypto/hisilicon/sec2/sec_crypto.c	| 2
-rw-r--r--	drivers/crypto/hisilicon/sec2/sec_main.c	| 13
-rw-r--r--	drivers/crypto/hisilicon/zip/zip_main.c	| 19
-rw-r--r--	drivers/crypto/img-hash.c	| 24
-rw-r--r--	drivers/crypto/inside-secure/eip93/Kconfig	| 2
-rw-r--r--	drivers/crypto/inside-secure/eip93/eip93-aead.c	| 2
-rw-r--r--	drivers/crypto/inside-secure/eip93/eip93-aead.h	| 2
-rw-r--r--	drivers/crypto/inside-secure/eip93/eip93-aes.h	| 2
-rw-r--r--	drivers/crypto/inside-secure/eip93/eip93-cipher.c	| 4
-rw-r--r--	drivers/crypto/inside-secure/eip93/eip93-cipher.h	| 2
-rw-r--r--	drivers/crypto/inside-secure/eip93/eip93-common.c	| 2
-rw-r--r--	drivers/crypto/inside-secure/eip93/eip93-common.h	| 2
-rw-r--r--	drivers/crypto/inside-secure/eip93/eip93-des.h	| 2
-rw-r--r--	drivers/crypto/inside-secure/eip93/eip93-hash.c	| 2
-rw-r--r--	drivers/crypto/inside-secure/eip93/eip93-hash.h	| 2
-rw-r--r--	drivers/crypto/inside-secure/eip93/eip93-main.c	| 18
-rw-r--r--	drivers/crypto/inside-secure/eip93/eip93-main.h	| 2
-rw-r--r--	drivers/crypto/inside-secure/eip93/eip93-regs.h	| 4
-rw-r--r--	drivers/crypto/inside-secure/safexcel.c	| 8
-rw-r--r--	drivers/crypto/inside-secure/safexcel.h	| 8
-rw-r--r--	drivers/crypto/inside-secure/safexcel_cipher.c	| 149
-rw-r--r--	drivers/crypto/intel/iaa/iaa_crypto_main.c	| 2
-rw-r--r--	drivers/crypto/intel/keembay/keembay-ocs-ecc.c	| 17
-rw-r--r--	drivers/crypto/intel/qat/Kconfig	| 2
-rw-r--r--	drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c	| 21
-rw-r--r--	drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c	| 15
-rw-r--r--	drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c	| 130
-rw-r--r--	drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h	| 20
-rw-r--r--	drivers/crypto/intel/qat/qat_6xxx/adf_drv.c	| 37
-rw-r--r--	drivers/crypto/intel/qat/qat_common/Makefile	| 3
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_accel_devices.h	| 8
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_accel_engine.c	| 7
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_admin.c	| 70
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_admin.h	| 2
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_anti_rb.c	| 66
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_anti_rb.h	| 37
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_common_drv.h	| 6
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_fw_config.h	| 1
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c	| 18
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c	| 6
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h	| 1
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_init.c	| 9
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.c	| 133
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.h	| 11
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c	| 12
-rw-r--r--	drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c	| 10
-rw-r--r--	drivers/crypto/intel/qat/qat_common/icp_qat_fw.h	| 7
-rw-r--r--	drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h	| 2
-rw-r--r--	drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h	| 15
-rw-r--r--	drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h	| 1
-rw-r--r--	drivers/crypto/intel/qat/qat_common/icp_qat_hw.h	| 6
-rw-r--r--	drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h	| 10
-rw-r--r--	drivers/crypto/intel/qat/qat_common/qat_comp_algs.c	| 540
-rw-r--r--	drivers/crypto/intel/qat/qat_common/qat_comp_req.h	| 9
-rw-r--r--	drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.c	| 165
-rw-r--r--	drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.h	| 13
-rw-r--r--	drivers/crypto/intel/qat/qat_common/qat_compression.c	| 23
-rw-r--r--	drivers/crypto/intel/qat/qat_common/qat_hal.c	| 27
-rw-r--r--	drivers/crypto/intel/qat/qat_common/qat_uclo.c	| 25
-rw-r--r--	drivers/crypto/marvell/cesa/hash.c	| 3
-rw-r--r--	drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c	| 8
-rw-r--r--	drivers/crypto/nx/nx-842.c	| 10
-rw-r--r--	drivers/crypto/nx/nx-842.h	| 6
-rw-r--r--	drivers/crypto/omap-sham.c	| 21
-rw-r--r--	drivers/crypto/qce/aead.c	| 22
-rw-r--r--	drivers/crypto/qce/common.c	| 12
-rw-r--r--	drivers/crypto/qce/sha.c	| 6
-rw-r--r--	drivers/crypto/qce/skcipher.c	| 6
-rw-r--r--	drivers/crypto/s5p-sss.c	| 27
-rw-r--r--	drivers/crypto/stm32/stm32-cryp.c	| 16
-rw-r--r--	drivers/crypto/stm32/stm32-hash.c	| 16
-rw-r--r--	drivers/crypto/talitos.c	| 254
-rw-r--r--	drivers/crypto/tegra/tegra-se-aes.c	| 9
-rw-r--r--	drivers/crypto/tegra/tegra-se-hash.c	| 3
-rw-r--r--	drivers/crypto/ti/Kconfig	| 4
-rw-r--r--	drivers/crypto/ti/dthev2-aes.c	| 899
-rw-r--r--	drivers/crypto/ti/dthev2-common.c	| 19
-rw-r--r--	drivers/crypto/ti/dthev2-common.h	| 27
-rw-r--r--	drivers/crypto/virtio/virtio_crypto_common.h	| 3
-rw-r--r--	drivers/crypto/virtio/virtio_crypto_core.c	| 11
108 files changed, 2950 insertions, 525 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9960100e6066..d23b58b81ca3 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -301,6 +301,7 @@ config CRYPTO_DEV_PPC4XX
select CRYPTO_CCM
select CRYPTO_CTR
select CRYPTO_GCM
+ select CRYPTO_RNG
select CRYPTO_SKCIPHER
help
This option allows you to have support for AMCC crypto acceleration.
@@ -490,7 +491,7 @@ config CRYPTO_DEV_ATMEL_ECC
select CRYPTO_ECDH
select CRC16
help
- Microhip / Atmel ECC hw accelerator.
+ Microchip / Atmel ECC hw accelerator.
Select this if you want to use the Microchip / Atmel module for
ECDH algorithm.
@@ -504,7 +505,7 @@ config CRYPTO_DEV_ATMEL_SHA204A
select HW_RANDOM
select CRC16
help
- Microhip / Atmel SHA accelerator and RNG.
+ Microchip / Atmel SHA accelerator and RNG.
Select this if you want to use the Microchip / Atmel SHA204A
module as a random number generator. (Other functions of the
chip are currently not exposed by this driver)
@@ -667,14 +668,6 @@ config CRYPTO_DEV_QCOM_RNG
To compile this driver as a module, choose M here. The
module will be called qcom-rng. If unsure, say N.
-#config CRYPTO_DEV_VMX
-# bool "Support for VMX cryptographic acceleration instructions"
-# depends on PPC64 && VSX
-# help
-# Support for VMX cryptographic acceleration instructions.
-#
-#source "drivers/crypto/vmx/Kconfig"
-
config CRYPTO_DEV_IMGTEC_HASH
tristate "Imagination Technologies hardware hash accelerator"
depends on MIPS || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 322ae8854e3e..283bbc650b5b 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -38,7 +38,6 @@ obj-y += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
-#obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-y += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
index b8e75210a0e3..7270e5fbc573 100644
--- a/drivers/crypto/allwinner/Kconfig
+++ b/drivers/crypto/allwinner/Kconfig
@@ -14,6 +14,7 @@ config CRYPTO_DEV_SUN4I_SS
select CRYPTO_SHA1
select CRYPTO_AES
select CRYPTO_LIB_DES
+ select CRYPTO_RNG
select CRYPTO_SKCIPHER
help
Some Allwinner SoC have a crypto accelerator named
@@ -49,6 +50,7 @@ config CRYPTO_DEV_SUN8I_CE
select CRYPTO_CBC
select CRYPTO_AES
select CRYPTO_DES
+ select CRYPTO_RNG
depends on CRYPTO_DEV_ALLWINNER
depends on PM
help
diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index f8f37c9d5f3c..6f0d03cfbefc 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -182,8 +182,7 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
final = true;
} else
length -= remain;
- scatterwalk_map_and_copy(hash_engine->ahash_src_addr, rctx->src_sg,
- rctx->offset, length, 0);
+ memcpy_from_sglist(hash_engine->ahash_src_addr, rctx->src_sg, rctx->offset, length);
aspeed_ahash_update_counter(rctx, length);
if (final)
length += aspeed_ahash_fill_padding(
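
As the hunk above shows, memcpy_from_sglist() is a drop-in for scatterwalk_map_and_copy() with the out flag clear: it copies a byte range out of a scatterlist into a linear buffer. A sketch of the equivalence, assuming the scatterwalk.h helper signature:

	/* Both copy `length` bytes from the sglist starting at `offset`. */
	scatterwalk_map_and_copy(dst, src_sg, offset, length, 0);
	memcpy_from_sglist(dst, src_sg, offset, length);
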
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index bc0c40f10944..b393689400b4 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2131,7 +2131,7 @@ static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
- free_page((unsigned long)dd->buf);
+ free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
}
static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
@@ -2270,10 +2270,12 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
/* i = ARRAY_SIZE(aes_authenc_algs); */
err_aes_authenc_alg:
crypto_unregister_aeads(aes_authenc_algs, i);
- crypto_unregister_skcipher(&aes_xts_alg);
+ if (dd->caps.has_xts)
+ crypto_unregister_skcipher(&aes_xts_alg);
#endif
err_aes_xts_alg:
- crypto_unregister_aead(&aes_gcm_alg);
+ if (dd->caps.has_gcm)
+ crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
i = ARRAY_SIZE(aes_algs);
err_aes_algs:
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index b6a77c8d439c..9c380351d2f9 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -261,6 +261,7 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
if (IS_ERR(fallback)) {
dev_err(&ctx->client->dev, "Failed to allocate transformation for '%s': %ld\n",
alg, PTR_ERR(fallback));
+ atmel_ecc_i2c_client_free(ctx->client);
return PTR_ERR(fallback);
}
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index da3cd986b1eb..0e275dbdc8c5 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -72,8 +72,8 @@ EXPORT_SYMBOL(atmel_i2c_init_read_config_cmd);
int atmel_i2c_init_read_otp_cmd(struct atmel_i2c_cmd *cmd, u16 addr)
{
- if (addr < 0 || addr > OTP_ZONE_SIZE)
- return -1;
+ if (addr >= OTP_ZONE_SIZE / 4)
+ return -EINVAL;
cmd->word_addr = COMMAND;
cmd->opcode = OPCODE_READ;
@@ -370,7 +370,7 @@ int atmel_i2c_probe(struct i2c_client *client)
}
}
- if (bus_clk_rate > 1000000L) {
+ if (bus_clk_rate > I2C_MAX_FAST_MODE_PLUS_FREQ) {
dev_err(dev, "%u exceeds maximum supported clock frequency (1MHz)\n",
bus_clk_rate);
return -EINVAL;
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 1f1341a16c42..002b62902553 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -404,20 +404,13 @@ static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx)
{
- struct atmel_sha_dev *dd = NULL;
- struct atmel_sha_dev *tmp;
+ struct atmel_sha_dev *dd;
spin_lock_bh(&atmel_sha.lock);
- if (!tctx->dd) {
- list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
- dd = tmp;
- break;
- }
- tctx->dd = dd;
- } else {
- dd = tctx->dd;
- }
-
+ if (!tctx->dd)
+ tctx->dd = list_first_entry_or_null(&atmel_sha.dev_list,
+ struct atmel_sha_dev, list);
+ dd = tctx->dd;
spin_unlock_bh(&atmel_sha.lock);
return dd;
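
The rewrite above (and the matching img_hash_digest() change later in this diff) swaps an open-coded take-the-first-element loop for list_first_entry_or_null(). A hedged sketch of the idiom, with a hypothetical struct my_dev standing in for the driver types:

	/* Open-coded: a loop that always breaks on the first iteration. */
	list_for_each_entry(tmp, &dev_list, list) {
		dd = tmp;
		break;
	}

	/* Idiomatic: first entry, or NULL when the list is empty. */
	dd = list_first_entry_or_null(&dev_list, struct my_dev, list);
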
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index 98d1023007e3..dbb39ed0cea1 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include "atmel-i2c.h"
@@ -95,19 +96,24 @@ static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max,
static int atmel_sha204a_otp_read(struct i2c_client *client, u16 addr, u8 *otp)
{
struct atmel_i2c_cmd cmd;
- int ret = -1;
+ int ret;
- if (atmel_i2c_init_read_otp_cmd(&cmd, addr) < 0) {
+ ret = atmel_i2c_init_read_otp_cmd(&cmd, addr);
+ if (ret < 0) {
dev_err(&client->dev, "failed, invalid otp address %04X\n",
addr);
return ret;
}
ret = atmel_i2c_send_receive(client, &cmd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read otp at %04X\n", addr);
+ return ret;
+ }
if (cmd.data[0] == 0xff) {
dev_err(&client->dev, "failed, device not ready\n");
- return -EINVAL;
+ return -EIO;
}
memcpy(otp, cmd.data+1, 4);
@@ -120,21 +126,22 @@ static ssize_t otp_show(struct device *dev,
{
u16 addr;
u8 otp[OTP_ZONE_SIZE];
- char *str = buf;
struct i2c_client *client = to_i2c_client(dev);
- int i;
+ ssize_t len = 0;
+ int i, ret;
- for (addr = 0; addr < OTP_ZONE_SIZE/4; addr++) {
- if (atmel_sha204a_otp_read(client, addr, otp + addr * 4) < 0) {
+ for (addr = 0; addr < OTP_ZONE_SIZE / 4; addr++) {
+ ret = atmel_sha204a_otp_read(client, addr, otp + addr * 4);
+ if (ret < 0) {
dev_err(dev, "failed to read otp zone\n");
- break;
+ return ret;
}
}
- for (i = 0; i < addr*2; i++)
- str += sprintf(str, "%02X", otp[i]);
- str += sprintf(str, "\n");
- return str - buf;
+ for (i = 0; i < OTP_ZONE_SIZE; i++)
+ len += sysfs_emit_at(buf, len, "%02X", otp[i]);
+ len += sysfs_emit_at(buf, len, "\n");
+ return len;
}
static DEVICE_ATTR_RO(otp);
@@ -174,10 +181,6 @@ static int atmel_sha204a_probe(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
- /* otp read out */
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
- return -ENODEV;
-
ret = sysfs_create_group(&client->dev.kobj, &atmel_sha204a_groups);
if (ret) {
dev_err(&client->dev, "failed to register sysfs entry\n");
@@ -191,10 +194,8 @@ static void atmel_sha204a_remove(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
- if (atomic_read(&i2c_priv->tfm_count)) {
- dev_emerg(&client->dev, "Device is busy, will remove it anyhow\n");
- return;
- }
+ devm_hwrng_unregister(&client->dev, &i2c_priv->hwrng);
+ atmel_i2c_flush_queue();
sysfs_remove_group(&client->dev.kobj, &atmel_sha204a_groups);
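
The otp_show() hunk above replaces raw sprintf() pointer arithmetic with sysfs_emit_at(), which bounds every write to the one-page sysfs buffer and returns the number of bytes emitted. A minimal sketch of the pattern (hypothetical attribute, not from this diff):

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		ssize_t len = 0;
		int i;

		/* Emit four example bytes as hex, then a newline. */
		for (i = 0; i < 4; i++)
			len += sysfs_emit_at(buf, len, "%02X", i);
		len += sysfs_emit_at(buf, len, "\n");
		return len;
	}
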
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 278c0df3c92f..643e507f9c02 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -294,8 +294,8 @@ static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
} else {
- dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
- dd->dma_size, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
+ dd->dma_size, DMA_FROM_DEVICE);
/* copy data */
count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
@@ -619,8 +619,8 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
} else {
- dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
- dd->dma_size, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
+ dd->dma_size, DMA_FROM_DEVICE);
/* copy data */
count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
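
Both atmel-tdes hunks above fix the same direction bug: before the CPU reads a buffer the device has just written (DMA_FROM_DEVICE), ownership must come back to the CPU via dma_sync_single_for_cpu(); dma_sync_single_for_device() is the opposite handover, used before the device touches the buffer again. The general pattern, as a sketch rather than code from this driver:

	/* Device filled buf; make the data visible to the CPU first. */
	dma_sync_single_for_cpu(dev, dma_addr, size, DMA_FROM_DEVICE);
	memcpy(dst, buf, size);

	/* Hand the buffer back before the device reuses it. */
	dma_sync_single_for_device(dev, dma_addr, size, DMA_FROM_DEVICE);
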
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index b04d6379244a..a4793b76300c 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1323,7 +1323,7 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
artpec6_crypto_init_dma_operation(common);
- /* Upload HMAC key, must be first the first packet */
+ /* Upload HMAC key, it must be the first packet */
if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
if (variant == ARTPEC6_CRYPTO) {
req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
@@ -1333,11 +1333,8 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
a7_regk_crypto_dlkey);
}
- /* Copy and pad up the key */
- memcpy(req_ctx->key_buffer, ctx->hmac_key,
- ctx->hmac_key_length);
- memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
- blocksize - ctx->hmac_key_length);
+ memcpy_and_pad(req_ctx->key_buffer, blocksize, ctx->hmac_key,
+ ctx->hmac_key_length, 0);
error = artpec6_crypto_setup_out_descr(common,
(void *)&req_ctx->key_md,
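
memcpy_and_pad(), used above, is the linux/string.h helper that copies count bytes into a dest_len-byte destination and fills the remainder with a pad byte, replacing the manual memcpy()+memset() pair. Sketch of the equivalence (valid for count <= dest_len):

	memcpy(dest, src, count);
	memset(dest + count, 0, dest_len - count);

	/* ...is the same as: */
	memcpy_and_pad(dest, dest_len, src, count, 0);
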
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 78964e1712e5..bf10c3dda745 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/fsl/mc.h>
#include <linux/kernel.h>
+#include <linux/string.h>
#include <linux/string_choices.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
@@ -3269,7 +3270,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
dpaa2_fl_set_addr(out_fle, key_dma);
dpaa2_fl_set_len(out_fle, digestsize);
- print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
+ print_hex_dump_devel("key_in@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
@@ -3289,7 +3290,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
- print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
+ print_hex_dump_devel("digested key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key,
digestsize, 1);
}
@@ -4645,16 +4646,12 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
alg = &halg->halg.base;
if (keyed) {
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->hmac_name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->hmac_driver_name);
+ strscpy(alg->cra_name, template->hmac_name);
+ strscpy(alg->cra_driver_name, template->hmac_driver_name);
t_alg->is_hmac = true;
} else {
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
+ strscpy(alg->cra_name, template->name);
+ strscpy(alg->cra_driver_name, template->driver_name);
t_alg->ahash_alg.setkey = NULL;
t_alg->is_hmac = false;
}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 44122208f70c..ddb2a35aec2d 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -393,7 +393,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
- print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
+ print_hex_dump_devel("key_in@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
@@ -408,7 +408,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
wait_for_completion(&result.completion);
ret = result.err;
- print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
+ print_hex_dump_devel("digested key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key,
digestsize, 1);
}
@@ -1914,16 +1914,12 @@ caam_hash_alloc(struct caam_hash_template *template,
alg = &halg->halg.base;
if (keyed) {
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->hmac_name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->hmac_driver_name);
+ strscpy(alg->cra_name, template->hmac_name);
+ strscpy(alg->cra_driver_name, template->hmac_driver_name);
t_alg->is_hmac = true;
} else {
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
+ strscpy(alg->cra_name, template->name);
+ strscpy(alg->cra_driver_name, template->driver_name);
halg->setkey = NULL;
t_alg->is_hmac = false;
}
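
The snprintf(dst, CRYPTO_MAX_ALG_NAME, "%s", src) to strscpy(dst, src) conversions in the two caam files above recur in the ccp and hifn diffs below. The two-argument strscpy() deduces the destination size from the array type, always NUL-terminates, and returns -E2BIG on truncation, which the hifn conversion checks explicitly. A condensed sketch:

	char name[CRYPTO_MAX_ALG_NAME];

	/* Size comes from the array; the result is NUL-terminated. */
	if (strscpy(name, template_name) < 0)
		return -EINVAL;	/* -E2BIG: source did not fit */
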
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index 6c8d1b87d60d..fc14c2e73ccd 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
+#include <linux/string.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/algapi.h>
@@ -223,9 +224,8 @@ static int ccp_register_aes_aead(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_aead->alg;
*alg = *def->alg_defaults;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->driver_name);
+ strscpy(alg->base.cra_name, def->name);
+ strscpy(alg->base.cra_driver_name, def->driver_name);
alg->base.cra_blocksize = def->blocksize;
ret = crypto_register_aead(alg);
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index c7e26ce71156..8e59137284b7 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
+#include <linux/string.h>
#include <crypto/aes.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>
@@ -239,9 +240,8 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
alg = &ccp_alg->alg;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->drv_name);
+ strscpy(alg->base.cra_name, def->name);
+ strscpy(alg->base.cra_driver_name, def->drv_name);
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 01d298350b92..94bccc5d6c78 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -305,9 +305,8 @@ static int ccp_register_aes_alg(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->driver_name);
+ strscpy(alg->base.cra_name, def->name);
+ strscpy(alg->base.cra_driver_name, def->driver_name);
alg->base.cra_blocksize = def->blocksize;
alg->ivsize = def->ivsize;
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index c20b5a6a340a..e26b431a5993 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -193,9 +193,8 @@ static int ccp_register_des3_alg(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->driver_name);
+ strscpy(alg->base.cra_name, def->name);
+ strscpy(alg->base.cra_driver_name, def->driver_name);
alg->base.cra_blocksize = def->blocksize;
alg->ivsize = def->ivsize;
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
index 090adacaaf93..287d7f62026d 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
+#include <linux/string.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/internal/rsa.h>
@@ -257,9 +258,8 @@ static int ccp_register_rsa_alg(struct list_head *head,
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->driver_name);
+ strscpy(alg->base.cra_name, def->name);
+ strscpy(alg->base.cra_driver_name, def->driver_name);
ret = crypto_register_akcipher(alg);
if (ret) {
pr_err("%s akcipher algorithm registration error (%d)\n",
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 286b2d716236..85058a89f35b 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -484,9 +484,8 @@ static int ccp_register_sha_alg(struct list_head *head,
halg->statesize = sizeof(struct ccp_sha_exp_ctx);
base = &halg->base;
- snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->drv_name);
+ strscpy(base->cra_name, def->name);
+ strscpy(base->cra_driver_name, def->drv_name);
base->cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 939fa8aa155c..450d491379d4 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -1965,11 +1965,11 @@ static int sev_get_firmware(struct device *dev,
/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
static int sev_update_firmware(struct device *dev)
{
- struct sev_data_download_firmware *data;
+ struct sev_data_download_firmware data;
const struct firmware *firmware;
int ret, error, order;
struct page *p;
- u64 data_size;
+ void *fw_blob;
if (!sev_version_greater_or_equal(0, 15)) {
dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n");
@@ -1981,16 +1981,7 @@ static int sev_update_firmware(struct device *dev)
return -1;
}
- /*
- * SEV FW expects the physical address given to it to be 32
- * byte aligned. Memory allocated has structure placed at the
- * beginning followed by the firmware being passed to the SEV
- * FW. Allocate enough memory for data structure + alignment
- * padding + SEV FW.
- */
- data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
-
- order = get_order(firmware->size + data_size);
+ order = get_order(firmware->size);
p = alloc_pages(GFP_KERNEL, order);
if (!p) {
ret = -1;
@@ -2001,20 +1992,20 @@ static int sev_update_firmware(struct device *dev)
* Copy firmware data to a kernel allocated contiguous
* memory region.
*/
- data = page_address(p);
- memcpy(page_address(p) + data_size, firmware->data, firmware->size);
+ fw_blob = page_address(p);
+ memcpy(fw_blob, firmware->data, firmware->size);
- data->address = __psp_pa(page_address(p) + data_size);
- data->len = firmware->size;
+ data.address = __psp_pa(fw_blob);
+ data.len = firmware->size;
- ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, &data, &error);
/*
* A quirk for fixing the committed TCB version, when upgrading from
* earlier firmware version than 1.50.
*/
if (!ret && !sev_version_greater_or_equal(1, 50))
- ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, &data, &error);
if (ret)
dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index c6d085c8ff79..73179bf725a7 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -1448,6 +1448,7 @@ static int cc_mac_digest(struct ahash_request *req)
if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
req->nbytes, 1, flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index a897541f897b..2da0894f31fd 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
+#include <linux/string.h>
#include <linux/highmem.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
@@ -2256,8 +2257,7 @@ static int hifn_alg_alloc(struct hifn_device *dev, const struct hifn_alg_templat
alg->alg.init = hifn_init_tfm;
err = -EINVAL;
- if (snprintf(alg->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "%s", t->name) >= CRYPTO_MAX_ALG_NAME)
+ if (strscpy(alg->alg.base.cra_name, t->name) < 0)
goto out_free_alg;
if (snprintf(alg->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s-%s", t->drv_name, dev->name) >= CRYPTO_MAX_ALG_NAME)
@@ -2367,7 +2367,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dev->alg_list);
- snprintf(dev->name, sizeof(dev->name), "%s", name);
+ strscpy(dev->name, name);
spin_lock_init(&dev->lock);
for (i = 0; i < 3; ++i) {
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 32e9f8350289..3ee6de16e3f1 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -45,8 +45,8 @@ struct qm_dfx_item {
struct qm_cmd_dump_item {
const char *cmd;
- char *info_name;
- int (*dump_fn)(struct hisi_qm *qm, char *cmd, char *info_name);
+ const char *info_name;
+ int (*dump_fn)(struct hisi_qm *qm, char *cmd, const char *info_name);
};
static struct qm_dfx_item qm_dfx_files[] = {
@@ -151,7 +151,7 @@ static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
}
static void dump_show(struct hisi_qm *qm, void *info,
- unsigned int info_size, char *info_name)
+ unsigned int info_size, const char *info_name)
{
struct device *dev = &qm->pdev->dev;
u8 *info_curr = info;
@@ -165,7 +165,7 @@ static void dump_show(struct hisi_qm *qm, void *info,
}
}
-static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
+static int qm_sqc_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct device *dev = &qm->pdev->dev;
struct qm_sqc sqc;
@@ -202,7 +202,7 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
return 0;
}
-static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
+static int qm_cqc_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct device *dev = &qm->pdev->dev;
struct qm_cqc cqc;
@@ -239,7 +239,7 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
return 0;
}
-static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
+static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct device *dev = &qm->pdev->dev;
struct qm_aeqc aeqc;
@@ -305,7 +305,7 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
ret = kstrtou32(presult, 0, e_id);
if (ret || *e_id >= q_depth) {
- dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
+ dev_err(dev, "Please input sqe num (0-%d)", q_depth - 1);
return -EINVAL;
}
@@ -317,7 +317,7 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
return 0;
}
-static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name)
+static int qm_sq_dump(struct hisi_qm *qm, char *s, const char *name)
{
u16 sq_depth = qm->qp_array->sq_depth;
struct hisi_qp *qp;
@@ -345,7 +345,7 @@ static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name)
return 0;
}
-static int qm_cq_dump(struct hisi_qm *qm, char *s, char *name)
+static int qm_cq_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct qm_cqe *cqe_curr;
struct hisi_qp *qp;
@@ -363,7 +363,7 @@ static int qm_cq_dump(struct hisi_qm *qm, char *s, char *name)
return 0;
}
-static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, char *name)
+static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct device *dev = &qm->pdev->dev;
u16 xeq_depth;
@@ -388,7 +388,7 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, char *name)
}
if (xeqe_id >= xeq_depth) {
- dev_err(dev, "Please input eqe or aeqe num (0-%u)", xeq_depth - 1);
+ dev_err(dev, "Please input eqe or aeqe num (0-%d)", xeq_depth - 1);
return -EINVAL;
}
@@ -1040,6 +1040,57 @@ void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm)
}
}
+static int qm_usage_percent(struct hisi_qm *qm, int chan_num)
+{
+ u32 val, used_bw, total_bw;
+
+ val = readl(qm->io_base + QM_CHANNEL_USAGE_OFFSET +
+ chan_num * QM_CHANNEL_ADDR_INTRVL);
+ used_bw = lower_16_bits(val);
+ total_bw = upper_16_bits(val);
+ if (!total_bw)
+ return -EIO;
+
+ if (total_bw <= used_bw)
+ return QM_MAX_DEV_USAGE;
+
+ return (used_bw * QM_DEV_USAGE_RATE) / total_bw;
+}
+
+static int qm_usage_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ bool dev_is_active = true;
+ int i, ret;
+
+ /* If device is in suspended, usage is 0. */
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret == -EAGAIN) {
+ dev_is_active = false;
+ } else if (ret) {
+ dev_err(&qm->pdev->dev, "failed to get dfx access for usage_show!\n");
+ return ret;
+ }
+
+ ret = 0;
+ for (i = 0; i < qm->channel_data.channel_num; i++) {
+ if (dev_is_active) {
+ ret = qm_usage_percent(qm, i);
+ if (ret < 0) {
+ hisi_qm_put_dfx_access(qm);
+ return ret;
+ }
+ }
+ seq_printf(s, "%s: %d\n", qm->channel_data.channel_name[i], ret);
+ }
+
+ if (dev_is_active)
+ hisi_qm_put_dfx_access(qm);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(qm_usage);
+
static int qm_diff_regs_show(struct seq_file *s, void *unused)
{
struct hisi_qm *qm = s->private;
@@ -1159,6 +1210,9 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
qm, &qm_diff_regs_fops);
+ if (qm->ver >= QM_HW_V5)
+ debugfs_create_file("dev_usage", 0444, qm->debug.debug_root, qm, &qm_usage_fops);
+
debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 839c1f677143..09077abbf6ad 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1327,17 +1327,9 @@ static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
struct device *dev = ctx->dev;
int ret;
- ret = crypto_get_default_rng();
- if (ret) {
- dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
- return ret;
- }
-
- ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
- params->key_size);
- crypto_put_default_rng();
+ ret = crypto_stdrng_get_bytes(params->key, params->key_size);
if (ret)
- dev_err(dev, "failed to get rng, ret = %d!\n", ret);
+ dev_err(dev, "failed to get random bytes, ret = %d!\n", ret);
return ret;
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 884d5d0afaf4..357ab5e5887e 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -121,6 +121,8 @@
#define HPRE_DFX_COMMON2_LEN 0xE
#define HPRE_DFX_CORE_LEN 0x43
+#define HPRE_MAX_CHANNEL_NUM 2
+
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -370,6 +372,11 @@ static struct dfx_diff_registers hpre_diff_regs[] = {
},
};
+static const char *hpre_channel_name[HPRE_MAX_CHANNEL_NUM] = {
+ "RSA",
+ "ECC",
+};
+
static const struct hisi_qm_err_ini hpre_err_ini;
bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
@@ -1234,6 +1241,16 @@ static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
return 0;
}
+static void hpre_set_channels(struct hisi_qm *qm)
+{
+ struct qm_channel *channel_data = &qm->channel_data;
+ int i;
+
+ channel_data->channel_num = HPRE_MAX_CHANNEL_NUM;
+ for (i = 0; i < HPRE_MAX_CHANNEL_NUM; i++)
+ channel_data->channel_name[i] = hpre_channel_name[i];
+}
+
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
u64 alg_msk;
@@ -1267,6 +1284,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
+ hpre_set_channels(qm);
/* Fetch and save the value of capability registers */
ret = hpre_pre_store_cap_reg(qm);
if (ret) {
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index d1626685ed9f..3ca47e2a9719 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -472,6 +472,8 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
static void qm_irqs_unregister(struct hisi_qm *qm);
static int qm_reset_device(struct hisi_qm *qm);
+static void hisi_qm_stop_qp(struct hisi_qp *qp);
+
int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp,
unsigned int device)
{
@@ -2262,7 +2264,7 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
* After this function, qp can receive request from user. Return 0 if
* successful, negative error code if failed.
*/
-int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
+static int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
{
struct hisi_qm *qm = qp->qm;
int ret;
@@ -2273,7 +2275,6 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
return ret;
}
-EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
/**
* qp_stop_fail_cb() - call request cb.
@@ -2418,13 +2419,12 @@ static void qm_stop_qp_nolock(struct hisi_qp *qp)
*
* This function is reverse of hisi_qm_start_qp.
*/
-void hisi_qm_stop_qp(struct hisi_qp *qp)
+static void hisi_qm_stop_qp(struct hisi_qp *qp)
{
down_write(&qp->qm->qps_lock);
qm_stop_qp_nolock(qp);
up_write(&qp->qm->qps_lock);
}
-EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
/**
* hisi_qp_send() - Queue up a task in the hardware queue.
@@ -3381,7 +3381,7 @@ static int __hisi_qm_start(struct hisi_qm *qm)
int hisi_qm_start(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- int ret = 0;
+ int ret;
down_write(&qm->qps_lock);
@@ -3917,8 +3917,8 @@ back_func_qos:
static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
{
- u64 cir_u = 0, cir_b = 0, cir_s = 0;
u64 shaper_vft, ir_calc, ir;
+ u64 cir_u, cir_b, cir_s;
unsigned int val;
u32 error_rate;
int ret;
@@ -4278,8 +4278,8 @@ int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
return hisi_qm_sriov_disable(pdev, false);
- else
- return hisi_qm_sriov_enable(pdev, num_vfs);
+
+ return hisi_qm_sriov_enable(pdev, num_vfs);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 54e24fd7b9be..85eecbb40e7e 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -844,7 +844,7 @@ err_free_elements:
if (crypto_skcipher_ivsize(atfm))
dma_unmap_single(info->dev, sec_req->dma_iv,
crypto_skcipher_ivsize(atfm),
- DMA_BIDIRECTIONAL);
+ DMA_TO_DEVICE);
err_unmap_out_sg:
if (split)
sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 0710977861f3..adf95795dffe 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -285,7 +285,5 @@ enum sec_cap_table_type {
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
-int sec_register_to_crypto(struct hisi_qm *qm);
-void sec_unregister_from_crypto(struct hisi_qm *qm);
u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 15174216d8c4..2471a4dd0b50 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -230,7 +230,7 @@ static int qp_send_message(struct sec_req *req)
spin_unlock_bh(&qp_ctx->req_lock);
- atomic64_inc(&req->ctx->sec->debug.dfx.send_cnt);
+ atomic64_inc(&qp_ctx->ctx->sec->debug.dfx.send_cnt);
return -EINPROGRESS;
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index efda8646fc60..056bd8f4da5a 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -133,6 +133,8 @@
#define SEC_AEAD_BITMAP (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
GENMASK_ULL(45, 43))
+#define SEC_MAX_CHANNEL_NUM 1
+
struct sec_hw_error {
u32 int_msk;
const char *msg;
@@ -907,7 +909,7 @@ static int sec_debugfs_atomic64_set(void *data, u64 val)
}
DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
- sec_debugfs_atomic64_set, "%lld\n");
+ sec_debugfs_atomic64_set, "%llu\n");
static int sec_regs_show(struct seq_file *s, void *unused)
{
@@ -1288,6 +1290,14 @@ static int sec_pre_store_cap_reg(struct hisi_qm *qm)
return 0;
}
+static void sec_set_channels(struct hisi_qm *qm)
+{
+ struct qm_channel *channel_data = &qm->channel_data;
+
+ channel_data->channel_num = SEC_MAX_CHANNEL_NUM;
+ channel_data->channel_name[0] = "SEC";
+}
+
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
u64 alg_msk;
@@ -1325,6 +1335,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
+ sec_set_channels(qm);
/* Fetch and save the value of capability registers */
ret = sec_pre_store_cap_reg(qm);
if (ret) {
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 85b26ef17548..44df9c859bd8 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -122,6 +122,8 @@
#define HZIP_LIT_LEN_EN_OFFSET 0x301204
#define HZIP_LIT_LEN_EN_EN BIT(4)
+#define HZIP_MAX_CHANNEL_NUM 3
+
enum {
HZIP_HIGH_COMP_RATE,
HZIP_HIGH_COMP_PERF,
@@ -359,6 +361,12 @@ static struct dfx_diff_registers hzip_diff_regs[] = {
},
};
+static const char *zip_channel_name[HZIP_MAX_CHANNEL_NUM] = {
+ "COMPRESS",
+ "DECOMPRESS",
+ "DAE"
+};
+
static int hzip_diff_regs_show(struct seq_file *s, void *unused)
{
struct hisi_qm *qm = s->private;
@@ -1400,6 +1408,16 @@ static int zip_pre_store_cap_reg(struct hisi_qm *qm)
return 0;
}
+static void zip_set_channels(struct hisi_qm *qm)
+{
+ struct qm_channel *channel_data = &qm->channel_data;
+ int i;
+
+ channel_data->channel_num = HZIP_MAX_CHANNEL_NUM;
+ for (i = 0; i < HZIP_MAX_CHANNEL_NUM; i++)
+ channel_data->channel_name[i] = zip_channel_name[i];
+}
+
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
u64 alg_msk;
@@ -1438,6 +1456,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
+ zip_set_channels(qm);
/* Fetch and save the value of capability registers */
ret = zip_pre_store_cap_reg(qm);
if (ret) {
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 7195c37dd102..c0467185ee42 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -629,24 +629,14 @@ static int img_hash_digest(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
- struct img_hash_dev *hdev = NULL;
- struct img_hash_dev *tmp;
- int err;
spin_lock(&img_hash.lock);
- if (!tctx->hdev) {
- list_for_each_entry(tmp, &img_hash.dev_list, list) {
- hdev = tmp;
- break;
- }
- tctx->hdev = hdev;
-
- } else {
- hdev = tctx->hdev;
- }
-
+ if (!tctx->hdev)
+ tctx->hdev = list_first_entry_or_null(&img_hash.dev_list,
+ struct img_hash_dev, list);
+ ctx->hdev = tctx->hdev;
spin_unlock(&img_hash.lock);
- ctx->hdev = hdev;
+
ctx->flags = 0;
ctx->digsize = crypto_ahash_digestsize(tfm);
@@ -675,9 +665,7 @@ static int img_hash_digest(struct ahash_request *req)
ctx->sgfirst = req->src;
ctx->nents = sg_nents(ctx->sg);
- err = img_hash_handle_queue(tctx->hdev, req);
-
- return err;
+ return img_hash_handle_queue(ctx->hdev, req);
}
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
diff --git a/drivers/crypto/inside-secure/eip93/Kconfig b/drivers/crypto/inside-secure/eip93/Kconfig
index 8353d3d7ec9b..29523f6927dd 100644
--- a/drivers/crypto/inside-secure/eip93/Kconfig
+++ b/drivers/crypto/inside-secure/eip93/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config CRYPTO_DEV_EIP93
tristate "Support for EIP93 crypto HW accelerators"
- depends on SOC_MT7621 || ARCH_AIROHA ||COMPILE_TEST
+ depends on SOC_MT7621 || ARCH_AIROHA || ECONET || COMPILE_TEST
select CRYPTO_LIB_AES
select CRYPTO_LIB_DES
select CRYPTO_SKCIPHER
diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.c b/drivers/crypto/inside-secure/eip93/eip93-aead.c
index 1a08aed5de13..2bbd0af7b0e0 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-aead.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-aead.c
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#include <crypto/aead.h>
diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.h b/drivers/crypto/inside-secure/eip93/eip93-aead.h
index e2fa8fd39c50..d933a8fbdf04 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-aead.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-aead.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_AEAD_H_
#define _EIP93_AEAD_H_
diff --git a/drivers/crypto/inside-secure/eip93/eip93-aes.h b/drivers/crypto/inside-secure/eip93/eip93-aes.h
index 1d83d39cab2a..82064cc8f5c7 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-aes.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-aes.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_AES_H_
#define _EIP93_AES_H_
diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.c b/drivers/crypto/inside-secure/eip93/eip93-cipher.c
index 0713c71ab458..4dd7ab7503e8 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-cipher.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.c
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#include <crypto/aes.h>
@@ -320,7 +320,7 @@ struct eip93_alg_template eip93_alg_ecb_des = {
.ivsize = 0,
.base = {
.cra_name = "ecb(des)",
- .cra_driver_name = "ebc(des-eip93)",
+ .cra_driver_name = "ecb(des-eip93)",
.cra_priority = EIP93_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.h b/drivers/crypto/inside-secure/eip93/eip93-cipher.h
index 6e2545ebd879..47e4e84ff14e 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-cipher.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_CIPHER_H_
#define _EIP93_CIPHER_H_
diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.c b/drivers/crypto/inside-secure/eip93/eip93-common.c
index f4ad6beff15e..6f147014f996 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-common.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-common.c
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#include <crypto/aes.h>
diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.h b/drivers/crypto/inside-secure/eip93/eip93-common.h
index 80964cfa34df..41c43782eb5c 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-common.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-common.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_COMMON_H_
diff --git a/drivers/crypto/inside-secure/eip93/eip93-des.h b/drivers/crypto/inside-secure/eip93/eip93-des.h
index 74748df04acf..53ffe0f341b8 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-des.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-des.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_DES_H_
#define _EIP93_DES_H_
diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.c b/drivers/crypto/inside-secure/eip93/eip93-hash.c
index 2705855475b2..84d3ff2d3836 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-hash.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-hash.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2024
*
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#include <crypto/sha1.h>
diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.h b/drivers/crypto/inside-secure/eip93/eip93-hash.h
index 556f22fc1dd0..29da18d78894 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-hash.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-hash.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_HASH_H_
#define _EIP93_HASH_H_
diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.c b/drivers/crypto/inside-secure/eip93/eip93-main.c
index b7fd9795062d..7dccfdeb7b11 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-main.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-main.c
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#include <linux/atomic.h>
@@ -36,6 +36,14 @@ static struct eip93_alg_template *eip93_algs[] = {
&eip93_alg_cbc_aes,
&eip93_alg_ctr_aes,
&eip93_alg_rfc3686_aes,
+ &eip93_alg_md5,
+ &eip93_alg_sha1,
+ &eip93_alg_sha224,
+ &eip93_alg_sha256,
+ &eip93_alg_hmac_md5,
+ &eip93_alg_hmac_sha1,
+ &eip93_alg_hmac_sha224,
+ &eip93_alg_hmac_sha256,
&eip93_alg_authenc_hmac_md5_cbc_des,
&eip93_alg_authenc_hmac_sha1_cbc_des,
&eip93_alg_authenc_hmac_sha224_cbc_des,
@@ -52,14 +60,6 @@ static struct eip93_alg_template *eip93_algs[] = {
&eip93_alg_authenc_hmac_sha1_rfc3686_aes,
&eip93_alg_authenc_hmac_sha224_rfc3686_aes,
&eip93_alg_authenc_hmac_sha256_rfc3686_aes,
- &eip93_alg_md5,
- &eip93_alg_sha1,
- &eip93_alg_sha224,
- &eip93_alg_sha256,
- &eip93_alg_hmac_md5,
- &eip93_alg_hmac_sha1,
- &eip93_alg_hmac_sha224,
- &eip93_alg_hmac_sha256,
};
inline void eip93_irq_disable(struct eip93_device *eip93, u32 mask)
diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.h b/drivers/crypto/inside-secure/eip93/eip93-main.h
index 79b078f0e5da..990c2401b7ce 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-main.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-main.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_MAIN_H_
#define _EIP93_MAIN_H_
diff --git a/drivers/crypto/inside-secure/eip93/eip93-regs.h b/drivers/crypto/inside-secure/eip93/eip93-regs.h
index 0490b8d15131..96285ca6fbbe 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-regs.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-regs.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef REG_EIP93_H
#define REG_EIP93_H
@@ -109,7 +109,7 @@
#define EIP93_REG_PE_BUF_THRESH 0x10c
#define EIP93_PE_OUTBUF_THRESH GENMASK(23, 16)
#define EIP93_PE_INBUF_THRESH GENMASK(7, 0)
-#define EIP93_REG_PE_INBUF_COUNT 0x100
+#define EIP93_REG_PE_INBUF_COUNT 0x110
#define EIP93_REG_PE_OUTBUF_COUNT 0x114
#define EIP93_REG_PE_BUF_RW_PNTR 0x118 /* BUF_PNTR */
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 660f45ab8647..fb4936e7afa2 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1204,12 +1204,13 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_hmac_sha256,
&safexcel_alg_hmac_sha384,
&safexcel_alg_hmac_sha512,
+ &safexcel_alg_authenc_hmac_md5_cbc_aes,
&safexcel_alg_authenc_hmac_sha1_cbc_aes,
&safexcel_alg_authenc_hmac_sha224_cbc_aes,
&safexcel_alg_authenc_hmac_sha256_cbc_aes,
&safexcel_alg_authenc_hmac_sha384_cbc_aes,
&safexcel_alg_authenc_hmac_sha512_cbc_aes,
- &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_md5_ctr_aes,
&safexcel_alg_authenc_hmac_sha1_ctr_aes,
&safexcel_alg_authenc_hmac_sha224_ctr_aes,
&safexcel_alg_authenc_hmac_sha256_ctr_aes,
@@ -1241,11 +1242,14 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_hmac_sha3_256,
&safexcel_alg_hmac_sha3_384,
&safexcel_alg_hmac_sha3_512,
- &safexcel_alg_authenc_hmac_sha1_cbc_des,
+ &safexcel_alg_authenc_hmac_md5_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_md5_cbc_des,
+ &safexcel_alg_authenc_hmac_sha1_cbc_des,
&safexcel_alg_authenc_hmac_sha256_cbc_des,
&safexcel_alg_authenc_hmac_sha224_cbc_des,
&safexcel_alg_authenc_hmac_sha512_cbc_des,
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 0f27367a85fa..52fd460c0e9b 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -945,12 +945,13 @@ extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
extern struct safexcel_alg_template safexcel_alg_hmac_sha384;
extern struct safexcel_alg_template safexcel_alg_hmac_sha512;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes;
-extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes;
@@ -982,11 +983,14 @@ extern struct safexcel_alg_template safexcel_alg_hmac_sha3_224;
extern struct safexcel_alg_template safexcel_alg_hmac_sha3_256;
extern struct safexcel_alg_template safexcel_alg_hmac_sha3_384;
extern struct safexcel_alg_template safexcel_alg_hmac_sha3_512;
-extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des;
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 27b180057417..a8349b684693 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -17,6 +17,7 @@
#include <crypto/internal/des.h>
#include <crypto/gcm.h>
#include <crypto/ghash.h>
+#include <crypto/md5.h>
#include <crypto/poly1305.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
@@ -462,6 +463,9 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
/* Auth key */
switch (ctx->hash_alg) {
+ case CONTEXT_CONTROL_CRYPTO_ALG_MD5:
+ alg = "safexcel-md5";
+ break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
alg = "safexcel-sha1";
break;
@@ -1662,6 +1666,42 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static int safexcel_aead_md5_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
+ ctx->state_sz = MD5_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_MD5,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "safexcel-authenc-hmac-md5-cbc-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_md5_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1842,6 +1882,43 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
},
};
+static int safexcel_aead_md5_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_md5_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_MD5,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "safexcel-authenc-hmac-md5-cbc-des3_ede",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_md5_des3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2027,6 +2104,43 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = {
},
};
+static int safexcel_aead_md5_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_md5_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_MD5,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-md5-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_md5_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2212,6 +2326,41 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = {
},
};
+static int safexcel_aead_md5_ctr_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_md5_cra_init(tfm);
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_ctr_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_MD5,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
+ .cra_driver_name = "safexcel-authenc-hmac-md5-ctr-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_md5_ctr_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
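Taken together, the new templates fill out the MD5 column of the authenc matrix (CBC-AES, CBC-3DES, CBC-DES and RFC3686 CTR-AES), each reusing the shared setkey/encrypt/decrypt paths and differing only in the cra_init hook that seeds hash_alg, state_sz and the cipher parameters. A hedged caller sketch; the crypto core resolves the cra_name to the highest-priority implementation, which on this hardware is now the safexcel template:

	#include <crypto/aead.h>

	/* Hedged sketch: request the new AEAD by name. The authenc() key is a
	 * single blob - an rtattr carrying enckeylen, then the HMAC key, then
	 * the cipher key - which safexcel_aead_setkey() splits with
	 * crypto_authenc_extractkeys(). */
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("authenc(hmac(md5),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);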
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 547abf453d4a..f62b994e18e5 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -906,8 +906,8 @@ static void rebalance_wq_table(void)
return;
}
+ cpu = 0;
for_each_node_with_cpus(node) {
- cpu = 0;
node_cpus = cpumask_of_node(node);
for_each_cpu(node_cpu, node_cpus) {
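This one-line move is a real bug fix: with `cpu = 0` inside the node loop, the running index restarted for every NUMA node, so each node's pass overwrote the work-queue slots assigned for the previous node's CPUs. Hoisting it makes `cpu` a single monotonically increasing index across all nodes. A minimal sketch of the fixed shape (`wq_table_add` stands in for the driver's per-CPU assignment and is hypothetical):

	int cpu = 0;	/* one running index across every node */
	int node, node_cpu;

	for_each_node_with_cpus(node) {
		const struct cpumask *node_cpus = cpumask_of_node(node);

		for_each_cpu(node_cpu, node_cpus)
			wq_table_add(cpu++, node);	/* hypothetical helper */
	}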
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
index 59308926399d..e61a95f66a0c 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
@@ -230,12 +230,7 @@ static int kmb_ecc_point_mult(struct ocs_ecc_dev *ecc_dev,
int rc = 0;
/* Generate nbytes of random data for Simple and Differential SCA protection. */
- rc = crypto_get_default_rng();
- if (rc)
- return rc;
-
- rc = crypto_rng_get_bytes(crypto_default_rng, sca, nbytes);
- crypto_put_default_rng();
+ rc = crypto_stdrng_get_bytes(sca, nbytes);
if (rc)
return rc;
@@ -509,14 +504,10 @@ static int kmb_ecc_gen_privkey(const struct ecc_curve *curve, u64 *privkey)
* The maximum security strength identified by NIST SP800-57pt1r4 for
* ECC is 256 (N >= 512).
*
- * This condition is met by the default RNG because it selects a favored
- * DRBG with a security strength of 256.
+ * This condition is met by stdrng because it selects a favored DRBG
+ * with a security strength of 256.
*/
- if (crypto_get_default_rng())
- return -EFAULT;
-
- rc = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
- crypto_put_default_rng();
+ rc = crypto_stdrng_get_bytes(priv, nbytes);
if (rc)
goto cleanup;
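Both hunks collapse the get/use/put dance around the default RNG into the single crypto_stdrng_get_bytes() call this series introduces (see the API notes in the merge description). Judging from the code it replaces, the helper is roughly equivalent to the sketch below; the real implementation lives in the crypto core and may differ. The second hunk also improves error reporting: the old path returned a blanket -EFAULT when the RNG could not be instantiated, the new one propagates the actual error code.

	/* Hedged sketch of the sequence crypto_stdrng_get_bytes() replaces. */
	static int stdrng_get_bytes_legacy(u8 *buf, unsigned int len)
	{
		int rc = crypto_get_default_rng();

		if (rc)
			return rc;

		rc = crypto_rng_get_bytes(crypto_default_rng, buf, len);
		crypto_put_default_rng();
		return rc;
	}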
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 4b4861460dd4..9d6e6f52d2dc 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config CRYPTO_DEV_QAT
tristate
+ select CRYPTO_ACOMP
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
@@ -11,6 +12,7 @@ config CRYPTO_DEV_QAT
select CRYPTO_LIB_SHA1
select CRYPTO_LIB_SHA256
select CRYPTO_LIB_SHA512
+ select CRYPTO_ZSTD
select FW_LOADER
select CRC8
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 35105213d40c..19f9f738630b 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -97,9 +97,25 @@ static struct adf_hw_device_class adf_420xx_class = {
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
- u32 me_disable = self->fuses[ADF_FUSECTL4];
+ unsigned long fuses = self->fuses[ADF_FUSECTL4];
+ u32 mask = ADF_420XX_ACCELENGINES_MASK;
- return ~me_disable & ADF_420XX_ACCELENGINES_MASK;
+ if (test_bit(0, &fuses))
+ mask &= ~ADF_AE_GROUP_0;
+
+ if (test_bit(4, &fuses))
+ mask &= ~ADF_AE_GROUP_1;
+
+ if (test_bit(8, &fuses))
+ mask &= ~ADF_AE_GROUP_2;
+
+ if (test_bit(12, &fuses))
+ mask &= ~ADF_AE_GROUP_3;
+
+ if (test_bit(16, &fuses))
+ mask &= ~ADF_AE_GROUP_4;
+
+ return mask;
}
static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
@@ -472,6 +488,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->clock_frequency = ADF_420XX_AE_FREQ;
hw_data->services_supported = adf_gen4_services_supported;
hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;
+ hw_data->accel_capabilities_ext_mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
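Worked example for the new decoding: FUSECTL4 evidently carries one disable bit per accel-engine group (bits 0, 4, 8, 12 and 16 here), not one bit per engine as the old `~me_disable & mask` arithmetic assumed. A fuse value of 0x11, for instance, has bits 0 and 4 set, so ADF_AE_GROUP_0 and ADF_AE_GROUP_1 are stripped and the returned mask covers only groups 2-4. The 4xxx driver below applies the same decoding with three groups.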
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 740f68a36ac5..49b425be34c8 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -100,9 +100,19 @@ static struct adf_hw_device_class adf_4xxx_class = {
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
- u32 me_disable = self->fuses[ADF_FUSECTL4];
+ unsigned long fuses = self->fuses[ADF_FUSECTL4];
+ u32 mask = ADF_4XXX_ACCELENGINES_MASK;
- return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
+ if (test_bit(0, &fuses))
+ mask &= ~ADF_AE_GROUP_0;
+
+ if (test_bit(4, &fuses))
+ mask &= ~ADF_AE_GROUP_1;
+
+ if (test_bit(8, &fuses))
+ mask &= ~ADF_AE_GROUP_2;
+
+ return mask;
}
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
@@ -463,6 +473,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
hw_data->services_supported = adf_gen4_services_supported;
hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;
+ hw_data->accel_capabilities_ext_mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
index bed88d3ce8ca..205680797e2c 100644
--- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
@@ -33,6 +33,8 @@
#define ADF_AE_GROUP_1 GENMASK(7, 4)
#define ADF_AE_GROUP_2 BIT(8)
+#define ASB_MULTIPLIER 9
+
struct adf_ring_config {
u32 ring_mask;
enum adf_cfg_service_type ring_type;
@@ -82,10 +84,15 @@ static const unsigned long thrd_mask_dcpr[ADF_6XXX_MAX_ACCELENGINES] = {
0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x00
};
+static const unsigned long thrd_mask_wcy[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x00
+};
+
static const char *const adf_6xxx_fw_objs[] = {
[ADF_FW_CY_OBJ] = ADF_6XXX_CY_OBJ,
[ADF_FW_DC_OBJ] = ADF_6XXX_DC_OBJ,
[ADF_FW_ADMIN_OBJ] = ADF_6XXX_ADMIN_OBJ,
+ [ADF_FW_WCY_OBJ] = ADF_6XXX_WCY_OBJ,
};
static const struct adf_fw_config adf_default_fw_config[] = {
@@ -94,6 +101,12 @@ static const struct adf_fw_config adf_default_fw_config[] = {
{ ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ },
};
+static const struct adf_fw_config adf_wcy_fw_config[] = {
+ { ADF_AE_GROUP_1, ADF_FW_WCY_OBJ },
+ { ADF_AE_GROUP_0, ADF_FW_WCY_OBJ },
+ { ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ },
+};
+
static struct adf_hw_device_class adf_6xxx_class = {
.name = ADF_6XXX_DEVICE_NAME,
.type = DEV_6XXX,
@@ -118,6 +131,12 @@ static bool services_supported(unsigned long mask)
}
}
+static bool wcy_services_supported(unsigned long mask)
+{
+ /* The wireless SKU supports only the symmetric crypto service */
+ return mask == BIT(SVC_SYM);
+}
+
static int get_service(unsigned long *mask)
{
if (test_and_clear_bit(SVC_ASYM, mask))
@@ -155,8 +174,12 @@ static enum adf_cfg_service_type get_ring_type(unsigned int service)
}
}
-static const unsigned long *get_thrd_mask(unsigned int service)
+static const unsigned long *get_thrd_mask(struct adf_accel_dev *accel_dev,
+ unsigned int service)
{
+ if (adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)))
+ return (service == SVC_SYM) ? thrd_mask_wcy : NULL;
+
switch (service) {
case SVC_SYM:
return thrd_mask_sym;
@@ -194,7 +217,7 @@ static int get_rp_config(struct adf_accel_dev *accel_dev, struct adf_ring_config
return service;
rp_config[i].ring_type = get_ring_type(service);
- rp_config[i].thrd_mask = get_thrd_mask(service);
+ rp_config[i].thrd_mask = get_thrd_mask(accel_dev, service);
/*
* If there is only one service enabled, use all ring pairs for
@@ -386,6 +409,8 @@ static void set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTCNVL_OFFSET, ADF_SSMWDTCNVH_OFFSET, val);
ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTUCSL_OFFSET, ADF_SSMWDTUCSH_OFFSET, val);
ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTDCPRL_OFFSET, ADF_SSMWDTDCPRH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTWCPL_OFFSET, ADF_SSMWDTWCPH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTWATL_OFFSET, ADF_SSMWDTWATH_OFFSET, val);
/* Enable watchdog timer for pke */
ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET, val_pke);
@@ -439,6 +464,21 @@ static int reset_ring_pair(void __iomem *csr, u32 bank_number)
return 0;
}
+static bool adf_anti_rb_enabled(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+
+ return !!(hw_data->fuses[0] & ADF_GEN6_ANTI_RB_FUSE_BIT);
+}
+
+static void adf_gen6_init_anti_rb(struct adf_anti_rb_hw_data *anti_rb_data)
+{
+ anti_rb_data->anti_rb_enabled = adf_anti_rb_enabled;
+ anti_rb_data->svncheck_offset = ADF_GEN6_SVNCHECK_CSR_MSG;
+ anti_rb_data->svncheck_retry = 0;
+ anti_rb_data->sysfs_added = false;
+}
+
static int ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
@@ -471,6 +511,9 @@ static int build_comp_block(void *ctx, enum adf_dc_algo algo)
case QAT_DEFLATE:
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
break;
+ case QAT_ZSTD:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_ZSTD_COMPRESS;
+ break;
default:
return -EINVAL;
}
@@ -481,6 +524,13 @@ static int build_comp_block(void *ctx, enum adf_dc_algo algo)
cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+ /*
+ * Store Auto Select Best (ASB) multiplier in the request template.
+ * This will be used in the data path to set the actual threshold
+ * value based on the input data size.
+ */
+ req_tmpl->u3.asb_threshold.asb_value = ASB_MULTIPLIER;
+
return 0;
}
@@ -494,12 +544,16 @@ static int build_decomp_block(void *ctx, enum adf_dc_algo algo)
case QAT_DEFLATE:
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
break;
+ case QAT_ZSTD:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_ZSTD_DECOMPRESS;
+ break;
default:
return -EINVAL;
}
cd_pars->u.sl.comp_slice_cfg_word[0] = 0;
cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+ req_tmpl->u3.asb_threshold.asb_value = 0;
return 0;
}
@@ -631,6 +685,12 @@ static int adf_gen6_set_vc(struct adf_accel_dev *accel_dev)
return set_vc_config(accel_dev);
}
+static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
+{
+ return adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)) ? adf_wcy_fw_config :
+ adf_default_fw_config;
+}
+
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
unsigned long fuses = self->fuses[ADF_FUSECTL4];
@@ -653,6 +713,38 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return mask;
}
+static u32 get_accel_cap_wcy(struct adf_accel_dev *accel_dev)
+{
+ u32 capabilities_sym;
+ u32 fuse;
+
+ fuse = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1];
+
+ capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT |
+ ICP_ACCEL_CAPABILITIES_5G |
+ ICP_ACCEL_CAPABILITIES_ZUC |
+ ICP_ACCEL_CAPABILITIES_ZUC_256 |
+ ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;
+
+ if (fuse & ICP_ACCEL_GEN6_MASK_EIA3_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+ }
+ if (fuse & ICP_ACCEL_GEN6_MASK_ZUC_256_SLICE)
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+
+ if (fuse & ICP_ACCEL_GEN6_MASK_5G_SLICE)
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_5G;
+
+ if (adf_get_service_enabled(accel_dev) == SVC_SYM)
+ return capabilities_sym;
+
+ return 0;
+}
+
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
u32 capabilities_sym, capabilities_asym;
@@ -661,6 +753,9 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
u32 caps = 0;
u32 fusectl1;
+ if (adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)))
+ return get_accel_cap_wcy(accel_dev);
+
fusectl1 = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1];
/* Read accelerator capabilities mask */
@@ -733,15 +828,19 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
{
- return ARRAY_SIZE(adf_default_fw_config);
+ return adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)) ?
+ ARRAY_SIZE(adf_wcy_fw_config) :
+ ARRAY_SIZE(adf_default_fw_config);
}
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
{
int num_fw_objs = ARRAY_SIZE(adf_6xxx_fw_objs);
+ const struct adf_fw_config *fw_config;
int id;
- id = adf_default_fw_config[obj_num].obj;
+ fw_config = get_fw_config(accel_dev);
+ id = fw_config[obj_num].obj;
if (id >= num_fw_objs)
return NULL;
@@ -755,15 +854,22 @@ static const char *uof_get_name_6xxx(struct adf_accel_dev *accel_dev, u32 obj_nu
static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
{
+ const struct adf_fw_config *fw_config;
+
if (obj_num >= uof_get_num_objs(accel_dev))
return -EINVAL;
- return adf_default_fw_config[obj_num].obj;
+ fw_config = get_fw_config(accel_dev);
+
+ return fw_config[obj_num].obj;
}
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
- return adf_default_fw_config[obj_num].ae_mask;
+ const struct adf_fw_config *fw_config;
+
+ fw_config = get_fw_config(accel_dev);
+ return fw_config[obj_num].ae_mask;
}
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
@@ -873,6 +979,14 @@ static void adf_gen6_init_rl_data(struct adf_rl_hw_data *rl_data)
init_num_svc_aes(rl_data);
}
+static void adf_gen6_init_services_supported(struct adf_hw_device_data *hw_data)
+{
+ if (adf_6xxx_is_wcy(hw_data))
+ hw_data->services_supported = wcy_services_supported;
+ else
+ hw_data->services_supported = services_supported;
+}
+
void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &adf_6xxx_class;
@@ -929,11 +1043,12 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
hw_data->stop_timer = adf_timer_stop;
hw_data->init_device = adf_init_device;
hw_data->enable_pm = enable_pm;
- hw_data->services_supported = services_supported;
hw_data->num_rps = ADF_GEN6_ETR_MAX_BANKS;
hw_data->clock_frequency = ADF_6XXX_AE_FREQ;
hw_data->get_svc_slice_cnt = adf_gen6_get_svc_slice_cnt;
+ hw_data->accel_capabilities_ext_mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD;
+ adf_gen6_init_services_supported(hw_data);
adf_gen6_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen6_init_dc_ops(&hw_data->dc_ops);
@@ -941,6 +1056,7 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
adf_gen6_init_ras_ops(&hw_data->ras_ops);
adf_gen6_init_tl_data(&hw_data->tl_data);
adf_gen6_init_rl_data(&hw_data->rl_data);
+ adf_gen6_init_anti_rb(&hw_data->anti_rb_data);
}
void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
index d822911fe68c..e4d433bdd379 100644
--- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
@@ -53,6 +53,12 @@
#define ADF_GEN6_ADMINMSGLR_OFFSET 0x500578
#define ADF_GEN6_MAILBOX_BASE_OFFSET 0x600970
+/* Anti-rollback */
+#define ADF_GEN6_SVNCHECK_CSR_MSG 0x640004
+
+/* Fuse bits */
+#define ADF_GEN6_ANTI_RB_FUSE_BIT BIT(24)
+
/*
* Watchdog timers
* Timeout is in cycles. Clock speed may vary across products but this
@@ -64,10 +70,14 @@
#define ADF_SSMWDTATHH_OFFSET 0x520C
#define ADF_SSMWDTCNVL_OFFSET 0x5408
#define ADF_SSMWDTCNVH_OFFSET 0x540C
+#define ADF_SSMWDTWCPL_OFFSET 0x5608
+#define ADF_SSMWDTWCPH_OFFSET 0x560C
#define ADF_SSMWDTUCSL_OFFSET 0x5808
#define ADF_SSMWDTUCSH_OFFSET 0x580C
#define ADF_SSMWDTDCPRL_OFFSET 0x5A08
#define ADF_SSMWDTDCPRH_OFFSET 0x5A0C
+#define ADF_SSMWDTWATL_OFFSET 0x5C08
+#define ADF_SSMWDTWATH_OFFSET 0x5C0C
#define ADF_SSMWDTPKEL_OFFSET 0x5E08
#define ADF_SSMWDTPKEH_OFFSET 0x5E0C
@@ -139,6 +149,7 @@
#define ADF_6XXX_CY_OBJ "qat_6xxx_cy.bin"
#define ADF_6XXX_DC_OBJ "qat_6xxx_dc.bin"
#define ADF_6XXX_ADMIN_OBJ "qat_6xxx_admin.bin"
+#define ADF_6XXX_WCY_OBJ "qat_6xxx_wcy.bin"
/* RL constants */
#define ADF_6XXX_RL_PCIE_SCALE_FACTOR_DIV 100
@@ -159,9 +170,18 @@ enum icp_qat_gen6_slice_mask {
ICP_ACCEL_GEN6_MASK_PKE_SLICE = BIT(2),
ICP_ACCEL_GEN6_MASK_CPR_SLICE = BIT(3),
ICP_ACCEL_GEN6_MASK_DCPRZ_SLICE = BIT(4),
+ ICP_ACCEL_GEN6_MASK_EIA3_SLICE = BIT(5),
ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE = BIT(6),
+ ICP_ACCEL_GEN6_MASK_ZUC_256_SLICE = BIT(7),
+ ICP_ACCEL_GEN6_MASK_5G_SLICE = BIT(8),
};
+/* Return true if the device is a wireless crypto (WCY) SKU */
+static inline bool adf_6xxx_is_wcy(struct adf_hw_device_data *hw_data)
+{
+ return !(hw_data->fuses[ADF_FUSECTL1] & ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE);
+}
+
void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data);
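Note the polarity of adf_6xxx_is_wcy(): FUSECTL1 bits mark slices as fused off, so a clear ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE bit means the wireless crypto/authentication slices are present, i.e. the part is a wireless (WCY) SKU. That is exactly the condition the removed probe check used to reject with "Wireless mode is not supported."; the driver now loads the qat_6xxx_wcy.bin firmware objects for those parts and restricts the service mask to SVC_SYM instead.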
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c
index c1dc9c56fdf5..c52462a48c34 100644
--- a/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c
@@ -16,6 +16,7 @@
#include "adf_gen6_shared.h"
#include "adf_6xxx_hw_data.h"
+#include "adf_heartbeat.h"
static int bar_map[] = {
0, /* SRAM */
@@ -53,6 +54,35 @@ static void adf_devmgr_remove(void *accel_dev)
adf_devmgr_rm_dev(accel_dev, NULL);
}
+static int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev)
+{
+ const char *config;
+ int ret;
+
+ /*
+ * Wireless SKU - symmetric crypto service only
+ * Non-wireless SKU - crypto service for even devices and compression for odd devices
+ */
+ if (adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)))
+ config = ADF_CFG_SYM;
+ else
+ config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ return ret;
+
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, config,
+ ADF_STR);
+ if (ret)
+ return ret;
+
+ adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);
+
+ return 0;
+}
+
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_pci *accel_pci_dev;
@@ -91,9 +121,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_read_config_dword(pdev, ADF_GEN6_FUSECTL0_OFFSET, &hw_data->fuses[ADF_FUSECTL0]);
pci_read_config_dword(pdev, ADF_GEN6_FUSECTL1_OFFSET, &hw_data->fuses[ADF_FUSECTL1]);
- if (!(hw_data->fuses[ADF_FUSECTL1] & ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE))
- return dev_err_probe(dev, -EFAULT, "Wireless mode is not supported.\n");
-
/* Enable PCI device */
ret = pcim_enable_device(pdev);
if (ret)
@@ -182,8 +209,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
ret = adf_dev_up(accel_dev, true);
- if (ret)
+ if (ret) {
+ adf_dev_down(accel_dev);
return ret;
+ }
ret = devm_add_action_or_reset(dev, adf_device_down, accel_dev);
if (ret)
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 89845754841b..9478111c8437 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -4,6 +4,7 @@ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"'
intel_qat-y := adf_accel_engine.o \
adf_admin.o \
adf_aer.o \
+ adf_anti_rb.o \
adf_bank_state.o \
adf_cfg.o \
adf_cfg_services.o \
@@ -29,6 +30,7 @@ intel_qat-y := adf_accel_engine.o \
adf_rl_admin.o \
adf_rl.o \
adf_sysfs.o \
+ adf_sysfs_anti_rb.o \
adf_sysfs_ras_counters.o \
adf_sysfs_rl.o \
adf_timer.o \
@@ -39,6 +41,7 @@ intel_qat-y := adf_accel_engine.o \
qat_bl.o \
qat_comp_algs.o \
qat_compression.o \
+ qat_comp_zstd_utils.o \
qat_crypto.o \
qat_hal.o \
qat_mig_dev.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 9fe3239f0114..03a4e9690208 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/qat/qat_mig_dev.h>
#include <linux/wordpart.h>
+#include "adf_anti_rb.h"
#include "adf_cfg_common.h"
#include "adf_dc.h"
#include "adf_rl.h"
@@ -58,6 +59,11 @@ enum adf_accel_capabilities {
ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};
+enum adf_accel_capabilities_ext {
+ ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S = BIT(0),
+ ADF_ACCEL_CAPABILITIES_EXT_ZSTD = BIT(1),
+};
+
enum adf_fuses {
ADF_FUSECTL0,
ADF_FUSECTL1,
@@ -328,12 +334,14 @@ struct adf_hw_device_data {
struct adf_dev_err_mask dev_err_mask;
struct adf_rl_hw_data rl_data;
struct adf_tl_hw_data tl_data;
+ struct adf_anti_rb_hw_data anti_rb_data;
struct qat_migdev_ops vfmig_ops;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses[ADF_MAX_FUSES];
u32 straps;
u32 accel_capabilities_mask;
+ u32 accel_capabilities_ext_mask;
u32 extended_dc_capabilities;
u16 fw_capabilities;
u32 clock_frequency;
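The extended mask keeps the two zstd flavours distinct: the GEN4 parts (420xx/4xxx) advertise ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S, where zstd output is apparently assembled in software from hardware LZ4S, while GEN6 advertises ADF_ACCEL_CAPABILITIES_EXT_ZSTD backed by native zstd firmware commands. A hedged consumer sketch (`qat_register_zstd_alg` is a hypothetical helper):

	static void maybe_register_zstd(struct adf_hw_device_data *hw_data)
	{
		u32 ext = hw_data->accel_capabilities_ext_mask;

		if (ext & (ADF_ACCEL_CAPABILITIES_EXT_ZSTD |
			   ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S))
			qat_register_zstd_alg();	/* hypothetical helper */
	}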
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
index f9f1018a2823..09d4f547e082 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include "adf_cfg.h"
@@ -162,8 +163,14 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev)
static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ unsigned long reset_delay;
qat_hal_reset(loader_data->fw_loader);
+
+ reset_delay = loader_data->fw_loader->chip_info->reset_delay_us;
+ if (reset_delay)
+ fsleep(reset_delay);
+
if (qat_hal_clr_reset(loader_data->fw_loader))
return -EFAULT;
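fsleep() picks the appropriate primitive (udelay, usleep_range or msleep) from the requested duration, so reset_delay_us can scale from a few microseconds up to milliseconds without the caller caring; chips whose chip_info leaves reset_delay_us at 0 keep the old no-delay behaviour.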
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index 573388c37100..841aa802c79e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -6,8 +6,10 @@
#include <linux/iopoll.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
+#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_admin.h"
+#include "adf_anti_rb.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_heartbeat.h"
@@ -19,6 +21,7 @@
#define ADF_ADMIN_POLL_DELAY_US 20
#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
#define ADF_ONE_AE 1
+#define ADF_ADMIN_RETRY_MAX 60
static const u8 const_tab[1024] __aligned(1024) = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -536,6 +539,73 @@ int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev)
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
+static int adf_send_admin_retry(struct adf_accel_dev *accel_dev, u8 cmd_id,
+ struct icp_qat_fw_init_admin_resp *resp,
+ unsigned int sleep_ms)
+{
+ u32 admin_ae_mask = GET_HW_DATA(accel_dev)->admin_ae_mask;
+ struct icp_qat_fw_init_admin_req req = { };
+ unsigned int retries = ADF_ADMIN_RETRY_MAX;
+ int ret;
+
+ req.cmd_id = cmd_id;
+
+ do {
+ ret = adf_send_admin(accel_dev, &req, resp, admin_ae_mask);
+ if (!ret)
+ return 0;
+
+ if (resp->status != ICP_QAT_FW_INIT_RESP_STATUS_RETRY)
+ return ret;
+
+ msleep(sleep_ms);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+}
+
+static int adf_send_admin_svn(struct adf_accel_dev *accel_dev, u8 cmd_id,
+ struct icp_qat_fw_init_admin_resp *resp)
+{
+ return adf_send_admin_retry(accel_dev, cmd_id, resp, ADF_SVN_RETRY_MS);
+}
+
+int adf_send_admin_arb_query(struct adf_accel_dev *accel_dev, int cmd, u8 *svn)
+{
+ struct icp_qat_fw_init_admin_resp resp = { };
+ int ret;
+
+ ret = adf_send_admin_svn(accel_dev, ICP_QAT_FW_SVN_READ, &resp);
+ if (ret)
+ return ret;
+
+ switch (cmd) {
+ case ARB_ENFORCED_MIN_SVN:
+ *svn = resp.enforced_min_svn;
+ break;
+ case ARB_PERMANENT_MIN_SVN:
+ *svn = resp.permanent_min_svn;
+ break;
+ case ARB_ACTIVE_SVN:
+ *svn = resp.active_svn;
+ break;
+ default:
+ *svn = 0;
+ dev_err(&GET_DEV(accel_dev),
+ "Unknown secure version number request\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int adf_send_admin_arb_commit(struct adf_accel_dev *accel_dev)
+{
+ struct icp_qat_fw_init_admin_resp resp = { };
+
+ return adf_send_admin_svn(accel_dev, ICP_QAT_FW_SVN_COMMIT, &resp);
+}
+
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
struct adf_admin_comms *admin;
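The retry loop above re-sends the command while the firmware answers ICP_QAT_FW_INIT_RESP_STATUS_RETRY, giving up after ADF_ADMIN_RETRY_MAX (60) attempts; with the ADF_SVN_RETRY_MS (250 ms) spacing used for the SVN commands that is a worst case of 15 seconds. A hedged usage sketch for the query side:

	/* Hedged sketch: read the firmware's active security version number. */
	u8 svn;
	int ret;

	ret = adf_send_admin_arb_query(accel_dev, ARB_ACTIVE_SVN, &svn);
	if (ret)
		return ret;

	dev_info(&GET_DEV(accel_dev), "active SVN: %u\n", svn);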
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h b/drivers/crypto/intel/qat/qat_common/adf_admin.h
index 647c8e196752..9704219f2eb7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h
@@ -27,5 +27,7 @@ int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev,
dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes,
struct icp_qat_fw_init_admin_slice_cnt *slice_count);
int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev);
+int adf_send_admin_arb_query(struct adf_accel_dev *accel_dev, int cmd, u8 *svn);
+int adf_send_admin_arb_commit(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_anti_rb.c b/drivers/crypto/intel/qat/qat_common/adf_anti_rb.c
new file mode 100644
index 000000000000..2c19a82d89ad
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_anti_rb.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2026 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kstrtox.h>
+
+#include "adf_accel_devices.h"
+#include "adf_admin.h"
+#include "adf_anti_rb.h"
+#include "adf_common_drv.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define ADF_SVN_RETRY_MAX 60
+
+int adf_anti_rb_commit(struct adf_accel_dev *accel_dev)
+{
+ return adf_send_admin_arb_commit(accel_dev);
+}
+
+int adf_anti_rb_query(struct adf_accel_dev *accel_dev, enum anti_rb cmd, u8 *svn)
+{
+ return adf_send_admin_arb_query(accel_dev, cmd, svn);
+}
+
+int adf_anti_rb_check(struct pci_dev *pdev)
+{
+ struct adf_anti_rb_hw_data *anti_rb;
+ u32 svncheck_sts, cfc_svncheck_sts;
+ struct adf_accel_dev *accel_dev;
+ void __iomem *pmisc_addr;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+ if (!accel_dev)
+ return -EINVAL;
+
+ anti_rb = GET_ANTI_RB_DATA(accel_dev);
+ if (!anti_rb->anti_rb_enabled || !anti_rb->anti_rb_enabled(accel_dev))
+ return 0;
+
+ pmisc_addr = adf_get_pmisc_base(accel_dev);
+
+ cfc_svncheck_sts = ADF_CSR_RD(pmisc_addr, anti_rb->svncheck_offset);
+
+ svncheck_sts = FIELD_GET(ADF_SVN_STS_MASK, cfc_svncheck_sts);
+ switch (svncheck_sts) {
+ case ADF_SVN_NO_STS:
+ return 0;
+ case ADF_SVN_PASS_STS:
+ anti_rb->svncheck_retry = 0;
+ return 0;
+ case ADF_SVN_FAIL_STS:
+ dev_err(&GET_DEV(accel_dev), "Security Version Number failure\n");
+ return -EIO;
+ case ADF_SVN_RETRY_STS:
+ if (anti_rb->svncheck_retry++ >= ADF_SVN_RETRY_MAX) {
+ anti_rb->svncheck_retry = 0;
+ return -ETIMEDOUT;
+ }
+ msleep(ADF_SVN_RETRY_MS);
+ return -EAGAIN;
+ default:
+ dev_err(&GET_DEV(accel_dev), "Invalid SVN check status\n");
+ return -EINVAL;
+ }
+}
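adf_anti_rb_check() returns -EAGAIN for the retry status after sleeping ADF_SVN_RETRY_MS itself, leaving the loop to the caller; because svncheck_retry persists in the hw data, the retry budget (ADF_SVN_RETRY_MAX x 250 ms, i.e. 15 s) is enforced across calls. A minimal hedged caller:

	/* Hedged caller sketch: poll until the SVN check settles. */
	int ret;

	do {
		ret = adf_anti_rb_check(pdev);
	} while (ret == -EAGAIN);

	return ret;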
diff --git a/drivers/crypto/intel/qat/qat_common/adf_anti_rb.h b/drivers/crypto/intel/qat/qat_common/adf_anti_rb.h
new file mode 100644
index 000000000000..531af41a3db8
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_anti_rb.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2026 Intel Corporation */
+#ifndef ADF_ANTI_RB_H_
+#define ADF_ANTI_RB_H_
+
+#include <linux/types.h>
+
+#define GET_ANTI_RB_DATA(accel_dev) (&(accel_dev)->hw_device->anti_rb_data)
+
+#define ADF_SVN_NO_STS 0x00
+#define ADF_SVN_PASS_STS 0x01
+#define ADF_SVN_RETRY_STS 0x02
+#define ADF_SVN_FAIL_STS 0x03
+#define ADF_SVN_RETRY_MS 250
+#define ADF_SVN_STS_MASK GENMASK(7, 0)
+
+enum anti_rb {
+ ARB_ENFORCED_MIN_SVN,
+ ARB_PERMANENT_MIN_SVN,
+ ARB_ACTIVE_SVN,
+};
+
+struct adf_accel_dev;
+struct pci_dev;
+
+struct adf_anti_rb_hw_data {
+ bool (*anti_rb_enabled)(struct adf_accel_dev *accel_dev);
+ u32 svncheck_offset;
+ u32 svncheck_retry;
+ bool sysfs_added;
+};
+
+int adf_anti_rb_commit(struct adf_accel_dev *accel_dev);
+int adf_anti_rb_query(struct adf_accel_dev *accel_dev, enum anti_rb cmd, u8 *svn);
+int adf_anti_rb_check(struct pci_dev *pdev);
+
+#endif /* ADF_ANTI_RB_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index 6cf3a95489e8..7b8b295ac459 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -111,12 +111,12 @@ void qat_algs_unregister(void);
int qat_asym_algs_register(void);
void qat_asym_algs_unregister(void);
-struct qat_compression_instance *qat_compression_get_instance_node(int node);
+struct qat_compression_instance *qat_compression_get_instance_node(int node, int alg);
void qat_compression_put_instance(struct qat_compression_instance *inst);
int qat_compression_register(void);
int qat_compression_unregister(void);
-int qat_comp_algs_register(void);
-void qat_comp_algs_unregister(void);
+int qat_comp_algs_register(u32 caps);
+void qat_comp_algs_unregister(u32 caps);
void qat_comp_alg_callback(void *resp);
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
index 78957fa900b7..d5c578e3fd8d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
@@ -9,6 +9,7 @@ enum adf_fw_objs {
ADF_FW_DC_OBJ,
ADF_FW_ADMIN_OBJ,
ADF_FW_CY_OBJ,
+ ADF_FW_WCY_OBJ,
};
struct adf_fw_config {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 349fdb323763..f4a58f04071a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -504,14 +504,20 @@ static int adf_gen4_build_comp_block(void *ctx, enum adf_dc_algo algo)
switch (algo) {
case QAT_DEFLATE:
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
+ hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
+ hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
+ break;
+ case QAT_LZ4S:
+ header->service_cmd_id = ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS;
+ hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4S;
+ hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED;
+ hw_comp_lower_csr.abd = ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED;
break;
default:
return -EINVAL;
}
- hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
- hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
- hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
@@ -538,12 +544,16 @@ static int adf_gen4_build_decomp_block(void *ctx, enum adf_dc_algo algo)
switch (algo) {
case QAT_DEFLATE:
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
+ break;
+ case QAT_LZ4S:
+ header->service_cmd_id = ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS;
+ hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4S;
break;
default:
return -EINVAL;
}
- hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
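The restructuring moves exactly the CSR fields that differ per algorithm into the switch: DEFLATE keeps the ILZ77 format with LLLBD enabled and the 3-byte-literal skip control, while LZ4S selects the LZ4S format and disables the LLLBD and ABD controls, presumably because they do not apply to a raw LZ4S sequence stream. Everything after the switch (search depth, hash-update policy, delayed-match mode) remains common to both commands, and the decompression hunk below mirrors the same per-algorithm split.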
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
index c9b151006dca..ffe4525a1e69 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
@@ -31,12 +31,6 @@ void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
}
EXPORT_SYMBOL_GPL(adf_gen6_init_hw_csr_ops);
-int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev)
-{
- return adf_gen4_cfg_dev_init(accel_dev);
-}
-EXPORT_SYMBOL_GPL(adf_gen6_cfg_dev_init);
-
int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev)
{
return adf_comp_dev_config(accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
index fc6fad029a70..072115a531e4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
@@ -10,7 +10,6 @@ struct adf_pfvf_ops;
void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
-int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev);
int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev);
int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev);
void adf_gen6_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index 46491048e0bb..f8088388cf12 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -10,6 +10,7 @@
#include "adf_dbgfs.h"
#include "adf_heartbeat.h"
#include "adf_rl.h"
+#include "adf_sysfs_anti_rb.h"
#include "adf_sysfs_ras_counters.h"
#include "adf_telemetry.h"
@@ -179,6 +180,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
+ u32 caps;
int ret;
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -252,7 +254,8 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
}
set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
- if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
+ caps = hw_data->accel_capabilities_ext_mask;
+ if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register(caps)) {
dev_err(&GET_DEV(accel_dev),
"Failed to register compression algs\n");
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -263,6 +266,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
adf_dbgfs_add(accel_dev);
adf_sysfs_start_ras(accel_dev);
+ adf_sysfs_start_arb(accel_dev);
return 0;
}
@@ -292,6 +296,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
adf_rl_stop(accel_dev);
adf_dbgfs_rm(accel_dev);
adf_sysfs_stop_ras(accel_dev);
+ adf_sysfs_stop_arb(accel_dev);
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
@@ -305,7 +310,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
if (!list_empty(&accel_dev->compression_list) &&
test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status))
- qat_comp_algs_unregister();
+ qat_comp_algs_unregister(hw_data->accel_capabilities_ext_mask);
clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
list_for_each_entry(service, &service_table, list) {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.c
new file mode 100644
index 000000000000..789341ad1bdc
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2026 Intel Corporation */
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include "adf_anti_rb.h"
+#include "adf_common_drv.h"
+#include "adf_sysfs_anti_rb.h"
+
+static ssize_t enforced_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adf_accel_dev *accel_dev;
+ int err;
+ u8 svn;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ err = adf_anti_rb_query(accel_dev, ARB_ENFORCED_MIN_SVN, &svn);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%u\n", svn);
+}
+static DEVICE_ATTR_RO(enforced_min);
+
+static ssize_t active_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adf_accel_dev *accel_dev;
+ int err;
+ u8 svn;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ err = adf_anti_rb_query(accel_dev, ARB_ACTIVE_SVN, &svn);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%u\n", svn);
+}
+static DEVICE_ATTR_RO(active);
+
+static ssize_t permanent_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adf_accel_dev *accel_dev;
+ int err;
+ u8 svn;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ err = adf_anti_rb_query(accel_dev, ARB_PERMANENT_MIN_SVN, &svn);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%u\n", svn);
+}
+static DEVICE_ATTR_RO(permanent_min);
+
+static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adf_accel_dev *accel_dev;
+ bool val;
+ int err;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ err = kstrtobool(buf, &val);
+ if (err)
+ return err;
+
+ if (!val)
+ return -EINVAL;
+
+ err = adf_anti_rb_commit(accel_dev);
+ if (err)
+ return err;
+
+ return count;
+}
+static DEVICE_ATTR_WO(commit);
+
+static struct attribute *qat_svn_attrs[] = {
+ &dev_attr_commit.attr,
+ &dev_attr_active.attr,
+ &dev_attr_enforced_min.attr,
+ &dev_attr_permanent_min.attr,
+ NULL
+};
+
+static const struct attribute_group qat_svn_group = {
+ .attrs = qat_svn_attrs,
+ .name = "qat_svn",
+};
+
+void adf_sysfs_start_arb(struct adf_accel_dev *accel_dev)
+{
+ struct adf_anti_rb_hw_data *anti_rb = GET_ANTI_RB_DATA(accel_dev);
+
+ if (!anti_rb->anti_rb_enabled || !anti_rb->anti_rb_enabled(accel_dev))
+ return;
+
+ if (device_add_group(&GET_DEV(accel_dev), &qat_svn_group)) {
+ dev_warn(&GET_DEV(accel_dev),
+ "Failed to create qat_svn attribute group\n");
+ return;
+ }
+
+ anti_rb->sysfs_added = true;
+}
+
+void adf_sysfs_stop_arb(struct adf_accel_dev *accel_dev)
+{
+ struct adf_anti_rb_hw_data *anti_rb = GET_ANTI_RB_DATA(accel_dev);
+
+ if (!anti_rb->sysfs_added)
+ return;
+
+ device_remove_group(&GET_DEV(accel_dev), &qat_svn_group);
+
+ anti_rb->sysfs_added = false;
+ anti_rb->svncheck_retry = 0;
+}
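Assuming the standard PCI sysfs layout, the group surfaces as /sys/bus/pci/devices/<BDF>/qat_svn/ with read-only active, enforced_min and permanent_min attributes and a write-only commit attribute. commit_store() only accepts a true boolean, so writing 1 to commit issues ICP_QAT_FW_SVN_COMMIT while writing 0 is rejected with -EINVAL rather than silently doing nothing, and the group is only created at all when the anti-rollback fuse reports the feature enabled.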
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.h b/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.h
new file mode 100644
index 000000000000..f0c2b6e464f7
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2026 Intel Corporation */
+#ifndef ADF_SYSFS_ANTI_RB_H_
+#define ADF_SYSFS_ANTI_RB_H_
+
+struct adf_accel_dev;
+
+void adf_sysfs_start_arb(struct adf_accel_dev *accel_dev);
+void adf_sysfs_stop_arb(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_SYSFS_ANTI_RB_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
index e97c67c87b3c..ef1420199210 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
@@ -13,14 +13,14 @@ static ssize_t errors_correctable_show(struct device *dev,
char *buf)
{
struct adf_accel_dev *accel_dev;
- unsigned long counter;
+ int counter;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_CORR);
- return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
+ return sysfs_emit(buf, "%d\n", counter);
}
static ssize_t errors_nonfatal_show(struct device *dev,
@@ -28,14 +28,14 @@ static ssize_t errors_nonfatal_show(struct device *dev,
char *buf)
{
struct adf_accel_dev *accel_dev;
- unsigned long counter;
+ int counter;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_UNCORR);
- return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
+ return sysfs_emit(buf, "%d\n", counter);
}
static ssize_t errors_fatal_show(struct device *dev,
@@ -43,14 +43,14 @@ static ssize_t errors_fatal_show(struct device *dev,
char *buf)
{
struct adf_accel_dev *accel_dev;
- unsigned long counter;
+ int counter;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_FATAL);
- return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
+ return sysfs_emit(buf, "%d\n", counter);
}
static ssize_t reset_error_counters_store(struct device *dev,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
index f31556beed8b..89bfd8761d75 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
@@ -321,7 +321,7 @@ static ssize_t cap_rem_show(struct device *dev, struct device_attribute *attr,
{
struct adf_rl_interface_data *data;
struct adf_accel_dev *accel_dev;
- int ret, rem_cap;
+ int rem_cap;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
@@ -336,23 +336,19 @@ static ssize_t cap_rem_show(struct device *dev, struct device_attribute *attr,
if (rem_cap < 0)
return rem_cap;
- ret = sysfs_emit(buf, "%u\n", rem_cap);
-
- return ret;
+ return sysfs_emit(buf, "%u\n", rem_cap);
}
static ssize_t cap_rem_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- unsigned int val;
int ret;
ret = sysfs_match_string(rl_services, buf);
if (ret < 0)
return ret;
- val = ret;
- ret = set_param_u(dev, CAP_REM_SRV, val);
+ ret = set_param_u(dev, CAP_REM_SRV, ret);
if (ret)
return ret;
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
index c141160421e1..2fea30a78340 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
@@ -151,6 +151,13 @@ struct icp_qat_fw_comn_resp {
ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
ICP_QAT_FW_COMN_CNV_FLAG_MASK)
+#define ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS 4
+#define ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK)
+
#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_SET(hdr_t, val) \
QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
index 81969c515a17..2526053ee630 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
@@ -8,6 +8,8 @@ enum icp_qat_fw_comp_cmd_id {
ICP_QAT_FW_COMP_CMD_STATIC = 0,
ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
+ ICP_QAT_FW_COMP_CMD_ZSTD_COMPRESS = 10,
+ ICP_QAT_FW_COMP_CMD_ZSTD_DECOMPRESS = 11,
ICP_QAT_FW_COMP_CMD_DELIMITER
};
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
index 63cf18e2a4e5..6b0f0d100cb9 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
@@ -31,11 +31,15 @@ enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_RL_REMOVE = 136,
ICP_QAT_FW_TL_START = 137,
ICP_QAT_FW_TL_STOP = 138,
+ ICP_QAT_FW_SVN_READ = 146,
+ ICP_QAT_FW_SVN_COMMIT = 147,
};
enum icp_qat_fw_init_admin_resp_status {
ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
- ICP_QAT_FW_INIT_RESP_STATUS_FAIL
+ ICP_QAT_FW_INIT_RESP_STATUS_FAIL = 1,
+ ICP_QAT_FW_INIT_RESP_STATUS_RETRY = 2,
+ ICP_QAT_FW_INIT_RESP_STATUS_UNSUPPORTED = 4,
};
struct icp_qat_fw_init_admin_tl_rp_indexes {
@@ -159,6 +163,15 @@ struct icp_qat_fw_init_admin_resp {
};
struct icp_qat_fw_init_admin_slice_cnt slices;
__u16 fw_capabilities;
+ struct {
+ __u8 enforced_min_svn;
+ __u8 permanent_min_svn;
+ __u8 active_svn;
+ __u8 resrvd9;
+ __u16 svn_status;
+ __u16 resrvd10;
+ __u64 resrvd11;
+ };
};
} __packed;
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
index 6887930c7995..e74cafa95f1c 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -27,6 +27,7 @@ struct icp_qat_fw_loader_chip_info {
int mmp_sram_size;
bool nn;
bool lm2lm3;
+ u16 reset_delay_us;
u32 lm_size;
u32 icp_rst_csr;
u32 icp_rst_mask;
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
index b8f1c4ffb8b5..16ef6d98fa42 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
@@ -94,7 +94,8 @@ enum icp_qat_capabilities_mask {
ICP_ACCEL_CAPABILITIES_AUTHENTICATION = BIT(3),
ICP_ACCEL_CAPABILITIES_RESERVED_1 = BIT(4),
ICP_ACCEL_CAPABILITIES_COMPRESSION = BIT(5),
- /* Bits 6-7 are currently reserved */
+ /* Bit 6 is currently reserved */
+ ICP_ACCEL_CAPABILITIES_5G = BIT(7),
ICP_ACCEL_CAPABILITIES_ZUC = BIT(8),
ICP_ACCEL_CAPABILITIES_SHA3 = BIT(9),
/* Bits 10-11 are currently reserved */
@@ -335,7 +336,8 @@ enum icp_qat_hw_compression_delayed_match {
enum icp_qat_hw_compression_algo {
ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,
ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,
- ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2
+ ICP_QAT_HW_COMPRESSION_ALGO_ZSTD = 2,
+ ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER
};
enum icp_qat_hw_compression_depth {
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h
index 7ea8962272f2..d28732225c9e 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h
@@ -3,6 +3,8 @@
#ifndef _ICP_QAT_HW_20_COMP_H_
#define _ICP_QAT_HW_20_COMP_H_
+#include <linux/swab.h>
+
#include "icp_qat_hw_20_comp_defs.h"
#include "icp_qat_fw.h"
@@ -54,7 +56,7 @@ ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_20_config_csr_lower
QAT_FIELD_SET(val32, csr.abd, ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK);
- return __builtin_bswap32(val32);
+ return swab32(val32);
}
struct icp_qat_hw_comp_20_config_csr_upper {
@@ -106,7 +108,7 @@ ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_20_config_csr_upper
ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK);
- return __builtin_bswap32(val32);
+ return swab32(val32);
}
struct icp_qat_hw_decomp_20_config_csr_lower {
@@ -138,7 +140,7 @@ ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_20_config_csr_l
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK);
- return __builtin_bswap32(val32);
+ return swab32(val32);
}
struct icp_qat_hw_decomp_20_config_csr_upper {
@@ -158,7 +160,7 @@ ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_20_config_csr_u
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK);
- return __builtin_bswap32(val32);
+ return swab32(val32);
}
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
index 8b123472b71c..e0d003b50358 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
@@ -6,6 +6,7 @@
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
+#include <linux/zstd.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_dc.h"
@@ -13,9 +14,104 @@
#include "qat_comp_req.h"
#include "qat_compression.h"
#include "qat_algs_send.h"
+#include "qat_comp_zstd_utils.h"
+
+#define QAT_ZSTD_SCRATCH_SIZE 524288
+#define QAT_ZSTD_MAX_BLOCK_SIZE 65535
+#define QAT_ZSTD_MAX_CONTENT_SIZE 4096
+#define QAT_LZ4S_MIN_INPUT_SIZE 8192
+#define QAT_LZ4S_MAX_OUTPUT_SIZE QAT_ZSTD_SCRATCH_SIZE
+#define QAT_MAX_SEQUENCES (128 * 1024)
static DEFINE_MUTEX(algs_lock);
-static unsigned int active_devs;
+static unsigned int active_devs_deflate;
+static unsigned int active_devs_lz4s;
+static unsigned int active_devs_zstd;
+
+struct qat_zstd_scratch {
+ size_t cctx_buffer_size;
+ void *lz4s;
+ void *literals;
+ void *out_seqs;
+ void *workspace;
+ ZSTD_CCtx *ctx;
+};
+
+static void *qat_zstd_alloc_scratch(void)
+{
+ struct qat_zstd_scratch *scratch;
+ ZSTD_parameters params;
+ size_t cctx_size;
+ ZSTD_CCtx *ctx;
+ size_t zret;
+ int ret;
+
+ ret = -ENOMEM;
+ scratch = kzalloc_obj(*scratch);
+ if (!scratch)
+ return ERR_PTR(ret);
+
+ scratch->lz4s = kvmalloc(QAT_ZSTD_SCRATCH_SIZE, GFP_KERNEL);
+ if (!scratch->lz4s)
+ goto error;
+
+ scratch->literals = kvmalloc(QAT_ZSTD_SCRATCH_SIZE, GFP_KERNEL);
+ if (!scratch->literals)
+ goto error;
+
+ scratch->out_seqs = kvcalloc(QAT_MAX_SEQUENCES, sizeof(ZSTD_Sequence),
+ GFP_KERNEL);
+ if (!scratch->out_seqs)
+ goto error;
+
+ params = zstd_get_params(zstd_max_clevel(), QAT_ZSTD_SCRATCH_SIZE);
+ cctx_size = zstd_cctx_workspace_bound(&params.cParams);
+
+ scratch->workspace = kvmalloc(cctx_size, GFP_KERNEL | __GFP_ZERO);
+ if (!scratch->workspace)
+ goto error;
+
+ ret = -EINVAL;
+ ctx = zstd_init_cctx(scratch->workspace, cctx_size);
+ if (!ctx)
+ goto error;
+
+ scratch->ctx = ctx;
+ scratch->cctx_buffer_size = cctx_size;
+
+ zret = zstd_cctx_set_param(ctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
+ if (zstd_is_error(zret))
+ goto error;
+
+ return scratch;
+
+error:
+ kvfree(scratch->lz4s);
+ kvfree(scratch->literals);
+ kvfree(scratch->out_seqs);
+ kvfree(scratch->workspace);
+ kfree(scratch);
+ return ERR_PTR(ret);
+}
+
+static void qat_zstd_free_scratch(void *ctx)
+{
+ struct qat_zstd_scratch *scratch = ctx;
+
+ if (!scratch)
+ return;
+
+ kvfree(scratch->lz4s);
+ kvfree(scratch->literals);
+ kvfree(scratch->out_seqs);
+ kvfree(scratch->workspace);
+ kfree(scratch);
+}
+
+static struct crypto_acomp_streams qat_zstd_streams = {
+ .alloc_ctx = qat_zstd_alloc_scratch,
+ .free_ctx = qat_zstd_free_scratch,
+};
enum direction {
DECOMPRESSION = 0,
@@ -24,10 +120,18 @@ enum direction {
struct qat_compression_req;
+struct qat_callback_params {
+ unsigned int produced;
+ unsigned int dlen;
+ bool plain;
+};
+
struct qat_compression_ctx {
u8 comp_ctx[QAT_COMP_CTX_SIZE];
struct qat_compression_instance *inst;
- int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
+ int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp,
+ struct qat_callback_params *params);
+ struct crypto_acomp *ftfm;
};
struct qat_compression_req {
@@ -62,6 +166,7 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
struct qat_compression_instance *inst = ctx->inst;
+ struct qat_callback_params params = { };
int consumed, produced;
s8 cmp_err, xlt_err;
int res = -EBADMSG;
@@ -76,6 +181,10 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
consumed = qat_comp_get_consumed_ctr(resp);
produced = qat_comp_get_produced_ctr(resp);
+	/* Cache parameters for the algorithm-specific callback */
+ params.produced = produced;
+ params.dlen = areq->dlen;
+
dev_dbg(&GET_DEV(accel_dev),
"[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d",
crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
@@ -83,16 +192,20 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
status ? "ERR" : "OK ",
areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);
-	areq->dlen = 0;
-	if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+	if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) {
+		if (cmp_err == ERR_CODE_OVERFLOW_ERROR || xlt_err == ERR_CODE_OVERFLOW_ERROR)
+			res = -E2BIG;
+		areq->dlen = 0;
 		goto end;
+	}
if (qat_req->dir == COMPRESSION) {
cnv = qat_comp_get_cmp_cnv_flag(resp);
if (unlikely(!cnv)) {
dev_err(&GET_DEV(accel_dev),
"Verified compression not supported\n");
+ areq->dlen = 0;
goto end;
}
@@ -102,38 +215,41 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
dev_dbg(&GET_DEV(accel_dev),
"Actual buffer overflow: produced=%d, dlen=%d\n",
produced, qat_req->actual_dlen);
+
+ res = -E2BIG;
+ areq->dlen = 0;
goto end;
}
+
+ params.plain = !!qat_comp_get_cmp_uncomp_flag(resp);
}
res = 0;
areq->dlen = produced;
if (ctx->qat_comp_callback)
- res = ctx->qat_comp_callback(qat_req, resp);
+ res = ctx->qat_comp_callback(qat_req, resp, &params);
end:
qat_bl_free_bufl(accel_dev, &qat_req->buf);
acomp_request_complete(areq, res);
+ qat_alg_send_backlog(qat_req->alg_req.backlog);
}
void qat_comp_alg_callback(void *resp)
{
struct qat_compression_req *qat_req =
(void *)(__force long)qat_comp_get_opaque(resp);
- struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
qat_comp_generic_callback(qat_req, resp);
-
- qat_alg_send_backlog(backlog);
}
-static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
+static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm, int alg)
{
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
- struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_compression_instance *inst;
- int node;
+ int node, ret;
if (tfm->node == NUMA_NO_NODE)
node = numa_node_id();
@@ -141,18 +257,28 @@ static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
node = tfm->node;
memset(ctx, 0, sizeof(*ctx));
- inst = qat_compression_get_instance_node(node);
+ inst = qat_compression_get_instance_node(node, alg);
if (!inst)
return -EINVAL;
ctx->inst = inst;
- return qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, QAT_DEFLATE);
+ ret = qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, alg);
+ if (ret) {
+ qat_compression_put_instance(inst);
+ memset(ctx, 0, sizeof(*ctx));
+ }
+
+ return ret;
+}
+
+static int qat_comp_alg_deflate_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+ return qat_comp_alg_init_tfm(acomp_tfm, QAT_DEFLATE);
}
static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
{
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
- struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
qat_compression_put_instance(ctx->inst);
memset(ctx, 0, sizeof(*ctx));
@@ -164,8 +290,7 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum directi
{
struct qat_compression_req *qat_req = acomp_request_ctx(areq);
struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
- struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
struct qat_compression_instance *inst = ctx->inst;
gfp_t f = qat_algs_alloc_flags(&areq->base);
struct qat_sgl_to_bufl_params params = {0};
@@ -233,7 +358,234 @@ static int qat_comp_alg_decompress(struct acomp_req *req)
return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
}
-static struct acomp_alg qat_acomp[] = { {
+static int qat_comp_alg_zstd_decompress(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
+ struct acomp_req *nreq = acomp_request_ctx(req);
+ zstd_frame_header header;
+ void *buffer;
+ size_t zret;
+ int ret;
+
+ buffer = kmap_local_page(sg_page(req->src)) + req->src->offset;
+ zret = zstd_get_frame_header(&header, buffer, req->src->length);
+ kunmap_local(buffer);
+ if (zret) {
+ dev_err(&GET_DEV(ctx->inst->accel_dev),
+ "ZSTD-compressed data has an incomplete frame header\n");
+ return -EINVAL;
+ }
+
+ if (header.windowSize > QAT_ZSTD_MAX_BLOCK_SIZE ||
+ header.frameContentSize >= QAT_ZSTD_MAX_CONTENT_SIZE) {
+ dev_dbg(&GET_DEV(ctx->inst->accel_dev), "Window size=0x%llx\n",
+ header.windowSize);
+
+ memcpy(nreq, req, sizeof(*req));
+ acomp_request_set_tfm(nreq, ctx->ftfm);
+
+ ret = crypto_acomp_decompress(nreq);
+ req->dlen = nreq->dlen;
+
+ return ret;
+ }
+
+ return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
+}
+
+static int qat_comp_lz4s_zstd_callback(struct qat_compression_req *qat_req, void *resp,
+ struct qat_callback_params *params)
+{
+ struct qat_compression_ctx *qat_ctx = qat_req->qat_compression_ctx;
+ struct acomp_req *areq = qat_req->acompress_req;
+ struct qat_zstd_scratch *scratch;
+ struct crypto_acomp_stream *s;
+ unsigned int lit_len = 0;
+ ZSTD_Sequence *out_seqs;
+ void *lz4s, *zstd;
+ size_t comp_size;
+ ZSTD_CCtx *ctx;
+ void *literals;
+ int seq_count;
+ int ret = 0;
+
+ if (params->produced + QAT_ZSTD_LIT_COPY_LEN > QAT_ZSTD_SCRATCH_SIZE) {
+ dev_dbg(&GET_DEV(qat_ctx->inst->accel_dev),
+ "LZ4s-ZSTD: produced size (%u) + COPY_SIZE > QAT_ZSTD_SCRATCH_SIZE (%u)\n",
+ params->produced, QAT_ZSTD_SCRATCH_SIZE);
+ areq->dlen = 0;
+ return -E2BIG;
+ }
+
+ s = crypto_acomp_lock_stream_bh(&qat_zstd_streams);
+ scratch = s->ctx;
+
+ lz4s = scratch->lz4s;
+	zstd = lz4s; /* Output buffer is the same as the lz4s buffer */
+ out_seqs = scratch->out_seqs;
+ ctx = scratch->ctx;
+ literals = scratch->literals;
+
+ if (likely(!params->plain)) {
+ if (likely(sg_nents(areq->dst) == 1)) {
+ zstd = sg_virt(areq->dst);
+ lz4s = zstd;
+ } else {
+ memcpy_from_sglist(lz4s, areq->dst, 0, params->produced);
+ }
+
+ seq_count = qat_alg_dec_lz4s(out_seqs, QAT_MAX_SEQUENCES, lz4s,
+ params->produced, literals, &lit_len);
+ if (seq_count < 0) {
+ ret = seq_count;
+ comp_size = 0;
+ goto out;
+ }
+ } else {
+ out_seqs[0].litLength = areq->slen;
+ out_seqs[0].offset = 0;
+ out_seqs[0].matchLength = 0;
+
+ seq_count = 1;
+ }
+
+ comp_size = zstd_compress_sequences_and_literals(ctx, zstd, params->dlen,
+ out_seqs, seq_count,
+ literals, lit_len,
+ QAT_ZSTD_SCRATCH_SIZE,
+ areq->slen);
+ if (zstd_is_error(comp_size)) {
+ if (comp_size == ZSTD_error_cannotProduce_uncompressedBlock)
+ ret = -E2BIG;
+ else
+ ret = -EOPNOTSUPP;
+
+ comp_size = 0;
+ goto out;
+ }
+
+ if (comp_size > params->dlen) {
+ dev_dbg(&GET_DEV(qat_ctx->inst->accel_dev),
+ "LZ4s-ZSTD: compressed_size (%u) > output buffer size (%u)\n",
+ (unsigned int)comp_size, params->dlen);
+ ret = -EOVERFLOW;
+ goto out;
+ }
+
+ if (unlikely(sg_nents(areq->dst) != 1))
+ memcpy_to_sglist(areq->dst, 0, zstd, comp_size);
+
+out:
+ areq->dlen = comp_size;
+ crypto_acomp_unlock_stream_bh(s);
+
+ return ret;
+}
+
+static int qat_comp_alg_lz4s_zstd_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ int reqsize;
+ int ret;
+
+ /* qat_comp_alg_init_tfm() wipes out the ctx */
+ ret = qat_comp_alg_init_tfm(acomp_tfm, QAT_LZ4S);
+ if (ret)
+ return ret;
+
+ ctx->ftfm = crypto_alloc_acomp_node("zstd", 0, CRYPTO_ALG_NEED_FALLBACK,
+ tfm->node);
+ if (IS_ERR(ctx->ftfm)) {
+ qat_comp_alg_exit_tfm(acomp_tfm);
+ return PTR_ERR(ctx->ftfm);
+ }
+
+ reqsize = max(sizeof(struct qat_compression_req),
+ sizeof(struct acomp_req) + crypto_acomp_reqsize(ctx->ftfm));
+
+ acomp_tfm->reqsize = reqsize;
+
+ ctx->qat_comp_callback = qat_comp_lz4s_zstd_callback;
+
+ return 0;
+}
+
+static int qat_comp_alg_zstd_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ int reqsize;
+ int ret;
+
+ /* qat_comp_alg_init_tfm() wipes out the ctx */
+ ret = qat_comp_alg_init_tfm(acomp_tfm, QAT_ZSTD);
+ if (ret)
+ return ret;
+
+ ctx->ftfm = crypto_alloc_acomp_node("zstd", 0, CRYPTO_ALG_NEED_FALLBACK,
+ tfm->node);
+ if (IS_ERR(ctx->ftfm)) {
+ qat_comp_alg_exit_tfm(acomp_tfm);
+ return PTR_ERR(ctx->ftfm);
+ }
+
+ reqsize = max(sizeof(struct qat_compression_req),
+ sizeof(struct acomp_req) + crypto_acomp_reqsize(ctx->ftfm));
+
+ acomp_tfm->reqsize = reqsize;
+
+ return 0;
+}
+
+static void qat_comp_alg_zstd_exit_tfm(struct crypto_acomp *acomp_tfm)
+{
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
+
+ if (ctx->ftfm)
+ crypto_free_acomp(ctx->ftfm);
+
+ qat_comp_alg_exit_tfm(acomp_tfm);
+}
+
+static int qat_comp_alg_lz4s_zstd_compress(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
+ struct acomp_req *nreq = acomp_request_ctx(req);
+ int ret;
+
+ if (req->slen >= QAT_LZ4S_MIN_INPUT_SIZE && req->dlen >= QAT_LZ4S_MIN_INPUT_SIZE &&
+ req->slen <= QAT_LZ4S_MAX_OUTPUT_SIZE && req->dlen <= QAT_LZ4S_MAX_OUTPUT_SIZE)
+ return qat_comp_alg_compress(req);
+
+ memcpy(nreq, req, sizeof(*req));
+ acomp_request_set_tfm(nreq, ctx->ftfm);
+
+ ret = crypto_acomp_compress(nreq);
+ req->dlen = nreq->dlen;
+
+ return ret;
+}
+
+static int qat_comp_alg_sw_decompress(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
+ struct acomp_req *nreq = acomp_request_ctx(req);
+ int ret;
+
+ memcpy(nreq, req, sizeof(*req));
+ acomp_request_set_tfm(nreq, ctx->ftfm);
+
+ ret = crypto_acomp_decompress(nreq);
+ req->dlen = nreq->dlen;
+
+ return ret;
+}
+
+static struct acomp_alg qat_acomp_deflate[] = { {
.base = {
.cra_name = "deflate",
.cra_driver_name = "qat_deflate",
@@ -243,27 +595,165 @@ static struct acomp_alg qat_acomp[] = { {
.cra_reqsize = sizeof(struct qat_compression_req),
.cra_module = THIS_MODULE,
},
- .init = qat_comp_alg_init_tfm,
+ .init = qat_comp_alg_deflate_init_tfm,
.exit = qat_comp_alg_exit_tfm,
.compress = qat_comp_alg_compress,
.decompress = qat_comp_alg_decompress,
}};
-int qat_comp_algs_register(void)
+static struct acomp_alg qat_acomp_zstd_lz4s = {
+ .base = {
+ .cra_name = "zstd",
+ .cra_driver_name = "qat_zstd",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_reqsize = sizeof(struct qat_compression_req),
+ .cra_ctxsize = sizeof(struct qat_compression_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = qat_comp_alg_lz4s_zstd_init_tfm,
+ .exit = qat_comp_alg_zstd_exit_tfm,
+ .compress = qat_comp_alg_lz4s_zstd_compress,
+ .decompress = qat_comp_alg_sw_decompress,
+};
+
+static struct acomp_alg qat_acomp_zstd_native = {
+ .base = {
+ .cra_name = "zstd",
+ .cra_driver_name = "qat_zstd",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_reqsize = sizeof(struct qat_compression_req),
+ .cra_ctxsize = sizeof(struct qat_compression_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = qat_comp_alg_zstd_init_tfm,
+ .exit = qat_comp_alg_zstd_exit_tfm,
+ .compress = qat_comp_alg_compress,
+ .decompress = qat_comp_alg_zstd_decompress,
+};
+
+static int qat_comp_algs_register_deflate(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs_deflate == 1) {
+ ret = crypto_register_acomps(qat_acomp_deflate,
+ ARRAY_SIZE(qat_acomp_deflate));
+ if (ret)
+ active_devs_deflate--;
+ }
+ mutex_unlock(&algs_lock);
+
+ return ret;
+}
+
+static void qat_comp_algs_unregister_deflate(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs_deflate == 0)
+ crypto_unregister_acomps(qat_acomp_deflate, ARRAY_SIZE(qat_acomp_deflate));
+ mutex_unlock(&algs_lock);
+}
+
+static int qat_comp_algs_register_lz4s(void)
{
int ret = 0;
mutex_lock(&algs_lock);
- if (++active_devs == 1)
- ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
+ if (++active_devs_lz4s == 1) {
+ ret = crypto_acomp_alloc_streams(&qat_zstd_streams);
+ if (ret) {
+ active_devs_lz4s--;
+ goto unlock;
+ }
+
+ ret = crypto_register_acomp(&qat_acomp_zstd_lz4s);
+ if (ret) {
+ crypto_acomp_free_streams(&qat_zstd_streams);
+ active_devs_lz4s--;
+ }
+ }
+unlock:
mutex_unlock(&algs_lock);
+
return ret;
}
-void qat_comp_algs_unregister(void)
+static void qat_comp_algs_unregister_lz4s(void)
{
mutex_lock(&algs_lock);
- if (--active_devs == 0)
- crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
+ if (--active_devs_lz4s == 0) {
+ crypto_unregister_acomp(&qat_acomp_zstd_lz4s);
+ crypto_acomp_free_streams(&qat_zstd_streams);
+ }
+ mutex_unlock(&algs_lock);
+}
+
+static int qat_comp_algs_register_zstd(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs_zstd == 1) {
+ ret = crypto_register_acomp(&qat_acomp_zstd_native);
+ if (ret)
+ active_devs_zstd--;
+ }
mutex_unlock(&algs_lock);
+
+ return ret;
+}
+
+static void qat_comp_algs_unregister_zstd(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs_zstd == 0)
+ crypto_unregister_acomp(&qat_acomp_zstd_native);
+ mutex_unlock(&algs_lock);
+}
+
+int qat_comp_algs_register(u32 caps)
+{
+ int ret;
+
+ ret = qat_comp_algs_register_deflate();
+ if (ret)
+ return ret;
+
+ if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S) {
+ ret = qat_comp_algs_register_lz4s();
+ if (ret)
+ goto err_unregister_deflate;
+ }
+
+ if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD) {
+ ret = qat_comp_algs_register_zstd();
+ if (ret)
+ goto err_unregister_lz4s;
+ }
+
+ return ret;
+
+err_unregister_lz4s:
+ if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S)
+ qat_comp_algs_unregister_lz4s();
+err_unregister_deflate:
+ qat_comp_algs_unregister_deflate();
+
+ return ret;
+}
+
+void qat_comp_algs_unregister(u32 caps)
+{
+ qat_comp_algs_unregister_deflate();
+
+ if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S)
+ qat_comp_algs_unregister_lz4s();
+
+ if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD)
+ qat_comp_algs_unregister_zstd();
}
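
For context, a hedged sketch (not part of this patch) of how a kernel consumer
would drive one of the acomp algorithms registered above; error paths are
trimmed and src/dst are assumed to be kmalloc'd linear buffers:

	#include <crypto/acompress.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int acomp_compress_once(const void *src, unsigned int slen,
				       void *dst, unsigned int dlen)
	{
		struct scatterlist sg_src, sg_dst;
		DECLARE_CRYPTO_WAIT(wait);
		struct crypto_acomp *tfm;
		struct acomp_req *req;
		int ret;

		tfm = crypto_alloc_acomp("zstd", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = acomp_request_alloc(tfm);
		if (!req) {
			crypto_free_acomp(tfm);
			return -ENOMEM;
		}

		sg_init_one(&sg_src, src, slen);
		sg_init_one(&sg_dst, dst, dlen);
		acomp_request_set_params(req, &sg_src, &sg_dst, slen, dlen);
		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   crypto_req_done, &wait);

		/* Wait synchronously for the async engine to complete. */
		ret = crypto_wait_req(crypto_acomp_compress(req), &wait);

		acomp_request_free(req);
		crypto_free_acomp(tfm);
		return ret;
	}
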
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
index 18a1f33a6db9..f165d28aaaf4 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
@@ -23,6 +23,7 @@ static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen,
fw_req->comn_mid.opaque_data = opaque;
req_pars->comp_len = slen;
req_pars->out_buffer_sz = dlen;
+ fw_req->u3.asb_threshold.asb_value *= slen >> 4;
}
static inline void qat_comp_create_compression_req(void *ctx, void *req,
@@ -110,4 +111,12 @@ static inline u8 qat_comp_get_cmp_cnv_flag(void *resp)
return ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(flags);
}
+static inline u8 qat_comp_get_cmp_uncomp_flag(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+ u8 flags = qat_resp->comn_resp.hdr_flags;
+
+ return ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_GET(flags);
+}
+
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.c b/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.c
new file mode 100644
index 000000000000..62ec2d5c3ab8
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2026 Intel Corporation */
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
+#include <linux/zstd.h>
+
+#include "qat_comp_zstd_utils.h"
+
+#define ML_BITS 4
+#define ML_MASK ((1U << ML_BITS) - 1)
+#define RUN_BITS (8 - ML_BITS)
+#define RUN_MASK ((1U << RUN_BITS) - 1)
+#define LZ4S_MINMATCH 2
+
+/*
+ * ZSTD blocks can decompress to at most min(windowSize, 128KB) bytes.
+ * Insert explicit block delimiters to keep blocks within this limit.
+ */
+#define QAT_ZSTD_BLOCK_MAX ZSTD_BLOCKSIZE_MAX
+
+static int emit_delimiter(ZSTD_Sequence *out_seqs, size_t *seqs_idx,
+ size_t out_seqs_capacity, unsigned int lz4s_buff_size)
+{
+ if (*seqs_idx >= out_seqs_capacity - 1) {
+ pr_debug("QAT ZSTD: sequence overflow (seqs_idx:%zu, capacity:%zu, lz4s_size:%u)\n",
+ *seqs_idx, out_seqs_capacity, lz4s_buff_size);
+ return -EOVERFLOW;
+ }
+
+ out_seqs[*seqs_idx].offset = 0;
+ out_seqs[*seqs_idx].litLength = 0;
+ out_seqs[*seqs_idx].matchLength = 0;
+ (*seqs_idx)++;
+
+ return 0;
+}
+
+int qat_alg_dec_lz4s(ZSTD_Sequence *out_seqs, size_t out_seqs_capacity,
+ unsigned char *lz4s_buff, unsigned int lz4s_buff_size,
+ unsigned char *literals, unsigned int *lit_len)
+{
+ unsigned char *end_ip = lz4s_buff + lz4s_buff_size;
+ unsigned char *start, *dest, *dest_end;
+ unsigned int hist_literal_len = 0;
+ unsigned char *ip = lz4s_buff;
+ size_t block_decomp_size = 0;
+ size_t seqs_idx = 0;
+ int ret;
+
+ *lit_len = 0;
+
+ if (!lz4s_buff_size)
+ return 0;
+
+ while (ip < end_ip) {
+ size_t literal_len = 0, match_len = 0;
+ const unsigned int token = *ip++;
+ size_t length = 0;
+ size_t offset = 0;
+
+ /* Get literal length */
+ length = token >> ML_BITS;
+ if (length == RUN_MASK) {
+ unsigned int s;
+
+ do {
+ s = *ip++;
+ length += s;
+ } while (s == 255);
+ }
+
+ literal_len = length;
+
+ start = ip;
+ dest = literals;
+ dest_end = literals + length;
+
+ do {
+ memcpy(dest, start, QAT_ZSTD_LIT_COPY_LEN);
+ dest += QAT_ZSTD_LIT_COPY_LEN;
+ start += QAT_ZSTD_LIT_COPY_LEN;
+ } while (dest < dest_end);
+
+ literals += length;
+ *lit_len += length;
+
+ ip += length;
+ if (ip == end_ip) {
+ literal_len += hist_literal_len;
+ /*
+ * If adding trailing literals would overflow the
+ * current block, close it first.
+ */
+ if (block_decomp_size + literal_len > QAT_ZSTD_BLOCK_MAX) {
+ ret = emit_delimiter(out_seqs, &seqs_idx,
+ out_seqs_capacity,
+ lz4s_buff_size);
+ if (ret)
+ return ret;
+ }
+ out_seqs[seqs_idx].litLength = literal_len;
+ out_seqs[seqs_idx].offset = offset;
+ out_seqs[seqs_idx].matchLength = match_len;
+ break;
+ }
+
+ offset = get_unaligned_le16(ip);
+ ip += 2;
+
+ length = token & ML_MASK;
+ if (length == ML_MASK) {
+ unsigned int s;
+
+ do {
+ s = *ip++;
+ length += s;
+ } while (s == 255);
+ }
+ if (length != 0) {
+ length += LZ4S_MINMATCH;
+ match_len = (unsigned short)length;
+ literal_len += hist_literal_len;
+
+ /*
+ * If this sequence would push the current block past
+ * the ZSTD maximum, close the block first.
+ */
+ if (block_decomp_size + literal_len + match_len > QAT_ZSTD_BLOCK_MAX) {
+ ret = emit_delimiter(out_seqs, &seqs_idx,
+ out_seqs_capacity,
+ lz4s_buff_size);
+ if (ret)
+ return ret;
+
+ block_decomp_size = 0;
+ }
+
+ out_seqs[seqs_idx].offset = offset;
+ out_seqs[seqs_idx].litLength = literal_len;
+ out_seqs[seqs_idx].matchLength = match_len;
+ hist_literal_len = 0;
+ seqs_idx++;
+ if (seqs_idx >= out_seqs_capacity - 1) {
+ pr_debug("QAT ZSTD: sequence overflow (seqs_idx:%zu, capacity:%zu, lz4s_size:%u)\n",
+ seqs_idx, out_seqs_capacity, lz4s_buff_size);
+ return -EOVERFLOW;
+ }
+
+ block_decomp_size += literal_len + match_len;
+ } else {
+ if (literal_len > 0) {
+ /*
+ * When match length is 0, the literal length needs
+ * to be temporarily stored and processed together
+ * with the next data block.
+ */
+ hist_literal_len += literal_len;
+ }
+ }
+ }
+
+ return seqs_idx + 1;
+}
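
To make the token parsing above concrete, a stand-alone user-space walk-through
of a single LZ4s token (the 16-bit little-endian match offset that follows the
literals is omitted); the constants mirror ML_BITS/RUN_MASK/LZ4S_MINMATCH:

	#include <stdio.h>

	int main(void)
	{
		/* token 0xf2: literal nibble 15 => extension byte follows */
		const unsigned char stream[] = { 0xf2, 0x05 };
		const unsigned char *ip = stream;
		unsigned int token = *ip++;
		unsigned int lit_len = token >> 4;	/* high 4 bits */
		unsigned int match_len = token & 0x0f;	/* low 4 bits */

		if (lit_len == 15) {			/* RUN_MASK hit */
			unsigned int s;

			do {
				s = *ip++;
				lit_len += s;
			} while (s == 255);
		}
		if (match_len)
			match_len += 2;			/* LZ4S_MINMATCH */

		/* Prints: lit_len=20 match_len=4 */
		printf("lit_len=%u match_len=%u\n", lit_len, match_len);
		return 0;
	}
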
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.h b/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.h
new file mode 100644
index 000000000000..55c7a1b9b848
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2026 Intel Corporation */
+#ifndef QAT_COMP_ZSTD_UTILS_H_
+#define QAT_COMP_ZSTD_UTILS_H_
+#include <linux/zstd_lib.h>
+
+#define QAT_ZSTD_LIT_COPY_LEN 8
+
+int qat_alg_dec_lz4s(ZSTD_Sequence *out_seqs, size_t out_seqs_capacity,
+ unsigned char *lz4s_buff, unsigned int lz4s_buff_size,
+ unsigned char *literals, unsigned int *lit_len);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index 53a4db5507ec..1424d7a9bcd3 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -46,12 +46,14 @@ static int qat_compression_free_instances(struct adf_accel_dev *accel_dev)
return 0;
}
-struct qat_compression_instance *qat_compression_get_instance_node(int node)
+struct qat_compression_instance *qat_compression_get_instance_node(int node, int alg)
{
struct qat_compression_instance *inst = NULL;
+ struct adf_hw_device_data *hw_data = NULL;
struct adf_accel_dev *accel_dev = NULL;
unsigned long best = ~0;
struct list_head *itr;
+ u32 caps, mask;
list_for_each(itr, adf_devmgr_get_head()) {
struct adf_accel_dev *tmp_dev;
@@ -61,6 +63,15 @@ struct qat_compression_instance *qat_compression_get_instance_node(int node)
tmp_dev = list_entry(itr, struct adf_accel_dev, list);
tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev));
+ if (alg == QAT_ZSTD || alg == QAT_LZ4S) {
+ hw_data = tmp_dev->hw_device;
+ caps = hw_data->accel_capabilities_ext_mask;
+ mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD |
+ ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S;
+ if (!(caps & mask))
+ continue;
+ }
+
if ((node == tmp_dev_node || tmp_dev_node < 0) &&
adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) {
ctr = atomic_read(&tmp_dev->ref_count);
@@ -78,6 +89,16 @@ struct qat_compression_instance *qat_compression_get_instance_node(int node)
struct adf_accel_dev *tmp_dev;
tmp_dev = list_entry(itr, struct adf_accel_dev, list);
+
+ if (alg == QAT_ZSTD || alg == QAT_LZ4S) {
+ hw_data = tmp_dev->hw_device;
+ caps = hw_data->accel_capabilities_ext_mask;
+ mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD |
+ ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S;
+ if (!(caps & mask))
+ continue;
+ }
+
if (adf_dev_started(tmp_dev) &&
!list_empty(&tmp_dev->compression_list)) {
accel_dev = tmp_dev;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
index 7a6ba6f22e3e..1c3d1311f1c7 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -9,17 +9,18 @@
#include "icp_qat_hal.h"
#include "icp_qat_uclo.h"
-#define BAD_REGADDR 0xffff
-#define MAX_RETRY_TIMES 10000
-#define INIT_CTX_ARB_VALUE 0x0
-#define INIT_CTX_ENABLE_VALUE 0x0
-#define INIT_PC_VALUE 0x0
-#define INIT_WAKEUP_EVENTS_VALUE 0x1
-#define INIT_SIG_EVENTS_VALUE 0x1
-#define INIT_CCENABLE_VALUE 0x2000
-#define RST_CSR_QAT_LSB 20
-#define RST_CSR_AE_LSB 0
-#define MC_TIMESTAMP_ENABLE (0x1 << 7)
+#define BAD_REGADDR 0xffff
+#define MAX_RETRY_TIMES 10000
+#define INIT_CTX_ARB_VALUE 0x0
+#define INIT_CTX_ENABLE_VALUE 0x0
+#define INIT_PC_VALUE 0x0
+#define INIT_WAKEUP_EVENTS_VALUE 0x1
+#define INIT_SIG_EVENTS_VALUE 0x1
+#define INIT_CCENABLE_VALUE 0x2000
+#define RST_CSR_QAT_LSB 20
+#define RST_CSR_AE_LSB 0
+#define MC_TIMESTAMP_ENABLE (0x1 << 7)
+#define MIN_RESET_DELAY_US 3
#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
(~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
@@ -713,8 +714,10 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->chip_info->wakeup_event_val = 0x80000000;
handle->chip_info->fw_auth = true;
handle->chip_info->css_3k = true;
- if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX)
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX) {
handle->chip_info->dual_sign = true;
+ handle->chip_info->reset_delay_us = MIN_RESET_DELAY_US;
+ }
handle->chip_info->tgroup_share_ustore = true;
handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index e61a367b0d17..a00ca2a0900f 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -12,6 +12,7 @@
#include <linux/pci_ids.h>
#include <linux/wordpart.h>
#include "adf_accel_devices.h"
+#include "adf_anti_rb.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
#include "icp_qat_hal.h"
@@ -1230,10 +1231,11 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
{
- u32 fcu_sts, retry = 0;
+ unsigned int retries = FW_AUTH_MAX_RETRY;
u32 fcu_ctl_csr, fcu_sts_csr;
u32 fcu_dram_hi_csr, fcu_dram_lo_csr;
u64 bus_addr;
+ u32 fcu_sts;
bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
- sizeof(struct icp_qat_auth_chunk);
@@ -1248,17 +1250,32 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
do {
+ int arb_ret;
+
msleep(FW_AUTH_WAIT_PERIOD);
fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
+
+ arb_ret = adf_anti_rb_check(handle->pci_dev);
+ if (arb_ret == -EAGAIN) {
+ if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL) {
+ SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
+ continue;
+ }
+ } else if (arb_ret) {
+ goto auth_fail;
+ }
+
if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
goto auth_fail;
+
if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
return 0;
- } while (retry++ < FW_AUTH_MAX_RETRY);
+ } while (--retries);
+
auth_fail:
- pr_err("authentication error (FCU_STATUS = 0x%x),retry = %d\n",
- fcu_sts & FCU_AUTH_STS_MASK, retry);
+ pr_err("authentication error (FCU_STATUS = 0x%x)\n", fcu_sts & FCU_AUTH_STS_MASK);
+
return -EINVAL;
}
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index 5103d36cdfdb..2f203042d9bd 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -847,8 +847,7 @@ static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
*len = creq->len;
memcpy(hash, creq->state, digsize);
- memset(cache, 0, blocksize);
- memcpy(cache, creq->cache, creq->cache_ptr);
+ memcpy_and_pad(cache, blocksize, creq->cache, creq->cache_ptr, 0);
return 0;
}
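
memcpy_and_pad() folds the memset()+memcpy() pair into a single call; a
user-space model of its semantics (the kernel version is in linux/string.h):

	#include <assert.h>
	#include <string.h>

	/* Copy count bytes, then fill the remainder of dest with pad. */
	static void memcpy_and_pad_demo(void *dest, size_t dest_len,
					const void *src, size_t count, int pad)
	{
		if (count < dest_len) {
			memcpy(dest, src, count);
			memset((char *)dest + count, pad, dest_len - count);
		} else {
			memcpy(dest, src, dest_len);
		}
	}

	int main(void)
	{
		char cache[8];

		memcpy_and_pad_demo(cache, sizeof(cache), "abc", 3, 0);
		assert(memcmp(cache, "abc\0\0\0\0", 8) == 0);
		return 0;
	}
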
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index 09e6a8474d1a..e0f38d32bc93 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -10,6 +10,7 @@
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/string.h>
#include <linux/string_choices.h>
#include "otx_cpt_common.h"
#include "otx_cptpf_ucode.h"
@@ -509,13 +510,12 @@ EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);
static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
char *buf, int size)
{
- if (eng_grp->mirror.is_ena) {
+ if (eng_grp->mirror.is_ena)
scnprintf(buf, size, "%s (shared with engine_group%d)",
eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].ver_str,
eng_grp->mirror.idx);
- } else {
- scnprintf(buf, size, "%s", eng_grp->ucode[0].ver_str);
- }
+ else
+ strscpy(buf, eng_grp->ucode[0].ver_str, size);
}
static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index b61f2545e165..a61208cbcd27 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -115,10 +115,7 @@ void *nx842_crypto_alloc_ctx(struct nx842_driver *driver)
ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
- kfree(ctx->wmem);
- free_page((unsigned long)ctx->sbounce);
- free_page((unsigned long)ctx->dbounce);
- kfree(ctx);
+ nx842_crypto_free_ctx(ctx);
return ERR_PTR(-ENOMEM);
}
@@ -131,8 +128,9 @@ void nx842_crypto_free_ctx(void *p)
struct nx842_crypto_ctx *ctx = p;
kfree(ctx->wmem);
- free_page((unsigned long)ctx->sbounce);
- free_page((unsigned long)ctx->dbounce);
+ free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER);
+ free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER);
+ kfree(ctx);
}
EXPORT_SYMBOL_GPL(nx842_crypto_free_ctx);
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index f5e2c82ba876..c401cdf1a453 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -159,15 +159,15 @@ struct nx842_crypto_header_group {
struct nx842_crypto_header {
/* New members MUST be added within the struct_group() macro below. */
- struct_group_tagged(nx842_crypto_header_hdr, hdr,
+ __struct_group(nx842_crypto_header_hdr, hdr, __packed,
__be16 magic; /* NX842_CRYPTO_MAGIC */
__be16 ignore; /* decompressed end bytes to ignore */
u8 groups; /* total groups in this header */
);
- struct nx842_crypto_header_group group[];
+ struct nx842_crypto_header_group group[] __counted_by(groups);
} __packed;
static_assert(offsetof(struct nx842_crypto_header, group) == sizeof(struct nx842_crypto_header_hdr),
- "struct member likely outside of struct_group_tagged()");
+ "struct member likely outside of __struct_group()");
#define NX842_CRYPTO_GROUP_MAX (0x20)
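
The __struct_group() form keeps the tagged header struct __packed too, so
sizeof() of the prefix matches the on-wire layout that __counted_by(groups)
then bounds-checks against. A stand-alone model of the pattern (names here
are illustrative, attribute spelling assumed GCC/Clang):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	/* What __struct_group(..., __packed, ...) expands to, roughly. */
	struct demo_hdr {
		uint16_t magic;
		uint16_t ignore;
		uint8_t groups;
	} __attribute__((packed));

	struct demo {
		union {
			struct demo_hdr hdr;
			struct {
				uint16_t magic;
				uint16_t ignore;
				uint8_t groups;
			} __attribute__((packed));
		};
		uint32_t group[];
	} __attribute__((packed));

	int main(void)
	{
		/* Packed prefix: 2 + 2 + 1 bytes, no padding before group[]. */
		assert(sizeof(struct demo_hdr) == 5);
		assert(offsetof(struct demo, group) == sizeof(struct demo_hdr));
		return 0;
	}
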
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 6a3c7f9277cf..b8c416c5ee70 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -147,7 +147,6 @@ struct omap_sham_reqctx {
u8 digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
size_t digcnt;
size_t bufcnt;
- size_t buflen;
/* walk state */
struct scatterlist *sg;
@@ -156,7 +155,7 @@ struct omap_sham_reqctx {
int sg_len;
unsigned int total; /* total request */
- u8 buffer[] OMAP_ALIGNED;
+ u8 buffer[BUFLEN] OMAP_ALIGNED;
};
struct omap_sham_hmac_ctx {
@@ -891,7 +890,7 @@ static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq)
if (hash_later < 0)
hash_later = 0;
- if (hash_later && hash_later <= rctx->buflen) {
+ if (hash_later && hash_later <= sizeof(rctx->buffer)) {
scatterwalk_map_and_copy(rctx->buffer,
req->src,
req->nbytes - hash_later,
@@ -902,7 +901,7 @@ static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq)
rctx->bufcnt = 0;
}
- if (hash_later > rctx->buflen)
+ if (hash_later > sizeof(rctx->buffer))
set_bit(FLAGS_HUGE, &rctx->dd->flags);
rctx->total = min(nbytes, rctx->total);
@@ -987,7 +986,6 @@ static int omap_sham_init(struct ahash_request *req)
ctx->digcnt = 0;
ctx->total = 0;
ctx->offset = 0;
- ctx->buflen = BUFLEN;
if (tctx->flags & BIT(FLAGS_HMAC)) {
if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
@@ -1200,7 +1198,7 @@ static int omap_sham_update(struct ahash_request *req)
if (!req->nbytes)
return 0;
- if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
+ if (ctx->bufcnt + req->nbytes <= sizeof(ctx->buffer)) {
scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
0, req->nbytes, 0);
ctx->bufcnt += req->nbytes;
@@ -1333,7 +1331,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct omap_sham_reqctx) + BUFLEN);
+ sizeof(struct omap_sham_reqctx));
if (alg_base) {
struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -1404,7 +1402,8 @@ static int omap_sham_export(struct ahash_request *req, void *out)
{
struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
- memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
+ memcpy(out, rctx, offsetof(struct omap_sham_reqctx, buffer) +
+ rctx->bufcnt);
return 0;
}
@@ -1414,7 +1413,8 @@ static int omap_sham_import(struct ahash_request *req, const void *in)
struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
const struct omap_sham_reqctx *ctx_in = in;
- memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);
+ memcpy(rctx, in, offsetof(struct omap_sham_reqctx, buffer) +
+ ctx_in->bufcnt);
return 0;
}
@@ -2146,8 +2146,7 @@ static int omap_sham_probe(struct platform_device *pdev)
alg = &ealg->base;
alg->export = omap_sham_export;
alg->import = omap_sham_import;
- alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
- BUFLEN;
+ alg->halg.statesize = sizeof(struct omap_sham_reqctx);
err = crypto_engine_register_ahash(ealg);
if (err)
goto err_algs;
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 846e1d42775d..9cb11fada2c4 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -5,6 +5,7 @@
*/
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/string.h>
#include <crypto/gcm.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
@@ -35,7 +36,6 @@ static void qce_aead_done(void *data)
u32 status;
unsigned int totallen;
unsigned char tag[SHA256_DIGEST_SIZE] = {0};
- int ret = 0;
diff_dst = (req->src != req->dst) ? true : false;
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
@@ -79,8 +79,7 @@ static void qce_aead_done(void *data)
} else if (!IS_CCM(rctx->flags)) {
totallen = req->cryptlen + req->assoclen - ctx->authsize;
scatterwalk_map_and_copy(tag, req->src, totallen, ctx->authsize, 0);
- ret = memcmp(result_buf->auth_iv, tag, ctx->authsize);
- if (ret) {
+ if (memcmp(result_buf->auth_iv, tag, ctx->authsize)) {
pr_err("Bad message error\n");
error = -EBADMSG;
}
@@ -144,16 +143,12 @@ qce_aead_prepare_dst_buf(struct aead_request *req)
sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->adata_sg,
rctx->assoclen);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
+ if (IS_ERR(sg))
goto dst_tbl_free;
- }
/* dst buffer */
sg = qce_sgtable_add(&rctx->dst_tbl, msg_sg, rctx->cryptlen);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
+ if (IS_ERR(sg))
goto dst_tbl_free;
- }
totallen = rctx->cryptlen + rctx->assoclen;
} else {
if (totallen) {
@@ -642,8 +637,8 @@ static int qce_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int
memcpy(ctx->enc_key, authenc_keys.enckey, authenc_keys.enckeylen);
- memset(ctx->auth_key, 0, sizeof(ctx->auth_key));
- memcpy(ctx->auth_key, authenc_keys.authkey, authenc_keys.authkeylen);
+ memcpy_and_pad(ctx->auth_key, sizeof(ctx->auth_key),
+ authenc_keys.authkey, authenc_keys.authkeylen, 0);
return crypto_aead_setkey(ctx->fallback, key, keylen);
}
@@ -768,9 +763,8 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi
alg = &tmpl->alg.aead;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->drv_name);
+ strscpy(alg->base.cra_name, def->name);
+ strscpy(alg->base.cra_driver_name, def->drv_name);
alg->base.cra_blocksize = def->blocksize;
alg->chunksize = def->chunksize;
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 04253a8d3340..54a78a57f630 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -280,17 +280,17 @@ static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
- u8 swap[QCE_AES_IV_LENGTH];
- u32 i, j;
+ u8 swap[QCE_AES_IV_LENGTH] = {0};
+ unsigned int i, offset;
if (ivsize > QCE_AES_IV_LENGTH)
return;
- memset(swap, 0, QCE_AES_IV_LENGTH);
+ offset = QCE_AES_IV_LENGTH - ivsize;
- for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
- i < QCE_AES_IV_LENGTH; i++, j--)
- swap[i] = src[j];
+ /* Reverse and right-align IV bytes. */
+ for (i = 0; i < ivsize; i++)
+ swap[offset + i] = src[ivsize - 1 - i];
qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
}
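
A user-space check of the rewritten loop: the IV bytes are reversed and
right-aligned in a zeroed window (QCE_AES_IV_LENGTH assumed to be 16):

	#include <assert.h>

	#define IV_WIN 16

	int main(void)
	{
		const unsigned char src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
		unsigned char swap[IV_WIN] = { 0 };
		unsigned int i, offset = IV_WIN - sizeof(src);

		/* Reverse and right-align, as qce_xts_swapiv() now does. */
		for (i = 0; i < sizeof(src); i++)
			swap[offset + i] = src[sizeof(src) - 1 - i];

		assert(swap[0] == 0 && swap[7] == 0);	/* zero-padded head */
		assert(swap[8] == 8 && swap[15] == 1);	/* reversed tail */
		return 0;
	}
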
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 402e4e64347d..1b37121cbcdc 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/string.h>
#include <crypto/internal/hash.h>
#include "common.h"
@@ -489,9 +490,8 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base->cra_module = THIS_MODULE;
base->cra_init = qce_ahash_cra_init;
- snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->drv_name);
+ strscpy(base->cra_name, def->name);
+ strscpy(base->cra_driver_name, def->drv_name);
INIT_LIST_HEAD(&tmpl->entry);
tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 4ad3a1702010..db0b648a56eb 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -7,6 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
+#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <crypto/aes.h>
@@ -446,9 +447,8 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg = &tmpl->alg.skcipher;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->drv_name);
+ strscpy(alg->base.cra_name, def->name);
+ strscpy(alg->base.cra_driver_name, def->drv_name);
alg->base.cra_blocksize = def->blocksize;
alg->chunksize = def->chunksize;
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index eece1ff6c62f..bdda7b39af85 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -2131,7 +2131,7 @@ static struct skcipher_alg algs[] = {
static int s5p_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- int i, j, err;
+ int i, err;
const struct samsung_aes_variant *variant;
struct s5p_aes_dev *pdata;
struct resource *res;
@@ -2237,8 +2237,11 @@ static int s5p_aes_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(algs); i++) {
err = crypto_register_skcipher(&algs[i]);
- if (err)
+ if (err) {
+ dev_err(dev, "can't register '%s': %d\n",
+ algs[i].base.cra_name, err);
goto err_algs;
+ }
}
if (pdata->use_hash) {
@@ -2265,20 +2268,12 @@ static int s5p_aes_probe(struct platform_device *pdev)
return 0;
err_hash:
- for (j = hash_i - 1; j >= 0; j--)
- crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);
-
+ crypto_unregister_ahashes(algs_sha1_md5_sha256, hash_i);
tasklet_kill(&pdata->hash_tasklet);
res->end -= 0x300;
err_algs:
- if (i < ARRAY_SIZE(algs))
- dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name,
- err);
-
- for (j = 0; j < i; j++)
- crypto_unregister_skcipher(&algs[j]);
-
+ crypto_unregister_skciphers(algs, i);
tasklet_kill(&pdata->tasklet);
err_irq:
@@ -2294,15 +2289,13 @@ err_clk:
static void s5p_aes_remove(struct platform_device *pdev)
{
struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
- int i;
- for (i = 0; i < ARRAY_SIZE(algs); i++)
- crypto_unregister_skcipher(&algs[i]);
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
tasklet_kill(&pdata->tasklet);
if (pdata->use_hash) {
- for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
- crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);
+ crypto_unregister_ahashes(algs_sha1_md5_sha256,
+ ARRAY_SIZE(algs_sha1_md5_sha256));
pdata->res->end -= 0x300;
tasklet_kill(&pdata->hash_tasklet);
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 3c9b3f679461..b79877099942 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -361,19 +361,13 @@ static int stm32_cryp_it_start(struct stm32_cryp *cryp);
static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
{
- struct stm32_cryp *tmp, *cryp = NULL;
+ struct stm32_cryp *cryp;
spin_lock_bh(&cryp_list.lock);
- if (!ctx->cryp) {
- list_for_each_entry(tmp, &cryp_list.dev_list, list) {
- cryp = tmp;
- break;
- }
- ctx->cryp = cryp;
- } else {
- cryp = ctx->cryp;
- }
-
+ if (!ctx->cryp)
+ ctx->cryp = list_first_entry_or_null(&cryp_list.dev_list,
+ struct stm32_cryp, list);
+ cryp = ctx->cryp;
spin_unlock_bh(&cryp_list.lock);
return cryp;
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index d60147a7594e..dada5951082c 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -792,19 +792,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
- struct stm32_hash_dev *hdev = NULL, *tmp;
+ struct stm32_hash_dev *hdev;
spin_lock_bh(&stm32_hash.lock);
- if (!ctx->hdev) {
- list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
- hdev = tmp;
- break;
- }
- ctx->hdev = hdev;
- } else {
- hdev = ctx->hdev;
- }
-
+ if (!ctx->hdev)
+ ctx->hdev = list_first_entry_or_null(&stm32_hash.dev_list,
+ struct stm32_hash_dev, list);
+ hdev = ctx->hdev;
spin_unlock_bh(&stm32_hash.lock);
return hdev;
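
Both stm32 rewrites lean on list_first_entry_or_null(); a self-contained
user-space model of that helper (kernel list internals reduced to a stub):

	#include <assert.h>
	#include <stddef.h>

	/* Minimal stand-ins for the kernel's list machinery. */
	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))
	#define list_first_entry_or_null(head, type, member) \
		((head)->next != (head) ? \
		 container_of((head)->next, type, member) : NULL)

	struct dev { int id; struct list_head list; };

	int main(void)
	{
		struct list_head head = { &head, &head };	/* empty list */
		struct dev d = { 42, { &head, &head } };

		assert(!list_first_entry_or_null(&head, struct dev, list));

		head.next = head.prev = &d.list;		/* add one dev */
		assert(list_first_entry_or_null(&head, struct dev, list)->id == 42);
		return 0;
	}
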
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index e8c0db687c57..bc61d0fe3514 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -12,6 +12,7 @@
* All rights reserved.
*/
+#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -868,20 +869,28 @@ struct talitos_ahash_req_ctx {
u8 buf[2][HASH_MAX_BLOCK_SIZE];
int buf_idx;
unsigned int swinit;
- unsigned int first;
- unsigned int last;
+ unsigned int first_desc;
+ unsigned int last_desc;
+ unsigned int last_request;
unsigned int to_hash_later;
unsigned int nbuf;
struct scatterlist bufsl[2];
struct scatterlist *psrc;
+
+ struct scatterlist request_bufsl[2];
+ struct ahash_request *areq;
+ struct scatterlist *request_sl;
+ unsigned int remaining_ahash_request_bytes;
+ unsigned int current_ahash_request_bytes;
+ struct work_struct sec1_ahash_process_remaining;
};
struct talitos_export_state {
u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
u8 buf[HASH_MAX_BLOCK_SIZE];
unsigned int swinit;
- unsigned int first;
- unsigned int last;
+ unsigned int first_desc;
+ unsigned int last_desc;
unsigned int to_hash_later;
unsigned int nbuf;
};
@@ -1713,7 +1722,7 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
if (desc->next_desc &&
desc->ptr[5].ptr != desc2->ptr[5].ptr)
unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
- if (req_ctx->last)
+ if (req_ctx->last_desc)
memcpy(areq->result, req_ctx->hw_context,
crypto_ahash_digestsize(tfm));
@@ -1750,7 +1759,7 @@ static void ahash_done(struct device *dev,
container_of(desc, struct talitos_edesc, desc);
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- if (!req_ctx->last && req_ctx->to_hash_later) {
+ if (!req_ctx->last_desc && req_ctx->to_hash_later) {
/* Position any partial block for next update/final/finup */
req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
req_ctx->nbuf = req_ctx->to_hash_later;
@@ -1759,7 +1768,20 @@ static void ahash_done(struct device *dev,
kfree(edesc);
- ahash_request_complete(areq, err);
+ if (err) {
+ ahash_request_complete(areq, err);
+ return;
+ }
+
+ req_ctx->remaining_ahash_request_bytes -=
+ req_ctx->current_ahash_request_bytes;
+
+ if (!req_ctx->remaining_ahash_request_bytes) {
+ ahash_request_complete(areq, 0);
+ return;
+ }
+
+ schedule_work(&req_ctx->sec1_ahash_process_remaining);
}
/*
@@ -1803,7 +1825,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
/* first DWORD empty */
/* hash context in */
- if (!req_ctx->first || req_ctx->swinit) {
+ if (!req_ctx->first_desc || req_ctx->swinit) {
map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
req_ctx->hw_context_size,
req_ctx->hw_context,
@@ -1811,7 +1833,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
req_ctx->swinit = 0;
}
/* Indicate next op is not the first. */
- req_ctx->first = 0;
+ req_ctx->first_desc = 0;
/* HMAC key */
if (ctx->keylen)
@@ -1844,7 +1866,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
/* fifth DWORD empty */
/* hash/HMAC out -or- hash context out */
- if (req_ctx->last)
+ if (req_ctx->last_desc)
map_single_talitos_ptr(dev, &desc->ptr[5],
crypto_ahash_digestsize(tfm),
req_ctx->hw_context, DMA_FROM_DEVICE);
@@ -1886,7 +1908,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
if (sg_count > 1)
sync_needed = true;
copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
- if (req_ctx->last)
+ if (req_ctx->last_desc)
map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
req_ctx->hw_context_size,
req_ctx->hw_context,
@@ -1925,60 +1947,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
nbytes, 0, 0, 0, areq->base.flags, false);
}
-static int ahash_init(struct ahash_request *areq)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = ctx->dev;
- struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- unsigned int size;
- dma_addr_t dma;
-
- /* Initialize the context */
- req_ctx->buf_idx = 0;
- req_ctx->nbuf = 0;
- req_ctx->first = 1; /* first indicates h/w must init its context */
- req_ctx->swinit = 0; /* assume h/w init of context */
- size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
- ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
- : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
- req_ctx->hw_context_size = size;
-
- dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
- DMA_TO_DEVICE);
- dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
-
- return 0;
-}
-
-/*
- * on h/w without explicit sha224 support, we initialize h/w context
- * manually with sha224 constants, and tell it to run sha256.
- */
-static int ahash_init_sha224_swinit(struct ahash_request *areq)
-{
- struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
-
- req_ctx->hw_context[0] = SHA224_H0;
- req_ctx->hw_context[1] = SHA224_H1;
- req_ctx->hw_context[2] = SHA224_H2;
- req_ctx->hw_context[3] = SHA224_H3;
- req_ctx->hw_context[4] = SHA224_H4;
- req_ctx->hw_context[5] = SHA224_H5;
- req_ctx->hw_context[6] = SHA224_H6;
- req_ctx->hw_context[7] = SHA224_H7;
-
- /* init 64-bit count */
- req_ctx->hw_context[8] = 0;
- req_ctx->hw_context[9] = 0;
-
- ahash_init(areq);
- req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
-
- return 0;
-}
-
-static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+static int ahash_process_req_one(struct ahash_request *areq, unsigned int nbytes)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
@@ -1995,14 +1964,14 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
bool is_sec1 = has_ftr_sec1(priv);
u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
- if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
+ if (!req_ctx->last_desc && (nbytes + req_ctx->nbuf <= blocksize)) {
/* Buffer up to one whole block */
- nents = sg_nents_for_len(areq->src, nbytes);
+ nents = sg_nents_for_len(req_ctx->request_sl, nbytes);
if (nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
return nents;
}
- sg_copy_to_buffer(areq->src, nents,
+ sg_copy_to_buffer(req_ctx->request_sl, nents,
ctx_buf + req_ctx->nbuf, nbytes);
req_ctx->nbuf += nbytes;
return 0;
@@ -2012,7 +1981,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
nbytes_to_hash = nbytes + req_ctx->nbuf;
to_hash_later = nbytes_to_hash & (blocksize - 1);
- if (req_ctx->last)
+ if (req_ctx->last_desc)
to_hash_later = 0;
else if (to_hash_later)
/* There is a partial block. Hash the full block(s) now */
@@ -2029,7 +1998,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
sg_init_table(req_ctx->bufsl, nsg);
sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
if (nsg > 1)
- sg_chain(req_ctx->bufsl, 2, areq->src);
+ sg_chain(req_ctx->bufsl, 2, req_ctx->request_sl);
req_ctx->psrc = req_ctx->bufsl;
} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
int offset;
@@ -2038,26 +2007,26 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
offset = blocksize - req_ctx->nbuf;
else
offset = nbytes_to_hash - req_ctx->nbuf;
- nents = sg_nents_for_len(areq->src, offset);
+ nents = sg_nents_for_len(req_ctx->request_sl, offset);
if (nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
return nents;
}
- sg_copy_to_buffer(areq->src, nents,
+ sg_copy_to_buffer(req_ctx->request_sl, nents,
ctx_buf + req_ctx->nbuf, offset);
req_ctx->nbuf += offset;
- req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
+ req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, req_ctx->request_sl,
offset);
} else
- req_ctx->psrc = areq->src;
+ req_ctx->psrc = req_ctx->request_sl;
if (to_hash_later) {
- nents = sg_nents_for_len(areq->src, nbytes);
+ nents = sg_nents_for_len(req_ctx->request_sl, nbytes);
if (nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
return nents;
}
- sg_pcopy_to_buffer(areq->src, nents,
+ sg_pcopy_to_buffer(req_ctx->request_sl, nents,
req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
to_hash_later,
nbytes - to_hash_later);
@@ -2065,36 +2034,145 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
req_ctx->to_hash_later = to_hash_later;
/* Allocate extended descriptor */
- edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
+ edesc = ahash_edesc_alloc(req_ctx->areq, nbytes_to_hash);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
edesc->desc.hdr = ctx->desc_hdr_template;
/* On last one, request SEC to pad; otherwise continue */
- if (req_ctx->last)
+ if (req_ctx->last_desc)
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
else
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
/* request SEC to INIT hash. */
- if (req_ctx->first && !req_ctx->swinit)
+ if (req_ctx->first_desc && !req_ctx->swinit)
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
/* When the tfm context has a keylen, it's an HMAC.
* A first or last (ie. not middle) descriptor must request HMAC.
*/
- if (ctx->keylen && (req_ctx->first || req_ctx->last))
+ if (ctx->keylen && (req_ctx->first_desc || req_ctx->last_desc))
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
- return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
+ return common_nonsnoop_hash(edesc, req_ctx->areq, nbytes_to_hash, ahash_done);
+}
+
+static void sec1_ahash_process_remaining(struct work_struct *work)
+{
+ struct talitos_ahash_req_ctx *req_ctx =
+ container_of(work, struct talitos_ahash_req_ctx,
+ sec1_ahash_process_remaining);
+ int err = 0;
+
+ req_ctx->request_sl = scatterwalk_ffwd(req_ctx->request_bufsl,
+ req_ctx->request_sl, TALITOS1_MAX_DATA_LEN);
+
+ if (req_ctx->remaining_ahash_request_bytes > TALITOS1_MAX_DATA_LEN)
+ req_ctx->current_ahash_request_bytes = TALITOS1_MAX_DATA_LEN;
+ else {
+ req_ctx->current_ahash_request_bytes =
+ req_ctx->remaining_ahash_request_bytes;
+
+ if (req_ctx->last_request)
+ req_ctx->last_desc = 1;
+ }
+
+ err = ahash_process_req_one(req_ctx->areq,
+ req_ctx->current_ahash_request_bytes);
+
+ if (err != -EINPROGRESS)
+ ahash_request_complete(req_ctx->areq, err);
+}
+
+static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+
+ req_ctx->areq = areq;
+ req_ctx->request_sl = areq->src;
+ req_ctx->remaining_ahash_request_bytes = nbytes;
+
+	if (is_sec1 && nbytes > TALITOS1_MAX_DATA_LEN)
+		nbytes = TALITOS1_MAX_DATA_LEN;
+	else if (req_ctx->last_request)
+		req_ctx->last_desc = 1;
+
+ req_ctx->current_ahash_request_bytes = nbytes;
+
+ return ahash_process_req_one(req_ctx->areq,
+ req_ctx->current_ahash_request_bytes);
+}
+
+static int ahash_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ unsigned int size;
+ dma_addr_t dma;
+
+ /* Initialize the context */
+ req_ctx->buf_idx = 0;
+ req_ctx->nbuf = 0;
+ req_ctx->first_desc = 1; /* first_desc indicates h/w must init its context */
+ req_ctx->swinit = 0; /* assume h/w init of context */
+ size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+ req_ctx->hw_context_size = size;
+ req_ctx->last_request = 0;
+ req_ctx->last_desc = 0;
+ INIT_WORK(&req_ctx->sec1_ahash_process_remaining, sec1_ahash_process_remaining);
+
+ dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+/*
+ * on h/w without explicit sha224 support, we initialize h/w context
+ * manually with sha224 constants, and tell it to run sha256.
+ */
+static int ahash_init_sha224_swinit(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ req_ctx->hw_context[0] = SHA224_H0;
+ req_ctx->hw_context[1] = SHA224_H1;
+ req_ctx->hw_context[2] = SHA224_H2;
+ req_ctx->hw_context[3] = SHA224_H3;
+ req_ctx->hw_context[4] = SHA224_H4;
+ req_ctx->hw_context[5] = SHA224_H5;
+ req_ctx->hw_context[6] = SHA224_H6;
+ req_ctx->hw_context[7] = SHA224_H7;
+
+ /* init 64-bit count */
+ req_ctx->hw_context[8] = 0;
+ req_ctx->hw_context[9] = 0;
+
+ ahash_init(areq);
+ req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
+
+ return 0;
}
static int ahash_update(struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- req_ctx->last = 0;
+ req_ctx->last_request = 0;
return ahash_process_req(areq, areq->nbytes);
}
@@ -2103,7 +2181,7 @@ static int ahash_final(struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- req_ctx->last = 1;
+ req_ctx->last_request = 1;
return ahash_process_req(areq, 0);
}
@@ -2112,7 +2190,7 @@ static int ahash_finup(struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- req_ctx->last = 1;
+ req_ctx->last_request = 1;
return ahash_process_req(areq, areq->nbytes);
}
@@ -2146,8 +2224,8 @@ static int ahash_export(struct ahash_request *areq, void *out)
req_ctx->hw_context_size);
memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
export->swinit = req_ctx->swinit;
- export->first = req_ctx->first;
- export->last = req_ctx->last;
+ export->first_desc = req_ctx->first_desc;
+ export->last_desc = req_ctx->last_desc;
export->to_hash_later = req_ctx->to_hash_later;
export->nbuf = req_ctx->nbuf;
@@ -2172,8 +2250,8 @@ static int ahash_import(struct ahash_request *areq, const void *in)
memcpy(req_ctx->hw_context, export->hw_context, size);
memcpy(req_ctx->buf[0], export->buf, export->nbuf);
req_ctx->swinit = export->swinit;
- req_ctx->first = export->first;
- req_ctx->last = export->last;
+ req_ctx->first_desc = export->first_desc;
+ req_ctx->last_desc = export->last_desc;
req_ctx->to_hash_later = export->to_hash_later;
req_ctx->nbuf = export->nbuf;
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
index 9210cceb4b7b..30c78afe3dea 100644
--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -4,6 +4,7 @@
* Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
*/
+#include <linux/bottom_half.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
@@ -333,7 +334,9 @@ out:
tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg);
out_finalize:
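+ /* completion callbacks expect to be invoked with BH disabled */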
+ local_bh_disable();
crypto_finalize_skcipher_request(se->engine, req, ret);
+ local_bh_enable();
return 0;
}
@@ -1262,7 +1265,9 @@ out_free_inbuf:
tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
out_finalize:
+ local_bh_disable();
crypto_finalize_aead_request(ctx->se->engine, req, ret);
+ local_bh_enable();
return 0;
}
@@ -1348,7 +1353,9 @@ out_free_inbuf:
tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
out_finalize:
+ local_bh_disable();
crypto_finalize_aead_request(ctx->se->engine, req, ret);
+ local_bh_enable();
return 0;
}
@@ -1746,7 +1753,9 @@ out:
if (tegra_key_is_reserved(rctx->key_id))
tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
+ local_bh_disable();
crypto_finalize_hash_request(se->engine, req, ret);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 06bb5bf0fa33..23d549801612 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -4,6 +4,7 @@
* Crypto driver to handle HASH algorithms using NVIDIA Security Engine.
*/
+#include <linux/bottom_half.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
@@ -546,7 +547,9 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq)
}
out:
+ local_bh_disable();
crypto_finalize_hash_request(se->engine, req, ret);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/ti/Kconfig b/drivers/crypto/ti/Kconfig
index a3692ceec49b..1a3a571ac8ce 100644
--- a/drivers/crypto/ti/Kconfig
+++ b/drivers/crypto/ti/Kconfig
@@ -6,7 +6,11 @@ config CRYPTO_DEV_TI_DTHEV2
select CRYPTO_SKCIPHER
select CRYPTO_ECB
select CRYPTO_CBC
+ select CRYPTO_CTR
select CRYPTO_XTS
+ select CRYPTO_GCM
+ select CRYPTO_CCM
+ select SG_SPLIT
help
This enables support for the TI DTHE V2 hw cryptography engine
which can be found on TI K3 SOCs. Selecting this enables use
diff --git a/drivers/crypto/ti/dthev2-aes.c b/drivers/crypto/ti/dthev2-aes.c
index 156729ccc50e..eb5cd902dfb5 100644
--- a/drivers/crypto/ti/dthev2-aes.c
+++ b/drivers/crypto/ti/dthev2-aes.c
@@ -10,15 +10,18 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/engine.h>
+#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include "dthev2-common.h"
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/scatterlist.h>
/* Registers */
@@ -53,6 +56,7 @@
#define DTHE_P_AES_C_LENGTH_1 0x0058
#define DTHE_P_AES_AUTH_LENGTH 0x005C
#define DTHE_P_AES_DATA_IN_OUT 0x0060
+#define DTHE_P_AES_TAG_OUT 0x0070
#define DTHE_P_AES_SYSCONFIG 0x0084
#define DTHE_P_AES_IRQSTATUS 0x008C
@@ -63,7 +67,10 @@
enum aes_ctrl_mode_masks {
AES_CTRL_ECB_MASK = 0x00,
AES_CTRL_CBC_MASK = BIT(5),
+ AES_CTRL_CTR_MASK = BIT(6),
AES_CTRL_XTS_MASK = BIT(12) | BIT(11),
+ AES_CTRL_GCM_MASK = BIT(17) | BIT(16) | BIT(6),
+ AES_CTRL_CCM_MASK = BIT(18) | BIT(6),
};
#define DTHE_AES_CTRL_MODE_CLEAR_MASK ~GENMASK(28, 5)
@@ -74,6 +81,13 @@ enum aes_ctrl_mode_masks {
#define DTHE_AES_CTRL_KEYSIZE_24B BIT(4)
#define DTHE_AES_CTRL_KEYSIZE_32B (BIT(3) | BIT(4))
+#define DTHE_AES_CTRL_CTR_WIDTH_128B (BIT(7) | BIT(8))
+
+#define DTHE_AES_CCM_L_FROM_IV_MASK GENMASK(2, 0)
+#define DTHE_AES_CCM_M_BITS GENMASK(2, 0)
+#define DTHE_AES_CTRL_CCM_L_FIELD_MASK GENMASK(21, 19)
+#define DTHE_AES_CTRL_CCM_M_FIELD_MASK GENMASK(24, 22)
+
#define DTHE_AES_CTRL_SAVE_CTX_SET BIT(29)
#define DTHE_AES_CTRL_OUTPUT_READY BIT_MASK(0)
@@ -88,6 +102,10 @@ enum aes_ctrl_mode_masks {
#define AES_IV_SIZE AES_BLOCK_SIZE
#define AES_BLOCK_WORDS (AES_BLOCK_SIZE / sizeof(u32))
#define AES_IV_WORDS AES_BLOCK_WORDS
+#define DTHE_AES_GCM_AAD_MAXLEN (BIT_ULL(32) - 1)
+#define DTHE_AES_CCM_AAD_MAXLEN (BIT(16) - BIT(8))
+#define DTHE_AES_CCM_CRYPT_MAXLEN (BIT_ULL(61) - 1)
+#define POLL_TIMEOUT_INTERVAL HZ
static int dthe_cipher_init_tfm(struct crypto_skcipher *tfm)
{
@@ -100,25 +118,27 @@ static int dthe_cipher_init_tfm(struct crypto_skcipher *tfm)
return 0;
}
-static int dthe_cipher_xts_init_tfm(struct crypto_skcipher *tfm)
+static int dthe_cipher_init_tfm_fallback(struct crypto_skcipher *tfm)
{
struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
struct dthe_data *dev_data = dthe_get_dev(ctx);
+ const char *alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
ctx->dev_data = dev_data;
ctx->keylen = 0;
- ctx->skcipher_fb = crypto_alloc_sync_skcipher("xts(aes)", 0,
+ ctx->skcipher_fb = crypto_alloc_sync_skcipher(alg_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->skcipher_fb)) {
- dev_err(dev_data->dev, "fallback driver xts(aes) couldn't be loaded\n");
+ dev_err(dev_data->dev, "fallback driver %s couldn't be loaded\n",
+ alg_name);
return PTR_ERR(ctx->skcipher_fb);
}
return 0;
}
-static void dthe_cipher_xts_exit_tfm(struct crypto_skcipher *tfm)
+static void dthe_cipher_exit_tfm(struct crypto_skcipher *tfm)
{
struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -156,6 +176,24 @@ static int dthe_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, unsig
return dthe_aes_setkey(tfm, key, keylen);
}
+static int dthe_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret = dthe_aes_setkey(tfm, key, keylen);
+
+ if (ret)
+ return ret;
+
+ ctx->aes_mode = DTHE_AES_CTR;
+
+ crypto_sync_skcipher_clear_flags(ctx->skcipher_fb, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->skcipher_fb,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(ctx->skcipher_fb, key, keylen);
+}
+
static int dthe_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
{
struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -171,8 +209,8 @@ static int dthe_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsig
crypto_sync_skcipher_clear_flags(ctx->skcipher_fb, CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ctx->skcipher_fb,
- crypto_skcipher_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
return crypto_sync_skcipher_setkey(ctx->skcipher_fb, key, keylen);
}
@@ -236,9 +274,23 @@ static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx,
case DTHE_AES_CBC:
ctrl_val |= AES_CTRL_CBC_MASK;
break;
+ case DTHE_AES_CTR:
+ ctrl_val |= AES_CTRL_CTR_MASK;
+ ctrl_val |= DTHE_AES_CTRL_CTR_WIDTH_128B;
+ break;
case DTHE_AES_XTS:
ctrl_val |= AES_CTRL_XTS_MASK;
break;
+ case DTHE_AES_GCM:
+ ctrl_val |= AES_CTRL_GCM_MASK;
+ break;
+ case DTHE_AES_CCM:
+ ctrl_val |= AES_CTRL_CCM_MASK;
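+ /* encode L' = L - 1 (low bits of iv[0]) and M' = (authsize - 2) / 2 */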
+ ctrl_val |= FIELD_PREP(DTHE_AES_CTRL_CCM_L_FIELD_MASK,
+ (iv_in[0] & DTHE_AES_CCM_L_FROM_IV_MASK));
+ ctrl_val |= FIELD_PREP(DTHE_AES_CTRL_CCM_M_FIELD_MASK,
+ ((ctx->authsize - 2) >> 1) & DTHE_AES_CCM_M_BITS);
+ break;
}
if (iv_in) {
@@ -251,6 +303,22 @@ static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx,
writel_relaxed(ctrl_val, aes_base_reg + DTHE_P_AES_CTRL);
}
+static int dthe_aes_do_fallback(struct skcipher_request *req)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->skcipher_fb);
+
+ skcipher_request_set_callback(subreq, skcipher_request_flags(req),
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+
+ return rctx->enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+}
+
static void dthe_aes_dma_in_callback(void *data)
{
struct skcipher_request *req = (struct skcipher_request *)data;
@@ -271,7 +339,7 @@ static int dthe_aes_run(struct crypto_engine *engine, void *areq)
struct scatterlist *dst = req->dst;
int src_nents = sg_nents_for_len(src, len);
- int dst_nents;
+ int dst_nents = sg_nents_for_len(dst, len);
int src_mapped_nents;
int dst_mapped_nents;
@@ -305,25 +373,62 @@ static int dthe_aes_run(struct crypto_engine *engine, void *areq)
dst_dir = DMA_FROM_DEVICE;
}
+ /*
+ * CTR mode can operate on any input length, but the hardware
+ * requires input length to be a multiple of the block size.
+ * We need to handle the padding in the driver.
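+ * E.g. cryptlen = 20 is padded with 12 zero bytes and the h/w is given len = 32.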
+ */
+ if (ctx->aes_mode == DTHE_AES_CTR && req->cryptlen % AES_BLOCK_SIZE) {
+ unsigned int pad_size = AES_BLOCK_SIZE - (req->cryptlen % AES_BLOCK_SIZE);
+ u8 *pad_buf = rctx->padding;
+ struct scatterlist *sg;
+
+ len += pad_size;
+ src_nents++;
+ dst_nents++;
+
+ src = kmalloc_array(src_nents, sizeof(*src), GFP_ATOMIC);
+ if (!src) {
+ ret = -ENOMEM;
+ goto aes_ctr_src_alloc_err;
+ }
+
+ sg_init_table(src, src_nents);
+ sg = dthe_copy_sg(src, req->src, req->cryptlen);
+ memzero_explicit(pad_buf, AES_BLOCK_SIZE);
+ sg_set_buf(sg, pad_buf, pad_size);
+
+ if (diff_dst) {
+ dst = kmalloc_array(dst_nents, sizeof(*dst), GFP_ATOMIC);
+ if (!dst) {
+ ret = -ENOMEM;
+ goto aes_ctr_dst_alloc_err;
+ }
+
+ sg_init_table(dst, dst_nents);
+ sg = dthe_copy_sg(dst, req->dst, req->cryptlen);
+ sg_set_buf(sg, pad_buf, pad_size);
+ } else {
+ dst = src;
+ }
+ }
+
tx_dev = dmaengine_get_dma_device(dev_data->dma_aes_tx);
rx_dev = dmaengine_get_dma_device(dev_data->dma_aes_rx);
src_mapped_nents = dma_map_sg(tx_dev, src, src_nents, src_dir);
if (src_mapped_nents == 0) {
ret = -EINVAL;
- goto aes_err;
+ goto aes_map_src_err;
}
if (!diff_dst) {
- dst_nents = src_nents;
dst_mapped_nents = src_mapped_nents;
} else {
- dst_nents = sg_nents_for_len(dst, len);
dst_mapped_nents = dma_map_sg(rx_dev, dst, dst_nents, dst_dir);
if (dst_mapped_nents == 0) {
- dma_unmap_sg(tx_dev, src, src_nents, src_dir);
ret = -EINVAL;
- goto aes_err;
+ goto aes_map_dst_err;
}
}
@@ -353,8 +458,8 @@ static int dthe_aes_run(struct crypto_engine *engine, void *areq)
else
dthe_aes_set_ctrl_key(ctx, rctx, (u32 *)req->iv);
- writel_relaxed(lower_32_bits(req->cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_0);
- writel_relaxed(upper_32_bits(req->cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_1);
+ writel_relaxed(lower_32_bits(len), aes_base_reg + DTHE_P_AES_C_LENGTH_0);
+ writel_relaxed(upper_32_bits(len), aes_base_reg + DTHE_P_AES_C_LENGTH_1);
dmaengine_submit(desc_in);
dmaengine_submit(desc_out);
@@ -386,11 +491,26 @@ static int dthe_aes_run(struct crypto_engine *engine, void *areq)
}
aes_prep_err:
- dma_unmap_sg(tx_dev, src, src_nents, src_dir);
if (dst_dir != DMA_BIDIRECTIONAL)
dma_unmap_sg(rx_dev, dst, dst_nents, dst_dir);
+aes_map_dst_err:
+ dma_unmap_sg(tx_dev, src, src_nents, src_dir);
+
+aes_map_src_err:
+ if (ctx->aes_mode == DTHE_AES_CTR && req->cryptlen % AES_BLOCK_SIZE) {
+ memzero_explicit(rctx->padding, AES_BLOCK_SIZE);
+ if (diff_dst)
+ kfree(dst);
+aes_ctr_dst_alloc_err:
+ kfree(src);
+aes_ctr_src_alloc_err:
+ /* Fall back to software if the allocation failed with -ENOMEM */
+ if (ret == -ENOMEM)
+ ret = dthe_aes_do_fallback(req);
+ }
-aes_err:
local_bh_disable();
crypto_finalize_skcipher_request(dev_data->engine, req, ret);
local_bh_enable();
@@ -400,7 +520,6 @@ aes_err:
static int dthe_aes_crypt(struct skcipher_request *req)
{
struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
- struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
struct dthe_data *dev_data = dthe_get_dev(ctx);
struct crypto_engine *engine;
@@ -408,20 +527,14 @@ static int dthe_aes_crypt(struct skcipher_request *req)
* If data is not a multiple of AES_BLOCK_SIZE:
* - need to return -EINVAL for ECB, CBC as they are block ciphers
* - need to fallback to software as H/W doesn't support Ciphertext Stealing for XTS
+ * - do nothing for CTR
*/
if (req->cryptlen % AES_BLOCK_SIZE) {
- if (ctx->aes_mode == DTHE_AES_XTS) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->skcipher_fb);
-
- skcipher_request_set_callback(subreq, skcipher_request_flags(req),
- req->base.complete, req->base.data);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
+ if (ctx->aes_mode == DTHE_AES_XTS)
+ return dthe_aes_do_fallback(req);
- return rctx->enc ? crypto_skcipher_encrypt(subreq) :
- crypto_skcipher_decrypt(subreq);
- }
- return -EINVAL;
+ if (ctx->aes_mode != DTHE_AES_CTR)
+ return -EINVAL;
}
/*
@@ -454,6 +567,642 @@ static int dthe_aes_decrypt(struct skcipher_request *req)
return dthe_aes_crypt(req);
}
+static int dthe_aead_init_tfm(struct crypto_aead *tfm)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+
+ ctx->dev_data = dev_data;
+
+ const char *alg_name = crypto_tfm_alg_name(crypto_aead_tfm(tfm));
+
+ ctx->aead_fb = crypto_alloc_sync_aead(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->aead_fb)) {
+ dev_err(dev_data->dev, "fallback driver %s couldn't be loaded\n",
+ alg_name);
+ return PTR_ERR(ctx->aead_fb);
+ }
+
+ return 0;
+}
+
+static void dthe_aead_exit_tfm(struct crypto_aead *tfm)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_sync_aead(ctx->aead_fb);
+}
+
+/**
+ * dthe_aead_prep_aad - Prepare AAD scatterlist from input request
+ * @sg: Input scatterlist containing AAD
+ * @assoclen: Length of AAD
+ * @pad_buf: Buffer to hold AAD padding if needed
+ *
+ * Description:
+ * Creates a scatterlist containing only the AAD portion with padding
+ * to align to AES_BLOCK_SIZE. This simplifies DMA handling by allowing
+ * AAD to be sent separately via TX-only DMA.
+ *
+ * Return:
+ * Pointer to the AAD scatterlist, or ERR_PTR(error) on failure.
+ * The calling function needs to free the returned scatterlist when done.
+ **/
+static struct scatterlist *dthe_aead_prep_aad(struct scatterlist *sg,
+ unsigned int assoclen,
+ u8 *pad_buf)
+{
+ struct scatterlist *aad_sg;
+ struct scatterlist *to_sg;
+ int aad_nents;
+
+ if (assoclen == 0)
+ return NULL;
+
+ aad_nents = sg_nents_for_len(sg, assoclen);
+ if (assoclen % AES_BLOCK_SIZE)
+ aad_nents++;
+
+ aad_sg = kmalloc_array(aad_nents, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (!aad_sg)
+ return ERR_PTR(-ENOMEM);
+
+ sg_init_table(aad_sg, aad_nents);
+ to_sg = dthe_copy_sg(aad_sg, sg, assoclen);
+ if (assoclen % AES_BLOCK_SIZE) {
+ unsigned int pad_len = AES_BLOCK_SIZE - (assoclen % AES_BLOCK_SIZE);
+
+ memset(pad_buf, 0, pad_len);
+ sg_set_buf(to_sg, pad_buf, pad_len);
+ }
+
+ return aad_sg;
+}
+
+/**
+ * dthe_aead_prep_crypt - Prepare crypt scatterlist from req->src/req->dst
+ * @sg: Input req->src/req->dst scatterlist
+ * @assoclen: Length of AAD (to skip)
+ * @cryptlen: Length of ciphertext/plaintext (minus the size of TAG in decryption)
+ * @pad_buf: Zeroed buffer to hold crypt padding if needed
+ *
+ * Description:
+ * Creates a scatterlist containing only the ciphertext/plaintext portion
+ * (skipping AAD) with padding to align to AES_BLOCK_SIZE.
+ *
+ * Return:
+ * Pointer to the ciphertext scatterlist, or ERR_PTR(error) on failure.
+ * The calling function needs to free the returned scatterlist when done.
+ **/
+static struct scatterlist *dthe_aead_prep_crypt(struct scatterlist *sg,
+ unsigned int assoclen,
+ unsigned int cryptlen,
+ u8 *pad_buf)
+{
+ struct scatterlist *out_sg[1];
+ struct scatterlist *crypt_sg;
+ struct scatterlist *to_sg;
+ size_t split_sizes[1] = {cryptlen};
+ int out_mapped_nents[1];
+ int crypt_nents;
+ int err;
+
+ if (cryptlen == 0)
+ return NULL;
+
+ /* Skip AAD, extract ciphertext portion */
+ err = sg_split(sg, 0, assoclen, 1, split_sizes, out_sg, out_mapped_nents, GFP_ATOMIC);
+ if (err)
+ goto dthe_aead_prep_crypt_split_err;
+
+ crypt_nents = sg_nents_for_len(out_sg[0], cryptlen);
+ if (cryptlen % AES_BLOCK_SIZE)
+ crypt_nents++;
+
+ crypt_sg = kmalloc_array(crypt_nents, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (!crypt_sg) {
+ err = -ENOMEM;
+ goto dthe_aead_prep_crypt_mem_err;
+ }
+
+ sg_init_table(crypt_sg, crypt_nents);
+ to_sg = dthe_copy_sg(crypt_sg, out_sg[0], cryptlen);
+ if (cryptlen % AES_BLOCK_SIZE) {
+ unsigned int pad_len = AES_BLOCK_SIZE - (cryptlen % AES_BLOCK_SIZE);
+
+ sg_set_buf(to_sg, pad_buf, pad_len);
+ }
+
+dthe_aead_prep_crypt_mem_err:
+ kfree(out_sg[0]);
+
+dthe_aead_prep_crypt_split_err:
+ if (err)
+ return ERR_PTR(err);
+ return crypt_sg;
+}
+
+static int dthe_aead_read_tag(struct dthe_tfm_ctx *ctx, u32 *tag)
+{
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ void __iomem *aes_base_reg = dev_data->regs + DTHE_P_AES_BASE;
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(aes_base_reg + DTHE_P_AES_CTRL, val,
+ (val & DTHE_AES_CTRL_SAVED_CTX_READY),
+ 0, POLL_TIMEOUT_INTERVAL);
+ if (ret)
+ return ret;
+
+ for (int i = 0; i < AES_BLOCK_WORDS; ++i)
+ tag[i] = readl_relaxed(aes_base_reg +
+ DTHE_P_AES_TAG_OUT +
+ DTHE_REG_SIZE * i);
+ return 0;
+}
+
+static int dthe_aead_enc_get_tag(struct aead_request *req)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ u32 tag[AES_BLOCK_WORDS];
+ int nents;
+ int ret;
+
+ ret = dthe_aead_read_tag(ctx, tag);
+ if (ret)
+ return ret;
+
+ nents = sg_nents_for_len(req->dst, req->cryptlen + req->assoclen + ctx->authsize);
+
+ sg_pcopy_from_buffer(req->dst, nents, tag, ctx->authsize,
+ req->assoclen + req->cryptlen);
+
+ return 0;
+}
+
+static int dthe_aead_dec_verify_tag(struct aead_request *req)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ u32 tag_out[AES_BLOCK_WORDS];
+ u32 tag_in[AES_BLOCK_WORDS];
+ int nents;
+ int ret;
+
+ ret = dthe_aead_read_tag(ctx, tag_out);
+ if (ret)
+ return ret;
+
+ nents = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
+
+ sg_pcopy_to_buffer(req->src, nents, tag_in, ctx->authsize,
+ req->assoclen + req->cryptlen - ctx->authsize);
+
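+ /* crypto_memneq() compares in constant time so tag bytes are not leaked */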
+ if (crypto_memneq(tag_in, tag_out, ctx->authsize))
+ return -EBADMSG;
+
+ return 0;
+}
+
+static int dthe_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ crypto_sync_aead_clear_flags(ctx->aead_fb, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_aead_set_flags(ctx->aead_fb,
+ crypto_aead_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_aead_setkey(ctx->aead_fb, key, keylen);
+}
+
+static int dthe_gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ ret = dthe_aead_setkey(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ ctx->aes_mode = DTHE_AES_GCM;
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+
+ return ret;
+}
+
+static int dthe_ccm_aes_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ ret = dthe_aead_setkey(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ ctx->aes_mode = DTHE_AES_CCM;
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+
+ return ret;
+}
+
+static int dthe_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+
+ /* Invalid auth size will be handled by crypto_aead_setauthsize() */
+ ctx->authsize = authsize;
+
+ return crypto_sync_aead_setauthsize(ctx->aead_fb, authsize);
+}
+
+static int dthe_aead_do_fallback(struct aead_request *req)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct dthe_aes_req_ctx *rctx = aead_request_ctx(req);
+
+ SYNC_AEAD_REQUEST_ON_STACK(subreq, ctx->aead_fb);
+
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return rctx->enc ? crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+}
+
+static void dthe_aead_dma_in_callback(void *data)
+{
+ struct aead_request *req = (struct aead_request *)data;
+ struct dthe_aes_req_ctx *rctx = aead_request_ctx(req);
+
+ complete(&rctx->aes_compl);
+}
+
+static int dthe_aead_run(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request, base);
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct dthe_aes_req_ctx *rctx = aead_request_ctx(req);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+
+ unsigned int cryptlen = req->cryptlen;
+ unsigned int assoclen = req->assoclen;
+ unsigned int authsize = ctx->authsize;
+ unsigned int unpadded_cryptlen;
+ struct scatterlist *src = NULL;
+ struct scatterlist *dst = NULL;
+ struct scatterlist *aad_sg = NULL;
+ u32 iv_in[AES_IV_WORDS];
+
+ int aad_nents = 0;
+ int src_nents = 0;
+ int dst_nents = 0;
+ int aad_mapped_nents = 0;
+ int src_mapped_nents = 0;
+ int dst_mapped_nents = 0;
+
+ u8 *src_assoc_padbuf = rctx->padding;
+ u8 *src_crypt_padbuf = rctx->padding + AES_BLOCK_SIZE;
+ u8 *dst_crypt_padbuf = rctx->padding + AES_BLOCK_SIZE;
+
+ bool diff_dst;
+ enum dma_data_direction aad_dir, src_dir, dst_dir;
+
+ struct device *tx_dev, *rx_dev;
+ struct dma_async_tx_descriptor *desc_in, *desc_out, *desc_aad_out;
+
+ int ret;
+ int err;
+
+ void __iomem *aes_base_reg = dev_data->regs + DTHE_P_AES_BASE;
+
+ u32 aes_irqenable_val = readl_relaxed(aes_base_reg + DTHE_P_AES_IRQENABLE);
+ u32 aes_sysconfig_val = readl_relaxed(aes_base_reg + DTHE_P_AES_SYSCONFIG);
+
+ aes_sysconfig_val |= DTHE_AES_SYSCONFIG_DMA_DATA_IN_OUT_EN;
+ writel_relaxed(aes_sysconfig_val, aes_base_reg + DTHE_P_AES_SYSCONFIG);
+
+ aes_irqenable_val |= DTHE_AES_IRQENABLE_EN_ALL;
+ writel_relaxed(aes_irqenable_val, aes_base_reg + DTHE_P_AES_IRQENABLE);
+
+ /* In decryption, the last authsize bytes are the TAG */
+ if (!rctx->enc)
+ cryptlen -= authsize;
+ unpadded_cryptlen = cryptlen;
+
+ memset(src_assoc_padbuf, 0, AES_BLOCK_SIZE);
+ memset(src_crypt_padbuf, 0, AES_BLOCK_SIZE);
+ memset(dst_crypt_padbuf, 0, AES_BLOCK_SIZE);
+
+ tx_dev = dmaengine_get_dma_device(dev_data->dma_aes_tx);
+ rx_dev = dmaengine_get_dma_device(dev_data->dma_aes_rx);
+
+ if (req->src == req->dst) {
+ diff_dst = false;
+ src_dir = DMA_BIDIRECTIONAL;
+ dst_dir = DMA_BIDIRECTIONAL;
+ } else {
+ diff_dst = true;
+ src_dir = DMA_TO_DEVICE;
+ dst_dir = DMA_FROM_DEVICE;
+ }
+ aad_dir = DMA_TO_DEVICE;
+
+ /* Prep AAD scatterlist (always from req->src) */
+ aad_sg = dthe_aead_prep_aad(req->src, req->assoclen, src_assoc_padbuf);
+ if (IS_ERR(aad_sg)) {
+ ret = PTR_ERR(aad_sg);
+ goto aead_prep_aad_err;
+ }
+
+ /* Prep ciphertext src scatterlist */
+ src = dthe_aead_prep_crypt(req->src, req->assoclen, cryptlen, src_crypt_padbuf);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ goto aead_prep_src_err;
+ }
+
+ /* Prep ciphertext dst scatterlist (only if separate dst) */
+ if (diff_dst) {
+ dst = dthe_aead_prep_crypt(req->dst, req->assoclen, unpadded_cryptlen,
+ dst_crypt_padbuf);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto aead_prep_dst_err;
+ }
+ } else {
+ dst = src;
+ }
+
+ /* Calculate padded lengths for nents calculations */
+ if (req->assoclen % AES_BLOCK_SIZE)
+ assoclen += AES_BLOCK_SIZE - (req->assoclen % AES_BLOCK_SIZE);
+ if (cryptlen % AES_BLOCK_SIZE)
+ cryptlen += AES_BLOCK_SIZE - (cryptlen % AES_BLOCK_SIZE);
+
+ if (assoclen != 0) {
+ /* Map AAD for TX only */
+ aad_nents = sg_nents_for_len(aad_sg, assoclen);
+ aad_mapped_nents = dma_map_sg(tx_dev, aad_sg, aad_nents, aad_dir);
+ if (aad_mapped_nents == 0) {
+ dev_err(dev_data->dev, "Failed to map AAD for TX\n");
+ ret = -EINVAL;
+ goto aead_dma_map_aad_err;
+ }
+
+ /* Prepare DMA descriptors for AAD TX */
+ desc_aad_out = dmaengine_prep_slave_sg(dev_data->dma_aes_tx, aad_sg,
+ aad_mapped_nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_aad_out) {
+ dev_err(dev_data->dev, "AAD TX prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto aead_dma_prep_aad_err;
+ }
+ }
+
+ if (cryptlen != 0) {
+ /* Map ciphertext src for TX (BIDIRECTIONAL if in-place) */
+ src_nents = sg_nents_for_len(src, cryptlen);
+ src_mapped_nents = dma_map_sg(tx_dev, src, src_nents, src_dir);
+ if (src_mapped_nents == 0) {
+ dev_err(dev_data->dev, "Failed to map ciphertext src for TX\n");
+ ret = -EINVAL;
+ goto aead_dma_prep_aad_err;
+ }
+
+ /* Prepare DMA descriptors for ciphertext TX */
+ desc_out = dmaengine_prep_slave_sg(dev_data->dma_aes_tx, src,
+ src_mapped_nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_out) {
+ dev_err(dev_data->dev, "Ciphertext TX prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto aead_dma_prep_src_err;
+ }
+
+ /* Map ciphertext dst for RX (only if separate dst) */
+ if (diff_dst) {
+ dst_nents = sg_nents_for_len(dst, cryptlen);
+ dst_mapped_nents = dma_map_sg(rx_dev, dst, dst_nents, dst_dir);
+ if (dst_mapped_nents == 0) {
+ dev_err(dev_data->dev, "Failed to map ciphertext dst for RX\n");
+ ret = -EINVAL;
+ goto aead_dma_prep_src_err;
+ }
+ } else {
+ dst_nents = src_nents;
+ dst_mapped_nents = src_mapped_nents;
+ }
+
+ /* Prepare DMA descriptor for ciphertext RX */
+ desc_in = dmaengine_prep_slave_sg(dev_data->dma_aes_rx, dst,
+ dst_mapped_nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_in) {
+ dev_err(dev_data->dev, "Ciphertext RX prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto aead_dma_prep_dst_err;
+ }
+
+ desc_in->callback = dthe_aead_dma_in_callback;
+ desc_in->callback_param = req;
+ } else if (assoclen != 0) {
+ /* AAD-only operation */
+ desc_aad_out->callback = dthe_aead_dma_in_callback;
+ desc_aad_out->callback_param = req;
+ }
+
+ init_completion(&rctx->aes_compl);
+
+ /*
+ * HACK: Due to an unknown h/w issue, if the previous operation had alen == 0
+ * and plen != 0, the tag computed for a following operation with plen == 0
+ * and alen != 0 is incorrect. Work around this by resetting the context:
+ * write a 1 to the C_LENGTH_0 and AUTH_LENGTH registers before programming
+ * the real lengths.
+ */
+ if (cryptlen == 0) {
+ writel_relaxed(1, aes_base_reg + DTHE_P_AES_C_LENGTH_0);
+ writel_relaxed(1, aes_base_reg + DTHE_P_AES_AUTH_LENGTH);
+ }
+
+ if (ctx->aes_mode == DTHE_AES_GCM) {
+ if (req->iv) {
+ memcpy(iv_in, req->iv, GCM_AES_IV_SIZE);
+ } else {
+ iv_in[0] = 0;
+ iv_in[1] = 0;
+ iv_in[2] = 0;
+ }
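+ /* GCM J0 for a 96-bit IV: the final 32-bit counter word is a big-endian 1 */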
+ iv_in[3] = 0x01000000;
+ } else {
+ memcpy(iv_in, req->iv, AES_IV_SIZE);
+ }
+
+ /* Clear key2 to reset previous GHASH intermediate data */
+ for (int i = 0; i < AES_KEYSIZE_256 / sizeof(u32); ++i)
+ writel_relaxed(0, aes_base_reg + DTHE_P_AES_KEY2_6 + DTHE_REG_SIZE * i);
+
+ dthe_aes_set_ctrl_key(ctx, rctx, iv_in);
+
+ writel_relaxed(lower_32_bits(unpadded_cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_0);
+ writel_relaxed(upper_32_bits(unpadded_cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_1);
+ writel_relaxed(req->assoclen, aes_base_reg + DTHE_P_AES_AUTH_LENGTH);
+
+ /* Submit DMA descriptors: AAD TX, ciphertext TX, ciphertext RX */
+ if (assoclen != 0)
+ dmaengine_submit(desc_aad_out);
+ if (cryptlen != 0) {
+ dmaengine_submit(desc_out);
+ dmaengine_submit(desc_in);
+ }
+
+ if (cryptlen != 0)
+ dma_async_issue_pending(dev_data->dma_aes_rx);
+ dma_async_issue_pending(dev_data->dma_aes_tx);
+
+ /* Wait with a timeout so the request is still finalized if the DMA callback never fires */
+ ret = wait_for_completion_timeout(&rctx->aes_compl, msecs_to_jiffies(DTHE_DMA_TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ if (cryptlen != 0)
+ dmaengine_terminate_sync(dev_data->dma_aes_rx);
+ dmaengine_terminate_sync(dev_data->dma_aes_tx);
+
+ for (int i = 0; i < AES_BLOCK_WORDS; ++i)
+ readl_relaxed(aes_base_reg + DTHE_P_AES_DATA_IN_OUT + DTHE_REG_SIZE * i);
+ } else {
+ ret = 0;
+ }
+
+ if (cryptlen != 0)
+ dma_sync_sg_for_cpu(rx_dev, dst, dst_nents, dst_dir);
+
+ if (rctx->enc)
+ err = dthe_aead_enc_get_tag(req);
+ else
+ err = dthe_aead_dec_verify_tag(req);
+
+ ret = (ret) ? ret : err;
+
+aead_dma_prep_dst_err:
+ if (diff_dst && cryptlen != 0)
+ dma_unmap_sg(rx_dev, dst, dst_nents, dst_dir);
+aead_dma_prep_src_err:
+ if (cryptlen != 0)
+ dma_unmap_sg(tx_dev, src, src_nents, src_dir);
+aead_dma_prep_aad_err:
+ if (assoclen != 0)
+ dma_unmap_sg(tx_dev, aad_sg, aad_nents, aad_dir);
+
+aead_dma_map_aad_err:
+ if (diff_dst && cryptlen != 0)
+ kfree(dst);
+aead_prep_dst_err:
+ if (cryptlen != 0)
+ kfree(src);
+aead_prep_src_err:
+ if (assoclen != 0)
+ kfree(aad_sg);
+
+aead_prep_aad_err:
+ memzero_explicit(rctx->padding, 2 * AES_BLOCK_SIZE);
+
+ if (ret)
+ ret = dthe_aead_do_fallback(req);
+
+ local_bh_disable();
+ crypto_finalize_aead_request(engine, req, ret);
+ local_bh_enable();
+ return 0;
+}
+
+static int dthe_aead_crypt(struct aead_request *req)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct dthe_aes_req_ctx *rctx = aead_request_ctx(req);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ struct crypto_engine *engine;
+ unsigned int cryptlen = req->cryptlen;
+ bool is_zero_ctr = true;
+
+ /* In decryption, last authsize bytes are the TAG */
+ if (!rctx->enc)
+ cryptlen -= ctx->authsize;
+
+ if (ctx->aes_mode == DTHE_AES_CCM) {
+ /*
+ * For CCM Mode, the 128-bit IV contains the following:
+ * | 0 .. 2 | 3 .. 7 | 8 .. (127-8*L) | (128-8*L) .. 127 |
+ * | L-1 | Zero | Nonce | Counter |
+ * L needs to be between 2-8 (inclusive), i.e. 1 <= (L-1) <= 7
+ * and the next 5 bits need to be zeroes. Else return -EINVAL
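+ * E.g. L = 4 gives iv[0] = 0x03, the nonce in bytes 1-11 and the counter in bytes 12-15.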
+ */
+ u8 *iv = req->iv;
+ u8 L = iv[0];
+
+ /* variable L stores L-1 here */
+ if (L < 1 || L > 7)
+ return -EINVAL;
+ /*
+ * DTHEv2 HW can only work with zero initial counter in CCM mode.
+ * Check if the initial counter value is zero or not
+ */
+ for (int i = 0; i < L + 1; ++i) {
+ if (iv[AES_IV_SIZE - 1 - i] != 0) {
+ is_zero_ctr = false;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Need to fallback to software in the following cases due to HW restrictions:
+ * - Both AAD and plaintext/ciphertext are zero length
+ * - For AES-GCM, AAD length is more than 2^32 - 1 bytes
+ * - For AES-CCM, AAD length is more than 2^16 - 2^8 bytes
+ * - For AES-CCM, plaintext/ciphertext length is more than 2^61 - 1 bytes
+ * - For AES-CCM, AAD length is non-zero but plaintext/ciphertext length is zero
+ * - For AES-CCM, the initial counter (last L+1 bytes of IV) is not all zeroes
+ *
+ * PS: req->assoclen and req->cryptlen are currently unsigned int, which makes
+ * the second and fourth cases above tautologically false. If these fields are
+ * ever widened to 64-bit types, the corresponding checks would need to be
+ * added below.
+ */
+ if ((req->assoclen == 0 && cryptlen == 0) ||
+ (ctx->aes_mode == DTHE_AES_CCM && req->assoclen > DTHE_AES_CCM_AAD_MAXLEN) ||
+ (ctx->aes_mode == DTHE_AES_CCM && cryptlen == 0) ||
+ (ctx->aes_mode == DTHE_AES_CCM && !is_zero_ctr))
+ return dthe_aead_do_fallback(req);
+
+ engine = dev_data->engine;
+ return crypto_transfer_aead_request_to_engine(engine, req);
+}
+
+static int dthe_aead_encrypt(struct aead_request *req)
+{
+ struct dthe_aes_req_ctx *rctx = aead_request_ctx(req);
+
+ rctx->enc = 1;
+ return dthe_aead_crypt(req);
+}
+
+static int dthe_aead_decrypt(struct aead_request *req)
+{
+ struct dthe_aes_req_ctx *rctx = aead_request_ctx(req);
+
+ rctx->enc = 0;
+ return dthe_aead_crypt(req);
+}
+
static struct skcipher_engine_alg cipher_algs[] = {
{
.base.init = dthe_cipher_init_tfm,
@@ -501,8 +1250,33 @@ static struct skcipher_engine_alg cipher_algs[] = {
.op.do_one_request = dthe_aes_run,
}, /* CBC AES */
{
- .base.init = dthe_cipher_xts_init_tfm,
- .base.exit = dthe_cipher_xts_exit_tfm,
+ .base.init = dthe_cipher_init_tfm_fallback,
+ .base.exit = dthe_cipher_exit_tfm,
+ .base.setkey = dthe_aes_ctr_setkey,
+ .base.encrypt = dthe_aes_encrypt,
+ .base.decrypt = dthe_aes_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_IV_SIZE,
+ .base.chunksize = AES_BLOCK_SIZE,
+ .base.base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aes_run,
+ }, /* CTR AES */
+ {
+ .base.init = dthe_cipher_init_tfm_fallback,
+ .base.exit = dthe_cipher_exit_tfm,
.base.setkey = dthe_aes_xts_setkey,
.base.encrypt = dthe_aes_encrypt,
.base.decrypt = dthe_aes_decrypt,
@@ -527,12 +1301,75 @@ static struct skcipher_engine_alg cipher_algs[] = {
}, /* XTS AES */
};
+static struct aead_engine_alg aead_algs[] = {
+ {
+ .base.init = dthe_aead_init_tfm,
+ .base.exit = dthe_aead_exit_tfm,
+ .base.setkey = dthe_gcm_aes_setkey,
+ .base.setauthsize = dthe_aead_setauthsize,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.encrypt = dthe_aead_encrypt,
+ .base.decrypt = dthe_aead_decrypt,
+ .base.chunksize = AES_BLOCK_SIZE,
+ .base.ivsize = GCM_AES_IV_SIZE,
+ .base.base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aead_run,
+ }, /* GCM AES */
+ {
+ .base.init = dthe_aead_init_tfm,
+ .base.exit = dthe_aead_exit_tfm,
+ .base.setkey = dthe_ccm_aes_setkey,
+ .base.setauthsize = dthe_aead_setauthsize,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.encrypt = dthe_aead_encrypt,
+ .base.decrypt = dthe_aead_decrypt,
+ .base.chunksize = AES_BLOCK_SIZE,
+ .base.ivsize = AES_IV_SIZE,
+ .base.base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aead_run,
+ }, /* CCM AES */
+};
+
int dthe_register_aes_algs(void)
{
- return crypto_engine_register_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+ int ret;
+
+ ret = crypto_engine_register_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+ if (ret)
+ return ret;
+ ret = crypto_engine_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ if (ret)
+ crypto_engine_unregister_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+
+ return ret;
}
void dthe_unregister_aes_algs(void)
{
crypto_engine_unregister_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+ crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
}
diff --git a/drivers/crypto/ti/dthev2-common.c b/drivers/crypto/ti/dthev2-common.c
index c39d37933b9e..a2ad79bec105 100644
--- a/drivers/crypto/ti/dthev2-common.c
+++ b/drivers/crypto/ti/dthev2-common.c
@@ -48,6 +48,25 @@ struct dthe_data *dthe_get_dev(struct dthe_tfm_ctx *ctx)
return dev_data;
}
+struct scatterlist *dthe_copy_sg(struct scatterlist *dst,
+ struct scatterlist *src,
+ int buflen)
+{
+ struct scatterlist *from_sg, *to_sg;
+ int sglen;
+
+ for (to_sg = dst, from_sg = src; buflen && from_sg; buflen -= sglen) {
+ sglen = from_sg->length;
+ if (sglen > buflen)
+ sglen = buflen;
+ sg_set_buf(to_sg, sg_virt(from_sg), sglen);
+ from_sg = sg_next(from_sg);
+ to_sg = sg_next(to_sg);
+ }
+
+ return to_sg;
+}
+
static int dthe_dma_init(struct dthe_data *dev_data)
{
int ret;
diff --git a/drivers/crypto/ti/dthev2-common.h b/drivers/crypto/ti/dthev2-common.h
index c7a06a4c353f..d4a3b9c18bbc 100644
--- a/drivers/crypto/ti/dthev2-common.h
+++ b/drivers/crypto/ti/dthev2-common.h
@@ -36,7 +36,10 @@
enum dthe_aes_mode {
DTHE_AES_ECB = 0,
DTHE_AES_CBC,
+ DTHE_AES_CTR,
DTHE_AES_XTS,
+ DTHE_AES_GCM,
+ DTHE_AES_CCM,
};
/* Driver specific struct definitions */
@@ -77,25 +80,33 @@ struct dthe_list {
* struct dthe_tfm_ctx - Transform ctx struct containing ctx for all sub-components of DTHE V2
* @dev_data: Device data struct pointer
* @keylen: AES key length
+ * @authsize: Authentication size for modes with authentication
* @key: AES key
* @aes_mode: AES mode
+ * @aead_fb: Fallback crypto aead handle
* @skcipher_fb: Fallback crypto skcipher handle for AES-XTS mode
*/
struct dthe_tfm_ctx {
struct dthe_data *dev_data;
unsigned int keylen;
+ unsigned int authsize;
u32 key[DTHE_MAX_KEYSIZE / sizeof(u32)];
enum dthe_aes_mode aes_mode;
- struct crypto_sync_skcipher *skcipher_fb;
+ union {
+ struct crypto_sync_aead *aead_fb;
+ struct crypto_sync_skcipher *skcipher_fb;
+ };
};
/**
* struct dthe_aes_req_ctx - AES engine req ctx struct
* @enc: flag indicating encryption or decryption operation
+ * @padding: padding buffer for handling unaligned data
* @aes_compl: Completion variable for use in manual completion in case of DMA callback failure
*/
struct dthe_aes_req_ctx {
int enc;
+ u8 padding[2 * AES_BLOCK_SIZE];
struct completion aes_compl;
};
@@ -103,6 +114,20 @@ struct dthe_aes_req_ctx {
struct dthe_data *dthe_get_dev(struct dthe_tfm_ctx *ctx);
+/**
+ * dthe_copy_sg - Copy sg entries from src to dst
+ * @dst: Destination sg to be filled
+ * @src: Source sg to be copied from
+ * @buflen: Number of bytes to be copied
+ *
+ * Description:
+ * Duplicate the scatterlist entries covering the first buflen bytes of src
+ * into dst. No data is copied; the dst entries reference the same buffers.
+ *
+ * Return:
+ * Pointer to the first unused entry in dst, where the caller can append
+ * e.g. a padding buffer.
+ **/
+struct scatterlist *dthe_copy_sg(struct scatterlist *dst,
+ struct scatterlist *src,
+ int buflen);
+
int dthe_register_aes_algs(void);
void dthe_unregister_aes_algs(void);
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index e559bdadf4f9..0c2efdc83257 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -11,6 +11,7 @@
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
@@ -29,7 +30,7 @@ struct data_queue {
char name[32];
struct crypto_engine *engine;
- struct tasklet_struct done_task;
+ struct work_struct done_work;
};
struct virtio_crypto {
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 955bff8820da..ee83bf6568f0 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -70,9 +70,9 @@ int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterl
return 0;
}
-static void virtcrypto_done_task(unsigned long data)
+static void virtcrypto_done_work(struct work_struct *work)
{
- struct data_queue *data_vq = (struct data_queue *)data;
+ struct data_queue *data_vq = from_work(data_vq, work, done_work);
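+ /* from_work() is container_of() on the embedded work_struct */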
struct virtqueue *vq = data_vq->vq;
struct virtio_crypto_request *vc_req;
unsigned long flags;
@@ -96,7 +96,7 @@ static void virtcrypto_dataq_callback(struct virtqueue *vq)
struct virtio_crypto *vcrypto = vq->vdev->priv;
struct data_queue *dq = &vcrypto->data_vq[vq->index];
- tasklet_schedule(&dq->done_task);
+ queue_work(system_bh_wq, &dq->done_work);
}
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
@@ -150,8 +150,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
ret = -ENOMEM;
goto err_engine;
}
- tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
- (unsigned long)&vi->data_vq[i]);
+ INIT_WORK(&vi->data_vq[i].done_work, virtcrypto_done_work);
}
kfree(vqs_info);
@@ -501,7 +500,7 @@ static void virtcrypto_remove(struct virtio_device *vdev)
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
for (i = 0; i < vcrypto->max_data_queues; i++)
- tasklet_kill(&vcrypto->data_vq[i].done_task);
+ cancel_work_sync(&vcrypto->data_vq[i].done_work);
virtio_reset_device(vdev);
virtcrypto_free_unused_reqs(vcrypto);
virtcrypto_clear_crypto_engines(vcrypto);
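The tasklet-to-workqueue conversion above follows the generic pattern for retiring tasklets in favour of the BH workqueue. A minimal sketch of that pattern, assuming only from_work() and system_bh_wq from <linux/workqueue.h> (the my_queue names are hypothetical, not part of this patch):

#include <linux/workqueue.h>

struct my_queue {
	struct work_struct done_work;
	/* per-queue completion state lives here */
};

static void my_done_work(struct work_struct *work)
{
	/* from_work() is container_of() on the embedded work_struct */
	struct my_queue *q = from_work(q, work, done_work);

	/* drain completed requests for q */
}

/*
 * setup:    INIT_WORK(&q->done_work, my_done_work);
 * hot path: queue_work(system_bh_wq, &q->done_work);  - runs in BH context
 * teardown: cancel_work_sync(&q->done_work);
 */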