path: root/drivers/crypto
Diffstat (limited to 'drivers/crypto')
-rw-r--r-- drivers/crypto/Kconfig | 7
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 1
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 41
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 172
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 98
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 16
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 27
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 155
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 114
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h | 19
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl-cipher.c | 13
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl-core.c | 61
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl.h | 5
-rw-r--r-- drivers/crypto/aspeed/aspeed-acry.c | 40
-rw-r--r-- drivers/crypto/aspeed/aspeed-hace-crypto.c | 134
-rw-r--r-- drivers/crypto/aspeed/aspeed-hace-hash.c | 285
-rw-r--r-- drivers/crypto/aspeed/aspeed-hace.c | 9
-rw-r--r-- drivers/crypto/aspeed/aspeed-hace.h | 30
-rw-r--r-- drivers/crypto/atmel-aes.c | 21
-rw-r--r-- drivers/crypto/atmel-ecc.c | 2
-rw-r--r-- drivers/crypto/atmel-sha.c | 28
-rw-r--r-- drivers/crypto/atmel-tdes.c | 20
-rw-r--r-- drivers/crypto/bcm/cipher.c | 6
-rw-r--r-- drivers/crypto/caam/caamalg.c | 386
-rw-r--r-- drivers/crypto/caam/caamhash.c | 47
-rw-r--r-- drivers/crypto/caam/caampkc.c | 25
-rw-r--r-- drivers/crypto/caam/caampkc.h | 3
-rw-r--r-- drivers/crypto/caam/ctrl.c | 111
-rw-r--r-- drivers/crypto/caam/intern.h | 25
-rw-r--r-- drivers/crypto/caam/jr.c | 206
-rw-r--r-- drivers/crypto/caam/qi.c | 1
-rw-r--r-- drivers/crypto/caam/regs.h | 9
-rw-r--r-- drivers/crypto/ccp/Makefile | 3
-rw-r--r-- drivers/crypto/ccp/dbc.c | 250
-rw-r--r-- drivers/crypto/ccp/dbc.h | 56
-rw-r--r-- drivers/crypto/ccp/psp-dev.c | 19
-rw-r--r-- drivers/crypto/ccp/psp-dev.h | 1
-rw-r--r-- drivers/crypto/ccp/sp-dev.h | 7
-rw-r--r-- drivers/crypto/ccp/sp-pci.c | 96
-rw-r--r-- drivers/crypto/ccree/cc_driver.c | 1
-rw-r--r-- drivers/crypto/chelsio/chcr_algo.c | 3
-rw-r--r-- drivers/crypto/chelsio/chcr_core.h | 1
-rw-r--r-- drivers/crypto/chelsio/chcr_crypto.h | 1
-rw-r--r-- drivers/crypto/exynos-rng.c | 4
-rw-r--r-- drivers/crypto/gemini/sl3516-ce-cipher.c | 22
-rw-r--r-- drivers/crypto/gemini/sl3516-ce-core.c | 50
-rw-r--r-- drivers/crypto/gemini/sl3516-ce.h | 8
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 10
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_main.c | 5
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 28
-rw-r--r-- drivers/crypto/hisilicon/sec/sec_drv.c | 4
-rw-r--r-- drivers/crypto/img-hash.c | 4
-rw-r--r-- drivers/crypto/intel/keembay/keembay-ocs-aes-core.c | 441
-rw-r--r-- drivers/crypto/intel/keembay/keembay-ocs-ecc.c | 73
-rw-r--r-- drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c | 243
-rw-r--r-- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 23
-rw-r--r-- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 4
-rw-r--r-- drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 3
-rw-r--r-- drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 28
-rw-r--r-- drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h | 7
-rw-r--r-- drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c | 28
-rw-r--r-- drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h | 7
-rw-r--r-- drivers/crypto/intel/qat/qat_common/Makefile | 5
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 14
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_admin.c | 61
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_clock.c | 131
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_clock.h | 14
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_common_drv.h | 14
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_dbgfs.c | 12
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_fw_counters.c | 264
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_fw_counters.h | 11
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen2_config.c | 7
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h | 3
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 3
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c | 70
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h | 21
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_heartbeat.c | 336
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_heartbeat.h | 79
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c | 194
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h | 12
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_init.c | 28
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_isr.c | 6
-rw-r--r-- drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h | 23
-rw-r--r-- drivers/crypto/intel/qat/qat_common/icp_qat_hw.h | 5
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_compression.c | 3
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_uclo.c | 7
-rw-r--r-- drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 13
-rw-r--r-- drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 5
-rw-r--r-- drivers/crypto/n2_core.c | 2
-rw-r--r-- drivers/crypto/omap-aes-gcm.c | 37
-rw-r--r-- drivers/crypto/omap-aes.c | 266
-rw-r--r-- drivers/crypto/omap-aes.h | 15
-rw-r--r-- drivers/crypto/omap-des.c | 214
-rw-r--r-- drivers/crypto/omap-sham.c | 281
-rw-r--r-- drivers/crypto/qcom-rng.c | 10
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto.c | 59
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto.h | 21
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_ahash.c | 80
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 60
-rw-r--r-- drivers/crypto/s5p-sss.c | 1
-rw-r--r-- drivers/crypto/sa2ul.c | 3
-rw-r--r-- drivers/crypto/sahara.c | 1
-rw-r--r-- drivers/crypto/starfive/Kconfig | 2
-rw-r--r-- drivers/crypto/starfive/Makefile | 2
-rw-r--r-- drivers/crypto/starfive/jh7110-aes.c | 1024
-rw-r--r-- drivers/crypto/starfive/jh7110-cryp.c | 53
-rw-r--r-- drivers/crypto/starfive/jh7110-cryp.h | 74
-rw-r--r-- drivers/crypto/starfive/jh7110-hash.c | 290
-rw-r--r-- drivers/crypto/stm32/Kconfig | 2
-rw-r--r-- drivers/crypto/stm32/stm32-cryp.c | 370
-rw-r--r-- drivers/crypto/stm32/stm32-hash.c | 978
-rw-r--r-- drivers/crypto/talitos.c | 4
-rw-r--r-- drivers/crypto/virtio/virtio_crypto_akcipher_algs.c | 35
-rw-r--r-- drivers/crypto/virtio/virtio_crypto_skcipher_algs.c | 25
-rw-r--r-- drivers/crypto/xilinx/zynqmp-aes-gcm.c | 39
-rw-r--r-- drivers/crypto/xilinx/zynqmp-sha.c | 1
118 files changed, 6357 insertions(+), 2484 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 44e44b8d9ce6..c761952f0dc6 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -70,10 +70,9 @@ config ZCRYPT
select HW_RANDOM
help
Select this option if you want to enable support for
- s390 cryptographic adapters like:
- + Crypto Express 2 up to 7 Coprocessor (CEXxC)
- + Crypto Express 2 up to 7 Accelerator (CEXxA)
- + Crypto Express 4 up to 7 EP11 Coprocessor (CEXxP)
+ s390 cryptographic adapters like Crypto Express 4 up
+ to 8 in Coprocessor (CEXxC), EP11 Coprocessor (CEXxP)
+ or Accelerator (CEXxA) mode.
config ZCRYPT_DEBUG
bool "Enable debug features for s390 cryptographic adapters"
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 51a3a7b5b985..3bcfcfc37084 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -14,7 +14,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index c13550090785..8d4c42863a62 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -29,7 +29,7 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
struct sun8i_ce_alg_template *algt;
unsigned int todo, len;
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
@@ -92,13 +92,18 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
int err;
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct sun8i_ce_alg_template *algt;
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
- algt->stat_fb++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ce_alg_template *algt __maybe_unused;
+
+ algt = container_of(alg, struct sun8i_ce_alg_template,
+ alg.skcipher.base);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt->stat_fb++;
#endif
+ }
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
@@ -133,7 +138,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
int ns = sg_nents_for_len(areq->src, areq->cryptlen);
int nd = sg_nents_for_len(areq->dst, areq->cryptlen);
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
crypto_tfm_alg_name(areq->base.tfm),
@@ -294,7 +299,7 @@ theend:
return err;
}
-static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
+static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
@@ -308,10 +313,10 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
local_bh_enable();
- return 0;
}
-static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
+static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
+ void *async_req)
{
struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
@@ -353,7 +358,17 @@ static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_r
}
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
+}
+
+int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
+{
+ int err = sun8i_ce_cipher_prepare(engine, areq);
+ if (err)
+ return err;
+
+ sun8i_ce_cipher_run(engine, areq);
+ sun8i_ce_cipher_unprepare(engine, areq);
return 0;
}
@@ -406,7 +421,7 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
op->ce = algt->ce;
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -423,10 +438,6 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
CRYPTO_MAX_ALG_NAME);
- op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
- op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
- op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;
-
err = pm_runtime_get_sync(op->ce->dev);
if (err < 0)
goto error_pm;
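
The hunks above replace the old per-TFM crypto_engine triplet (prepare_request / do_one_request / unprepare_request) with a single do_one_request handler that chains the three steps itself, exactly as sun8i_ce_cipher_do_one() does. A minimal sketch of that consolidation, assuming hypothetical my_prepare/my_run/my_unprepare helpers standing in for the driver-specific work (these are not sun8i-ce functions):

#include <crypto/engine.h>

static int my_prepare(struct crypto_engine *engine, void *areq);
static void my_run(struct crypto_engine *engine, void *areq);
static void my_unprepare(struct crypto_engine *engine, void *areq);

static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	int err;

	/* Former prepare_request step: map buffers, build descriptors. */
	err = my_prepare(engine, areq);
	if (err)
		return err;

	/* Former do_one_request step: submit and complete the request. */
	my_run(engine, areq);

	/* Former unprepare_request step: unmap buffers, free resources. */
	my_unprepare(engine, areq);
	return 0;
}
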
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 07ea0cc82b16..d4ccd5254280 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -9,21 +9,24 @@
*
* You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
*/
+
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/rng.h>
+#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <crypto/internal/rng.h>
-#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"
@@ -277,7 +280,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ce_algo_id = CE_ID_CIPHER_AES,
.ce_blockmode = CE_ID_OP_CBC,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-sun8i-ce",
@@ -298,13 +301,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.setkey = sun8i_ce_aes_setkey,
.encrypt = sun8i_ce_skencrypt,
.decrypt = sun8i_ce_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ce_cipher_do_one,
+ },
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ce_algo_id = CE_ID_CIPHER_AES,
.ce_blockmode = CE_ID_OP_ECB,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-sun8i-ce",
@@ -324,13 +330,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.setkey = sun8i_ce_aes_setkey,
.encrypt = sun8i_ce_skencrypt,
.decrypt = sun8i_ce_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ce_cipher_do_one,
+ },
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ce_algo_id = CE_ID_CIPHER_DES3,
.ce_blockmode = CE_ID_OP_CBC,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3-sun8i-ce",
@@ -351,13 +360,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.setkey = sun8i_ce_des3_setkey,
.encrypt = sun8i_ce_skencrypt,
.decrypt = sun8i_ce_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ce_cipher_do_one,
+ },
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ce_algo_id = CE_ID_CIPHER_DES3,
.ce_blockmode = CE_ID_OP_ECB,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-sun8i-ce",
@@ -377,12 +389,15 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.setkey = sun8i_ce_des3_setkey,
.encrypt = sun8i_ce_skencrypt,
.decrypt = sun8i_ce_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ce_cipher_do_one,
+ },
},
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_HASH
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_MD5,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@@ -390,6 +405,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
+ .init_tfm = sun8i_ce_hash_init_tfm,
+ .exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct md5_state),
@@ -404,15 +421,17 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ce_hash_crainit,
- .cra_exit = sun8i_ce_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ce_hash_run,
+ },
+
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA1,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@@ -420,6 +439,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
+ .init_tfm = sun8i_ce_hash_init_tfm,
+ .exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct sha1_state),
@@ -434,15 +455,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ce_hash_crainit,
- .cra_exit = sun8i_ce_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ce_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA224,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@@ -450,6 +472,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
+ .init_tfm = sun8i_ce_hash_init_tfm,
+ .exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
@@ -464,15 +488,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ce_hash_crainit,
- .cra_exit = sun8i_ce_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ce_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA256,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@@ -480,6 +505,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
+ .init_tfm = sun8i_ce_hash_init_tfm,
+ .exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
@@ -494,15 +521,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ce_hash_crainit,
- .cra_exit = sun8i_ce_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ce_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA384,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@@ -510,6 +538,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
+ .init_tfm = sun8i_ce_hash_init_tfm,
+ .exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct sha512_state),
@@ -524,15 +554,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ce_hash_crainit,
- .cra_exit = sun8i_ce_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ce_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA512,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@@ -540,6 +571,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
+ .init_tfm = sun8i_ce_hash_init_tfm,
+ .exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct sha512_state),
@@ -554,11 +587,12 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ce_hash_crainit,
- .cra_exit = sun8i_ce_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ce_hash_run,
+ },
},
#endif
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG
@@ -582,14 +616,18 @@ static struct sun8i_ce_alg_template ce_algs[] = {
#endif
};
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
{
- struct sun8i_ce_dev *ce = seq->private;
+ struct sun8i_ce_dev *ce __maybe_unused = seq->private;
unsigned int i;
for (i = 0; i < MAXFLOW; i++)
- seq_printf(seq, "Channel %d: nreq %lu\n", i, ce->chanlist[i].stat_req);
+ seq_printf(seq, "Channel %d: nreq %lu\n", i,
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ ce->chanlist[i].stat_req);
+#else
+ 0ul);
+#endif
for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
if (!ce_algs[i].ce)
@@ -597,8 +635,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
- ce_algs[i].alg.skcipher.base.cra_driver_name,
- ce_algs[i].alg.skcipher.base.cra_name,
+ ce_algs[i].alg.skcipher.base.base.cra_driver_name,
+ ce_algs[i].alg.skcipher.base.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
seq_printf(seq, "\tLast fallback is: %s\n",
ce_algs[i].fbname);
@@ -621,8 +659,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
break;
case CRYPTO_ALG_TYPE_AHASH:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
- ce_algs[i].alg.hash.halg.base.cra_driver_name,
- ce_algs[i].alg.hash.halg.base.cra_name,
+ ce_algs[i].alg.hash.base.halg.base.cra_driver_name,
+ ce_algs[i].alg.hash.base.halg.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
seq_printf(seq, "\tLast fallback is: %s\n",
ce_algs[i].fbname);
@@ -643,7 +681,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
break;
}
}
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+#if defined(CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG) && \
+ defined(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)
seq_printf(seq, "HWRNG %lu %lu\n",
ce->hwrng_stat_req, ce->hwrng_stat_bytes);
#endif
@@ -651,7 +690,6 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(sun8i_ce_debugfs);
-#endif
static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i)
{
@@ -839,7 +877,7 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
if (ce_method == CE_ID_NOTSUPP) {
dev_dbg(ce->dev,
"DEBUG: Algo of %s not supported\n",
- ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
ce_algs[i].ce = NULL;
break;
}
@@ -847,16 +885,16 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
ce_method = ce->variant->op_mode[id];
if (ce_method == CE_ID_NOTSUPP) {
dev_dbg(ce->dev, "DEBUG: Blockmode of %s not supported\n",
- ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
ce_algs[i].ce = NULL;
break;
}
dev_info(ce->dev, "Register %s\n",
- ce_algs[i].alg.skcipher.base.cra_name);
- err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
+ err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher);
if (err) {
dev_err(ce->dev, "ERROR: Fail to register %s\n",
- ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
ce_algs[i].ce = NULL;
return err;
}
@@ -867,16 +905,16 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
if (ce_method == CE_ID_NOTSUPP) {
dev_info(ce->dev,
"DEBUG: Algo of %s not supported\n",
- ce_algs[i].alg.hash.halg.base.cra_name);
+ ce_algs[i].alg.hash.base.halg.base.cra_name);
ce_algs[i].ce = NULL;
break;
}
dev_info(ce->dev, "Register %s\n",
- ce_algs[i].alg.hash.halg.base.cra_name);
- err = crypto_register_ahash(&ce_algs[i].alg.hash);
+ ce_algs[i].alg.hash.base.halg.base.cra_name);
+ err = crypto_engine_register_ahash(&ce_algs[i].alg.hash);
if (err) {
dev_err(ce->dev, "ERROR: Fail to register %s\n",
- ce_algs[i].alg.hash.halg.base.cra_name);
+ ce_algs[i].alg.hash.base.halg.base.cra_name);
ce_algs[i].ce = NULL;
return err;
}
@@ -916,13 +954,13 @@ static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(ce->dev, "Unregister %d %s\n", i,
- ce_algs[i].alg.skcipher.base.cra_name);
- crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
+ crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher);
break;
case CRYPTO_ALG_TYPE_AHASH:
dev_info(ce->dev, "Unregister %d %s\n", i,
- ce_algs[i].alg.hash.halg.base.cra_name);
- crypto_unregister_ahash(&ce_algs[i].alg.hash);
+ ce_algs[i].alg.hash.base.halg.base.cra_name);
+ crypto_engine_unregister_ahash(&ce_algs[i].alg.hash);
break;
case CRYPTO_ALG_TYPE_RNG:
dev_info(ce->dev, "Unregister %d %s\n", i,
@@ -1007,13 +1045,21 @@ static int sun8i_ce_probe(struct platform_device *pdev)
pm_runtime_put_sync(ce->dev);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct dentry *dbgfs_dir __maybe_unused;
+ struct dentry *dbgfs_stats __maybe_unused;
+
+ /* Ignore error of debugfs */
+ dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL);
+ dbgfs_stats = debugfs_create_file("stats", 0444,
+ dbgfs_dir, ce,
+ &sun8i_ce_debugfs_fops);
+
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- /* Ignore error of debugfs */
- ce->dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL);
- ce->dbgfs_stats = debugfs_create_file("stats", 0444,
- ce->dbgfs_dir, ce,
- &sun8i_ce_debugfs_fops);
+ ce->dbgfs_dir = dbgfs_dir;
+ ce->dbgfs_stats = dbgfs_stats;
#endif
+ }
return 0;
error_alg:
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 930ad157004c..d358334e5981 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -9,48 +9,46 @@
*
* You could find the datasheet in Documentation/arch/arm/sunxi.rst
*/
+
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/md5.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "sun8i-ce.h"
-int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
+int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
{
- struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ce_alg_template *algt;
int err;
- memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx));
-
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
op->ce = algt->ce;
- op->enginectx.op.do_one_request = sun8i_ce_hash_run;
- op->enginectx.op.prepare_request = NULL;
- op->enginectx.op.unprepare_request = NULL;
-
/* FALLBACK */
- op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+ op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(algt->ce->dev, "Fallback driver could no be loaded\n");
return PTR_ERR(op->fallback_tfm);
}
- if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
- algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);
+ crypto_ahash_set_statesize(tfm,
+ crypto_ahash_statesize(op->fallback_tfm));
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ crypto_ahash_set_reqsize(tfm,
sizeof(struct sun8i_ce_hash_reqctx) +
crypto_ahash_reqsize(op->fallback_tfm));
- memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base),
+ memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
CRYPTO_MAX_ALG_NAME);
err = pm_runtime_get_sync(op->ce->dev);
@@ -63,9 +61,9 @@ error_pm:
return err;
}
-void sun8i_ce_hash_craexit(struct crypto_tfm *tfm)
+void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
{
- struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
crypto_free_ahash(tfmctx->fallback_tfm);
pm_runtime_put_sync_suspend(tfmctx->ce->dev);
@@ -114,20 +112,22 @@ int sun8i_ce_hash_final(struct ahash_request *areq)
struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_alg_template *algt;
-#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
rctx->fallback_req.result = areq->result;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+ algt = container_of(alg, struct sun8i_ce_alg_template,
+ alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
- algt->stat_fb++;
+ algt->stat_fb++;
#endif
+ }
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -152,10 +152,6 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_alg_template *algt;
-#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
@@ -164,10 +160,17 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
rctx->fallback_req.nbytes = areq->nbytes;
rctx->fallback_req.src = areq->src;
rctx->fallback_req.result = areq->result;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+ algt = container_of(alg, struct sun8i_ce_alg_template,
+ alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
- algt->stat_fb++;
+ algt->stat_fb++;
#endif
+ }
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -177,10 +180,6 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_alg_template *algt;
-#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
@@ -189,10 +188,17 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
rctx->fallback_req.nbytes = areq->nbytes;
rctx->fallback_req.src = areq->src;
rctx->fallback_req.result = areq->result;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+ algt = container_of(alg, struct sun8i_ce_alg_template,
+ alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
- algt->stat_fb++;
+ algt->stat_fb++;
#endif
+ }
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -204,7 +210,7 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
struct sun8i_ce_alg_template *algt;
struct scatterlist *sg;
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
if (areq->nbytes == 0) {
algt->stat_fb_len0++;
@@ -253,7 +259,7 @@ int sun8i_ce_hash_digest(struct ahash_request *areq)
return sun8i_ce_hash_digest_fb(areq);
}
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
e = sun8i_ce_get_engine_number(ce);
@@ -345,11 +351,11 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
dma_addr_t addr_res, addr_pad;
int ns = sg_nents_for_len(areq->src, areq->nbytes);
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
- bs = algt->alg.hash.halg.base.cra_blocksize;
- digestsize = algt->alg.hash.halg.digestsize;
+ bs = algt->alg.hash.base.halg.base.cra_blocksize;
+ digestsize = algt->alg.hash.base.halg.digestsize;
if (digestsize == SHA224_DIGEST_SIZE)
digestsize = SHA256_DIGEST_SIZE;
if (digestsize == SHA384_DIGEST_SIZE)
@@ -454,14 +460,14 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
chan->timeout = areq->nbytes;
- err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+ err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
- memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+ memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
theend:
kfree(buf);
kfree(result);
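
With the switch above from cra_init/cra_exit to the ahash-level init_tfm/exit_tfm hooks, the setup code receives a struct crypto_ahash directly instead of casting from struct crypto_tfm. A minimal sketch of the same shape, assuming a hypothetical struct my_hash_ctx and my_hash_* names (the sun8i-ce version additionally takes a runtime-PM reference and records the fallback driver name):

#include <crypto/internal/hash.h>
#include <linux/err.h>

struct my_hash_ctx {
	struct crypto_ahash *fallback_tfm;
};

static int my_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct my_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm))
		return PTR_ERR(ctx->fallback_tfm);

	/* Inherit state and request sizes from the fallback, as above. */
	crypto_ahash_set_statesize(tfm,
				   crypto_ahash_statesize(ctx->fallback_tfm));
	crypto_ahash_set_reqsize(tfm,
				 crypto_ahash_reqsize(ctx->fallback_tfm));
	return 0;
}

static void my_hash_exit_tfm(struct crypto_ahash *tfm)
{
	struct my_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	crypto_free_ahash(ctx->fallback_tfm);
}
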
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 27029fb77e29..93d4985def87 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -265,14 +265,12 @@ struct sun8i_cipher_req_ctx {
/*
* struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
- * @enginectx: crypto_engine used by this TFM
* @key: pointer to key data
* @keylen: len of the key
* @ce: pointer to the private data of driver handling this TFM
* @fallback_tfm: pointer to the fallback TFM
*/
struct sun8i_cipher_tfm_ctx {
- struct crypto_engine_ctx enginectx;
u32 *key;
u32 keylen;
struct sun8i_ce_dev *ce;
@@ -281,12 +279,10 @@ struct sun8i_cipher_tfm_ctx {
/*
* struct sun8i_ce_hash_tfm_ctx - context for an ahash TFM
- * @enginectx: crypto_engine used by this TFM
* @ce: pointer to the private data of driver handling this TFM
* @fallback_tfm: pointer to the fallback TFM
*/
struct sun8i_ce_hash_tfm_ctx {
- struct crypto_engine_ctx enginectx;
struct sun8i_ce_dev *ce;
struct crypto_ahash *fallback_tfm;
};
@@ -329,8 +325,8 @@ struct sun8i_ce_alg_template {
u32 ce_blockmode;
struct sun8i_ce_dev *ce;
union {
- struct skcipher_alg skcipher;
- struct ahash_alg hash;
+ struct skcipher_engine_alg skcipher;
+ struct ahash_engine_alg hash;
struct rng_alg rng;
} alg;
unsigned long stat_req;
@@ -347,14 +343,13 @@ struct sun8i_ce_alg_template {
char fbname[CRYPTO_MAX_ALG_NAME];
};
-int sun8i_ce_enqueue(struct crypto_async_request *areq, u32 type);
-
int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun8i_ce_cipher_init(struct crypto_tfm *tfm);
void sun8i_ce_cipher_exit(struct crypto_tfm *tfm);
+int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq);
int sun8i_ce_skdecrypt(struct skcipher_request *areq);
int sun8i_ce_skencrypt(struct skcipher_request *areq);
@@ -362,12 +357,11 @@ int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce);
int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name);
-int sun8i_ce_hash_crainit(struct crypto_tfm *tfm);
-void sun8i_ce_hash_craexit(struct crypto_tfm *tfm);
+int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm);
+void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm);
int sun8i_ce_hash_init(struct ahash_request *areq);
int sun8i_ce_hash_export(struct ahash_request *areq, void *out);
int sun8i_ce_hash_import(struct ahash_request *areq, const void *in);
-int sun8i_ce_hash(struct ahash_request *areq);
int sun8i_ce_hash_final(struct ahash_request *areq);
int sun8i_ce_hash_update(struct ahash_request *areq);
int sun8i_ce_hash_finup(struct ahash_request *areq);
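
The header change above is the core of the conversion: the per-TFM struct crypto_engine_ctx disappears, and the template union now stores struct skcipher_engine_alg and struct ahash_engine_alg, each pairing the base algorithm (.base) with the engine callbacks (.op). A minimal sketch of declaring and registering one such wrapped algorithm, with fields abridged and hypothetical my_* callbacks:

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

static struct skcipher_engine_alg my_alg = {
	.base = {
		.base = {
			.cra_name	 = "cbc(aes)",
			.cra_driver_name = "cbc-aes-mydriver",
			.cra_module	 = THIS_MODULE,
			/* cra_priority, cra_blocksize, cra_ctxsize, ... */
		},
		.setkey	 = my_setkey,
		.encrypt = my_encrypt,
		.decrypt = my_decrypt,
	},
	.op = {
		/* Invoked by the engine for each queued request. */
		.do_one_request = my_do_one_request,
	},
};

/*
 * At probe:  err = crypto_engine_register_skcipher(&my_alg);
 * At remove: crypto_engine_unregister_skcipher(&my_alg);
 */
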
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 381a90fbeaff..7fa359725ec7 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -24,7 +24,7 @@ static bool sun8i_ss_need_fallback(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+ struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
struct scatterlist *in_sg = areq->src;
struct scatterlist *out_sg = areq->dst;
struct scatterlist *sg;
@@ -93,13 +93,18 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
int err;
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct sun8i_ss_alg_template *algt;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ss_alg_template *algt __maybe_unused;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template,
+ alg.skcipher.base);
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
- algt->stat_fb++;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt->stat_fb++;
#endif
+ }
+
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
areq->base.complete, areq->base.data);
@@ -193,7 +198,7 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
int nsgd = sg_nents_for_len(areq->dst, areq->cryptlen);
int i;
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
crypto_tfm_alg_name(areq->base.tfm),
@@ -324,7 +329,7 @@ theend:
return err;
}
-static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
+int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
int err;
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
@@ -390,7 +395,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
op->ss = algt->ss;
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -408,10 +413,6 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
CRYPTO_MAX_ALG_NAME);
- op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
- op->enginectx.op.prepare_request = NULL;
- op->enginectx.op.unprepare_request = NULL;
-
err = pm_runtime_resume_and_get(op->ss->dev);
if (err < 0) {
dev_err(op->ss->dev, "pm error %d\n", err);
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 3dd844b40ff7..4a9587285c04 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -9,22 +9,23 @@
*
* You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
*/
+
+#include <crypto/engine.h>
+#include <crypto/internal/rng.h>
+#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <crypto/internal/rng.h>
-#include <crypto/internal/skcipher.h>
#include "sun8i-ss.h"
@@ -168,7 +169,7 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ss_algo_id = SS_ID_CIPHER_AES,
.ss_blockmode = SS_ID_OP_CBC,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-sun8i-ss",
@@ -189,13 +190,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.setkey = sun8i_ss_aes_setkey,
.encrypt = sun8i_ss_skencrypt,
.decrypt = sun8i_ss_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ss_handle_cipher_request,
+ },
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ss_algo_id = SS_ID_CIPHER_AES,
.ss_blockmode = SS_ID_OP_ECB,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-sun8i-ss",
@@ -215,13 +219,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.setkey = sun8i_ss_aes_setkey,
.encrypt = sun8i_ss_skencrypt,
.decrypt = sun8i_ss_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ss_handle_cipher_request,
+ },
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ss_algo_id = SS_ID_CIPHER_DES3,
.ss_blockmode = SS_ID_OP_CBC,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3-sun8i-ss",
@@ -242,13 +249,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.setkey = sun8i_ss_des3_setkey,
.encrypt = sun8i_ss_skencrypt,
.decrypt = sun8i_ss_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ss_handle_cipher_request,
+ },
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ss_algo_id = SS_ID_CIPHER_DES3,
.ss_blockmode = SS_ID_OP_ECB,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-sun8i-ss",
@@ -268,7 +278,10 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.setkey = sun8i_ss_des3_setkey,
.encrypt = sun8i_ss_skencrypt,
.decrypt = sun8i_ss_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ss_handle_cipher_request,
+ },
},
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG
{
@@ -292,7 +305,7 @@ static struct sun8i_ss_alg_template ss_algs[] = {
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_HASH
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_MD5,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@@ -300,6 +313,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
+ .init_tfm = sun8i_ss_hash_init_tfm,
+ .exit_tfm = sun8i_ss_hash_exit_tfm,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct md5_state),
@@ -314,15 +329,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ss_hash_crainit,
- .cra_exit = sun8i_ss_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ss_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_SHA1,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@@ -330,6 +346,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
+ .init_tfm = sun8i_ss_hash_init_tfm,
+ .exit_tfm = sun8i_ss_hash_exit_tfm,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct sha1_state),
@@ -344,15 +362,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ss_hash_crainit,
- .cra_exit = sun8i_ss_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ss_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_SHA224,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@@ -360,6 +379,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
+ .init_tfm = sun8i_ss_hash_init_tfm,
+ .exit_tfm = sun8i_ss_hash_exit_tfm,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
@@ -374,15 +395,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ss_hash_crainit,
- .cra_exit = sun8i_ss_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ss_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_SHA256,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@@ -390,6 +412,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
+ .init_tfm = sun8i_ss_hash_init_tfm,
+ .exit_tfm = sun8i_ss_hash_exit_tfm,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
@@ -404,15 +428,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ss_hash_crainit,
- .cra_exit = sun8i_ss_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ss_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_SHA1,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@@ -420,6 +445,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
+ .init_tfm = sun8i_ss_hash_init_tfm,
+ .exit_tfm = sun8i_ss_hash_exit_tfm,
.setkey = sun8i_ss_hmac_setkey,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
@@ -435,23 +462,28 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ss_hash_crainit,
- .cra_exit = sun8i_ss_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ss_hash_run,
+ },
},
#endif
};
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
{
- struct sun8i_ss_dev *ss = seq->private;
+ struct sun8i_ss_dev *ss __maybe_unused = seq->private;
unsigned int i;
for (i = 0; i < MAXFLOW; i++)
- seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req);
+ seq_printf(seq, "Channel %d: nreq %lu\n", i,
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ ss->flows[i].stat_req);
+#else
+ 0ul);
+#endif
for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
if (!ss_algs[i].ss)
@@ -459,8 +491,8 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
switch (ss_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
- ss_algs[i].alg.skcipher.base.cra_driver_name,
- ss_algs[i].alg.skcipher.base.cra_name,
+ ss_algs[i].alg.skcipher.base.base.cra_driver_name,
+ ss_algs[i].alg.skcipher.base.base.cra_name,
ss_algs[i].stat_req, ss_algs[i].stat_fb);
seq_printf(seq, "\tLast fallback is: %s\n",
@@ -482,8 +514,8 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
break;
case CRYPTO_ALG_TYPE_AHASH:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
- ss_algs[i].alg.hash.halg.base.cra_driver_name,
- ss_algs[i].alg.hash.halg.base.cra_name,
+ ss_algs[i].alg.hash.base.halg.base.cra_driver_name,
+ ss_algs[i].alg.hash.base.halg.base.cra_name,
ss_algs[i].stat_req, ss_algs[i].stat_fb);
seq_printf(seq, "\tLast fallback is: %s\n",
ss_algs[i].fbname);
@@ -502,7 +534,6 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(sun8i_ss_debugfs);
-#endif
static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i)
{
@@ -659,7 +690,7 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
if (ss_method == SS_ID_NOTSUPP) {
dev_info(ss->dev,
"DEBUG: Algo of %s not supported\n",
- ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].alg.skcipher.base.base.cra_name);
ss_algs[i].ss = NULL;
break;
}
@@ -667,16 +698,16 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
ss_method = ss->variant->op_mode[id];
if (ss_method == SS_ID_NOTSUPP) {
dev_info(ss->dev, "DEBUG: Blockmode of %s not supported\n",
- ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].alg.skcipher.base.base.cra_name);
ss_algs[i].ss = NULL;
break;
}
dev_info(ss->dev, "DEBUG: Register %s\n",
- ss_algs[i].alg.skcipher.base.cra_name);
- err = crypto_register_skcipher(&ss_algs[i].alg.skcipher);
+ ss_algs[i].alg.skcipher.base.base.cra_name);
+ err = crypto_engine_register_skcipher(&ss_algs[i].alg.skcipher);
if (err) {
dev_err(ss->dev, "Fail to register %s\n",
- ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].alg.skcipher.base.base.cra_name);
ss_algs[i].ss = NULL;
return err;
}
@@ -695,16 +726,16 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
if (ss_method == SS_ID_NOTSUPP) {
dev_info(ss->dev,
"DEBUG: Algo of %s not supported\n",
- ss_algs[i].alg.hash.halg.base.cra_name);
+ ss_algs[i].alg.hash.base.halg.base.cra_name);
ss_algs[i].ss = NULL;
break;
}
dev_info(ss->dev, "Register %s\n",
- ss_algs[i].alg.hash.halg.base.cra_name);
- err = crypto_register_ahash(&ss_algs[i].alg.hash);
+ ss_algs[i].alg.hash.base.halg.base.cra_name);
+ err = crypto_engine_register_ahash(&ss_algs[i].alg.hash);
if (err) {
dev_err(ss->dev, "ERROR: Fail to register %s\n",
- ss_algs[i].alg.hash.halg.base.cra_name);
+ ss_algs[i].alg.hash.base.halg.base.cra_name);
ss_algs[i].ss = NULL;
return err;
}
@@ -727,8 +758,8 @@ static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
switch (ss_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(ss->dev, "Unregister %d %s\n", i,
- ss_algs[i].alg.skcipher.base.cra_name);
- crypto_unregister_skcipher(&ss_algs[i].alg.skcipher);
+ ss_algs[i].alg.skcipher.base.base.cra_name);
+ crypto_engine_unregister_skcipher(&ss_algs[i].alg.skcipher);
break;
case CRYPTO_ALG_TYPE_RNG:
dev_info(ss->dev, "Unregister %d %s\n", i,
@@ -737,8 +768,8 @@ static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
break;
case CRYPTO_ALG_TYPE_AHASH:
dev_info(ss->dev, "Unregister %d %s\n", i,
- ss_algs[i].alg.hash.halg.base.cra_name);
- crypto_unregister_ahash(&ss_algs[i].alg.hash);
+ ss_algs[i].alg.hash.base.halg.base.cra_name);
+ crypto_engine_unregister_ahash(&ss_algs[i].alg.hash);
break;
}
}
@@ -851,13 +882,21 @@ static int sun8i_ss_probe(struct platform_device *pdev)
pm_runtime_put_sync(ss->dev);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+ struct dentry *dbgfs_dir __maybe_unused;
+ struct dentry *dbgfs_stats __maybe_unused;
+
+ /* Ignore error of debugfs */
+ dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL);
+ dbgfs_stats = debugfs_create_file("stats", 0444,
+ dbgfs_dir, ss,
+ &sun8i_ss_debugfs_fops);
+
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- /* Ignore error of debugfs */
- ss->dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL);
- ss->dbgfs_stats = debugfs_create_file("stats", 0444,
- ss->dbgfs_dir, ss,
- &sun8i_ss_debugfs_fops);
+ ss->dbgfs_dir = dbgfs_dir;
+ ss->dbgfs_stats = dbgfs_stats;
#endif
+ }
return 0;
error_alg:
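
Both core files also restructure their debugfs code the same way: the file-wide #ifdef becomes an IS_ENABLED() check, so the compiler always parses and type-checks the debug path and simply discards it when the option is off, while only accesses to struct members that exist solely under the config option stay behind the preprocessor guard. A minimal sketch of the idiom, with CONFIG_MY_DEBUG and struct my_dev as hypothetical placeholders:

#include <linux/kernel.h>
#include <linux/printk.h>

struct my_dev {
#ifdef CONFIG_MY_DEBUG
	unsigned long stat_req;	/* only present in debug builds */
#endif
};

static void my_show_stats(struct my_dev *dev)
{
	unsigned long n = 0;

	if (IS_ENABLED(CONFIG_MY_DEBUG)) {
		/* Always compiled; dead-code-eliminated when DEBUG is off. */
#ifdef CONFIG_MY_DEBUG
		n = dev->stat_req;	/* member exists only with DEBUG */
#endif
		pr_info("nreq %lu\n", n);
	}
}
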
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index a4b67d130d11..d70b105dcfa1 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -9,16 +9,21 @@
*
* You could find the datasheet in Documentation/arch/arm/sunxi.rst
*/
-#include <linux/bottom_half.h>
-#include <linux/dma-mapping.h>
-#include <linux/pm_runtime.h>
-#include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
+
#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
-#include <crypto/md5.h>
+#include <linux/bottom_half.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "sun8i-ss.h"
static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
@@ -60,14 +65,11 @@ int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
unsigned int keylen)
{
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash);
- struct ahash_alg *alg = __crypto_ahash_alg(ahash->base.__crt_alg);
- struct sun8i_ss_alg_template *algt;
int digestsize, i;
int bs = crypto_ahash_blocksize(ahash);
int ret;
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
- digestsize = algt->alg.hash.halg.digestsize;
+ digestsize = crypto_ahash_digestsize(ahash);
if (keylen > bs) {
ret = sun8i_ss_hashkey(tfmctx, key, keylen);
@@ -107,38 +109,33 @@ err_opad:
return ret;
}
-int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
+int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm)
{
- struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct sun8i_ss_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt;
int err;
- memset(op, 0, sizeof(struct sun8i_ss_hash_tfm_ctx));
-
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
op->ss = algt->ss;
- op->enginectx.op.do_one_request = sun8i_ss_hash_run;
- op->enginectx.op.prepare_request = NULL;
- op->enginectx.op.unprepare_request = NULL;
-
/* FALLBACK */
- op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+ op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(algt->ss->dev, "Fallback driver could no be loaded\n");
return PTR_ERR(op->fallback_tfm);
}
- if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
- algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);
+ crypto_ahash_set_statesize(tfm,
+ crypto_ahash_statesize(op->fallback_tfm));
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ crypto_ahash_set_reqsize(tfm,
sizeof(struct sun8i_ss_hash_reqctx) +
crypto_ahash_reqsize(op->fallback_tfm));
- memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), CRYPTO_MAX_ALG_NAME);
+ memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
+ CRYPTO_MAX_ALG_NAME);
err = pm_runtime_get_sync(op->ss->dev);
if (err < 0)
@@ -150,9 +147,9 @@ error_pm:
return err;
}
-void sun8i_ss_hash_craexit(struct crypto_tfm *tfm)
+void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm)
{
- struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
kfree_sensitive(tfmctx->ipad);
kfree_sensitive(tfmctx->opad);
@@ -204,20 +201,23 @@ int sun8i_ss_hash_final(struct ahash_request *areq)
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ss_alg_template *algt;
-#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
rctx->fallback_req.result = areq->result;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ struct sun8i_ss_alg_template *algt __maybe_unused;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template,
+ alg.hash.base);
+
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
- algt->stat_fb++;
+ algt->stat_fb++;
#endif
+ }
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -242,10 +242,6 @@ int sun8i_ss_hash_finup(struct ahash_request *areq)
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ss_alg_template *algt;
-#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
@@ -254,10 +250,18 @@ int sun8i_ss_hash_finup(struct ahash_request *areq)
rctx->fallback_req.nbytes = areq->nbytes;
rctx->fallback_req.src = areq->src;
rctx->fallback_req.result = areq->result;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ struct sun8i_ss_alg_template *algt __maybe_unused;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template,
+ alg.hash.base);
+
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
- algt->stat_fb++;
+ algt->stat_fb++;
#endif
+ }
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -267,10 +271,6 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ss_alg_template *algt;
-#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
@@ -279,10 +279,18 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
rctx->fallback_req.nbytes = areq->nbytes;
rctx->fallback_req.src = areq->src;
rctx->fallback_req.result = areq->result;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ struct sun8i_ss_alg_template *algt __maybe_unused;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template,
+ alg.hash.base);
+
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
- algt->stat_fb++;
+ algt->stat_fb++;
#endif
+ }
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -349,11 +357,11 @@ static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt;
struct scatterlist *sg;
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
if (areq->nbytes == 0) {
algt->stat_fb_len++;
@@ -398,8 +406,8 @@ static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
int sun8i_ss_hash_digest(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt;
struct sun8i_ss_dev *ss;
struct crypto_engine *engine;
@@ -408,7 +416,7 @@ int sun8i_ss_hash_digest(struct ahash_request *areq)
if (sun8i_ss_hash_need_fallback(areq))
return sun8i_ss_hash_digest_fb(areq);
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
ss = algt->ss;
e = sun8i_ss_get_engine_number(ss);
@@ -484,8 +492,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
struct ahash_request *areq = container_of(breq, struct ahash_request, base);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt;
struct sun8i_ss_dev *ss;
struct scatterlist *sg;
@@ -504,10 +512,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
*/
int hmac = 0;
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
ss = algt->ss;
- digestsize = algt->alg.hash.halg.digestsize;
+ digestsize = crypto_ahash_digestsize(tfm);
if (digestsize == SHA224_DIGEST_SIZE)
digestsize = SHA256_DIGEST_SIZE;
@@ -700,7 +708,7 @@ err_dma_result:
}
if (!err)
- memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+ memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
theend:
local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
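
The hunks above converge on one init-time pattern worth spelling out: the fallback's state size is now set on the live tfm instead of being patched into the shared algorithm template, and the tfm context is allocated zeroed by the core, so the old memset() can go. A minimal sketch, assuming hypothetical xxx_* names (crypto_ahash_set_statesize() and crypto_ahash_set_reqsize() come from <crypto/internal/hash.h>):

static int xxx_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct xxx_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm))
		return PTR_ERR(ctx->fallback_tfm);

	/* Advertise the fallback's state size on this tfm so that
	 * export()/import() round-trips through the fallback fit. */
	crypto_ahash_set_statesize(tfm,
				   crypto_ahash_statesize(ctx->fallback_tfm));
	crypto_ahash_set_reqsize(tfm, sizeof(struct xxx_hash_reqctx) +
				      crypto_ahash_reqsize(ctx->fallback_tfm));
	return 0;
}
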
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
index df6f08f6092f..ae66eb45fb24 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -201,16 +201,12 @@ struct sun8i_cipher_req_ctx {
/*
* struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
- * @enginectx: crypto_engine used by this TFM
* @key: pointer to key data
* @keylen: len of the key
* @ss: pointer to the private data of driver handling this TFM
* @fallback_tfm: pointer to the fallback TFM
- *
- * enginectx must be the first element
*/
struct sun8i_cipher_tfm_ctx {
- struct crypto_engine_ctx enginectx;
u32 *key;
u32 keylen;
struct sun8i_ss_dev *ss;
@@ -229,14 +225,10 @@ struct sun8i_ss_rng_tfm_ctx {
/*
* struct sun8i_ss_hash_tfm_ctx - context for an ahash TFM
- * @enginectx: crypto_engine used by this TFM
* @fallback_tfm: pointer to the fallback TFM
* @ss: pointer to the private data of driver handling this TFM
- *
- * enginectx must be the first element
*/
struct sun8i_ss_hash_tfm_ctx {
- struct crypto_engine_ctx enginectx;
struct crypto_ahash *fallback_tfm;
struct sun8i_ss_dev *ss;
u8 *ipad;
@@ -279,9 +271,9 @@ struct sun8i_ss_alg_template {
u32 ss_blockmode;
struct sun8i_ss_dev *ss;
union {
- struct skcipher_alg skcipher;
+ struct skcipher_engine_alg skcipher;
struct rng_alg rng;
- struct ahash_alg hash;
+ struct ahash_engine_alg hash;
} alg;
unsigned long stat_req;
unsigned long stat_fb;
@@ -293,14 +285,13 @@ struct sun8i_ss_alg_template {
char fbname[CRYPTO_MAX_ALG_NAME];
};
-int sun8i_ss_enqueue(struct crypto_async_request *areq, u32 type);
-
int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun8i_ss_cipher_init(struct crypto_tfm *tfm);
void sun8i_ss_cipher_exit(struct crypto_tfm *tfm);
+int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq);
int sun8i_ss_skdecrypt(struct skcipher_request *areq);
int sun8i_ss_skencrypt(struct skcipher_request *areq);
@@ -313,8 +304,8 @@ int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen
int sun8i_ss_prng_init(struct crypto_tfm *tfm);
void sun8i_ss_prng_exit(struct crypto_tfm *tfm);
-int sun8i_ss_hash_crainit(struct crypto_tfm *tfm);
-void sun8i_ss_hash_craexit(struct crypto_tfm *tfm);
+int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm);
+void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm);
int sun8i_ss_hash_init(struct ahash_request *areq);
int sun8i_ss_hash_export(struct ahash_request *areq, void *out);
int sun8i_ss_hash_import(struct ahash_request *areq, const void *in);
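
This header hunk is the crux of the whole conversion: the per-tfm crypto_engine_ctx, which had to sit first in every context struct, disappears, and the engine callback moves next to the algorithm definition. The resulting layout, condensed from the crypto engine headers as introduced by this series, with an illustrative (hypothetical) driver template:

/* As declared by the engine headers (abridged): */
struct crypto_engine_op {
	int (*do_one_request)(struct crypto_engine *engine, void *areq);
};

struct ahash_engine_alg {
	struct ahash_alg base;
	struct crypto_engine_op op;
};

/* Driver-side template (illustrative): the union members change type,
 * so every container_of() must now reach through the nested .base: */
struct xxx_alg_template {
	union {
		struct skcipher_engine_alg skcipher;
		struct ahash_engine_alg hash;
	} alg;
	struct xxx_dev *dev;
};

/* old: container_of(alg, struct xxx_alg_template, alg.hash);      */
/* new: container_of(alg, struct xxx_alg_template, alg.hash.base); */
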
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index af017a087ebf..3308406612fc 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -65,7 +65,7 @@ static int meson_cipher_do_fallback(struct skcipher_request *areq)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct meson_alg_template *algt;
- algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
algt->stat_fb++;
#endif
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
@@ -101,7 +101,7 @@ static int meson_cipher(struct skcipher_request *areq)
void *backup_iv = NULL, *bkeyiv;
u32 v;
- algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
crypto_tfm_alg_name(areq->base.tfm),
@@ -258,8 +258,7 @@ theend:
return err;
}
-static int meson_handle_cipher_request(struct crypto_engine *engine,
- void *areq)
+int meson_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
int err;
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
@@ -318,7 +317,7 @@ int meson_cipher_init(struct crypto_tfm *tfm)
memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));
- algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
op->mc = algt->mc;
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -331,10 +330,6 @@ int meson_cipher_init(struct crypto_tfm *tfm)
sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm);
- op->enginectx.op.do_one_request = meson_handle_cipher_request;
- op->enginectx.op.prepare_request = NULL;
- op->enginectx.op.unprepare_request = NULL;
-
return 0;
}
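
meson_handle_cipher_request() loses its static qualifier only because the op structure referencing it now lives in the core file; its body is untouched. For reference, the general shape of a do_one_request handler under the new API, sketched with hypothetical xxx_* names:

static int xxx_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);
	int err;

	err = xxx_run_cipher(req);	/* drive the hardware, may sleep */

	/* These drivers complete requests with BHs disabled. */
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, req, err);
	local_bh_enable();
	return 0;
}
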
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index 937187027ad5..da6dfe0f9ac3 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -6,17 +6,19 @@
*
* Core file which registers crypto algorithms supported by the hardware.
*/
+
+#include <crypto/engine.h>
+#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
-#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/dma-mapping.h>
#include "amlogic-gxl.h"
@@ -47,7 +49,7 @@ static struct meson_alg_template mc_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.blockmode = MESON_OPMODE_CBC,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-gxl",
@@ -68,12 +70,15 @@ static struct meson_alg_template mc_algs[] = {
.setkey = meson_aes_setkey,
.encrypt = meson_skencrypt,
.decrypt = meson_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = meson_handle_cipher_request,
+ },
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.blockmode = MESON_OPMODE_ECB,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-gxl",
@@ -93,33 +98,43 @@ static struct meson_alg_template mc_algs[] = {
.setkey = meson_aes_setkey,
.encrypt = meson_skencrypt,
.decrypt = meson_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = meson_handle_cipher_request,
+ },
},
};
-#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
static int meson_debugfs_show(struct seq_file *seq, void *v)
{
- struct meson_dev *mc = seq->private;
+ struct meson_dev *mc __maybe_unused = seq->private;
int i;
for (i = 0; i < MAXFLOW; i++)
- seq_printf(seq, "Channel %d: nreq %lu\n", i, mc->chanlist[i].stat_req);
+ seq_printf(seq, "Channel %d: nreq %lu\n", i,
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ mc->chanlist[i].stat_req);
+#else
+ 0ul);
+#endif
for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
switch (mc_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s %lu %lu\n",
- mc_algs[i].alg.skcipher.base.cra_driver_name,
- mc_algs[i].alg.skcipher.base.cra_name,
+ mc_algs[i].alg.skcipher.base.base.cra_driver_name,
+ mc_algs[i].alg.skcipher.base.base.cra_name,
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
mc_algs[i].stat_req, mc_algs[i].stat_fb);
+#else
+ 0ul, 0ul);
+#endif
break;
}
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(meson_debugfs);
-#endif
static void meson_free_chanlist(struct meson_dev *mc, int i)
{
@@ -183,10 +198,10 @@ static int meson_register_algs(struct meson_dev *mc)
mc_algs[i].mc = mc;
switch (mc_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
- err = crypto_register_skcipher(&mc_algs[i].alg.skcipher);
+ err = crypto_engine_register_skcipher(&mc_algs[i].alg.skcipher);
if (err) {
 				dev_err(mc->dev, "Failed to register %s\n",
- mc_algs[i].alg.skcipher.base.cra_name);
+ mc_algs[i].alg.skcipher.base.base.cra_name);
mc_algs[i].mc = NULL;
return err;
}
@@ -206,7 +221,7 @@ static void meson_unregister_algs(struct meson_dev *mc)
continue;
switch (mc_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
- crypto_unregister_skcipher(&mc_algs[i].alg.skcipher);
+ crypto_engine_unregister_skcipher(&mc_algs[i].alg.skcipher);
break;
}
}
@@ -264,10 +279,16 @@ static int meson_crypto_probe(struct platform_device *pdev)
if (err)
goto error_alg;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG)) {
+ struct dentry *dbgfs_dir;
+
+ dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL);
+ debugfs_create_file("stats", 0444, dbgfs_dir, mc, &meson_debugfs_fops);
+
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
- mc->dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL);
- debugfs_create_file("stats", 0444, mc->dbgfs_dir, mc, &meson_debugfs_fops);
+ mc->dbgfs_dir = dbgfs_dir;
#endif
+ }
return 0;
error_alg:
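
The debugfs rework above trades #ifdef for IS_ENABLED() so the statistics code is always parsed and type-checked, then discarded as dead code when the option is off; only accesses to fields that exist solely under the Kconfig symbol keep an #ifdef. A stripped-down sketch of the pattern (names hypothetical):

#include <linux/debugfs.h>

static void xxx_setup_debugfs(struct xxx_dev *dev)
{
	if (IS_ENABLED(CONFIG_XXX_DEBUG)) {
		struct dentry *dir;

		/* Compiled unconditionally, emitted only when the
		 * symbol is set, so the branch cannot bitrot. */
		dir = debugfs_create_dir("xxx-crypto", NULL);
		debugfs_create_file("stats", 0444, dir, dev, &xxx_fops);
	}
}
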
diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h
index 8c0746a1d6d4..1013a666c932 100644
--- a/drivers/crypto/amlogic/amlogic-gxl.h
+++ b/drivers/crypto/amlogic/amlogic-gxl.h
@@ -114,7 +114,6 @@ struct meson_cipher_req_ctx {
/*
* struct meson_cipher_tfm_ctx - context for a skcipher TFM
- * @enginectx: crypto_engine used by this TFM
* @key: pointer to key data
* @keylen: len of the key
* @keymode: The keymode(type and size of key) associated with this TFM
@@ -122,7 +121,6 @@ struct meson_cipher_req_ctx {
* @fallback_tfm: pointer to the fallback TFM
*/
struct meson_cipher_tfm_ctx {
- struct crypto_engine_ctx enginectx;
u32 *key;
u32 keylen;
u32 keymode;
@@ -143,7 +141,7 @@ struct meson_alg_template {
u32 type;
u32 blockmode;
union {
- struct skcipher_alg skcipher;
+ struct skcipher_engine_alg skcipher;
} alg;
struct meson_dev *mc;
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
@@ -160,3 +158,4 @@ int meson_cipher_init(struct crypto_tfm *tfm);
void meson_cipher_exit(struct crypto_tfm *tfm);
int meson_skdecrypt(struct skcipher_request *areq);
int meson_skencrypt(struct skcipher_request *areq);
+int meson_handle_cipher_request(struct crypto_engine *engine, void *areq);
diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c
index 470122c87fea..247c568aa8df 100644
--- a/drivers/crypto/aspeed/aspeed-acry.c
+++ b/drivers/crypto/aspeed/aspeed-acry.c
@@ -2,25 +2,23 @@
/*
* Copyright 2021 Aspeed Technology Inc.
*/
-#include <crypto/akcipher.h>
-#include <crypto/algapi.h>
#include <crypto/engine.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <crypto/scatterwalk.h>
#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/mfd/syscon.h>
-#include <linux/interrupt.h>
#include <linux/count_zeros.h>
-#include <linux/err.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define ACRY_DBG(d, fmt, ...) \
@@ -112,7 +110,6 @@ struct aspeed_acry_dev {
};
struct aspeed_acry_ctx {
- struct crypto_engine_ctx enginectx;
struct aspeed_acry_dev *acry_dev;
struct rsa_key key;
@@ -131,7 +128,7 @@ struct aspeed_acry_ctx {
struct aspeed_acry_alg {
struct aspeed_acry_dev *acry_dev;
- struct akcipher_alg akcipher;
+ struct akcipher_engine_alg akcipher;
};
enum aspeed_rsa_key_mode {
@@ -577,7 +574,7 @@ static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
const char *name = crypto_tfm_alg_name(&tfm->base);
struct aspeed_acry_alg *acry_alg;
- acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher);
+ acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher.base);
ctx->acry_dev = acry_alg->acry_dev;
@@ -589,10 +586,6 @@ static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->fallback_tfm);
}
- ctx->enginectx.op.do_one_request = aspeed_acry_do_request;
- ctx->enginectx.op.prepare_request = NULL;
- ctx->enginectx.op.unprepare_request = NULL;
-
return 0;
}
@@ -605,7 +598,7 @@ static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
{
- .akcipher = {
+ .akcipher.base = {
.encrypt = aspeed_acry_rsa_enc,
.decrypt = aspeed_acry_rsa_dec,
.sign = aspeed_acry_rsa_dec,
@@ -627,6 +620,9 @@ static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
.cra_ctxsize = sizeof(struct aspeed_acry_ctx),
},
},
+ .akcipher.op = {
+ .do_one_request = aspeed_acry_do_request,
+ },
},
};
@@ -636,10 +632,10 @@ static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) {
aspeed_acry_akcipher_algs[i].acry_dev = acry_dev;
- rc = crypto_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
+ rc = crypto_engine_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
if (rc) {
ACRY_DBG(acry_dev, "Failed to register %s\n",
- aspeed_acry_akcipher_algs[i].akcipher.base.cra_name);
+ aspeed_acry_akcipher_algs[i].akcipher.base.base.cra_name);
}
}
}
@@ -649,7 +645,7 @@ static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev)
int i;
for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++)
- crypto_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
+ crypto_engine_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
}
/* ACRY interrupt service routine. */
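
The RSA offload gets the same treatment: the engine op rides in the algorithm object and registration goes through the engine-aware helpers from <crypto/engine.h>. A compressed sketch with fields abridged and xxx_* names hypothetical:

static struct akcipher_engine_alg xxx_rsa = {
	.base = {
		.encrypt  = xxx_rsa_enc,
		.decrypt  = xxx_rsa_dec,
		.max_size = xxx_rsa_max_size,
		.base = {
			.cra_name        = "rsa",
			.cra_driver_name = "xxx-rsa",
			.cra_priority    = 300,
			.cra_module      = THIS_MODULE,
			.cra_ctxsize     = sizeof(struct xxx_rsa_ctx),
		},
	},
	.op = {
		.do_one_request = xxx_rsa_do_one,
	},
};

static int xxx_register_algs(void)
{
	/* Paired with crypto_engine_unregister_akcipher() on teardown. */
	return crypto_engine_register_akcipher(&xxx_rsa);
}
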
diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
index ef73b0028b4d..f0eddb7854e5 100644
--- a/drivers/crypto/aspeed/aspeed-hace-crypto.c
+++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
@@ -4,6 +4,17 @@
*/
#include "aspeed-hace.h"
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO_DEBUG
#define CIPHER_DBG(h, fmt, ...) \
@@ -696,7 +707,7 @@ static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm)
struct aspeed_hace_alg *crypto_alg;
- crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher);
+ crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher.base);
ctx->hace_dev = crypto_alg->hace_dev;
ctx->start = aspeed_hace_skcipher_trigger;
@@ -713,10 +724,6 @@ static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct aspeed_cipher_reqctx) +
crypto_skcipher_reqsize(ctx->fallback_tfm));
- ctx->enginectx.op.do_one_request = aspeed_crypto_do_request;
- ctx->enginectx.op.prepare_request = NULL;
- ctx->enginectx.op.unprepare_request = NULL;
-
return 0;
}
@@ -731,7 +738,7 @@ static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm)
static struct aspeed_hace_alg aspeed_crypto_algs[] = {
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = aspeed_aes_setkey,
@@ -751,10 +758,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -775,10 +785,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -799,10 +812,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -823,10 +839,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = aspeed_des_setkey,
@@ -846,10 +865,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -870,10 +892,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -894,10 +919,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -918,10 +946,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = aspeed_des_setkey,
@@ -941,10 +972,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
@@ -965,10 +999,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
@@ -989,10 +1026,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
@@ -1013,13 +1053,16 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
};
static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1039,10 +1082,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1062,10 +1108,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
@@ -1085,7 +1134,10 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
};
@@ -1095,13 +1147,13 @@ void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
int i;
for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++)
- crypto_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+ crypto_engine_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
if (hace_dev->version != AST2600_VERSION)
return;
for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++)
- crypto_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+ crypto_engine_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
}
void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
@@ -1112,10 +1164,10 @@ void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) {
aspeed_crypto_algs[i].hace_dev = hace_dev;
- rc = crypto_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+ rc = crypto_engine_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
if (rc) {
CIPHER_DBG(hace_dev, "Failed to register %s\n",
- aspeed_crypto_algs[i].alg.skcipher.base.cra_name);
+ aspeed_crypto_algs[i].alg.skcipher.base.base.cra_name);
}
}
@@ -1124,10 +1176,10 @@ void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) {
aspeed_crypto_algs_g6[i].hace_dev = hace_dev;
- rc = crypto_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+ rc = crypto_engine_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
if (rc) {
CIPHER_DBG(hace_dev, "Failed to register %s\n",
- aspeed_crypto_algs_g6[i].alg.skcipher.base.cra_name);
+ aspeed_crypto_algs_g6[i].alg.skcipher.base.base.cra_name);
}
}
}
diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index 935135229ebd..0b6e49c06eff 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -4,6 +4,17 @@
*/
#include "aspeed-hace.h"
+#include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define AHASH_DBG(h, fmt, ...) \
@@ -48,28 +59,6 @@ static const __be64 sha512_iv[8] = {
cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
};
-static const __be32 sha512_224_iv[16] = {
- cpu_to_be32(0xC8373D8CUL), cpu_to_be32(0xA24D5419UL),
- cpu_to_be32(0x6699E173UL), cpu_to_be32(0xD6D4DC89UL),
- cpu_to_be32(0xAEB7FA1DUL), cpu_to_be32(0x829CFF32UL),
- cpu_to_be32(0x14D59D67UL), cpu_to_be32(0xCF9F2F58UL),
- cpu_to_be32(0x692B6D0FUL), cpu_to_be32(0xA84DD47BUL),
- cpu_to_be32(0x736FE377UL), cpu_to_be32(0x4289C404UL),
- cpu_to_be32(0xA8859D3FUL), cpu_to_be32(0xC8361D6AUL),
- cpu_to_be32(0xADE61211UL), cpu_to_be32(0xA192D691UL)
-};
-
-static const __be32 sha512_256_iv[16] = {
- cpu_to_be32(0x94213122UL), cpu_to_be32(0x2CF72BFCUL),
- cpu_to_be32(0xA35F559FUL), cpu_to_be32(0xC2644CC8UL),
- cpu_to_be32(0x6BB89323UL), cpu_to_be32(0x51B1536FUL),
- cpu_to_be32(0x19773896UL), cpu_to_be32(0xBDEA4059UL),
- cpu_to_be32(0xE23E2896UL), cpu_to_be32(0xE3FF8EA8UL),
- cpu_to_be32(0x251E5EBEUL), cpu_to_be32(0x92398653UL),
- cpu_to_be32(0xFC99012BUL), cpu_to_be32(0xAAB8852CUL),
- cpu_to_be32(0xDC2DB70EUL), cpu_to_be32(0xA22CC581UL)
-};
-
/* The purpose of this padding is to ensure that the padded message is a
* multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
* The bit "1" is appended at the end of the message followed by
@@ -565,8 +554,8 @@ static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
return 0;
}
-static int aspeed_ahash_prepare_request(struct crypto_engine *engine,
- void *areq)
+static void aspeed_ahash_prepare_request(struct crypto_engine *engine,
+ void *areq)
{
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -581,8 +570,12 @@ static int aspeed_ahash_prepare_request(struct crypto_engine *engine,
hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
else
hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
+}
- return 0;
+static int aspeed_ahash_do_one(struct crypto_engine *engine, void *areq)
+{
+ aspeed_ahash_prepare_request(engine, areq);
+ return aspeed_ahash_do_request(engine, areq);
}
static int aspeed_sham_update(struct ahash_request *req)
@@ -750,62 +743,6 @@ static int aspeed_sham_init(struct ahash_request *req)
return 0;
}
-static int aspeed_sha512s_init(struct ahash_request *req)
-{
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
- struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-
- AHASH_DBG(hace_dev, "digest size: %d\n", crypto_ahash_digestsize(tfm));
-
- rctx->cmd = HASH_CMD_ACC_MODE;
- rctx->flags = 0;
-
- switch (crypto_ahash_digestsize(tfm)) {
- case SHA224_DIGEST_SIZE:
- rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_224 |
- HASH_CMD_SHA_SWAP;
- rctx->flags |= SHA_FLAGS_SHA512_224;
- rctx->digsize = SHA224_DIGEST_SIZE;
- rctx->block_size = SHA512_BLOCK_SIZE;
- rctx->sha_iv = sha512_224_iv;
- rctx->ivsize = 64;
- memcpy(rctx->digest, sha512_224_iv, rctx->ivsize);
- break;
- case SHA256_DIGEST_SIZE:
- rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_256 |
- HASH_CMD_SHA_SWAP;
- rctx->flags |= SHA_FLAGS_SHA512_256;
- rctx->digsize = SHA256_DIGEST_SIZE;
- rctx->block_size = SHA512_BLOCK_SIZE;
- rctx->sha_iv = sha512_256_iv;
- rctx->ivsize = 64;
- memcpy(rctx->digest, sha512_256_iv, rctx->ivsize);
- break;
- default:
- dev_warn(tctx->hace_dev->dev, "digest size %d not support\n",
- crypto_ahash_digestsize(tfm));
- return -EINVAL;
- }
-
- rctx->bufcnt = 0;
- rctx->total = 0;
- rctx->digcnt[0] = 0;
- rctx->digcnt[1] = 0;
-
- /* HMAC init */
- if (tctx->flags & SHA_FLAGS_HMAC) {
- rctx->digcnt[0] = rctx->block_size;
- rctx->bufcnt = rctx->block_size;
- memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
- rctx->flags |= SHA_FLAGS_HMAC;
- }
-
- return 0;
-}
-
static int aspeed_sham_digest(struct ahash_request *req)
{
return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
@@ -854,7 +791,7 @@ static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
struct aspeed_hace_alg *ast_alg;
- ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash);
+ ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base);
tctx->hace_dev = ast_alg->hace_dev;
tctx->flags = 0;
@@ -876,10 +813,6 @@ static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
}
}
- tctx->enginectx.op.do_one_request = aspeed_ahash_do_request;
- tctx->enginectx.op.prepare_request = aspeed_ahash_prepare_request;
- tctx->enginectx.op.unprepare_request = NULL;
-
return 0;
}
@@ -917,7 +850,7 @@ static int aspeed_sham_import(struct ahash_request *req, const void *in)
static struct aspeed_hace_alg aspeed_ahash_algs[] = {
{
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -944,9 +877,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
{
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -973,9 +909,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
{
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1002,10 +941,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
{
.alg_base = "sha1",
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1034,10 +976,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
{
.alg_base = "sha224",
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1066,10 +1011,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
{
.alg_base = "sha256",
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1098,12 +1046,15 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
};
static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
{
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1130,9 +1081,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
{
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1159,68 +1113,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
}
}
},
- },
- {
- .alg.ahash = {
- .init = aspeed_sha512s_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "sha512_224",
- .cra_driver_name = "aspeed-sha512_224",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- },
- {
- .alg.ahash = {
- .init = aspeed_sha512s_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "sha512_256",
- .cra_driver_name = "aspeed-sha512_256",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
},
},
{
.alg_base = "sha384",
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1249,10 +1148,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
{
.alg_base = "sha512",
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1281,69 +1183,8 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
}
}
},
- },
- {
- .alg_base = "sha512_224",
- .alg.ahash = {
- .init = aspeed_sha512s_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha512_224)",
- .cra_driver_name = "aspeed-hmac-sha512_224",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- },
- {
- .alg_base = "sha512_256",
- .alg.ahash = {
- .init = aspeed_sha512s_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha512_256)",
- .cra_driver_name = "aspeed-hmac-sha512_256",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
},
},
};
@@ -1353,13 +1194,13 @@ void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
int i;
for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++)
- crypto_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
+ crypto_engine_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
if (hace_dev->version != AST2600_VERSION)
return;
for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++)
- crypto_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+ crypto_engine_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
}
void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
@@ -1370,10 +1211,10 @@ void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) {
aspeed_ahash_algs[i].hace_dev = hace_dev;
- rc = crypto_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
+ rc = crypto_engine_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
if (rc) {
AHASH_DBG(hace_dev, "Failed to register %s\n",
- aspeed_ahash_algs[i].alg.ahash.halg.base.cra_name);
+ aspeed_ahash_algs[i].alg.ahash.base.halg.base.cra_name);
}
}
@@ -1382,10 +1223,10 @@ void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) {
aspeed_ahash_algs_g6[i].hace_dev = hace_dev;
- rc = crypto_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+ rc = crypto_engine_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
if (rc) {
AHASH_DBG(hace_dev, "Failed to register %s\n",
- aspeed_ahash_algs_g6[i].alg.ahash.halg.base.cra_name);
+ aspeed_ahash_algs_g6[i].alg.ahash.base.halg.base.cra_name);
}
}
}
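
The new engine API drops the separate prepare_request/unprepare_request callbacks, so the HACE hash driver folds its prepare step into a single do_one_request; the submission side is unchanged. Both halves sketched with hypothetical xxx_* names (xxx_dev_from_req() is an assumed helper):

/* Submission path: hand the request to the engine kthread. */
static int xxx_ahash_digest(struct ahash_request *req)
{
	struct xxx_dev *dev = xxx_dev_from_req(req);

	return crypto_transfer_hash_request_to_engine(dev->engine, req);
}

/* Engine callback: the old prepare step now runs inline. */
static int xxx_ahash_do_one(struct crypto_engine *engine, void *areq)
{
	xxx_ahash_prepare(engine, areq);	/* choose DMA strategy */
	return xxx_ahash_start(engine, areq);	/* program the hardware */
}
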
diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c
index d2871e1de9c2..8f7aab82e1d8 100644
--- a/drivers/crypto/aspeed/aspeed-hace.c
+++ b/drivers/crypto/aspeed/aspeed-hace.c
@@ -3,7 +3,14 @@
* Copyright (c) 2021 Aspeed Technology Inc.
*/
+#include "aspeed-hace.h"
+#include <crypto/engine.h>
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -11,8 +18,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
-#include "aspeed-hace.h"
-
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define HACE_DBG(d, fmt, ...) \
dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
index 05d0a15d546d..68f70e01fccb 100644
--- a/drivers/crypto/aspeed/aspeed-hace.h
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -2,25 +2,14 @@
#ifndef __ASPEED_HACE_H__
#define __ASPEED_HACE_H__
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/fips.h>
-#include <linux/dma-mapping.h>
#include <crypto/aes.h>
-#include <crypto/des.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/aead.h>
-#include <crypto/internal/akcipher.h>
-#include <crypto/internal/des.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/kpp.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/algapi.h>
#include <crypto/engine.h>
-#include <crypto/hmac.h>
-#include <crypto/sha1.h>
+#include <crypto/hash.h>
#include <crypto/sha2.h>
+#include <linux/bits.h>
+#include <linux/compiler_attributes.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
/*****************************
* *
@@ -144,6 +133,7 @@
HACE_CMD_OFB | HACE_CMD_CTR)
struct aspeed_hace_dev;
+struct scatterlist;
typedef int (*aspeed_hace_fn_t)(struct aspeed_hace_dev *);
@@ -178,8 +168,6 @@ struct aspeed_sha_hmac_ctx {
};
struct aspeed_sham_ctx {
- struct crypto_engine_ctx enginectx;
-
struct aspeed_hace_dev *hace_dev;
unsigned long flags; /* hmac flag */
@@ -235,8 +223,6 @@ struct aspeed_engine_crypto {
};
struct aspeed_cipher_ctx {
- struct crypto_engine_ctx enginectx;
-
struct aspeed_hace_dev *hace_dev;
int key_len;
u8 key[AES_MAX_KEYLENGTH];
@@ -275,8 +261,8 @@ struct aspeed_hace_alg {
const char *alg_base;
union {
- struct skcipher_alg skcipher;
- struct ahash_alg ahash;
+ struct skcipher_engine_alg skcipher;
+ struct ahash_engine_alg ahash;
} alg;
};
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 143d33fbb316..55b5f577b01c 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -28,7 +28,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
@@ -2533,13 +2533,11 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
}
}
-#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
{ .compatible = "atmel,at91sam9g46-aes" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
-#endif
static int atmel_aes_probe(struct platform_device *pdev)
{
@@ -2566,11 +2564,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
- /* Get the base address */
- aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!aes_res) {
- dev_err(dev, "no MEM resource info\n");
- err = -ENODEV;
+ aes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &aes_res);
+ if (IS_ERR(aes_dd->io_base)) {
+ err = PTR_ERR(aes_dd->io_base);
goto err_tasklet_kill;
}
aes_dd->phys_base = aes_res->start;
@@ -2597,13 +2593,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
goto err_tasklet_kill;
}
- aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
- if (IS_ERR(aes_dd->io_base)) {
- dev_err(dev, "can't ioremap\n");
- err = PTR_ERR(aes_dd->io_base);
- goto err_tasklet_kill;
- }
-
err = clk_prepare(aes_dd->iclk);
if (err)
goto err_tasklet_kill;
@@ -2687,7 +2676,7 @@ static struct platform_driver atmel_aes_driver = {
.remove = atmel_aes_remove,
.driver = {
.name = "atmel_aes",
- .of_match_table = of_match_ptr(atmel_aes_dt_ids),
+ .of_match_table = atmel_aes_dt_ids,
},
};
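
The Atmel probe cleanups collapse platform_get_resource() plus devm_ioremap_resource() into one helper that also hands back the resource, so the phys_base bookkeeping keeps working. Minimal sketch (xxx_* hypothetical):

static int xxx_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *io_base;

	/* Fetch, request and ioremap MEM resource 0 in one step;
	 * res is filled in so the caller can still read res->start. */
	io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(io_base))
		return PTR_ERR(io_base);

	return 0;
}
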
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 432beabd79e6..590ea984c622 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 6bef634d3c86..3622120add62 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -28,7 +28,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
@@ -1770,7 +1770,8 @@ static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd)
size_t bs = ctx->block_size;
size_t i, num_words = bs / sizeof(u32);
- memcpy(hmac->opad, hmac->ipad, bs);
+ unsafe_memcpy(hmac->opad, hmac->ipad, bs,
+ "fortified memcpy causes -Wrestrict warning");
for (i = 0; i < num_words; ++i) {
hmac->ipad[i] ^= 0x36363636;
hmac->opad[i] ^= 0x5c5c5c5c;
@@ -2499,8 +2500,8 @@ static int atmel_sha_dma_init(struct atmel_sha_dev *dd)
{
dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
if (IS_ERR(dd->dma_lch_in.chan)) {
- dev_err(dd->dev, "DMA channel is not available\n");
- return PTR_ERR(dd->dma_lch_in.chan);
+ return dev_err_probe(dd->dev, PTR_ERR(dd->dma_lch_in.chan),
+ "DMA channel is not available\n");
}
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
@@ -2570,14 +2571,12 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
}
}
-#if defined(CONFIG_OF)
static const struct of_device_id atmel_sha_dt_ids[] = {
{ .compatible = "atmel,at91sam9g46-sha" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
-#endif
static int atmel_sha_probe(struct platform_device *pdev)
{
@@ -2604,11 +2603,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
- /* Get the base address */
- sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!sha_res) {
- dev_err(dev, "no MEM resource info\n");
- err = -ENODEV;
+ sha_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &sha_res);
+ if (IS_ERR(sha_dd->io_base)) {
+ err = PTR_ERR(sha_dd->io_base);
goto err_tasklet_kill;
}
sha_dd->phys_base = sha_res->start;
@@ -2635,13 +2632,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
goto err_tasklet_kill;
}
- sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
- if (IS_ERR(sha_dd->io_base)) {
- dev_err(dev, "can't ioremap\n");
- err = PTR_ERR(sha_dd->io_base);
- goto err_tasklet_kill;
- }
-
err = clk_prepare(sha_dd->iclk);
if (err)
goto err_tasklet_kill;
@@ -2716,7 +2706,7 @@ static struct platform_driver atmel_sha_driver = {
.remove = atmel_sha_remove,
.driver = {
.name = "atmel_sha",
- .of_match_table = of_match_ptr(atmel_sha_dt_ids),
+ .of_match_table = atmel_sha_dt_ids,
},
};
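
The dev_err_probe() conversion above logs and returns the error in one expression, and stays quiet on -EPROBE_DEFER, recording the reason for the deferred-devices report instead; that matters for DMA channels that may simply not be ready yet. The shape of the pattern, sketched with hypothetical xxx_* names:

static int xxx_dma_init(struct xxx_dev *dd)
{
	dd->chan = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->chan))
		return dev_err_probe(dd->dev, PTR_ERR(dd->chan),
				     "DMA channel is not available\n");
	return 0;
}
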
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index c9ded8be9c39..099b32a10dd7 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -28,7 +28,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
@@ -1139,13 +1139,11 @@ static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
}
}
-#if defined(CONFIG_OF)
static const struct of_device_id atmel_tdes_dt_ids[] = {
{ .compatible = "atmel,at91sam9g46-tdes" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
-#endif
static int atmel_tdes_probe(struct platform_device *pdev)
{
@@ -1172,11 +1170,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
- /* Get the base address */
- tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!tdes_res) {
- dev_err(dev, "no MEM resource info\n");
- err = -ENODEV;
+ tdes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &tdes_res);
+ if (IS_ERR(tdes_dd->io_base)) {
+ err = PTR_ERR(tdes_dd->io_base);
goto err_tasklet_kill;
}
tdes_dd->phys_base = tdes_res->start;
@@ -1203,12 +1199,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
goto err_tasklet_kill;
}
- tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
- if (IS_ERR(tdes_dd->io_base)) {
- err = PTR_ERR(tdes_dd->io_base);
- goto err_tasklet_kill;
- }
-
err = atmel_tdes_hw_version_init(tdes_dd);
if (err)
goto err_tasklet_kill;
@@ -1282,7 +1272,7 @@ static struct platform_driver atmel_tdes_driver = {
.remove = atmel_tdes_remove,
.driver = {
.name = "atmel_tdes",
- .of_match_table = of_match_ptr(atmel_tdes_dt_ids),
+ .of_match_table = atmel_tdes_dt_ids,
},
};
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 70b911baab26..689be70d69c1 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -15,8 +15,7 @@
#include <linux/kthread.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/io.h>
#include <linux/bitops.h>
@@ -2397,7 +2396,8 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
memset(ctx->ipad + ctx->authkeylen, 0,
blocksize - ctx->authkeylen);
ctx->authkeylen = 0;
- memcpy(ctx->opad, ctx->ipad, blocksize);
+ unsafe_memcpy(ctx->opad, ctx->ipad, blocksize,
+ "fortified memcpy causes -Wrestrict warning");
for (index = 0; index < blocksize; index++) {
ctx->ipad[index] ^= HMAC_IPAD_VALUE;
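
Both HMAC fixups in this series address the same false positive: ipad and opad are adjacent fixed-size members of one struct, and the fortified memcpy() trips -Wrestrict even though the regions cannot overlap. unsafe_memcpy() keeps the copy and records the audit note as its fourth argument. The surrounding pad derivation for context (HMAC_*_VALUE constants are from <crypto/hmac.h>):

	unsafe_memcpy(ctx->opad, ctx->ipad, blocksize,
		      "fortified memcpy causes -Wrestrict warning");
	for (i = 0; i < blocksize; i++) {
		ctx->ipad[i] ^= HMAC_IPAD_VALUE;	/* 0x36 */
		ctx->opad[i] ^= HMAC_OPAD_VALUE;	/* 0x5c */
	}
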
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index feb86013dbf6..eba2d750c3b0 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -56,12 +56,15 @@
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"
-#include <crypto/engine.h>
-#include <crypto/xts.h>
#include <asm/unaligned.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -95,13 +98,13 @@ struct caam_alg_entry {
};
struct caam_aead_alg {
- struct aead_alg aead;
+ struct aead_engine_alg aead;
struct caam_alg_entry caam;
bool registered;
};
struct caam_skcipher_alg {
- struct skcipher_alg skcipher;
+ struct skcipher_engine_alg skcipher;
struct caam_alg_entry caam;
bool registered;
};
@@ -110,7 +113,6 @@ struct caam_skcipher_alg {
* per-session context
*/
struct caam_ctx {
- struct crypto_engine_ctx enginectx;
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
@@ -188,7 +190,8 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
static int aead_set_sh_desc(struct crypto_aead *aead)
{
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
- struct caam_aead_alg, aead);
+ struct caam_aead_alg,
+ aead.base);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
@@ -738,7 +741,7 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct caam_skcipher_alg *alg =
container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
- skcipher);
+ skcipher.base);
struct device *jrdev = ctx->jrdev;
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc;
@@ -1195,7 +1198,8 @@ static void init_authenc_job(struct aead_request *req,
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
- struct caam_aead_alg, aead);
+ struct caam_aead_alg,
+ aead.base);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
@@ -1881,7 +1885,7 @@ static int skcipher_decrypt(struct skcipher_request *req)
static struct caam_skcipher_alg driver_algs[] = {
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-caam",
@@ -1894,10 +1898,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-3des-caam",
@@ -1910,10 +1917,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "cbc(des)",
.cra_driver_name = "cbc-des-caam",
@@ -1926,10 +1936,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-caam",
@@ -1943,11 +1956,14 @@ static struct caam_skcipher_alg driver_algs[] = {
.ivsize = AES_BLOCK_SIZE,
.chunksize = AES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-caam",
@@ -1963,6 +1979,9 @@ static struct caam_skcipher_alg driver_algs[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.chunksize = AES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -1970,7 +1989,7 @@ static struct caam_skcipher_alg driver_algs[] = {
},
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-caam",
@@ -1984,10 +2003,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-caam",
@@ -1999,10 +2021,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-caam",
@@ -2014,10 +2039,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-caam",
@@ -2029,13 +2057,16 @@ static struct caam_skcipher_alg driver_algs[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
},
};
static struct caam_aead_alg driver_aeads[] = {
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "rfc4106(gcm(aes))",
.cra_driver_name = "rfc4106-gcm-aes-caam",
@@ -2048,13 +2079,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
.nodkp = true,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "rfc4543(gcm(aes))",
.cra_driver_name = "rfc4543-gcm-aes-caam",
@@ -2067,6 +2101,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = GCM_RFC4543_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
.nodkp = true,
@@ -2074,7 +2111,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
/* Galois Counter Mode */
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "gcm-aes-caam",
@@ -2087,6 +2124,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
.nodkp = true,
@@ -2094,7 +2134,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
/* single-pass ipsec_esp descriptor */
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),"
"ecb(cipher_null))",
@@ -2109,13 +2149,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),"
"ecb(cipher_null))",
@@ -2130,13 +2173,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),"
"ecb(cipher_null))",
@@ -2151,13 +2197,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),"
"ecb(cipher_null))",
@@ -2172,13 +2221,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),"
"ecb(cipher_null))",
@@ -2193,13 +2245,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),"
"ecb(cipher_null))",
@@ -2214,13 +2269,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "authenc-hmac-md5-"
@@ -2234,6 +2292,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2241,7 +2302,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(md5),"
"cbc(aes)))",
@@ -2256,6 +2317,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2264,7 +2328,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-"
@@ -2278,6 +2342,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2285,7 +2352,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha1),"
"cbc(aes)))",
@@ -2300,6 +2367,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2308,7 +2378,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha224-"
@@ -2322,6 +2392,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2329,7 +2402,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha224),"
"cbc(aes)))",
@@ -2344,6 +2417,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2352,7 +2428,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha256-"
@@ -2366,6 +2442,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2373,7 +2452,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(aes)))",
@@ -2388,6 +2467,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2396,7 +2478,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha384-"
@@ -2410,6 +2492,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2417,7 +2502,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha384),"
"cbc(aes)))",
@@ -2432,6 +2517,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2440,7 +2528,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha512-"
@@ -2454,6 +2542,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2461,7 +2552,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha512),"
"cbc(aes)))",
@@ -2476,6 +2567,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2484,7 +2578,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-md5-"
@@ -2498,6 +2592,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2505,7 +2602,7 @@ static struct caam_aead_alg driver_aeads[] = {
}
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(md5),"
"cbc(des3_ede)))",
@@ -2520,6 +2617,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2528,7 +2628,7 @@ static struct caam_aead_alg driver_aeads[] = {
}
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),"
"cbc(des3_ede))",
@@ -2543,6 +2643,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2550,7 +2653,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha1),"
"cbc(des3_ede)))",
@@ -2566,6 +2669,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2574,7 +2680,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),"
"cbc(des3_ede))",
@@ -2589,6 +2695,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2596,7 +2705,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha224),"
"cbc(des3_ede)))",
@@ -2612,6 +2721,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2620,7 +2732,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),"
"cbc(des3_ede))",
@@ -2635,6 +2747,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2642,7 +2757,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des3_ede)))",
@@ -2658,6 +2773,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2666,7 +2784,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),"
"cbc(des3_ede))",
@@ -2681,6 +2799,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2688,7 +2809,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha384),"
"cbc(des3_ede)))",
@@ -2704,6 +2825,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2712,7 +2836,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),"
"cbc(des3_ede))",
@@ -2727,6 +2851,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2734,7 +2861,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha512),"
"cbc(des3_ede)))",
@@ -2750,6 +2877,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2758,7 +2888,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(des))",
.cra_driver_name = "authenc-hmac-md5-"
@@ -2772,6 +2902,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2779,7 +2912,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(md5),"
"cbc(des)))",
@@ -2794,6 +2927,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2802,7 +2938,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(des))",
.cra_driver_name = "authenc-hmac-sha1-"
@@ -2816,6 +2952,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2823,7 +2962,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha1),"
"cbc(des)))",
@@ -2838,6 +2977,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2846,7 +2988,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(des))",
.cra_driver_name = "authenc-hmac-sha224-"
@@ -2860,6 +3002,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2867,7 +3012,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha224),"
"cbc(des)))",
@@ -2882,6 +3027,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2890,7 +3038,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(des))",
.cra_driver_name = "authenc-hmac-sha256-"
@@ -2904,6 +3052,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2911,7 +3062,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des)))",
@@ -2926,6 +3077,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2934,7 +3088,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(des))",
.cra_driver_name = "authenc-hmac-sha384-"
@@ -2948,6 +3102,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2955,7 +3112,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha384),"
"cbc(des)))",
@@ -2970,6 +3127,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2978,7 +3138,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(des))",
.cra_driver_name = "authenc-hmac-sha512-"
@@ -2992,6 +3152,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2999,7 +3162,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha512),"
"cbc(des)))",
@@ -3014,6 +3177,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -3022,7 +3188,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),"
"rfc3686(ctr(aes)))",
@@ -3037,6 +3203,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3046,7 +3215,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc("
"hmac(md5),rfc3686(ctr(aes))))",
@@ -3061,6 +3230,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3071,7 +3243,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),"
"rfc3686(ctr(aes)))",
@@ -3086,6 +3258,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3095,7 +3270,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc("
"hmac(sha1),rfc3686(ctr(aes))))",
@@ -3110,6 +3285,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3120,7 +3298,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),"
"rfc3686(ctr(aes)))",
@@ -3135,6 +3313,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3144,7 +3325,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc("
"hmac(sha224),rfc3686(ctr(aes))))",
@@ -3159,6 +3340,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3169,7 +3353,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),"
"rfc3686(ctr(aes)))",
@@ -3184,6 +3368,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3193,7 +3380,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc(hmac(sha256),"
"rfc3686(ctr(aes))))",
@@ -3208,6 +3395,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3218,7 +3408,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),"
"rfc3686(ctr(aes)))",
@@ -3233,6 +3423,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3242,7 +3435,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc(hmac(sha384),"
"rfc3686(ctr(aes))))",
@@ -3257,6 +3450,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3267,7 +3463,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),"
"rfc3686(ctr(aes)))",
@@ -3282,6 +3478,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3291,7 +3490,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc(hmac(sha512),"
"rfc3686(ctr(aes))))",
@@ -3306,6 +3505,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3316,7 +3518,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "rfc7539(chacha20,poly1305)",
.cra_driver_name = "rfc7539-chacha20-poly1305-"
@@ -3330,6 +3532,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CHACHAPOLY_IV_SIZE,
.maxauthsize = POLY1305_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
OP_ALG_AAI_AEAD,
@@ -3339,7 +3544,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "rfc7539esp(chacha20,poly1305)",
.cra_driver_name = "rfc7539esp-chacha20-"
@@ -3353,6 +3558,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = 8,
.maxauthsize = POLY1305_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
OP_ALG_AAI_AEAD,
@@ -3412,13 +3620,11 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
{
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
- container_of(alg, typeof(*caam_alg), skcipher);
+ container_of(alg, typeof(*caam_alg), skcipher.base);
struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
int ret = 0;
- ctx->enginectx.op.do_one_request = skcipher_do_one_req;
-
if (alg_aai == OP_ALG_AAI_XTS) {
const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
struct crypto_skcipher *fallback;
@@ -3449,13 +3655,11 @@ static int caam_aead_init(struct crypto_aead *tfm)
{
struct aead_alg *alg = crypto_aead_alg(tfm);
struct caam_aead_alg *caam_alg =
- container_of(alg, struct caam_aead_alg, aead);
+ container_of(alg, struct caam_aead_alg, aead.base);
struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);
crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
- ctx->enginectx.op.do_one_request = aead_do_one_req;
-
return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
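Since the algorithm definition is now the .base member of an engine-alg wrapper, the container_of() member path has to name the nested field; keeping the old path would compute a wrong wrapper address. A hedged illustration, with the field layout assumed from this file's caam_skcipher_alg:

#include <linux/container_of.h>

struct caam_skcipher_alg {
	struct skcipher_engine_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

static inline struct caam_skcipher_alg *to_caam_alg(struct skcipher_alg *alg)
{
	/* alg points at ->skcipher.base, so walk back from that member */
	return container_of(alg, struct caam_skcipher_alg, skcipher.base);
}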
@@ -3490,20 +3694,20 @@ void caam_algapi_exit(void)
struct caam_aead_alg *t_alg = driver_aeads + i;
if (t_alg->registered)
- crypto_unregister_aead(&t_alg->aead);
+ crypto_engine_unregister_aead(&t_alg->aead);
}
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
struct caam_skcipher_alg *t_alg = driver_algs + i;
if (t_alg->registered)
- crypto_unregister_skcipher(&t_alg->skcipher);
+ crypto_engine_unregister_skcipher(&t_alg->skcipher);
}
}
static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
- struct skcipher_alg *alg = &t_alg->skcipher;
+ struct skcipher_alg *alg = &t_alg->skcipher.base;
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
@@ -3517,7 +3721,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
- struct aead_alg *alg = &t_alg->aead;
+ struct aead_alg *alg = &t_alg->aead.base;
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
@@ -3607,10 +3811,10 @@ int caam_algapi_init(struct device *ctrldev)
caam_skcipher_alg_init(t_alg);
- err = crypto_register_skcipher(&t_alg->skcipher);
+ err = crypto_engine_register_skcipher(&t_alg->skcipher);
if (err) {
pr_warn("%s alg registration failed\n",
- t_alg->skcipher.base.cra_driver_name);
+ t_alg->skcipher.base.base.cra_driver_name);
continue;
}
@@ -3654,15 +3858,15 @@ int caam_algapi_init(struct device *ctrldev)
* if MD or MD size is not supported by device.
*/
if (is_mdha(c2_alg_sel) &&
- (!md_inst || t_alg->aead.maxauthsize > md_limit))
+ (!md_inst || t_alg->aead.base.maxauthsize > md_limit))
continue;
caam_aead_alg_init(t_alg);
- err = crypto_register_aead(&t_alg->aead);
+ err = crypto_engine_register_aead(&t_alg->aead);
if (err) {
pr_warn("%s alg registration failed\n",
- t_alg->aead.base.cra_driver_name);
+ t_alg->aead.base.base.cra_driver_name);
continue;
}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 80deb003f0a5..290c8500c247 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -65,9 +65,13 @@
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
-#include <crypto/engine.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#define CAAM_CRA_PRIORITY 3000
@@ -89,7 +93,6 @@ static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
- struct crypto_engine_ctx enginectx;
u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
@@ -368,10 +371,8 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
int ret;
desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
- if (!desc) {
- dev_err(jrdev, "unable to allocate key input memory\n");
+ if (!desc)
return -ENOMEM;
- }
init_job_desc(desc, 0);
@@ -702,19 +703,14 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
int sg_num, u32 *sh_desc,
dma_addr_t sh_desc_dma)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct caam_hash_state *state = ahash_request_ctx_dma(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
struct ahash_edesc *edesc;
- unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
- edesc = kzalloc(sizeof(*edesc) + sg_size, flags);
- if (!edesc) {
- dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
+ edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
+ if (!edesc)
return NULL;
- }
state->edesc = edesc;
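struct_size() from <linux/overflow.h> replaces the open-coded arithmetic: it sizes a structure with a trailing flexible-array member and saturates to SIZE_MAX if the multiplication or addition overflows, so kzalloc() fails cleanly rather than returning an undersized buffer. Ignoring that overflow handling, the computation is:

/* struct_size(edesc, sec4_sg, sg_num), overflow checks aside */
size_t sz = sizeof(*edesc) + sg_num * sizeof(edesc->sec4_sg[0]);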
@@ -1757,7 +1753,7 @@ static struct caam_hash_template driver_hash[] = {
struct caam_hash_alg {
struct list_head entry;
int alg_type;
- struct ahash_alg ahash_alg;
+ struct ahash_engine_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
@@ -1769,7 +1765,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
struct ahash_alg *alg =
container_of(halg, struct ahash_alg, halg);
struct caam_hash_alg *caam_hash =
- container_of(alg, struct caam_hash_alg, ahash_alg);
+ container_of(alg, struct caam_hash_alg, ahash_alg.base);
struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
@@ -1860,8 +1856,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
sh_desc_digest) -
sh_desc_update_offset;
- ctx->enginectx.op.do_one_request = ahash_do_one_req;
-
crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
/*
@@ -1894,7 +1888,7 @@ void caam_algapi_hash_exit(void)
return;
list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
- crypto_unregister_ahash(&t_alg->ahash_alg);
+ crypto_engine_unregister_ahash(&t_alg->ahash_alg);
list_del(&t_alg->entry);
kfree(t_alg);
}
@@ -1909,13 +1903,11 @@ caam_hash_alloc(struct caam_hash_template *template,
struct crypto_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg) {
- pr_err("failed to allocate t_alg\n");
+ if (!t_alg)
return ERR_PTR(-ENOMEM);
- }
- t_alg->ahash_alg = template->template_ahash;
- halg = &t_alg->ahash_alg;
+ t_alg->ahash_alg.base = template->template_ahash;
+ halg = &t_alg->ahash_alg.base;
alg = &halg->halg.base;
if (keyed) {
@@ -1928,7 +1920,7 @@ caam_hash_alloc(struct caam_hash_template *template,
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
- t_alg->ahash_alg.setkey = NULL;
+ halg->setkey = NULL;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
@@ -1940,6 +1932,7 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
t_alg->alg_type = template->alg_type;
+ t_alg->ahash_alg.op.do_one_request = ahash_do_one_req;
return t_alg;
}
@@ -2001,10 +1994,10 @@ int caam_algapi_hash_init(struct device *ctrldev)
continue;
}
- err = crypto_register_ahash(&t_alg->ahash_alg);
+ err = crypto_engine_register_ahash(&t_alg->ahash_alg);
if (err) {
pr_warn("%s alg registration failed: %d\n",
- t_alg->ahash_alg.halg.base.cra_driver_name,
+ t_alg->ahash_alg.base.halg.base.cra_driver_name,
err);
kfree(t_alg);
} else
@@ -2021,10 +2014,10 @@ int caam_algapi_hash_init(struct device *ctrldev)
continue;
}
- err = crypto_register_ahash(&t_alg->ahash_alg);
+ err = crypto_engine_register_ahash(&t_alg->ahash_alg);
if (err) {
pr_warn("%s alg registration failed: %d\n",
- t_alg->ahash_alg.halg.base.cra_driver_name,
+ t_alg->ahash_alg.base.halg.base.cra_driver_name,
err);
kfree(t_alg);
} else
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 72afc249d42f..887a5f2fb927 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -16,8 +16,12 @@
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"
+#include <crypto/internal/engine.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
@@ -38,7 +42,7 @@ static u8 *zero_buffer;
static bool init_done;
struct caam_akcipher_alg {
- struct akcipher_alg akcipher;
+ struct akcipher_engine_alg akcipher;
bool registered;
};
@@ -225,7 +229,9 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
if (len && *buff)
break;
- sg_miter_next(&miter);
+ if (!sg_miter_next(&miter))
+ break;
+
buff = miter.addr;
len = miter.length;
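The added check matters because a false return from sg_miter_next() means the iterator no longer holds a valid mapping; reading miter.addr or miter.length afterwards would use stale data. A hedged sketch of the safe walk (flags and the byte-counting purpose are illustrative, not the exact loop above):

#include <linux/scatterlist.h>

static size_t count_bytes(struct scatterlist *sgl)
{
	struct sg_mapping_iter miter;
	size_t total = 0;

	sg_miter_start(&miter, sgl, sg_nents(sgl), SG_MITER_FROM_SG);
	while (sg_miter_next(&miter)) {
		/* miter.addr and miter.length are valid only in this body */
		total += miter.length;
	}
	sg_miter_stop(&miter);

	return total;
}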
@@ -1121,8 +1127,6 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
return -ENOMEM;
}
- ctx->enginectx.op.do_one_request = akcipher_do_one_req;
-
return 0;
}
@@ -1139,7 +1143,7 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
}
static struct caam_akcipher_alg caam_rsa = {
- .akcipher = {
+ .akcipher.base = {
.encrypt = caam_rsa_enc,
.decrypt = caam_rsa_dec,
.set_pub_key = caam_rsa_set_pub_key,
@@ -1155,7 +1159,10 @@ static struct caam_akcipher_alg caam_rsa = {
.cra_ctxsize = sizeof(struct caam_rsa_ctx) +
CRYPTO_DMA_PADDING,
},
- }
+ },
+ .akcipher.op = {
+ .do_one_request = akcipher_do_one_req,
+ },
};
/* Public Key Cryptography module initialization handler */
@@ -1193,12 +1200,12 @@ int caam_pkc_init(struct device *ctrldev)
if (!zero_buffer)
return -ENOMEM;
- err = crypto_register_akcipher(&caam_rsa.akcipher);
+ err = crypto_engine_register_akcipher(&caam_rsa.akcipher);
if (err) {
kfree(zero_buffer);
dev_warn(ctrldev, "%s alg registration failed\n",
- caam_rsa.akcipher.base.cra_driver_name);
+ caam_rsa.akcipher.base.base.cra_driver_name);
} else {
init_done = true;
caam_rsa.registered = true;
@@ -1214,7 +1221,7 @@ void caam_pkc_exit(void)
return;
if (caam_rsa.registered)
- crypto_unregister_akcipher(&caam_rsa.akcipher);
+ crypto_engine_unregister_akcipher(&caam_rsa.akcipher);
kfree(zero_buffer);
}
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index cc889a525e2f..96d03704c9be 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -12,7 +12,6 @@
#define _PKC_DESC_H_
#include "compat.h"
#include "pdb.h"
-#include <crypto/engine.h>
/**
* caam_priv_key_form - CAAM RSA private key representation
@@ -88,13 +87,11 @@ struct caam_rsa_key {
/**
* caam_rsa_ctx - per session context.
- * @enginectx : crypto engine context
* @key : RSA key in DMA zone
* @dev : device structure
* @padding_dma : dma address of padding, for adding it to the input
*/
struct caam_rsa_ctx {
- struct crypto_engine_ctx enginectx;
struct caam_rsa_key key;
struct device *dev;
dma_addr_t padding_dma;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index ff9ddbbca377..bdf367f3f679 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
@@ -382,8 +383,8 @@ static void kick_trng(struct device *dev, int ent_delay)
val = ent_delay;
/* min. freq. count, equal to 1/4 of the entropy sample length */
wr_reg32(&r4tst->rtfrqmin, val >> 2);
- /* max. freq. count, equal to 16 times the entropy sample length */
- wr_reg32(&r4tst->rtfrqmax, val << 4);
+ /* disable maximum frequency count */
+ wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
}
wr_reg32(&r4tst->rtsdctl, (val << RTSDCTL_ENT_DLY_SHIFT) |
@@ -740,6 +741,109 @@ static int caam_ctrl_rng_init(struct device *dev)
return 0;
}
+/* Indicate if the internal state of the CAAM is lost during PM */
+static int caam_off_during_pm(void)
+{
+ bool not_off_during_pm = of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6qp") ||
+ of_machine_is_compatible("fsl,imx6dl");
+
+ return not_off_during_pm ? 0 : 1;
+}
+
+static void caam_state_save(struct device *dev)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ struct caam_ctl_state *state = &ctrlpriv->state;
+ struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+ u32 deco_inst, jr_inst;
+ int i;
+
+ state->mcr = rd_reg32(&ctrl->mcr);
+ state->scfgr = rd_reg32(&ctrl->scfgr);
+
+ deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+ CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT;
+ for (i = 0; i < deco_inst; i++) {
+ state->deco_mid[i].liodn_ms =
+ rd_reg32(&ctrl->deco_mid[i].liodn_ms);
+ state->deco_mid[i].liodn_ls =
+ rd_reg32(&ctrl->deco_mid[i].liodn_ls);
+ }
+
+ jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+ CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT;
+ for (i = 0; i < jr_inst; i++) {
+ state->jr_mid[i].liodn_ms =
+ rd_reg32(&ctrl->jr_mid[i].liodn_ms);
+ state->jr_mid[i].liodn_ls =
+ rd_reg32(&ctrl->jr_mid[i].liodn_ls);
+ }
+}
+
+static void caam_state_restore(const struct device *dev)
+{
+ const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ const struct caam_ctl_state *state = &ctrlpriv->state;
+ struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+ u32 deco_inst, jr_inst;
+ int i;
+
+ wr_reg32(&ctrl->mcr, state->mcr);
+ wr_reg32(&ctrl->scfgr, state->scfgr);
+
+ deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+ CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT;
+ for (i = 0; i < deco_inst; i++) {
+ wr_reg32(&ctrl->deco_mid[i].liodn_ms,
+ state->deco_mid[i].liodn_ms);
+ wr_reg32(&ctrl->deco_mid[i].liodn_ls,
+ state->deco_mid[i].liodn_ls);
+ }
+
+ jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+ CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT;
+ for (i = 0; i < jr_inst; i++) {
+ wr_reg32(&ctrl->jr_mid[i].liodn_ms,
+ state->jr_mid[i].liodn_ms);
+ wr_reg32(&ctrl->jr_mid[i].liodn_ls,
+ state->jr_mid[i].liodn_ls);
+ }
+
+ if (ctrlpriv->virt_en == 1)
+ clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
+ JRSTART_JR1_START | JRSTART_JR2_START |
+ JRSTART_JR3_START);
+}
+
+static int caam_ctrl_suspend(struct device *dev)
+{
+ const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+
+ if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en)
+ caam_state_save(dev);
+
+ return 0;
+}
+
+static int caam_ctrl_resume(struct device *dev)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en) {
+ caam_state_restore(dev);
+
+		/* HW and RNG will be reset, so the deinstantiation action can be removed */
+ devm_remove_action(dev, devm_deinstantiate_rng, dev);
+ ret = caam_ctrl_rng_init(dev);
+ }
+
+ return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(caam_ctrl_pm_ops, caam_ctrl_suspend, caam_ctrl_resume);
+
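DEFINE_SIMPLE_DEV_PM_OPS() builds a struct dev_pm_ops that reuses the same two callbacks for suspend/resume and their hibernation analogues, and pm_ptr() compiles the table away when CONFIG_PM is disabled. A minimal sketch of the wiring, with hypothetical foo_* names (the caam platform_driver below adopts this same pattern):

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev) { /* save state */ return 0; }
static int foo_resume(struct device *dev) { /* restore state */ return 0; }

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_ptr(&foo_pm_ops),	/* NULL when !CONFIG_PM */
	},
};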
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
@@ -771,6 +875,8 @@ static int caam_probe(struct platform_device *pdev)
caam_imx = (bool)imx_soc_match;
+ ctrlpriv->caam_off_during_pm = caam_imx && caam_off_during_pm();
+
if (imx_soc_match) {
/*
* Until Layerscape and i.MX OP-TEE get in sync,
@@ -1033,6 +1139,7 @@ static struct platform_driver caam_driver = {
.driver = {
.name = "caam",
.of_match_table = caam_match,
+ .pm = pm_ptr(&caam_ctrl_pm_ops),
},
.probe = caam_probe,
};
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index b4f7bf77f487..e51320150872 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -4,7 +4,7 @@
* Private/internal definitions between modules
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2019 NXP
+ * Copyright 2019, 2023 NXP
*/
#ifndef INTERN_H
@@ -47,6 +47,16 @@ struct caam_jrentry_info {
u32 desc_size; /* Stored size for postprocessing, header derived */
};
+struct caam_jr_state {
+ dma_addr_t inpbusaddr;
+ dma_addr_t outbusaddr;
+};
+
+struct caam_jr_dequeue_params {
+ struct device *dev;
+ int enable_itr;
+};
+
/* Private sub-storage for a single JobR */
struct caam_drv_private_jr {
struct list_head list_node; /* Job Ring device list */
@@ -54,6 +64,7 @@ struct caam_drv_private_jr {
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct tasklet_struct irqtask;
+ struct caam_jr_dequeue_params tasklet_params;
int irq; /* One per queue */
bool hwrng;
@@ -71,6 +82,15 @@ struct caam_drv_private_jr {
int tail; /* entinfo (s/w ring) tail index */
void *outring; /* Base of output ring, DMA-safe */
struct crypto_engine *engine;
+
+ struct caam_jr_state state; /* State of the JR during PM */
+};
+
+struct caam_ctl_state {
+ struct masterid deco_mid[16];
+ struct masterid jr_mid[4];
+ u32 mcr;
+ u32 scfgr;
};
/*
@@ -116,6 +136,9 @@ struct caam_drv_private {
struct dentry *ctl; /* controller dir */
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
#endif
+
+ int caam_off_during_pm; /* If the CAAM is reset after suspend */
+ struct caam_ctl_state state; /* State of the CTL during PM */
};
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 96dea5304d22..b1f1b393b98e 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -9,6 +9,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include "compat.h"
#include "ctrl.h"
@@ -117,6 +118,23 @@ static int caam_jr_flush(struct device *dev)
return caam_jr_stop_processing(dev, JRCR_RESET);
}
+/* Resume can be used after a park or a flush if the CAAM has not been reset */
+static int caam_jr_restart_processing(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ u32 halt_status = rd_reg32(&jrp->rregs->jrintstatus) &
+ JRINT_ERR_HALT_MASK;
+
+ /* Check that the flush/park is completed */
+ if (halt_status != JRINT_ERR_HALT_COMPLETE)
+ return -1;
+
+ /* Resume processing of jobs */
+ clrsetbits_32(&jrp->rregs->jrintstatus, 0, JRINT_ERR_HALT_COMPLETE);
+
+ return 0;
+}
+
static int caam_reset_hw_jr(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
@@ -215,7 +233,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
* tasklet if jobs done.
*/
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
- if (!irqstate)
+ if (!(irqstate & JRINT_JR_INT))
return IRQ_NONE;
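Testing the dedicated JRINT_JR_INT bit, instead of any nonzero status, keeps the handler from claiming an interrupt the job ring did not raise; after the suspend-path flush, jrintstatus can still hold halt bits while no job-ring interrupt is pending. This is also the usual discipline for shared IRQ lines. Illustrative shape, with hypothetical foo_* names and register offsets:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#define FOO_STATUS	0x00	/* assumed status register offset */
#define FOO_IRQ_PENDING	BIT(0)	/* assumed "our interrupt" bit */

struct foo_dev { void __iomem *regs; };

static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_dev *foo = data;
	u32 status = readl(foo->regs + FOO_STATUS);

	if (!(status & FOO_IRQ_PENDING))
		return IRQ_NONE;	/* not ours; let other sharers run */

	/* ... acknowledge and handle ... */
	return IRQ_HANDLED;
}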
/*
@@ -245,7 +263,8 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
static void caam_jr_dequeue(unsigned long devarg)
{
int hw_idx, sw_idx, i, head, tail;
- struct device *dev = (struct device *)devarg;
+ struct caam_jr_dequeue_params *params = (void *)devarg;
+ struct device *dev = params->dev;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
@@ -319,8 +338,9 @@ static void caam_jr_dequeue(unsigned long devarg)
outring_used--;
}
- /* reenable / unmask IRQs */
- clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+ if (params->enable_itr)
+ /* reenable / unmask IRQs */
+ clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
}
/**
@@ -445,8 +465,16 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
* Guarantee that the descriptor's DMA address has been written to
* the next slot in the ring before the write index is updated, since
* other cores may update this index independently.
+ *
+	 * Under heavy DDR load, smp_wmb() or dma_wmb() can fail to ensure
+	 * that the input ring is updated before the CAAM starts reading it.
+	 * The CAAM then processes a stale descriptor address again and puts
+	 * it in the output ring, which makes caam_jr_dequeue() fail because
+	 * that old descriptor is no longer in the software ring.
+	 * To fix this, use wmb(), which orders stores across the full
+	 * system rather than only the inner/outer shareable domains.
*/
- smp_wmb();
+ wmb();
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
@@ -470,6 +498,29 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
}
EXPORT_SYMBOL(caam_jr_enqueue);
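wmb() is the full-system write barrier: unlike smp_wmb(), which only orders stores as observed by other CPUs, it also orders them with respect to bus masters such as the CAAM's DMA engine. Simplified producer-side shape being protected (illustrative, condensed from the function above):

inpring[head] = cpu_to_caam_dma(desc_dma);	/* 1: publish descriptor */
wmb();						/* 2: order 1 before 3, device-visible */
wr_reg32(&regs->inpring_jobadd, 1);		/* 3: hand the job to hardware */

If step 2 does not order against the device, the CAAM may act on step 3 while still seeing a stale slot from step 1.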
+static void caam_jr_init_hw(struct device *dev, dma_addr_t inpbusaddr,
+ dma_addr_t outbusaddr)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+
+ wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
+ wr_reg64(&jrp->rregs->outring_base, outbusaddr);
+ wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
+ wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
+
+ /* Select interrupt coalescing parameters */
+ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
+ (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
+ (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
+}
+
+static void caam_jr_reset_index(struct caam_drv_private_jr *jrp)
+{
+ jrp->out_ring_read_index = 0;
+ jrp->head = 0;
+ jrp->tail = 0;
+}
+
/*
* Init JobR independent of platform property detection
*/
@@ -506,25 +557,16 @@ static int caam_jr_init(struct device *dev)
jrp->entinfo[i].desc_addr_dma = !0;
/* Setup rings */
- jrp->out_ring_read_index = 0;
- jrp->head = 0;
- jrp->tail = 0;
-
- wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
- wr_reg64(&jrp->rregs->outring_base, outbusaddr);
- wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
- wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
-
+ caam_jr_reset_index(jrp);
jrp->inpring_avail = JOBR_DEPTH;
+ caam_jr_init_hw(dev, inpbusaddr, outbusaddr);
spin_lock_init(&jrp->inplock);
- /* Select interrupt coalescing parameters */
- clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
- (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
- (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
-
- tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+ jrp->tasklet_params.dev = dev;
+ jrp->tasklet_params.enable_itr = 1;
+ tasklet_init(&jrp->irqtask, caam_jr_dequeue,
+ (unsigned long)&jrp->tasklet_params);
/* Connect job ring interrupt handler. */
error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED,
@@ -635,11 +677,134 @@ static int caam_jr_probe(struct platform_device *pdev)
atomic_set(&jrpriv->tfm_count, 0);
+ device_init_wakeup(&pdev->dev, 1);
+ device_set_wakeup_enable(&pdev->dev, false);
+
register_algs(jrpriv, jrdev->parent);
return 0;
}
+static void caam_jr_get_hw_state(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+
+ jrp->state.inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+ jrp->state.outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+}
+
+static int caam_jr_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);
+ struct caam_jr_dequeue_params suspend_params = {
+ .dev = dev,
+ .enable_itr = 0,
+ };
+
+	/* Remove the node from the physical JobR list maintained by the driver */
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_del(&jrpriv->list_node);
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ if (jrpriv->hwrng)
+ caam_rng_exit(dev->parent);
+
+ if (ctrlpriv->caam_off_during_pm) {
+ int err;
+
+ tasklet_disable(&jrpriv->irqtask);
+
+		/* Mask the interrupt so the flush can proceed */
+ clrsetbits_32(&jrpriv->rregs->rconfig_lo, 0, JRCFG_IMSK);
+
+		/* Invalidate any jobs in process */
+ err = caam_jr_flush(dev);
+ if (err) {
+ dev_err(dev, "Failed to flush\n");
+ return err;
+ }
+
+		/* Dequeue the flushed jobs */
+ caam_jr_dequeue((unsigned long)&suspend_params);
+
+ /* Save state */
+ caam_jr_get_hw_state(dev);
+ } else if (device_may_wakeup(&pdev->dev)) {
+ enable_irq_wake(jrpriv->irq);
+ }
+
+ return 0;
+}
+
+static int caam_jr_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);
+
+ if (ctrlpriv->caam_off_during_pm) {
+ u64 inp_addr;
+ int err;
+
+ /*
+		 * Check whether the CAAM has been reset by checking the
+		 * address of the input ring
+ */
+ inp_addr = rd_reg64(&jrpriv->rregs->inpring_base);
+ if (inp_addr != 0) {
+ /* JR still has some configuration */
+ if (inp_addr == jrpriv->state.inpbusaddr) {
+				/* JR has not been reset */
+ err = caam_jr_restart_processing(dev);
+ if (err) {
+ dev_err(dev,
+ "Restart processing failed\n");
+ return err;
+ }
+
+ tasklet_enable(&jrpriv->irqtask);
+
+ clrsetbits_32(&jrpriv->rregs->rconfig_lo,
+ JRCFG_IMSK, 0);
+
+ goto add_jr;
+ } else if (ctrlpriv->optee_en) {
+ /* JR has been used by OPTEE, reset it */
+ err = caam_reset_hw_jr(dev);
+ if (err) {
+ dev_err(dev, "Failed to reset JR\n");
+ return err;
+ }
+ } else {
+				/* Unexpected configuration, return an error */
+ return -EIO;
+ }
+ }
+
+ caam_jr_reset_index(jrpriv);
+ caam_jr_init_hw(dev, jrpriv->state.inpbusaddr,
+ jrpriv->state.outbusaddr);
+
+ tasklet_enable(&jrpriv->irqtask);
+ } else if (device_may_wakeup(&pdev->dev)) {
+ disable_irq_wake(jrpriv->irq);
+ }
+
+add_jr:
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ if (jrpriv->hwrng)
+ jrpriv->hwrng = !caam_rng_init(dev->parent);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(caam_jr_pm_ops, caam_jr_suspend, caam_jr_resume);
+
static const struct of_device_id caam_jr_match[] = {
{
.compatible = "fsl,sec-v4.0-job-ring",
@@ -655,6 +820,7 @@ static struct platform_driver caam_jr_driver = {
.driver = {
.name = "caam_jr",
.of_match_table = caam_jr_match,
+ .pm = pm_ptr(&caam_jr_pm_ops),
},
.probe = caam_jr_probe,
.remove = caam_jr_remove,
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 2ad2c1035856..46a083849a8e 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <soc/fsl/qman.h>
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 189e74c21f0c..873df9de9890 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -459,12 +459,6 @@ struct masterid {
u32 liodn_ls; /* LIODN for non-sequence and seq access */
};
-/* Partition ID for DMA configuration */
-struct partid {
- u32 rsvd1;
- u32 pidr; /* partition ID, DECO */
-};
-
/* RNGB test mode (replicated twice in some configurations) */
/* Padded out to 0x100 */
struct rngtst {
@@ -590,8 +584,7 @@ struct caam_ctrl {
u32 deco_rsr; /* DECORSR - Deco Request Source */
u32 rsvd11;
u32 deco_rq; /* DECORR - DECO Request */
- struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */
- u32 rsvd5[22];
+ struct masterid deco_mid[16]; /* DECOxLIODNR - 1 per DECO */
/* DECO Availability/Reset Section 120-3ff */
u32 deco_avail; /* DAR - DECO availability */
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index f6196495e862..aa0ba2d17e1e 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -11,7 +11,8 @@ ccp-$(CONFIG_PCI) += sp-pci.o
ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
sev-dev.o \
tee-dev.o \
- platform-access.o
+ platform-access.o \
+ dbc.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c
new file mode 100644
index 000000000000..839ea14b9a85
--- /dev/null
+++ b/drivers/crypto/ccp/dbc.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Processor Dynamic Boost Control interface
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include "dbc.h"
+
+struct error_map {
+ u32 psp;
+ int ret;
+};
+
+#define DBC_ERROR_ACCESS_DENIED 0x0001
+#define DBC_ERROR_EXCESS_DATA 0x0004
+#define DBC_ERROR_BAD_PARAMETERS 0x0006
+#define DBC_ERROR_BAD_STATE 0x0007
+#define DBC_ERROR_NOT_IMPLEMENTED 0x0009
+#define DBC_ERROR_BUSY 0x000D
+#define DBC_ERROR_MESSAGE_FAILURE 0x0307
+#define DBC_ERROR_OVERFLOW 0x300F
+#define DBC_ERROR_SIGNATURE_INVALID 0x3072
+
+static struct error_map error_codes[] = {
+ {DBC_ERROR_ACCESS_DENIED, -EACCES},
+ {DBC_ERROR_EXCESS_DATA, -E2BIG},
+ {DBC_ERROR_BAD_PARAMETERS, -EINVAL},
+ {DBC_ERROR_BAD_STATE, -EAGAIN},
+ {DBC_ERROR_MESSAGE_FAILURE, -ENOENT},
+ {DBC_ERROR_NOT_IMPLEMENTED, -ENOENT},
+ {DBC_ERROR_BUSY, -EBUSY},
+ {DBC_ERROR_OVERFLOW, -ENFILE},
+ {DBC_ERROR_SIGNATURE_INVALID, -EPERM},
+ {0x0, 0x0},
+};
+
+static int send_dbc_cmd(struct psp_dbc_device *dbc_dev,
+ enum psp_platform_access_msg msg)
+{
+ int ret;
+
+ dbc_dev->mbox->req.header.status = 0;
+ ret = psp_send_platform_access_msg(msg, (struct psp_request *)dbc_dev->mbox);
+ if (ret == -EIO) {
+ int i;
+
+ dev_dbg(dbc_dev->dev,
+ "msg 0x%x failed with PSP error: 0x%x\n",
+ msg, dbc_dev->mbox->req.header.status);
+
+ for (i = 0; error_codes[i].psp; i++) {
+ if (dbc_dev->mbox->req.header.status == error_codes[i].psp)
+ return error_codes[i].ret;
+ }
+ }
+
+ return ret;
+}
+
+static int send_dbc_nonce(struct psp_dbc_device *dbc_dev)
+{
+ int ret;
+
+ dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_nonce);
+ ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE);
+ if (ret == -EAGAIN) {
+ dev_dbg(dbc_dev->dev, "retrying get nonce\n");
+ ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE);
+ }
+
+ return ret;
+}
+
+static int send_dbc_parameter(struct psp_dbc_device *dbc_dev)
+{
+ dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_param);
+
+ switch (dbc_dev->mbox->dbc_param.user.msg_index) {
+ case PARAM_SET_FMAX_CAP:
+ case PARAM_SET_PWR_CAP:
+ case PARAM_SET_GFX_MODE:
+ return send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_PARAMETER);
+ case PARAM_GET_FMAX_CAP:
+ case PARAM_GET_PWR_CAP:
+ case PARAM_GET_CURR_TEMP:
+ case PARAM_GET_FMAX_MAX:
+ case PARAM_GET_FMAX_MIN:
+ case PARAM_GET_SOC_PWR_MAX:
+ case PARAM_GET_SOC_PWR_MIN:
+ case PARAM_GET_SOC_PWR_CUR:
+ case PARAM_GET_GFX_MODE:
+ return send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_PARAMETER);
+ }
+
+ return -EINVAL;
+}
+
+void dbc_dev_destroy(struct psp_device *psp)
+{
+ struct psp_dbc_device *dbc_dev = psp->dbc_data;
+
+ if (!dbc_dev)
+ return;
+
+ misc_deregister(&dbc_dev->char_dev);
+ mutex_destroy(&dbc_dev->ioctl_mutex);
+ psp->dbc_data = NULL;
+}
+
+static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct psp_device *psp_master = psp_get_master_device();
+ void __user *argp = (void __user *)arg;
+ struct psp_dbc_device *dbc_dev;
+ int ret;
+
+ if (!psp_master || !psp_master->dbc_data)
+ return -ENODEV;
+ dbc_dev = psp_master->dbc_data;
+
+ mutex_lock(&dbc_dev->ioctl_mutex);
+
+ switch (cmd) {
+ case DBCIOCNONCE:
+ if (copy_from_user(&dbc_dev->mbox->dbc_nonce.user, argp,
+ sizeof(struct dbc_user_nonce))) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ ret = send_dbc_nonce(dbc_dev);
+ if (ret)
+ goto unlock;
+
+ if (copy_to_user(argp, &dbc_dev->mbox->dbc_nonce.user,
+ sizeof(struct dbc_user_nonce))) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+ break;
+ case DBCIOCUID:
+ dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_set_uid);
+ if (copy_from_user(&dbc_dev->mbox->dbc_set_uid.user, argp,
+ sizeof(struct dbc_user_setuid))) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID);
+ if (ret)
+ goto unlock;
+
+ if (copy_to_user(argp, &dbc_dev->mbox->dbc_set_uid.user,
+ sizeof(struct dbc_user_setuid))) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+ break;
+ case DBCIOCPARAM:
+ if (copy_from_user(&dbc_dev->mbox->dbc_param.user, argp,
+ sizeof(struct dbc_user_param))) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ ret = send_dbc_parameter(dbc_dev);
+ if (ret)
+ goto unlock;
+
+ if (copy_to_user(argp, &dbc_dev->mbox->dbc_param.user,
+ sizeof(struct dbc_user_param))) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+
+ }
+unlock:
+ mutex_unlock(&dbc_dev->ioctl_mutex);
+
+ return ret;
+}
+
+static const struct file_operations dbc_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = dbc_ioctl,
+};
+
+int dbc_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct psp_dbc_device *dbc_dev;
+ int ret;
+
+ if (!PSP_FEATURE(psp, DBC))
+ return 0;
+
+ dbc_dev = devm_kzalloc(dev, sizeof(*dbc_dev), GFP_KERNEL);
+ if (!dbc_dev)
+ return -ENOMEM;
+
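+	/* The whole mailbox union must fit in the single page allocated below. */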
+ BUILD_BUG_ON(sizeof(union dbc_buffer) > PAGE_SIZE);
+ dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
+ if (!dbc_dev->mbox) {
+ ret = -ENOMEM;
+ goto cleanup_dev;
+ }
+
+ psp->dbc_data = dbc_dev;
+ dbc_dev->dev = dev;
+
+ ret = send_dbc_nonce(dbc_dev);
+ if (ret == -EACCES) {
+ dev_dbg(dbc_dev->dev,
+ "dynamic boost control was previously authenticated\n");
+ ret = 0;
+ }
+ dev_dbg(dbc_dev->dev, "dynamic boost control is %savailable\n",
+ ret ? "un" : "");
+ if (ret) {
+ ret = 0;
+ goto cleanup_mbox;
+ }
+
+ dbc_dev->char_dev.minor = MISC_DYNAMIC_MINOR;
+ dbc_dev->char_dev.name = "dbc";
+ dbc_dev->char_dev.fops = &dbc_fops;
+ dbc_dev->char_dev.mode = 0600;
+ ret = misc_register(&dbc_dev->char_dev);
+ if (ret)
+ goto cleanup_mbox;
+
+ mutex_init(&dbc_dev->ioctl_mutex);
+
+ return 0;
+
+cleanup_mbox:
+ devm_free_pages(dev, (unsigned long)dbc_dev->mbox);
+
+cleanup_dev:
+ psp->dbc_data = NULL;
+ devm_kfree(dev, dbc_dev);
+
+ return ret;
+}
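
A minimal userspace sketch (illustration only, not part of the patch) of exercising the new /dev/dbc character device; it assumes nothing beyond the DBCIOCNONCE ioctl and struct dbc_user_nonce from the uapi header <linux/psp-dbc.h> referenced by this series:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/psp-dbc.h>

int main(void)
{
	struct dbc_user_nonce nonce;
	int fd, ret;

	fd = open("/dev/dbc", O_RDWR);
	if (fd < 0) {
		perror("open /dev/dbc");
		return 1;
	}

	memset(&nonce, 0, sizeof(nonce));
	/* The driver fills in the nonce; see the DBCIOCNONCE handling above. */
	ret = ioctl(fd, DBCIOCNONCE, &nonce);
	if (ret)
		perror("DBCIOCNONCE");

	close(fd);
	return ret ? 1 : 0;
}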
diff --git a/drivers/crypto/ccp/dbc.h b/drivers/crypto/ccp/dbc.h
new file mode 100644
index 000000000000..e963099ca38e
--- /dev/null
+++ b/drivers/crypto/ccp/dbc.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) Dynamic Boost Control support
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef __DBC_H__
+#define __DBC_H__
+
+#include <uapi/linux/psp-dbc.h>
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/psp-platform-access.h>
+
+#include "psp-dev.h"
+
+struct psp_dbc_device {
+ struct device *dev;
+
+ union dbc_buffer *mbox;
+
+ struct mutex ioctl_mutex;
+
+ struct miscdevice char_dev;
+};
+
+struct dbc_nonce {
+ struct psp_req_buffer_hdr header;
+ struct dbc_user_nonce user;
+} __packed;
+
+struct dbc_set_uid {
+ struct psp_req_buffer_hdr header;
+ struct dbc_user_setuid user;
+} __packed;
+
+struct dbc_param {
+ struct psp_req_buffer_hdr header;
+ struct dbc_user_param user;
+} __packed;
+
+union dbc_buffer {
+ struct psp_request req;
+ struct dbc_nonce dbc_nonce;
+ struct dbc_set_uid dbc_set_uid;
+ struct dbc_param dbc_param;
+};
+
+void dbc_dev_destroy(struct psp_device *psp);
+int dbc_dev_init(struct psp_device *psp);
+
+#endif /* __DBC_H__ */
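
A standalone illustration of the mailbox layout above: every request type overlays one buffer through union dbc_buffer, so its largest member bounds the one-page allocation checked by the BUILD_BUG_ON() in dbc.c. The struct layouts here are simplified stand-ins, not the real uapi definitions:

#include <stdio.h>

struct hdr { unsigned int payload_size; unsigned int status; };
struct nonce_msg { struct hdr header; unsigned char nonce[64]; };
struct param_msg { struct hdr header; unsigned int msg_index; unsigned int param; };

union mbox {
	struct nonce_msg nonce;
	struct param_msg param;
};

int main(void)
{
	/* sizeof(union) equals the size of its largest member */
	printf("union=%zu nonce=%zu param=%zu\n", sizeof(union mbox),
	       sizeof(struct nonce_msg), sizeof(struct param_msg));
	return 0;
}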
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index e3d6955d3265..d42d7bc62352 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -15,6 +15,7 @@
#include "sev-dev.h"
#include "tee-dev.h"
#include "platform-access.h"
+#include "dbc.h"
struct psp_device *psp_master;
@@ -112,6 +113,12 @@ static void psp_init_platform_access(struct psp_device *psp)
dev_warn(psp->dev, "platform access init failed: %d\n", ret);
return;
}
+
+ /* dbc must come after platform access as it tests the feature */
+ ret = dbc_dev_init(psp);
+ if (ret)
+ dev_warn(psp->dev, "failed to init dynamic boost control: %d\n",
+ ret);
}
static int psp_init(struct psp_device *psp)
@@ -173,13 +180,14 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
+ /* master device must be set for platform access */
+ if (psp->sp->set_psp_master_device)
+ psp->sp->set_psp_master_device(psp->sp);
+
ret = psp_init(psp);
if (ret)
goto e_irq;
- if (sp->set_psp_master_device)
- sp->set_psp_master_device(sp);
-
/* Enable interrupt */
iowrite32(-1, psp->io_regs + psp->vdata->inten_reg);
@@ -188,6 +196,9 @@ int psp_dev_init(struct sp_device *sp)
return 0;
e_irq:
+ if (sp->clear_psp_master_device)
+ sp->clear_psp_master_device(sp);
+
sp_free_psp_irq(psp->sp, psp);
e_err:
sp->psp_data = NULL;
@@ -213,6 +224,8 @@ void psp_dev_destroy(struct sp_device *sp)
tee_dev_destroy(psp);
+ dbc_dev_destroy(psp);
+
platform_access_dev_destroy(psp);
sp_free_psp_irq(sp, psp);
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index 505e4bdeaca8..8a4de69399c5 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -40,6 +40,7 @@ struct psp_device {
void *sev_data;
void *tee_data;
void *platform_access_data;
+ void *dbc_data;
unsigned int capability;
};
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 1253a0217985..2329ad524b49 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -28,6 +28,10 @@
#define CACHE_NONE 0x00
#define CACHE_WB_NO_ALLOC 0xb7
+#define PLATFORM_FEATURE_DBC 0x1
+
+#define PSP_FEATURE(psp, feat) (psp->vdata && psp->vdata->platform_features & PLATFORM_FEATURE_##feat)
+
/* Structure to hold CCP device data */
struct ccp_device;
struct ccp_vdata {
@@ -51,6 +55,7 @@ struct tee_vdata {
const unsigned int cmdbuff_addr_hi_reg;
const unsigned int ring_wptr_reg;
const unsigned int ring_rptr_reg;
+ const unsigned int info_reg;
};
struct platform_access_vdata {
@@ -69,6 +74,8 @@ struct psp_vdata {
const unsigned int feature_reg;
const unsigned int inten_reg;
const unsigned int intsts_reg;
+ const unsigned int bootloader_info_reg;
+ const unsigned int platform_features;
};
/* Structure to hold SP device data */
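
For reference, a standalone sketch of the token-pasting feature test that the new PSP_FEATURE() helper performs, with the kernel types replaced by local stand-ins so the example builds on its own:

#include <stdio.h>

#define PLATFORM_FEATURE_DBC 0x1

struct vdata { unsigned int platform_features; };
struct psp_dev { const struct vdata *vdata; };

#define PSP_FEATURE(psp, feat) \
	((psp)->vdata && ((psp)->vdata->platform_features & PLATFORM_FEATURE_##feat))

int main(void)
{
	const struct vdata v = { .platform_features = PLATFORM_FEATURE_DBC };
	struct psp_dev with = { .vdata = &v }, without = { .vdata = NULL };

	/* prints "1 0": the feature bit is only tested when vdata exists */
	printf("%d %d\n", PSP_FEATURE(&with, DBC), PSP_FEATURE(&without, DBC));
	return 0;
}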
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index b603ad9b8341..b6ab56abeb68 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -8,6 +8,7 @@
* Author: Gary R Hook <gary.hook@amd.com>
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -24,6 +25,12 @@
#include "ccp-dev.h"
#include "psp-dev.h"
+/* used for version string AA.BB.CC.DD */
+#define AA GENMASK(31, 24)
+#define BB GENMASK(23, 16)
+#define CC GENMASK(15, 8)
+#define DD GENMASK(7, 0)
+
#define MSIX_VECTORS 2
struct sp_pci {
@@ -32,7 +39,7 @@ struct sp_pci {
};
static struct sp_device *sp_dev_master;
-#define attribute_show(name, def) \
+#define security_attribute_show(name, def) \
static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
char *buf) \
{ \
@@ -42,24 +49,24 @@ static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0); \
}
-attribute_show(fused_part, FUSED_PART)
+security_attribute_show(fused_part, FUSED_PART)
static DEVICE_ATTR_RO(fused_part);
-attribute_show(debug_lock_on, DEBUG_LOCK_ON)
+security_attribute_show(debug_lock_on, DEBUG_LOCK_ON)
static DEVICE_ATTR_RO(debug_lock_on);
-attribute_show(tsme_status, TSME_STATUS)
+security_attribute_show(tsme_status, TSME_STATUS)
static DEVICE_ATTR_RO(tsme_status);
-attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS)
+security_attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS)
static DEVICE_ATTR_RO(anti_rollback_status);
-attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED)
+security_attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED)
static DEVICE_ATTR_RO(rpmc_production_enabled);
-attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE)
+security_attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE)
static DEVICE_ATTR_RO(rpmc_spirom_available);
-attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE)
+security_attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE)
static DEVICE_ATTR_RO(hsp_tpm_available);
-attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED)
+security_attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED)
static DEVICE_ATTR_RO(rom_armor_enforced);
-static struct attribute *psp_attrs[] = {
+static struct attribute *psp_security_attrs[] = {
&dev_attr_fused_part.attr,
&dev_attr_debug_lock_on.attr,
&dev_attr_tsme_status.attr,
@@ -83,13 +90,70 @@ static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *a
return 0;
}
-static struct attribute_group psp_attr_group = {
- .attrs = psp_attrs,
+static struct attribute_group psp_security_attr_group = {
+ .attrs = psp_security_attrs,
.is_visible = psp_security_is_visible,
};
+#define version_attribute_show(name, _offset) \
+static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct sp_device *sp = dev_get_drvdata(d); \
+ struct psp_device *psp = sp->psp_data; \
+ unsigned int val = ioread32(psp->io_regs + _offset); \
+ return sysfs_emit(buf, "%02lx.%02lx.%02lx.%02lx\n", \
+ FIELD_GET(AA, val), \
+ FIELD_GET(BB, val), \
+ FIELD_GET(CC, val), \
+ FIELD_GET(DD, val)); \
+}
+
+version_attribute_show(bootloader_version, psp->vdata->bootloader_info_reg)
+static DEVICE_ATTR_RO(bootloader_version);
+version_attribute_show(tee_version, psp->vdata->tee->info_reg)
+static DEVICE_ATTR_RO(tee_version);
+
+static struct attribute *psp_firmware_attrs[] = {
+ &dev_attr_bootloader_version.attr,
+ &dev_attr_tee_version.attr,
+ NULL,
+};
+
+static umode_t psp_firmware_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sp_device *sp = dev_get_drvdata(dev);
+ struct psp_device *psp = sp->psp_data;
+ unsigned int val = 0xffffffff;
+
+ if (!psp)
+ return 0;
+
+ if (attr == &dev_attr_bootloader_version.attr &&
+ psp->vdata->bootloader_info_reg)
+ val = ioread32(psp->io_regs + psp->vdata->bootloader_info_reg);
+
+ if (attr == &dev_attr_tee_version.attr &&
+ psp->capability & PSP_CAPABILITY_TEE &&
+ psp->vdata->tee->info_reg)
+ val = ioread32(psp->io_regs + psp->vdata->tee->info_reg);
+
+ /* If platform disallows accessing this register it will be all f's */
+ if (val != 0xffffffff)
+ return 0444;
+
+ return 0;
+}
+
+static struct attribute_group psp_firmware_attr_group = {
+ .attrs = psp_firmware_attrs,
+ .is_visible = psp_firmware_is_visible,
+};
+
static const struct attribute_group *psp_groups[] = {
- &psp_attr_group,
+ &psp_security_attr_group,
+ &psp_firmware_attr_group,
NULL,
};
@@ -359,6 +423,7 @@ static const struct tee_vdata teev1 = {
.cmdbuff_addr_hi_reg = 0x1054c, /* C2PMSG_19 */
.ring_wptr_reg = 0x10550, /* C2PMSG_20 */
.ring_rptr_reg = 0x10554, /* C2PMSG_21 */
+ .info_reg = 0x109e8, /* C2PMSG_58 */
};
static const struct tee_vdata teev2 = {
@@ -384,6 +449,7 @@ static const struct platform_access_vdata pa_v2 = {
static const struct psp_vdata pspv1 = {
.sev = &sevv1,
+ .bootloader_info_reg = 0x105ec, /* C2PMSG_59 */
.feature_reg = 0x105fc, /* C2PMSG_63 */
.inten_reg = 0x10610, /* P2CMSG_INTEN */
.intsts_reg = 0x10614, /* P2CMSG_INTSTS */
@@ -391,6 +457,7 @@ static const struct psp_vdata pspv1 = {
static const struct psp_vdata pspv2 = {
.sev = &sevv2,
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
@@ -399,14 +466,17 @@ static const struct psp_vdata pspv2 = {
static const struct psp_vdata pspv3 = {
.tee = &teev1,
.platform_access = &pa_v1,
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
+ .platform_features = PLATFORM_FEATURE_DBC,
};
static const struct psp_vdata pspv4 = {
.sev = &sevv2,
.tee = &teev1,
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
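
A worked example of the AA.BB.CC.DD decoding done by version_attribute_show() above, with GENMASK()/FIELD_GET() open-coded as shifts so the sketch builds outside the kernel:

#include <stdio.h>

int main(void)
{
	unsigned int val = 0x01020304;	/* sample bootloader_info_reg value */

	printf("%02x.%02x.%02x.%02x\n",
	       (val >> 24) & 0xff,	/* AA = GENMASK(31, 24) */
	       (val >> 16) & 0xff,	/* BB = GENMASK(23, 16) */
	       (val >> 8) & 0xff,	/* CC = GENMASK(15, 8) */
	       val & 0xff);		/* DD = GENMASK(7, 0) */
	return 0;	/* prints 01.02.03.04 */
}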
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index c57f929805d5..0f0694037dd7 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -14,7 +14,6 @@
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include "cc_driver.h"
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 0eade4fa6695..16298ae4a00b 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -2216,7 +2216,8 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
memcpy(hmacctx->ipad, key, keylen);
}
memset(hmacctx->ipad + keylen, 0, bs - keylen);
- memcpy(hmacctx->opad, hmacctx->ipad, bs);
+ unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs,
+ "fortified memcpy causes -Wrestrict warning");
for (i = 0; i < bs / sizeof(int); i++) {
*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index f7c8bb95a71b..5e9d568131fe 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -133,7 +133,6 @@ int start_crypto(void);
int stop_crypto(void);
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *pgl);
-int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 7f88ddb08631..1d693b8436e6 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -344,7 +344,6 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
struct cipher_wr_param *wrparam,
unsigned short qid);
-int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
struct hash_wr_param *param);
int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index cbd8ca6e52ee..5d60a4bcb511 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -15,7 +15,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <crypto/internal/rng.h>
@@ -277,7 +277,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
if (!rng)
return -ENOMEM;
- rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev);
+ rng->type = (uintptr_t)of_device_get_match_data(&pdev->dev);
mutex_init(&rng->lock);
diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c
index 14d0d83d388d..49dce9e0a834 100644
--- a/drivers/crypto/gemini/sl3516-ce-cipher.c
+++ b/drivers/crypto/gemini/sl3516-ce-cipher.c
@@ -8,13 +8,17 @@
* ECB mode.
*/
-#include <linux/crypto.h>
+#include <crypto/engine.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/pm_runtime.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/skcipher.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "sl3516-ce.h"
/* sl3516_ce_need_fallback - check if a request can be handled by the CE */
@@ -105,7 +109,7 @@ static int sl3516_ce_cipher_fallback(struct skcipher_request *areq)
struct sl3516_ce_alg_template *algt;
int err;
- algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
algt->stat_fb++;
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
@@ -136,7 +140,7 @@ static int sl3516_ce_cipher(struct skcipher_request *areq)
int err = 0;
int i;
- algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
crypto_tfm_alg_name(areq->base.tfm),
@@ -258,7 +262,7 @@ theend:
return err;
}
-static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
+int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
int err;
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
@@ -318,7 +322,7 @@ int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
memset(op, 0, sizeof(struct sl3516_ce_cipher_tfm_ctx));
- algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
op->ce = algt->ce;
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -335,10 +339,6 @@ int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
crypto_tfm_alg_driver_name(&sktfm->base),
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
- op->enginectx.op.do_one_request = sl3516_ce_handle_cipher_request;
- op->enginectx.op.prepare_request = NULL;
- op->enginectx.op.unprepare_request = NULL;
-
err = pm_runtime_get_sync(op->ce->dev);
if (err < 0)
goto error_pm;
diff --git a/drivers/crypto/gemini/sl3516-ce-core.c b/drivers/crypto/gemini/sl3516-ce-core.c
index b7524b649068..0f43c6e39bb9 100644
--- a/drivers/crypto/gemini/sl3516-ce-core.c
+++ b/drivers/crypto/gemini/sl3516-ce-core.c
@@ -6,22 +6,24 @@
*
* Core file which registers crypto algorithms supported by the CryptoEngine
*/
+
+#include <crypto/engine.h>
+#include <crypto/internal/rng.h>
+#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <crypto/internal/rng.h>
-#include <crypto/internal/skcipher.h>
#include "sl3516-ce.h"
@@ -217,7 +219,7 @@ static struct sl3516_ce_alg_template ce_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.mode = ECB_AES,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-sl3516",
@@ -236,11 +238,13 @@ static struct sl3516_ce_alg_template ce_algs[] = {
.setkey = sl3516_ce_aes_setkey,
.encrypt = sl3516_ce_skencrypt,
.decrypt = sl3516_ce_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sl3516_ce_handle_cipher_request,
+ },
},
};
-#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
{
struct sl3516_ce_dev *ce = seq->private;
@@ -264,8 +268,8 @@ static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
- ce_algs[i].alg.skcipher.base.cra_driver_name,
- ce_algs[i].alg.skcipher.base.cra_name,
+ ce_algs[i].alg.skcipher.base.base.cra_driver_name,
+ ce_algs[i].alg.skcipher.base.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
break;
}
@@ -274,7 +278,6 @@ static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs);
-#endif
static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
{
@@ -286,11 +289,11 @@ static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(ce->dev, "DEBUG: Register %s\n",
- ce_algs[i].alg.skcipher.base.cra_name);
- err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
+ err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher);
if (err) {
dev_err(ce->dev, "Fail to register %s\n",
- ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
ce_algs[i].ce = NULL;
return err;
}
@@ -313,8 +316,8 @@ static void sl3516_ce_unregister_algs(struct sl3516_ce_dev *ce)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(ce->dev, "Unregister %d %s\n", i,
- ce_algs[i].alg.skcipher.base.cra_name);
- crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
+ crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher);
break;
}
}
@@ -473,13 +476,20 @@ static int sl3516_ce_probe(struct platform_device *pdev)
pm_runtime_put_sync(ce->dev);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SL3516_DEBUG)) {
+ struct dentry *dbgfs_dir __maybe_unused;
+ struct dentry *dbgfs_stats __maybe_unused;
+
+		/* Ignore debugfs errors */
+ dbgfs_dir = debugfs_create_dir("sl3516", NULL);
+ dbgfs_stats = debugfs_create_file("stats", 0444,
+ dbgfs_dir, ce,
+ &sl3516_ce_debugfs_fops);
#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
- /* Ignore error of debugfs */
- ce->dbgfs_dir = debugfs_create_dir("sl3516", NULL);
- ce->dbgfs_stats = debugfs_create_file("stats", 0444,
- ce->dbgfs_dir, ce,
- &sl3516_ce_debugfs_fops);
+ ce->dbgfs_dir = dbgfs_dir;
+ ce->dbgfs_stats = dbgfs_stats;
#endif
+ }
return 0;
error_pmuse:
diff --git a/drivers/crypto/gemini/sl3516-ce.h b/drivers/crypto/gemini/sl3516-ce.h
index 4c0ec6c920d1..9e1a7e7f8961 100644
--- a/drivers/crypto/gemini/sl3516-ce.h
+++ b/drivers/crypto/gemini/sl3516-ce.h
@@ -17,7 +17,6 @@
#include <crypto/engine.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
-#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/hw_random.h>
@@ -292,16 +291,12 @@ struct sl3516_ce_cipher_req_ctx {
/*
* struct sl3516_ce_cipher_tfm_ctx - context for a skcipher TFM
- * @enginectx: crypto_engine used by this TFM
* @key: pointer to key data
* @keylen: len of the key
* @ce: pointer to the private data of driver handling this TFM
* @fallback_tfm: pointer to the fallback TFM
- *
- * enginectx must be the first element
*/
struct sl3516_ce_cipher_tfm_ctx {
- struct crypto_engine_ctx enginectx;
u32 *key;
u32 keylen;
struct sl3516_ce_dev *ce;
@@ -324,7 +319,7 @@ struct sl3516_ce_alg_template {
u32 mode;
struct sl3516_ce_dev *ce;
union {
- struct skcipher_alg skcipher;
+ struct skcipher_engine_alg skcipher;
} alg;
unsigned long stat_req;
unsigned long stat_fb;
@@ -345,3 +340,4 @@ int sl3516_ce_run_task(struct sl3516_ce_dev *ce,
int sl3516_ce_rng_register(struct sl3516_ce_dev *ce);
void sl3516_ce_rng_unregister(struct sl3516_ce_dev *ce);
+int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq);
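
The gemini changes above and the keembay changes further below follow the same crypto_engine API conversion: the per-transform struct crypto_engine_ctx, which had to be the first member of the TFM context, is gone, and the do_one_request handler now lives in the *_engine_alg wrapper. A minimal sketch of the new-style registration for a hypothetical driver (the my_* names are placeholders, declared but not implemented here):

#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

struct my_tfm_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	unsigned int keylen;
};

static int my_setkey(struct crypto_skcipher *tfm, const u8 *key,
		     unsigned int keylen);
static int my_encrypt(struct skcipher_request *req);
static int my_decrypt(struct skcipher_request *req);

static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);

	/* ...drive the hardware, then report completion to the engine... */
	crypto_finalize_skcipher_request(engine, req, 0);
	return 0;
}

static struct skcipher_engine_alg my_alg = {
	.base.base = {
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-mydriver",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct my_tfm_ctx),
		.cra_module		= THIS_MODULE,
	},
	.base.min_keysize	= AES_MIN_KEY_SIZE,
	.base.max_keysize	= AES_MAX_KEY_SIZE,
	.base.setkey		= my_setkey,
	.base.encrypt		= my_encrypt,
	.base.decrypt		= my_decrypt,
	/* ops moved here from the old per-TFM crypto_engine_ctx */
	.op.do_one_request	= my_do_one_request,
};

/* registered with crypto_engine_register_skcipher(&my_alg) */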
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 8ede77310dc5..9a1c61be32cc 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1392,9 +1392,9 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ unsigned int sz, sz_shift, curve_sz;
struct device *dev = ctx->dev;
char key[HPRE_ECC_MAX_KSZ];
- unsigned int sz, sz_shift;
struct ecdh params;
int ret;
@@ -1406,7 +1406,13 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
/* Use stdrng to generate private key */
if (!params.key || !params.key_size) {
params.key = key;
- params.key_size = hpre_ecdh_get_curvesz(ctx->curve_id);
+ curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
+ if (!curve_sz) {
+ dev_err(dev, "Invalid curve size!\n");
+ return -EINVAL;
+ }
+
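+		/* One byte less than the curve size keeps the generated key below the curve order. */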
+ params.key_size = curve_sz - 1;
ret = ecdh_gen_privkey(ctx, &params);
if (ret)
return ret;
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 5d0adfb54a34..39297ce70f44 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -209,7 +209,7 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
{HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
{HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
- {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xFFFFFE},
+ {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE},
{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
@@ -276,6 +276,9 @@ static const struct hpre_hw_error hpre_hw_errors[] = {
.int_msk = BIT(23),
.msg = "sva_fsm_timeout_int_set"
}, {
+ .int_msk = BIT(24),
+ .msg = "sva_int_set"
+ }, {
/* sentinel */
}
};
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index edc6fd44e7ca..a99fd589445c 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -88,6 +88,8 @@
#define QM_DB_PRIORITY_SHIFT_V1 48
#define QM_PAGE_SIZE 0x0034
#define QM_QP_DB_INTERVAL 0x10000
+#define QM_DB_TIMEOUT_CFG 0x100074
+#define QM_DB_TIMEOUT_SET 0x1fffff
#define QM_MEM_START_INIT 0x100040
#define QM_MEM_INIT_DONE 0x100044
@@ -954,6 +956,11 @@ static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
if (!val)
return IRQ_NONE;
+ if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) {
+ dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n");
+ return IRQ_HANDLED;
+ }
+
schedule_work(&qm->cmd_process);
return IRQ_HANDLED;
@@ -997,7 +1004,7 @@ static void qm_reset_function(struct hisi_qm *qm)
return;
}
- ret = hisi_qm_stop(qm, QM_FLR);
+ ret = hisi_qm_stop(qm, QM_DOWN);
if (ret) {
dev_err(dev, "failed to stop qm when reset function\n");
goto clear_bit;
@@ -2743,6 +2750,9 @@ void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
test_bit(QM_RESETTING, &qm->misc_ctl))
msleep(WAIT_PERIOD);
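+	/* Drain any mailbox command work queued by qm_mb_cmd_irq() before the device goes away */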
+ if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
+ flush_work(&qm->cmd_process);
+
udelay(REMOVE_WAIT_DELAY);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
@@ -3243,7 +3253,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
}
if (qm->status.stop_reason == QM_SOFT_RESET ||
- qm->status.stop_reason == QM_FLR) {
+ qm->status.stop_reason == QM_DOWN) {
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
ret = qm_stop_started_qp(qm);
if (ret < 0) {
@@ -4539,11 +4549,11 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
if (qm->fun_type == QM_HW_PF)
qm_cmd_uninit(qm);
- ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR);
+ ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN);
if (ret)
pci_err(pdev, "failed to stop vfs by pf in FLR.\n");
- ret = hisi_qm_stop(qm, QM_FLR);
+ ret = hisi_qm_stop(qm, QM_DOWN);
if (ret) {
pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
@@ -4641,9 +4651,11 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev)
struct hisi_qm *qm = pci_get_drvdata(pdev);
int ret;
- ret = hisi_qm_stop(qm, QM_NORMAL);
+ ret = hisi_qm_stop(qm, QM_DOWN);
if (ret)
dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
+
+ hisi_qm_cache_wb(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
@@ -4807,7 +4819,7 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_PF_FLR_PREPARE:
- qm_pf_reset_vf_process(qm, QM_FLR);
+ qm_pf_reset_vf_process(qm, QM_DOWN);
break;
case QM_PF_SRST_PREPARE:
qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
@@ -5371,6 +5383,8 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_pci_init;
if (qm->fun_type == QM_HW_PF) {
+		/* Set the doorbell timeout to QM_DB_TIMEOUT_SET ns. */
+ writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
qm_disable_clock_gate(qm);
ret = qm_dev_mem_reset(qm);
if (ret) {
@@ -5538,6 +5552,8 @@ static int qm_rebuild_for_resume(struct hisi_qm *qm)
qm_cmd_init(qm);
hisi_qm_dev_err_init(qm);
+	/* Set the doorbell timeout to QM_DB_TIMEOUT_SET ns. */
+ writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
qm_disable_clock_gate(qm);
ret = qm_dev_mem_reset(qm);
if (ret)
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index e75851326c1e..e1e08993de12 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -1107,8 +1107,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
}
queue->task_irq = platform_get_irq(to_platform_device(dev),
queue->queue_id * 2 + 1);
- if (queue->task_irq <= 0) {
- ret = -EINVAL;
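+	/* platform_get_irq() never returns 0 and gives a negative errno on failure, so propagate it */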
+ if (queue->task_irq < 0) {
+ ret = queue->task_irq;
goto err_free_ring_db;
}
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 359aa2b41016..45063693859c 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -13,7 +13,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
@@ -1105,7 +1105,7 @@ static struct platform_driver img_hash_driver = {
.driver = {
.name = "img-hash-accelerator",
.pm = &img_hash_pm_ops,
- .of_match_table = of_match_ptr(img_hash_match),
+ .of_match_table = img_hash_match,
}
};
module_platform_driver(img_hash_driver);
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
index ae31be00357a..1e2fd9a754ec 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
@@ -5,24 +5,23 @@
* Copyright (C) 2018-2020 Intel Corporation
*/
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/clk.h>
#include <linux/completion.h>
-#include <linux/crypto.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/types.h>
-
-#include <crypto/aes.h>
-#include <crypto/engine.h>
-#include <crypto/gcm.h>
-#include <crypto/scatterwalk.h>
-
-#include <crypto/internal/aead.h>
-#include <crypto/internal/skcipher.h>
+#include <linux/string.h>
#include "ocs-aes.h"
@@ -38,7 +37,6 @@
/**
* struct ocs_aes_tctx - OCS AES Transform context
- * @engine_ctx: Engine context.
* @aes_dev: The OCS AES device.
* @key: AES/SM4 key.
* @key_len: The length (in bytes) of @key.
@@ -47,7 +45,6 @@
* @use_fallback: Whether or not fallback cipher should be used.
*/
struct ocs_aes_tctx {
- struct crypto_engine_ctx engine_ctx;
struct ocs_aes_dev *aes_dev;
u8 key[OCS_AES_KEYSIZE_256];
unsigned int key_len;
@@ -1148,15 +1145,6 @@ static int kmb_ocs_sm4_ccm_decrypt(struct aead_request *req)
return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CCM);
}
-static inline int ocs_common_init(struct ocs_aes_tctx *tctx)
-{
- tctx->engine_ctx.op.prepare_request = NULL;
- tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_sk_do_one_request;
- tctx->engine_ctx.op.unprepare_request = NULL;
-
- return 0;
-}
-
static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
{
const char *alg_name = crypto_tfm_alg_name(&tfm->base);
@@ -1172,16 +1160,14 @@ static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
- return ocs_common_init(tctx);
+ return 0;
}
static int ocs_sm4_init_tfm(struct crypto_skcipher *tfm)
{
- struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
-
crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
- return ocs_common_init(tctx);
+ return 0;
}
static inline void clear_key(struct ocs_aes_tctx *tctx)
@@ -1206,15 +1192,6 @@ static void ocs_exit_tfm(struct crypto_skcipher *tfm)
}
}
-static inline int ocs_common_aead_init(struct ocs_aes_tctx *tctx)
-{
- tctx->engine_ctx.op.prepare_request = NULL;
- tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_aead_do_one_request;
- tctx->engine_ctx.op.unprepare_request = NULL;
-
- return 0;
-}
-
static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
{
const char *alg_name = crypto_tfm_alg_name(&tfm->base);
@@ -1233,7 +1210,7 @@ static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
(sizeof(struct aead_request) +
crypto_aead_reqsize(tctx->sw_cipher.aead))));
- return ocs_common_aead_init(tctx);
+ return 0;
}
static int kmb_ocs_aead_ccm_setauthsize(struct crypto_aead *tfm,
@@ -1261,11 +1238,9 @@ static int kmb_ocs_aead_gcm_setauthsize(struct crypto_aead *tfm,
static int ocs_sm4_aead_cra_init(struct crypto_aead *tfm)
{
- struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
-
crypto_aead_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
- return ocs_common_aead_init(tctx);
+ return 0;
}
static void ocs_aead_cra_exit(struct crypto_aead *tfm)
@@ -1280,182 +1255,190 @@ static void ocs_aead_cra_exit(struct crypto_aead *tfm)
}
}
-static struct skcipher_alg algs[] = {
+static struct skcipher_engine_alg algs[] = {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
{
- .base.cra_name = "ecb(aes)",
- .base.cra_driver_name = "ecb-aes-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_AES_MIN_KEY_SIZE,
- .max_keysize = OCS_AES_MAX_KEY_SIZE,
- .setkey = kmb_ocs_aes_set_key,
- .encrypt = kmb_ocs_aes_ecb_encrypt,
- .decrypt = kmb_ocs_aes_ecb_decrypt,
- .init = ocs_aes_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "ecb(aes)",
+ .base.base.cra_driver_name = "ecb-aes-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+ .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+ .base.setkey = kmb_ocs_aes_set_key,
+ .base.encrypt = kmb_ocs_aes_ecb_encrypt,
+ .base.decrypt = kmb_ocs_aes_ecb_decrypt,
+ .base.init = ocs_aes_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
{
- .base.cra_name = "cbc(aes)",
- .base.cra_driver_name = "cbc-aes-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_AES_MIN_KEY_SIZE,
- .max_keysize = OCS_AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = kmb_ocs_aes_set_key,
- .encrypt = kmb_ocs_aes_cbc_encrypt,
- .decrypt = kmb_ocs_aes_cbc_decrypt,
- .init = ocs_aes_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "cbc(aes)",
+ .base.base.cra_driver_name = "cbc-aes-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+ .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_aes_set_key,
+ .base.encrypt = kmb_ocs_aes_cbc_encrypt,
+ .base.decrypt = kmb_ocs_aes_cbc_decrypt,
+ .base.init = ocs_aes_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
{
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "ctr-aes-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_AES_MIN_KEY_SIZE,
- .max_keysize = OCS_AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = kmb_ocs_aes_set_key,
- .encrypt = kmb_ocs_aes_ctr_encrypt,
- .decrypt = kmb_ocs_aes_ctr_decrypt,
- .init = ocs_aes_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "ctr(aes)",
+ .base.base.cra_driver_name = "ctr-aes-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.base.cra_blocksize = 1,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+ .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_aes_set_key,
+ .base.encrypt = kmb_ocs_aes_ctr_encrypt,
+ .base.decrypt = kmb_ocs_aes_ctr_decrypt,
+ .base.init = ocs_aes_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
{
- .base.cra_name = "cts(cbc(aes))",
- .base.cra_driver_name = "cts-aes-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_AES_MIN_KEY_SIZE,
- .max_keysize = OCS_AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = kmb_ocs_aes_set_key,
- .encrypt = kmb_ocs_aes_cts_encrypt,
- .decrypt = kmb_ocs_aes_cts_decrypt,
- .init = ocs_aes_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "cts(cbc(aes))",
+ .base.base.cra_driver_name = "cts-aes-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+ .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_aes_set_key,
+ .base.encrypt = kmb_ocs_aes_cts_encrypt,
+ .base.decrypt = kmb_ocs_aes_cts_decrypt,
+ .base.init = ocs_aes_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
{
- .base.cra_name = "ecb(sm4)",
- .base.cra_driver_name = "ecb-sm4-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_SM4_KEY_SIZE,
- .max_keysize = OCS_SM4_KEY_SIZE,
- .setkey = kmb_ocs_sm4_set_key,
- .encrypt = kmb_ocs_sm4_ecb_encrypt,
- .decrypt = kmb_ocs_sm4_ecb_decrypt,
- .init = ocs_sm4_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "ecb(sm4)",
+ .base.base.cra_driver_name = "ecb-sm4-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_SM4_KEY_SIZE,
+ .base.max_keysize = OCS_SM4_KEY_SIZE,
+ .base.setkey = kmb_ocs_sm4_set_key,
+ .base.encrypt = kmb_ocs_sm4_ecb_encrypt,
+ .base.decrypt = kmb_ocs_sm4_ecb_decrypt,
+ .base.init = ocs_sm4_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
{
- .base.cra_name = "cbc(sm4)",
- .base.cra_driver_name = "cbc-sm4-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_SM4_KEY_SIZE,
- .max_keysize = OCS_SM4_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = kmb_ocs_sm4_set_key,
- .encrypt = kmb_ocs_sm4_cbc_encrypt,
- .decrypt = kmb_ocs_sm4_cbc_decrypt,
- .init = ocs_sm4_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "cbc(sm4)",
+ .base.base.cra_driver_name = "cbc-sm4-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_SM4_KEY_SIZE,
+ .base.max_keysize = OCS_SM4_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_sm4_set_key,
+ .base.encrypt = kmb_ocs_sm4_cbc_encrypt,
+ .base.decrypt = kmb_ocs_sm4_cbc_decrypt,
+ .base.init = ocs_sm4_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
{
- .base.cra_name = "ctr(sm4)",
- .base.cra_driver_name = "ctr-sm4-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_SM4_KEY_SIZE,
- .max_keysize = OCS_SM4_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = kmb_ocs_sm4_set_key,
- .encrypt = kmb_ocs_sm4_ctr_encrypt,
- .decrypt = kmb_ocs_sm4_ctr_decrypt,
- .init = ocs_sm4_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "ctr(sm4)",
+ .base.base.cra_driver_name = "ctr-sm4-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.base.cra_blocksize = 1,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_SM4_KEY_SIZE,
+ .base.max_keysize = OCS_SM4_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_sm4_set_key,
+ .base.encrypt = kmb_ocs_sm4_ctr_encrypt,
+ .base.decrypt = kmb_ocs_sm4_ctr_decrypt,
+ .base.init = ocs_sm4_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
{
- .base.cra_name = "cts(cbc(sm4))",
- .base.cra_driver_name = "cts-sm4-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_SM4_KEY_SIZE,
- .max_keysize = OCS_SM4_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = kmb_ocs_sm4_set_key,
- .encrypt = kmb_ocs_sm4_cts_encrypt,
- .decrypt = kmb_ocs_sm4_cts_decrypt,
- .init = ocs_sm4_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "cts(cbc(sm4))",
+ .base.base.cra_driver_name = "cts-sm4-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_SM4_KEY_SIZE,
+ .base.max_keysize = OCS_SM4_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_sm4_set_key,
+ .base.encrypt = kmb_ocs_sm4_cts_encrypt,
+ .base.decrypt = kmb_ocs_sm4_cts_decrypt,
+ .base.init = ocs_sm4_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
};
-static struct aead_alg algs_aead[] = {
+static struct aead_engine_alg algs_aead[] = {
{
- .base = {
+ .base.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "gcm-aes-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
@@ -1467,17 +1450,18 @@ static struct aead_alg algs_aead[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
- .init = ocs_aes_aead_cra_init,
- .exit = ocs_aead_cra_exit,
- .ivsize = GCM_AES_IV_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setauthsize = kmb_ocs_aead_gcm_setauthsize,
- .setkey = kmb_ocs_aes_aead_set_key,
- .encrypt = kmb_ocs_aes_gcm_encrypt,
- .decrypt = kmb_ocs_aes_gcm_decrypt,
+ .base.init = ocs_aes_aead_cra_init,
+ .base.exit = ocs_aead_cra_exit,
+ .base.ivsize = GCM_AES_IV_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.setauthsize = kmb_ocs_aead_gcm_setauthsize,
+ .base.setkey = kmb_ocs_aes_aead_set_key,
+ .base.encrypt = kmb_ocs_aes_gcm_encrypt,
+ .base.decrypt = kmb_ocs_aes_gcm_decrypt,
+ .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
},
{
- .base = {
+ .base.base = {
.cra_name = "ccm(aes)",
.cra_driver_name = "ccm-aes-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
@@ -1489,17 +1473,18 @@ static struct aead_alg algs_aead[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
- .init = ocs_aes_aead_cra_init,
- .exit = ocs_aead_cra_exit,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setauthsize = kmb_ocs_aead_ccm_setauthsize,
- .setkey = kmb_ocs_aes_aead_set_key,
- .encrypt = kmb_ocs_aes_ccm_encrypt,
- .decrypt = kmb_ocs_aes_ccm_decrypt,
+ .base.init = ocs_aes_aead_cra_init,
+ .base.exit = ocs_aead_cra_exit,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.setauthsize = kmb_ocs_aead_ccm_setauthsize,
+ .base.setkey = kmb_ocs_aes_aead_set_key,
+ .base.encrypt = kmb_ocs_aes_ccm_encrypt,
+ .base.decrypt = kmb_ocs_aes_ccm_decrypt,
+ .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
},
{
- .base = {
+ .base.base = {
.cra_name = "gcm(sm4)",
.cra_driver_name = "gcm-sm4-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
@@ -1510,17 +1495,18 @@ static struct aead_alg algs_aead[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
- .init = ocs_sm4_aead_cra_init,
- .exit = ocs_aead_cra_exit,
- .ivsize = GCM_AES_IV_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setauthsize = kmb_ocs_aead_gcm_setauthsize,
- .setkey = kmb_ocs_sm4_aead_set_key,
- .encrypt = kmb_ocs_sm4_gcm_encrypt,
- .decrypt = kmb_ocs_sm4_gcm_decrypt,
+ .base.init = ocs_sm4_aead_cra_init,
+ .base.exit = ocs_aead_cra_exit,
+ .base.ivsize = GCM_AES_IV_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.setauthsize = kmb_ocs_aead_gcm_setauthsize,
+ .base.setkey = kmb_ocs_sm4_aead_set_key,
+ .base.encrypt = kmb_ocs_sm4_gcm_encrypt,
+ .base.decrypt = kmb_ocs_sm4_gcm_decrypt,
+ .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
},
{
- .base = {
+ .base.base = {
.cra_name = "ccm(sm4)",
.cra_driver_name = "ccm-sm4-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
@@ -1531,21 +1517,22 @@ static struct aead_alg algs_aead[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
- .init = ocs_sm4_aead_cra_init,
- .exit = ocs_aead_cra_exit,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setauthsize = kmb_ocs_aead_ccm_setauthsize,
- .setkey = kmb_ocs_sm4_aead_set_key,
- .encrypt = kmb_ocs_sm4_ccm_encrypt,
- .decrypt = kmb_ocs_sm4_ccm_decrypt,
+ .base.init = ocs_sm4_aead_cra_init,
+ .base.exit = ocs_aead_cra_exit,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.setauthsize = kmb_ocs_aead_ccm_setauthsize,
+ .base.setkey = kmb_ocs_sm4_aead_set_key,
+ .base.encrypt = kmb_ocs_sm4_ccm_encrypt,
+ .base.decrypt = kmb_ocs_sm4_ccm_decrypt,
+ .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
}
};
static void unregister_aes_algs(struct ocs_aes_dev *aes_dev)
{
- crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+ crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
+ crypto_engine_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
static int register_aes_algs(struct ocs_aes_dev *aes_dev)
@@ -1556,13 +1543,13 @@ static int register_aes_algs(struct ocs_aes_dev *aes_dev)
* If any algorithm fails to register, all preceding algorithms that
* were successfully registered will be automatically unregistered.
*/
- ret = crypto_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
+ ret = crypto_engine_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
if (ret)
return ret;
- ret = crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+ ret = crypto_engine_register_skciphers(algs, ARRAY_SIZE(algs));
if (ret)
- crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs));
+		crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
return ret;
}
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
index 2269df17514c..fb95deed9057 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
@@ -7,30 +7,27 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <crypto/ecc_curve.h>
+#include <crypto/ecdh.h>
+#include <crypto/engine.h>
+#include <crypto/internal/ecc.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/kpp.h>
+#include <crypto/rng.h>
#include <linux/clk.h>
#include <linux/completion.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/fips.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-#include <crypto/ecc_curve.h>
-#include <crypto/ecdh.h>
-#include <crypto/engine.h>
-#include <crypto/kpp.h>
-#include <crypto/rng.h>
-
-#include <crypto/internal/ecc.h>
-#include <crypto/internal/kpp.h>
+#include <linux/string.h>
#define DRV_NAME "keembay-ocs-ecc"
@@ -95,13 +92,11 @@ struct ocs_ecc_dev {
/**
* struct ocs_ecc_ctx - Transformation context.
- * @engine_ctx: Crypto engine ctx.
* @ecc_dev: The ECC driver associated with this context.
* @curve: The elliptic curve used by this transformation.
* @private_key: The private key.
*/
struct ocs_ecc_ctx {
- struct crypto_engine_ctx engine_ctx;
struct ocs_ecc_dev *ecc_dev;
const struct ecc_curve *curve;
u64 private_key[KMB_ECC_VLI_MAX_DIGITS];
@@ -794,10 +789,6 @@ static int kmb_ecc_tctx_init(struct ocs_ecc_ctx *tctx, unsigned int curve_id)
if (!tctx->curve)
return -EOPNOTSUPP;
- tctx->engine_ctx.op.prepare_request = NULL;
- tctx->engine_ctx.op.do_one_request = kmb_ocs_ecc_do_one_request;
- tctx->engine_ctx.op.unprepare_request = NULL;
-
return 0;
}
@@ -830,36 +821,38 @@ static unsigned int kmb_ocs_ecdh_max_size(struct crypto_kpp *tfm)
return digits_to_bytes(tctx->curve->g.ndigits) * 2;
}
-static struct kpp_alg ocs_ecdh_p256 = {
- .set_secret = kmb_ocs_ecdh_set_secret,
- .generate_public_key = kmb_ocs_ecdh_generate_public_key,
- .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
- .init = kmb_ocs_ecdh_nist_p256_init_tfm,
- .exit = kmb_ocs_ecdh_exit_tfm,
- .max_size = kmb_ocs_ecdh_max_size,
- .base = {
+static struct kpp_engine_alg ocs_ecdh_p256 = {
+ .base.set_secret = kmb_ocs_ecdh_set_secret,
+ .base.generate_public_key = kmb_ocs_ecdh_generate_public_key,
+ .base.compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
+ .base.init = kmb_ocs_ecdh_nist_p256_init_tfm,
+ .base.exit = kmb_ocs_ecdh_exit_tfm,
+ .base.max_size = kmb_ocs_ecdh_max_size,
+ .base.base = {
.cra_name = "ecdh-nist-p256",
.cra_driver_name = "ecdh-nist-p256-keembay-ocs",
.cra_priority = KMB_OCS_ECC_PRIORITY,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ocs_ecc_ctx),
},
+ .op.do_one_request = kmb_ocs_ecc_do_one_request,
};
-static struct kpp_alg ocs_ecdh_p384 = {
- .set_secret = kmb_ocs_ecdh_set_secret,
- .generate_public_key = kmb_ocs_ecdh_generate_public_key,
- .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
- .init = kmb_ocs_ecdh_nist_p384_init_tfm,
- .exit = kmb_ocs_ecdh_exit_tfm,
- .max_size = kmb_ocs_ecdh_max_size,
- .base = {
+static struct kpp_engine_alg ocs_ecdh_p384 = {
+ .base.set_secret = kmb_ocs_ecdh_set_secret,
+ .base.generate_public_key = kmb_ocs_ecdh_generate_public_key,
+ .base.compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
+ .base.init = kmb_ocs_ecdh_nist_p384_init_tfm,
+ .base.exit = kmb_ocs_ecdh_exit_tfm,
+ .base.max_size = kmb_ocs_ecdh_max_size,
+ .base.base = {
.cra_name = "ecdh-nist-p384",
.cra_driver_name = "ecdh-nist-p384-keembay-ocs",
.cra_priority = KMB_OCS_ECC_PRIORITY,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ocs_ecc_ctx),
},
+ .op.do_one_request = kmb_ocs_ecc_do_one_request,
};
static irqreturn_t ocs_ecc_irq_handler(int irq, void *dev_id)
@@ -941,14 +934,14 @@ static int kmb_ocs_ecc_probe(struct platform_device *pdev)
}
/* Register the KPP algo. */
- rc = crypto_register_kpp(&ocs_ecdh_p256);
+ rc = crypto_engine_register_kpp(&ocs_ecdh_p256);
if (rc) {
dev_err(dev,
"Could not register OCS algorithms with Crypto API\n");
goto cleanup;
}
- rc = crypto_register_kpp(&ocs_ecdh_p384);
+ rc = crypto_engine_register_kpp(&ocs_ecdh_p384);
if (rc) {
dev_err(dev,
"Could not register OCS algorithms with Crypto API\n");
@@ -958,7 +951,7 @@ static int kmb_ocs_ecc_probe(struct platform_device *pdev)
return 0;
ocs_ecdh_p384_error:
- crypto_unregister_kpp(&ocs_ecdh_p256);
+ crypto_engine_unregister_kpp(&ocs_ecdh_p256);
cleanup:
crypto_engine_exit(ecc_dev->engine);
@@ -977,8 +970,8 @@ static int kmb_ocs_ecc_remove(struct platform_device *pdev)
ecc_dev = platform_get_drvdata(pdev);
- crypto_unregister_kpp(&ocs_ecdh_p384);
- crypto_unregister_kpp(&ocs_ecdh_p256);
+ crypto_engine_unregister_kpp(&ocs_ecdh_p384);
+ crypto_engine_unregister_kpp(&ocs_ecdh_p256);
spin_lock(&ocs_ecc.lock);
list_del(&ecc_dev->list);
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index d4bcbed1f546..daba8ca05dbe 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -5,19 +5,21 @@
* Copyright (C) 2018-2020 Intel Corporation
*/
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-
#include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha2.h>
#include <crypto/sm3.h>
-#include <crypto/hmac.h>
-#include <crypto/internal/hash.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
#include "ocs-hcu.h"
@@ -34,7 +36,6 @@
/**
* struct ocs_hcu_ctx: OCS HCU Transform context.
- * @engine_ctx: Crypto Engine context.
* @hcu_dev: The OCS HCU device used by the transformation.
* @key: The key (used only for HMAC transformations).
* @key_len: The length of the key.
@@ -42,7 +43,6 @@
* @is_hmac_tfm: Whether or not this is a HMAC transformation.
*/
struct ocs_hcu_ctx {
- struct crypto_engine_ctx engine_ctx;
struct ocs_hcu_dev *hcu_dev;
u8 key[SHA512_BLOCK_SIZE];
size_t key_len;
@@ -824,11 +824,6 @@ static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
{
crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
sizeof(struct ocs_hcu_rctx));
-
- /* Init context to 0. */
- memzero_explicit(ctx, sizeof(*ctx));
- /* Set engine ops. */
- ctx->engine_ctx.op.do_one_request = kmb_ocs_hcu_do_one_request;
}
static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm)
@@ -883,17 +878,17 @@ static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm)
memzero_explicit(ctx->key, sizeof(ctx->key));
}
-static struct ahash_alg ocs_hcu_algs[] = {
+static struct ahash_engine_alg ocs_hcu_algs[] = {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -907,18 +902,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sha_cra_init,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .setkey = kmb_ocs_hcu_setkey,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -933,18 +929,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_init = kmb_ocs_hcu_hmac_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -958,18 +955,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sha_cra_init,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .setkey = kmb_ocs_hcu_setkey,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -984,17 +982,18 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_init = kmb_ocs_hcu_hmac_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
.digestsize = SM3_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -1008,18 +1007,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sm3_cra_init,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .setkey = kmb_ocs_hcu_setkey,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
.digestsize = SM3_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -1034,17 +1034,18 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_init = kmb_ocs_hcu_hmac_sm3_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -1058,18 +1059,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sha_cra_init,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .setkey = kmb_ocs_hcu_setkey,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -1084,17 +1086,18 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_init = kmb_ocs_hcu_hmac_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -1108,18 +1111,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sha_cra_init,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .setkey = kmb_ocs_hcu_setkey,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -1134,7 +1138,8 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_init = kmb_ocs_hcu_hmac_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
};
@@ -1155,7 +1160,7 @@ static int kmb_ocs_hcu_remove(struct platform_device *pdev)
if (!hcu_dev)
return -ENODEV;
- crypto_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
+ crypto_engine_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
rc = crypto_engine_exit(hcu_dev->engine);
@@ -1170,7 +1175,6 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ocs_hcu_dev *hcu_dev;
- struct resource *hcu_mem;
int rc;
hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL);
@@ -1184,14 +1188,7 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
if (rc)
return rc;
- /* Get the memory address and remap. */
- hcu_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!hcu_mem) {
- dev_err(dev, "Could not retrieve io mem resource.\n");
- return -ENODEV;
- }
-
- hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem);
+ hcu_dev->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hcu_dev->io_base))
return PTR_ERR(hcu_dev->io_base);
@@ -1231,7 +1228,7 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
/* Security infrastructure guarantees OCS clock is enabled. */
- rc = crypto_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
+ rc = crypto_engine_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
if (rc) {
dev_err(dev, "Could not register algorithms.\n");
goto cleanup;
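The probe hunk above folds the platform_get_resource() + devm_ioremap_resource() pair into a single helper call. A minimal equivalent, for reference (hypothetical driver):

	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int my_probe(struct platform_device *pdev)
	{
		void __iomem *base;

		/*
		 * Looks up IORESOURCE_MEM index 0 and maps it; failures in
		 * either step are reported through the returned ERR_PTR.
		 */
		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		return 0;
	}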
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index e543a9e24a06..dd4464b7e00b 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -3,11 +3,13 @@
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_cfg.h>
+#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen4_dc.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
+#include <adf_gen4_timer.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -223,6 +225,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_HKDF |
ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+ ICP_ACCEL_CAPABILITIES_SM3 |
+ ICP_ACCEL_CAPABILITIES_SM4 |
ICP_ACCEL_CAPABILITIES_AES_V2;
/* A set bit in fusectl1 means the feature is OFF in this SKU */
@@ -246,12 +250,19 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
+ }
+
capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_SM2 |
ICP_ACCEL_CAPABILITIES_ECEDMONT;
if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
}
@@ -317,6 +328,14 @@ static void get_admin_info(struct admin_info *admin_csrs_info)
admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
}
+static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
+{
+ /*
+ * 4XXX uses KPT counter for HB
+ */
+ return ADF_4XXX_KPT_COUNTER_FREQ;
+}
+
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
@@ -508,6 +527,10 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
+ hw_data->start_timer = adf_gen4_timer_start;
+ hw_data->stop_timer = adf_gen4_timer_stop;
+ hw_data->get_hb_clock = get_heartbeat_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
index e5b314d2b60e..bb3d95a8fb21 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -3,6 +3,7 @@
#ifndef ADF_4XXX_HW_DATA_H_
#define ADF_4XXX_HW_DATA_H_
+#include <linux/units.h>
#include <adf_accel_devices.h>
/* PCIe configuration space */
@@ -64,6 +65,9 @@
#define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin"
#define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin"
+/* Clocks frequency */
+#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ)
+
/* qat_4xxx fuse bits are different from old GENs, redefine them */
enum icp_qat_4xxx_slice_mask {
ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0),
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 1a15600361d0..6d4e2e139ffa 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -8,6 +8,7 @@
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_dbgfs.h>
+#include <adf_heartbeat.h>
#include "adf_4xxx_hw_data.h"
#include "qat_compression.h"
@@ -77,6 +78,8 @@ static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
if (ret)
return ret;
+ adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);
+
return 0;
}
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 475643654e64..9c00c441b602 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
+#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c3xxx_hw_data.h"
+#include "adf_heartbeat.h"
#include "icp_qat_hw.h"
/* Worker thread to service arbiter mappings */
@@ -50,6 +52,28 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK;
}
+static u32 get_ts_clock(struct adf_hw_device_data *self)
+{
+ /*
+ * Timestamp update interval is 16 AE clock ticks for c3xxx.
+ */
+ return self->clock_frequency / 16;
+}
+
+static int measure_clock(struct adf_accel_dev *accel_dev)
+{
+ u32 frequency;
+ int ret;
+
+ ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C3XXX_MIN_AE_FREQ,
+ ADF_C3XXX_MAX_AE_FREQ);
+ if (ret)
+ return ret;
+
+ accel_dev->hw_device->clock_frequency = frequency;
+ return 0;
+}
+
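+/*
+ * As a worked example of get_ts_clock(): if the measured AE clock comes back
+ * at the ADF_C3XXX_MAX_AE_FREQ bound of 685 MHz, the timestamp clock is
+ * 685,000,000 / 16 ~= 42.8 MHz, i.e. one firmware timestamp tick roughly
+ * every 23.4 ns.
+ */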
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C3XXX_PMISC_BAR;
@@ -127,6 +151,10 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
hw_data->disable_iov = adf_disable_sriov;
hw_data->dev_config = adf_gen2_dev_config;
+ hw_data->measure_clock = measure_clock;
+ hw_data->get_hb_clock = get_ts_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;
adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
index 336a06f11dbd..690c6a1aa172 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -3,6 +3,8 @@
#ifndef ADF_C3XXX_HW_DATA_H_
#define ADF_C3XXX_HW_DATA_H_
+#include <linux/units.h>
+
/* PCIe configuration space */
#define ADF_C3XXX_PMISC_BAR 0
#define ADF_C3XXX_ETR_BAR 1
@@ -19,6 +21,11 @@
#define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
#define ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS 6
+/* Clocks frequency */
+#define ADF_C3XXX_AE_FREQ (685 * HZ_PER_MHZ)
+#define ADF_C3XXX_MIN_AE_FREQ (533 * HZ_PER_MHZ)
+#define ADF_C3XXX_MAX_AE_FREQ (685 * HZ_PER_MHZ)
+
/* Firmware Binary */
#define ADF_C3XXX_FW "qat_c3xxx.bin"
#define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin"
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index e14270703670..355a781693eb 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
+#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c62x_hw_data.h"
+#include "adf_heartbeat.h"
#include "icp_qat_hw.h"
/* Worker thread to service arbiter mappings */
@@ -50,6 +52,28 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK;
}
+static u32 get_ts_clock(struct adf_hw_device_data *self)
+{
+ /*
+ * Timestamp update interval is 16 AE clock ticks for c62x.
+ */
+ return self->clock_frequency / 16;
+}
+
+static int measure_clock(struct adf_accel_dev *accel_dev)
+{
+ u32 frequency;
+ int ret;
+
+ ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C62X_MIN_AE_FREQ,
+ ADF_C62X_MAX_AE_FREQ);
+ if (ret)
+ return ret;
+
+ accel_dev->hw_device->clock_frequency = frequency;
+ return 0;
+}
+
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62X_PMISC_BAR;
@@ -129,6 +153,10 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
hw_data->disable_iov = adf_disable_sriov;
hw_data->dev_config = adf_gen2_dev_config;
+ hw_data->measure_clock = measure_clock;
+ hw_data->get_hb_clock = get_ts_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;
adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
index 008c0a3a9769..13e6ebf6fd91 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
@@ -3,6 +3,8 @@
#ifndef ADF_C62X_HW_DATA_H_
#define ADF_C62X_HW_DATA_H_
+#include <linux/units.h>
+
/* PCIe configuration space */
#define ADF_C62X_SRAM_BAR 0
#define ADF_C62X_PMISC_BAR 1
@@ -19,6 +21,11 @@
#define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
#define ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS 10
+/* Clocks frequency */
+#define ADF_C62X_AE_FREQ (685 * HZ_PER_MHZ)
+#define ADF_C62X_MIN_AE_FREQ (533 * HZ_PER_MHZ)
+#define ADF_C62X_MAX_AE_FREQ (800 * HZ_PER_MHZ)
+
/* Firmware Binary */
#define ADF_C62X_FW "qat_c62x.bin"
#define ADF_C62X_MMP "qat_c62x_mmp.bin"
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 38de3aba6e8c..43622c7fca71 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -17,6 +17,8 @@ intel_qat-objs := adf_cfg.o \
adf_gen4_pm.o \
adf_gen2_dc.o \
adf_gen4_dc.o \
+ adf_gen4_timer.o \
+ adf_clock.o \
qat_crypto.o \
qat_compression.o \
qat_comp_algs.o \
@@ -28,6 +30,9 @@ intel_qat-objs := adf_cfg.o \
qat_bl.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
+ adf_fw_counters.o \
+ adf_heartbeat.o \
+ adf_heartbeat_dbgfs.o \
adf_dbgfs.o
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 0399417b91fc..e57abde66f4f 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -188,6 +188,11 @@ struct adf_hw_device_data {
int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
int (*send_admin_init)(struct adf_accel_dev *accel_dev);
+ int (*start_timer)(struct adf_accel_dev *accel_dev);
+ void (*stop_timer)(struct adf_accel_dev *accel_dev);
+ void (*check_hb_ctrs)(struct adf_accel_dev *accel_dev);
+ u32 (*get_hb_clock)(struct adf_hw_device_data *self);
+ int (*measure_clock)(struct adf_accel_dev *accel_dev);
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
@@ -229,6 +234,7 @@ struct adf_hw_device_data {
u8 num_accel;
u8 num_logical_accel;
u8 num_engines;
+ u32 num_hb_ctrs;
};
/* CSR write macro */
@@ -241,6 +247,11 @@ struct adf_hw_device_data {
#define ADF_CFG_NUM_SERVICES 4
#define ADF_SRV_TYPE_BIT_LEN 3
#define ADF_SRV_TYPE_MASK 0x7
+#define ADF_AE_ADMIN_THREAD 7
+#define ADF_NUM_THREADS_PER_AE 8
+#define ADF_NUM_PKE_STRAND 2
+#define ADF_AE_STRAND0_THREAD 8
+#define ADF_AE_STRAND1_THREAD 9
#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
@@ -292,9 +303,12 @@ struct adf_accel_dev {
unsigned long status;
atomic_t ref_count;
struct dentry *debugfs_dir;
+ struct dentry *fw_cntr_dbgfile;
struct list_head list;
struct module *owner;
struct adf_accel_pci accel_pci_dev;
+ struct adf_timer *timer;
+ struct adf_heartbeat *heartbeat;
union {
struct {
/* protects VF2PF interrupts access */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index 118775ee02f2..ff790823b868 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -8,6 +8,7 @@
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
+#include "adf_heartbeat.h"
#include "icp_qat_fw_init_admin.h"
#define ADF_ADMIN_MAILBOX_STRIDE 0x1000
@@ -15,6 +16,7 @@
#define ADF_CONST_TABLE_SIZE 1024
#define ADF_ADMIN_POLL_DELAY_US 20
#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
+#define ADF_ONE_AE 1
static const u8 const_tab[1024] __aligned(1024) = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -194,6 +196,22 @@ static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
+int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp)
+{
+ struct icp_qat_fw_init_admin_req req = { };
+ struct icp_qat_fw_init_admin_resp resp;
+ unsigned int ae_mask = ADF_ONE_AE;
+ int ret;
+
+ req.cmd_id = ICP_QAT_FW_TIMER_GET;
+ ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
+ if (ret)
+ return ret;
+
+ *timestamp = resp.timestamp;
+ return 0;
+}
+
static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
u32 *capabilities)
{
@@ -223,6 +241,49 @@ static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
return 0;
}
+int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps)
+{
+ struct icp_qat_fw_init_admin_resp resp = { };
+ struct icp_qat_fw_init_admin_req req = { };
+ int ret;
+
+ req.cmd_id = ICP_QAT_FW_COUNTERS_GET;
+
+ ret = adf_put_admin_msg_sync(accel_dev, ae, &req, &resp);
+ if (ret || resp.status)
+ return -EFAULT;
+
+ *reqs = resp.req_rec_count;
+ *resps = resp.resp_sent_count;
+
+ return 0;
+}
+
+int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt)
+{
+ u32 ae_mask = accel_dev->hw_device->ae_mask;
+ struct icp_qat_fw_init_admin_req req = { };
+ struct icp_qat_fw_init_admin_resp resp = { };
+
+ req.cmd_id = ICP_QAT_FW_SYNC;
+ req.int_timer_ticks = cnt;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
+int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, u32 ticks)
+{
+ u32 ae_mask = accel_dev->hw_device->ae_mask;
+ struct icp_qat_fw_init_admin_req req = { };
+ struct icp_qat_fw_init_admin_resp resp;
+
+ req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET;
+ req.init_cfg_ptr = accel_dev->heartbeat->dma.phy_addr;
+ req.heartbeat_ticks = ticks;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
/**
* adf_send_admin_init() - Function sends init message to FW
* @accel_dev: Pointer to acceleration device.
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index 3ae1e5caee0e..6066dc637352 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -47,4 +47,6 @@
#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
#define ADF_ACCEL_STR "Accelerator%d"
+#define ADF_HEARTBEAT_TIMER "HeartbeatTimer"
+
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.c b/drivers/crypto/intel/qat/qat_common/adf_clock.c
new file mode 100644
index 000000000000..dc0778691eb0
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_clock.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/export.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+#include <linux/units.h>
+#include <asm/errno.h>
+#include "adf_accel_devices.h"
+#include "adf_clock.h"
+#include "adf_common_drv.h"
+
+#define MEASURE_CLOCK_RETRIES 10
+#define MEASURE_CLOCK_DELAY_US 10000
+#define ME_CLK_DIVIDER 16
+#define MEASURE_CLOCK_DELTA_THRESHOLD_US 100
+
+static inline u64 timespec_to_us(const struct timespec64 *ts)
+{
+ return (u64)DIV_ROUND_CLOSEST_ULL(timespec64_to_ns(ts), NSEC_PER_USEC);
+}
+
+static inline u64 timespec_to_ms(const struct timespec64 *ts)
+{
+ return (u64)DIV_ROUND_CLOSEST_ULL(timespec64_to_ns(ts), NSEC_PER_MSEC);
+}
+
+u64 adf_clock_get_current_time(void)
+{
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ return timespec_to_ms(&ts);
+}
+
+static int measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency)
+{
+ struct timespec64 ts1, ts2, ts3, ts4;
+ u64 timestamp1, timestamp2, temp;
+ u32 delta_us, tries;
+ int ret;
+
+ tries = MEASURE_CLOCK_RETRIES;
+ do {
+ ktime_get_real_ts64(&ts1);
+ ret = adf_get_fw_timestamp(accel_dev, &timestamp1);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to get fw timestamp\n");
+ return ret;
+ }
+ ktime_get_real_ts64(&ts2);
+ delta_us = timespec_to_us(&ts2) - timespec_to_us(&ts1);
+ } while (delta_us > MEASURE_CLOCK_DELTA_THRESHOLD_US && --tries);
+
+ if (!tries) {
+ dev_err(&GET_DEV(accel_dev), "Excessive clock measure delay\n");
+ return -ETIMEDOUT;
+ }
+
+ fsleep(MEASURE_CLOCK_DELAY_US);
+
+ tries = MEASURE_CLOCK_RETRIES;
+ do {
+ ktime_get_real_ts64(&ts3);
+ if (adf_get_fw_timestamp(accel_dev, &timestamp2)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to get fw timestamp\n");
+ return -EIO;
+ }
+ ktime_get_real_ts64(&ts4);
+ delta_us = timespec_to_us(&ts4) - timespec_to_us(&ts3);
+ } while (delta_us > MEASURE_CLOCK_DELTA_THRESHOLD_US && --tries);
+
+ if (!tries) {
+ dev_err(&GET_DEV(accel_dev), "Excessive clock measure delay\n");
+ return -ETIMEDOUT;
+ }
+
+ delta_us = timespec_to_us(&ts3) - timespec_to_us(&ts1);
+ temp = (timestamp2 - timestamp1) * ME_CLK_DIVIDER * 10;
+ temp = DIV_ROUND_CLOSEST_ULL(temp, delta_us);
+ /*
+ * Enclose the division to allow the preprocessor to precalculate it,
+ * and avoid promoting r-value to 64-bit before division.
+ */
+ *frequency = temp * (HZ_PER_MHZ / 10);
+
+ return 0;
+}
+
+/**
+ * adf_dev_measure_clock() - measures device clock frequency
+ * @accel_dev: Pointer to acceleration device.
+ * @frequency: Pointer to variable where result will be stored
+ * @min: Minimal allowed frequency value
+ * @max: Maximal allowed frequency value
+ *
+ * If the measured frequency falls outside the min/max thresholds, the
+ * returned value is clamped to the crossed threshold.
+ *
+ * This algorithm compares the device firmware timestamp with the kernel
+ * timestamp, so only limited accuracy can be expected from this measurement.
+ *
+ * Return:
+ * * 0 - measurement succeeded
+ * * -ETIMEDOUT - measurement failed
+ */
+int adf_dev_measure_clock(struct adf_accel_dev *accel_dev,
+ u32 *frequency, u32 min, u32 max)
+{
+ int ret;
+ u32 freq;
+
+ ret = measure_clock(accel_dev, &freq);
+ if (ret)
+ return ret;
+
+ *frequency = clamp(freq, min, max);
+
+ if (*frequency != freq)
+ dev_warn(&GET_DEV(accel_dev),
+ "Measured clock %d Hz is out of range, assuming %d\n",
+ freq, *frequency);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_measure_clock);
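Worked example of the arithmetic in measure_clock() above (numbers illustrative): the firmware timestamp advances at AE_clock / ME_CLK_DIVIDER. If the two timestamp reads are delta_us = 10,000 us apart and differ by 428,125 ticks, then temp = 428,125 * 16 * 10 / 10,000 = 6,850 and *frequency = 6,850 * (1,000,000 / 10) = 685,000,000 Hz, i.e. a 685 MHz AE clock, which adf_dev_measure_clock() then clamps to the caller-supplied [min, max] range.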
diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.h b/drivers/crypto/intel/qat/qat_common/adf_clock.h
new file mode 100644
index 000000000000..e309bc0dc35c
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_clock.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_CLOCK_H
+#define ADF_CLOCK_H
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+
+int adf_dev_measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency,
+ u32 min, u32 max);
+u64 adf_clock_get_current_time(void);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index b8132eb9bc2a..673b5044c62a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -58,12 +58,6 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);
-
-int adf_ctl_dev_register(void);
-void adf_ctl_dev_unregister(void);
-int adf_processes_dev_register(void);
-void adf_processes_dev_unregister(void);
-
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf);
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
@@ -94,7 +88,11 @@ void adf_exit_aer(void);
int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps);
int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
+int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt);
+int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, u32 ticks);
+int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp);
int adf_init_arb(struct adf_accel_dev *accel_dev);
void adf_exit_arb(struct adf_accel_dev *accel_dev);
void adf_update_ring_arb(struct adf_etr_ring_data *ring);
@@ -178,8 +176,6 @@ int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned long ctx_mask,
unsigned short reg_num, unsigned int regdata);
-int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
- unsigned char ae, unsigned short lm_addr, unsigned int value);
void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char mode);
int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
@@ -193,6 +189,8 @@ int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
int adf_init_misc_wq(void);
void adf_exit_misc_wq(void);
bool adf_misc_wq_queue_work(struct work_struct *work);
+bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
+ unsigned long delay);
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
index d0a2f892e6eb..04845f8d72be 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
@@ -6,6 +6,8 @@
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_dbgfs.h"
+#include "adf_fw_counters.h"
+#include "adf_heartbeat_dbgfs.h"
/**
* adf_dbgfs_init() - add persistent debugfs entries
@@ -56,6 +58,11 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
{
if (!accel_dev->debugfs_dir)
return;
+
+ if (!accel_dev->is_vf) {
+ adf_fw_counters_dbgfs_add(accel_dev);
+ adf_heartbeat_dbgfs_add(accel_dev);
+ }
}
/**
@@ -66,4 +73,9 @@ void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
{
if (!accel_dev->debugfs_dir)
return;
+
+ if (!accel_dev->is_vf) {
+ adf_heartbeat_dbgfs_rm(accel_dev);
+ adf_fw_counters_dbgfs_rm(accel_dev);
+ }
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c
new file mode 100644
index 000000000000..cb6e09ef5c9f
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_fw_counters.h"
+
+#define ADF_FW_COUNTERS_MAX_PADDING 16
+
+enum adf_fw_counters_types {
+ ADF_FW_REQUESTS,
+ ADF_FW_RESPONSES,
+ ADF_FW_COUNTERS_COUNT
+};
+
+static const char * const adf_fw_counter_names[] = {
+ [ADF_FW_REQUESTS] = "Requests",
+ [ADF_FW_RESPONSES] = "Responses",
+};
+
+static_assert(ARRAY_SIZE(adf_fw_counter_names) == ADF_FW_COUNTERS_COUNT);
+
+struct adf_ae_counters {
+ u16 ae;
+ u64 values[ADF_FW_COUNTERS_COUNT];
+};
+
+struct adf_fw_counters {
+ u16 ae_count;
+ struct adf_ae_counters ae_counters[];
+};
+
+static void adf_fw_counters_parse_ae_values(struct adf_ae_counters *ae_counters, u32 ae,
+ u64 req_count, u64 resp_count)
+{
+ ae_counters->ae = ae;
+ ae_counters->values[ADF_FW_REQUESTS] = req_count;
+ ae_counters->values[ADF_FW_RESPONSES] = resp_count;
+}
+
+static int adf_fw_counters_load_from_device(struct adf_accel_dev *accel_dev,
+ struct adf_fw_counters *fw_counters)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ unsigned long ae_mask;
+ unsigned int i;
+ unsigned long ae;
+
+ /* Ignore the admin AEs */
+ ae_mask = hw_data->ae_mask & ~hw_data->admin_ae_mask;
+
+ if (hweight_long(ae_mask) > fw_counters->ae_count)
+ return -EINVAL;
+
+ i = 0;
+ for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
+ u64 req_count, resp_count;
+ int ret;
+
+ ret = adf_get_ae_fw_counters(accel_dev, ae, &req_count, &resp_count);
+ if (ret)
+ return ret;
+
+ adf_fw_counters_parse_ae_values(&fw_counters->ae_counters[i++], ae,
+ req_count, resp_count);
+ }
+
+ return 0;
+}
+
+static struct adf_fw_counters *adf_fw_counters_allocate(unsigned long ae_count)
+{
+ struct adf_fw_counters *fw_counters;
+
+ if (unlikely(!ae_count))
+ return ERR_PTR(-EINVAL);
+
+ fw_counters = kmalloc(struct_size(fw_counters, ae_counters, ae_count), GFP_KERNEL);
+ if (!fw_counters)
+ return ERR_PTR(-ENOMEM);
+
+ fw_counters->ae_count = ae_count;
+
+ return fw_counters;
+}
+
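+/*
+ * A note on the allocation above: struct_size(fw_counters, ae_counters,
+ * ae_count) evaluates to sizeof(struct adf_fw_counters) plus
+ * ae_count * sizeof(struct adf_ae_counters), saturating rather than wrapping
+ * on overflow, so no separate multiplication-overflow check is needed before
+ * kmalloc().
+ */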
+/**
+ * adf_fw_counters_get() - Return FW counters for the provided device.
+ * @accel_dev: Pointer to a QAT acceleration device
+ *
+ * Allocates and returns a table of counters containing execution statistics
+ * for each non-admin AE available through the supplied acceleration device.
+ * The caller owns the returned memory and is responsible for freeing it
+ * with kfree().
+ *
+ * Returns: a pointer to a dynamically allocated struct adf_fw_counters
+ * on success, or an ERR_PTR() encoded error value on failure.
+ */
+static struct adf_fw_counters *adf_fw_counters_get(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_fw_counters *fw_counters;
+ unsigned long ae_count;
+ int ret;
+
+ if (!adf_dev_started(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "QAT Device not started\n");
+ return ERR_PTR(-EFAULT);
+ }
+
+ /* Ignore the admin AEs */
+ ae_count = hweight_long(hw_data->ae_mask & ~hw_data->admin_ae_mask);
+
+ fw_counters = adf_fw_counters_allocate(ae_count);
+ if (IS_ERR(fw_counters))
+ return fw_counters;
+
+ ret = adf_fw_counters_load_from_device(accel_dev, fw_counters);
+ if (ret) {
+ kfree(fw_counters);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to create QAT fw_counters file table [%d].\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ return fw_counters;
+}
+
+static void *qat_fw_counters_seq_start(struct seq_file *sfile, loff_t *pos)
+{
+ struct adf_fw_counters *fw_counters = sfile->private;
+
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ if (*pos > fw_counters->ae_count)
+ return NULL;
+
+ return &fw_counters->ae_counters[*pos - 1];
+}
+
+static void *qat_fw_counters_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+ struct adf_fw_counters *fw_counters = sfile->private;
+
+ (*pos)++;
+
+ if (*pos > fw_counters->ae_count)
+ return NULL;
+
+ return &fw_counters->ae_counters[*pos - 1];
+}
+
+static void qat_fw_counters_seq_stop(struct seq_file *sfile, void *v) {}
+
+static int qat_fw_counters_seq_show(struct seq_file *sfile, void *v)
+{
+ int i;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(sfile, "AE ");
+ for (i = 0; i < ADF_FW_COUNTERS_COUNT; ++i)
+ seq_printf(sfile, " %*s", ADF_FW_COUNTERS_MAX_PADDING,
+ adf_fw_counter_names[i]);
+ } else {
+ struct adf_ae_counters *ae_counters = (struct adf_ae_counters *)v;
+
+ seq_printf(sfile, "%2d:", ae_counters->ae);
+ for (i = 0; i < ADF_FW_COUNTERS_COUNT; ++i)
+ seq_printf(sfile, " %*llu", ADF_FW_COUNTERS_MAX_PADDING,
+ ae_counters->values[i]);
+ }
+ seq_putc(sfile, '\n');
+
+ return 0;
+}
+
+static const struct seq_operations qat_fw_counters_sops = {
+ .start = qat_fw_counters_seq_start,
+ .next = qat_fw_counters_seq_next,
+ .stop = qat_fw_counters_seq_stop,
+ .show = qat_fw_counters_seq_show,
+};
+
+static int qat_fw_counters_file_open(struct inode *inode, struct file *file)
+{
+ struct adf_accel_dev *accel_dev = inode->i_private;
+ struct seq_file *fw_counters_seq_file;
+ struct adf_fw_counters *fw_counters;
+ int ret;
+
+ fw_counters = adf_fw_counters_get(accel_dev);
+ if (IS_ERR(fw_counters))
+ return PTR_ERR(fw_counters);
+
+ ret = seq_open(file, &qat_fw_counters_sops);
+ if (unlikely(ret)) {
+ kfree(fw_counters);
+ return ret;
+ }
+
+ fw_counters_seq_file = file->private_data;
+ fw_counters_seq_file->private = fw_counters;
+ return ret;
+}
+
+static int qat_fw_counters_file_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+
+ kfree(seq->private);
+ seq->private = NULL;
+
+ return seq_release(inode, file);
+}
+
+static const struct file_operations qat_fw_counters_fops = {
+ .owner = THIS_MODULE,
+ .open = qat_fw_counters_file_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = qat_fw_counters_file_release,
+};
+
+/**
+ * adf_fw_counters_dbgfs_add() - Create a debugfs file containing FW
+ * execution counters.
+ * @accel_dev: Pointer to a QAT acceleration device
+ *
+ * Function creates a file to display a table with statistics for the given
+ * QAT acceleration device. The table stores device specific execution values
+ * for each AE, such as the number of requests sent to the FW and responses
+ * received from the FW.
+ *
+ * Return: void
+ */
+void adf_fw_counters_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ accel_dev->fw_cntr_dbgfile = debugfs_create_file("fw_counters", 0400,
+ accel_dev->debugfs_dir,
+ accel_dev,
+ &qat_fw_counters_fops);
+}
+
+/**
+ * adf_fw_counters_dbgfs_rm() - Remove the debugfs file containing FW counters.
+ * @accel_dev: Pointer to a QAT acceleration device.
+ *
+ * Function removes the file providing the table of statistics for the given
+ * QAT acceleration device.
+ *
+ * Return: void
+ */
+void adf_fw_counters_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ debugfs_remove(accel_dev->fw_cntr_dbgfile);
+ accel_dev->fw_cntr_dbgfile = NULL;
+}
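For reference, the seq operations above render a table like the following (device node name and counter values illustrative only; column widths come from ADF_FW_COUNTERS_MAX_PADDING):

	$ cat /sys/kernel/debug/qat_4xxx_0000:6b:00.0/fw_counters
	AE          Requests        Responses
	 0:              964              964
	 1:             1024             1024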
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.h b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.h
new file mode 100644
index 000000000000..91b3b6a95f1f
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_FW_COUNTERS_H
+#define ADF_FW_COUNTERS_H
+
+struct adf_accel_dev;
+
+void adf_fw_counters_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_fw_counters_dbgfs_rm(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
index eeb30da7587a..c27ff6d18e11 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
@@ -7,6 +7,7 @@
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "qat_compression.h"
+#include "adf_heartbeat.h"
#include "adf_transport_access_macros.h"
static int adf_gen2_crypto_dev_config(struct adf_accel_dev *accel_dev)
@@ -195,6 +196,12 @@ int adf_gen2_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ goto err;
+
+ adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_DEFAULT_MS);
+
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
return ret;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
index e4bc07529be4..6bd341061de4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
@@ -145,6 +145,9 @@ do { \
#define ADF_GEN2_CERRSSMSH(i) ((i) * 0x4000 + 0x10)
#define ADF_GEN2_ERRSSMSH_EN BIT(3)
+/* Number of heartbeat counter pairs */
+#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+
/* Interrupts */
#define ADF_GEN2_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
#define ADF_GEN2_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 4fb4b3df5a18..02d7a019ebf8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -136,6 +136,9 @@ do { \
#define ADF_GEN4_VFLNOTIFY BIT(7)
+/* Number of heartbeat counter pairs */
+#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
index dd112923e006..c2768762cca3 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
@@ -35,7 +35,7 @@
#define ADF_GEN4_PM_MSG_PENDING BIT(0)
#define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK GENMASK(28, 1)
-#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER (0x0)
+#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER (0x6)
#define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7)
#define ADF_GEN4_PM_DEFAULT_IDLE_SUPPORT (0x1)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
new file mode 100644
index 000000000000..646c57922fcd
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/container_of.h>
+#include <linux/dev_printk.h>
+#include <linux/export.h>
+#include <linux/jiffies.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_timer.h"
+
+#define ADF_GEN4_TIMER_PERIOD_MS 200
+
+/* This periodic update is used to trigger HB, RL & TL fw events */
+static void work_handler(struct work_struct *work)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_timer *timer_ctx;
+ u32 time_periods;
+
+ timer_ctx = container_of(to_delayed_work(work), struct adf_timer, work_ctx);
+ accel_dev = timer_ctx->accel_dev;
+
+ adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
+ msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
+
+ time_periods = div_u64(ktime_ms_delta(ktime_get_real(), timer_ctx->initial_ktime),
+ ADF_GEN4_TIMER_PERIOD_MS);
+
+ if (adf_send_admin_tim_sync(accel_dev, time_periods))
+ dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n");
+}
+
+int adf_gen4_timer_start(struct adf_accel_dev *accel_dev)
+{
+ struct adf_timer *timer_ctx;
+
+ timer_ctx = kzalloc(sizeof(*timer_ctx), GFP_KERNEL);
+ if (!timer_ctx)
+ return -ENOMEM;
+
+ timer_ctx->accel_dev = accel_dev;
+ accel_dev->timer = timer_ctx;
+ timer_ctx->initial_ktime = ktime_get_real();
+
+ INIT_DELAYED_WORK(&timer_ctx->work_ctx, work_handler);
+ adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
+ msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_timer_start);
+
+void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev)
+{
+ struct adf_timer *timer_ctx = accel_dev->timer;
+
+ if (!timer_ctx)
+ return;
+
+ cancel_delayed_work_sync(&timer_ctx->work_ctx);
+
+ kfree(timer_ctx);
+ accel_dev->timer = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_timer_stop);
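Note the ordering in work_handler() above: the delayed work re-arms itself before issuing the admin sync, so a slow or failed ICP_QAT_FW_SYNC cannot stall the 200 ms cadence, and time_periods is recomputed from initial_ktime on every run rather than accumulated, so occasional scheduling jitter does not drift the firmware's notion of elapsed periods.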
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h
new file mode 100644
index 000000000000..66a709e7b358
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_GEN4_TIMER_H_
+#define ADF_GEN4_TIMER_H_
+
+#include <linux/ktime.h>
+#include <linux/workqueue.h>
+
+struct adf_accel_dev;
+
+struct adf_timer {
+ struct adf_accel_dev *accel_dev;
+ struct delayed_work work_ctx;
+ ktime_t initial_ktime;
+};
+
+int adf_gen4_timer_start(struct adf_accel_dev *accel_dev);
+void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_GEN4_TIMER_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
new file mode 100644
index 000000000000..beef9a5f6c75
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/kstrtox.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <asm/errno.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_clock.h"
+#include "adf_common_drv.h"
+#include "adf_heartbeat.h"
+#include "adf_transport_internal.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define ADF_HB_EMPTY_SIG 0xA5A5A5A5
+
+/* Heartbeat counter pair */
+struct hb_cnt_pair {
+ __u16 resp_heartbeat_cnt;
+ __u16 req_heartbeat_cnt;
+};
+
+static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev)
+{
+ u64 curr_time = adf_clock_get_current_time();
+ u64 polling_time = curr_time - accel_dev->heartbeat->last_hb_check_time;
+
+ if (polling_time < accel_dev->heartbeat->hb_timer) {
+ dev_warn(&GET_DEV(accel_dev),
+ "HB polling too frequent. Configured HB timer %d ms\n",
+ accel_dev->heartbeat->hb_timer);
+ return -EINVAL;
+ }
+
+ accel_dev->heartbeat->last_hb_check_time = curr_time;
+ return 0;
+}
+
+/**
+ * validate_hb_ctrs_cnt() - checks whether the number of heartbeat counters
+ * needs to be adjusted to match the currently loaded firmware.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Return:
+ * * true - hb_ctrs must be increased by ADF_NUM_PKE_STRAND
+ * * false - no changes needed
+ */
+static bool validate_hb_ctrs_cnt(struct adf_accel_dev *accel_dev)
+{
+ const size_t hb_ctrs = accel_dev->hw_device->num_hb_ctrs;
+ const size_t max_aes = accel_dev->hw_device->num_engines;
+ const size_t hb_struct_size = sizeof(struct hb_cnt_pair);
+ const size_t exp_diff_size = array3_size(ADF_NUM_PKE_STRAND, max_aes,
+ hb_struct_size);
+ const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
+ const size_t stats_size = size_mul(dev_ctrs, hb_struct_size);
+ const u32 exp_diff_cnt = exp_diff_size / sizeof(u32);
+ const u32 stats_el_cnt = stats_size / sizeof(u32);
+ struct hb_cnt_pair *hb_stats = accel_dev->heartbeat->dma.virt_addr;
+ const u32 *mem_to_chk = (u32 *)(hb_stats + dev_ctrs);
+ u32 el_diff_cnt = 0;
+ int i;
+
+ /* count how many 32-bit words differ from the pattern */
+ for (i = 0; i < stats_el_cnt; i++) {
+ if (mem_to_chk[i] == ADF_HB_EMPTY_SIG)
+ break;
+
+ el_diff_cnt++;
+ }
+
+ return el_diff_cnt && el_diff_cnt == exp_diff_cnt;
+}
+
+void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev)
+{
+ struct hb_cnt_pair *hb_stats = accel_dev->heartbeat->dma.virt_addr;
+ const size_t hb_ctrs = accel_dev->hw_device->num_hb_ctrs;
+ const size_t max_aes = accel_dev->hw_device->num_engines;
+ const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
+ const size_t stats_size = size_mul(dev_ctrs, sizeof(struct hb_cnt_pair));
+ const size_t mem_items_to_fill = size_mul(stats_size, 2) / sizeof(u32);
+
+ /* fill hb stats memory with pattern */
+ memset32((u32 *)hb_stats, ADF_HB_EMPTY_SIG, mem_items_to_fill);
+ accel_dev->heartbeat->ctrs_cnt_checked = false;
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_check_ctrs);
+
+static int get_timer_ticks(struct adf_accel_dev *accel_dev, unsigned int *value)
+{
+ char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
+ u32 timer_ms = ADF_CFG_HB_TIMER_DEFAULT_MS;
+ int cfg_read_status;
+ u32 ticks;
+ int ret;
+
+ cfg_read_status = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_HEARTBEAT_TIMER, timer_str);
+ if (cfg_read_status == 0) {
+ if (kstrtouint(timer_str, 10, &timer_ms))
+ dev_dbg(&GET_DEV(accel_dev),
+ "kstrtouint failed to parse the %s, param value",
+ ADF_HEARTBEAT_TIMER);
+ }
+
+ if (timer_ms < ADF_CFG_HB_TIMER_MIN_MS) {
+ dev_err(&GET_DEV(accel_dev), "Timer cannot be less than %u\n",
+ ADF_CFG_HB_TIMER_MIN_MS);
+ return -EINVAL;
+ }
+
+ /*
+ * On 4xxx devices the adf_timer is responsible for HB updates and
+ * its period is fixed to 200 ms.
+ */
+ if (accel_dev->timer)
+ timer_ms = ADF_CFG_HB_TIMER_MIN_MS;
+
+ ret = adf_heartbeat_ms_to_ticks(accel_dev, timer_ms, &ticks);
+ if (ret)
+ return ret;
+
+ adf_heartbeat_save_cfg_param(accel_dev, timer_ms);
+
+ accel_dev->heartbeat->hb_timer = timer_ms;
+ *value = ticks;
+
+ return 0;
+}
+
+static int check_ae(struct hb_cnt_pair *curr, struct hb_cnt_pair *prev,
+ u16 *count, const size_t hb_ctrs)
+{
+ size_t thr;
+
+ /* loop through all threads in AE */
+ for (thr = 0; thr < hb_ctrs; thr++) {
+ u16 req = curr[thr].req_heartbeat_cnt;
+ u16 resp = curr[thr].resp_heartbeat_cnt;
+ u16 last = prev[thr].resp_heartbeat_cnt;
+
+ if ((thr == ADF_AE_ADMIN_THREAD || req != resp) && resp == last) {
+ u16 retry = ++count[thr];
+
+ if (retry >= ADF_CFG_HB_COUNT_THRESHOLD)
+ return -EIO;
+
+ } else {
+ count[thr] = 0;
+ }
+ }
+ return 0;
+}
+
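+/*
+ * To make the check_ae() rule concrete (values illustrative): a worker
+ * thread with req = 100, resp = 97 and last = 97 has requests outstanding
+ * yet made no progress since the previous poll, so its per-thread failure
+ * count increments; after ADF_CFG_HB_COUNT_THRESHOLD (3) such consecutive
+ * polls check_ae() returns -EIO. The admin thread (ADF_AE_ADMIN_THREAD) is
+ * expected to make progress every period, so it is checked even when
+ * req == resp.
+ */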
+static int adf_hb_get_status(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct hb_cnt_pair *live_stats, *last_stats, *curr_stats;
+ const size_t hb_ctrs = hw_device->num_hb_ctrs;
+ const unsigned long ae_mask = hw_device->ae_mask;
+ const size_t max_aes = hw_device->num_engines;
+ const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
+ const size_t stats_size = size_mul(dev_ctrs, sizeof(*curr_stats));
+ struct hb_cnt_pair *ae_curr_p, *ae_prev_p;
+ u16 *count_fails, *ae_count_p;
+ size_t ae_offset;
+ size_t ae = 0;
+ int ret = 0;
+
+ if (!accel_dev->heartbeat->ctrs_cnt_checked) {
+ if (validate_hb_ctrs_cnt(accel_dev))
+ hw_device->num_hb_ctrs += ADF_NUM_PKE_STRAND;
+
+ accel_dev->heartbeat->ctrs_cnt_checked = true;
+ }
+
+ live_stats = accel_dev->heartbeat->dma.virt_addr;
+ last_stats = live_stats + dev_ctrs;
+ count_fails = (u16 *)(last_stats + dev_ctrs);
+
+ curr_stats = kmemdup(live_stats, stats_size, GFP_KERNEL);
+ if (!curr_stats)
+ return -ENOMEM;
+
+ /* loop through active AEs */
+ for_each_set_bit(ae, &ae_mask, max_aes) {
+ ae_offset = size_mul(ae, hb_ctrs);
+ ae_curr_p = curr_stats + ae_offset;
+ ae_prev_p = last_stats + ae_offset;
+ ae_count_p = count_fails + ae_offset;
+
+ ret = check_ae(ae_curr_p, ae_prev_p, ae_count_p, hb_ctrs);
+ if (ret)
+ break;
+ }
+
+ /* Copy current stats for the next iteration */
+ memcpy(last_stats, curr_stats, stats_size);
+ kfree(curr_stats);
+
+ return ret;
+}
+
+void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
+ enum adf_device_heartbeat_status *hb_status)
+{
+ struct adf_heartbeat *hb;
+
+ if (!adf_dev_started(accel_dev) ||
+ test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
+ *hb_status = HB_DEV_UNRESPONSIVE;
+ return;
+ }
+
+ if (adf_hb_check_polling_freq(accel_dev) == -EINVAL) {
+ *hb_status = HB_DEV_UNSUPPORTED;
+ return;
+ }
+
+ hb = accel_dev->heartbeat;
+ hb->hb_sent_counter++;
+
+ if (adf_hb_get_status(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Heartbeat ERROR: QAT is not responding.\n");
+ *hb_status = HB_DEV_UNRESPONSIVE;
+ hb->hb_failed_counter++;
+ return;
+ }
+
+ *hb_status = HB_DEV_ALIVE;
+}
+
+int adf_heartbeat_ms_to_ticks(struct adf_accel_dev *accel_dev, unsigned int time_ms,
+ u32 *value)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 clk_per_sec;
+
+ /* HB clock may be different than AE clock */
+ if (!hw_data->get_hb_clock)
+ return -EINVAL;
+
+ clk_per_sec = hw_data->get_hb_clock(hw_data);
+ *value = time_ms * (clk_per_sec / MSEC_PER_SEC);
+
+ return 0;
+}
+
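+/*
+ * Worked example for adf_heartbeat_ms_to_ticks(): on a 4xxx device
+ * get_hb_clock() returns ADF_4XXX_KPT_COUNTER_FREQ = 100 MHz, so
+ * clk_per_sec / MSEC_PER_SEC = 100,000 ticks per millisecond and the 200 ms
+ * minimum timer converts to 20,000,000 ticks. On the GEN2 parts the
+ * heartbeat clock is instead the measured AE clock divided by 16.
+ */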
+int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev,
+ unsigned int timer_ms)
+{
+ char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+ snprintf(timer_str, sizeof(timer_str), "%u", timer_ms);
+ return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_HEARTBEAT_TIMER, timer_str,
+ ADF_STR);
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_save_cfg_param);
+
+int adf_heartbeat_init(struct adf_accel_dev *accel_dev)
+{
+ struct adf_heartbeat *hb;
+
+ hb = kzalloc(sizeof(*hb), GFP_KERNEL);
+ if (!hb)
+ goto err_ret;
+
+ hb->dma.virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ &hb->dma.phy_addr, GFP_KERNEL);
+ if (!hb->dma.virt_addr)
+ goto err_free;
+
+ /*
+ * Set this flag to true by default to avoid unnecessary checks;
+ * it will be cleared on platforms that need such a check.
+ */
+ hb->ctrs_cnt_checked = true;
+ accel_dev->heartbeat = hb;
+
+ return 0;
+
+err_free:
+ kfree(hb);
+err_ret:
+ return -ENOMEM;
+}
+
+int adf_heartbeat_start(struct adf_accel_dev *accel_dev)
+{
+ unsigned int timer_ticks;
+ int ret;
+
+ if (!accel_dev->heartbeat) {
+ dev_warn(&GET_DEV(accel_dev), "Heartbeat instance not found!");
+ return -EFAULT;
+ }
+
+ if (accel_dev->hw_device->check_hb_ctrs)
+ accel_dev->hw_device->check_hb_ctrs(accel_dev);
+
+ ret = get_timer_ticks(accel_dev, &timer_ticks);
+ if (ret)
+ return ret;
+
+ ret = adf_send_admin_hb_timer(accel_dev, timer_ticks);
+ if (ret)
+ dev_warn(&GET_DEV(accel_dev), "Heartbeat not supported!");
+
+ return ret;
+}
+
+void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev)
+{
+ struct adf_heartbeat *hb = accel_dev->heartbeat;
+
+ if (!hb)
+ return;
+
+ if (hb->dma.virt_addr)
+ dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ hb->dma.virt_addr, hb->dma.phy_addr);
+
+ kfree(hb);
+ accel_dev->heartbeat = NULL;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
new file mode 100644
index 000000000000..b22e3cb29798
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_HEARTBEAT_H_
+#define ADF_HEARTBEAT_H_
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+struct dentry;
+
+#define ADF_CFG_HB_TIMER_MIN_MS 200
+#define ADF_CFG_HB_TIMER_DEFAULT_MS 500
+#define ADF_CFG_HB_COUNT_THRESHOLD 3
+
+enum adf_device_heartbeat_status {
+ HB_DEV_UNRESPONSIVE = 0,
+ HB_DEV_ALIVE,
+ HB_DEV_UNSUPPORTED,
+};
+
+struct adf_heartbeat {
+ unsigned int hb_sent_counter;
+ unsigned int hb_failed_counter;
+ unsigned int hb_timer;
+ u64 last_hb_check_time;
+ bool ctrs_cnt_checked;
+ struct hb_dma_addr {
+ dma_addr_t phy_addr;
+ void *virt_addr;
+ } dma;
+ struct {
+ struct dentry *base_dir;
+ struct dentry *status;
+ struct dentry *cfg;
+ struct dentry *sent;
+ struct dentry *failed;
+ } dbgfs;
+};
+
+#ifdef CONFIG_DEBUG_FS
+int adf_heartbeat_init(struct adf_accel_dev *accel_dev);
+int adf_heartbeat_start(struct adf_accel_dev *accel_dev);
+void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev);
+
+int adf_heartbeat_ms_to_ticks(struct adf_accel_dev *accel_dev, unsigned int time_ms,
+ u32 *value);
+int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev,
+ unsigned int timer_ms);
+void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
+ enum adf_device_heartbeat_status *hb_status);
+void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev);
+
+#else
+static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline int adf_heartbeat_start(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev,
+ unsigned int timer_ms)
+{
+ return 0;
+}
+
+static inline void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
+#endif /* ADF_HEARTBEAT_H_ */
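Note that the !CONFIG_DEBUG_FS stubs let call sites stay free of preprocessor guards: adf_dev_init() and friends can call adf_heartbeat_init() unconditionally and simply get a successful no-op when debugfs support is compiled out.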
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
new file mode 100644
index 000000000000..803cbfd838f0
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kstrtox.h>
+#include <linux/types.h>
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+#include "adf_heartbeat.h"
+#include "adf_heartbeat_dbgfs.h"
+
+#define HB_OK 0
+#define HB_ERROR -1
+#define HB_STATUS_MAX_STRLEN 4
+#define HB_STATS_MAX_STRLEN 16
+
+static ssize_t adf_hb_stats_read(struct file *file, char __user *user_buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[HB_STATS_MAX_STRLEN];
+ unsigned int *value;
+ int len;
+
+ if (*ppos > 0)
+ return 0;
+
+ value = file->private_data;
+ len = scnprintf(buf, sizeof(buf), "%u\n", *value);
+
+ return simple_read_from_buffer(user_buffer, count, ppos, buf, len + 1);
+}
+
+static const struct file_operations adf_hb_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = adf_hb_stats_read,
+};
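From userspace each counter file reads back as a single decimal value followed by a newline. A minimal sketch of a reader; the device directory name is a made-up example, the real path depends on accel_dev->debugfs_dir:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[16];
		ssize_t n;
		int fd;

		/* hypothetical device directory under debugfs */
		fd = open("/sys/kernel/debug/qat_4xxx_0000:6b:00.0/heartbeat/queries_sent",
			  O_RDONLY);
		if (fd < 0)
			return 1;

		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("queries sent: %s", buf);
		}
		close(fd);
		return 0;
	}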
+
+static ssize_t adf_hb_status_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ enum adf_device_heartbeat_status hb_status;
+ char ret_str[HB_STATUS_MAX_STRLEN];
+ struct adf_accel_dev *accel_dev;
+ int ret_code;
+ size_t len;
+
+ if (*ppos > 0)
+ return 0;
+
+ accel_dev = file->private_data;
+ ret_code = HB_OK;
+
+ adf_heartbeat_status(accel_dev, &hb_status);
+
+ if (hb_status != HB_DEV_ALIVE)
+ ret_code = HB_ERROR;
+
+ len = scnprintf(ret_str, sizeof(ret_str), "%d\n", ret_code);
+
+ return simple_read_from_buffer(user_buf, count, ppos, ret_str, len + 1);
+}
+
+static const struct file_operations adf_hb_status_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = adf_hb_status_read,
+};
+
+static ssize_t adf_hb_cfg_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ struct adf_accel_dev *accel_dev;
+ unsigned int timer_ms;
+ int len;
+
+ if (*ppos > 0)
+ return 0;
+
+ accel_dev = file->private_data;
+ timer_ms = accel_dev->heartbeat->hb_timer;
+ len = scnprintf(timer_str, sizeof(timer_str), "%u\n", timer_ms);
+
+ return simple_read_from_buffer(user_buf, count, ppos, timer_str,
+ len + 1);
+}
+
+static ssize_t adf_hb_cfg_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char input_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
+ struct adf_accel_dev *accel_dev;
+ int ret, written_chars;
+ unsigned int timer_ms;
+ u32 ticks;
+
+ accel_dev = file->private_data;
+ timer_ms = ADF_CFG_HB_TIMER_DEFAULT_MS;
+
+ /* leave the last byte for string termination */
+ if (count > sizeof(input_str) - 1)
+ return -EINVAL;
+
+ written_chars = simple_write_to_buffer(input_str, sizeof(input_str) - 1,
+ ppos, user_buf, count);
+ if (written_chars > 0) {
+ ret = kstrtouint(input_str, 10, &timer_ms);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "heartbeat_cfg: Invalid value\n");
+ return ret;
+ }
+
+ if (timer_ms < ADF_CFG_HB_TIMER_MIN_MS) {
+ dev_err(&GET_DEV(accel_dev),
+ "heartbeat_cfg: Invalid value\n");
+ return -EINVAL;
+ }
+
+ /*
+ * On 4xxx devices the adf_timer is responsible for HB updates and
+ * its period is fixed at 200ms.
+ */
+ if (accel_dev->timer)
+ timer_ms = ADF_CFG_HB_TIMER_MIN_MS;
+
+ ret = adf_heartbeat_save_cfg_param(accel_dev, timer_ms);
+ if (ret)
+ return ret;
+
+ ret = adf_heartbeat_ms_to_ticks(accel_dev, timer_ms, &ticks);
+ if (ret)
+ return ret;
+
+ ret = adf_send_admin_hb_timer(accel_dev, ticks);
+ if (ret)
+ return ret;
+
+ accel_dev->heartbeat->hb_timer = timer_ms;
+ }
+
+ return written_chars;
+}
+
+static const struct file_operations adf_hb_cfg_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = adf_hb_cfg_read,
+ .write = adf_hb_cfg_write,
+};
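The config file accepts a decimal period in milliseconds, rejects anything below ADF_CFG_HB_TIMER_MIN_MS, and pins the value to 200 ms on devices where adf_timer drives the updates. A write from userspace looks like this (the path placeholder is hypothetical, as above):

	/* e.g. request a 750 ms heartbeat period */
	int fd = open("/sys/kernel/debug/<qat device>/heartbeat/config", O_WRONLY);

	if (fd >= 0) {
		write(fd, "750", 3);
		close(fd);
	}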
+
+void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ struct adf_heartbeat *hb = accel_dev->heartbeat;
+
+ if (!hb)
+ return;
+
+ hb->dbgfs.base_dir = debugfs_create_dir("heartbeat", accel_dev->debugfs_dir);
+ hb->dbgfs.status = debugfs_create_file("status", 0400, hb->dbgfs.base_dir,
+ accel_dev, &adf_hb_status_fops);
+ hb->dbgfs.sent = debugfs_create_file("queries_sent", 0400, hb->dbgfs.base_dir,
+ &hb->hb_sent_counter, &adf_hb_stats_fops);
+ hb->dbgfs.failed = debugfs_create_file("queries_failed", 0400, hb->dbgfs.base_dir,
+ &hb->hb_failed_counter, &adf_hb_stats_fops);
+ hb->dbgfs.cfg = debugfs_create_file("config", 0600, hb->dbgfs.base_dir,
+ accel_dev, &adf_hb_cfg_fops);
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_add);
+
+void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ struct adf_heartbeat *hb = accel_dev->heartbeat;
+
+ if (!hb)
+ return;
+
+ debugfs_remove(hb->dbgfs.status);
+ hb->dbgfs.status = NULL;
+ debugfs_remove(hb->dbgfs.sent);
+ hb->dbgfs.sent = NULL;
+ debugfs_remove(hb->dbgfs.failed);
+ hb->dbgfs.failed = NULL;
+ debugfs_remove(hb->dbgfs.cfg);
+ hb->dbgfs.cfg = NULL;
+ debugfs_remove(hb->dbgfs.base_dir);
+ hb->dbgfs.base_dir = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_rm);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h
new file mode 100644
index 000000000000..84dd29ea6454
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_HEARTBEAT_DBGFS_H_
+#define ADF_HEARTBEAT_DBGFS_H_
+
+struct adf_accel_dev;
+
+void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_HEARTBEAT_DBGFS_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index 826179c98524..89001fe92e76 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -8,6 +8,7 @@
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_dbgfs.h"
+#include "adf_heartbeat.h"
static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);
@@ -129,6 +130,8 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
+ adf_heartbeat_init(accel_dev);
+
/*
* Subservice initialisation is divided into two stages: init and start.
* This is to facilitate any ordering dependencies between services
@@ -163,6 +166,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
+ int ret;
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -177,6 +181,14 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
+ if (hw_data->measure_clock) {
+ ret = hw_data->measure_clock(accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to measure device clock\n");
+ return ret;
+ }
+ }
+
/* Set ssm watch dog timer */
if (hw_data->set_ssm_wdtimer)
hw_data->set_ssm_wdtimer(accel_dev);
@@ -187,6 +199,16 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
+ if (hw_data->start_timer) {
+ ret = hw_data->start_timer(accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
+ return ret;
+ }
+ }
+
+ adf_heartbeat_start(accel_dev);
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
@@ -235,6 +257,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
*/
static void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
bool wait = false;
@@ -270,6 +293,9 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
}
}
+ if (hw_data->stop_timer)
+ hw_data->stop_timer(accel_dev);
+
if (wait)
msleep(100);
@@ -326,6 +352,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
clear_bit(accel_dev->accel_id, service->init_status);
}
+ adf_heartbeat_shutdown(accel_dev);
+
hw_data->disable_iov(accel_dev);
if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index ad9e135b8560..2aba194a7c29 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -380,3 +380,9 @@ bool adf_misc_wq_queue_work(struct work_struct *work)
{
return queue_work(adf_misc_wq, work);
}
+
+bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
+ unsigned long delay)
+{
+ return queue_delayed_work(adf_misc_wq, work, delay);
+}
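This mirrors adf_misc_wq_queue_work() for delayed work on the same miscellaneous workqueue. A sketch of a self-rearming periodic job built on it (the callback and the 200 ms period are illustrative):

	static void hb_update_fn(struct work_struct *work);
	static DECLARE_DELAYED_WORK(hb_update, hb_update_fn);

	static void hb_update_fn(struct work_struct *work)
	{
		/* ... perform the periodic update ... */
		adf_misc_wq_queue_delayed_work(&hb_update, msecs_to_jiffies(200));
	}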
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
index 56cb827f93ea..3e968a4bcc9c 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
@@ -16,6 +16,8 @@ enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_HEARTBEAT_SYNC = 7,
ICP_QAT_FW_HEARTBEAT_GET = 8,
ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
+ ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13,
+ ICP_QAT_FW_TIMER_GET = 19,
ICP_QAT_FW_PM_STATE_CONFIG = 128,
};
@@ -37,6 +39,12 @@ struct icp_qat_fw_init_admin_req {
__u16 ibuf_size_in_kb;
__u16 resrvd3;
};
+ struct {
+ __u32 int_timer_ticks;
+ };
+ struct {
+ __u32 heartbeat_ticks;
+ };
__u32 idle_filter;
};
@@ -97,19 +105,6 @@ struct icp_qat_fw_init_admin_resp {
};
} __packed;
-#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
-#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1
-#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
-#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
- ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
-
-#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
- ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
+#define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \
- QAT_FIELD_GET(flags, \
- ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \
- ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK)
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
index a65059e56248..0c8883e2ccc6 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
@@ -97,7 +97,10 @@ enum icp_qat_capabilities_mask {
ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15),
ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16),
ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17),
- /* Bits 18-21 are currently reserved */
+ ICP_ACCEL_CAPABILITIES_SM2 = BIT(18),
+ ICP_ACCEL_CAPABILITIES_SM3 = BIT(19),
+ ICP_ACCEL_CAPABILITIES_SM4 = BIT(20),
+ /* Bit 21 is currently reserved */
ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY = BIT(22),
ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23),
ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24),
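The new SM2/SM3/SM4 bits fill the previously reserved 18-20 range and are tested like any other bit in the capability mask, for example (the hw_caps variable is illustrative):

	if (hw_caps & ICP_ACCEL_CAPABILITIES_SM4)
		pr_debug("QAT: SM4 cipher offload available\n");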
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index 3f1f35283266..7842a9f22178 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -234,8 +234,7 @@ static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
DMA_FROM_DEVICE);
- memset(dc_data->ovf_buff, 0, dc_data->ovf_buff_sz);
- kfree(dc_data->ovf_buff);
+ kfree_sensitive(dc_data->ovf_buff);
devm_kfree(dev, dc_data);
accel_dev->dc_data = NULL;
}
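kfree_sensitive() zeroizes the buffer via memzero_explicit() before freeing it, so unlike the removed open-coded memset() the clearing cannot be optimized away by the compiler; the overflow buffer is still guaranteed to be wiped.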
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index ce837bcc1cab..4bd150d1441a 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -11,7 +11,7 @@
#include "icp_qat_hal.h"
#include "icp_qat_fw_loader_handle.h"
-#define UWORD_CPYBUF_SIZE 1024
+#define UWORD_CPYBUF_SIZE 1024U
#define INVLD_UWORD 0xffffffffffull
#define PID_MINOR_REV 0xf
#define PID_MAJOR_REV (0xf << 4)
@@ -1986,10 +1986,7 @@ static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
uw_relative_addr = 0;
words_num = encap_page->micro_words_num;
while (words_num) {
- if (words_num < UWORD_CPYBUF_SIZE)
- cpylen = words_num;
- else
- cpylen = UWORD_CPYBUF_SIZE;
+ cpylen = min(words_num, UWORD_CPYBUF_SIZE);
/* load the buffer */
for (i = 0; i < cpylen; i++)
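The U suffix on UWORD_CPYBUF_SIZE matters for the min() conversion above: the kernel's type-checking min() requires both arguments to have the same signedness, and words_num is unsigned, so the constant must be unsigned as well.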
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 1ebe0b351fae..09551f949126 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -7,6 +7,7 @@
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_dh895xcc_hw_data.h"
+#include "adf_heartbeat.h"
#include "icp_qat_hw.h"
#define ADF_DH895XCC_VF_MSK 0xFFFFFFFF
@@ -44,6 +45,14 @@ static u32 get_misc_bar_id(struct adf_hw_device_data *self)
return ADF_DH895XCC_PMISC_BAR;
}
+static u32 get_ts_clock(struct adf_hw_device_data *self)
+{
+ /*
+ * Timestamp update interval is 16 AE clock ticks for dh895xcc.
+ */
+ return self->clock_frequency / 16;
+}
+
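With ADF_DH895X_AE_FREQ set to 933 MHz (see the header change below), this yields a heartbeat clock of 933000000 / 16, roughly 58.3 MHz.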
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
return ADF_DH895XCC_ETR_BAR;
@@ -237,6 +246,10 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->reset_device = adf_reset_sbr;
hw_data->disable_iov = adf_disable_sriov;
hw_data->dev_config = adf_gen2_dev_config;
+ hw_data->clock_frequency = ADF_DH895X_AE_FREQ;
+ hw_data->get_hb_clock = get_ts_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;
adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 7b674bbe4192..cd3a21985455 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -3,6 +3,8 @@
#ifndef ADF_DH895x_HW_DATA_H_
#define ADF_DH895x_HW_DATA_H_
+#include <linux/units.h>
+
/* PCIe configuration space */
#define ADF_DH895XCC_SRAM_BAR 0
#define ADF_DH895XCC_PMISC_BAR 1
@@ -30,6 +32,9 @@
#define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
#define ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS 12
+/* Clocks frequency */
+#define ADF_DH895X_AE_FREQ (933 * HZ_PER_MHZ)
+
/* FW names */
#define ADF_DH895XCC_FW "qat_895xcc.bin"
#define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin"
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 4f6ca229ee5e..d5a32d71a3e9 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 9f937bdc53a7..c498950402e8 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -7,18 +7,21 @@
* Copyright (c) 2016 Texas Instruments Incorporated
*/
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
#include <linux/errno.h>
-#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/omap-dma.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
-#include <crypto/aes.h>
-#include <crypto/gcm.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/skcipher.h>
-#include <crypto/internal/aead.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
#include "omap-crypto.h"
#include "omap-aes.h"
@@ -212,12 +215,10 @@ static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
return 0;
}
-static int omap_aes_gcm_prepare_req(struct crypto_engine *engine, void *areq)
+static int omap_aes_gcm_prepare_req(struct aead_request *req,
+ struct omap_aes_dev *dd)
{
- struct aead_request *req = container_of(areq, struct aead_request,
- base);
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- struct omap_aes_dev *dd = rctx->dd;
struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
int err;
@@ -356,17 +357,21 @@ int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
return crypto_rfc4106_check_authsize(authsize);
}
-static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
+int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
{
struct aead_request *req = container_of(areq, struct aead_request,
base);
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
struct omap_aes_dev *dd = rctx->dd;
- int ret = 0;
+ int ret;
if (!dd)
return -ENODEV;
+ ret = omap_aes_gcm_prepare_req(req, dd);
+ if (ret)
+ return ret;
+
if (dd->in_sg_len)
ret = omap_aes_crypt_dma_start(dd);
else
@@ -377,12 +382,6 @@ static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
-
- ctx->enginectx.op.prepare_request = omap_aes_gcm_prepare_req;
- ctx->enginectx.op.unprepare_request = NULL;
- ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req;
-
crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
return 0;
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 67a99c760bc4..ed83023dd77a 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -13,28 +13,26 @@
#define prn(num) pr_debug(#num "=%d\n", num)
#define prx(num) pr_debug(#num "=%x\n", num)
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/init.h>
-#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-#include <linux/pm_runtime.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/crypto.h>
-#include <linux/interrupt.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/aes.h>
-#include <crypto/gcm.h>
-#include <crypto/engine.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/internal/aead.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
#include "omap-crypto.h"
#include "omap-aes.h"
@@ -426,20 +424,15 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
return 0;
}
-static int omap_aes_prepare_req(struct crypto_engine *engine,
- void *areq)
+static int omap_aes_prepare_req(struct skcipher_request *req,
+ struct omap_aes_dev *dd)
{
- struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
- struct omap_aes_dev *dd = rctx->dd;
int ret;
u16 flags;
- if (!dd)
- return -ENODEV;
-
/* assign new request to device */
dd->req = req;
dd->total = req->cryptlen;
@@ -491,7 +484,8 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
if (!dd)
return -ENODEV;
- return omap_aes_crypt_dma_start(dd);
+ return omap_aes_prepare_req(req, dd) ?:
+ omap_aes_crypt_dma_start(dd);
}
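The ?: here is the GNU conditional-omission operator: a nonzero error from omap_aes_prepare_req() is returned directly, otherwise DMA is started. It is shorthand for:

	ret = omap_aes_prepare_req(req, dd);
	return ret ? ret : omap_aes_crypt_dma_start(dd);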
static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
@@ -629,11 +623,6 @@ static int omap_aes_ctr_decrypt(struct skcipher_request *req)
return omap_aes_crypt(req, FLAGS_CTR);
}
-static int omap_aes_prepare_req(struct crypto_engine *engine,
- void *req);
-static int omap_aes_crypt_req(struct crypto_engine *engine,
- void *req);
-
static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
{
const char *name = crypto_tfm_alg_name(&tfm->base);
@@ -649,10 +638,6 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx) +
crypto_skcipher_reqsize(blk));
- ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
- ctx->enginectx.op.unprepare_request = NULL;
- ctx->enginectx.op.do_one_request = omap_aes_crypt_req;
-
return 0;
}
@@ -668,68 +653,77 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
/* ********************** ALGS ************************************ */
-static struct skcipher_alg algs_ecb_cbc[] = {
+static struct skcipher_engine_alg algs_ecb_cbc[] = {
{
- .base.cra_name = "ecb(aes)",
- .base.cra_driver_name = "ecb-aes-omap",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_ecb_encrypt,
- .decrypt = omap_aes_ecb_decrypt,
- .init = omap_aes_init_tfm,
- .exit = omap_aes_exit_tfm,
+ .base = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_ecb_encrypt,
+ .decrypt = omap_aes_ecb_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
+ },
+ .op.do_one_request = omap_aes_crypt_req,
},
{
- .base.cra_name = "cbc(aes)",
- .base.cra_driver_name = "cbc-aes-omap",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_cbc_encrypt,
- .decrypt = omap_aes_cbc_decrypt,
- .init = omap_aes_init_tfm,
- .exit = omap_aes_exit_tfm,
+ .base = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_cbc_encrypt,
+ .decrypt = omap_aes_cbc_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
+ },
+ .op.do_one_request = omap_aes_crypt_req,
}
};
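The wrapper type adopted here pairs the classic algorithm descriptor with the engine callback, which is what allows the per-tfm enginectx hooks to be dropped from init_tfm(). As of this series its shape is essentially:

	struct skcipher_engine_alg {
		struct skcipher_alg base;
		struct crypto_engine_op op;	/* ->do_one_request() */
	};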
-static struct skcipher_alg algs_ctr[] = {
+static struct skcipher_engine_alg algs_ctr[] = {
{
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "ctr-aes-omap",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_ctr_encrypt,
- .decrypt = omap_aes_ctr_decrypt,
- .init = omap_aes_init_tfm,
- .exit = omap_aes_exit_tfm,
+ .base = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_ctr_encrypt,
+ .decrypt = omap_aes_ctr_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
+ },
+ .op.do_one_request = omap_aes_crypt_req,
}
};
@@ -740,46 +734,52 @@ static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
},
};
-static struct aead_alg algs_aead_gcm[] = {
+static struct aead_engine_alg algs_aead_gcm[] = {
{
.base = {
- .cra_name = "gcm(aes)",
- .cra_driver_name = "gcm-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-omap",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .init = omap_aes_gcm_cra_init,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_gcm_setkey,
+ .setauthsize = omap_aes_gcm_setauthsize,
+ .encrypt = omap_aes_gcm_encrypt,
+ .decrypt = omap_aes_gcm_decrypt,
},
- .init = omap_aes_gcm_cra_init,
- .ivsize = GCM_AES_IV_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setkey = omap_aes_gcm_setkey,
- .setauthsize = omap_aes_gcm_setauthsize,
- .encrypt = omap_aes_gcm_encrypt,
- .decrypt = omap_aes_gcm_decrypt,
+ .op.do_one_request = omap_aes_gcm_crypt_req,
},
{
.base = {
- .cra_name = "rfc4106(gcm(aes))",
- .cra_driver_name = "rfc4106-gcm-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-omap",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .init = omap_aes_gcm_cra_init,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .ivsize = GCM_RFC4106_IV_SIZE,
+ .setkey = omap_aes_4106gcm_setkey,
+ .setauthsize = omap_aes_4106gcm_setauthsize,
+ .encrypt = omap_aes_4106gcm_encrypt,
+ .decrypt = omap_aes_4106gcm_decrypt,
},
- .init = omap_aes_gcm_cra_init,
- .maxauthsize = AES_BLOCK_SIZE,
- .ivsize = GCM_RFC4106_IV_SIZE,
- .setkey = omap_aes_4106gcm_setkey,
- .setauthsize = omap_aes_4106gcm_setauthsize,
- .encrypt = omap_aes_4106gcm_encrypt,
- .decrypt = omap_aes_4106gcm_decrypt,
+ .op.do_one_request = omap_aes_gcm_crypt_req,
},
};
@@ -1101,8 +1101,8 @@ static int omap_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_aes_dev *dd;
- struct skcipher_alg *algp;
- struct aead_alg *aalg;
+ struct skcipher_engine_alg *algp;
+ struct aead_engine_alg *aalg;
struct resource res;
int err = -ENOMEM, i, j, irq = -1;
u32 reg;
@@ -1195,9 +1195,9 @@ static int omap_aes_probe(struct platform_device *pdev)
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
algp = &dd->pdata->algs_info[i].algs_list[j];
- pr_debug("reg alg: %s\n", algp->base.cra_name);
+ pr_debug("reg alg: %s\n", algp->base.base.cra_name);
- err = crypto_register_skcipher(algp);
+ err = crypto_engine_register_skcipher(algp);
if (err)
goto err_algs;
@@ -1211,9 +1211,9 @@ static int omap_aes_probe(struct platform_device *pdev)
for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
aalg = &dd->pdata->aead_algs_info->algs_list[i];
- pr_debug("reg alg: %s\n", aalg->base.cra_name);
+ pr_debug("reg alg: %s\n", aalg->base.base.cra_name);
- err = crypto_register_aead(aalg);
+ err = crypto_engine_register_aead(aalg);
if (err)
goto err_aead_algs;
@@ -1231,12 +1231,12 @@ static int omap_aes_probe(struct platform_device *pdev)
err_aead_algs:
for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
aalg = &dd->pdata->aead_algs_info->algs_list[i];
- crypto_unregister_aead(aalg);
+ crypto_engine_unregister_aead(aalg);
}
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_skcipher(
+ crypto_engine_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
err_engine:
@@ -1258,7 +1258,7 @@ err_data:
static int omap_aes_remove(struct platform_device *pdev)
{
struct omap_aes_dev *dd = platform_get_drvdata(pdev);
- struct aead_alg *aalg;
+ struct aead_engine_alg *aalg;
int i, j;
spin_lock_bh(&list_lock);
@@ -1267,14 +1267,14 @@ static int omap_aes_remove(struct platform_device *pdev)
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
- crypto_unregister_skcipher(
+ crypto_engine_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
dd->pdata->algs_info[i].registered--;
}
for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
aalg = &dd->pdata->aead_algs_info->algs_list[i];
- crypto_unregister_aead(aalg);
+ crypto_engine_unregister_aead(aalg);
dd->pdata->aead_algs_info->registered--;
}
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 23d073e87bb8..0f35c9164764 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -10,7 +10,6 @@
#define __OMAP_AES_H__
#include <crypto/aes.h>
-#include <crypto/engine.h>
#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))
@@ -93,7 +92,6 @@ struct omap_aes_gcm_result {
};
struct omap_aes_ctx {
- struct crypto_engine_ctx enginectx;
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
@@ -117,15 +115,15 @@ struct omap_aes_reqctx {
#define OMAP_AES_CACHE_SIZE 0
struct omap_aes_algs_info {
- struct skcipher_alg *algs_list;
- unsigned int size;
- unsigned int registered;
+ struct skcipher_engine_alg *algs_list;
+ unsigned int size;
+ unsigned int registered;
};
struct omap_aes_aead_algs {
- struct aead_alg *algs_list;
- unsigned int size;
- unsigned int registered;
+ struct aead_engine_alg *algs_list;
+ unsigned int size;
+ unsigned int registered;
};
struct omap_aes_pdata {
@@ -218,5 +216,6 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
void omap_aes_gcm_dma_out_callback(void *data);
void omap_aes_clear_copy_flags(struct omap_aes_dev *dd);
+int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq);
#endif
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index f783769ea110..089dd45eaedd 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -16,27 +16,23 @@
#define prx(num) do { } while (0)
#endif
+#include <crypto/engine.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/init.h>
-#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/crypto.h>
-#include <linux/interrupt.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/des.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/algapi.h>
-#include <crypto/engine.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
#include "omap-crypto.h"
@@ -83,7 +79,6 @@
#define FLAGS_OUT_DATA_ST_SHIFT 10
struct omap_des_ctx {
- struct crypto_engine_ctx enginectx;
struct omap_des_dev *dd;
int keylen;
@@ -99,9 +94,9 @@ struct omap_des_reqctx {
#define OMAP_DES_CACHE_SIZE 0
struct omap_des_algs_info {
- struct skcipher_alg *algs_list;
- unsigned int size;
- unsigned int registered;
+ struct skcipher_engine_alg *algs_list;
+ unsigned int size;
+ unsigned int registered;
};
struct omap_des_pdata {
@@ -522,20 +517,15 @@ static int omap_des_handle_queue(struct omap_des_dev *dd,
return 0;
}
-static int omap_des_prepare_req(struct crypto_engine *engine,
- void *areq)
+static int omap_des_prepare_req(struct skcipher_request *req,
+ struct omap_des_dev *dd)
{
- struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
struct omap_des_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
- struct omap_des_dev *dd = omap_des_find_dev(ctx);
struct omap_des_reqctx *rctx;
int ret;
u16 flags;
- if (!dd)
- return -ENODEV;
-
/* assign new request to device */
dd->req = req;
dd->total = req->cryptlen;
@@ -590,7 +580,8 @@ static int omap_des_crypt_req(struct crypto_engine *engine,
if (!dd)
return -ENODEV;
- return omap_des_crypt_dma_start(dd);
+ return omap_des_prepare_req(req, dd) ?:
+ omap_des_crypt_dma_start(dd);
}
static void omap_des_done_task(unsigned long data)
@@ -709,98 +700,99 @@ static int omap_des_cbc_decrypt(struct skcipher_request *req)
return omap_des_crypt(req, FLAGS_CBC);
}
-static int omap_des_prepare_req(struct crypto_engine *engine,
- void *areq);
-static int omap_des_crypt_req(struct crypto_engine *engine,
- void *areq);
-
static int omap_des_init_tfm(struct crypto_skcipher *tfm)
{
- struct omap_des_ctx *ctx = crypto_skcipher_ctx(tfm);
-
pr_debug("enter\n");
crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_des_reqctx));
- ctx->enginectx.op.prepare_request = omap_des_prepare_req;
- ctx->enginectx.op.unprepare_request = NULL;
- ctx->enginectx.op.do_one_request = omap_des_crypt_req;
-
return 0;
}
/* ********************** ALGS ************************************ */
-static struct skcipher_alg algs_ecb_cbc[] = {
+static struct skcipher_engine_alg algs_ecb_cbc[] = {
{
- .base.cra_name = "ecb(des)",
- .base.cra_driver_name = "ecb-des-omap",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct omap_des_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = omap_des_setkey,
- .encrypt = omap_des_ecb_encrypt,
- .decrypt = omap_des_ecb_decrypt,
- .init = omap_des_init_tfm,
+ .base = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = omap_des_setkey,
+ .encrypt = omap_des_ecb_encrypt,
+ .decrypt = omap_des_ecb_decrypt,
+ .init = omap_des_init_tfm,
+ },
+ .op.do_one_request = omap_des_crypt_req,
},
{
- .base.cra_name = "cbc(des)",
- .base.cra_driver_name = "cbc-des-omap",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct omap_des_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = omap_des_setkey,
- .encrypt = omap_des_cbc_encrypt,
- .decrypt = omap_des_cbc_decrypt,
- .init = omap_des_init_tfm,
+ .base = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = omap_des_setkey,
+ .encrypt = omap_des_cbc_encrypt,
+ .decrypt = omap_des_cbc_decrypt,
+ .init = omap_des_init_tfm,
+ },
+ .op.do_one_request = omap_des_crypt_req,
},
{
- .base.cra_name = "ecb(des3_ede)",
- .base.cra_driver_name = "ecb-des3-omap",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct omap_des_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = omap_des3_setkey,
- .encrypt = omap_des_ecb_encrypt,
- .decrypt = omap_des_ecb_decrypt,
- .init = omap_des_init_tfm,
+ .base = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = omap_des3_setkey,
+ .encrypt = omap_des_ecb_encrypt,
+ .decrypt = omap_des_ecb_decrypt,
+ .init = omap_des_init_tfm,
+ },
+ .op.do_one_request = omap_des_crypt_req,
},
{
- .base.cra_name = "cbc(des3_ede)",
- .base.cra_driver_name = "cbc-des3-omap",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct omap_des_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = omap_des3_setkey,
- .encrypt = omap_des_cbc_encrypt,
- .decrypt = omap_des_cbc_decrypt,
- .init = omap_des_init_tfm,
+ .base = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = omap_des3_setkey,
+ .encrypt = omap_des_cbc_encrypt,
+ .decrypt = omap_des_cbc_decrypt,
+ .init = omap_des_init_tfm,
+ },
+ .op.do_one_request = omap_des_crypt_req,
}
};
@@ -958,7 +950,7 @@ static int omap_des_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_des_dev *dd;
- struct skcipher_alg *algp;
+ struct skcipher_engine_alg *algp;
struct resource *res;
int err = -ENOMEM, i, j, irq = -1;
u32 reg;
@@ -971,18 +963,12 @@ static int omap_des_probe(struct platform_device *pdev)
dd->dev = dev;
platform_set_drvdata(pdev, dd);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "no MEM resource info\n");
- goto err_res;
- }
-
err = (dev->of_node) ? omap_des_get_of(dd, pdev) :
omap_des_get_pdev(dd, pdev);
if (err)
goto err_res;
- dd->io_base = devm_ioremap_resource(dev, res);
+ dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(dd->io_base)) {
err = PTR_ERR(dd->io_base);
goto err_res;
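devm_platform_get_and_ioremap_resource() folds the platform_get_resource() lookup into devm_ioremap_resource(), which is why the explicit resource fetch and its NULL check above can go; the resource is still handed back through &res for later use in the function.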
@@ -1052,9 +1038,9 @@ static int omap_des_probe(struct platform_device *pdev)
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
algp = &dd->pdata->algs_info[i].algs_list[j];
- pr_debug("reg alg: %s\n", algp->base.cra_name);
+ pr_debug("reg alg: %s\n", algp->base.base.cra_name);
- err = crypto_register_skcipher(algp);
+ err = crypto_engine_register_skcipher(algp);
if (err)
goto err_algs;
@@ -1067,7 +1053,7 @@ static int omap_des_probe(struct platform_device *pdev)
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_skcipher(
+ crypto_engine_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
err_engine:
@@ -1097,7 +1083,7 @@ static int omap_des_remove(struct platform_device *pdev)
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_skcipher(
+ crypto_engine_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task);
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index cbeda59c6b19..a6b4a0b3ace3 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -13,34 +13,30 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
+#include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
#include <linux/err.h>
#include <linux/device.h>
-#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
-#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/irq.h>
#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-#include <linux/pm_runtime.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/delay.h>
-#include <linux/crypto.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/algapi.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/hash.h>
-#include <crypto/hmac.h>
-#include <crypto/internal/hash.h>
-#include <crypto/engine.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#define MD5_DIGEST_SIZE 16
@@ -168,7 +164,6 @@ struct omap_sham_hmac_ctx {
};
struct omap_sham_ctx {
- struct crypto_engine_ctx enginectx;
unsigned long flags;
/* fallback stuff */
@@ -180,7 +175,7 @@ struct omap_sham_ctx {
#define OMAP_SHAM_QUEUE_LENGTH 10
struct omap_sham_algs_info {
- struct ahash_alg *algs_list;
+ struct ahash_engine_alg *algs_list;
unsigned int size;
unsigned int registered;
};
@@ -1074,6 +1069,10 @@ static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
ctx->op, ctx->total, ctx->digcnt, final);
+ err = omap_sham_prepare_request(engine, areq);
+ if (err)
+ return err;
+
err = pm_runtime_resume_and_get(dd->dev);
if (err < 0) {
dev_err(dd->dev, "failed to get sync: %d\n", err);
@@ -1349,10 +1348,6 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
- tctx->enginectx.op.do_one_request = omap_sham_hash_one_req;
- tctx->enginectx.op.prepare_request = omap_sham_prepare_request;
- tctx->enginectx.op.unprepare_request = NULL;
-
return 0;
}
@@ -1423,15 +1418,15 @@ static int omap_sham_import(struct ahash_request *req, const void *in)
return 0;
}
-static struct ahash_alg algs_sha1_md5[] = {
+static struct ahash_engine_alg algs_sha1_md5[] = {
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.halg.digestsize = SHA1_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "omap-sha1",
.cra_priority = 400,
@@ -1444,16 +1439,17 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .halg.digestsize = MD5_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.halg.digestsize = MD5_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "md5",
.cra_driver_name = "omap-md5",
.cra_priority = 400,
@@ -1466,17 +1462,18 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .setkey = omap_sham_setkey,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.setkey = omap_sham_setkey,
+ .base.halg.digestsize = SHA1_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "omap-hmac-sha1",
.cra_priority = 400,
@@ -1490,17 +1487,18 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha1_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .setkey = omap_sham_setkey,
- .halg.digestsize = MD5_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.setkey = omap_sham_setkey,
+ .base.halg.digestsize = MD5_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "omap-hmac-md5",
.cra_priority = 400,
@@ -1514,20 +1512,21 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_md5_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
}
};
/* OMAP4 has some algs in addition to what OMAP2 has */
-static struct ahash_alg algs_sha224_sha256[] = {
-{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .halg.digestsize = SHA224_DIGEST_SIZE,
- .halg.base = {
+static struct ahash_engine_alg algs_sha224_sha256[] = {
+{
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.halg.digestsize = SHA224_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "sha224",
.cra_driver_name = "omap-sha224",
.cra_priority = 400,
@@ -1540,16 +1539,17 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.halg.digestsize = SHA256_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "omap-sha256",
.cra_priority = 400,
@@ -1562,17 +1562,18 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .setkey = omap_sham_setkey,
- .halg.digestsize = SHA224_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.setkey = omap_sham_setkey,
+ .base.halg.digestsize = SHA224_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "omap-hmac-sha224",
.cra_priority = 400,
@@ -1586,17 +1587,18 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha224_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .setkey = omap_sham_setkey,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.setkey = omap_sham_setkey,
+ .base.halg.digestsize = SHA256_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "omap-hmac-sha256",
.cra_priority = 400,
@@ -1610,19 +1612,20 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha256_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
};
-static struct ahash_alg algs_sha384_sha512[] = {
+static struct ahash_engine_alg algs_sha384_sha512[] = {
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .halg.digestsize = SHA384_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.halg.digestsize = SHA384_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "sha384",
.cra_driver_name = "omap-sha384",
.cra_priority = 400,
@@ -1635,16 +1638,17 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .halg.digestsize = SHA512_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.halg.digestsize = SHA512_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "sha512",
.cra_driver_name = "omap-sha512",
.cra_priority = 400,
@@ -1657,17 +1661,18 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .setkey = omap_sham_setkey,
- .halg.digestsize = SHA384_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.setkey = omap_sham_setkey,
+ .base.halg.digestsize = SHA384_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "omap-hmac-sha384",
.cra_priority = 400,
@@ -1681,17 +1686,18 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha384_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .setkey = omap_sham_setkey,
- .halg.digestsize = SHA512_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.setkey = omap_sham_setkey,
+ .base.halg.digestsize = SHA512_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "omap-hmac-sha512",
.cra_priority = 400,
@@ -1705,7 +1711,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha512_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
};
@@ -2146,14 +2153,16 @@ static int omap_sham_probe(struct platform_device *pdev)
break;
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
+ struct ahash_engine_alg *ealg;
struct ahash_alg *alg;
- alg = &dd->pdata->algs_info[i].algs_list[j];
+ ealg = &dd->pdata->algs_info[i].algs_list[j];
+ alg = &ealg->base;
alg->export = omap_sham_export;
alg->import = omap_sham_import;
alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
BUFLEN;
- err = crypto_register_ahash(alg);
+ err = crypto_engine_register_ahash(ealg);
if (err)
goto err_algs;
@@ -2172,7 +2181,7 @@ static int omap_sham_probe(struct platform_device *pdev)
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_ahash(
+ crypto_engine_unregister_ahash(
&dd->pdata->algs_info[i].algs_list[j]);
err_engine_start:
crypto_engine_exit(dd->engine);
@@ -2203,7 +2212,7 @@ static int omap_sham_remove(struct platform_device *pdev)
spin_unlock_bh(&sham.lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
- crypto_unregister_ahash(
+ crypto_engine_unregister_ahash(
&dd->pdata->algs_info[i].algs_list[j]);
dd->pdata->algs_info[i].registered--;
}
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 72dd1a4ebac4..825a729f205e 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -173,13 +173,9 @@ static int qcom_rng_probe(struct platform_device *pdev)
if (IS_ERR(rng->base))
return PTR_ERR(rng->base);
- /* ACPI systems have clk already on, so skip clk_get */
- if (!has_acpi_companion(&pdev->dev)) {
- rng->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(rng->clk))
- return PTR_ERR(rng->clk);
- }
-
+ rng->clk = devm_clk_get_optional(&pdev->dev, "core");
+ if (IS_ERR(rng->clk))
+ return PTR_ERR(rng->clk);
rng->skip_init = (unsigned long)device_get_match_data(&pdev->dev);
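devm_clk_get_optional() returns NULL rather than an error when no "core" clock is described (as on ACPI systems), and the clk API treats a NULL clock as a no-op, so the has_acpi_companion() special case is no longer needed.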
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index 9f6ba770a90a..77d5705a5d96 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -10,14 +10,21 @@
*/
#include "rk3288_crypto.h"
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/reset.h>
+#include <linux/spinlock.h>
static struct rockchip_ip rocklist = {
.dev_list = LIST_HEAD_INIT(rocklist.dev_list),
@@ -184,7 +191,6 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = {
&rk_ahash_md5,
};
-#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
{
struct rk_crypto_info *dd;
@@ -204,8 +210,8 @@ static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
switch (rk_cipher_algs[i]->type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
- rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name,
- rk_cipher_algs[i]->alg.skcipher.base.cra_name,
+ rk_cipher_algs[i]->alg.skcipher.base.base.cra_driver_name,
+ rk_cipher_algs[i]->alg.skcipher.base.base.cra_name,
rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
seq_printf(seq, "\tfallback due to length: %lu\n",
rk_cipher_algs[i]->stat_fb_len);
@@ -216,8 +222,8 @@ static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
break;
case CRYPTO_ALG_TYPE_AHASH:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
- rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name,
- rk_cipher_algs[i]->alg.hash.halg.base.cra_name,
+ rk_cipher_algs[i]->alg.hash.base.halg.base.cra_driver_name,
+ rk_cipher_algs[i]->alg.hash.base.halg.base.cra_name,
rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
break;
}
@@ -226,17 +232,20 @@ static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs);
-#endif
static void register_debugfs(struct rk_crypto_info *crypto_info)
{
-#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+ struct dentry *dbgfs_dir __maybe_unused;
+ struct dentry *dbgfs_stats __maybe_unused;
+
/* Ignore error of debugfs */
- rocklist.dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
- rocklist.dbgfs_stats = debugfs_create_file("stats", 0444,
- rocklist.dbgfs_dir,
- &rocklist,
- &rk_crypto_debugfs_fops);
+ dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
+ dbgfs_stats = debugfs_create_file("stats", 0444, dbgfs_dir, &rocklist,
+ &rk_crypto_debugfs_fops);
+
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+ rocklist.dbgfs_dir = dbgfs_dir;
+ rocklist.dbgfs_stats = dbgfs_stats;
#endif
}
@@ -250,15 +259,15 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
switch (rk_cipher_algs[i]->type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(crypto_info->dev, "Register %s as %s\n",
- rk_cipher_algs[i]->alg.skcipher.base.cra_name,
- rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name);
- err = crypto_register_skcipher(&rk_cipher_algs[i]->alg.skcipher);
+ rk_cipher_algs[i]->alg.skcipher.base.base.cra_name,
+ rk_cipher_algs[i]->alg.skcipher.base.base.cra_driver_name);
+ err = crypto_engine_register_skcipher(&rk_cipher_algs[i]->alg.skcipher);
break;
case CRYPTO_ALG_TYPE_AHASH:
dev_info(crypto_info->dev, "Register %s as %s\n",
- rk_cipher_algs[i]->alg.hash.halg.base.cra_name,
- rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name);
- err = crypto_register_ahash(&rk_cipher_algs[i]->alg.hash);
+ rk_cipher_algs[i]->alg.hash.base.halg.base.cra_name,
+ rk_cipher_algs[i]->alg.hash.base.halg.base.cra_driver_name);
+ err = crypto_engine_register_ahash(&rk_cipher_algs[i]->alg.hash);
break;
default:
dev_err(crypto_info->dev, "unknown algorithm\n");
@@ -271,9 +280,9 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
err_cipher_algs:
for (k = 0; k < i; k++) {
if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER)
- crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
+ crypto_engine_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
else
- crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
+ crypto_engine_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
}
return err;
}
@@ -284,9 +293,9 @@ static void rk_crypto_unregister(void)
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER)
- crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
+ crypto_engine_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
else
- crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
+ crypto_engine_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
}
}
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index b2695258cade..3aa03cbfb6be 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -3,21 +3,18 @@
#define __RK3288_CRYPTO_H__
#include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <crypto/algapi.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/scatterlist.h>
#include <crypto/engine.h>
+#include <crypto/internal/des.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
-
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
#define _SBF(v, f) ((v) << (f))
@@ -231,7 +228,6 @@ struct rk_crypto_info {
/* the private variable of hash */
struct rk_ahash_ctx {
- struct crypto_engine_ctx enginectx;
/* for fallback */
struct crypto_ahash *fallback_tfm;
};
@@ -246,7 +242,6 @@ struct rk_ahash_rctx {
/* the private variable of cipher */
struct rk_cipher_ctx {
- struct crypto_engine_ctx enginectx;
unsigned int keylen;
u8 key[AES_MAX_KEY_SIZE];
u8 iv[AES_BLOCK_SIZE];
@@ -264,8 +259,8 @@ struct rk_crypto_tmp {
u32 type;
struct rk_crypto_info *dev;
union {
- struct skcipher_alg skcipher;
- struct ahash_alg hash;
+ struct skcipher_engine_alg skcipher;
+ struct ahash_engine_alg hash;
} alg;
unsigned long stat_req;
unsigned long stat_fb;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index a78ff3dcd0b1..8c143180645e 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -8,9 +8,15 @@
*
* Some ideas are from marvell/cesa.c and s5p-sss.c driver.
*/
-#include <linux/device.h>
+
#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
#include "rk3288_crypto.h"
/*
@@ -40,8 +46,8 @@ static int rk_ahash_digest_fb(struct ahash_request *areq)
struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);
algt->stat_fb++;
@@ -240,14 +246,13 @@ static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
return 0;
}
-static int rk_hash_unprepare(struct crypto_engine *engine, void *breq)
+static void rk_hash_unprepare(struct crypto_engine *engine, void *breq)
{
struct ahash_request *areq = container_of(breq, struct ahash_request, base);
struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
struct rk_crypto_info *rkc = rctx->dev;
dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
- return 0;
}
static int rk_hash_run(struct crypto_engine *engine, void *breq)
@@ -255,11 +260,11 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
struct ahash_request *areq = container_of(breq, struct ahash_request, base);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);
struct scatterlist *sg = areq->src;
struct rk_crypto_info *rkc = rctx->dev;
- int err = 0;
+ int err;
int i;
u32 v;
@@ -267,6 +272,10 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
if (err)
return err;
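+ /* The engine core no longer calls prepare_request, so map the source sg list here */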
+ err = rk_hash_prepare(engine, breq);
+ if (err)
+ goto theend;
+
rctx->mode = 0;
algt->stat_req++;
@@ -327,15 +336,17 @@ theend:
crypto_finalize_hash_request(engine, breq, err);
local_bh_enable();
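+ /* Likewise, unmap the sg list ourselves now that unprepare_request is gone */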
+ rk_hash_unprepare(engine, breq);
+
return 0;
}
-static int rk_cra_hash_init(struct crypto_tfm *tfm)
+static int rk_hash_init_tfm(struct crypto_ahash *tfm)
{
- struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
- const char *alg_name = crypto_tfm_alg_name(tfm);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
- struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+ const char *alg_name = crypto_ahash_alg_name(tfm);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);
/* for fallback */
tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
@@ -345,27 +356,23 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm)
return PTR_ERR(tctx->fallback_tfm);
}
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ crypto_ahash_set_reqsize(tfm,
sizeof(struct rk_ahash_rctx) +
crypto_ahash_reqsize(tctx->fallback_tfm));
- tctx->enginectx.op.do_one_request = rk_hash_run;
- tctx->enginectx.op.prepare_request = rk_hash_prepare;
- tctx->enginectx.op.unprepare_request = rk_hash_unprepare;
-
return 0;
}
-static void rk_cra_hash_exit(struct crypto_tfm *tfm)
+static void rk_hash_exit_tfm(struct crypto_ahash *tfm)
{
- struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
crypto_free_ahash(tctx->fallback_tfm);
}
struct rk_crypto_tmp rk_ahash_sha1 = {
.type = CRYPTO_ALG_TYPE_AHASH,
- .alg.hash = {
+ .alg.hash.base = {
.init = rk_ahash_init,
.update = rk_ahash_update,
.final = rk_ahash_final,
@@ -373,6 +380,8 @@ struct rk_crypto_tmp rk_ahash_sha1 = {
.export = rk_ahash_export,
.import = rk_ahash_import,
.digest = rk_ahash_digest,
+ .init_tfm = rk_hash_init_tfm,
+ .exit_tfm = rk_hash_exit_tfm,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct sha1_state),
@@ -385,17 +394,18 @@ struct rk_crypto_tmp rk_ahash_sha1 = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_ahash_ctx),
.cra_alignmask = 3,
- .cra_init = rk_cra_hash_init,
- .cra_exit = rk_cra_hash_exit,
.cra_module = THIS_MODULE,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = rk_hash_run,
+ },
};
struct rk_crypto_tmp rk_ahash_sha256 = {
.type = CRYPTO_ALG_TYPE_AHASH,
- .alg.hash = {
+ .alg.hash.base = {
.init = rk_ahash_init,
.update = rk_ahash_update,
.final = rk_ahash_final,
@@ -403,6 +413,8 @@ struct rk_crypto_tmp rk_ahash_sha256 = {
.export = rk_ahash_export,
.import = rk_ahash_import,
.digest = rk_ahash_digest,
+ .init_tfm = rk_hash_init_tfm,
+ .exit_tfm = rk_hash_exit_tfm,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
@@ -415,17 +427,18 @@ struct rk_crypto_tmp rk_ahash_sha256 = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_ahash_ctx),
.cra_alignmask = 3,
- .cra_init = rk_cra_hash_init,
- .cra_exit = rk_cra_hash_exit,
.cra_module = THIS_MODULE,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = rk_hash_run,
+ },
};
struct rk_crypto_tmp rk_ahash_md5 = {
.type = CRYPTO_ALG_TYPE_AHASH,
- .alg.hash = {
+ .alg.hash.base = {
.init = rk_ahash_init,
.update = rk_ahash_update,
.final = rk_ahash_final,
@@ -433,6 +446,8 @@ struct rk_crypto_tmp rk_ahash_md5 = {
.export = rk_ahash_export,
.import = rk_ahash_import,
.digest = rk_ahash_digest,
+ .init_tfm = rk_hash_init_tfm,
+ .exit_tfm = rk_hash_exit_tfm,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct md5_state),
@@ -445,10 +460,11 @@ struct rk_crypto_tmp rk_ahash_md5 = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_ahash_ctx),
.cra_alignmask = 3,
- .cra_init = rk_cra_hash_init,
- .cra_exit = rk_cra_hash_exit,
.cra_module = THIS_MODULE,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = rk_hash_run,
+ },
};
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 59069457582b..da95747d973f 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -8,8 +8,14 @@
*
* Some ideas are from marvell-cesa.c and s5p-sss.c driver.
*/
-#include <linux/device.h>
+
+#include <crypto/engine.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
#include "rk3288_crypto.h"
#define RK_CRYPTO_DEC BIT(0)
@@ -18,7 +24,7 @@ static int rk_cipher_need_fallback(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
struct scatterlist *sgs, *sgd;
unsigned int stodo, dtodo, len;
unsigned int bs = crypto_skcipher_blocksize(tfm);
@@ -65,7 +71,7 @@ static int rk_cipher_fallback(struct skcipher_request *areq)
struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
int err;
algt->stat_fb++;
@@ -305,7 +311,7 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
unsigned int len = areq->cryptlen;
unsigned int todo;
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
struct rk_crypto_info *rkc = rctx->dev;
err = pm_runtime_resume_and_get(rkc->dev);
@@ -430,7 +436,7 @@ static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
const char *name = crypto_tfm_alg_name(&tfm->base);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback_tfm)) {
@@ -442,8 +448,6 @@ static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
tfm->reqsize = sizeof(struct rk_cipher_rctx) +
crypto_skcipher_reqsize(ctx->fallback_tfm);
- ctx->enginectx.op.do_one_request = rk_cipher_run;
-
return 0;
}
@@ -457,7 +461,7 @@ static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm)
struct rk_crypto_tmp rk_ecb_aes_alg = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "ecb-aes-rk",
.base.cra_priority = 300,
@@ -474,12 +478,15 @@ struct rk_crypto_tmp rk_ecb_aes_alg = {
.setkey = rk_aes_setkey,
.encrypt = rk_aes_ecb_encrypt,
.decrypt = rk_aes_ecb_decrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = rk_cipher_run,
+ },
};
struct rk_crypto_tmp rk_cbc_aes_alg = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cbc-aes-rk",
.base.cra_priority = 300,
@@ -497,12 +504,15 @@ struct rk_crypto_tmp rk_cbc_aes_alg = {
.setkey = rk_aes_setkey,
.encrypt = rk_aes_cbc_encrypt,
.decrypt = rk_aes_cbc_decrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = rk_cipher_run,
+ },
};
struct rk_crypto_tmp rk_ecb_des_alg = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-rk",
.base.cra_priority = 300,
@@ -519,12 +529,15 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
.setkey = rk_des_setkey,
.encrypt = rk_des_ecb_encrypt,
.decrypt = rk_des_ecb_decrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = rk_cipher_run,
+ },
};
struct rk_crypto_tmp rk_cbc_des_alg = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-rk",
.base.cra_priority = 300,
@@ -542,12 +555,15 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
.setkey = rk_des_setkey,
.encrypt = rk_des_cbc_encrypt,
.decrypt = rk_des_cbc_decrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = rk_cipher_run,
+ },
};
struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-des3-ede-rk",
.base.cra_priority = 300,
@@ -564,12 +580,15 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
.setkey = rk_tdes_setkey,
.encrypt = rk_des3_ede_ecb_encrypt,
.decrypt = rk_des3_ede_ecb_decrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = rk_cipher_run,
+ },
};
struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3-ede-rk",
.base.cra_priority = 300,
@@ -587,5 +606,8 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
.setkey = rk_tdes_setkey,
.encrypt = rk_des3_ede_cbc_encrypt,
.decrypt = rk_des3_ede_cbc_decrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = rk_cipher_run,
+ },
};
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 1c4d5fb05d69..fe8cf9ba8005 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index df5f9d675c57..6238d34f8db2 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -15,7 +15,8 @@
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 4c799df3e883..62d93526920f 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -27,7 +27,6 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
diff --git a/drivers/crypto/starfive/Kconfig b/drivers/crypto/starfive/Kconfig
index df745fcb09df..2cb192502c1b 100644
--- a/drivers/crypto/starfive/Kconfig
+++ b/drivers/crypto/starfive/Kconfig
@@ -12,6 +12,8 @@ config CRYPTO_DEV_JH7110
select CRYPTO_SHA512
select CRYPTO_SM3_GENERIC
select CRYPTO_RSA
+ select CRYPTO_AES
+ select CRYPTO_CCM
help
Support for StarFive JH7110 crypto hardware acceleration engine.
This module provides acceleration for public key algo,
diff --git a/drivers/crypto/starfive/Makefile b/drivers/crypto/starfive/Makefile
index 98b01d2f1ccf..8c137afe58ad 100644
--- a/drivers/crypto/starfive/Makefile
+++ b/drivers/crypto/starfive/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_JH7110) += jh7110-crypto.o
-jh7110-crypto-objs := jh7110-cryp.o jh7110-hash.o jh7110-rsa.o
+jh7110-crypto-objs := jh7110-cryp.o jh7110-hash.o jh7110-rsa.o jh7110-aes.o
diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c
new file mode 100644
index 000000000000..9378e6682f0e
--- /dev/null
+++ b/drivers/crypto/starfive/jh7110-aes.c
@@ -0,0 +1,1024 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * StarFive AES acceleration driver
+ *
+ * Copyright (c) 2022 StarFive Technology
+ */
+
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include "jh7110-cryp.h"
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#define STARFIVE_AES_REGS_OFFSET 0x100
+#define STARFIVE_AES_AESDIO0R (STARFIVE_AES_REGS_OFFSET + 0x0)
+#define STARFIVE_AES_KEY0 (STARFIVE_AES_REGS_OFFSET + 0x4)
+#define STARFIVE_AES_KEY1 (STARFIVE_AES_REGS_OFFSET + 0x8)
+#define STARFIVE_AES_KEY2 (STARFIVE_AES_REGS_OFFSET + 0xC)
+#define STARFIVE_AES_KEY3 (STARFIVE_AES_REGS_OFFSET + 0x10)
+#define STARFIVE_AES_KEY4 (STARFIVE_AES_REGS_OFFSET + 0x14)
+#define STARFIVE_AES_KEY5 (STARFIVE_AES_REGS_OFFSET + 0x18)
+#define STARFIVE_AES_KEY6 (STARFIVE_AES_REGS_OFFSET + 0x1C)
+#define STARFIVE_AES_KEY7 (STARFIVE_AES_REGS_OFFSET + 0x20)
+#define STARFIVE_AES_CSR (STARFIVE_AES_REGS_OFFSET + 0x24)
+#define STARFIVE_AES_IV0 (STARFIVE_AES_REGS_OFFSET + 0x28)
+#define STARFIVE_AES_IV1 (STARFIVE_AES_REGS_OFFSET + 0x2C)
+#define STARFIVE_AES_IV2 (STARFIVE_AES_REGS_OFFSET + 0x30)
+#define STARFIVE_AES_IV3 (STARFIVE_AES_REGS_OFFSET + 0x34)
+#define STARFIVE_AES_NONCE0 (STARFIVE_AES_REGS_OFFSET + 0x3C)
+#define STARFIVE_AES_NONCE1 (STARFIVE_AES_REGS_OFFSET + 0x40)
+#define STARFIVE_AES_NONCE2 (STARFIVE_AES_REGS_OFFSET + 0x44)
+#define STARFIVE_AES_NONCE3 (STARFIVE_AES_REGS_OFFSET + 0x48)
+#define STARFIVE_AES_ALEN0 (STARFIVE_AES_REGS_OFFSET + 0x4C)
+#define STARFIVE_AES_ALEN1 (STARFIVE_AES_REGS_OFFSET + 0x50)
+#define STARFIVE_AES_MLEN0 (STARFIVE_AES_REGS_OFFSET + 0x54)
+#define STARFIVE_AES_MLEN1 (STARFIVE_AES_REGS_OFFSET + 0x58)
+#define STARFIVE_AES_IVLEN (STARFIVE_AES_REGS_OFFSET + 0x5C)
+
+#define FLG_MODE_MASK GENMASK(2, 0)
+#define FLG_ENCRYPT BIT(4)
+
+/* Misc */
+#define CCM_B0_ADATA 0x40
+#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
+
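+/* The wait helpers below poll the AES CSR every 10us and time out after 100ms */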
+static inline int starfive_aes_wait_busy(struct starfive_cryp_dev *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status,
+ !(status & STARFIVE_AES_BUSY), 10, 100000);
+}
+
+static inline int starfive_aes_wait_keydone(struct starfive_cryp_dev *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status,
+ (status & STARFIVE_AES_KEY_DONE), 10, 100000);
+}
+
+static inline int starfive_aes_wait_gcmdone(struct starfive_cryp_dev *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status,
+ (status & STARFIVE_AES_GCM_DONE), 10, 100000);
+}
+
+static inline int is_gcm(struct starfive_cryp_dev *cryp)
+{
+ return (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_GCM;
+}
+
+static inline int is_encrypt(struct starfive_cryp_dev *cryp)
+{
+ return cryp->flags & FLG_ENCRYPT;
+}
+
+static void starfive_aes_aead_hw_start(struct starfive_cryp_ctx *ctx, u32 hw_mode)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ unsigned int value;
+
+ switch (hw_mode) {
+ case STARFIVE_AES_MODE_GCM:
+ value = readl(ctx->cryp->base + STARFIVE_AES_CSR);
+ value |= STARFIVE_AES_GCM_START;
+ writel(value, cryp->base + STARFIVE_AES_CSR);
+ starfive_aes_wait_gcmdone(cryp);
+ break;
+ case STARFIVE_AES_MODE_CCM:
+ value = readl(ctx->cryp->base + STARFIVE_AES_CSR);
+ value |= STARFIVE_AES_CCM_START;
+ writel(value, cryp->base + STARFIVE_AES_CSR);
+ break;
+ }
+}
+
+static inline void starfive_aes_set_ivlen(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ if (is_gcm(cryp))
+ writel(GCM_AES_IV_SIZE, cryp->base + STARFIVE_AES_IVLEN);
+ else
+ writel(AES_BLOCK_SIZE, cryp->base + STARFIVE_AES_IVLEN);
+}
+
+static inline void starfive_aes_set_alen(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ writel(upper_32_bits(cryp->assoclen), cryp->base + STARFIVE_AES_ALEN0);
+ writel(lower_32_bits(cryp->assoclen), cryp->base + STARFIVE_AES_ALEN1);
+}
+
+static inline void starfive_aes_set_mlen(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ writel(upper_32_bits(cryp->total_in), cryp->base + STARFIVE_AES_MLEN0);
+ writel(lower_32_bits(cryp->total_in), cryp->base + STARFIVE_AES_MLEN1);
+}
+
+static inline int starfive_aes_ccm_check_iv(const u8 *iv)
+{
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (iv[0] < 1 || iv[0] > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int starfive_aes_write_iv(struct starfive_cryp_ctx *ctx, u32 *iv)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ writel(iv[0], cryp->base + STARFIVE_AES_IV0);
+ writel(iv[1], cryp->base + STARFIVE_AES_IV1);
+ writel(iv[2], cryp->base + STARFIVE_AES_IV2);
+
+ if (is_gcm(cryp)) {
+ if (starfive_aes_wait_gcmdone(cryp))
+ return -ETIMEDOUT;
+
+ return 0;
+ }
+
+ writel(iv[3], cryp->base + STARFIVE_AES_IV3);
+
+ return 0;
+}
+
+static inline void starfive_aes_get_iv(struct starfive_cryp_dev *cryp, u32 *iv)
+{
+ iv[0] = readl(cryp->base + STARFIVE_AES_IV0);
+ iv[1] = readl(cryp->base + STARFIVE_AES_IV1);
+ iv[2] = readl(cryp->base + STARFIVE_AES_IV2);
+ iv[3] = readl(cryp->base + STARFIVE_AES_IV3);
+}
+
+static inline void starfive_aes_write_nonce(struct starfive_cryp_ctx *ctx, u32 *nonce)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ writel(nonce[0], cryp->base + STARFIVE_AES_NONCE0);
+ writel(nonce[1], cryp->base + STARFIVE_AES_NONCE1);
+ writel(nonce[2], cryp->base + STARFIVE_AES_NONCE2);
+ writel(nonce[3], cryp->base + STARFIVE_AES_NONCE3);
+}
+
+static int starfive_aes_write_key(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ u32 *key = (u32 *)ctx->key;
+
+ if (ctx->keylen >= AES_KEYSIZE_128) {
+ writel(key[0], cryp->base + STARFIVE_AES_KEY0);
+ writel(key[1], cryp->base + STARFIVE_AES_KEY1);
+ writel(key[2], cryp->base + STARFIVE_AES_KEY2);
+ writel(key[3], cryp->base + STARFIVE_AES_KEY3);
+ }
+
+ if (ctx->keylen >= AES_KEYSIZE_192) {
+ writel(key[4], cryp->base + STARFIVE_AES_KEY4);
+ writel(key[5], cryp->base + STARFIVE_AES_KEY5);
+ }
+
+ if (ctx->keylen >= AES_KEYSIZE_256) {
+ writel(key[6], cryp->base + STARFIVE_AES_KEY6);
+ writel(key[7], cryp->base + STARFIVE_AES_KEY7);
+ }
+
+ if (starfive_aes_wait_keydone(cryp))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int starfive_aes_ccm_init(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE];
+ unsigned int textlen;
+
+ memcpy(iv, cryp->req.areq->iv, AES_BLOCK_SIZE);
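+ /* Zero the trailing counter field; its length is L = iv[0] + 1 bytes */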
+ memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
+
+ /* Build B0 */
+ memcpy(b0, iv, AES_BLOCK_SIZE);
+
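+ /* Flags octet: fold M' = (authsize - 2) / 2 into bits 5..3 (RFC 3610) */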
+ b0[0] |= (8 * ((cryp->authsize - 2) / 2));
+
+ if (cryp->assoclen)
+ b0[0] |= CCM_B0_ADATA;
+
+ textlen = cryp->total_in;
+
+ b0[AES_BLOCK_SIZE - 2] = textlen >> 8;
+ b0[AES_BLOCK_SIZE - 1] = textlen & 0xFF;
+
+ starfive_aes_write_nonce(ctx, (u32 *)b0);
+
+ return 0;
+}
+
+static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ u32 hw_mode;
+
+ /* reset */
+ rctx->csr.aes.v = 0;
+ rctx->csr.aes.aesrst = 1;
+ writel(rctx->csr.aes.v, cryp->base + STARFIVE_AES_CSR);
+
+ /* csr setup */
+ hw_mode = cryp->flags & FLG_MODE_MASK;
+
+ rctx->csr.aes.v = 0;
+
+ switch (ctx->keylen) {
+ case AES_KEYSIZE_128:
+ rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_128;
+ break;
+ case AES_KEYSIZE_192:
+ rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_192;
+ break;
+ case AES_KEYSIZE_256:
+ rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_256;
+ break;
+ }
+
+ rctx->csr.aes.mode = hw_mode;
+ rctx->csr.aes.cmode = !is_encrypt(cryp);
+ rctx->csr.aes.ie = 1;
+
+ if (hw_mode == STARFIVE_AES_MODE_CFB ||
+ hw_mode == STARFIVE_AES_MODE_OFB)
+ rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_128;
+ else
+ rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_1;
+
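+ /* Optional side-channel mitigation: slower, enabled via the side_chan module parameter */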
+ if (cryp->side_chan) {
+ rctx->csr.aes.delay_aes = 1;
+ rctx->csr.aes.vaes_start = 1;
+ }
+
+ writel(rctx->csr.aes.v, cryp->base + STARFIVE_AES_CSR);
+
+ cryp->err = starfive_aes_write_key(ctx);
+ if (cryp->err)
+ return cryp->err;
+
+ switch (hw_mode) {
+ case STARFIVE_AES_MODE_GCM:
+ starfive_aes_set_alen(ctx);
+ starfive_aes_set_mlen(ctx);
+ starfive_aes_set_ivlen(ctx);
+ starfive_aes_aead_hw_start(ctx, hw_mode);
+ starfive_aes_write_iv(ctx, (void *)cryp->req.areq->iv);
+ break;
+ case STARFIVE_AES_MODE_CCM:
+ starfive_aes_set_alen(ctx);
+ starfive_aes_set_mlen(ctx);
+ starfive_aes_ccm_init(ctx);
+ starfive_aes_aead_hw_start(ctx, hw_mode);
+ break;
+ case STARFIVE_AES_MODE_OFB:
+ case STARFIVE_AES_MODE_CFB:
+ case STARFIVE_AES_MODE_CBC:
+ case STARFIVE_AES_MODE_CTR:
+ starfive_aes_write_iv(ctx, (void *)cryp->req.sreq->iv);
+ break;
+ default:
+ break;
+ }
+
+ return cryp->err;
+}
+
+static int starfive_aes_read_authtag(struct starfive_cryp_dev *cryp)
+{
+ int i, start_addr;
+
+ if (starfive_aes_wait_busy(cryp))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT,
+ "Timeout waiting for tag generation.");
+
+ start_addr = STARFIVE_AES_NONCE0;
+
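+ /* GCM exposes the tag in the NONCE registers; CCM reads it from the data register */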
+ if (is_gcm(cryp))
+ for (i = 0; i < AES_BLOCK_32; i++, start_addr += 4)
+ cryp->tag_out[i] = readl(cryp->base + start_addr);
+ else
+ for (i = 0; i < AES_BLOCK_32; i++)
+ cryp->tag_out[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R);
+
+ if (is_encrypt(cryp)) {
+ scatterwalk_copychunks(cryp->tag_out, &cryp->out_walk, cryp->authsize, 1);
+ } else {
+ scatterwalk_copychunks(cryp->tag_in, &cryp->in_walk, cryp->authsize, 0);
+
+ if (crypto_memneq(cryp->tag_in, cryp->tag_out, cryp->authsize))
+ return dev_err_probe(cryp->dev, -EBADMSG, "Failed tag verification\n");
+ }
+
+ return 0;
+}
+
+static void starfive_aes_finish_req(struct starfive_cryp_dev *cryp)
+{
+ union starfive_aes_csr csr;
+ int err = cryp->err;
+
+ if (!err && cryp->authsize)
+ err = starfive_aes_read_authtag(cryp);
+
+ if (!err && ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CBC ||
+ (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CTR))
+ starfive_aes_get_iv(cryp, (void *)cryp->req.sreq->iv);
+
+ /* reset irq flags */
+ csr.v = 0;
+ csr.aesrst = 1;
+ writel(csr.v, cryp->base + STARFIVE_AES_CSR);
+
+ if (cryp->authsize)
+ crypto_finalize_aead_request(cryp->engine, cryp->req.areq, err);
+ else
+ crypto_finalize_skcipher_request(cryp->engine, cryp->req.sreq,
+ err);
+}
+
+void starfive_aes_done_task(unsigned long param)
+{
+ struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param;
+ u32 block[AES_BLOCK_32];
+ u32 stat;
+ int i;
+
+ for (i = 0; i < AES_BLOCK_32; i++)
+ block[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R);
+
+ scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, AES_BLOCK_SIZE,
+ cryp->total_out), 1);
+
+ cryp->total_out -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_out);
+
+ if (!cryp->total_out) {
+ starfive_aes_finish_req(cryp);
+ return;
+ }
+
+ memset(block, 0, AES_BLOCK_SIZE);
+ scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
+ cryp->total_in), 0);
+ cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
+
+ for (i = 0; i < AES_BLOCK_32; i++)
+ writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
+
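+ /* Re-enable (unmask) the AES-done interrupt for the next block */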
+ stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+ stat &= ~STARFIVE_IE_MASK_AES_DONE;
+ writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+}
+
+static int starfive_aes_gcm_write_adata(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ u32 *buffer;
+ int total_len, loop;
+
+ total_len = ALIGN(cryp->assoclen, AES_BLOCK_SIZE) / sizeof(unsigned int);
+ buffer = (u32 *)rctx->adata;
+
+ for (loop = 0; loop < total_len; loop += 4) {
+ writel(*buffer, cryp->base + STARFIVE_AES_NONCE0);
+ buffer++;
+ writel(*buffer, cryp->base + STARFIVE_AES_NONCE1);
+ buffer++;
+ writel(*buffer, cryp->base + STARFIVE_AES_NONCE2);
+ buffer++;
+ writel(*buffer, cryp->base + STARFIVE_AES_NONCE3);
+ buffer++;
+ }
+
+ if (starfive_aes_wait_gcmdone(cryp))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT,
+ "Timeout processing gcm aad block");
+
+ return 0;
+}
+
+static int starfive_aes_ccm_write_adata(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ u32 *buffer;
+ u8 *ci;
+ int total_len, loop;
+
+ total_len = cryp->assoclen;
+
+ ci = rctx->adata;
+ writeb(*ci, cryp->base + STARFIVE_AES_AESDIO0R);
+ ci++;
+ writeb(*ci, cryp->base + STARFIVE_AES_AESDIO0R);
+ ci++;
+ total_len -= 2;
+ buffer = (u32 *)ci;
+
+ for (loop = 0; loop < 3; loop++, buffer++)
+ writel(*buffer, cryp->base + STARFIVE_AES_AESDIO0R);
+
+ total_len -= 12;
+
+ while (total_len > 0) {
+ for (loop = 0; loop < AES_BLOCK_32; loop++, buffer++)
+ writel(*buffer, cryp->base + STARFIVE_AES_AESDIO0R);
+
+ total_len -= AES_BLOCK_SIZE;
+ }
+
+ if (starfive_aes_wait_busy(cryp))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT,
+ "Timeout processing ccm aad block");
+
+ return 0;
+}
+
+static int starfive_aes_prepare_req(struct skcipher_request *req,
+ struct aead_request *areq)
+{
+ struct starfive_cryp_ctx *ctx;
+ struct starfive_cryp_request_ctx *rctx;
+ struct starfive_cryp_dev *cryp;
+
+ if (!req && !areq)
+ return -EINVAL;
+
+ ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) :
+ crypto_aead_ctx(crypto_aead_reqtfm(areq));
+
+ cryp = ctx->cryp;
+ rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
+
+ if (req) {
+ cryp->req.sreq = req;
+ cryp->total_in = req->cryptlen;
+ cryp->total_out = req->cryptlen;
+ cryp->assoclen = 0;
+ cryp->authsize = 0;
+ } else {
+ cryp->req.areq = areq;
+ cryp->assoclen = areq->assoclen;
+ cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
+ if (is_encrypt(cryp)) {
+ cryp->total_in = areq->cryptlen;
+ cryp->total_out = areq->cryptlen;
+ } else {
+ cryp->total_in = areq->cryptlen - cryp->authsize;
+ cryp->total_out = cryp->total_in;
+ }
+ }
+
+ rctx->in_sg = req ? req->src : areq->src;
+ scatterwalk_start(&cryp->in_walk, rctx->in_sg);
+
+ rctx->out_sg = req ? req->dst : areq->dst;
+ scatterwalk_start(&cryp->out_walk, rctx->out_sg);
+
+ if (cryp->assoclen) {
+ rctx->adata = kzalloc(ALIGN(cryp->assoclen, AES_BLOCK_SIZE), GFP_KERNEL);
+ if (!rctx->adata)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "Failed to alloc memory for adata");
+
+ scatterwalk_copychunks(rctx->adata, &cryp->in_walk, cryp->assoclen, 0);
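+ /* A final argument of 2 advances the out walk without copying data */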
+ scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->assoclen, 2);
+ }
+
+ ctx->rctx = rctx;
+
+ return starfive_aes_hw_init(ctx);
+}
+
+static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req =
+ container_of(areq, struct skcipher_request, base);
+ struct starfive_cryp_ctx *ctx =
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ u32 block[AES_BLOCK_32];
+ u32 stat;
+ int err;
+ int i;
+
+ err = starfive_aes_prepare_req(req, NULL);
+ if (err)
+ return err;
+
+ /*
+ * Write the first plaintext/ciphertext block to start the module,
+ * then let the irq tasklet handle the rest of the data blocks.
+ */
+ scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
+ cryp->total_in), 0);
+ cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
+
+ for (i = 0; i < AES_BLOCK_32; i++)
+ writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
+
+ stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+ stat &= ~STARFIVE_IE_MASK_AES_DONE;
+ writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+
+ return 0;
+}
+
+static int starfive_aes_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->cryp = starfive_cryp_find_dev(ctx);
+ if (!ctx->cryp)
+ return -ENODEV;
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) +
+ sizeof(struct skcipher_request));
+
+ return 0;
+}
+
+static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req =
+ container_of(areq, struct aead_request, base);
+ struct starfive_cryp_ctx *ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ u32 block[AES_BLOCK_32];
+ u32 stat;
+ int err;
+ int i;
+
+ err = starfive_aes_prepare_req(NULL, req);
+ if (err)
+ return err;
+
+ if (!cryp->assoclen)
+ goto write_text;
+
+ if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CCM)
+ cryp->err = starfive_aes_ccm_write_adata(ctx);
+ else
+ cryp->err = starfive_aes_gcm_write_adata(ctx);
+
+ kfree(rctx->adata);
+
+ if (cryp->err)
+ return cryp->err;
+
+write_text:
+ if (!cryp->total_in)
+ goto finish_req;
+
+ /*
+ * Write the first plaintext/ciphertext block to start the module,
+ * then let the irq tasklet handle the rest of the data blocks.
+ */
+ scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
+ cryp->total_in), 0);
+ cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
+
+ for (i = 0; i < AES_BLOCK_32; i++)
+ writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
+
+ stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+ stat &= ~STARFIVE_IE_MASK_AES_DONE;
+ writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+
+ return 0;
+
+finish_req:
+ starfive_aes_finish_req(cryp);
+ return 0;
+}
+
+static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm)
+{
+ struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_tfm *aead = crypto_aead_tfm(tfm);
+ struct crypto_alg *alg = aead->__crt_alg;
+
+ ctx->cryp = starfive_cryp_find_dev(ctx);
+ if (!ctx->cryp)
+ return -ENODEV;
+
+ if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->aead_fbk = crypto_alloc_aead(alg->cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->aead_fbk))
+ return dev_err_probe(ctx->cryp->dev, PTR_ERR(ctx->aead_fbk),
+ "%s() failed to allocate fallback for %s\n",
+ __func__, alg->cra_name);
+ }
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct starfive_cryp_ctx) +
+ sizeof(struct aead_request));
+
+ return 0;
+}
+
+static void starfive_aes_aead_exit_tfm(struct crypto_aead *tfm)
+{
+ struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_aead(ctx->aead_fbk);
+}
+
+static int starfive_aes_crypt(struct skcipher_request *req, unsigned long flags)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ unsigned int blocksize_align = crypto_skcipher_blocksize(tfm) - 1;
+
+ cryp->flags = flags;
+
+ if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_ECB ||
+ (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CBC)
+ if (req->cryptlen & blocksize_align)
+ return -EINVAL;
+
+ return crypto_transfer_skcipher_request_to_engine(cryp->engine, req);
+}
+
+static int starfive_aes_aead_crypt(struct aead_request *req, unsigned long flags)
+{
+ struct starfive_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ cryp->flags = flags;
+
+ /*
+ * The HW engine cannot perform CCM tag verification on text that is not
+ * block-size aligned, so use the fallback algo for decryption instead.
+ */
+ if (ctx->aead_fbk && !is_encrypt(cryp)) {
+ struct aead_request *subreq = aead_request_ctx(req);
+
+ aead_request_set_tfm(subreq, ctx->aead_fbk);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src,
+ req->dst, req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return crypto_aead_decrypt(subreq);
+ }
+
+ return crypto_transfer_aead_request_to_engine(cryp->engine, req);
+}
+
+static int starfive_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (!key || !keylen)
+ return -EINVAL;
+
+ if (keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int starfive_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (!key || !keylen)
+ return -EINVAL;
+
+ if (keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ if (ctx->aead_fbk)
+ return crypto_aead_setkey(ctx->aead_fbk, key, keylen);
+
+ return 0;
+}
+
+static int starfive_aes_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ return crypto_gcm_check_authsize(authsize);
+}
+
+static int starfive_aes_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return crypto_aead_setauthsize(ctx->aead_fbk, authsize);
+}
+
+static int starfive_aes_ecb_encrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_ECB | FLG_ENCRYPT);
+}
+
+static int starfive_aes_ecb_decrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_ECB);
+}
+
+static int starfive_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_CBC | FLG_ENCRYPT);
+}
+
+static int starfive_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_CBC);
+}
+
+static int starfive_aes_cfb_encrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_CFB | FLG_ENCRYPT);
+}
+
+static int starfive_aes_cfb_decrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_CFB);
+}
+
+static int starfive_aes_ofb_encrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_OFB | FLG_ENCRYPT);
+}
+
+static int starfive_aes_ofb_decrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_OFB);
+}
+
+static int starfive_aes_ctr_encrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_CTR | FLG_ENCRYPT);
+}
+
+static int starfive_aes_ctr_decrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_CTR);
+}
+
+static int starfive_aes_gcm_encrypt(struct aead_request *req)
+{
+ return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_GCM | FLG_ENCRYPT);
+}
+
+static int starfive_aes_gcm_decrypt(struct aead_request *req)
+{
+ return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_GCM);
+}
+
+static int starfive_aes_ccm_encrypt(struct aead_request *req)
+{
+ int ret;
+
+ ret = starfive_aes_ccm_check_iv(req->iv);
+ if (ret)
+ return ret;
+
+ return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM | FLG_ENCRYPT);
+}
+
+static int starfive_aes_ccm_decrypt(struct aead_request *req)
+{
+ int ret;
+
+ ret = starfive_aes_ccm_check_iv(req->iv);
+ if (ret)
+ return ret;
+
+ return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM);
+}
+
+static struct skcipher_engine_alg skcipher_algs[] = {
+{
+ .base.init = starfive_aes_init_tfm,
+ .base.setkey = starfive_aes_setkey,
+ .base.encrypt = starfive_aes_ecb_encrypt,
+ .base.decrypt = starfive_aes_ecb_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "starfive-ecb-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .op = {
+ .do_one_request = starfive_aes_do_one_req,
+ },
+}, {
+ .base.init = starfive_aes_init_tfm,
+ .base.setkey = starfive_aes_setkey,
+ .base.encrypt = starfive_aes_cbc_encrypt,
+ .base.decrypt = starfive_aes_cbc_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "starfive-cbc-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .op = {
+ .do_one_request = starfive_aes_do_one_req,
+ },
+}, {
+ .base.init = starfive_aes_init_tfm,
+ .base.setkey = starfive_aes_setkey,
+ .base.encrypt = starfive_aes_ctr_encrypt,
+ .base.decrypt = starfive_aes_ctr_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "starfive-ctr-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .op = {
+ .do_one_request = starfive_aes_do_one_req,
+ },
+}, {
+ .base.init = starfive_aes_init_tfm,
+ .base.setkey = starfive_aes_setkey,
+ .base.encrypt = starfive_aes_cfb_encrypt,
+ .base.decrypt = starfive_aes_cfb_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.base = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "starfive-cfb-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .op = {
+ .do_one_request = starfive_aes_do_one_req,
+ },
+}, {
+ .base.init = starfive_aes_init_tfm,
+ .base.setkey = starfive_aes_setkey,
+ .base.encrypt = starfive_aes_ofb_encrypt,
+ .base.decrypt = starfive_aes_ofb_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.base = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "starfive-ofb-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .op = {
+ .do_one_request = starfive_aes_do_one_req,
+ },
+},
+};
+
+static struct aead_engine_alg aead_algs[] = {
+{
+ .base.setkey = starfive_aes_aead_setkey,
+ .base.setauthsize = starfive_aes_gcm_setauthsize,
+ .base.encrypt = starfive_aes_gcm_encrypt,
+ .base.decrypt = starfive_aes_gcm_decrypt,
+ .base.init = starfive_aes_aead_init_tfm,
+ .base.exit = starfive_aes_aead_exit_tfm,
+ .base.ivsize = GCM_AES_IV_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "starfive-gcm-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .op = {
+ .do_one_request = starfive_aes_aead_do_one_req,
+ },
+}, {
+ .base.setkey = starfive_aes_aead_setkey,
+ .base.setauthsize = starfive_aes_ccm_setauthsize,
+ .base.encrypt = starfive_aes_ccm_encrypt,
+ .base.decrypt = starfive_aes_ccm_decrypt,
+ .base.init = starfive_aes_aead_init_tfm,
+ .base.exit = starfive_aes_aead_exit_tfm,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "starfive-ccm-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .op = {
+ .do_one_request = starfive_aes_aead_do_one_req,
+ },
+},
+};
+
+int starfive_aes_register_algs(void)
+{
+ int ret;
+
+ ret = crypto_engine_register_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
+ if (ret)
+ return ret;
+
+ ret = crypto_engine_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ if (ret)
+ crypto_engine_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
+
+ return ret;
+}
+
+void starfive_aes_unregister_algs(void)
+{
+ crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ crypto_engine_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
+}
diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
index cc43556b6c80..08e974e0dd12 100644
--- a/drivers/crypto/starfive/jh7110-cryp.c
+++ b/drivers/crypto/starfive/jh7110-cryp.c
@@ -7,17 +7,20 @@
*
*/
+#include <crypto/engine.h>
+#include "jh7110-cryp.h"
#include <linux/clk.h>
-#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-
-#include "jh7110-cryp.h"
+#include <linux/spinlock.h>
#define DRIVER_NAME "jh7110-crypto"
@@ -51,6 +54,13 @@ struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx)
return cryp;
}
+static u16 side_chan;
+module_param(side_chan, ushort, 0);
+MODULE_PARM_DESC(side_chan, "Enable side channel mitigation for AES module.\n"
+ "Enabling this feature will reduce speed performance.\n"
+ " 0 - Disabled\n"
+ " other - Enabled");
+
static int starfive_dma_init(struct starfive_cryp_dev *cryp)
{
dma_cap_mask_t mask;
@@ -82,20 +92,26 @@ static void starfive_dma_cleanup(struct starfive_cryp_dev *cryp)
static irqreturn_t starfive_cryp_irq(int irq, void *priv)
{
u32 status;
+ u32 mask;
struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)priv;
+ mask = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
status = readl(cryp->base + STARFIVE_IE_FLAG_OFFSET);
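+ /* Mask each completed unit's interrupt; its tasklet unmasks it after draining the data */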
+ if (status & STARFIVE_IE_FLAG_AES_DONE) {
+ mask |= STARFIVE_IE_MASK_AES_DONE;
+ writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ tasklet_schedule(&cryp->aes_done);
+ }
+
if (status & STARFIVE_IE_FLAG_HASH_DONE) {
- status = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- status |= STARFIVE_IE_MASK_HASH_DONE;
- writel(status, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ mask |= STARFIVE_IE_MASK_HASH_DONE;
+ writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
tasklet_schedule(&cryp->hash_done);
}
if (status & STARFIVE_IE_FLAG_PKA_DONE) {
- status = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- status |= STARFIVE_IE_MASK_PKA_DONE;
- writel(status, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ mask |= STARFIVE_IE_MASK_PKA_DONE;
+ writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
complete(&cryp->pka_done);
}
@@ -121,10 +137,12 @@ static int starfive_cryp_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(cryp->base),
"Error remapping memory for platform device\n");
+ tasklet_init(&cryp->aes_done, starfive_aes_done_task, (unsigned long)cryp);
tasklet_init(&cryp->hash_done, starfive_hash_done_task, (unsigned long)cryp);
cryp->phys_base = res->start;
cryp->dma_maxburst = 32;
+ cryp->side_chan = side_chan;
cryp->hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(cryp->hclk))
@@ -180,6 +198,10 @@ static int starfive_cryp_probe(struct platform_device *pdev)
if (ret)
goto err_engine_start;
+ ret = starfive_aes_register_algs();
+ if (ret)
+ goto err_algs_aes;
+
ret = starfive_hash_register_algs();
if (ret)
goto err_algs_hash;
@@ -193,6 +215,8 @@ static int starfive_cryp_probe(struct platform_device *pdev)
err_algs_rsa:
starfive_hash_unregister_algs();
err_algs_hash:
+ starfive_aes_unregister_algs();
+err_algs_aes:
crypto_engine_stop(cryp->engine);
err_engine_start:
crypto_engine_exit(cryp->engine);
@@ -207,18 +231,21 @@ err_dma_init:
clk_disable_unprepare(cryp->ahb);
reset_control_assert(cryp->rst);
+ tasklet_kill(&cryp->aes_done);
tasklet_kill(&cryp->hash_done);
err_probe_defer:
return ret;
}
-static int starfive_cryp_remove(struct platform_device *pdev)
+static void starfive_cryp_remove(struct platform_device *pdev)
{
struct starfive_cryp_dev *cryp = platform_get_drvdata(pdev);
+ starfive_aes_unregister_algs();
starfive_hash_unregister_algs();
starfive_rsa_unregister_algs();
+ tasklet_kill(&cryp->aes_done);
tasklet_kill(&cryp->hash_done);
crypto_engine_stop(cryp->engine);
@@ -233,8 +260,6 @@ static int starfive_cryp_remove(struct platform_device *pdev)
clk_disable_unprepare(cryp->hclk);
clk_disable_unprepare(cryp->ahb);
reset_control_assert(cryp->rst);
-
- return 0;
}
static const struct of_device_id starfive_dt_ids[] __maybe_unused = {
@@ -245,7 +270,7 @@ MODULE_DEVICE_TABLE(of, starfive_dt_ids);
static struct platform_driver starfive_cryp_driver = {
.probe = starfive_cryp_probe,
- .remove = starfive_cryp_remove,
+ .remove_new = starfive_cryp_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = starfive_dt_ids,
diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h
index 0cdcffc0d7d4..fe011d50473d 100644
--- a/drivers/crypto/starfive/jh7110-cryp.h
+++ b/drivers/crypto/starfive/jh7110-cryp.h
@@ -2,13 +2,15 @@
#ifndef __STARFIVE_STR_H__
#define __STARFIVE_STR_H__
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha2.h>
+#include <crypto/sm3.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-
-#include <crypto/engine.h>
-#include <crypto/sha2.h>
-#include <crypto/sm3.h>
+#include <linux/interrupt.h>
#define STARFIVE_ALG_CR_OFFSET 0x0
#define STARFIVE_ALG_FIFO_OFFSET 0x4
@@ -17,13 +19,56 @@
#define STARFIVE_DMA_IN_LEN_OFFSET 0x10
#define STARFIVE_DMA_OUT_LEN_OFFSET 0x14
+#define STARFIVE_IE_MASK_AES_DONE 0x1
#define STARFIVE_IE_MASK_HASH_DONE 0x4
#define STARFIVE_IE_MASK_PKA_DONE 0x8
+#define STARFIVE_IE_FLAG_AES_DONE 0x1
#define STARFIVE_IE_FLAG_HASH_DONE 0x4
#define STARFIVE_IE_FLAG_PKA_DONE 0x8
#define STARFIVE_MSG_BUFFER_SIZE SZ_16K
#define MAX_KEY_SIZE SHA512_BLOCK_SIZE
+#define STARFIVE_AES_IV_LEN AES_BLOCK_SIZE
+#define STARFIVE_AES_CTR_LEN AES_BLOCK_SIZE
+
+union starfive_aes_csr {
+ u32 v;
+ struct {
+ u32 cmode :1;
+#define STARFIVE_AES_KEYMODE_128 0x0
+#define STARFIVE_AES_KEYMODE_192 0x1
+#define STARFIVE_AES_KEYMODE_256 0x2
+ u32 keymode :2;
+#define STARFIVE_AES_BUSY BIT(3)
+ u32 busy :1;
+ u32 done :1;
+#define STARFIVE_AES_KEY_DONE BIT(5)
+ u32 krdy :1;
+ u32 aesrst :1;
+ u32 ie :1;
+#define STARFIVE_AES_CCM_START BIT(8)
+ u32 ccm_start :1;
+#define STARFIVE_AES_MODE_ECB 0x0
+#define STARFIVE_AES_MODE_CBC 0x1
+#define STARFIVE_AES_MODE_CFB 0x2
+#define STARFIVE_AES_MODE_OFB 0x3
+#define STARFIVE_AES_MODE_CTR 0x4
+#define STARFIVE_AES_MODE_CCM 0x5
+#define STARFIVE_AES_MODE_GCM 0x6
+ u32 mode :3;
+#define STARFIVE_AES_GCM_START BIT(12)
+ u32 gcm_start :1;
+#define STARFIVE_AES_GCM_DONE BIT(13)
+ u32 gcm_done :1;
+ u32 delay_aes :1;
+ u32 vaes_start :1;
+ u32 rsvd_0 :8;
+#define STARFIVE_AES_MODE_XFB_1 0x0
+#define STARFIVE_AES_MODE_XFB_128 0x5
+ u32 stmode :3;
+ u32 rsvd_1 :5;
+ };
+};
union starfive_hash_csr {
u32 v;
@@ -105,7 +150,6 @@ union starfive_alg_cr {
};
struct starfive_cryp_ctx {
- struct crypto_engine_ctx enginectx;
struct starfive_cryp_dev *cryp;
struct starfive_cryp_request_ctx *rctx;
@@ -116,6 +160,7 @@ struct starfive_cryp_ctx {
struct starfive_rsa_key rsa_key;
struct crypto_akcipher *akcipher_fbk;
struct crypto_ahash *ahash_fbk;
+ struct crypto_aead *aead_fbk;
};
struct starfive_cryp_dev {
@@ -133,13 +178,26 @@ struct starfive_cryp_dev {
struct dma_chan *rx;
struct dma_slave_config cfg_in;
struct dma_slave_config cfg_out;
+ struct scatter_walk in_walk;
+ struct scatter_walk out_walk;
struct crypto_engine *engine;
+ struct tasklet_struct aes_done;
struct tasklet_struct hash_done;
struct completion pka_done;
+ size_t assoclen;
+ size_t total_in;
+ size_t total_out;
+ u32 tag_in[4];
+ u32 tag_out[4];
+ unsigned int authsize;
+ unsigned long flags;
int err;
+ bool side_chan;
union starfive_alg_cr alg_cr;
union {
struct ahash_request *hreq;
+ struct aead_request *areq;
+ struct skcipher_request *sreq;
} req;
};
@@ -147,6 +205,7 @@ struct starfive_cryp_request_ctx {
union {
union starfive_hash_csr hash;
union starfive_pka_cacr pka;
+ union starfive_aes_csr aes;
} csr;
struct scatterlist *in_sg;
@@ -157,6 +216,7 @@ struct starfive_cryp_request_ctx {
unsigned int blksize;
unsigned int digsize;
unsigned long in_sg_len;
+ unsigned char *adata;
u8 rsa_data[] __aligned(sizeof(u32));
};
@@ -168,5 +228,9 @@ void starfive_hash_unregister_algs(void);
int starfive_rsa_register_algs(void);
void starfive_rsa_unregister_algs(void);
+int starfive_aes_register_algs(void);
+void starfive_aes_unregister_algs(void);
+
void starfive_hash_done_task(unsigned long param);
+void starfive_aes_done_task(unsigned long param);
#endif
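
The new union starfive_aes_csr above follows the usual bitfield-union idiom for memory-mapped control registers: fields are composed by name, then the whole word is written out through the .v member. A minimal sketch of how such a union is typically driven, assuming a hypothetical STARFIVE_AES_CSR register offset and an ioremapped base pointer (neither appears in this hunk):

    union starfive_aes_csr csr = { .v = 0 };

    csr.keymode = STARFIVE_AES_KEYMODE_256;	/* 2-bit key-size field */
    csr.mode    = STARFIVE_AES_MODE_CBC;	/* 3-bit block-mode field */
    csr.ie      = 1;				/* unmask the AES done interrupt */

    /* all fields land in a single 32-bit register write */
    writel(csr.v, base + STARFIVE_AES_CSR);

One caveat with this idiom: C bitfield layout is implementation-defined, so it is only safe because the kernel targets known ABIs where the field order matches the hardware register.
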
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
index 5064150b8a1c..cc7650198d70 100644
--- a/drivers/crypto/starfive/jh7110-hash.c
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -6,25 +6,20 @@
*
*/
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include "jh7110-cryp.h"
+#include <linux/amba/pl080.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/dma-direct.h>
#include <linux/interrupt.h>
-#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <linux/amba/pl080.h>
-
-#include <crypto/hash.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/hash.h>
-
-#include "jh7110-cryp.h"
#define STARFIVE_HASH_REGS_OFFSET 0x300
#define STARFIVE_HASH_SHACSR (STARFIVE_HASH_REGS_OFFSET + 0x0)
@@ -433,10 +428,6 @@ static int starfive_hash_init_tfm(struct crypto_ahash *hash,
ctx->keylen = 0;
ctx->hash_mode = mode;
- ctx->enginectx.op.do_one_request = starfive_hash_one_request;
- ctx->enginectx.op.prepare_request = NULL;
- ctx->enginectx.op.unprepare_request = NULL;
-
return 0;
}
@@ -445,11 +436,6 @@ static void starfive_hash_exit_tfm(struct crypto_ahash *hash)
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
crypto_free_ahash(ctx->ahash_fbk);
-
- ctx->ahash_fbk = NULL;
- ctx->enginectx.op.do_one_request = NULL;
- ctx->enginectx.op.prepare_request = NULL;
- ctx->enginectx.op.unprepare_request = NULL;
}
static int starfive_hash_long_setkey(struct starfive_cryp_ctx *ctx,
@@ -619,18 +605,18 @@ static int starfive_hmac_sm3_init_tfm(struct crypto_ahash *hash)
STARFIVE_HASH_SM3);
}
-static struct ahash_alg algs_sha2_sm3[] = {
+static struct ahash_engine_alg algs_sha2_sm3[] = {
{
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_sha224_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_sha224_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
.base = {
@@ -645,19 +631,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_hmac_sha224_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .setkey = starfive_hash_setkey,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_hmac_sha224_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.setkey = starfive_hash_setkey,
+ .base.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
.base = {
@@ -672,18 +661,21 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_sha256_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_sha256_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
.base = {
@@ -698,19 +690,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_hmac_sha256_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .setkey = starfive_hash_setkey,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_hmac_sha256_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.setkey = starfive_hash_setkey,
+ .base.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
.base = {
@@ -725,18 +720,21 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_sha384_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_sha384_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct sha512_state),
.base = {
@@ -751,19 +749,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_hmac_sha384_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .setkey = starfive_hash_setkey,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_hmac_sha384_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.setkey = starfive_hash_setkey,
+ .base.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct sha512_state),
.base = {
@@ -778,18 +779,21 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_sha512_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_sha512_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct sha512_state),
.base = {
@@ -804,19 +808,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_hmac_sha512_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .setkey = starfive_hash_setkey,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_hmac_sha512_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.setkey = starfive_hash_setkey,
+ .base.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct sha512_state),
.base = {
@@ -831,18 +838,21 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_sm3_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_sm3_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.halg = {
.digestsize = SM3_DIGEST_SIZE,
.statesize = sizeof(struct sm3_state),
.base = {
@@ -857,19 +867,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_hmac_sm3_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .setkey = starfive_hash_setkey,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_hmac_sm3_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.setkey = starfive_hash_setkey,
+ .base.halg = {
.digestsize = SM3_DIGEST_SIZE,
.statesize = sizeof(struct sm3_state),
.base = {
@@ -884,16 +897,19 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
},
};
int starfive_hash_register_algs(void)
{
- return crypto_register_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
+ return crypto_engine_register_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
}
void starfive_hash_unregister_algs(void)
{
- crypto_unregister_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
+ crypto_engine_unregister_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
}
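
The jh7110-hash.c conversion above is the crypto-engine API migration in miniature: the per-tfm crypto_engine_ctx (whose do_one_request pointer was set in init_tfm) is replaced by struct ahash_engine_alg, which pairs the ordinary ahash_alg under .base with the engine callback under .op, and registration switches to the crypto_engine_register_*() helpers. A condensed sketch of the pattern, with hypothetical my_* callbacks standing in for the driver functions:

    #include <crypto/engine.h>
    #include <crypto/internal/hash.h>
    #include <crypto/sha2.h>

    static int my_hash_init(struct ahash_request *req);
    static int my_hash_digest(struct ahash_request *req);
    static int my_one_request(struct crypto_engine *engine, void *areq);

    static struct ahash_engine_alg my_alg = {
    	.base.init   = my_hash_init,
    	.base.digest = my_hash_digest,
    	.base.halg = {
    		.digestsize = SHA256_DIGEST_SIZE,
    		.base = {
    			.cra_name        = "sha256",
    			.cra_driver_name = "my-sha256",
    			.cra_flags       = CRYPTO_ALG_ASYNC,
    			.cra_module      = THIS_MODULE,
    		},
    	},
    	/* the engine hook now lives in the alg, not in each tfm ctx */
    	.op.do_one_request = my_one_request,
    };

    /* in probe: */
    err = crypto_engine_register_ahash(&my_alg);

Because the callback is now per-algorithm rather than per-tfm, init_tfm and exit_tfm shrink to managing only the fallback transform, as seen in starfive_hash_exit_tfm() above.
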
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 4fc581e9e595..49dfd161e9b9 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -16,6 +16,8 @@ config CRYPTO_DEV_STM32_HASH
select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_SHA3
select CRYPTO_ENGINE
help
This enables support for the HASH hw accelerator which can be found
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 6b8d731092a4..f095f0065428 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -5,22 +5,24 @@
* Ux500 support taken from snippets in the old Ux500 cryp driver
*/
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/interrupt.h>
+#include <linux/err.h>
#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-
-#include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <crypto/engine.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/aead.h>
-#include <crypto/internal/skcipher.h>
+#include <linux/string.h>
#define DRIVER_NAME "stm32-cryp"
@@ -156,7 +158,6 @@ struct stm32_cryp_caps {
};
struct stm32_cryp_ctx {
- struct crypto_engine_ctx enginectx;
struct stm32_cryp *cryp;
int keylen;
__be32 key[AES_KEYSIZE_256 / sizeof(u32)];
@@ -825,35 +826,20 @@ static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
}
static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq);
-static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
- void *areq);
static int stm32_cryp_init_tfm(struct crypto_skcipher *tfm)
{
- struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
-
crypto_skcipher_set_reqsize(tfm, sizeof(struct stm32_cryp_reqctx));
- ctx->enginectx.op.do_one_request = stm32_cryp_cipher_one_req;
- ctx->enginectx.op.prepare_request = stm32_cryp_prepare_cipher_req;
- ctx->enginectx.op.unprepare_request = NULL;
return 0;
}
static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq);
-static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine,
- void *areq);
static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm)
{
- struct stm32_cryp_ctx *ctx = crypto_aead_ctx(tfm);
-
tfm->reqsize = sizeof(struct stm32_cryp_reqctx);
- ctx->enginectx.op.do_one_request = stm32_cryp_aead_one_req;
- ctx->enginectx.op.prepare_request = stm32_cryp_prepare_aead_req;
- ctx->enginectx.op.unprepare_request = NULL;
-
return 0;
}
@@ -1180,9 +1166,6 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
cryp = ctx->cryp;
- if (!cryp)
- return -ENODEV;
-
rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
rctx->mode &= FLG_MODE_MASK;
@@ -1248,16 +1231,6 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
return ret;
}
-static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
- void *areq)
-{
- struct skcipher_request *req = container_of(areq,
- struct skcipher_request,
- base);
-
- return stm32_cryp_prepare_req(req, NULL);
-}
-
static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
{
struct skcipher_request *req = container_of(areq,
@@ -1270,15 +1243,8 @@ static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
if (!cryp)
return -ENODEV;
- return stm32_cryp_cpu_start(cryp);
-}
-
-static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine, void *areq)
-{
- struct aead_request *req = container_of(areq, struct aead_request,
- base);
-
- return stm32_cryp_prepare_req(NULL, req);
+ return stm32_cryp_prepare_req(req, NULL) ?:
+ stm32_cryp_cpu_start(cryp);
}
static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
@@ -1287,10 +1253,15 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
base);
struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct stm32_cryp *cryp = ctx->cryp;
+ int err;
if (!cryp)
return -ENODEV;
+ err = stm32_cryp_prepare_req(NULL, req);
+ if (err)
+ return err;
+
if (unlikely(!cryp->payload_in && !cryp->header_in)) {
/* No input data to process: get tag and finish */
stm32_cryp_finish_req(cryp, 0);
@@ -1709,143 +1680,178 @@ static irqreturn_t stm32_cryp_irq(int irq, void *arg)
return IRQ_WAKE_THREAD;
}
-static struct skcipher_alg crypto_algs[] = {
+static struct skcipher_engine_alg crypto_algs[] = {
{
- .base.cra_name = "ecb(aes)",
- .base.cra_driver_name = "stm32-ecb-aes",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
-
- .init = stm32_cryp_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_ecb_encrypt,
- .decrypt = stm32_cryp_aes_ecb_decrypt,
+ .base = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "stm32-ecb-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ecb_encrypt,
+ .decrypt = stm32_cryp_aes_ecb_decrypt,
+ },
+ .op = {
+ .do_one_request = stm32_cryp_cipher_one_req,
+ },
},
{
- .base.cra_name = "cbc(aes)",
- .base.cra_driver_name = "stm32-cbc-aes",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
-
- .init = stm32_cryp_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_cbc_encrypt,
- .decrypt = stm32_cryp_aes_cbc_decrypt,
+ .base = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "stm32-cbc-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_cbc_encrypt,
+ .decrypt = stm32_cryp_aes_cbc_decrypt,
+ },
+ .op = {
+ .do_one_request = stm32_cryp_cipher_one_req,
+ },
},
{
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "stm32-ctr-aes",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
-
- .init = stm32_cryp_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_ctr_encrypt,
- .decrypt = stm32_cryp_aes_ctr_decrypt,
+ .base = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "stm32-ctr-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ctr_encrypt,
+ .decrypt = stm32_cryp_aes_ctr_decrypt,
+ },
+ .op = {
+ .do_one_request = stm32_cryp_cipher_one_req,
+ },
},
{
- .base.cra_name = "ecb(des)",
- .base.cra_driver_name = "stm32-ecb-des",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
-
- .init = stm32_cryp_init_tfm,
- .min_keysize = DES_BLOCK_SIZE,
- .max_keysize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_des_setkey,
- .encrypt = stm32_cryp_des_ecb_encrypt,
- .decrypt = stm32_cryp_des_ecb_decrypt,
+ .base = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "stm32-ecb-des",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_ecb_encrypt,
+ .decrypt = stm32_cryp_des_ecb_decrypt,
+ },
+ .op = {
+ .do_one_request = stm32_cryp_cipher_one_req,
+ },
},
{
- .base.cra_name = "cbc(des)",
- .base.cra_driver_name = "stm32-cbc-des",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
-
- .init = stm32_cryp_init_tfm,
- .min_keysize = DES_BLOCK_SIZE,
- .max_keysize = DES_BLOCK_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_des_setkey,
- .encrypt = stm32_cryp_des_cbc_encrypt,
- .decrypt = stm32_cryp_des_cbc_decrypt,
+ .base = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "stm32-cbc-des",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_cbc_encrypt,
+ .decrypt = stm32_cryp_des_cbc_decrypt,
+ },
+ .op = {
+ .do_one_request = stm32_cryp_cipher_one_req,
+ },
},
{
- .base.cra_name = "ecb(des3_ede)",
- .base.cra_driver_name = "stm32-ecb-des3",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
-
- .init = stm32_cryp_init_tfm,
- .min_keysize = 3 * DES_BLOCK_SIZE,
- .max_keysize = 3 * DES_BLOCK_SIZE,
- .setkey = stm32_cryp_tdes_setkey,
- .encrypt = stm32_cryp_tdes_ecb_encrypt,
- .decrypt = stm32_cryp_tdes_ecb_decrypt,
+ .base = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "stm32-ecb-des3",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_ecb_encrypt,
+ .decrypt = stm32_cryp_tdes_ecb_decrypt,
+ },
+ .op = {
+ .do_one_request = stm32_cryp_cipher_one_req,
+ },
},
{
- .base.cra_name = "cbc(des3_ede)",
- .base.cra_driver_name = "stm32-cbc-des3",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
-
- .init = stm32_cryp_init_tfm,
- .min_keysize = 3 * DES_BLOCK_SIZE,
- .max_keysize = 3 * DES_BLOCK_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_tdes_setkey,
- .encrypt = stm32_cryp_tdes_cbc_encrypt,
- .decrypt = stm32_cryp_tdes_cbc_decrypt,
+ .base = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "stm32-cbc-des3",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_cbc_encrypt,
+ .decrypt = stm32_cryp_tdes_cbc_decrypt,
+ },
+ .op = {
+ .do_one_request = stm32_cryp_cipher_one_req,
+ },
},
};
-static struct aead_alg aead_algs[] = {
+static struct aead_engine_alg aead_algs[] = {
{
- .setkey = stm32_cryp_aes_aead_setkey,
- .setauthsize = stm32_cryp_aes_gcm_setauthsize,
- .encrypt = stm32_cryp_aes_gcm_encrypt,
- .decrypt = stm32_cryp_aes_gcm_decrypt,
- .init = stm32_cryp_aes_aead_init,
- .ivsize = 12,
- .maxauthsize = AES_BLOCK_SIZE,
+ .base.setkey = stm32_cryp_aes_aead_setkey,
+ .base.setauthsize = stm32_cryp_aes_gcm_setauthsize,
+ .base.encrypt = stm32_cryp_aes_gcm_encrypt,
+ .base.decrypt = stm32_cryp_aes_gcm_decrypt,
+ .base.init = stm32_cryp_aes_aead_init,
+ .base.ivsize = 12,
+ .base.maxauthsize = AES_BLOCK_SIZE,
- .base = {
+ .base.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "stm32-gcm-aes",
.cra_priority = 200,
@@ -1855,17 +1861,20 @@ static struct aead_alg aead_algs[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
+ .op = {
+ .do_one_request = stm32_cryp_aead_one_req,
+ },
},
{
- .setkey = stm32_cryp_aes_aead_setkey,
- .setauthsize = stm32_cryp_aes_ccm_setauthsize,
- .encrypt = stm32_cryp_aes_ccm_encrypt,
- .decrypt = stm32_cryp_aes_ccm_decrypt,
- .init = stm32_cryp_aes_aead_init,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
+ .base.setkey = stm32_cryp_aes_aead_setkey,
+ .base.setauthsize = stm32_cryp_aes_ccm_setauthsize,
+ .base.encrypt = stm32_cryp_aes_ccm_encrypt,
+ .base.decrypt = stm32_cryp_aes_ccm_decrypt,
+ .base.init = stm32_cryp_aes_aead_init,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
- .base = {
+ .base.base = {
.cra_name = "ccm(aes)",
.cra_driver_name = "stm32-ccm-aes",
.cra_priority = 200,
@@ -1875,6 +1884,9 @@ static struct aead_alg aead_algs[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
+ .op = {
+ .do_one_request = stm32_cryp_aead_one_req,
+ },
},
};
@@ -2036,14 +2048,14 @@ static int stm32_cryp_probe(struct platform_device *pdev)
goto err_engine2;
}
- ret = crypto_register_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
+ ret = crypto_engine_register_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
if (ret) {
dev_err(dev, "Could not register algs\n");
goto err_algs;
}
if (cryp->caps->aeads_support) {
- ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ ret = crypto_engine_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
if (ret)
goto err_aead_algs;
}
@@ -2055,7 +2067,7 @@ static int stm32_cryp_probe(struct platform_device *pdev)
return 0;
err_aead_algs:
- crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
+ crypto_engine_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
err_algs:
err_engine2:
crypto_engine_exit(cryp->engine);
@@ -2085,8 +2097,8 @@ static int stm32_cryp_remove(struct platform_device *pdev)
return ret;
if (cryp->caps->aeads_support)
- crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
- crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
+ crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ crypto_engine_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
crypto_engine_exit(cryp->engine);
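
The same engine migration removes prepare_request/unprepare_request from the engine ops, so stm32-cryp now runs its former prepare step at the top of the request handler itself; the ?: form above simply returns the first non-zero error. An expanded, plain-if sketch of that shape (function names hypothetical):

    static int my_cipher_one_req(struct crypto_engine *engine, void *areq)
    {
    	struct skcipher_request *req =
    		container_of(areq, struct skcipher_request, base);
    	int err;

    	/* what used to run in .prepare_request now runs inline */
    	err = my_prepare_req(req);
    	if (err)
    		return err;

    	return my_start_hw(req);
    }

Note that the !cryp check which used to justify returning -ENODEV from the prepare path survives in the one_req handlers, so dropping it from stm32_cryp_prepare_req() loses no coverage.
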
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index f0df32382719..2b2382d4332c 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -6,27 +6,26 @@
* Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
*/
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/sha3.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
-#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-
-#include <crypto/engine.h>
-#include <crypto/hash.h>
-#include <crypto/md5.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/internal/hash.h>
+#include <linux/string.h>
#define HASH_CR 0x00
#define HASH_DIN 0x04
@@ -45,19 +44,11 @@
#define HASH_CR_DMAE BIT(3)
#define HASH_CR_DATATYPE_POS 4
#define HASH_CR_MODE BIT(6)
+#define HASH_CR_ALGO_POS 7
#define HASH_CR_MDMAT BIT(13)
#define HASH_CR_DMAA BIT(14)
#define HASH_CR_LKEY BIT(16)
-#define HASH_CR_ALGO_SHA1 0x0
-#define HASH_CR_ALGO_MD5 0x80
-#define HASH_CR_ALGO_SHA224 0x40000
-#define HASH_CR_ALGO_SHA256 0x40080
-
-#define HASH_CR_UX500_EMPTYMSG BIT(20)
-#define HASH_CR_UX500_ALGO_SHA1 BIT(7)
-#define HASH_CR_UX500_ALGO_SHA256 0x0
-
/* Interrupt */
#define HASH_DINIE BIT(0)
#define HASH_DCIE BIT(1)
@@ -66,9 +57,6 @@
#define HASH_MASK_CALC_COMPLETION BIT(0)
#define HASH_MASK_DATA_INPUT BIT(1)
-/* Context swap register */
-#define HASH_CSR_REGISTER_NUMBER 54
-
/* Status Flags */
#define HASH_SR_DATA_INPUT_READY BIT(0)
#define HASH_SR_OUTPUT_READY BIT(1)
@@ -79,28 +67,39 @@
#define HASH_STR_NBLW_MASK GENMASK(4, 0)
#define HASH_STR_DCAL BIT(8)
+/* HWCFGR Register */
+#define HASH_HWCFG_DMA_MASK GENMASK(3, 0)
+
+/* Context swap register */
+#define HASH_CSR_NB_SHA256_HMAC 54
+#define HASH_CSR_NB_SHA256 38
+#define HASH_CSR_NB_SHA512_HMAC 103
+#define HASH_CSR_NB_SHA512 91
+#define HASH_CSR_NB_SHA3_HMAC 88
+#define HASH_CSR_NB_SHA3 72
+#define HASH_CSR_NB_MAX HASH_CSR_NB_SHA512_HMAC
+
#define HASH_FLAGS_INIT BIT(0)
#define HASH_FLAGS_OUTPUT_READY BIT(1)
#define HASH_FLAGS_CPU BIT(2)
-#define HASH_FLAGS_DMA_READY BIT(3)
-#define HASH_FLAGS_DMA_ACTIVE BIT(4)
-#define HASH_FLAGS_HMAC_INIT BIT(5)
-#define HASH_FLAGS_HMAC_FINAL BIT(6)
-#define HASH_FLAGS_HMAC_KEY BIT(7)
-
+#define HASH_FLAGS_DMA_ACTIVE BIT(3)
+#define HASH_FLAGS_HMAC_INIT BIT(4)
+#define HASH_FLAGS_HMAC_FINAL BIT(5)
+#define HASH_FLAGS_HMAC_KEY BIT(6)
+#define HASH_FLAGS_SHA3_MODE BIT(7)
#define HASH_FLAGS_FINAL BIT(15)
#define HASH_FLAGS_FINUP BIT(16)
-#define HASH_FLAGS_ALGO_MASK GENMASK(21, 18)
-#define HASH_FLAGS_MD5 BIT(18)
-#define HASH_FLAGS_SHA1 BIT(19)
-#define HASH_FLAGS_SHA224 BIT(20)
-#define HASH_FLAGS_SHA256 BIT(21)
+#define HASH_FLAGS_ALGO_MASK GENMASK(20, 17)
+#define HASH_FLAGS_ALGO_SHIFT 17
+#define HASH_FLAGS_ERRORS BIT(21)
#define HASH_FLAGS_EMPTY BIT(22)
#define HASH_FLAGS_HMAC BIT(23)
#define HASH_OP_UPDATE 1
#define HASH_OP_FINAL 2
+#define HASH_BURST_LEVEL 4
+
enum stm32_hash_data_format {
HASH_DATA_32_BITS = 0x0,
HASH_DATA_16_BITS = 0x1,
@@ -108,16 +107,30 @@ enum stm32_hash_data_format {
HASH_DATA_1_BIT = 0x3
};
-#define HASH_BUFLEN 256
-#define HASH_LONG_KEY 64
-#define HASH_MAX_KEY_SIZE (SHA256_BLOCK_SIZE * 8)
-#define HASH_QUEUE_LENGTH 16
-#define HASH_DMA_THRESHOLD 50
+#define HASH_BUFLEN (SHA3_224_BLOCK_SIZE + 4)
+#define HASH_MAX_KEY_SIZE (SHA512_BLOCK_SIZE * 8)
+
+enum stm32_hash_algo {
+ HASH_SHA1 = 0,
+ HASH_MD5 = 1,
+ HASH_SHA224 = 2,
+ HASH_SHA256 = 3,
+ HASH_SHA3_224 = 4,
+ HASH_SHA3_256 = 5,
+ HASH_SHA3_384 = 6,
+ HASH_SHA3_512 = 7,
+ HASH_SHA384 = 12,
+ HASH_SHA512 = 15,
+};
+
+enum ux500_hash_algo {
+ HASH_SHA256_UX500 = 0,
+ HASH_SHA1_UX500 = 1,
+};
#define HASH_AUTOSUSPEND_DELAY 50
struct stm32_hash_ctx {
- struct crypto_engine_ctx enginectx;
struct stm32_hash_dev *hdev;
struct crypto_shash *xtfm;
unsigned long flags;
@@ -130,19 +143,19 @@ struct stm32_hash_state {
u32 flags;
u16 bufcnt;
- u16 buflen;
+ u16 blocklen;
u8 buffer[HASH_BUFLEN] __aligned(4);
/* hash state */
- u32 hw_context[3 + HASH_CSR_REGISTER_NUMBER];
+ u32 hw_context[3 + HASH_CSR_NB_MAX];
};
struct stm32_hash_request_ctx {
struct stm32_hash_dev *hdev;
unsigned long op;
- u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+ u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
size_t digcnt;
/* DMA */
@@ -161,17 +174,18 @@ struct stm32_hash_request_ctx {
};
struct stm32_hash_algs_info {
- struct ahash_alg *algs_list;
+ struct ahash_engine_alg *algs_list;
size_t size;
};
struct stm32_hash_pdata {
- struct stm32_hash_algs_info *algs_info;
- size_t algs_info_size;
- bool has_sr;
- bool has_mdmat;
- bool broken_emptymsg;
- bool ux500;
+ const int alg_shift;
+ const struct stm32_hash_algs_info *algs_info;
+ size_t algs_info_size;
+ bool has_sr;
+ bool has_mdmat;
+ bool broken_emptymsg;
+ bool ux500;
};
struct stm32_hash_dev {
@@ -182,7 +196,6 @@ struct stm32_hash_dev {
void __iomem *io_base;
phys_addr_t phys_base;
u32 dma_mode;
- u32 dma_maxburst;
bool polled;
struct ahash_request *req;
@@ -269,37 +282,25 @@ static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
return 0;
}
-static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
+static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct stm32_hash_state *state = &rctx->state;
+ u32 alg = (state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT;
u32 reg = HASH_CR_INIT;
if (!(hdev->flags & HASH_FLAGS_INIT)) {
- switch (state->flags & HASH_FLAGS_ALGO_MASK) {
- case HASH_FLAGS_MD5:
- reg |= HASH_CR_ALGO_MD5;
- break;
- case HASH_FLAGS_SHA1:
- if (hdev->pdata->ux500)
- reg |= HASH_CR_UX500_ALGO_SHA1;
+ if (hdev->pdata->ux500) {
+ reg |= ((alg & BIT(0)) << HASH_CR_ALGO_POS);
+ } else {
+ if (hdev->pdata->alg_shift == HASH_CR_ALGO_POS)
+ reg |= ((alg & BIT(1)) << 17) |
+ ((alg & BIT(0)) << HASH_CR_ALGO_POS);
else
- reg |= HASH_CR_ALGO_SHA1;
- break;
- case HASH_FLAGS_SHA224:
- reg |= HASH_CR_ALGO_SHA224;
- break;
- case HASH_FLAGS_SHA256:
- if (hdev->pdata->ux500)
- reg |= HASH_CR_UX500_ALGO_SHA256;
- else
- reg |= HASH_CR_ALGO_SHA256;
- break;
- default:
- reg |= HASH_CR_ALGO_MD5;
+ reg |= alg << hdev->pdata->alg_shift;
}
reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
@@ -307,7 +308,7 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
if (state->flags & HASH_FLAGS_HMAC) {
hdev->flags |= HASH_FLAGS_HMAC;
reg |= HASH_CR_MODE;
- if (ctx->keylen > HASH_LONG_KEY)
+ if (ctx->keylen > crypto_ahash_blocksize(tfm))
reg |= HASH_CR_LKEY;
}
@@ -318,6 +319,12 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
hdev->flags |= HASH_FLAGS_INIT;
+ /*
+	 * After the first block + 1 words are filled up,
+	 * we only need to fill one block to start partial computation.
+ */
+ rctx->state.blocklen -= sizeof(u32);
+
dev_dbg(hdev->dev, "Write Control %x\n", reg);
}
}
@@ -327,9 +334,9 @@ static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
struct stm32_hash_state *state = &rctx->state;
size_t count;
- while ((state->bufcnt < state->buflen) && rctx->total) {
+ while ((state->bufcnt < state->blocklen) && rctx->total) {
count = min(rctx->sg->length - rctx->offset, rctx->total);
- count = min_t(size_t, count, state->buflen - state->bufcnt);
+ count = min_t(size_t, count, state->blocklen - state->bufcnt);
if (count <= 0) {
if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
@@ -384,7 +391,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
hdev->flags |= HASH_FLAGS_CPU;
- stm32_hash_write_ctrl(hdev, length);
+ stm32_hash_write_ctrl(hdev);
if (stm32_hash_wait_busy(hdev))
return -ETIMEDOUT;
@@ -419,20 +426,59 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
return 0;
}
+static int hash_swap_reg(struct stm32_hash_request_ctx *rctx)
+{
+ struct stm32_hash_state *state = &rctx->state;
+
+ switch ((state->flags & HASH_FLAGS_ALGO_MASK) >>
+ HASH_FLAGS_ALGO_SHIFT) {
+ case HASH_MD5:
+ case HASH_SHA1:
+ case HASH_SHA224:
+ case HASH_SHA256:
+ if (state->flags & HASH_FLAGS_HMAC)
+ return HASH_CSR_NB_SHA256_HMAC;
+ else
+ return HASH_CSR_NB_SHA256;
+
+ case HASH_SHA384:
+ case HASH_SHA512:
+ if (state->flags & HASH_FLAGS_HMAC)
+ return HASH_CSR_NB_SHA512_HMAC;
+ else
+ return HASH_CSR_NB_SHA512;
+
+ case HASH_SHA3_224:
+ case HASH_SHA3_256:
+ case HASH_SHA3_384:
+ case HASH_SHA3_512:
+ if (state->flags & HASH_FLAGS_HMAC)
+ return HASH_CSR_NB_SHA3_HMAC;
+ else
+ return HASH_CSR_NB_SHA3;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct stm32_hash_state *state = &rctx->state;
u32 *preg = state->hw_context;
int bufcnt, err = 0, final;
- int i;
+ int i, swap_reg;
dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
final = state->flags & HASH_FLAGS_FINAL;
- while ((rctx->total >= state->buflen) ||
- (state->bufcnt + rctx->total >= state->buflen)) {
+ while ((rctx->total >= state->blocklen) ||
+ (state->bufcnt + rctx->total >= state->blocklen)) {
stm32_hash_append_sg(rctx);
bufcnt = state->bufcnt;
state->bufcnt = 0;
@@ -455,11 +501,13 @@ static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
if (stm32_hash_wait_busy(hdev))
return -ETIMEDOUT;
+ swap_reg = hash_swap_reg(rctx);
+
if (!hdev->pdata->ux500)
*preg++ = stm32_hash_read(hdev, HASH_IMR);
*preg++ = stm32_hash_read(hdev, HASH_STR);
*preg++ = stm32_hash_read(hdev, HASH_CR);
- for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ for (i = 0; i < swap_reg; i++)
*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
state->flags |= HASH_FLAGS_INIT;
@@ -492,7 +540,7 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
reg = stm32_hash_read(hdev, HASH_CR);
- if (!hdev->pdata->has_mdmat) {
+ if (hdev->pdata->has_mdmat) {
if (mdma)
reg |= HASH_CR_MDMAT;
else
@@ -533,8 +581,6 @@ static void stm32_hash_dma_callback(void *param)
struct stm32_hash_dev *hdev = param;
complete(&hdev->dma_completion);
-
- hdev->flags |= HASH_FLAGS_DMA_READY;
}
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
@@ -544,7 +590,7 @@ static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
int err;
- if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
+ if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode == 1) {
err = stm32_hash_write_key(hdev);
if (stm32_hash_wait_busy(hdev))
return -ETIMEDOUT;
@@ -579,8 +625,8 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
dma_conf.direction = DMA_MEM_TO_DEV;
dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_conf.src_maxburst = hdev->dma_maxburst;
- dma_conf.dst_maxburst = hdev->dma_maxburst;
+ dma_conf.src_maxburst = HASH_BURST_LEVEL;
+ dma_conf.dst_maxburst = HASH_BURST_LEVEL;
dma_conf.device_fc = false;
chan = dma_request_chan(hdev->dev, "in");
@@ -607,18 +653,18 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
u32 *buffer = (void *)rctx->state.buffer;
struct scatterlist sg[1], *tsg;
- int err = 0, len = 0, reg, ncp = 0;
- unsigned int i;
+ int err = 0, reg, ncp = 0;
+ unsigned int i, len = 0, bufcnt = 0;
+ bool is_last = false;
rctx->sg = hdev->req->src;
rctx->total = hdev->req->nbytes;
rctx->nents = sg_nents(rctx->sg);
-
if (rctx->nents < 0)
return -EINVAL;
- stm32_hash_write_ctrl(hdev, rctx->total);
+ stm32_hash_write_ctrl(hdev);
if (hdev->flags & HASH_FLAGS_HMAC) {
err = stm32_hash_hmac_dma_send(hdev);
@@ -627,10 +673,12 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
}
for_each_sg(rctx->sg, tsg, rctx->nents, i) {
+ sg[0] = *tsg;
len = sg->length;
- sg[0] = *tsg;
- if (sg_is_last(sg)) {
+ if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) {
+ sg->length = rctx->total - bufcnt;
+ is_last = true;
if (hdev->dma_mode == 1) {
len = (ALIGN(sg->length, 16) - 16);
@@ -656,13 +704,15 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
return -ENOMEM;
}
- err = stm32_hash_xmit_dma(hdev, sg, len,
- !sg_is_last(sg));
+ err = stm32_hash_xmit_dma(hdev, sg, len, !is_last);
+ bufcnt += sg[0].length;
dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
if (err == -ENOMEM)
return err;
+ if (is_last)
+ break;
}
if (hdev->dma_mode == 1) {
@@ -718,11 +768,12 @@ static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
{
struct scatterlist *sg;
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
int i;
- if (req->nbytes <= HASH_DMA_THRESHOLD)
+ if (!hdev->dma_lch || req->nbytes <= rctx->state.blocklen)
return false;
if (sg_nents(req->src) > 1) {
@@ -748,31 +799,64 @@ static int stm32_hash_init(struct ahash_request *req)
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
struct stm32_hash_state *state = &rctx->state;
+ bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE;
rctx->hdev = hdev;
state->flags = HASH_FLAGS_CPU;
+ if (sha3_mode)
+ state->flags |= HASH_FLAGS_SHA3_MODE;
+
rctx->digcnt = crypto_ahash_digestsize(tfm);
switch (rctx->digcnt) {
case MD5_DIGEST_SIZE:
- state->flags |= HASH_FLAGS_MD5;
+ state->flags |= HASH_MD5 << HASH_FLAGS_ALGO_SHIFT;
break;
case SHA1_DIGEST_SIZE:
- state->flags |= HASH_FLAGS_SHA1;
+ if (hdev->pdata->ux500)
+ state->flags |= HASH_SHA1_UX500 << HASH_FLAGS_ALGO_SHIFT;
+ else
+ state->flags |= HASH_SHA1 << HASH_FLAGS_ALGO_SHIFT;
break;
case SHA224_DIGEST_SIZE:
- state->flags |= HASH_FLAGS_SHA224;
+ if (sha3_mode)
+ state->flags |= HASH_SHA3_224 << HASH_FLAGS_ALGO_SHIFT;
+ else
+ state->flags |= HASH_SHA224 << HASH_FLAGS_ALGO_SHIFT;
break;
case SHA256_DIGEST_SIZE:
- state->flags |= HASH_FLAGS_SHA256;
+ if (sha3_mode) {
+ state->flags |= HASH_SHA3_256 << HASH_FLAGS_ALGO_SHIFT;
+ } else {
+ if (hdev->pdata->ux500)
+ state->flags |= HASH_SHA256_UX500 << HASH_FLAGS_ALGO_SHIFT;
+ else
+ state->flags |= HASH_SHA256 << HASH_FLAGS_ALGO_SHIFT;
+ }
+ break;
+ case SHA384_DIGEST_SIZE:
+ if (sha3_mode)
+ state->flags |= HASH_SHA3_384 << HASH_FLAGS_ALGO_SHIFT;
+ else
+ state->flags |= HASH_SHA384 << HASH_FLAGS_ALGO_SHIFT;
+ break;
+ case SHA512_DIGEST_SIZE:
+ if (sha3_mode)
+ state->flags |= HASH_SHA3_512 << HASH_FLAGS_ALGO_SHIFT;
+ else
+ state->flags |= HASH_SHA512 << HASH_FLAGS_ALGO_SHIFT;
break;
default:
return -EINVAL;
}
rctx->state.bufcnt = 0;
- rctx->state.buflen = HASH_BUFLEN;
+ rctx->state.blocklen = crypto_ahash_blocksize(tfm) + sizeof(u32);
+ if (rctx->state.blocklen > HASH_BUFLEN) {
+		dev_err(hdev->dev, "Error, block too large\n");
+ return -EINVAL;
+ }
rctx->total = 0;
rctx->offset = 0;
rctx->data_type = HASH_DATA_8_BITS;
@@ -842,6 +926,7 @@ static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
static void stm32_hash_copy_hash(struct ahash_request *req)
{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_state *state = &rctx->state;
struct stm32_hash_dev *hdev = rctx->hdev;
@@ -851,22 +936,7 @@ static void stm32_hash_copy_hash(struct ahash_request *req)
if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY))
return stm32_hash_emptymsg_fallback(req);
- switch (state->flags & HASH_FLAGS_ALGO_MASK) {
- case HASH_FLAGS_MD5:
- hashsize = MD5_DIGEST_SIZE;
- break;
- case HASH_FLAGS_SHA1:
- hashsize = SHA1_DIGEST_SIZE;
- break;
- case HASH_FLAGS_SHA224:
- hashsize = SHA224_DIGEST_SIZE;
- break;
- case HASH_FLAGS_SHA256:
- hashsize = SHA256_DIGEST_SIZE;
- break;
- default:
- return;
- }
+ hashsize = crypto_ahash_digestsize(tfm);
for (i = 0; i < hashsize / sizeof(u32); i++) {
if (hdev->pdata->ux500)
@@ -881,6 +951,11 @@ static void stm32_hash_copy_hash(struct ahash_request *req)
static int stm32_hash_finish(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ u32 reg;
+
+ reg = stm32_hash_read(rctx->hdev, HASH_SR);
+ reg &= ~HASH_SR_OUTPUT_READY;
+ stm32_hash_write(rctx->hdev, HASH_SR, reg);
if (!req->result)
return -EINVAL;
@@ -920,6 +995,7 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
struct stm32_hash_state *state = &rctx->state;
+ int swap_reg;
int err = 0;
if (!hdev)
@@ -932,6 +1008,7 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
hdev->req = req;
hdev->flags = 0;
+ swap_reg = hash_swap_reg(rctx);
if (state->flags & HASH_FLAGS_INIT) {
u32 *preg = rctx->state.hw_context;
@@ -945,7 +1022,7 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
reg = *preg++ | HASH_CR_INIT;
stm32_hash_write(hdev, HASH_CR, reg);
- for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ for (i = 0; i < swap_reg; i++)
stm32_hash_write(hdev, HASH_CSR(i), *preg++);
hdev->flags |= HASH_FLAGS_INIT;
@@ -1000,7 +1077,7 @@ static int stm32_hash_update(struct ahash_request *req)
rctx->sg = req->src;
rctx->offset = 0;
- if ((state->bufcnt + rctx->total < state->buflen)) {
+	if (state->bufcnt + rctx->total < state->blocklen) {
stm32_hash_append_sg(rctx);
return 0;
}
@@ -1102,8 +1179,7 @@ static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
return 0;
}
-static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
- const char *algs_hmac_name)
+static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, u32 algs_flags)
{
struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1112,38 +1188,33 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
ctx->keylen = 0;
- if (algs_hmac_name)
- ctx->flags |= HASH_FLAGS_HMAC;
-
- ctx->enginectx.op.do_one_request = stm32_hash_one_request;
+ if (algs_flags)
+ ctx->flags |= algs_flags;
return stm32_hash_init_fallback(tfm);
}
static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
- return stm32_hash_cra_init_algs(tfm, NULL);
+ return stm32_hash_cra_init_algs(tfm, 0);
}
-static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
+static int stm32_hash_cra_hmac_init(struct crypto_tfm *tfm)
{
- return stm32_hash_cra_init_algs(tfm, "md5");
+ return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_HMAC);
}
-static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
+static int stm32_hash_cra_sha3_init(struct crypto_tfm *tfm)
{
- return stm32_hash_cra_init_algs(tfm, "sha1");
+ return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE);
}
-static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
+static int stm32_hash_cra_sha3_hmac_init(struct crypto_tfm *tfm)
{
- return stm32_hash_cra_init_algs(tfm, "sha224");
+ return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE |
+ HASH_FLAGS_HMAC);
}
-static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
-{
- return stm32_hash_cra_init_algs(tfm, "sha256");
-}
static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
{
@@ -1162,11 +1233,9 @@ static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
goto finish;
}
- } else if (HASH_FLAGS_DMA_READY & hdev->flags) {
- if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
- hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
- goto finish;
- }
+ } else if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
+ hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
+ goto finish;
}
return IRQ_HANDLED;
@@ -1185,8 +1254,6 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
reg = stm32_hash_read(hdev, HASH_SR);
if (reg & HASH_SR_OUTPUT_READY) {
- reg &= ~HASH_SR_OUTPUT_READY;
- stm32_hash_write(hdev, HASH_SR, reg);
hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		/* Disable IT */
stm32_hash_write(hdev, HASH_IMR, 0);
@@ -1196,16 +1263,16 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
}
-static struct ahash_alg algs_md5[] = {
+static struct ahash_engine_alg algs_md5[] = {
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1221,18 +1288,21 @@ static struct ahash_alg algs_md5[] = {
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
},
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .setkey = stm32_hash_setkey,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1244,24 +1314,27 @@ static struct ahash_alg algs_md5[] = {
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
- .cra_init = stm32_hash_cra_md5_init,
+ .cra_init = stm32_hash_cra_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
- },
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ }
};
-static struct ahash_alg algs_sha1[] = {
+static struct ahash_engine_alg algs_sha1[] = {
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1277,18 +1350,21 @@ static struct ahash_alg algs_sha1[] = {
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
},
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .setkey = stm32_hash_setkey,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1300,24 +1376,27 @@ static struct ahash_alg algs_sha1[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
- .cra_init = stm32_hash_cra_sha1_init,
+ .cra_init = stm32_hash_cra_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
},
};
-static struct ahash_alg algs_sha224[] = {
+static struct ahash_engine_alg algs_sha224[] = {
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1333,18 +1412,21 @@ static struct ahash_alg algs_sha224[] = {
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
},
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .setkey = stm32_hash_setkey,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.setkey = stm32_hash_setkey,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1356,24 +1438,27 @@ static struct ahash_alg algs_sha224[] = {
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
- .cra_init = stm32_hash_cra_sha224_init,
+ .cra_init = stm32_hash_cra_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
},
};
-static struct ahash_alg algs_sha256[] = {
+static struct ahash_engine_alg algs_sha256[] = {
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1389,18 +1474,21 @@ static struct ahash_alg algs_sha256[] = {
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
},
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .setkey = stm32_hash_setkey,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1412,14 +1500,377 @@ static struct ahash_alg algs_sha256[] = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
- .cra_init = stm32_hash_cra_sha256_init,
+ .cra_init = stm32_hash_cra_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
},
};
+static struct ahash_engine_alg algs_sha384_sha512[] = {
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "stm32-sha384",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.setkey = stm32_hash_setkey,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "stm32-hmac-sha384",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_hmac_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "stm32-sha512",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "stm32-hmac-sha512",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_hmac_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+};
+
+static struct ahash_engine_alg algs_sha3[] = {
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "sha3-224",
+ .cra_driver_name = "stm32-sha3-224",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "hmac(sha3-224)",
+ .cra_driver_name = "stm32-hmac-sha3-224",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_hmac_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "sha3-256",
+ .cra_driver_name = "stm32-sha3-256",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "hmac(sha3-256)",
+ .cra_driver_name = "stm32-hmac-sha3-256",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_hmac_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "sha3-384",
+ .cra_driver_name = "stm32-sha3-384",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "hmac(sha3-384)",
+ .cra_driver_name = "stm32-hmac-sha3-384",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_hmac_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "sha3-512",
+ .cra_driver_name = "stm32-sha3-512",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "hmac(sha3-512)",
+ .cra_driver_name = "stm32-hmac-sha3-512",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_hmac_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ }
+};
+
static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
unsigned int i, j;
@@ -1427,7 +1878,7 @@ static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
for (i = 0; i < hdev->pdata->algs_info_size; i++) {
for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
- err = crypto_register_ahash(
+ err = crypto_engine_register_ahash(
&hdev->pdata->algs_info[i].algs_list[j]);
if (err)
goto err_algs;
@@ -1439,7 +1890,7 @@ err_algs:
dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
for (; i--; ) {
for (; j--;)
- crypto_unregister_ahash(
+ crypto_engine_unregister_ahash(
&hdev->pdata->algs_info[i].algs_list[j]);
}
@@ -1452,7 +1903,7 @@ static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
for (i = 0; i < hdev->pdata->algs_info_size; i++) {
for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
- crypto_unregister_ahash(
+ crypto_engine_unregister_ahash(
&hdev->pdata->algs_info[i].algs_list[j]);
}
@@ -1471,6 +1922,7 @@ static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
};
static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
+ .alg_shift = 7,
.algs_info = stm32_hash_algs_info_ux500,
.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_ux500),
.broken_emptymsg = true,
@@ -1489,6 +1941,7 @@ static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
};
static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
+ .alg_shift = 7,
.algs_info = stm32_hash_algs_info_stm32f4,
.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
.has_sr = true,
@@ -1515,25 +1968,49 @@ static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
};
static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
+ .alg_shift = 7,
.algs_info = stm32_hash_algs_info_stm32f7,
.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
.has_sr = true,
.has_mdmat = true,
};
-static const struct of_device_id stm32_hash_of_match[] = {
+static struct stm32_hash_algs_info stm32_hash_algs_info_stm32mp13[] = {
{
- .compatible = "stericsson,ux500-hash",
- .data = &stm32_hash_pdata_ux500,
+ .algs_list = algs_sha1,
+ .size = ARRAY_SIZE(algs_sha1),
+ },
+ {
+ .algs_list = algs_sha224,
+ .size = ARRAY_SIZE(algs_sha224),
+ },
+ {
+ .algs_list = algs_sha256,
+ .size = ARRAY_SIZE(algs_sha256),
},
{
- .compatible = "st,stm32f456-hash",
- .data = &stm32_hash_pdata_stm32f4,
+ .algs_list = algs_sha384_sha512,
+ .size = ARRAY_SIZE(algs_sha384_sha512),
},
{
- .compatible = "st,stm32f756-hash",
- .data = &stm32_hash_pdata_stm32f7,
+ .algs_list = algs_sha3,
+ .size = ARRAY_SIZE(algs_sha3),
},
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_stm32mp13 = {
+ .alg_shift = 17,
+ .algs_info = stm32_hash_algs_info_stm32mp13,
+ .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32mp13),
+ .has_sr = true,
+ .has_mdmat = true,
+};
+
+static const struct of_device_id stm32_hash_of_match[] = {
+ { .compatible = "stericsson,ux500-hash", .data = &stm32_hash_pdata_ux500 },
+ { .compatible = "st,stm32f456-hash", .data = &stm32_hash_pdata_stm32f4 },
+ { .compatible = "st,stm32f756-hash", .data = &stm32_hash_pdata_stm32f7 },
+ { .compatible = "st,stm32mp13-hash", .data = &stm32_hash_pdata_stm32mp13 },
{},
};
@@ -1548,12 +2025,6 @@ static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
return -EINVAL;
}
- if (of_property_read_u32(dev->of_node, "dma-maxburst",
- &hdev->dma_maxburst)) {
- dev_info(dev, "dma-maxburst not specified, using 0\n");
- hdev->dma_maxburst = 0;
- }
-
return 0;
}
@@ -1663,7 +2134,7 @@ static int stm32_hash_probe(struct platform_device *pdev)
/* FIXME: implement DMA mode for Ux500 */
hdev->dma_mode = 0;
else
- hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
+ hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR) & HASH_HWCFG_DMA_MASK;
/* Register algos */
ret = stm32_hash_register_algs(hdev);
@@ -1696,18 +2167,12 @@ err_reset:
return ret;
}
-static int stm32_hash_remove(struct platform_device *pdev)
+static void stm32_hash_remove(struct platform_device *pdev)
{
- struct stm32_hash_dev *hdev;
+ struct stm32_hash_dev *hdev = platform_get_drvdata(pdev);
int ret;
- hdev = platform_get_drvdata(pdev);
- if (!hdev)
- return -ENODEV;
-
- ret = pm_runtime_resume_and_get(hdev->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(hdev->dev);
stm32_hash_unregister_algs(hdev);
@@ -1723,9 +2188,8 @@ static int stm32_hash_remove(struct platform_device *pdev)
pm_runtime_disable(hdev->dev);
pm_runtime_put_noidle(hdev->dev);
- clk_disable_unprepare(hdev->clk);
-
- return 0;
+ if (ret >= 0)
+ clk_disable_unprepare(hdev->clk);
}
#ifdef CONFIG_PM
@@ -1762,7 +2226,7 @@ static const struct dev_pm_ops stm32_hash_pm_ops = {
static struct platform_driver stm32_hash_driver = {
.probe = stm32_hash_probe,
- .remove = stm32_hash_remove,
+ .remove_new = stm32_hash_remove,
.driver = {
.name = "stm32-hash",
.pm = &stm32_hash_pm_ops,
@@ -1772,6 +2236,6 @@ static struct platform_driver stm32_hash_driver = {
module_platform_driver(stm32_hash_driver);
-MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
+MODULE_DESCRIPTION("STM32 SHA1/SHA2/SHA3 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");
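The stm32 hunks above are one instance of a tree-wide pattern: the do_one_request callback moves out of the per-tfm crypto_engine_ctx and into the algorithm object itself, a struct ahash_engine_alg that wraps the old ahash_alg as .base and adds a static .op. A minimal sketch of that shape, with stubbed hardware handling; all demo_* names are illustrative, only the crypto/engine API calls are real:

#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <linux/module.h>

static struct crypto_engine *demo_engine;	/* set up at probe time */

/* Front-end entry points only queue the request onto the engine. */
static int demo_op(struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(demo_engine, req);
}

/* State save/restore stubs; a real driver copies its request context. */
static int demo_export(struct ahash_request *req, void *out) { return 0; }
static int demo_import(struct ahash_request *req, const void *in) { return 0; }

/* Engine worker: runs one queued request, then completes it. */
static int demo_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);

	/* Program the hash hardware from req here. */
	crypto_finalize_hash_request(engine, req, 0);
	return 0;
}

static struct ahash_engine_alg demo_alg = {
	.base.init	= demo_op,
	.base.update	= demo_op,
	.base.final	= demo_op,
	.base.finup	= demo_op,
	.base.digest	= demo_op,
	.base.export	= demo_export,
	.base.import	= demo_import,
	.base.halg = {
		.digestsize = 32,
		.statesize  = 32,
		.base = {
			.cra_name        = "demo-hash",
			.cra_driver_name = "demo-hash-engine",
			.cra_priority    = 100,
			.cra_flags       = CRYPTO_ALG_ASYNC,
			.cra_blocksize   = 64,
			.cra_module      = THIS_MODULE,
		},
	},
	/* Formerly assigned per-tfm through crypto_engine_ctx. */
	.op.do_one_request = demo_one_request,
};

Registration then pairs crypto_engine_register_ahash()/crypto_engine_unregister_ahash() instead of the plain crypto_register_ahash() calls, exactly as the stm32_hash_register_algs()/stm32_hash_unregister_algs() hunks show.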
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index bb27f011cf31..4ca4fbd227bc 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -19,9 +19,9 @@
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
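The talitos change is pure include hygiene: the driver only needs OF node accessors, OF IRQ mapping, and the platform bus types, so the broad of_address.h/of_platform.h includes are replaced by of.h and platform_device.h. A sketch of that minimal include set (demo_probe is illustrative):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;	/* <linux/of.h> */
	unsigned int irq = irq_of_parse_and_map(np, 0);	/* <linux/of_irq.h> */

	return irq ? 0 : -ENXIO;
}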
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index 6963344f6a3a..2621ff8a9376 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -7,15 +7,16 @@
* Copyright 2022 Bytedance CO., LTD.
*/
-#include <linux/mpi.h>
-#include <linux/scatterlist.h>
-#include <crypto/algapi.h>
+#include <crypto/engine.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
-#include <linux/err.h>
#include <crypto/scatterwalk.h>
-#include <linux/atomic.h>
-
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mpi.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
@@ -24,7 +25,6 @@ struct virtio_crypto_rsa_ctx {
};
struct virtio_crypto_akcipher_ctx {
- struct crypto_engine_ctx enginectx;
struct virtio_crypto *vcrypto;
struct crypto_akcipher *tfm;
bool session_valid;
@@ -47,7 +47,7 @@ struct virtio_crypto_akcipher_algo {
uint32_t algonum;
uint32_t service;
unsigned int active_devs;
- struct akcipher_alg algo;
+ struct akcipher_engine_alg algo;
};
static DEFINE_MUTEX(algs_lock);
@@ -475,9 +475,6 @@ static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
ctx->tfm = tfm;
- ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req;
- ctx->enginectx.op.prepare_request = NULL;
- ctx->enginectx.op.unprepare_request = NULL;
akcipher_set_reqsize(tfm,
sizeof(struct virtio_crypto_akcipher_request));
@@ -500,7 +497,7 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
{
.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
- .algo = {
+ .algo.base = {
.encrypt = virtio_crypto_rsa_encrypt,
.decrypt = virtio_crypto_rsa_decrypt,
.set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
@@ -516,11 +513,14 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
},
},
+ .algo.op = {
+ .do_one_request = virtio_crypto_rsa_do_req,
+ },
},
{
.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
- .algo = {
+ .algo.base = {
.encrypt = virtio_crypto_rsa_encrypt,
.decrypt = virtio_crypto_rsa_decrypt,
.sign = virtio_crypto_rsa_sign,
@@ -538,6 +538,9 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
},
},
+ .algo.op = {
+ .do_one_request = virtio_crypto_rsa_do_req,
+ },
},
};
@@ -556,14 +559,14 @@ int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
continue;
if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
- ret = crypto_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
+ ret = crypto_engine_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
if (ret)
goto unlock;
}
virtio_crypto_akcipher_algs[i].active_devs++;
dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
- virtio_crypto_akcipher_algs[i].algo.base.cra_name);
+ virtio_crypto_akcipher_algs[i].algo.base.base.cra_name);
}
unlock:
@@ -586,7 +589,7 @@ void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
continue;
if (virtio_crypto_akcipher_algs[i].active_devs == 1)
- crypto_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);
+ crypto_engine_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);
virtio_crypto_akcipher_algs[i].active_devs--;
}
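The virtio akcipher conversion has the same shape: struct akcipher_engine_alg carries the engine op statically, and the inner crypto_alg is now reached through two .base levels (hence the .algo.base.base.cra_name change above). A sketch with illustrative demo_* names:

#include <crypto/engine.h>
#include <crypto/internal/akcipher.h>
#include <linux/module.h>

/* Engine worker replacing the old ctx->enginectx.op.do_one_request. */
static int demo_rsa_do_one(struct crypto_engine *engine, void *vreq)
{
	struct akcipher_request *req = akcipher_request_cast(vreq);

	/* Perform the asymmetric operation, then complete on the engine. */
	crypto_finalize_akcipher_request(engine, req, 0);
	return 0;
}

static struct akcipher_engine_alg demo_rsa = {
	.base.base = {
		.cra_name        = "rsa",
		.cra_driver_name = "demo-rsa-engine",
		.cra_priority    = 150,
		.cra_module      = THIS_MODULE,
	},
	/* demo_rsa.base.base.cra_name is the algorithm name string. */
	.op.do_one_request = demo_rsa_do_one,
};

Registration moves to crypto_engine_register_akcipher()/crypto_engine_unregister_akcipher(), mirroring the register/unregister hunks above. (The .base akcipher ops are omitted here for brevity; a registrable alg fills them in.)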
diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
index e5876286828b..23c41d87d835 100644
--- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
@@ -6,19 +6,16 @@
* Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
*/
-#include <linux/scatterlist.h>
-#include <crypto/algapi.h>
+#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
-#include <linux/err.h>
#include <crypto/scatterwalk.h>
-#include <linux/atomic.h>
-
+#include <linux/err.h>
+#include <linux/scatterlist.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
struct virtio_crypto_skcipher_ctx {
- struct crypto_engine_ctx enginectx;
struct virtio_crypto *vcrypto;
struct crypto_skcipher *tfm;
@@ -42,7 +39,7 @@ struct virtio_crypto_algo {
uint32_t algonum;
uint32_t service;
unsigned int active_devs;
- struct skcipher_alg algo;
+ struct skcipher_engine_alg algo;
};
/*
@@ -523,9 +520,6 @@ static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
ctx->tfm = tfm;
- ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
- ctx->enginectx.op.prepare_request = NULL;
- ctx->enginectx.op.unprepare_request = NULL;
return 0;
}
@@ -580,7 +574,7 @@ static void virtio_crypto_skcipher_finalize_req(
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
- .algo = {
+ .algo.base = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "virtio_crypto_aes_cbc",
.base.cra_priority = 150,
@@ -598,6 +592,9 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
+ .algo.op = {
+ .do_one_request = virtio_crypto_skcipher_crypt_req,
+ },
} };
int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
@@ -616,14 +613,14 @@ int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
continue;
if (virtio_crypto_algs[i].active_devs == 0) {
- ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
+ ret = crypto_engine_register_skcipher(&virtio_crypto_algs[i].algo);
if (ret)
goto unlock;
}
virtio_crypto_algs[i].active_devs++;
dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
- virtio_crypto_algs[i].algo.base.cra_name);
+ virtio_crypto_algs[i].algo.base.base.cra_name);
}
unlock:
@@ -647,7 +644,7 @@ void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto)
continue;
if (virtio_crypto_algs[i].active_devs == 1)
- crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);
+ crypto_engine_unregister_skcipher(&virtio_crypto_algs[i].algo);
virtio_crypto_algs[i].active_devs--;
}
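With the engine op now in the static skcipher_engine_alg, the tfm init hook no longer populates any crypto_engine_ctx; it only sizes the per-request context, as the virtio_crypto_skcipher_init() hunk shows. A sketch (demo_* names illustrative):

#include <crypto/internal/skcipher.h>

struct demo_sym_request {
	int op;	/* illustrative per-request state */
};

static int demo_skcipher_init(struct crypto_skcipher *tfm)
{
	/* The removed do_one_request/prepare_request/unprepare_request
	 * assignments now live in the alg's static .op field instead. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct demo_sym_request));
	return 0;
}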
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index bf1f421e05f2..ce335578b759 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -9,13 +9,14 @@
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
-
#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-
-#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/string.h>
#define ZYNQMP_DMA_BIT_MASK 32U
@@ -43,7 +44,7 @@ enum zynqmp_aead_keysrc {
struct zynqmp_aead_drv_ctx {
union {
- struct aead_alg aead;
+ struct aead_engine_alg aead;
} alg;
struct device *dev;
struct crypto_engine *engine;
@@ -60,7 +61,6 @@ struct zynqmp_aead_hw_req {
};
struct zynqmp_aead_tfm_ctx {
- struct crypto_engine_ctx engine_ctx;
struct device *dev;
u8 key[ZYNQMP_AES_KEY_SIZE];
u8 *iv;
@@ -286,7 +286,7 @@ static int zynqmp_aes_aead_encrypt(struct aead_request *req)
struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
rq_ctx->op = ZYNQMP_AES_ENCRYPT;
- drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
}
@@ -299,7 +299,7 @@ static int zynqmp_aes_aead_decrypt(struct aead_request *req)
struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
rq_ctx->op = ZYNQMP_AES_DECRYPT;
- drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
}
@@ -312,20 +312,16 @@ static int zynqmp_aes_aead_init(struct crypto_aead *aead)
struct zynqmp_aead_drv_ctx *drv_ctx;
struct aead_alg *alg = crypto_aead_alg(aead);
- drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
tfm_ctx->dev = drv_ctx->dev;
- tfm_ctx->engine_ctx.op.do_one_request = zynqmp_handle_aes_req;
- tfm_ctx->engine_ctx.op.prepare_request = NULL;
- tfm_ctx->engine_ctx.op.unprepare_request = NULL;
-
- tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->alg.aead.base.cra_name,
+ tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->alg.aead.base.base.cra_name,
0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(tfm_ctx->fbk_cipher)) {
pr_err("%s() Error: failed to allocate fallback for %s\n",
- __func__, drv_ctx->alg.aead.base.cra_name);
+ __func__, drv_ctx->alg.aead.base.base.cra_name);
return PTR_ERR(tfm_ctx->fbk_cipher);
}
@@ -350,7 +346,7 @@ static void zynqmp_aes_aead_exit(struct crypto_aead *aead)
}
static struct zynqmp_aead_drv_ctx aes_drv_ctx = {
- .alg.aead = {
+ .alg.aead.base = {
.setkey = zynqmp_aes_aead_setkey,
.setauthsize = zynqmp_aes_aead_setauthsize,
.encrypt = zynqmp_aes_aead_encrypt,
@@ -372,7 +368,10 @@ static struct zynqmp_aead_drv_ctx aes_drv_ctx = {
.cra_ctxsize = sizeof(struct zynqmp_aead_tfm_ctx),
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.aead.op = {
+ .do_one_request = zynqmp_handle_aes_req,
+ },
};
static int zynqmp_aes_aead_probe(struct platform_device *pdev)
@@ -405,7 +404,7 @@ static int zynqmp_aes_aead_probe(struct platform_device *pdev)
goto err_engine;
}
- err = crypto_register_aead(&aes_drv_ctx.alg.aead);
+ err = crypto_engine_register_aead(&aes_drv_ctx.alg.aead);
if (err < 0) {
dev_err(dev, "Failed to register AEAD alg.\n");
goto err_aead;
@@ -413,7 +412,7 @@ static int zynqmp_aes_aead_probe(struct platform_device *pdev)
return 0;
err_aead:
- crypto_unregister_aead(&aes_drv_ctx.alg.aead);
+ crypto_engine_unregister_aead(&aes_drv_ctx.alg.aead);
err_engine:
if (aes_drv_ctx.engine)
@@ -425,7 +424,7 @@ err_engine:
static int zynqmp_aes_aead_remove(struct platform_device *pdev)
{
crypto_engine_exit(aes_drv_ctx.engine);
- crypto_unregister_aead(&aes_drv_ctx.alg.aead);
+ crypto_engine_unregister_aead(&aes_drv_ctx.alg.aead);
return 0;
}
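Because the zynqmp driver embeds a struct aead_engine_alg, crypto_aead_alg() now hands back the inner .base member, and container_of() has to name alg.aead.base to recover the driver context. A sketch of that lookup plus the transfer-to-engine path, with illustrative demo_* names:

#include <crypto/engine.h>
#include <crypto/internal/aead.h>
#include <linux/container_of.h>

struct demo_aead_drv_ctx {
	union {
		struct aead_engine_alg aead;
	} alg;
	struct crypto_engine *engine;
};

static int demo_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct aead_alg *alg = crypto_aead_alg(aead);
	/* alg points at alg.aead.base, one level deeper than before. */
	struct demo_aead_drv_ctx *drv_ctx =
		container_of(alg, struct demo_aead_drv_ctx, alg.aead.base);

	return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
}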
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 43ff170ff1c2..426bf1a72ba6 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#define ZYNQMP_DMA_BIT_MASK 32U