Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/common/sa1111.c | 6
-rw-r--r--  arch/arm/configs/exynos_defconfig | 3
-rw-r--r--  arch/arm/configs/milbeaut_m10v_defconfig | 4
-rw-r--r--  arch/arm/configs/multi_v7_defconfig | 1
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 1
-rw-r--r--  arch/arm/configs/pxa_defconfig | 4
-rw-r--r--  arch/arm/configs/spitz_defconfig | 2
-rw-r--r--  arch/arm/crypto/Kconfig | 59
-rw-r--r--  arch/arm/crypto/Makefile | 20
-rw-r--r--  arch/arm/crypto/aes-ce-glue.c | 104
-rw-r--r--  arch/arm/crypto/aes-neonbs-glue.c | 118
-rw-r--r--  arch/arm/crypto/blake2b-neon-glue.c | 21
-rw-r--r--  arch/arm/crypto/chacha-glue.c | 352
-rw-r--r--  arch/arm/crypto/ghash-ce-glue.c | 104
-rw-r--r--  arch/arm/crypto/poly1305-glue.c | 274
-rw-r--r--  arch/arm/crypto/sha1-ce-glue.c | 36
-rw-r--r--  arch/arm/crypto/sha1.h | 14
-rw-r--r--  arch/arm/crypto/sha1_glue.c | 33
-rw-r--r--  arch/arm/crypto/sha1_neon_glue.c | 39
-rw-r--r--  arch/arm/crypto/sha2-ce-glue.c | 109
-rw-r--r--  arch/arm/crypto/sha256_glue.c | 117
-rw-r--r--  arch/arm/crypto/sha256_glue.h | 15
-rw-r--r--  arch/arm/crypto/sha256_neon_glue.c | 92
-rw-r--r--  arch/arm/crypto/sha512-glue.c | 36
-rw-r--r--  arch/arm/crypto/sha512-neon-glue.c | 43
-rw-r--r--  arch/arm/crypto/sha512.h | 6
-rw-r--r--  arch/arm/include/asm/simd.h | 8
-rw-r--r--  arch/arm/lib/Makefile | 6
-rw-r--r--  arch/arm/lib/crc-t10dif.c (renamed from arch/arm/lib/crc-t10dif-glue.c) | 6
-rw-r--r--  arch/arm/lib/crc32.c (renamed from arch/arm/lib/crc32-glue.c) | 6
-rw-r--r--  arch/arm/lib/crypto/.gitignore | 3
-rw-r--r--  arch/arm/lib/crypto/Kconfig | 31
-rw-r--r--  arch/arm/lib/crypto/Makefile | 32
-rw-r--r--  arch/arm/lib/crypto/blake2s-core.S (renamed from arch/arm/crypto/blake2s-core.S) | 0
-rw-r--r--  arch/arm/lib/crypto/blake2s-glue.c (renamed from arch/arm/crypto/blake2s-glue.c) | 0
-rw-r--r--  arch/arm/lib/crypto/chacha-glue.c | 138
-rw-r--r--  arch/arm/lib/crypto/chacha-neon-core.S (renamed from arch/arm/crypto/chacha-neon-core.S) | 2
-rw-r--r--  arch/arm/lib/crypto/chacha-scalar-core.S (renamed from arch/arm/crypto/chacha-scalar-core.S) | 5
-rw-r--r--  arch/arm/lib/crypto/poly1305-armv4.pl (renamed from arch/arm/crypto/poly1305-armv4.pl) | 4
-rw-r--r--  arch/arm/lib/crypto/poly1305-glue.c | 80
-rw-r--r--  arch/arm/lib/crypto/sha256-armv4.pl (renamed from arch/arm/crypto/sha256-armv4.pl) | 20
-rw-r--r--  arch/arm/lib/crypto/sha256-ce.S (renamed from arch/arm/crypto/sha2-ce-core.S) | 10
-rw-r--r--  arch/arm/lib/crypto/sha256.c | 64
-rw-r--r--  arch/arm/mach-exynos/suspend.c | 5
-rw-r--r--  arch/arm/mach-imx/avic.c | 4
-rw-r--r--  arch/arm/mach-imx/gpc.c | 5
-rw-r--r--  arch/arm/mach-imx/tzic.c | 4
-rw-r--r--  arch/arm/mach-omap1/irq.c | 3
-rw-r--r--  arch/arm/mach-omap2/omap-wakeupgen.c | 5
-rw-r--r--  arch/arm/mach-pxa/irq.c | 5
-rw-r--r--  arch/arm/plat-orion/gpio.c | 18
51 files changed, 551 insertions(+), 1526 deletions(-)
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 9846f30990f7..02eda44a6faa 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -416,9 +416,9 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
writel_relaxed(~0, irqbase + SA1111_INTSTATCLR0);
writel_relaxed(~0, irqbase + SA1111_INTSTATCLR1);
- sachip->irqdomain = irq_domain_add_linear(NULL, SA1111_IRQ_NR,
- &sa1111_irqdomain_ops,
- sachip);
+ sachip->irqdomain = irq_domain_create_linear(NULL, SA1111_IRQ_NR,
+ &sa1111_irqdomain_ops,
+ sachip);
if (!sachip->irqdomain) {
irq_free_descs(sachip->irq_base, SA1111_IRQ_NR);
return -ENOMEM;
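
Note on the sa1111 change (annotation, not part of the patch): irq_domain_add_linear() identified the domain by a struct device_node *, while irq_domain_create_linear() takes the more general struct fwnode_handle *. A minimal sketch of the two signatures, as assumed from the irqdomain headers:

    /* old, device-tree specific */
    struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
                                             unsigned int size,
                                             const struct irq_domain_ops *ops,
                                             void *host_data);

    /* new, firmware-node generic */
    struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode,
                                                unsigned int size,
                                                const struct irq_domain_ops *ops,
                                                void *host_data);

Since sa1111 passes NULL for the node in both cases, the conversion is purely mechanical.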
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index e81a5d6c1c20..e81964cce516 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -349,7 +349,7 @@ CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_XTS=m
@@ -364,7 +364,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_SHA1_ARM_NEON=m
-CONFIG_CRYPTO_SHA256_ARM=m
CONFIG_CRYPTO_SHA512_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_CHACHA20_NEON=m
diff --git a/arch/arm/configs/milbeaut_m10v_defconfig b/arch/arm/configs/milbeaut_m10v_defconfig
index 275ddf7a3a14..242e7d5a3f68 100644
--- a/arch/arm/configs/milbeaut_m10v_defconfig
+++ b/arch/arm/configs/milbeaut_m10v_defconfig
@@ -93,15 +93,13 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_KEYS=y
-CONFIG_CRYPTO_MANAGER=y
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_SELFTESTS=y
# CONFIG_CRYPTO_ECHAINIV is not set
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_SEQIV=m
CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_SHA1_ARM_NEON=m
CONFIG_CRYPTO_SHA1_ARM_CE=m
-CONFIG_CRYPTO_SHA2_ARM_CE=m
CONFIG_CRYPTO_SHA512_ARM=m
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index ad037c175fdb..96178acedad0 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -1301,7 +1301,6 @@ CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_SHA1_ARM_NEON=m
CONFIG_CRYPTO_SHA1_ARM_CE=m
-CONFIG_CRYPTO_SHA2_ARM_CE=m
CONFIG_CRYPTO_SHA512_ARM=m
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 75b326bc7830..317f977e509e 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -697,7 +697,6 @@ CONFIG_SECURITY=y
CONFIG_CRYPTO_MICHAEL_MIC=y
CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_SHA1_ARM_NEON=m
-CONFIG_CRYPTO_SHA256_ARM=m
CONFIG_CRYPTO_SHA512_ARM=m
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 24fca8608554..ded4b9a5accf 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -636,10 +636,9 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
CONFIG_TIMER_STATS=y
CONFIG_SECURITY=y
-CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
@@ -660,7 +659,6 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_SHA1_ARM=m
-CONFIG_CRYPTO_SHA256_ARM=m
CONFIG_CRYPTO_SHA512_ARM=m
CONFIG_CRYPTO_AES_ARM=m
CONFIG_FONTS=y
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index ffec59e3f49c..ac2a0f998c73 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -215,7 +215,7 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_DEBUG_KERNEL=y
CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_BENCHMARK=m
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_AES=m
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 23e4ea067ddb..7efb9a8596e4 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -46,30 +46,6 @@ config CRYPTO_NHPOLY1305_NEON
Architecture: arm using:
- NEON (Advanced SIMD) extensions
-config CRYPTO_POLY1305_ARM
- tristate
- select CRYPTO_HASH
- select CRYPTO_ARCH_HAVE_LIB_POLY1305
- default CRYPTO_LIB_POLY1305_INTERNAL
- help
- Poly1305 authenticator algorithm (RFC7539)
-
- Architecture: arm optionally using
- - NEON (Advanced SIMD) extensions
-
-config CRYPTO_BLAKE2S_ARM
- bool "Hash functions: BLAKE2s"
- select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
- help
- BLAKE2s cryptographic hash function (RFC 7693)
-
- Architecture: arm
-
- This is faster than the generic implementations of BLAKE2s and
- BLAKE2b, but slower than the NEON implementation of BLAKE2b.
- There is no NEON implementation of BLAKE2s, since NEON doesn't
- really help with it.
-
config CRYPTO_BLAKE2B_NEON
tristate "Hash functions: BLAKE2b (NEON)"
depends on KERNEL_MODE_NEON
@@ -117,27 +93,6 @@ config CRYPTO_SHA1_ARM_CE
Architecture: arm using ARMv8 Crypto Extensions
-config CRYPTO_SHA2_ARM_CE
- tristate "Hash functions: SHA-224 and SHA-256 (ARMv8 Crypto Extensions)"
- depends on KERNEL_MODE_NEON
- select CRYPTO_SHA256_ARM
- select CRYPTO_HASH
- help
- SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
-
- Architecture: arm using
- - ARMv8 Crypto Extensions
-
-config CRYPTO_SHA256_ARM
- tristate "Hash functions: SHA-224 and SHA-256 (NEON)"
- select CRYPTO_HASH
- depends on !CPU_V7M
- help
- SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
-
- Architecture: arm using
- - NEON (Advanced SIMD) extensions
-
config CRYPTO_SHA512_ARM
tristate "Hash functions: SHA-384 and SHA-512 (NEON)"
select CRYPTO_HASH
@@ -172,7 +127,6 @@ config CRYPTO_AES_ARM_BS
select CRYPTO_AES_ARM
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
- select CRYPTO_SIMD
help
Length-preserving ciphers: AES cipher algorithms (FIPS-197)
with block cipher modes:
@@ -200,7 +154,6 @@ config CRYPTO_AES_ARM_CE
depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
- select CRYPTO_SIMD
help
Length-preserving ciphers: AES cipher algorithms (FIPS-197)
with block cipher modes:
@@ -214,17 +167,5 @@ config CRYPTO_AES_ARM_CE
Architecture: arm using:
- ARMv8 Crypto Extensions
-config CRYPTO_CHACHA20_NEON
- tristate
- select CRYPTO_SKCIPHER
- select CRYPTO_ARCH_HAVE_LIB_CHACHA
- default CRYPTO_LIB_CHACHA_INTERNAL
- help
- Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
- stream cipher algorithms
-
- Architecture: arm using:
- - NEON (Advanced SIMD) extensions
-
endmenu
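
The deleted CRYPTO_POLY1305_ARM, CRYPTO_BLAKE2S_ARM, and CRYPTO_CHACHA20_NEON entries are not dropped outright; per the diffstat they reappear under arch/arm/lib/crypto/Kconfig. That file's contents are not shown in this diff, but assuming the usual lib/crypto pattern, the relocated ChaCha option would look roughly like:

    config CRYPTO_CHACHA20_NEON
    	tristate
    	default CRYPTO_LIB_CHACHA
    	select CRYPTO_ARCH_HAVE_LIB_CHACHA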
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 3d0e23ff9e74..8479137c6e80 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -7,37 +7,25 @@ obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
-obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
-obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += libblake2s-arm.o
obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o
-obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
-obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o
obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
obj-$(CONFIG_CRYPTO_CURVE25519_NEON) += curve25519-neon.o
obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
-obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o
obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
aes-arm-y := aes-cipher-core.o aes-cipher-glue.o
aes-arm-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
sha1-arm-y := sha1-armv4-large.o sha1_glue.o
sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o
-sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o
-sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y)
sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o
sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y)
-libblake2s-arm-y:= blake2s-core.o blake2s-glue.o
blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o
sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
-sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
-chacha-neon-y := chacha-scalar-core.o chacha-glue.o
-chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o
-poly1305-arm-y := poly1305-core.o poly1305-glue.o
nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
curve25519-neon-y := curve25519-core.o curve25519-glue.o
@@ -47,14 +35,8 @@ quiet_cmd_perl = PERL $@
$(obj)/%-core.S: $(src)/%-armv4.pl
$(call cmd,perl)
-clean-files += poly1305-core.S sha256-core.S sha512-core.S
+clean-files += sha512-core.S
aflags-thumb2-$(CONFIG_THUMB2_KERNEL) := -U__thumb2__ -D__thumb2__=1
-AFLAGS_sha256-core.o += $(aflags-thumb2-y)
AFLAGS_sha512-core.o += $(aflags-thumb2-y)
-
-# massage the perlasm code a bit so we only get the NEON routine if we need it
-poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5
-poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7
-AFLAGS_poly1305-core.o += $(poly1305-aflags-y) $(aflags-thumb2-y)
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index 1cf61f51e766..00591895d540 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -10,8 +10,6 @@
#include <asm/simd.h>
#include <linux/unaligned.h>
#include <crypto/aes.h>
-#include <crypto/ctr.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
@@ -418,29 +416,6 @@ static int ctr_encrypt(struct skcipher_request *req)
return err;
}
-static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
-{
- struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- unsigned long flags;
-
- /*
- * Temporarily disable interrupts to avoid races where
- * cachelines are evicted when the CPU is interrupted
- * to do something else.
- */
- local_irq_save(flags);
- aes_encrypt(ctx, dst, src);
- local_irq_restore(flags);
-}
-
-static int ctr_encrypt_sync(struct skcipher_request *req)
-{
- if (!crypto_simd_usable())
- return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
-
- return ctr_encrypt(req);
-}
-
static int xts_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -586,10 +561,9 @@ static int xts_decrypt(struct skcipher_request *req)
}
static struct skcipher_alg aes_algs[] = { {
- .base.cra_name = "__ecb(aes)",
- .base.cra_driver_name = "__ecb-aes-ce",
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -600,10 +574,9 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(aes)",
- .base.cra_driver_name = "__cbc-aes-ce",
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -615,10 +588,9 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}, {
- .base.cra_name = "__cts(cbc(aes))",
- .base.cra_driver_name = "__cts-cbc-aes-ce",
+ .base.cra_name = "cts(cbc(aes))",
+ .base.cra_driver_name = "cts-cbc-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -631,10 +603,9 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = cts_cbc_encrypt,
.decrypt = cts_cbc_decrypt,
}, {
- .base.cra_name = "__ctr(aes)",
- .base.cra_driver_name = "__ctr-aes-ce",
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.base.cra_module = THIS_MODULE,
@@ -647,25 +618,9 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = ctr_encrypt,
.decrypt = ctr_encrypt,
}, {
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "ctr-aes-ce-sync",
- .base.cra_priority = 300 - 1,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct crypto_aes_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .chunksize = AES_BLOCK_SIZE,
- .setkey = ce_aes_setkey,
- .encrypt = ctr_encrypt_sync,
- .decrypt = ctr_encrypt_sync,
-}, {
- .base.cra_name = "__xts(aes)",
- .base.cra_driver_name = "__xts-aes-ce",
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-ce",
.base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
.base.cra_module = THIS_MODULE,
@@ -679,51 +634,14 @@ static struct skcipher_alg aes_algs[] = { {
.decrypt = xts_decrypt,
} };
-static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
-
static void aes_exit(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++)
- simd_skcipher_free(aes_simd_algs[i]);
-
crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
static int __init aes_init(void)
{
- struct simd_skcipher_alg *simd;
- const char *basename;
- const char *algname;
- const char *drvname;
- int err;
- int i;
-
- err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
- if (err)
- return err;
-
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
- continue;
-
- algname = aes_algs[i].base.cra_name + 2;
- drvname = aes_algs[i].base.cra_driver_name + 2;
- basename = aes_algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
- err = PTR_ERR(simd);
- if (IS_ERR(simd))
- goto unregister_simds;
-
- aes_simd_algs[i] = simd;
- }
-
- return 0;
-
-unregister_simds:
- aes_exit();
- return err;
+ return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
module_cpu_feature_match(AES, aes_init);
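
With the CRYPTO_SIMD wrapper removed, the "__"-prefixed internal variants and the "-sync" fallback algorithm disappear, and the public names register directly. A hypothetical caller, for illustration only:

    /* "ctr(aes)" now resolves straight to the "ctr-aes-ce" implementation
     * on CPUs with the ARMv8 Crypto Extensions. */
    struct crypto_skcipher *tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);

    if (IS_ERR(tfm))
            return PTR_ERR(tfm);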
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index f6be80b5938b..c60104dc1585 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -8,8 +8,6 @@
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/aes.h>
-#include <crypto/ctr.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
@@ -59,11 +57,6 @@ struct aesbs_xts_ctx {
struct crypto_aes_ctx tweak_key;
};
-struct aesbs_ctr_ctx {
- struct aesbs_ctx key; /* must be first member */
- struct crypto_aes_ctx fallback;
-};
-
static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -200,25 +193,6 @@ static int cbc_decrypt(struct skcipher_request *req)
return err;
}
-static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
- unsigned int key_len)
-{
- struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = aes_expandkey(&ctx->fallback, in_key, key_len);
- if (err)
- return err;
-
- ctx->key.rounds = 6 + key_len / 4;
-
- kernel_neon_begin();
- aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
- kernel_neon_end();
-
- return 0;
-}
-
static int ctr_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -254,21 +228,6 @@ static int ctr_encrypt(struct skcipher_request *req)
return err;
}
-static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
-{
- struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- __aes_arm_encrypt(ctx->fallback.key_enc, ctx->key.rounds, src, dst);
-}
-
-static int ctr_encrypt_sync(struct skcipher_request *req)
-{
- if (!crypto_simd_usable())
- return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
-
- return ctr_encrypt(req);
-}
-
static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -374,13 +333,12 @@ static int xts_decrypt(struct skcipher_request *req)
}
static struct skcipher_alg aes_algs[] = { {
- .base.cra_name = "__ecb(aes)",
- .base.cra_driver_name = "__ecb-aes-neonbs",
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -389,13 +347,12 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
- .base.cra_name = "__cbc(aes)",
- .base.cra_driver_name = "__cbc-aes-neonbs",
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -405,13 +362,12 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}, {
- .base.cra_name = "__ctr(aes)",
- .base.cra_driver_name = "__ctr-aes-neonbs",
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aesbs_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -422,29 +378,12 @@ static struct skcipher_alg aes_algs[] = { {
.encrypt = ctr_encrypt,
.decrypt = ctr_encrypt,
}, {
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "ctr-aes-neonbs-sync",
- .base.cra_priority = 250 - 1,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct aesbs_ctr_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .chunksize = AES_BLOCK_SIZE,
- .walksize = 8 * AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aesbs_ctr_setkey_sync,
- .encrypt = ctr_encrypt_sync,
- .decrypt = ctr_encrypt_sync,
-}, {
- .base.cra_name = "__xts(aes)",
- .base.cra_driver_name = "__xts-aes-neonbs",
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-neonbs",
.base.cra_priority = 250,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_xts_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -455,55 +394,18 @@ static struct skcipher_alg aes_algs[] = { {
.decrypt = xts_decrypt,
} };
-static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
-
static void aes_exit(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++)
- if (aes_simd_algs[i])
- simd_skcipher_free(aes_simd_algs[i]);
-
crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
static int __init aes_init(void)
{
- struct simd_skcipher_alg *simd;
- const char *basename;
- const char *algname;
- const char *drvname;
- int err;
- int i;
-
if (!(elf_hwcap & HWCAP_NEON))
return -ENODEV;
- err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
- if (err)
- return err;
-
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
- continue;
-
- algname = aes_algs[i].base.cra_name + 2;
- drvname = aes_algs[i].base.cra_driver_name + 2;
- basename = aes_algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
- err = PTR_ERR(simd);
- if (IS_ERR(simd))
- goto unregister_simds;
-
- aes_simd_algs[i] = simd;
- }
- return 0;
-
-unregister_simds:
- aes_exit();
- return err;
+ return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}
-late_initcall(aes_init);
+module_init(aes_init);
module_exit(aes_exit);
diff --git a/arch/arm/crypto/blake2b-neon-glue.c b/arch/arm/crypto/blake2b-neon-glue.c
index 4b59d027ba4a..2ff443a91724 100644
--- a/arch/arm/crypto/blake2b-neon-glue.c
+++ b/arch/arm/crypto/blake2b-neon-glue.c
@@ -7,7 +7,6 @@
#include <crypto/internal/blake2b.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <linux/module.h>
#include <linux/sizes.h>
@@ -21,11 +20,6 @@ asmlinkage void blake2b_compress_neon(struct blake2b_state *state,
static void blake2b_compress_arch(struct blake2b_state *state,
const u8 *block, size_t nblocks, u32 inc)
{
- if (!crypto_simd_usable()) {
- blake2b_compress_generic(state, block, nblocks, inc);
- return;
- }
-
do {
const size_t blocks = min_t(size_t, nblocks,
SZ_4K / BLAKE2B_BLOCK_SIZE);
@@ -42,12 +36,14 @@ static void blake2b_compress_arch(struct blake2b_state *state,
static int crypto_blake2b_update_neon(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
- return crypto_blake2b_update(desc, in, inlen, blake2b_compress_arch);
+ return crypto_blake2b_update_bo(desc, in, inlen, blake2b_compress_arch);
}
-static int crypto_blake2b_final_neon(struct shash_desc *desc, u8 *out)
+static int crypto_blake2b_finup_neon(struct shash_desc *desc, const u8 *in,
+ unsigned int inlen, u8 *out)
{
- return crypto_blake2b_final(desc, out, blake2b_compress_arch);
+ return crypto_blake2b_finup(desc, in, inlen, out,
+ blake2b_compress_arch);
}
#define BLAKE2B_ALG(name, driver_name, digest_size) \
@@ -55,7 +51,9 @@ static int crypto_blake2b_final_neon(struct shash_desc *desc, u8 *out)
.base.cra_name = name, \
.base.cra_driver_name = driver_name, \
.base.cra_priority = 200, \
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY | \
+ CRYPTO_AHASH_ALG_BLOCK_ONLY | \
+ CRYPTO_AHASH_ALG_FINAL_NONZERO, \
.base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \
.base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \
.base.cra_module = THIS_MODULE, \
@@ -63,8 +61,9 @@ static int crypto_blake2b_final_neon(struct shash_desc *desc, u8 *out)
.setkey = crypto_blake2b_setkey, \
.init = crypto_blake2b_init, \
.update = crypto_blake2b_update_neon, \
- .final = crypto_blake2b_final_neon, \
+ .finup = crypto_blake2b_finup_neon, \
.descsize = sizeof(struct blake2b_state), \
+ .statesize = BLAKE2B_STATE_SIZE, \
}
static struct shash_alg blake2b_neon_algs[] = {
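
The CRYPTO_AHASH_ALG_BLOCK_ONLY conversion above moves partial-block buffering out of the driver and into the crypto core: update now sees only whole blocks and returns how many trailing bytes it left unprocessed, and CRYPTO_AHASH_ALG_FINAL_NONZERO tells the core to always hand finup a non-empty final chunk. A sketch of that contract, with EXAMPLE_BLOCK_SIZE and example_compress() as placeholder names:

    /* Illustrative only; placeholders stand in for a driver's real
     * block size and compression routine. */
    static int example_update(struct shash_desc *desc, const u8 *in,
                              unsigned int inlen)
    {
            unsigned int blocks = inlen / EXAMPLE_BLOCK_SIZE;

            example_compress(shash_desc_ctx(desc), in, blocks);
            return inlen - blocks * EXAMPLE_BLOCK_SIZE; /* left for the core */
    }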
diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c
deleted file mode 100644
index 50e635512046..000000000000
--- a/arch/arm/crypto/chacha-glue.c
+++ /dev/null
@@ -1,352 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ARM NEON accelerated ChaCha and XChaCha stream ciphers,
- * including ChaCha20 (RFC7539)
- *
- * Copyright (C) 2016-2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
- * Copyright (C) 2015 Martin Willi
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/jump_label.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/cputype.h>
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-
-asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
- int nrounds);
-asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
- int nrounds, unsigned int nbytes);
-asmlinkage void hchacha_block_arm(const u32 *state, u32 *out, int nrounds);
-asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
-
-asmlinkage void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes,
- const u32 *state, int nrounds);
-
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_neon);
-
-static inline bool neon_usable(void)
-{
- return static_branch_likely(&use_neon) && crypto_simd_usable();
-}
-
-static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds)
-{
- u8 buf[CHACHA_BLOCK_SIZE];
-
- while (bytes > CHACHA_BLOCK_SIZE) {
- unsigned int l = min(bytes, CHACHA_BLOCK_SIZE * 4U);
-
- chacha_4block_xor_neon(state, dst, src, nrounds, l);
- bytes -= l;
- src += l;
- dst += l;
- state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE);
- }
- if (bytes) {
- const u8 *s = src;
- u8 *d = dst;
-
- if (bytes != CHACHA_BLOCK_SIZE)
- s = d = memcpy(buf, src, bytes);
- chacha_block_xor_neon(state, d, s, nrounds);
- if (d != dst)
- memcpy(dst, buf, bytes);
- state[12]++;
- }
-}
-
-void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
-{
- if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable()) {
- hchacha_block_arm(state, stream, nrounds);
- } else {
- kernel_neon_begin();
- hchacha_block_neon(state, stream, nrounds);
- kernel_neon_end();
- }
-}
-EXPORT_SYMBOL(hchacha_block_arch);
-
-void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
- int nrounds)
-{
- if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable() ||
- bytes <= CHACHA_BLOCK_SIZE) {
- chacha_doarm(dst, src, bytes, state, nrounds);
- state[12] += DIV_ROUND_UP(bytes, CHACHA_BLOCK_SIZE);
- return;
- }
-
- do {
- unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
-
- kernel_neon_begin();
- chacha_doneon(state, dst, src, todo, nrounds);
- kernel_neon_end();
-
- bytes -= todo;
- src += todo;
- dst += todo;
- } while (bytes);
-}
-EXPORT_SYMBOL(chacha_crypt_arch);
-
-static int chacha_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv,
- bool neon)
-{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- chacha_init(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
-
- if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
- chacha_doarm(walk.dst.virt.addr, walk.src.virt.addr,
- nbytes, state, ctx->nrounds);
- state[12] += DIV_ROUND_UP(nbytes, CHACHA_BLOCK_SIZE);
- } else {
- kernel_neon_begin();
- chacha_doneon(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes, ctx->nrounds);
- kernel_neon_end();
- }
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static int do_chacha(struct skcipher_request *req, bool neon)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_stream_xor(req, ctx, req->iv, neon);
-}
-
-static int chacha_arm(struct skcipher_request *req)
-{
- return do_chacha(req, false);
-}
-
-static int chacha_neon(struct skcipher_request *req)
-{
- return do_chacha(req, neon_usable());
-}
-
-static int do_xchacha(struct skcipher_request *req, bool neon)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- chacha_init(state, ctx->key, req->iv);
-
- if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
- hchacha_block_arm(state, subctx.key, ctx->nrounds);
- } else {
- kernel_neon_begin();
- hchacha_block_neon(state, subctx.key, ctx->nrounds);
- kernel_neon_end();
- }
- subctx.nrounds = ctx->nrounds;
-
- memcpy(&real_iv[0], req->iv + 24, 8);
- memcpy(&real_iv[8], req->iv + 16, 8);
- return chacha_stream_xor(req, &subctx, real_iv, neon);
-}
-
-static int xchacha_arm(struct skcipher_request *req)
-{
- return do_xchacha(req, false);
-}
-
-static int xchacha_neon(struct skcipher_request *req)
-{
- return do_xchacha(req, neon_usable());
-}
-
-static struct skcipher_alg arm_algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-arm",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_arm,
- .decrypt = chacha_arm,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-arm",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_arm,
- .decrypt = xchacha_arm,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-arm",
- .base.cra_priority = 200,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_arm,
- .decrypt = xchacha_arm,
- },
-};
-
-static struct skcipher_alg neon_algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = chacha_neon,
- .decrypt = chacha_neon,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = xchacha_neon,
- .decrypt = xchacha_neon,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .walksize = 4 * CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = xchacha_neon,
- .decrypt = xchacha_neon,
- }
-};
-
-static int __init chacha_simd_mod_init(void)
-{
- int err = 0;
-
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) {
- err = crypto_register_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
- if (err)
- return err;
- }
-
- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) {
- int i;
-
- switch (read_cpuid_part()) {
- case ARM_CPU_PART_CORTEX_A7:
- case ARM_CPU_PART_CORTEX_A5:
- /*
- * The Cortex-A7 and Cortex-A5 do not perform well with
- * the NEON implementation but do incredibly with the
- * scalar one and use less power.
- */
- for (i = 0; i < ARRAY_SIZE(neon_algs); i++)
- neon_algs[i].base.cra_priority = 0;
- break;
- default:
- static_branch_enable(&use_neon);
- }
-
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) {
- err = crypto_register_skciphers(neon_algs, ARRAY_SIZE(neon_algs));
- if (err)
- crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
- }
- }
- return err;
-}
-
-static void __exit chacha_simd_mod_fini(void)
-{
- if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) {
- crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON))
- crypto_unregister_skciphers(neon_algs, ARRAY_SIZE(neon_algs));
- }
-}
-
-module_init(chacha_simd_mod_init);
-module_exit(chacha_simd_mod_fini);
-
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (scalar and NEON accelerated)");
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-arm");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-arm");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-arm");
-#ifdef CONFIG_KERNEL_MODE_NEON
-MODULE_ALIAS_CRYPTO("chacha20-neon");
-MODULE_ALIAS_CRYPTO("xchacha20-neon");
-MODULE_ALIAS_CRYPTO("xchacha12-neon");
-#endif
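
Removing this file does not remove the ARM ChaCha code; the assembly moves to arch/arm/lib/crypto/ (see the diffstat) and is reached through the ChaCha library interface rather than through registered skciphers. A hedged sketch of the library call pattern, using the same u32 state[16] convention the deleted glue used:

    u32 state[16];
    u8 buf[64] = {};                /* encrypt in place over zeroes */

    chacha_init(state, key, iv);    /* key: const u32[8], iv: const u8[16] */
    chacha_crypt(state, buf, buf, sizeof(buf), 20);  /* 20 rounds: ChaCha20 */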
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index aabfcf522a2c..a52dcc8c1e33 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -8,22 +8,22 @@
#include <asm/hwcap.h>
#include <asm/neon.h>
-#include <asm/simd.h>
-#include <linux/unaligned.h>
#include <crypto/aes.h>
-#include <crypto/gcm.h>
#include <crypto/b128ops.h>
-#include <crypto/cryptd.h>
+#include <crypto/gcm.h>
+#include <crypto/gf128mul.h>
+#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/gf128mul.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
-#include <linux/crypto.h>
+#include <linux/errno.h>
#include <linux/jump_label.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
MODULE_DESCRIPTION("GHASH hash function using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
@@ -32,9 +32,6 @@ MODULE_ALIAS_CRYPTO("ghash");
MODULE_ALIAS_CRYPTO("gcm(aes)");
MODULE_ALIAS_CRYPTO("rfc4106(gcm(aes))");
-#define GHASH_BLOCK_SIZE 16
-#define GHASH_DIGEST_SIZE 16
-
#define RFC4106_NONCE_SIZE 4
struct ghash_key {
@@ -49,10 +46,8 @@ struct gcm_key {
u8 nonce[]; // for RFC4106 nonce
};
-struct ghash_desc_ctx {
+struct arm_ghash_desc_ctx {
u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
- u8 buf[GHASH_BLOCK_SIZE];
- u32 count;
};
asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
@@ -65,9 +60,9 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_p64);
static int ghash_init(struct shash_desc *desc)
{
- struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
- *ctx = (struct ghash_desc_ctx){};
+ *ctx = (struct arm_ghash_desc_ctx){};
return 0;
}
@@ -85,52 +80,49 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
static int ghash_update(struct shash_desc *desc, const u8 *src,
unsigned int len)
{
- struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
- unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
-
- ctx->count += len;
+ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ int blocks;
- if ((partial + len) >= GHASH_BLOCK_SIZE) {
- struct ghash_key *key = crypto_shash_ctx(desc->tfm);
- int blocks;
+ blocks = len / GHASH_BLOCK_SIZE;
+ ghash_do_update(blocks, ctx->digest, src, key, NULL);
+ return len - blocks * GHASH_BLOCK_SIZE;
+}
- if (partial) {
- int p = GHASH_BLOCK_SIZE - partial;
+static int ghash_export(struct shash_desc *desc, void *out)
+{
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ u8 *dst = out;
- memcpy(ctx->buf + partial, src, p);
- src += p;
- len -= p;
- }
+ put_unaligned_be64(ctx->digest[1], dst);
+ put_unaligned_be64(ctx->digest[0], dst + 8);
+ return 0;
+}
- blocks = len / GHASH_BLOCK_SIZE;
- len %= GHASH_BLOCK_SIZE;
+static int ghash_import(struct shash_desc *desc, const void *in)
+{
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
+ const u8 *src = in;
- ghash_do_update(blocks, ctx->digest, src, key,
- partial ? ctx->buf : NULL);
- src += blocks * GHASH_BLOCK_SIZE;
- partial = 0;
- }
- if (len)
- memcpy(ctx->buf + partial, src, len);
+ ctx->digest[1] = get_unaligned_be64(src);
+ ctx->digest[0] = get_unaligned_be64(src + 8);
return 0;
}
-static int ghash_final(struct shash_desc *desc, u8 *dst)
+static int ghash_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *dst)
{
- struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
- unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
+ struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+ struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
- if (partial) {
- struct ghash_key *key = crypto_shash_ctx(desc->tfm);
+ if (len) {
+ u8 buf[GHASH_BLOCK_SIZE] = {};
- memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
- ghash_do_update(1, ctx->digest, ctx->buf, key, NULL);
+ memcpy(buf, src, len);
+ ghash_do_update(1, ctx->digest, buf, key, NULL);
+ memzero_explicit(buf, sizeof(buf));
}
- put_unaligned_be64(ctx->digest[1], dst);
- put_unaligned_be64(ctx->digest[0], dst + 8);
-
- *ctx = (struct ghash_desc_ctx){};
- return 0;
+ return ghash_export(desc, dst);
}
static void ghash_reflect(u64 h[], const be128 *k)
@@ -175,13 +167,17 @@ static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
.update = ghash_update,
- .final = ghash_final,
+ .finup = ghash_finup,
.setkey = ghash_setkey,
- .descsize = sizeof(struct ghash_desc_ctx),
+ .export = ghash_export,
+ .import = ghash_import,
+ .descsize = sizeof(struct arm_ghash_desc_ctx),
+ .statesize = sizeof(struct ghash_desc_ctx),
.base.cra_name = "ghash",
.base.cra_driver_name = "ghash-ce",
.base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = GHASH_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]),
.base.cra_module = THIS_MODULE,
@@ -317,9 +313,6 @@ static int gcm_encrypt(struct aead_request *req, const u8 *iv, u32 assoclen)
u8 *tag, *dst;
int tail, err;
- if (WARN_ON_ONCE(!may_use_simd()))
- return -EBUSY;
-
err = skcipher_walk_aead_encrypt(&walk, req, false);
kernel_neon_begin();
@@ -409,9 +402,6 @@ static int gcm_decrypt(struct aead_request *req, const u8 *iv, u32 assoclen)
u8 *tag, *dst;
int tail, err, ret;
- if (WARN_ON_ONCE(!may_use_simd()))
- return -EBUSY;
-
scatterwalk_map_and_copy(otag, req->src,
req->assoclen + req->cryptlen - authsize,
authsize, 0);
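
Because partial blocks are now buffered by the core, the driver's descriptor shrinks to the bare digest, and the new export/import hooks are what let the core serialize in-progress hash state. A generic usage sketch, not taken from this patch:

    u8 state[HASH_MAX_STATESIZE];

    crypto_shash_export(desc, state);   /* digest words, big-endian */
    /* ...hand the state off or suspend... */
    crypto_shash_import(desc, state);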
diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
deleted file mode 100644
index 4464ffbf8fd1..000000000000
--- a/arch/arm/crypto/poly1305-glue.c
+++ /dev/null
@@ -1,274 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * OpenSSL/Cryptogams accelerated Poly1305 transform for ARM
- *
- * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
- */
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <linux/unaligned.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/poly1305.h>
-#include <crypto/internal/simd.h>
-#include <linux/cpufeature.h>
-#include <linux/crypto.h>
-#include <linux/jump_label.h>
-#include <linux/module.h>
-
-void poly1305_init_arm(void *state, const u8 *key);
-void poly1305_blocks_arm(void *state, const u8 *src, u32 len, u32 hibit);
-void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit);
-void poly1305_emit_arm(void *state, u8 *digest, const u32 *nonce);
-
-void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
-{
-}
-
-static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
-
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
-{
- poly1305_init_arm(&dctx->h, key);
- dctx->s[0] = get_unaligned_le32(key + 16);
- dctx->s[1] = get_unaligned_le32(key + 20);
- dctx->s[2] = get_unaligned_le32(key + 24);
- dctx->s[3] = get_unaligned_le32(key + 28);
- dctx->buflen = 0;
-}
-EXPORT_SYMBOL(poly1305_init_arch);
-
-static int arm_poly1305_init(struct shash_desc *desc)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- dctx->buflen = 0;
- dctx->rset = 0;
- dctx->sset = false;
-
- return 0;
-}
-
-static void arm_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
- u32 len, u32 hibit, bool do_neon)
-{
- if (unlikely(!dctx->sset)) {
- if (!dctx->rset) {
- poly1305_init_arm(&dctx->h, src);
- src += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- dctx->rset = 1;
- }
- if (len >= POLY1305_BLOCK_SIZE) {
- dctx->s[0] = get_unaligned_le32(src + 0);
- dctx->s[1] = get_unaligned_le32(src + 4);
- dctx->s[2] = get_unaligned_le32(src + 8);
- dctx->s[3] = get_unaligned_le32(src + 12);
- src += POLY1305_BLOCK_SIZE;
- len -= POLY1305_BLOCK_SIZE;
- dctx->sset = true;
- }
- if (len < POLY1305_BLOCK_SIZE)
- return;
- }
-
- len &= ~(POLY1305_BLOCK_SIZE - 1);
-
- if (static_branch_likely(&have_neon) && likely(do_neon))
- poly1305_blocks_neon(&dctx->h, src, len, hibit);
- else
- poly1305_blocks_arm(&dctx->h, src, len, hibit);
-}
-
-static void arm_poly1305_do_update(struct poly1305_desc_ctx *dctx,
- const u8 *src, u32 len, bool do_neon)
-{
- if (unlikely(dctx->buflen)) {
- u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen);
-
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- len -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- arm_poly1305_blocks(dctx, dctx->buf,
- POLY1305_BLOCK_SIZE, 1, false);
- dctx->buflen = 0;
- }
- }
-
- if (likely(len >= POLY1305_BLOCK_SIZE)) {
- arm_poly1305_blocks(dctx, src, len, 1, do_neon);
- src += round_down(len, POLY1305_BLOCK_SIZE);
- len %= POLY1305_BLOCK_SIZE;
- }
-
- if (unlikely(len)) {
- dctx->buflen = len;
- memcpy(dctx->buf, src, len);
- }
-}
-
-static int arm_poly1305_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- arm_poly1305_do_update(dctx, src, srclen, false);
- return 0;
-}
-
-static int __maybe_unused arm_poly1305_update_neon(struct shash_desc *desc,
- const u8 *src,
- unsigned int srclen)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- bool do_neon = crypto_simd_usable() && srclen > 128;
-
- if (static_branch_likely(&have_neon) && do_neon)
- kernel_neon_begin();
- arm_poly1305_do_update(dctx, src, srclen, do_neon);
- if (static_branch_likely(&have_neon) && do_neon)
- kernel_neon_end();
- return 0;
-}
-
-void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
- unsigned int nbytes)
-{
- bool do_neon = IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
- crypto_simd_usable();
-
- if (unlikely(dctx->buflen)) {
- u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen);
-
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- nbytes -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- poly1305_blocks_arm(&dctx->h, dctx->buf,
- POLY1305_BLOCK_SIZE, 1);
- dctx->buflen = 0;
- }
- }
-
- if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
- unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
-
- if (static_branch_likely(&have_neon) && do_neon) {
- do {
- unsigned int todo = min_t(unsigned int, len, SZ_4K);
-
- kernel_neon_begin();
- poly1305_blocks_neon(&dctx->h, src, todo, 1);
- kernel_neon_end();
-
- len -= todo;
- src += todo;
- } while (len);
- } else {
- poly1305_blocks_arm(&dctx->h, src, len, 1);
- src += len;
- }
- nbytes %= POLY1305_BLOCK_SIZE;
- }
-
- if (unlikely(nbytes)) {
- dctx->buflen = nbytes;
- memcpy(dctx->buf, src, nbytes);
- }
-}
-EXPORT_SYMBOL(poly1305_update_arch);
-
-void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
-{
- if (unlikely(dctx->buflen)) {
- dctx->buf[dctx->buflen++] = 1;
- memset(dctx->buf + dctx->buflen, 0,
- POLY1305_BLOCK_SIZE - dctx->buflen);
- poly1305_blocks_arm(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
- }
-
- poly1305_emit_arm(&dctx->h, dst, dctx->s);
- *dctx = (struct poly1305_desc_ctx){};
-}
-EXPORT_SYMBOL(poly1305_final_arch);
-
-static int arm_poly1305_final(struct shash_desc *desc, u8 *dst)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (unlikely(!dctx->sset))
- return -ENOKEY;
-
- poly1305_final_arch(dctx, dst);
- return 0;
-}
-
-static struct shash_alg arm_poly1305_algs[] = {{
- .init = arm_poly1305_init,
- .update = arm_poly1305_update,
- .final = arm_poly1305_final,
- .digestsize = POLY1305_DIGEST_SIZE,
- .descsize = sizeof(struct poly1305_desc_ctx),
-
- .base.cra_name = "poly1305",
- .base.cra_driver_name = "poly1305-arm",
- .base.cra_priority = 150,
- .base.cra_blocksize = POLY1305_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-#ifdef CONFIG_KERNEL_MODE_NEON
-}, {
- .init = arm_poly1305_init,
- .update = arm_poly1305_update_neon,
- .final = arm_poly1305_final,
- .digestsize = POLY1305_DIGEST_SIZE,
- .descsize = sizeof(struct poly1305_desc_ctx),
-
- .base.cra_name = "poly1305",
- .base.cra_driver_name = "poly1305-neon",
- .base.cra_priority = 200,
- .base.cra_blocksize = POLY1305_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-#endif
-}};
-
-static int __init arm_poly1305_mod_init(void)
-{
- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
- (elf_hwcap & HWCAP_NEON))
- static_branch_enable(&have_neon);
- else if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
- /* register only the first entry */
- return crypto_register_shash(&arm_poly1305_algs[0]);
-
- return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
- crypto_register_shashes(arm_poly1305_algs,
- ARRAY_SIZE(arm_poly1305_algs)) : 0;
-}
-
-static void __exit arm_poly1305_mod_exit(void)
-{
- if (!IS_REACHABLE(CONFIG_CRYPTO_HASH))
- return;
- if (!static_branch_likely(&have_neon)) {
- crypto_unregister_shash(&arm_poly1305_algs[0]);
- return;
- }
- crypto_unregister_shashes(arm_poly1305_algs,
- ARRAY_SIZE(arm_poly1305_algs));
-}
-
-module_init(arm_poly1305_mod_init);
-module_exit(arm_poly1305_mod_exit);
-
-MODULE_DESCRIPTION("Accelerated Poly1305 transform for ARM");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("poly1305");
-MODULE_ALIAS_CRYPTO("poly1305-arm");
-MODULE_ALIAS_CRYPTO("poly1305-neon");
diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c
index de9100c67b37..fac07a4799de 100644
--- a/arch/arm/crypto/sha1-ce-glue.c
+++ b/arch/arm/crypto/sha1-ce-glue.c
@@ -5,20 +5,14 @@
* Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
+#include <asm/neon.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
-#include <linux/crypto.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-
-#include "sha1.h"
-
MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
@@ -29,50 +23,36 @@ asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable() ||
- (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
- return sha1_update_arm(desc, data, len);
+ int remain;
kernel_neon_begin();
- sha1_base_do_update(desc, data, len, sha1_ce_transform);
+ remain = sha1_base_do_update_blocks(desc, data, len, sha1_ce_transform);
kernel_neon_end();
- return 0;
+ return remain;
}
static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!crypto_simd_usable())
- return sha1_finup_arm(desc, data, len, out);
-
kernel_neon_begin();
- if (len)
- sha1_base_do_update(desc, data, len, sha1_ce_transform);
- sha1_base_do_finalize(desc, sha1_ce_transform);
+ sha1_base_do_finup(desc, data, len, sha1_ce_transform);
kernel_neon_end();
return sha1_base_finish(desc, out);
}
-static int sha1_ce_final(struct shash_desc *desc, u8 *out)
-{
- return sha1_ce_finup(desc, NULL, 0, out);
-}
-
static struct shash_alg alg = {
.init = sha1_base_init,
.update = sha1_ce_update,
- .final = sha1_ce_final,
.finup = sha1_ce_finup,
- .descsize = sizeof(struct sha1_state),
+ .descsize = SHA1_STATE_SIZE,
.digestsize = SHA1_DIGEST_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ce",
.cra_priority = 200,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha1.h b/arch/arm/crypto/sha1.h
deleted file mode 100644
index b1b7e21da2c3..000000000000
--- a/arch/arm/crypto/sha1.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_ARM_CRYPTO_SHA1_H
-#define ASM_ARM_CRYPTO_SHA1_H
-
-#include <linux/crypto.h>
-#include <crypto/sha1.h>
-
-extern int sha1_update_arm(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
-
-#endif
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index 95a727bcd664..255da00c7d98 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -12,53 +12,42 @@
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
-#include <asm/byteorder.h>
-
-#include "sha1.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
asmlinkage void sha1_block_data_order(struct sha1_state *digest,
const u8 *data, int rounds);
-int sha1_update_arm(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int sha1_update_arm(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
/* make sure signature matches sha1_block_fn() */
BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
- return sha1_base_do_update(desc, data, len, sha1_block_data_order);
+ return sha1_base_do_update_blocks(desc, data, len,
+ sha1_block_data_order);
}
-EXPORT_SYMBOL_GPL(sha1_update_arm);
-static int sha1_final(struct shash_desc *desc, u8 *out)
+static int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- sha1_base_do_finalize(desc, sha1_block_data_order);
+ sha1_base_do_finup(desc, data, len, sha1_block_data_order);
return sha1_base_finish(desc, out);
}
-int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- sha1_base_do_update(desc, data, len, sha1_block_data_order);
- return sha1_final(desc, out);
-}
-EXPORT_SYMBOL_GPL(sha1_finup_arm);
-
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = sha1_update_arm,
- .final = sha1_final,
.finup = sha1_finup_arm,
- .descsize = sizeof(struct sha1_state),
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-asm",
.cra_priority = 150,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index 9c70b87e69f7..d321850f22a6 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -13,18 +13,12 @@
* Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com>
*/
+#include <asm/neon.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-
-#include "sha1.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
asmlinkage void sha1_transform_neon(struct sha1_state *state_h,
const u8 *data, int rounds);
@@ -32,50 +26,37 @@ asmlinkage void sha1_transform_neon(struct sha1_state *state_h,
static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable() ||
- (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
- return sha1_update_arm(desc, data, len);
+ int remain;
kernel_neon_begin();
- sha1_base_do_update(desc, data, len, sha1_transform_neon);
+ remain = sha1_base_do_update_blocks(desc, data, len,
+ sha1_transform_neon);
kernel_neon_end();
- return 0;
+ return remain;
}
static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!crypto_simd_usable())
- return sha1_finup_arm(desc, data, len, out);
-
kernel_neon_begin();
- if (len)
- sha1_base_do_update(desc, data, len, sha1_transform_neon);
- sha1_base_do_finalize(desc, sha1_transform_neon);
+ sha1_base_do_finup(desc, data, len, sha1_transform_neon);
kernel_neon_end();
return sha1_base_finish(desc, out);
}
-static int sha1_neon_final(struct shash_desc *desc, u8 *out)
-{
- return sha1_neon_finup(desc, NULL, 0, out);
-}
-
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = sha1_neon_update,
- .final = sha1_neon_final,
.finup = sha1_neon_finup,
- .descsize = sizeof(struct sha1_state),
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-neon",
.cra_priority = 250,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
deleted file mode 100644
index aeac45bfbf9f..000000000000
--- a/arch/arm/crypto/sha2-ce-glue.c
+++ /dev/null
@@ -1,109 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
- *
- * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <linux/cpufeature.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-
-#include <asm/hwcap.h>
-#include <asm/simd.h>
-#include <asm/neon.h>
-#include <linux/unaligned.h>
-
-#include "sha256_glue.h"
-
-MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-
-asmlinkage void sha2_ce_transform(struct sha256_state *sst, u8 const *src,
- int blocks);
-
-static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable() ||
- (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
- return crypto_sha256_arm_update(desc, data, len);
-
- kernel_neon_begin();
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha2_ce_transform);
- kernel_neon_end();
-
- return 0;
-}
-
-static int sha2_ce_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- if (!crypto_simd_usable())
- return crypto_sha256_arm_finup(desc, data, len, out);
-
- kernel_neon_begin();
- if (len)
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha2_ce_transform);
- sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
- kernel_neon_end();
-
- return sha256_base_finish(desc, out);
-}
-
-static int sha2_ce_final(struct shash_desc *desc, u8 *out)
-{
- return sha2_ce_finup(desc, NULL, 0, out);
-}
-
-static struct shash_alg algs[] = { {
- .init = sha224_base_init,
- .update = sha2_ce_update,
- .final = sha2_ce_final,
- .finup = sha2_ce_finup,
- .descsize = sizeof(struct sha256_state),
- .digestsize = SHA224_DIGEST_SIZE,
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "sha224-ce",
- .cra_priority = 300,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .init = sha256_base_init,
- .update = sha2_ce_update,
- .final = sha2_ce_final,
- .finup = sha2_ce_finup,
- .descsize = sizeof(struct sha256_state),
- .digestsize = SHA256_DIGEST_SIZE,
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-ce",
- .cra_priority = 300,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init sha2_ce_mod_init(void)
-{
- return crypto_register_shashes(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit sha2_ce_mod_fini(void)
-{
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-}
-
-module_cpu_feature_match(SHA2, sha2_ce_mod_init);
-module_exit(sha2_ce_mod_fini);
diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
deleted file mode 100644
index f85933fdec75..000000000000
--- a/arch/arm/crypto/sha256_glue.c
+++ /dev/null
@@ -1,117 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Glue code for the SHA256 Secure Hash Algorithm assembly implementation
- * using optimized ARM assembler and NEON instructions.
- *
- * Copyright © 2015 Google Inc.
- *
- * This file is based on sha256_ssse3_glue.c:
- * Copyright (C) 2013 Intel Corporation
- * Author: Tim Chen <tim.c.chen@linux.intel.com>
- */
-
-#include <crypto/internal/hash.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <asm/simd.h>
-#include <asm/neon.h>
-
-#include "sha256_glue.h"
-
-asmlinkage void sha256_block_data_order(struct sha256_state *state,
- const u8 *data, int num_blks);
-
-int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- /* make sure casting to sha256_block_fn() is safe */
- BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
-
- return sha256_base_do_update(desc, data, len, sha256_block_data_order);
-}
-EXPORT_SYMBOL(crypto_sha256_arm_update);
-
-static int crypto_sha256_arm_final(struct shash_desc *desc, u8 *out)
-{
- sha256_base_do_finalize(desc, sha256_block_data_order);
- return sha256_base_finish(desc, out);
-}
-
-int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- sha256_base_do_update(desc, data, len, sha256_block_data_order);
- return crypto_sha256_arm_final(desc, out);
-}
-EXPORT_SYMBOL(crypto_sha256_arm_finup);
-
-static struct shash_alg algs[] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = crypto_sha256_arm_update,
- .final = crypto_sha256_arm_final,
- .finup = crypto_sha256_arm_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-asm",
- .cra_priority = 150,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = crypto_sha256_arm_update,
- .final = crypto_sha256_arm_final,
- .finup = crypto_sha256_arm_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "sha224-asm",
- .cra_priority = 150,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init sha256_mod_init(void)
-{
- int res = crypto_register_shashes(algs, ARRAY_SIZE(algs));
-
- if (res < 0)
- return res;
-
- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) {
- res = crypto_register_shashes(sha256_neon_algs,
- ARRAY_SIZE(sha256_neon_algs));
-
- if (res < 0)
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
- }
-
- return res;
-}
-
-static void __exit sha256_mod_fini(void)
-{
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-
- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon())
- crypto_unregister_shashes(sha256_neon_algs,
- ARRAY_SIZE(sha256_neon_algs));
-}
-
-module_init(sha256_mod_init);
-module_exit(sha256_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm (ARM), including NEON");
-
-MODULE_ALIAS_CRYPTO("sha256");
diff --git a/arch/arm/crypto/sha256_glue.h b/arch/arm/crypto/sha256_glue.h
deleted file mode 100644
index 9f0d578bab5f..000000000000
--- a/arch/arm/crypto/sha256_glue.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CRYPTO_SHA256_GLUE_H
-#define _CRYPTO_SHA256_GLUE_H
-
-#include <linux/crypto.h>
-
-extern struct shash_alg sha256_neon_algs[2];
-
-int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
-#endif /* _CRYPTO_SHA256_GLUE_H */
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
deleted file mode 100644
index ccdcfff71910..000000000000
--- a/arch/arm/crypto/sha256_neon_glue.c
+++ /dev/null
@@ -1,92 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Glue code for the SHA256 Secure Hash Algorithm assembly implementation
- * using NEON instructions.
- *
- * Copyright © 2015 Google Inc.
- *
- * This file is based on sha512_neon_glue.c:
- * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
- */
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <asm/byteorder.h>
-#include <asm/simd.h>
-#include <asm/neon.h>
-
-#include "sha256_glue.h"
-
-asmlinkage void sha256_block_data_order_neon(struct sha256_state *digest,
- const u8 *data, int num_blks);
-
-static int crypto_sha256_neon_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable() ||
- (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
- return crypto_sha256_arm_update(desc, data, len);
-
- kernel_neon_begin();
- sha256_base_do_update(desc, data, len, sha256_block_data_order_neon);
- kernel_neon_end();
-
- return 0;
-}
-
-static int crypto_sha256_neon_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- if (!crypto_simd_usable())
- return crypto_sha256_arm_finup(desc, data, len, out);
-
- kernel_neon_begin();
- if (len)
- sha256_base_do_update(desc, data, len,
- sha256_block_data_order_neon);
- sha256_base_do_finalize(desc, sha256_block_data_order_neon);
- kernel_neon_end();
-
- return sha256_base_finish(desc, out);
-}
-
-static int crypto_sha256_neon_final(struct shash_desc *desc, u8 *out)
-{
- return crypto_sha256_neon_finup(desc, NULL, 0, out);
-}
-
-struct shash_alg sha256_neon_algs[] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = crypto_sha256_neon_update,
- .final = crypto_sha256_neon_final,
- .finup = crypto_sha256_neon_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-neon",
- .cra_priority = 250,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = crypto_sha256_neon_update,
- .final = crypto_sha256_neon_final,
- .finup = crypto_sha256_neon_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name = "sha224-neon",
- .cra_priority = 250,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
index 1be5bd498af3..f8a6480889b1 100644
--- a/arch/arm/crypto/sha512-glue.c
+++ b/arch/arm/crypto/sha512-glue.c
@@ -5,15 +5,14 @@
* Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
+#include <asm/hwcap.h>
+#include <asm/neon.h>
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
-#include <linux/crypto.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-
#include "sha512.h"
MODULE_DESCRIPTION("Accelerated SHA-384/SHA-512 secure hash for ARM");
@@ -28,50 +27,47 @@ MODULE_ALIAS_CRYPTO("sha512-arm");
asmlinkage void sha512_block_data_order(struct sha512_state *state,
u8 const *src, int blocks);
-int sha512_arm_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int sha512_arm_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- return sha512_base_do_update(desc, data, len, sha512_block_data_order);
+ return sha512_base_do_update_blocks(desc, data, len,
+ sha512_block_data_order);
}
-static int sha512_arm_final(struct shash_desc *desc, u8 *out)
+static int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- sha512_base_do_finalize(desc, sha512_block_data_order);
+ sha512_base_do_finup(desc, data, len, sha512_block_data_order);
return sha512_base_finish(desc, out);
}
-int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- sha512_base_do_update(desc, data, len, sha512_block_data_order);
- return sha512_arm_final(desc, out);
-}
-
static struct shash_alg sha512_arm_algs[] = { {
.init = sha384_base_init,
.update = sha512_arm_update,
- .final = sha512_arm_final,
.finup = sha512_arm_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.digestsize = SHA384_DIGEST_SIZE,
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-arm",
.cra_priority = 250,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.init = sha512_base_init,
.update = sha512_arm_update,
- .final = sha512_arm_final,
.finup = sha512_arm_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.digestsize = SHA512_DIGEST_SIZE,
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-arm",
.cra_priority = 250,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
index c6e58fe475ac..bd528077fefb 100644
--- a/arch/arm/crypto/sha512-neon-glue.c
+++ b/arch/arm/crypto/sha512-neon-glue.c
@@ -5,16 +5,13 @@
* Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
+#include <asm/neon.h>
#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
-#include <linux/crypto.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/simd.h>
-#include <asm/neon.h>
-
#include "sha512.h"
MODULE_ALIAS_CRYPTO("sha384-neon");
@@ -26,51 +23,36 @@ asmlinkage void sha512_block_data_order_neon(struct sha512_state *state,
static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- if (!crypto_simd_usable() ||
- (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
- return sha512_arm_update(desc, data, len);
+ int remain;
kernel_neon_begin();
- sha512_base_do_update(desc, data, len, sha512_block_data_order_neon);
+ remain = sha512_base_do_update_blocks(desc, data, len,
+ sha512_block_data_order_neon);
kernel_neon_end();
-
- return 0;
+ return remain;
}
static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!crypto_simd_usable())
- return sha512_arm_finup(desc, data, len, out);
-
kernel_neon_begin();
- if (len)
- sha512_base_do_update(desc, data, len,
- sha512_block_data_order_neon);
- sha512_base_do_finalize(desc, sha512_block_data_order_neon);
+ sha512_base_do_finup(desc, data, len, sha512_block_data_order_neon);
kernel_neon_end();
-
return sha512_base_finish(desc, out);
}
-static int sha512_neon_final(struct shash_desc *desc, u8 *out)
-{
- return sha512_neon_finup(desc, NULL, 0, out);
-}
-
struct shash_alg sha512_neon_algs[] = { {
.init = sha384_base_init,
.update = sha512_neon_update,
- .final = sha512_neon_final,
.finup = sha512_neon_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.digestsize = SHA384_DIGEST_SIZE,
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-neon",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
@@ -78,14 +60,15 @@ struct shash_alg sha512_neon_algs[] = { {
}, {
.init = sha512_base_init,
.update = sha512_neon_update,
- .final = sha512_neon_final,
.finup = sha512_neon_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.digestsize = SHA512_DIGEST_SIZE,
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-neon",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha512.h b/arch/arm/crypto/sha512.h
index e14572be76d1..eeaee52cda69 100644
--- a/arch/arm/crypto/sha512.h
+++ b/arch/arm/crypto/sha512.h
@@ -1,9 +1,3 @@
/* SPDX-License-Identifier: GPL-2.0 */
-int sha512_arm_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
-
extern struct shash_alg sha512_neon_algs[2];
diff --git a/arch/arm/include/asm/simd.h b/arch/arm/include/asm/simd.h
index 82191dbd7e78..d37559762180 100644
--- a/arch/arm/include/asm/simd.h
+++ b/arch/arm/include/asm/simd.h
@@ -1,8 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SIMD_H
+#define _ASM_SIMD_H
-#include <linux/hardirq.h>
+#include <linux/compiler_attributes.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
static __must_check inline bool may_use_simd(void)
{
return IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && !in_hardirq();
}
+
+#endif /* _ASM_SIMD_H */
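
The header now has a proper include guard and lighter includes; may_use_simd() itself is unchanged: NEON is usable unless the kernel was built without KERNEL_MODE_NEON or we are in hard-IRQ context. The usual idiom around it, as a hedged sketch (xor_bytes_neon() is hypothetical):

#include <asm/neon.h>
#include <asm/simd.h>
#include <linux/types.h>

asmlinkage void xor_bytes_neon(u8 *dst, const u8 *src, int n); /* hypothetical */

static void xor_bytes(u8 *dst, const u8 *src, int n)
{
	if (may_use_simd()) {
		kernel_neon_begin();	/* claim the NEON unit for kernel use */
		xor_bytes_neon(dst, src, n);
		kernel_neon_end();
	} else {
		while (n--)		/* scalar fallback */
			*dst++ ^= *src++;
	}
}
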
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 007874320937..91ea0e29107a 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -5,6 +5,8 @@
# Copyright (C) 1995-2000 Russell King
#
+obj-y += crypto/
+
lib-y := changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
delay.o delay-loop.o findbit.o memchr.o memcpy.o \
@@ -47,7 +49,7 @@ endif
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
obj-$(CONFIG_CRC32_ARCH) += crc32-arm.o
-crc32-arm-y := crc32-glue.o crc32-core.o
+crc32-arm-y := crc32.o crc32-core.o
obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-arm.o
-crc-t10dif-arm-y := crc-t10dif-glue.o crc-t10dif-core.o
+crc-t10dif-arm-y := crc-t10dif.o crc-t10dif-core.o
diff --git a/arch/arm/lib/crc-t10dif-glue.c b/arch/arm/lib/crc-t10dif.c
index 6efad3d78284..1093f8ec13b0 100644
--- a/arch/arm/lib/crc-t10dif-glue.c
+++ b/arch/arm/lib/crc-t10dif.c
@@ -16,8 +16,8 @@
#include <asm/neon.h>
#include <asm/simd.h>
-static DEFINE_STATIC_KEY_FALSE(have_neon);
-static DEFINE_STATIC_KEY_FALSE(have_pmull);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pmull);
#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
@@ -60,7 +60,7 @@ static int __init crc_t10dif_arm_init(void)
}
return 0;
}
-arch_initcall(crc_t10dif_arm_init);
+subsys_initcall(crc_t10dif_arm_init);
static void __exit crc_t10dif_arm_exit(void)
{
diff --git a/arch/arm/lib/crc32-glue.c b/arch/arm/lib/crc32.c
index 4340351dbde8..f2bef8849c7c 100644
--- a/arch/arm/lib/crc32-glue.c
+++ b/arch/arm/lib/crc32.c
@@ -18,8 +18,8 @@
#include <asm/neon.h>
#include <asm/simd.h>
-static DEFINE_STATIC_KEY_FALSE(have_crc32);
-static DEFINE_STATIC_KEY_FALSE(have_pmull);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pmull);
#define PMULL_MIN_LEN 64 /* min size of buffer for pmull functions */
@@ -103,7 +103,7 @@ static int __init crc32_arm_init(void)
static_branch_enable(&have_pmull);
return 0;
}
-arch_initcall(crc32_arm_init);
+subsys_initcall(crc32_arm_init);
static void __exit crc32_arm_exit(void)
{
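
Both CRC files also move from arch_initcall() to subsys_initcall(). For orientation, the relevant ordering from include/linux/init.h, as a comment:

/*
 * Built-in initcall levels run in order:
 *   core(1), postcore(2), arch(3), subsys(4), fs(5), device(6), late(7)
 * so the CRC setup now runs one level later, still well before the
 * device_initcall()/module code that consumes these routines.
 */
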
diff --git a/arch/arm/lib/crypto/.gitignore b/arch/arm/lib/crypto/.gitignore
new file mode 100644
index 000000000000..12d74d8b03d0
--- /dev/null
+++ b/arch/arm/lib/crypto/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+poly1305-core.S
+sha256-core.S
diff --git a/arch/arm/lib/crypto/Kconfig b/arch/arm/lib/crypto/Kconfig
new file mode 100644
index 000000000000..d1ad664f0c67
--- /dev/null
+++ b/arch/arm/lib/crypto/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CRYPTO_BLAKE2S_ARM
+ bool "Hash functions: BLAKE2s"
+ select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+ help
+ BLAKE2s cryptographic hash function (RFC 7693)
+
+ Architecture: arm
+
+ This is faster than the generic implementations of BLAKE2s and
+ BLAKE2b, but slower than the NEON implementation of BLAKE2b.
+ There is no NEON implementation of BLAKE2s, since NEON doesn't
+ really help with it.
+
+config CRYPTO_CHACHA20_NEON
+ tristate
+ default CRYPTO_LIB_CHACHA
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+
+config CRYPTO_POLY1305_ARM
+ tristate
+ default CRYPTO_LIB_POLY1305
+ select CRYPTO_ARCH_HAVE_LIB_POLY1305
+
+config CRYPTO_SHA256_ARM
+ tristate
+ depends on !CPU_V7M
+ default CRYPTO_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256
+ select CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD
diff --git a/arch/arm/lib/crypto/Makefile b/arch/arm/lib/crypto/Makefile
new file mode 100644
index 000000000000..431f77c3ff6f
--- /dev/null
+++ b/arch/arm/lib/crypto/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += libblake2s-arm.o
+libblake2s-arm-y := blake2s-core.o blake2s-glue.o
+
+obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
+chacha-neon-y := chacha-scalar-core.o chacha-glue.o
+chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o
+
+obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o
+poly1305-arm-y := poly1305-core.o poly1305-glue.o
+
+obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
+sha256-arm-y := sha256.o sha256-core.o
+sha256-arm-$(CONFIG_KERNEL_MODE_NEON) += sha256-ce.o
+
+quiet_cmd_perl = PERL $@
+ cmd_perl = $(PERL) $(<) > $(@)
+
+$(obj)/%-core.S: $(src)/%-armv4.pl
+ $(call cmd,perl)
+
+clean-files += poly1305-core.S sha256-core.S
+
+aflags-thumb2-$(CONFIG_THUMB2_KERNEL) := -U__thumb2__ -D__thumb2__=1
+
+# massage the perlasm code a bit so we only get the NEON routine if we need it
+poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5
+poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7
+AFLAGS_poly1305-core.o += $(poly1305-aflags-y) $(aflags-thumb2-y)
+
+AFLAGS_sha256-core.o += $(aflags-thumb2-y)
diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/lib/crypto/blake2s-core.S
index df40e46601f1..df40e46601f1 100644
--- a/arch/arm/crypto/blake2s-core.S
+++ b/arch/arm/lib/crypto/blake2s-core.S
diff --git a/arch/arm/crypto/blake2s-glue.c b/arch/arm/lib/crypto/blake2s-glue.c
index 0238a70d9581..0238a70d9581 100644
--- a/arch/arm/crypto/blake2s-glue.c
+++ b/arch/arm/lib/crypto/blake2s-glue.c
diff --git a/arch/arm/lib/crypto/chacha-glue.c b/arch/arm/lib/crypto/chacha-glue.c
new file mode 100644
index 000000000000..88ec96415283
--- /dev/null
+++ b/arch/arm/lib/crypto/chacha-glue.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ChaCha and HChaCha functions (ARM optimized)
+ *
+ * Copyright (C) 2016-2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2015 Martin Willi
+ */
+
+#include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <asm/cputype.h>
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+
+asmlinkage void chacha_block_xor_neon(const struct chacha_state *state,
+ u8 *dst, const u8 *src, int nrounds);
+asmlinkage void chacha_4block_xor_neon(const struct chacha_state *state,
+ u8 *dst, const u8 *src,
+ int nrounds, unsigned int nbytes);
+asmlinkage void hchacha_block_arm(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds);
+asmlinkage void hchacha_block_neon(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds);
+
+asmlinkage void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes,
+ const struct chacha_state *state, int nrounds);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_neon);
+
+static inline bool neon_usable(void)
+{
+ return static_branch_likely(&use_neon) && crypto_simd_usable();
+}
+
+static void chacha_doneon(struct chacha_state *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ u8 buf[CHACHA_BLOCK_SIZE];
+
+ while (bytes > CHACHA_BLOCK_SIZE) {
+ unsigned int l = min(bytes, CHACHA_BLOCK_SIZE * 4U);
+
+ chacha_4block_xor_neon(state, dst, src, nrounds, l);
+ bytes -= l;
+ src += l;
+ dst += l;
+ state->x[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE);
+ }
+ if (bytes) {
+ const u8 *s = src;
+ u8 *d = dst;
+
+ if (bytes != CHACHA_BLOCK_SIZE)
+ s = d = memcpy(buf, src, bytes);
+ chacha_block_xor_neon(state, d, s, nrounds);
+ if (d != dst)
+ memcpy(dst, buf, bytes);
+ state->x[12]++;
+ }
+}
+
+void hchacha_block_arch(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds)
+{
+ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable()) {
+ hchacha_block_arm(state, out, nrounds);
+ } else {
+ kernel_neon_begin();
+ hchacha_block_neon(state, out, nrounds);
+ kernel_neon_end();
+ }
+}
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable() ||
+ bytes <= CHACHA_BLOCK_SIZE) {
+ chacha_doarm(dst, src, bytes, state, nrounds);
+ state->x[12] += DIV_ROUND_UP(bytes, CHACHA_BLOCK_SIZE);
+ return;
+ }
+
+ do {
+ unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+ kernel_neon_begin();
+ chacha_doneon(state, dst, src, todo, nrounds);
+ kernel_neon_end();
+
+ bytes -= todo;
+ src += todo;
+ dst += todo;
+ } while (bytes);
+}
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+bool chacha_is_arch_optimized(void)
+{
+	/* We can always use at least the ARM scalar implementation. */
+ return true;
+}
+EXPORT_SYMBOL(chacha_is_arch_optimized);
+
+static int __init chacha_arm_mod_init(void)
+{
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) {
+ switch (read_cpuid_part()) {
+ case ARM_CPU_PART_CORTEX_A7:
+ case ARM_CPU_PART_CORTEX_A5:
+ /*
+ * The Cortex-A7 and Cortex-A5 do not perform well with
+			 * the NEON implementation but do incredibly well with the
+ * scalar one and use less power.
+ */
+ break;
+ default:
+ static_branch_enable(&use_neon);
+ }
+ }
+ return 0;
+}
+subsys_initcall(chacha_arm_mod_init);
+
+static void __exit chacha_arm_mod_exit(void)
+{
+}
+module_exit(chacha_arm_mod_exit);
+
+MODULE_DESCRIPTION("ChaCha and HChaCha functions (ARM optimized)");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
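
Two details above are easy to miss: chacha_doneon() keeps a bounce buffer so the final partial block never reads or writes past the caller's buffer, and chacha_crypt_arch() caps each kernel_neon_begin()/end() section at SZ_4K so the NEON unit is never held for long stretches. Callers only see the library interface; a usage sketch (key and nonce setup elided):

#include <crypto/chacha.h>

static void crypt_in_place(struct chacha_state *state, u8 *buf,
			   unsigned int len)
{
	/* state is assumed to have been set up beforehand with
	 * chacha_init(state, key, iv); 20 selects ChaCha20. */
	chacha_crypt_arch(state, buf, buf, len, 20);
}
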
diff --git a/arch/arm/crypto/chacha-neon-core.S b/arch/arm/lib/crypto/chacha-neon-core.S
index 13d12f672656..ddd62b6294a5 100644
--- a/arch/arm/crypto/chacha-neon-core.S
+++ b/arch/arm/lib/crypto/chacha-neon-core.S
@@ -1,5 +1,5 @@
/*
- * ChaCha/XChaCha NEON helper functions
+ * ChaCha/HChaCha NEON helper functions
*
* Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
diff --git a/arch/arm/crypto/chacha-scalar-core.S b/arch/arm/lib/crypto/chacha-scalar-core.S
index 083fe1ab96d0..4951df05c158 100644
--- a/arch/arm/crypto/chacha-scalar-core.S
+++ b/arch/arm/lib/crypto/chacha-scalar-core.S
@@ -367,7 +367,7 @@
/*
* void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes,
- * const u32 *state, int nrounds);
+ * const struct chacha_state *state, int nrounds);
*/
ENTRY(chacha_doarm)
cmp r2, #0 // len == 0?
@@ -407,7 +407,8 @@ ENTRY(chacha_doarm)
ENDPROC(chacha_doarm)
/*
- * void hchacha_block_arm(const u32 state[16], u32 out[8], int nrounds);
+ * void hchacha_block_arm(const struct chacha_state *state,
+ * u32 out[HCHACHA_OUT_WORDS], int nrounds);
*/
ENTRY(hchacha_block_arm)
push {r1,r4-r11,lr}
diff --git a/arch/arm/crypto/poly1305-armv4.pl b/arch/arm/lib/crypto/poly1305-armv4.pl
index 6d79498d3115..d57c6e2fc84a 100644
--- a/arch/arm/crypto/poly1305-armv4.pl
+++ b/arch/arm/lib/crypto/poly1305-armv4.pl
@@ -43,9 +43,9 @@ $code.=<<___;
#else
# define __ARM_ARCH__ __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__
-# define poly1305_init poly1305_init_arm
+# define poly1305_init poly1305_block_init_arch
# define poly1305_blocks poly1305_blocks_arm
-# define poly1305_emit poly1305_emit_arm
+# define poly1305_emit poly1305_emit_arch
.globl poly1305_blocks_neon
#endif
diff --git a/arch/arm/lib/crypto/poly1305-glue.c b/arch/arm/lib/crypto/poly1305-glue.c
new file mode 100644
index 000000000000..2603b0771f2c
--- /dev/null
+++ b/arch/arm/lib/crypto/poly1305-glue.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OpenSSL/Cryptogams accelerated Poly1305 transform for ARM
+ *
+ * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <crypto/internal/poly1305.h>
+#include <linux/cpufeature.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/unaligned.h>
+
+asmlinkage void poly1305_block_init_arch(
+ struct poly1305_block_state *state,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
+EXPORT_SYMBOL_GPL(poly1305_block_init_arch);
+asmlinkage void poly1305_blocks_arm(struct poly1305_block_state *state,
+ const u8 *src, u32 len, u32 hibit);
+asmlinkage void poly1305_blocks_neon(struct poly1305_block_state *state,
+ const u8 *src, u32 len, u32 hibit);
+asmlinkage void poly1305_emit_arch(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4]);
+EXPORT_SYMBOL_GPL(poly1305_emit_arch);
+
+void __weak poly1305_blocks_neon(struct poly1305_block_state *state,
+ const u8 *src, u32 len, u32 hibit)
+{
+}
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+
+void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
+ unsigned int len, u32 padbit)
+{
+ len = round_down(len, POLY1305_BLOCK_SIZE);
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
+ static_branch_likely(&have_neon)) {
+ do {
+ unsigned int todo = min_t(unsigned int, len, SZ_4K);
+
+ kernel_neon_begin();
+ poly1305_blocks_neon(state, src, todo, padbit);
+ kernel_neon_end();
+
+ len -= todo;
+ src += todo;
+ } while (len);
+	} else {
+		poly1305_blocks_arm(state, src, len, padbit);
+	}
+}
+EXPORT_SYMBOL_GPL(poly1305_blocks_arch);
+
+bool poly1305_is_arch_optimized(void)
+{
+	/* We can always use at least the ARM scalar implementation. */
+ return true;
+}
+EXPORT_SYMBOL(poly1305_is_arch_optimized);
+
+static int __init arm_poly1305_mod_init(void)
+{
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
+ (elf_hwcap & HWCAP_NEON))
+ static_branch_enable(&have_neon);
+ return 0;
+}
+subsys_initcall(arm_poly1305_mod_init);
+
+static void __exit arm_poly1305_mod_exit(void)
+{
+}
+module_exit(arm_poly1305_mod_exit);
+
+MODULE_DESCRIPTION("Accelerated Poly1305 transform for ARM");
+MODULE_LICENSE("GPL v2");
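
The empty __weak poly1305_blocks_neon() is there for the !CONFIG_KERNEL_MODE_NEON build: the Makefile above only lets the perlasm emit the NEON routine when it is wanted, and the weak stub satisfies the linker while IS_ENABLED() keeps the call site dead code. The pattern in isolation:

#include <linux/types.h>

/* Weak definition: used only when no strong definition is linked in;
 * a strong symbol (e.g. from generated assembly) silently wins. */
void __weak maybe_accelerated(const u8 *src, u32 len)
{
}
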
diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/lib/crypto/sha256-armv4.pl
index f3a2b54efd4e..8122db7fd599 100644
--- a/arch/arm/crypto/sha256-armv4.pl
+++ b/arch/arm/lib/crypto/sha256-armv4.pl
@@ -204,18 +204,18 @@ K256:
.word 0 @ terminator
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.LOPENSSL_armcap:
-.word OPENSSL_armcap_P-sha256_block_data_order
+.word OPENSSL_armcap_P-sha256_blocks_arch
#endif
.align 5
-.global sha256_block_data_order
-.type sha256_block_data_order,%function
-sha256_block_data_order:
-.Lsha256_block_data_order:
+.global sha256_blocks_arch
+.type sha256_blocks_arch,%function
+sha256_blocks_arch:
+.Lsha256_blocks_arch:
#if __ARM_ARCH__<7
- sub r3,pc,#8 @ sha256_block_data_order
+ sub r3,pc,#8 @ sha256_blocks_arch
#else
- adr r3,.Lsha256_block_data_order
+ adr r3,.Lsha256_blocks_arch
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
@@ -282,7 +282,7 @@ $code.=<<___;
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
#endif
-.size sha256_block_data_order,.-sha256_block_data_order
+.size sha256_blocks_arch,.-sha256_blocks_arch
___
######################################################################
# NEON stuff
@@ -470,8 +470,8 @@ sha256_block_data_order_neon:
stmdb sp!,{r4-r12,lr}
sub $H,sp,#16*4+16
- adr $Ktbl,.Lsha256_block_data_order
- sub $Ktbl,$Ktbl,#.Lsha256_block_data_order-K256
+ adr $Ktbl,.Lsha256_blocks_arch
+ sub $Ktbl,$Ktbl,#.Lsha256_blocks_arch-K256
bic $H,$H,#15 @ align for 128-bit stores
mov $t2,sp
mov sp,$H @ alloca
diff --git a/arch/arm/crypto/sha2-ce-core.S b/arch/arm/lib/crypto/sha256-ce.S
index b6369d2440a1..ac2c9b01b22d 100644
--- a/arch/arm/crypto/sha2-ce-core.S
+++ b/arch/arm/lib/crypto/sha256-ce.S
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * sha2-ce-core.S - SHA-224/256 secure hash using ARMv8 Crypto Extensions
+ * sha256-ce.S - SHA-224/256 secure hash using ARMv8 Crypto Extensions
*
* Copyright (C) 2015 Linaro Ltd.
* Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
@@ -67,10 +67,10 @@
.word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
/*
- * void sha2_ce_transform(struct sha256_state *sst, u8 const *src,
- int blocks);
+ * void sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
+ * const u8 *data, size_t nblocks);
*/
-ENTRY(sha2_ce_transform)
+ENTRY(sha256_ce_transform)
/* load state */
vld1.32 {dga-dgb}, [r0]
@@ -120,4 +120,4 @@ ENTRY(sha2_ce_transform)
/* store new state */
vst1.32 {dga-dgb}, [r0]
bx lr
-ENDPROC(sha2_ce_transform)
+ENDPROC(sha256_ce_transform)
diff --git a/arch/arm/lib/crypto/sha256.c b/arch/arm/lib/crypto/sha256.c
new file mode 100644
index 000000000000..109192e54b0f
--- /dev/null
+++ b/arch/arm/lib/crypto/sha256.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SHA-256 optimized for ARM
+ *
+ * Copyright 2025 Google LLC
+ */
+#include <asm/neon.h>
+#include <crypto/internal/sha2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+asmlinkage void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+EXPORT_SYMBOL_GPL(sha256_blocks_arch);
+asmlinkage void sha256_block_data_order_neon(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+asmlinkage void sha256_ce_transform(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);
+
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks)
+{
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
+ static_branch_likely(&have_neon)) {
+ kernel_neon_begin();
+ if (static_branch_likely(&have_ce))
+ sha256_ce_transform(state, data, nblocks);
+ else
+ sha256_block_data_order_neon(state, data, nblocks);
+ kernel_neon_end();
+ } else {
+ sha256_blocks_arch(state, data, nblocks);
+ }
+}
+EXPORT_SYMBOL_GPL(sha256_blocks_simd);
+
+bool sha256_is_arch_optimized(void)
+{
+	/* We can always use at least the ARM scalar implementation. */
+ return true;
+}
+EXPORT_SYMBOL_GPL(sha256_is_arch_optimized);
+
+static int __init sha256_arm_mod_init(void)
+{
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) {
+ static_branch_enable(&have_neon);
+ if (elf_hwcap2 & HWCAP2_SHA2)
+ static_branch_enable(&have_ce);
+ }
+ return 0;
+}
+subsys_initcall(sha256_arm_mod_init);
+
+static void __exit sha256_arm_mod_exit(void)
+{
+}
+module_exit(sha256_arm_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-256 optimized for ARM");
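
Nothing calls these exports by hand; users go through the <crypto/sha2.h> library interface, which lib/crypto/sha256.c routes into the arch hooks above (that is what the CRYPTO_ARCH_HAVE_LIB_SHA256 select in the new Kconfig wires up). A caller-side sketch:

#include <crypto/sha2.h>

static void digest_example(const u8 *data, unsigned int len)
{
	u8 digest[SHA256_DIGEST_SIZE];

	/* One-shot helper; on this architecture it ends up in
	 * sha256_blocks_arch()/sha256_blocks_simd() above. */
	sha256(data, len, digest);
}
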
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index cac4e82f6c82..150a1e56dcae 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -209,9 +209,8 @@ static int __init exynos_pmu_irq_init(struct device_node *node,
return -ENOMEM;
}
- domain = irq_domain_add_hierarchy(parent_domain, 0, 0,
- node, &exynos_pmu_domain_ops,
- NULL);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, 0, of_fwnode_handle(node),
+ &exynos_pmu_domain_ops, NULL);
if (!domain) {
iounmap(pmu_base_addr);
pmu_base_addr = NULL;
diff --git a/arch/arm/mach-imx/avic.c b/arch/arm/mach-imx/avic.c
index cf6546ddc7a3..3067c06b4b8e 100644
--- a/arch/arm/mach-imx/avic.c
+++ b/arch/arm/mach-imx/avic.c
@@ -201,8 +201,8 @@ static void __init mxc_init_irq(void __iomem *irqbase)
WARN_ON(irq_base < 0);
np = of_find_compatible_node(NULL, NULL, "fsl,avic");
- domain = irq_domain_add_legacy(np, AVIC_NUM_IRQS, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ domain = irq_domain_create_legacy(of_fwnode_handle(np), AVIC_NUM_IRQS, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
WARN_ON(!domain);
for (i = 0; i < AVIC_NUM_IRQS / 32; i++, irq_base += 32)
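
This hunk and the remaining irqchip changes are one mechanical conversion: the retired irq_domain_add_*() wrappers took a struct device_node *, while the irq_domain_create_*() functions take the more general struct fwnode_handle *, so OF callers wrap their node. The shape of the change, as a sketch:

#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *make_legacy_domain(struct device_node *np,
					     unsigned int nr_irqs,
					     unsigned int first_irq)
{
	/* of_fwnode_handle() maps the OF node to its embedded
	 * fwnode_handle and is NULL-safe. */
	return irq_domain_create_legacy(of_fwnode_handle(np), nr_irqs,
					first_irq, 0,
					&irq_domain_simple_ops, NULL);
}
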
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 5909088d5482..2e633569d2f8 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -245,9 +245,8 @@ static int __init imx_gpc_init(struct device_node *node,
if (WARN_ON(!gpc_base))
return -ENOMEM;
- domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
- node, &imx_gpc_domain_ops,
- NULL);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, GPC_MAX_IRQS, of_fwnode_handle(node),
+ &imx_gpc_domain_ops, NULL);
if (!domain) {
iounmap(gpc_base);
return -ENOMEM;
diff --git a/arch/arm/mach-imx/tzic.c b/arch/arm/mach-imx/tzic.c
index 8b3d98d288d9..50a5668e65d2 100644
--- a/arch/arm/mach-imx/tzic.c
+++ b/arch/arm/mach-imx/tzic.c
@@ -175,8 +175,8 @@ static int __init tzic_init_dt(struct device_node *np, struct device_node *p)
irq_base = irq_alloc_descs(-1, 0, TZIC_NUM_IRQS, numa_node_id());
WARN_ON(irq_base < 0);
- domain = irq_domain_add_legacy(np, TZIC_NUM_IRQS, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ domain = irq_domain_create_legacy(of_fwnode_handle(np), TZIC_NUM_IRQS, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
WARN_ON(!domain);
for (i = 0; i < 4; i++, irq_base += 32)
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c
index 9b587ecebb1c..bb1bc060ecd8 100644
--- a/arch/arm/mach-omap1/irq.c
+++ b/arch/arm/mach-omap1/irq.c
@@ -220,8 +220,7 @@ void __init omap1_init_irq(void)
omap_l2_irq = irq_base;
omap_l2_irq -= NR_IRQS_LEGACY;
- domain = irq_domain_add_legacy(NULL, nr_irqs, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ domain = irq_domain_create_legacy(NULL, nr_irqs, irq_base, 0, &irq_domain_simple_ops, NULL);
pr_info("Total of %lu interrupts in %i interrupt banks\n",
nr_irqs, irq_bank_count);
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 6f0d6120c174..a66b1dc61571 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -585,9 +585,8 @@ static int __init wakeupgen_init(struct device_node *node,
wakeupgen_ops = &am43xx_wakeupgen_ops;
}
- domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
- node, &wakeupgen_domain_ops,
- NULL);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, max_irqs, of_fwnode_handle(node),
+ &wakeupgen_domain_ops, NULL);
if (!domain) {
iounmap(wakeupgen_base);
return -ENOMEM;
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index d9cadd97748a..5bfce8aa4102 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -147,9 +147,8 @@ pxa_init_irq_common(struct device_node *node, int irq_nr,
int n;
pxa_internal_irq_nr = irq_nr;
- pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
- PXA_IRQ(0), 0,
- &pxa_irq_ops, NULL);
+ pxa_irq_domain = irq_domain_create_legacy(of_fwnode_handle(node), irq_nr, PXA_IRQ(0), 0,
+ &pxa_irq_ops, NULL);
if (!pxa_irq_domain)
panic("Unable to add PXA IRQ domain\n");
irq_set_default_domain(pxa_irq_domain);
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 595e9cb33c1d..326616fbdc44 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -496,11 +496,10 @@ static void orion_gpio_unmask_irq(struct irq_data *d)
u32 reg_val;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
reg_val = irq_reg_readl(gc, ct->regs.mask);
reg_val |= mask;
irq_reg_writel(gc, reg_val, ct->regs.mask);
- irq_gc_unlock(gc);
}
static void orion_gpio_mask_irq(struct irq_data *d)
@@ -510,11 +509,10 @@ static void orion_gpio_mask_irq(struct irq_data *d)
u32 mask = d->mask;
u32 reg_val;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
reg_val = irq_reg_readl(gc, ct->regs.mask);
reg_val &= ~mask;
irq_reg_writel(gc, reg_val, ct->regs.mask);
- irq_gc_unlock(gc);
}
void __init orion_gpio_init(int gpio_base, int ngpio,
@@ -602,12 +600,12 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
/* Setup irq domain on top of the generic chip. */
- ochip->domain = irq_domain_add_legacy(NULL,
- ochip->chip.ngpio,
- ochip->secondary_irq_base,
- ochip->secondary_irq_base,
- &irq_domain_simple_ops,
- ochip);
+ ochip->domain = irq_domain_create_legacy(NULL,
+ ochip->chip.ngpio,
+ ochip->secondary_irq_base,
+ ochip->secondary_irq_base,
+ &irq_domain_simple_ops,
+ ochip);
if (!ochip->domain)
panic("%s: couldn't allocate irq domain (DT).\n",
ochip->chip.label);
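
Finally, the orion_gpio_{mask,unmask}_irq() hunks replace the irq_gc_lock()/irq_gc_unlock() pair with scope-based locking from <linux/cleanup.h>: guard(raw_spinlock)(&gc->lock) takes the lock and drops it automatically when the enclosing scope ends, on every exit path. The idiom in isolation:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static u32 set_bits_locked(raw_spinlock_t *lock, u32 *reg, u32 mask)
{
	guard(raw_spinlock)(lock);	/* released automatically at '}' */
	*reg |= mask;
	return *reg;
}
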