author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-11-03 05:15:30 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-11-03 05:15:30 +0300
commit | bc3012f4e3a9765de81f454cb8f9bb16aafc6ff5 (patch)
tree | 2c127c669218b8c74c843331e455372f88a6a848 /arch
parent | 6803bd7956ca8fc43069c2e42016f17f3c2fbf30 (diff)
parent | a312e07a65fb598ed239b940434392721385c722 (diff)
download | linux-bc3012f4e3a9765de81f454cb8f9bb16aafc6ff5.tar.xz
Merge tag 'v6.7-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"API:
- Add virtual-address based lskcipher interface
- Optimise ahash/shash performance in light of costly indirect calls
- Remove ahash alignmask attribute
Algorithms:
- Improve AES/XTS performance of 6-way unrolling for ppc
- Remove some uses of obsolete algorithms (md4, md5, sha1)
- Add FIPS 202 SHA-3 support in pkcs1pad
- Add fast path for single-page messages in adiantum
- Remove zlib-deflate
Drivers:
- Add support for S4 in meson RNG driver
- Add STM32MP13x support in stm32
- Add hwrng interface support in qcom-rng
- Add support for deflate algorithm in hisilicon/zip"
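The alignmask removal mentioned above shows up in the arch diff further down as drivers handling unaligned data themselves: the sparc crc32c glue drops cra_alignmask = 7 and switches to the unaligned-access helpers. A minimal sketch of that conversion for a driver whose digest is a single u32, modeled on the crc32c glue but with illustrative function names that are not from the tree:

	#include <crypto/internal/hash.h>
	#include <asm/unaligned.h>

	/* setkey: the key may now arrive unaligned, so avoid casting it */
	static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
				 unsigned int keylen)
	{
		u32 *mctx = crypto_shash_ctx(tfm);

		if (keylen != sizeof(u32))
			return -EINVAL;
		*mctx = get_unaligned_le32(key);  /* was: le32_to_cpup((__le32 *)key) */
		return 0;
	}

	/* final: likewise write the digest through put_unaligned_le32() */
	static int chksum_final(struct shash_desc *desc, u8 *out)
	{
		u32 *crcp = shash_desc_ctx(desc);

		put_unaligned_le32(~*crcp, out);  /* was: *(__le32 *)out = ~cpu_to_le32p(crcp) */
		return 0;
	}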
* tag 'v6.7-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (283 commits)
crypto: adiantum - flush destination page before unmapping
crypto: testmgr - move pkcs1pad(rsa,sha3-*) to correct place
Documentation/module-signing.txt: bring up to date
module: enable automatic module signing with FIPS 202 SHA-3
crypto: asymmetric_keys - allow FIPS 202 SHA-3 signatures
crypto: rsa-pkcs1pad - Add FIPS 202 SHA-3 support
crypto: FIPS 202 SHA-3 register in hash info for IMA
x509: Add OIDs for FIPS 202 SHA-3 hash and signatures
crypto: ahash - optimize performance when wrapping shash
crypto: ahash - check for shash type instead of not ahash type
crypto: hash - move "ahash wrapping shash" functions to ahash.c
crypto: talitos - stop using crypto_ahash::init
crypto: chelsio - stop using crypto_ahash::init
crypto: ahash - improve file comment
crypto: ahash - remove struct ahash_request_priv
crypto: ahash - remove crypto_ahash_alignmask
crypto: gcm - stop using alignmask of ahash
crypto: chacha20poly1305 - stop using alignmask of ahash
crypto: ccm - stop using alignmask of ahash
net: ipv6: stop checking crypto_ahash_alignmask
...
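A recurring pattern in the hash commits listed above is a one-shot .digest callback, which lets the shash core handle a whole message with a single indirect call instead of separate init/update/final calls. The nhpoly1305 and sha256 glue changes in the diff below add it in exactly this shape; the sketch uses a generic driver name and assumes the driver's existing init/update/final callbacks:

	static int myhash_digest(struct shash_desc *desc, const u8 *src,
				 unsigned int srclen, u8 *out)
	{
		/* Chain the existing callbacks; ?: stops at the first error. */
		return myhash_init(desc) ?:
		       myhash_update(desc, src, srclen) ?:
		       myhash_final(desc, out);
	}

	static struct shash_alg myhash_alg = {
		.base.cra_name	= "myhash",
		.init		= myhash_init,
		.update		= myhash_update,
		.final		= myhash_final,
		.digest		= myhash_digest,	/* the new one-shot fast path */
		.descsize	= sizeof(struct myhash_state),
	};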
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/crypto/nhpoly1305-neon-glue.c | 9
-rw-r--r-- | arch/arm64/crypto/nhpoly1305-neon-glue.c | 9
-rw-r--r-- | arch/arm64/crypto/sha1-ce-core.S | 8
-rw-r--r-- | arch/arm64/crypto/sha1-ce-glue.c | 21
-rw-r--r-- | arch/arm64/crypto/sha2-ce-core.S | 8
-rw-r--r-- | arch/arm64/crypto/sha2-ce-glue.c | 39
-rw-r--r-- | arch/arm64/crypto/sha256-glue.c | 26
-rw-r--r-- | arch/arm64/crypto/sha512-ce-core.S | 8
-rw-r--r-- | arch/arm64/crypto/sha512-ce-glue.c | 26
-rw-r--r-- | arch/arm64/crypto/sha512-glue.c | 12
-rw-r--r-- | arch/loongarch/crypto/crc32-loongarch.c | 2
-rw-r--r-- | arch/mips/crypto/crc32-mips.c | 2
-rw-r--r-- | arch/sparc/crypto/crc32c_glue.c | 45
-rw-r--r-- | arch/x86/crypto/aesni-intel_asm.S | 4
-rw-r--r-- | arch/x86/crypto/aesni-intel_avx-x86_64.S | 4
-rw-r--r-- | arch/x86/crypto/aesni-intel_glue.c | 52
-rw-r--r-- | arch/x86/crypto/nhpoly1305-avx2-glue.c | 9
-rw-r--r-- | arch/x86/crypto/nhpoly1305-sse2-glue.c | 9
-rw-r--r-- | arch/x86/crypto/sha1_ssse3_glue.c | 12
-rw-r--r-- | arch/x86/crypto/sha256_ssse3_glue.c | 44
20 files changed, 222 insertions, 127 deletions
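Most of the arm64 churn in the diff below is a naming swap: the asm entry points gain a double-underscore prefix while the C glue keeps the plain name. The glue wrapper is the interesting part: the asm transform may return early with the number of blocks it did not process, and the wrapper loops, closing and reopening the kernel-mode NEON section between chunks so it stays short. Reassembled from the flattened sha1-ce-glue.c hunk (sha2 and sha512 use the same shape):

	static void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
				      int blocks)
	{
		while (blocks) {
			int rem;

			kernel_neon_begin();
			rem = __sha1_ce_transform(container_of(sst,
							       struct sha1_ce_state,
							       sst), src, blocks);
			kernel_neon_end();
			src += (blocks - rem) * SHA1_BLOCK_SIZE;
			blocks = rem;
		}
	}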
diff --git a/arch/arm/crypto/nhpoly1305-neon-glue.c b/arch/arm/crypto/nhpoly1305-neon-glue.c index e93e41ff2656..62cf7ccdde73 100644 --- a/arch/arm/crypto/nhpoly1305-neon-glue.c +++ b/arch/arm/crypto/nhpoly1305-neon-glue.c @@ -34,6 +34,14 @@ static int nhpoly1305_neon_update(struct shash_desc *desc, return 0; } +static int nhpoly1305_neon_digest(struct shash_desc *desc, + const u8 *src, unsigned int srclen, u8 *out) +{ + return crypto_nhpoly1305_init(desc) ?: + nhpoly1305_neon_update(desc, src, srclen) ?: + crypto_nhpoly1305_final(desc, out); +} + static struct shash_alg nhpoly1305_alg = { .base.cra_name = "nhpoly1305", .base.cra_driver_name = "nhpoly1305-neon", @@ -44,6 +52,7 @@ static struct shash_alg nhpoly1305_alg = { .init = crypto_nhpoly1305_init, .update = nhpoly1305_neon_update, .final = crypto_nhpoly1305_final, + .digest = nhpoly1305_neon_digest, .setkey = crypto_nhpoly1305_setkey, .descsize = sizeof(struct nhpoly1305_state), }; diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c index cd882c35d925..e4a0b463f080 100644 --- a/arch/arm64/crypto/nhpoly1305-neon-glue.c +++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c @@ -34,6 +34,14 @@ static int nhpoly1305_neon_update(struct shash_desc *desc, return 0; } +static int nhpoly1305_neon_digest(struct shash_desc *desc, + const u8 *src, unsigned int srclen, u8 *out) +{ + return crypto_nhpoly1305_init(desc) ?: + nhpoly1305_neon_update(desc, src, srclen) ?: + crypto_nhpoly1305_final(desc, out); +} + static struct shash_alg nhpoly1305_alg = { .base.cra_name = "nhpoly1305", .base.cra_driver_name = "nhpoly1305-neon", @@ -44,6 +52,7 @@ static struct shash_alg nhpoly1305_alg = { .init = crypto_nhpoly1305_init, .update = nhpoly1305_neon_update, .final = crypto_nhpoly1305_final, + .digest = nhpoly1305_neon_digest, .setkey = crypto_nhpoly1305_setkey, .descsize = sizeof(struct nhpoly1305_state), }; diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S index 889ca0f8972b..9b1f2d82a6fe 100644 --- a/arch/arm64/crypto/sha1-ce-core.S +++ b/arch/arm64/crypto/sha1-ce-core.S @@ -62,10 +62,10 @@ .endm /* - * int sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, - * int blocks) + * int __sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, + * int blocks) */ -SYM_FUNC_START(sha1_ce_transform) +SYM_FUNC_START(__sha1_ce_transform) /* load round constants */ loadrc k0.4s, 0x5a827999, w6 loadrc k1.4s, 0x6ed9eba1, w6 @@ -147,4 +147,4 @@ CPU_LE( rev32 v11.16b, v11.16b ) str dgb, [x0, #16] mov w0, w2 ret -SYM_FUNC_END(sha1_ce_transform) +SYM_FUNC_END(__sha1_ce_transform) diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index 71fa4f1122d7..1dd93e1fcb39 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c @@ -29,18 +29,19 @@ struct sha1_ce_state { extern const u32 sha1_ce_offsetof_count; extern const u32 sha1_ce_offsetof_finalize; -asmlinkage int sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, - int blocks); +asmlinkage int __sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, + int blocks); -static void __sha1_ce_transform(struct sha1_state *sst, u8 const *src, - int blocks) +static void sha1_ce_transform(struct sha1_state *sst, u8 const *src, + int blocks) { while (blocks) { int rem; kernel_neon_begin(); - rem = sha1_ce_transform(container_of(sst, struct sha1_ce_state, - sst), src, blocks); + rem = __sha1_ce_transform(container_of(sst, + struct sha1_ce_state, + sst), src, blocks); kernel_neon_end(); 
src += (blocks - rem) * SHA1_BLOCK_SIZE; blocks = rem; @@ -59,7 +60,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data, return crypto_sha1_update(desc, data, len); sctx->finalize = 0; - sha1_base_do_update(desc, data, len, __sha1_ce_transform); + sha1_base_do_update(desc, data, len, sha1_ce_transform); return 0; } @@ -79,9 +80,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, */ sctx->finalize = finalize; - sha1_base_do_update(desc, data, len, __sha1_ce_transform); + sha1_base_do_update(desc, data, len, sha1_ce_transform); if (!finalize) - sha1_base_do_finalize(desc, __sha1_ce_transform); + sha1_base_do_finalize(desc, sha1_ce_transform); return sha1_base_finish(desc, out); } @@ -93,7 +94,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out) return crypto_sha1_finup(desc, NULL, 0, out); sctx->finalize = 0; - sha1_base_do_finalize(desc, __sha1_ce_transform); + sha1_base_do_finalize(desc, sha1_ce_transform); return sha1_base_finish(desc, out); } diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S index 491179922f49..fce84d88ddb2 100644 --- a/arch/arm64/crypto/sha2-ce-core.S +++ b/arch/arm64/crypto/sha2-ce-core.S @@ -71,11 +71,11 @@ .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 /* - * void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src, - * int blocks) + * int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src, + * int blocks) */ .text -SYM_FUNC_START(sha2_ce_transform) +SYM_FUNC_START(__sha256_ce_transform) /* load round constants */ adr_l x8, .Lsha2_rcon ld1 { v0.4s- v3.4s}, [x8], #64 @@ -154,4 +154,4 @@ CPU_LE( rev32 v19.16b, v19.16b ) 3: st1 {dgav.4s, dgbv.4s}, [x0] mov w0, w2 ret -SYM_FUNC_END(sha2_ce_transform) +SYM_FUNC_END(__sha256_ce_transform) diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index c57a6119fefc..0a44d2e7ee1f 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c @@ -30,18 +30,19 @@ struct sha256_ce_state { extern const u32 sha256_ce_offsetof_count; extern const u32 sha256_ce_offsetof_finalize; -asmlinkage int sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src, - int blocks); +asmlinkage int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src, + int blocks); -static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src, +static void sha256_ce_transform(struct sha256_state *sst, u8 const *src, int blocks) { while (blocks) { int rem; kernel_neon_begin(); - rem = sha2_ce_transform(container_of(sst, struct sha256_ce_state, - sst), src, blocks); + rem = __sha256_ce_transform(container_of(sst, + struct sha256_ce_state, + sst), src, blocks); kernel_neon_end(); src += (blocks - rem) * SHA256_BLOCK_SIZE; blocks = rem; @@ -55,8 +56,8 @@ const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state, asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks); -static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src, - int blocks) +static void sha256_arm64_transform(struct sha256_state *sst, u8 const *src, + int blocks) { sha256_block_data_order(sst->state, src, blocks); } @@ -68,10 +69,10 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data, if (!crypto_simd_usable()) return sha256_base_do_update(desc, data, len, - __sha256_block_data_order); + sha256_arm64_transform); sctx->finalize = 0; - sha256_base_do_update(desc, data, len, __sha2_ce_transform); + sha256_base_do_update(desc, data, len, 
sha256_ce_transform); return 0; } @@ -85,8 +86,8 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data, if (!crypto_simd_usable()) { if (len) sha256_base_do_update(desc, data, len, - __sha256_block_data_order); - sha256_base_do_finalize(desc, __sha256_block_data_order); + sha256_arm64_transform); + sha256_base_do_finalize(desc, sha256_arm64_transform); return sha256_base_finish(desc, out); } @@ -96,9 +97,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data, */ sctx->finalize = finalize; - sha256_base_do_update(desc, data, len, __sha2_ce_transform); + sha256_base_do_update(desc, data, len, sha256_ce_transform); if (!finalize) - sha256_base_do_finalize(desc, __sha2_ce_transform); + sha256_base_do_finalize(desc, sha256_ce_transform); return sha256_base_finish(desc, out); } @@ -107,15 +108,22 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out) struct sha256_ce_state *sctx = shash_desc_ctx(desc); if (!crypto_simd_usable()) { - sha256_base_do_finalize(desc, __sha256_block_data_order); + sha256_base_do_finalize(desc, sha256_arm64_transform); return sha256_base_finish(desc, out); } sctx->finalize = 0; - sha256_base_do_finalize(desc, __sha2_ce_transform); + sha256_base_do_finalize(desc, sha256_ce_transform); return sha256_base_finish(desc, out); } +static int sha256_ce_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + sha256_base_init(desc); + return sha256_ce_finup(desc, data, len, out); +} + static int sha256_ce_export(struct shash_desc *desc, void *out) { struct sha256_ce_state *sctx = shash_desc_ctx(desc); @@ -155,6 +163,7 @@ static struct shash_alg algs[] = { { .update = sha256_ce_update, .final = sha256_ce_final, .finup = sha256_ce_finup, + .digest = sha256_ce_digest, .export = sha256_ce_export, .import = sha256_ce_import, .descsize = sizeof(struct sha256_ce_state), diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c index 9b5c86e07a9a..35356987cc1e 100644 --- a/arch/arm64/crypto/sha256-glue.c +++ b/arch/arm64/crypto/sha256-glue.c @@ -27,8 +27,8 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data, unsigned int num_blks); EXPORT_SYMBOL(sha256_block_data_order); -static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src, - int blocks) +static void sha256_arm64_transform(struct sha256_state *sst, u8 const *src, + int blocks) { sha256_block_data_order(sst->state, src, blocks); } @@ -36,8 +36,8 @@ static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src, asmlinkage void sha256_block_neon(u32 *digest, const void *data, unsigned int num_blks); -static void __sha256_block_neon(struct sha256_state *sst, u8 const *src, - int blocks) +static void sha256_neon_transform(struct sha256_state *sst, u8 const *src, + int blocks) { sha256_block_neon(sst->state, src, blocks); } @@ -45,17 +45,15 @@ static void __sha256_block_neon(struct sha256_state *sst, u8 const *src, static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sha256_base_do_update(desc, data, len, - __sha256_block_data_order); + return sha256_base_do_update(desc, data, len, sha256_arm64_transform); } static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { if (len) - sha256_base_do_update(desc, data, len, - __sha256_block_data_order); - sha256_base_do_finalize(desc, __sha256_block_data_order); + sha256_base_do_update(desc, data, len, sha256_arm64_transform); + 
sha256_base_do_finalize(desc, sha256_arm64_transform); return sha256_base_finish(desc, out); } @@ -98,7 +96,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data, if (!crypto_simd_usable()) return sha256_base_do_update(desc, data, len, - __sha256_block_data_order); + sha256_arm64_transform); while (len > 0) { unsigned int chunk = len; @@ -114,7 +112,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data, sctx->count % SHA256_BLOCK_SIZE; kernel_neon_begin(); - sha256_base_do_update(desc, data, chunk, __sha256_block_neon); + sha256_base_do_update(desc, data, chunk, sha256_neon_transform); kernel_neon_end(); data += chunk; len -= chunk; @@ -128,13 +126,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data, if (!crypto_simd_usable()) { if (len) sha256_base_do_update(desc, data, len, - __sha256_block_data_order); - sha256_base_do_finalize(desc, __sha256_block_data_order); + sha256_arm64_transform); + sha256_base_do_finalize(desc, sha256_arm64_transform); } else { if (len) sha256_update_neon(desc, data, len); kernel_neon_begin(); - sha256_base_do_finalize(desc, __sha256_block_neon); + sha256_base_do_finalize(desc, sha256_neon_transform); kernel_neon_end(); } return sha256_base_finish(desc, out); diff --git a/arch/arm64/crypto/sha512-ce-core.S b/arch/arm64/crypto/sha512-ce-core.S index b6a3a36e15f5..91ef68b15fcc 100644 --- a/arch/arm64/crypto/sha512-ce-core.S +++ b/arch/arm64/crypto/sha512-ce-core.S @@ -102,11 +102,11 @@ .endm /* - * void sha512_ce_transform(struct sha512_state *sst, u8 const *src, - * int blocks) + * int __sha512_ce_transform(struct sha512_state *sst, u8 const *src, + * int blocks) */ .text -SYM_FUNC_START(sha512_ce_transform) +SYM_FUNC_START(__sha512_ce_transform) /* load state */ ld1 {v8.2d-v11.2d}, [x0] @@ -203,4 +203,4 @@ CPU_LE( rev64 v19.16b, v19.16b ) 3: st1 {v8.2d-v11.2d}, [x0] mov w0, w2 ret -SYM_FUNC_END(sha512_ce_transform) +SYM_FUNC_END(__sha512_ce_transform) diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c index 94cb7580deb7..f3431fc62315 100644 --- a/arch/arm64/crypto/sha512-ce-glue.c +++ b/arch/arm64/crypto/sha512-ce-glue.c @@ -26,27 +26,27 @@ MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("sha384"); MODULE_ALIAS_CRYPTO("sha512"); -asmlinkage int sha512_ce_transform(struct sha512_state *sst, u8 const *src, - int blocks); +asmlinkage int __sha512_ce_transform(struct sha512_state *sst, u8 const *src, + int blocks); asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks); -static void __sha512_ce_transform(struct sha512_state *sst, u8 const *src, - int blocks) +static void sha512_ce_transform(struct sha512_state *sst, u8 const *src, + int blocks) { while (blocks) { int rem; kernel_neon_begin(); - rem = sha512_ce_transform(sst, src, blocks); + rem = __sha512_ce_transform(sst, src, blocks); kernel_neon_end(); src += (blocks - rem) * SHA512_BLOCK_SIZE; blocks = rem; } } -static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src, - int blocks) +static void sha512_arm64_transform(struct sha512_state *sst, u8 const *src, + int blocks) { sha512_block_data_order(sst->state, src, blocks); } @@ -54,8 +54,8 @@ static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src, static int sha512_ce_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - sha512_block_fn *fn = crypto_simd_usable() ? __sha512_ce_transform - : __sha512_block_data_order; + sha512_block_fn *fn = crypto_simd_usable() ? 
sha512_ce_transform + : sha512_arm64_transform; sha512_base_do_update(desc, data, len, fn); return 0; @@ -64,8 +64,8 @@ static int sha512_ce_update(struct shash_desc *desc, const u8 *data, static int sha512_ce_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - sha512_block_fn *fn = crypto_simd_usable() ? __sha512_ce_transform - : __sha512_block_data_order; + sha512_block_fn *fn = crypto_simd_usable() ? sha512_ce_transform + : sha512_arm64_transform; sha512_base_do_update(desc, data, len, fn); sha512_base_do_finalize(desc, fn); @@ -74,8 +74,8 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data, static int sha512_ce_final(struct shash_desc *desc, u8 *out) { - sha512_block_fn *fn = crypto_simd_usable() ? __sha512_ce_transform - : __sha512_block_data_order; + sha512_block_fn *fn = crypto_simd_usable() ? sha512_ce_transform + : sha512_arm64_transform; sha512_base_do_finalize(desc, fn); return sha512_base_finish(desc, out); diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c index 2acff1c7df5d..62f129dea83d 100644 --- a/arch/arm64/crypto/sha512-glue.c +++ b/arch/arm64/crypto/sha512-glue.c @@ -23,8 +23,8 @@ asmlinkage void sha512_block_data_order(u64 *digest, const void *data, unsigned int num_blks); EXPORT_SYMBOL(sha512_block_data_order); -static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src, - int blocks) +static void sha512_arm64_transform(struct sha512_state *sst, u8 const *src, + int blocks) { sha512_block_data_order(sst->state, src, blocks); } @@ -32,17 +32,15 @@ static void __sha512_block_data_order(struct sha512_state *sst, u8 const *src, static int sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sha512_base_do_update(desc, data, len, - __sha512_block_data_order); + return sha512_base_do_update(desc, data, len, sha512_arm64_transform); } static int sha512_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { if (len) - sha512_base_do_update(desc, data, len, - __sha512_block_data_order); - sha512_base_do_finalize(desc, __sha512_block_data_order); + sha512_base_do_update(desc, data, len, sha512_arm64_transform); + sha512_base_do_finalize(desc, sha512_arm64_transform); return sha512_base_finish(desc, out); } diff --git a/arch/loongarch/crypto/crc32-loongarch.c b/arch/loongarch/crypto/crc32-loongarch.c index 1f2a2c3839bc..a49e507af38c 100644 --- a/arch/loongarch/crypto/crc32-loongarch.c +++ b/arch/loongarch/crypto/crc32-loongarch.c @@ -239,7 +239,6 @@ static struct shash_alg crc32_alg = { .cra_priority = 300, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_alignmask = 0, .cra_ctxsize = sizeof(struct chksum_ctx), .cra_module = THIS_MODULE, .cra_init = chksum_cra_init, @@ -261,7 +260,6 @@ static struct shash_alg crc32c_alg = { .cra_priority = 300, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_alignmask = 0, .cra_ctxsize = sizeof(struct chksum_ctx), .cra_module = THIS_MODULE, .cra_init = chksumc_cra_init, diff --git a/arch/mips/crypto/crc32-mips.c b/arch/mips/crypto/crc32-mips.c index 3e4f5ba104f8..ec6d58008f8e 100644 --- a/arch/mips/crypto/crc32-mips.c +++ b/arch/mips/crypto/crc32-mips.c @@ -290,7 +290,6 @@ static struct shash_alg crc32_alg = { .cra_priority = 300, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_alignmask = 0, .cra_ctxsize = sizeof(struct chksum_ctx), .cra_module = THIS_MODULE, .cra_init = chksum_cra_init, @@ -312,7 +311,6 @@ 
static struct shash_alg crc32c_alg = { .cra_priority = 300, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_alignmask = 0, .cra_ctxsize = sizeof(struct chksum_ctx), .cra_module = THIS_MODULE, .cra_init = chksum_cra_init, diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c index 82efb7f81c28..688db0dcb97d 100644 --- a/arch/sparc/crypto/crc32c_glue.c +++ b/arch/sparc/crypto/crc32c_glue.c @@ -20,6 +20,7 @@ #include <asm/pstate.h> #include <asm/elf.h> +#include <asm/unaligned.h> #include "opcodes.h" @@ -35,7 +36,7 @@ static int crc32c_sparc64_setkey(struct crypto_shash *hash, const u8 *key, if (keylen != sizeof(u32)) return -EINVAL; - *mctx = le32_to_cpup((__le32 *)key); + *mctx = get_unaligned_le32(key); return 0; } @@ -51,18 +52,26 @@ static int crc32c_sparc64_init(struct shash_desc *desc) extern void crc32c_sparc64(u32 *crcp, const u64 *data, unsigned int len); -static void crc32c_compute(u32 *crcp, const u64 *data, unsigned int len) +static u32 crc32c_compute(u32 crc, const u8 *data, unsigned int len) { - unsigned int asm_len; - - asm_len = len & ~7U; - if (asm_len) { - crc32c_sparc64(crcp, data, asm_len); - data += asm_len / 8; - len -= asm_len; + unsigned int n = -(uintptr_t)data & 7; + + if (n) { + /* Data isn't 8-byte aligned. Align it. */ + n = min(n, len); + crc = __crc32c_le(crc, data, n); + data += n; + len -= n; + } + n = len & ~7U; + if (n) { + crc32c_sparc64(&crc, (const u64 *)data, n); + data += n; + len -= n; } if (len) - *crcp = __crc32c_le(*crcp, (const unsigned char *) data, len); + crc = __crc32c_le(crc, data, len); + return crc; } static int crc32c_sparc64_update(struct shash_desc *desc, const u8 *data, @@ -70,19 +79,14 @@ static int crc32c_sparc64_update(struct shash_desc *desc, const u8 *data, { u32 *crcp = shash_desc_ctx(desc); - crc32c_compute(crcp, (const u64 *) data, len); - + *crcp = crc32c_compute(*crcp, data, len); return 0; } -static int __crc32c_sparc64_finup(u32 *crcp, const u8 *data, unsigned int len, - u8 *out) +static int __crc32c_sparc64_finup(const u32 *crcp, const u8 *data, + unsigned int len, u8 *out) { - u32 tmp = *crcp; - - crc32c_compute(&tmp, (const u64 *) data, len); - - *(__le32 *) out = ~cpu_to_le32(tmp); + put_unaligned_le32(~crc32c_compute(*crcp, data, len), out); return 0; } @@ -96,7 +100,7 @@ static int crc32c_sparc64_final(struct shash_desc *desc, u8 *out) { u32 *crcp = shash_desc_ctx(desc); - *(__le32 *) out = ~cpu_to_le32p(crcp); + put_unaligned_le32(~*crcp, out); return 0; } @@ -135,7 +139,6 @@ static struct shash_alg alg = { .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), - .cra_alignmask = 7, .cra_module = THIS_MODULE, .cra_init = crc32c_sparc64_cra_init, } diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 3ac7487ecad2..187f913cc239 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -672,7 +672,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff add %r13, %r10 # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling sub $16, %r10 - # Determine if if partial block is not being filled and + # Determine if partial block is not being filled and # shift mask accordingly jge .L_no_extra_mask_1_\@ sub %r10, %r12 @@ -708,7 +708,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff add %r13, %r10 # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling sub $16, %r10 - # Determine if if partial block is not being filled and + # 
Determine if partial block is not being filled and # shift mask accordingly jge .L_no_extra_mask_2_\@ sub %r10, %r12 diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S index 46cddd78857b..74dd230973cf 100644 --- a/arch/x86/crypto/aesni-intel_avx-x86_64.S +++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S @@ -753,7 +753,7 @@ VARIABLE_OFFSET = 16*8 add %r13, %r10 # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling sub $16, %r10 - # Determine if if partial block is not being filled and + # Determine if partial block is not being filled and # shift mask accordingly jge .L_no_extra_mask_1_\@ sub %r10, %r12 @@ -789,7 +789,7 @@ VARIABLE_OFFSET = 16*8 add %r13, %r10 # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling sub $16, %r10 - # Determine if if partial block is not being filled and + # Determine if partial block is not being filled and # shift mask accordingly jge .L_no_extra_mask_2_\@ sub %r10, %r12 diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 39d6a62ac627..b1d90c25975a 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -61,8 +61,8 @@ struct generic_gcmaes_ctx { }; struct aesni_xts_ctx { - u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR; - u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR; + struct crypto_aes_ctx tweak_ctx AESNI_ALIGN_ATTR; + struct crypto_aes_ctx crypt_ctx AESNI_ALIGN_ATTR; }; #define GCM_BLOCK_LEN 16 @@ -80,6 +80,13 @@ struct gcm_context_data { u8 hash_keys[GCM_BLOCK_LEN * 16]; }; +static inline void *aes_align_addr(void *addr) +{ + if (crypto_tfm_ctx_alignment() >= AESNI_ALIGN) + return addr; + return PTR_ALIGN(addr, AESNI_ALIGN); +} + asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len); asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in); @@ -201,32 +208,24 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2); static inline struct aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm) { - unsigned long align = AESNI_ALIGN; - - if (align <= crypto_tfm_ctx_alignment()) - align = 1; - return PTR_ALIGN(crypto_aead_ctx(tfm), align); + return aes_align_addr(crypto_aead_ctx(tfm)); } static inline struct generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm) { - unsigned long align = AESNI_ALIGN; - - if (align <= crypto_tfm_ctx_alignment()) - align = 1; - return PTR_ALIGN(crypto_aead_ctx(tfm), align); + return aes_align_addr(crypto_aead_ctx(tfm)); } #endif static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx) { - unsigned long addr = (unsigned long)raw_ctx; - unsigned long align = AESNI_ALIGN; + return aes_align_addr(raw_ctx); +} - if (align <= crypto_tfm_ctx_alignment()) - align = 1; - return (struct crypto_aes_ctx *)ALIGN(addr, align); +static inline struct aesni_xts_ctx *aes_xts_ctx(struct crypto_skcipher *tfm) +{ + return aes_align_addr(crypto_skcipher_ctx(tfm)); } static int aes_set_key_common(struct crypto_aes_ctx *ctx, @@ -881,7 +880,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req) static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { - struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm); int err; err = xts_verify_key(tfm, key, keylen); @@ -891,19 +890,18 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key, keylen /= 2; /* first half of xts-key 
is for crypt */ - err = aes_set_key_common(aes_ctx(ctx->raw_crypt_ctx), key, keylen); + err = aes_set_key_common(&ctx->crypt_ctx, key, keylen); if (err) return err; /* second half of xts-key is for tweak */ - return aes_set_key_common(aes_ctx(ctx->raw_tweak_ctx), key + keylen, - keylen); + return aes_set_key_common(&ctx->tweak_ctx, key + keylen, keylen); } static int xts_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm); int tail = req->cryptlen % AES_BLOCK_SIZE; struct skcipher_request subreq; struct skcipher_walk walk; @@ -939,7 +937,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt) kernel_fpu_begin(); /* calculate first value of T */ - aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv); + aesni_enc(&ctx->tweak_ctx, walk.iv, walk.iv); while (walk.nbytes > 0) { int nbytes = walk.nbytes; @@ -948,11 +946,11 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt) nbytes &= ~(AES_BLOCK_SIZE - 1); if (encrypt) - aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx), + aesni_xts_encrypt(&ctx->crypt_ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes, walk.iv); else - aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx), + aesni_xts_decrypt(&ctx->crypt_ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes, walk.iv); kernel_fpu_end(); @@ -980,11 +978,11 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt) kernel_fpu_begin(); if (encrypt) - aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx), + aesni_xts_encrypt(&ctx->crypt_ctx, walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes, walk.iv); else - aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx), + aesni_xts_decrypt(&ctx->crypt_ctx, walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes, walk.iv); kernel_fpu_end(); diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c index 46b036204ed9..c3a872f4d6a7 100644 --- a/arch/x86/crypto/nhpoly1305-avx2-glue.c +++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c @@ -34,6 +34,14 @@ static int nhpoly1305_avx2_update(struct shash_desc *desc, return 0; } +static int nhpoly1305_avx2_digest(struct shash_desc *desc, + const u8 *src, unsigned int srclen, u8 *out) +{ + return crypto_nhpoly1305_init(desc) ?: + nhpoly1305_avx2_update(desc, src, srclen) ?: + crypto_nhpoly1305_final(desc, out); +} + static struct shash_alg nhpoly1305_alg = { .base.cra_name = "nhpoly1305", .base.cra_driver_name = "nhpoly1305-avx2", @@ -44,6 +52,7 @@ static struct shash_alg nhpoly1305_alg = { .init = crypto_nhpoly1305_init, .update = nhpoly1305_avx2_update, .final = crypto_nhpoly1305_final, + .digest = nhpoly1305_avx2_digest, .setkey = crypto_nhpoly1305_setkey, .descsize = sizeof(struct nhpoly1305_state), }; diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c index 4a4970d75107..a268a8439a5c 100644 --- a/arch/x86/crypto/nhpoly1305-sse2-glue.c +++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c @@ -34,6 +34,14 @@ static int nhpoly1305_sse2_update(struct shash_desc *desc, return 0; } +static int nhpoly1305_sse2_digest(struct shash_desc *desc, + const u8 *src, unsigned int srclen, u8 *out) +{ + return crypto_nhpoly1305_init(desc) ?: + nhpoly1305_sse2_update(desc, src, srclen) ?: + crypto_nhpoly1305_final(desc, out); +} + static struct shash_alg nhpoly1305_alg = { .base.cra_name = "nhpoly1305", .base.cra_driver_name = "nhpoly1305-sse2", @@ -44,6 +52,7 @@ static struct shash_alg nhpoly1305_alg 
= { .init = crypto_nhpoly1305_init, .update = nhpoly1305_sse2_update, .final = crypto_nhpoly1305_final, + .digest = nhpoly1305_sse2_digest, .setkey = crypto_nhpoly1305_setkey, .descsize = sizeof(struct nhpoly1305_state), }; diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c index 44340a1139e0..959afa705e95 100644 --- a/arch/x86/crypto/sha1_ssse3_glue.c +++ b/arch/x86/crypto/sha1_ssse3_glue.c @@ -24,8 +24,17 @@ #include <linux/types.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> +#include <asm/cpu_device_id.h> #include <asm/simd.h> +static const struct x86_cpu_id module_cpu_ids[] = { + X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL), + X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL), + X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids); + static int sha1_update(struct shash_desc *desc, const u8 *data, unsigned int len, sha1_block_fn *sha1_xform) { @@ -301,6 +310,9 @@ static inline void unregister_sha1_ni(void) { } static int __init sha1_ssse3_mod_init(void) { + if (!x86_match_cpu(module_cpu_ids)) + return -ENODEV; + if (register_sha1_ssse3()) goto fail; diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c index 3a5f6be7dbba..4c0383a90e11 100644 --- a/arch/x86/crypto/sha256_ssse3_glue.c +++ b/arch/x86/crypto/sha256_ssse3_glue.c @@ -38,11 +38,20 @@ #include <crypto/sha2.h> #include <crypto/sha256_base.h> #include <linux/string.h> +#include <asm/cpu_device_id.h> #include <asm/simd.h> asmlinkage void sha256_transform_ssse3(struct sha256_state *state, const u8 *data, int blocks); +static const struct x86_cpu_id module_cpu_ids[] = { + X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL), + X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL), + X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids); + static int _sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len, sha256_block_fn *sha256_xform) { @@ -98,12 +107,20 @@ static int sha256_ssse3_final(struct shash_desc *desc, u8 *out) return sha256_ssse3_finup(desc, NULL, 0, out); } +static int sha256_ssse3_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return sha256_base_init(desc) ?: + sha256_ssse3_finup(desc, data, len, out); +} + static struct shash_alg sha256_ssse3_algs[] = { { .digestsize = SHA256_DIGEST_SIZE, .init = sha256_base_init, .update = sha256_ssse3_update, .final = sha256_ssse3_final, .finup = sha256_ssse3_finup, + .digest = sha256_ssse3_digest, .descsize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", @@ -163,12 +180,20 @@ static int sha256_avx_final(struct shash_desc *desc, u8 *out) return sha256_avx_finup(desc, NULL, 0, out); } +static int sha256_avx_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return sha256_base_init(desc) ?: + sha256_avx_finup(desc, data, len, out); +} + static struct shash_alg sha256_avx_algs[] = { { .digestsize = SHA256_DIGEST_SIZE, .init = sha256_base_init, .update = sha256_avx_update, .final = sha256_avx_final, .finup = sha256_avx_finup, + .digest = sha256_avx_digest, .descsize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", @@ -239,12 +264,20 @@ static int sha256_avx2_final(struct shash_desc *desc, u8 *out) return sha256_avx2_finup(desc, NULL, 0, out); } +static int sha256_avx2_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return sha256_base_init(desc) ?: + sha256_avx2_finup(desc, data, len, out); +} + static struct 
shash_alg sha256_avx2_algs[] = { { .digestsize = SHA256_DIGEST_SIZE, .init = sha256_base_init, .update = sha256_avx2_update, .final = sha256_avx2_final, .finup = sha256_avx2_finup, + .digest = sha256_avx2_digest, .descsize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", @@ -314,12 +347,20 @@ static int sha256_ni_final(struct shash_desc *desc, u8 *out) return sha256_ni_finup(desc, NULL, 0, out); } +static int sha256_ni_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return sha256_base_init(desc) ?: + sha256_ni_finup(desc, data, len, out); +} + static struct shash_alg sha256_ni_algs[] = { { .digestsize = SHA256_DIGEST_SIZE, .init = sha256_base_init, .update = sha256_ni_update, .final = sha256_ni_final, .finup = sha256_ni_finup, + .digest = sha256_ni_digest, .descsize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", @@ -366,6 +407,9 @@ static inline void unregister_sha256_ni(void) { } static int __init sha256_ssse3_mod_init(void) { + if (!x86_match_cpu(module_cpu_ids)) + return -ENODEV; + if (register_sha256_ssse3()) goto fail; |
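The sha1/sha256 SSSE3 glue hunks above also add CPU-feature based module autoloading: the module exposes an x86cpu device table and bails out of init with -ENODEV on CPUs that lack all of the required features. Reassembled from the flattened diff, with the rest of the init routine elided:

	#include <asm/cpu_device_id.h>

	static const struct x86_cpu_id module_cpu_ids[] = {
		X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
		X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
		X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
		{}
	};
	MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);

	static int __init sha256_ssse3_mod_init(void)
	{
		if (!x86_match_cpu(module_cpu_ids))
			return -ENODEV;
		/* ... register the SSSE3/AVX/AVX2/SHA-NI variants as before ... */
		return 0;
	}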