Diffstat (limited to 'crypto')
-rw-r--r--  crypto/842.c | 76
-rw-r--r--  crypto/Kconfig | 129
-rw-r--r--  crypto/Makefile | 31
-rw-r--r--  crypto/acompress.c | 430
-rw-r--r--  crypto/adiantum.c | 2
-rw-r--r--  crypto/aead.c | 6
-rw-r--r--  crypto/aegis128-core.c | 11
-rw-r--r--  crypto/aes_generic.c | 2
-rw-r--r--  crypto/ahash.c | 590
-rw-r--r--  crypto/akcipher.c | 1
-rw-r--r--  crypto/algapi.c | 85
-rw-r--r--  crypto/algboss.c | 10
-rw-r--r--  crypto/algif_aead.c | 101
-rw-r--r--  crypto/algif_hash.c | 4
-rw-r--r--  crypto/ansi_cprng.c | 2
-rw-r--r--  crypto/anubis.c | 2
-rw-r--r--  crypto/api.c | 66
-rw-r--r--  crypto/arc4.c | 2
-rw-r--r--  crypto/aria_generic.c | 2
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 45
-rw-r--r--  crypto/asymmetric_keys/verify_pefile.c | 8
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c | 3
-rw-r--r--  crypto/async_tx/async_xor.c | 26
-rw-r--r--  crypto/authenc.c | 34
-rw-r--r--  crypto/authencesn.c | 40
-rw-r--r--  crypto/blake2b_generic.c | 33
-rw-r--r--  crypto/blowfish_generic.c | 2
-rw-r--r--  crypto/bpf_crypto_skcipher.c | 1
-rw-r--r--  crypto/camellia_generic.c | 2
-rw-r--r--  crypto/cast5_generic.c | 2
-rw-r--r--  crypto/cast6_generic.c | 2
-rw-r--r--  crypto/cbc.c | 2
-rw-r--r--  crypto/ccm.c | 65
-rw-r--r--  crypto/chacha.c | 260
-rw-r--r--  crypto/chacha20poly1305.c | 321
-rw-r--r--  crypto/chacha_generic.c | 139
-rw-r--r--  crypto/cmac.c | 94
-rw-r--r--  crypto/compress.c | 32
-rw-r--r--  crypto/compress.h | 2
-rw-r--r--  crypto/crc32.c (renamed from crypto/crc32_generic.c) | 2
-rw-r--r--  crypto/crc32c.c (renamed from crypto/crc32c_generic.c) | 10
-rw-r--r--  crypto/crc64_rocksoft_generic.c | 89
-rw-r--r--  crypto/crct10dif_generic.c | 168
-rw-r--r--  crypto/cryptd.c | 2
-rw-r--r--  crypto/crypto_engine.c | 31
-rw-r--r--  crypto/crypto_null.c | 90
-rw-r--r--  crypto/crypto_user.c | 16
-rw-r--r--  crypto/ctr.c | 12
-rw-r--r--  crypto/cts.c | 2
-rw-r--r--  crypto/curve25519-generic.c | 2
-rw-r--r--  crypto/deflate.c | 395
-rw-r--r--  crypto/des_generic.c | 2
-rw-r--r--  crypto/dh.c | 2
-rw-r--r--  crypto/drbg.c | 2
-rw-r--r--  crypto/ecb.c | 2
-rw-r--r--  crypto/ecc.c | 2
-rw-r--r--  crypto/ecdh.c | 2
-rw-r--r--  crypto/ecdsa-p1363.c | 8
-rw-r--r--  crypto/ecdsa-x962.c | 7
-rw-r--r--  crypto/ecdsa.c | 4
-rw-r--r--  crypto/echainiv.c | 20
-rw-r--r--  crypto/ecrdsa.c | 2
-rw-r--r--  crypto/essiv.c | 8
-rw-r--r--  crypto/fcrypt.c | 2
-rw-r--r--  crypto/fips.c | 2
-rw-r--r--  crypto/gcm.c | 43
-rw-r--r--  crypto/geniv.c | 13
-rw-r--r--  crypto/ghash-generic.c | 58
-rw-r--r--  crypto/hctr2.c | 2
-rw-r--r--  crypto/hkdf.c | 573
-rw-r--r--  crypto/hmac.c | 398
-rw-r--r--  crypto/internal.h | 27
-rw-r--r--  crypto/kdf_sp800108.c | 2
-rw-r--r--  crypto/khazad.c | 2
-rw-r--r--  crypto/kpp.c | 1
-rw-r--r--  crypto/krb5/Kconfig | 26
-rw-r--r--  crypto/krb5/Makefile | 18
-rw-r--r--  crypto/krb5/internal.h | 247
-rw-r--r--  crypto/krb5/krb5_api.c | 452
-rw-r--r--  crypto/krb5/krb5_kdf.c | 145
-rw-r--r--  crypto/krb5/rfc3961_simplified.c | 793
-rw-r--r--  crypto/krb5/rfc3962_aes.c | 115
-rw-r--r--  crypto/krb5/rfc6803_camellia.c | 237
-rw-r--r--  crypto/krb5/rfc8009_aes2.c | 362
-rw-r--r--  crypto/krb5/selftest.c | 544
-rw-r--r--  crypto/krb5/selftest_data.c | 291
-rw-r--r--  crypto/krb5enc.c | 504
-rw-r--r--  crypto/lrw.c | 8
-rw-r--r--  crypto/lskcipher.c | 1
-rw-r--r--  crypto/lz4.c | 71
-rw-r--r--  crypto/lz4hc.c | 76
-rw-r--r--  crypto/lzo-rle.c | 80
-rw-r--r--  crypto/lzo.c | 80
-rw-r--r--  crypto/md4.c | 2
-rw-r--r--  crypto/md5.c | 104
-rw-r--r--  crypto/michael_mic.c | 2
-rw-r--r--  crypto/nhpoly1305.c | 2
-rw-r--r--  crypto/pcbc.c | 30
-rw-r--r--  crypto/pcrypt.c | 2
-rw-r--r--  crypto/poly1305_generic.c | 149
-rw-r--r--  crypto/polyval-generic.c | 118
-rw-r--r--  crypto/proc.c | 3
-rw-r--r--  crypto/rmd160.c | 90
-rw-r--r--  crypto/rng.c | 1
-rw-r--r--  crypto/rsa.c | 2
-rw-r--r--  crypto/rsassa-pkcs1.c | 4
-rw-r--r--  crypto/scatterwalk.c | 354
-rw-r--r--  crypto/scompress.c | 295
-rw-r--r--  crypto/seed.c | 2
-rw-r--r--  crypto/seqiv.c | 19
-rw-r--r--  crypto/serpent_generic.c | 2
-rw-r--r--  crypto/sha1_generic.c | 35
-rw-r--r--  crypto/sha256.c | 283
-rw-r--r--  crypto/sha256_generic.c | 110
-rw-r--r--  crypto/sha3_generic.c | 101
-rw-r--r--  crypto/sha512_generic.c | 52
-rw-r--r--  crypto/shash.c | 277
-rw-r--r--  crypto/sig.c | 10
-rw-r--r--  crypto/skcipher.c | 353
-rw-r--r--  crypto/sm3.c | 246
-rw-r--r--  crypto/sm3_generic.c | 33
-rw-r--r--  crypto/sm4_generic.c | 2
-rw-r--r--  crypto/streebog_generic.c | 73
-rw-r--r--  crypto/tcrypt.c | 16
-rw-r--r--  crypto/tcrypt.h | 4
-rw-r--r--  crypto/tea.c | 2
-rw-r--r--  crypto/testmgr.c | 388
-rw-r--r--  crypto/testmgr.h | 942
-rw-r--r--  crypto/twofish_generic.c | 2
-rw-r--r--  crypto/wp512.c | 123
-rw-r--r--  crypto/xcbc.c | 94
-rw-r--r--  crypto/xctr.c | 4
-rw-r--r--  crypto/xts.c | 8
-rw-r--r--  crypto/xxhash_generic.c | 2
-rw-r--r--  crypto/zstd.c | 62
135 files changed, 8341 insertions, 4780 deletions
diff --git a/crypto/842.c b/crypto/842.c
index e59e54d76960..8c257c40e2b9 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -18,17 +18,12 @@
* drivers/crypto/nx/nx-842-crypto.c
*/
+#include <crypto/internal/scompress.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/crypto.h>
#include <linux/sw842.h>
-#include <crypto/internal/scompress.h>
-
-struct crypto842_ctx {
- void *wmem; /* working memory for compress */
-};
-static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
+static void *crypto842_alloc_ctx(void)
{
void *ctx;
@@ -39,38 +34,11 @@ static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int crypto842_init(struct crypto_tfm *tfm)
-{
- struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->wmem = crypto842_alloc_ctx(NULL);
- if (IS_ERR(ctx->wmem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void crypto842_free_ctx(void *ctx)
{
kfree(ctx);
}
-static void crypto842_exit(struct crypto_tfm *tfm)
-{
- struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto842_free_ctx(NULL, ctx->wmem);
-}
-
-static int crypto842_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return sw842_compress(src, slen, dst, dlen, ctx->wmem);
-}
-
static int crypto842_scompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
@@ -78,13 +46,6 @@ static int crypto842_scompress(struct crypto_scomp *tfm,
return sw842_compress(src, slen, dst, dlen, ctx);
}
-static int crypto842_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return sw842_decompress(src, slen, dst, dlen);
-}
-
static int crypto842_sdecompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
@@ -92,20 +53,6 @@ static int crypto842_sdecompress(struct crypto_scomp *tfm,
return sw842_decompress(src, slen, dst, dlen);
}
-static struct crypto_alg alg = {
- .cra_name = "842",
- .cra_driver_name = "842-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct crypto842_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = crypto842_init,
- .cra_exit = crypto842_exit,
- .cra_u = { .compress = {
- .coa_compress = crypto842_compress,
- .coa_decompress = crypto842_decompress } }
-};
-
static struct scomp_alg scomp = {
.alloc_ctx = crypto842_alloc_ctx,
.free_ctx = crypto842_free_ctx,
@@ -121,25 +68,12 @@ static struct scomp_alg scomp = {
static int __init crypto842_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
-subsys_initcall(crypto842_mod_init);
+module_init(crypto842_mod_init);
static void __exit crypto842_mod_exit(void)
{
- crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
module_exit(crypto842_mod_exit);
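
[Editor's note] The hunk above removes the legacy crypto_comp ("compress") interface from the 842 driver and leaves only the synchronous-compression (scomp) registration. A condensed sketch of the resulting driver shape (not the literal file; the base fields and error handling are abbreviated here):

/* Condensed sketch of the scomp-only 842 driver left after this patch. */
#include <crypto/internal/scompress.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sw842.h>

static void *crypto842_alloc_ctx(void)
{
	/* Working memory for sw842_compress(); ERR_PTR on failure. */
	void *ctx = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);

	return ctx ?: ERR_PTR(-ENOMEM);
}

static void crypto842_free_ctx(void *ctx)
{
	kfree(ctx);
}

static int crypto842_scompress(struct crypto_scomp *tfm,
			       const u8 *src, unsigned int slen,
			       u8 *dst, unsigned int *dlen, void *ctx)
{
	return sw842_compress(src, slen, dst, dlen, ctx);
}

static int crypto842_sdecompress(struct crypto_scomp *tfm,
				 const u8 *src, unsigned int slen,
				 u8 *dst, unsigned int *dlen, void *ctx)
{
	return sw842_decompress(src, slen, dst, dlen);
}

static struct scomp_alg scomp = {
	.alloc_ctx	= crypto842_alloc_ctx,
	.free_ctx	= crypto842_free_ctx,
	.compress	= crypto842_scompress,
	.decompress	= crypto842_sdecompress,
	.base		= {
		.cra_name	= "842",
		.cra_priority	= 100,	/* remaining base fields omitted */
		.cra_module	= THIS_MODULE,
	},
};

static int __init crypto842_mod_init(void)
{
	return crypto_register_scomp(&scomp);
}
module_init(crypto842_mod_init);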
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 74ae5f52b784..e1cfd0d4cc8f 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -25,7 +25,7 @@ menu "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
- depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
+ depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && CRYPTO_SELFTESTS
depends on (MODULE_SIG || !MODULES)
help
This option enables the fips boot option which is
@@ -141,12 +141,19 @@ config CRYPTO_ACOMP
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
+config CRYPTO_HKDF
+ tristate
+ select CRYPTO_SHA256 if CRYPTO_SELFTESTS
+ select CRYPTO_SHA512 if CRYPTO_SELFTESTS
+ select CRYPTO_HASH2
+
config CRYPTO_MANAGER
- tristate "Cryptographic algorithm manager"
+ tristate
+ default CRYPTO_ALGAPI if CRYPTO_SELFTESTS
select CRYPTO_MANAGER2
help
- Create default cryptographic template instantiations such as
- cbc(aes).
+ This provides the support for instantiating templates such as
+ cbc(aes), and the support for the crypto self-tests.
config CRYPTO_MANAGER2
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
@@ -167,35 +174,44 @@ config CRYPTO_USER
Userspace configuration for cryptographic instantiations such as
cbc(aes).
-config CRYPTO_MANAGER_DISABLE_TESTS
- bool "Disable run-time self tests"
- default y
+config CRYPTO_SELFTESTS
+ bool "Enable cryptographic self-tests"
+ depends on EXPERT
help
- Disable run-time self tests that normally take place at
- algorithm registration.
+ Enable the cryptographic self-tests.
-config CRYPTO_MANAGER_EXTRA_TESTS
- bool "Enable extra run-time crypto self tests"
- depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS && CRYPTO_MANAGER
+ The cryptographic self-tests run at boot time, or at algorithm
+ registration time if algorithms are dynamically loaded later.
+
+ There are two main use cases for these tests:
+
+ - Development and pre-release testing. In this case, also enable
+ CRYPTO_SELFTESTS_FULL to get the full set of tests. All crypto code
+ in the kernel is expected to pass the full set of tests.
+
+ - Production kernels, to help prevent buggy drivers from being used
+ and/or meet FIPS 140-3 pre-operational testing requirements. In
+ this case, enable CRYPTO_SELFTESTS but not CRYPTO_SELFTESTS_FULL.
+
+config CRYPTO_SELFTESTS_FULL
+ bool "Enable the full set of cryptographic self-tests"
+ depends on CRYPTO_SELFTESTS
help
- Enable extra run-time self tests of registered crypto algorithms,
- including randomized fuzz tests.
+ Enable the full set of cryptographic self-tests for each algorithm.
+
+ The full set of tests should be enabled for development and
+ pre-release testing, but not in production kernels.
- This is intended for developer use only, as these tests take much
- longer to run than the normal self tests.
+ All crypto code in the kernel is expected to pass the full tests.
config CRYPTO_NULL
tristate "Null algorithms"
- select CRYPTO_NULL2
+ select CRYPTO_ALGAPI
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
help
These are 'Null' algorithms, used by IPsec, which do nothing.
-config CRYPTO_NULL2
- tristate
- select CRYPTO_ALGAPI2
- select CRYPTO_SKCIPHER2
- select CRYPTO_HASH2
-
config CRYPTO_PCRYPT
tristate "Parallel crypto engine"
depends on SMP
@@ -222,18 +238,32 @@ config CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
select CRYPTO_HASH
- select CRYPTO_NULL
help
Authenc: Combined mode wrapper for IPsec.
This is required for IPSec ESP (XFRM_ESP).
-config CRYPTO_TEST
- tristate "Testing module"
+config CRYPTO_KRB5ENC
+ tristate "Kerberos 5 combined hash+cipher support"
+ select CRYPTO_AEAD
+ select CRYPTO_SKCIPHER
+ select CRYPTO_MANAGER
+ select CRYPTO_HASH
+ help
+ Combined hash and cipher support for Kerberos 5 RFC3961 simplified
+ profile. This is required for Kerberos 5-style encryption, used by
+ sunrpc/NFS and rxrpc/AFS.
+
+config CRYPTO_BENCHMARK
+ tristate "Crypto benchmarking module"
depends on m || EXPERT
select CRYPTO_MANAGER
help
- Quick & dirty crypto test module.
+ Quick & dirty crypto benchmarking module.
+
+ This is mainly intended for use by people developing cryptographic
+ algorithms in the kernel. It should not be enabled in production
+ kernels.
config CRYPTO_SIMD
tristate
@@ -318,6 +348,7 @@ config CRYPTO_CURVE25519
tristate "Curve25519"
select CRYPTO_KPP
select CRYPTO_LIB_CURVE25519_GENERIC
+ select CRYPTO_LIB_CURVE25519_INTERNAL
help
Curve25519 elliptic curve (RFC7748)
@@ -615,6 +646,7 @@ config CRYPTO_ARC4
config CRYPTO_CHACHA20
tristate "ChaCha"
+ select CRYPTO_LIB_CHACHA
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_SKCIPHER
help
@@ -764,8 +796,8 @@ config CRYPTO_AEGIS128_SIMD
config CRYPTO_CHACHA20POLY1305
tristate "ChaCha20-Poly1305"
select CRYPTO_CHACHA20
- select CRYPTO_POLY1305
select CRYPTO_AEAD
+ select CRYPTO_LIB_POLY1305
select CRYPTO_MANAGER
help
ChaCha20 stream cipher and Poly1305 authenticator combined
@@ -786,7 +818,6 @@ config CRYPTO_GCM
select CRYPTO_CTR
select CRYPTO_AEAD
select CRYPTO_GHASH
- select CRYPTO_NULL
select CRYPTO_MANAGER
help
GCM (Galois/Counter Mode) authenticated encryption mode and GMAC
@@ -797,7 +828,6 @@ config CRYPTO_GCM
config CRYPTO_GENIV
tristate
select CRYPTO_AEAD
- select CRYPTO_NULL
select CRYPTO_MANAGER
select CRYPTO_RNG_DEFAULT
@@ -933,17 +963,6 @@ config CRYPTO_POLYVAL
This is used in HCTR2. It is not a general-purpose
cryptographic hash function.
-config CRYPTO_POLY1305
- tristate "Poly1305"
- select CRYPTO_HASH
- select CRYPTO_LIB_POLY1305_GENERIC
- help
- Poly1305 authenticator algorithm (RFC7539)
-
- Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
- It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
- in IETF protocols. This is the portable C implementation of Poly1305.
-
config CRYPTO_RMD160
tristate "RIPEMD-160"
select CRYPTO_HASH
@@ -973,6 +992,7 @@ config CRYPTO_SHA256
tristate "SHA-224 and SHA-256"
select CRYPTO_HASH
select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA256_GENERIC
help
SHA-224 and SHA-256 secure hash algorithms (FIPS 180, ISO/IEC 10118-3)
@@ -991,13 +1011,10 @@ config CRYPTO_SHA3
help
SHA-3 secure hash algorithms (FIPS 202, ISO/IEC 10118-3)
-config CRYPTO_SM3
- tristate
-
config CRYPTO_SM3_GENERIC
tristate "SM3 (ShangMi 3)"
select CRYPTO_HASH
- select CRYPTO_SM3
+ select CRYPTO_LIB_SM3
help
SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012, ISO/IEC 10118-3)
@@ -1081,26 +1098,6 @@ config CRYPTO_CRC32
Used by RoCEv2 and f2fs.
-config CRYPTO_CRCT10DIF
- tristate "CRCT10DIF"
- select CRYPTO_HASH
- select CRC_T10DIF
- help
- CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
-
- CRC algorithm used by the SCSI Block Commands standard.
-
-config CRYPTO_CRC64_ROCKSOFT
- tristate "CRC64 based on Rocksoft Model algorithm"
- depends on CRC64
- select CRYPTO_HASH
- help
- CRC64 CRC algorithm based on the Rocksoft Model CRC Algorithm
-
- Used by the NVMe implementation of T10 DIF (BLK_DEV_INTEGRITY)
-
- See https://zlib.net/crc_v3.txt
-
endmenu
menu "Compression"
@@ -1405,7 +1402,6 @@ config CRYPTO_USER_API_AEAD
depends on NET
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
- select CRYPTO_NULL
select CRYPTO_USER_API
help
Enable the userspace interface for AEAD cipher algorithms.
@@ -1460,5 +1456,6 @@ endif
source "drivers/crypto/Kconfig"
source "crypto/asymmetric_keys/Kconfig"
source "certs/Kconfig"
+source "crypto/krb5/Kconfig"
endif # if CRYPTO
diff --git a/crypto/Makefile b/crypto/Makefile
index f67e853c4690..017df3a2e4bb 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-y := api.o cipher.o compress.o
+crypto-y := api.o cipher.o
obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o
obj-$(CONFIG_CRYPTO_FIPS) += fips.o
@@ -34,6 +34,7 @@ obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
obj-$(CONFIG_CRYPTO_SIG2) += sig.o
obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
+obj-$(CONFIG_CRYPTO_HKDF) += hkdf.o
dh_generic-y := dh.o
dh_generic-y += dh_helper.o
@@ -70,15 +71,15 @@ obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
-obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
+obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
obj-$(CONFIG_CRYPTO_MD5) += md5.o
obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
-obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
+obj-$(CONFIG_CRYPTO_SHA256) += sha256.o
+CFLAGS_sha256.o += -DARCH=$(ARCH)
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
-obj-$(CONFIG_CRYPTO_SM3) += sm3.o
obj-$(CONFIG_CRYPTO_SM3_GENERIC) += sm3_generic.o
obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
@@ -147,18 +148,18 @@ obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
obj-$(CONFIG_CRYPTO_ARIA) += aria_generic.o
-obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
-obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
+obj-$(CONFIG_CRYPTO_CHACHA20) += chacha.o
+CFLAGS_chacha.o += -DARCH=$(ARCH)
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
-obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
-obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
-CFLAGS_crc32c_generic.o += -DARCH=$(ARCH)
-CFLAGS_crc32_generic.o += -DARCH=$(ARCH)
-obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_generic.o
-CFLAGS_crct10dif_generic.o += -DARCH=$(ARCH)
-obj-$(CONFIG_CRYPTO_CRC64_ROCKSOFT) += crc64_rocksoft_generic.o
+obj-$(CONFIG_CRYPTO_CRC32C) += crc32c-cryptoapi.o
+crc32c-cryptoapi-y := crc32c.o
+CFLAGS_crc32c.o += -DARCH=$(ARCH)
+obj-$(CONFIG_CRYPTO_CRC32) += crc32-cryptoapi.o
+crc32-cryptoapi-y := crc32.o
+CFLAGS_crc32.o += -DARCH=$(ARCH)
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
+obj-$(CONFIG_CRYPTO_KRB5ENC) += krb5enc.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o
obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
@@ -173,7 +174,7 @@ KASAN_SANITIZE_jitterentropy.o = n
UBSAN_SANITIZE_jitterentropy.o = n
jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o
obj-$(CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE) += jitterentropy-testing.o
-obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
+obj-$(CONFIG_CRYPTO_BENCHMARK) += tcrypt.o
obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
obj-$(CONFIG_CRYPTO_POLYVAL) += polyval-generic.o
obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
@@ -212,3 +213,5 @@ obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
# Key derivation function
#
obj-$(CONFIG_CRYPTO_KDF800108_CTR) += kdf_sp800108.o
+
+obj-$(CONFIG_CRYPTO_KRB5) += krb5/
diff --git a/crypto/acompress.c b/crypto/acompress.c
index 6fdf0ff9f3c0..be28cbfd22e3 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -8,21 +8,36 @@
*/
#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
-#include <linux/errno.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include <net/netlink.h>
#include "compress.h"
struct crypto_scomp;
+enum {
+ ACOMP_WALK_SLEEP = 1 << 0,
+ ACOMP_WALK_SRC_LINEAR = 1 << 1,
+ ACOMP_WALK_DST_LINEAR = 1 << 2,
+};
+
static const struct crypto_type crypto_acomp_type;
+static void acomp_reqchain_done(void *data, int err);
+
static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
return container_of(alg, struct acomp_alg, calg.base);
@@ -58,29 +73,54 @@ static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
struct acomp_alg *alg = crypto_acomp_alg(acomp);
- alg->exit(acomp);
+ if (alg->exit)
+ alg->exit(acomp);
+
+ if (acomp_is_async(acomp))
+ crypto_free_acomp(crypto_acomp_fb(acomp));
}
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
struct acomp_alg *alg = crypto_acomp_alg(acomp);
+ struct crypto_acomp *fb = NULL;
+ int err;
if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
return crypto_init_scomp_ops_async(tfm);
+ if (acomp_is_async(acomp)) {
+ fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+
+ err = -EINVAL;
+ if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
+ goto out_free_fb;
+
+ tfm->fb = crypto_acomp_tfm(fb);
+ }
+
acomp->compress = alg->compress;
acomp->decompress = alg->decompress;
- acomp->dst_free = alg->dst_free;
- acomp->reqsize = alg->reqsize;
+ acomp->reqsize = alg->base.cra_reqsize;
- if (alg->exit)
- acomp->base.exit = crypto_acomp_exit_tfm;
+ acomp->base.exit = crypto_acomp_exit_tfm;
+
+ if (!alg->init)
+ return 0;
- if (alg->init)
- return alg->init(acomp);
+ err = alg->init(acomp);
+ if (err)
+ goto out_free_fb;
return 0;
+
+out_free_fb:
+ crypto_free_acomp(fb);
+ return err;
}
static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
@@ -106,6 +146,7 @@ static const struct crypto_type crypto_acomp_type = {
.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
.tfmsize = offsetof(struct crypto_acomp, base),
+ .algsize = offsetof(struct acomp_alg, base),
};
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
@@ -123,35 +164,146 @@ struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
-struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
+static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
+{
+ struct acomp_req_chain *state = &req->chain;
+
+ state->compl = req->base.complete;
+ state->data = req->base.data;
+ req->base.complete = cplt;
+ req->base.data = state;
+}
+
+static void acomp_restore_req(struct acomp_req *req)
{
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
- struct acomp_req *req;
+ struct acomp_req_chain *state = req->base.data;
- req = __acomp_request_alloc(acomp);
- if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
- return crypto_acomp_scomp_alloc_ctx(req);
+ req->base.complete = state->compl;
+ req->base.data = state->data;
+}
- return req;
+static void acomp_reqchain_virt(struct acomp_req *req)
+{
+ struct acomp_req_chain *state = &req->chain;
+ unsigned int slen = req->slen;
+ unsigned int dlen = req->dlen;
+
+ if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
+ acomp_request_set_src_dma(req, state->src, slen);
+ if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
+ acomp_request_set_dst_dma(req, state->dst, dlen);
}
-EXPORT_SYMBOL_GPL(acomp_request_alloc);
-void acomp_request_free(struct acomp_req *req)
+static void acomp_virt_to_sg(struct acomp_req *req)
{
- struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+ struct acomp_req_chain *state = &req->chain;
- if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
- crypto_acomp_scomp_free_ctx(req);
+ state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
+ CRYPTO_ACOMP_REQ_DST_VIRT);
+
+ if (acomp_request_src_isvirt(req)) {
+ unsigned int slen = req->slen;
+ const u8 *svirt = req->svirt;
+
+ state->src = svirt;
+ sg_init_one(&state->ssg, svirt, slen);
+ acomp_request_set_src_sg(req, &state->ssg, slen);
+ }
+
+ if (acomp_request_dst_isvirt(req)) {
+ unsigned int dlen = req->dlen;
+ u8 *dvirt = req->dvirt;
- if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
- acomp->dst_free(req->dst);
- req->dst = NULL;
+ state->dst = dvirt;
+ sg_init_one(&state->dsg, dvirt, dlen);
+ acomp_request_set_dst_sg(req, &state->dsg, dlen);
}
+}
+
+static int acomp_do_nondma(struct acomp_req *req, bool comp)
+{
+ ACOMP_FBREQ_ON_STACK(fbreq, req);
+ int err;
+
+ if (comp)
+ err = crypto_acomp_compress(fbreq);
+ else
+ err = crypto_acomp_decompress(fbreq);
+
+ req->dlen = fbreq->dlen;
+ return err;
+}
+
+static int acomp_do_one_req(struct acomp_req *req, bool comp)
+{
+ if (acomp_request_isnondma(req))
+ return acomp_do_nondma(req, comp);
+
+ acomp_virt_to_sg(req);
+ return comp ? crypto_acomp_reqtfm(req)->compress(req) :
+ crypto_acomp_reqtfm(req)->decompress(req);
+}
+
+static int acomp_reqchain_finish(struct acomp_req *req, int err)
+{
+ acomp_reqchain_virt(req);
+ acomp_restore_req(req);
+ return err;
+}
+
+static void acomp_reqchain_done(void *data, int err)
+{
+ struct acomp_req *req = data;
+ crypto_completion_t compl;
+
+ compl = req->chain.compl;
+ data = req->chain.data;
+
+ if (err == -EINPROGRESS)
+ goto notify;
+
+ err = acomp_reqchain_finish(req, err);
+
+notify:
+ compl(data, err);
+}
+
+static int acomp_do_req_chain(struct acomp_req *req, bool comp)
+{
+ int err;
+
+ acomp_save_req(req, acomp_reqchain_done);
+
+ err = acomp_do_one_req(req, comp);
+ if (err == -EBUSY || err == -EINPROGRESS)
+ return err;
+
+ return acomp_reqchain_finish(req, err);
+}
- __acomp_request_free(req);
+int crypto_acomp_compress(struct acomp_req *req)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (acomp_req_on_stack(req) && acomp_is_async(tfm))
+ return -EAGAIN;
+ if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
+ return crypto_acomp_reqtfm(req)->compress(req);
+ return acomp_do_req_chain(req, true);
}
-EXPORT_SYMBOL_GPL(acomp_request_free);
+EXPORT_SYMBOL_GPL(crypto_acomp_compress);
+
+int crypto_acomp_decompress(struct acomp_req *req)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (acomp_req_on_stack(req) && acomp_is_async(tfm))
+ return -EAGAIN;
+ if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
+ return crypto_acomp_reqtfm(req)->decompress(req);
+ return acomp_do_req_chain(req, false);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
void comp_prepare_alg(struct comp_alg_common *alg)
{
@@ -208,5 +360,229 @@ void crypto_unregister_acomps(struct acomp_alg *algs, int count)
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);
+static void acomp_stream_workfn(struct work_struct *work)
+{
+ struct crypto_acomp_streams *s =
+ container_of(work, struct crypto_acomp_streams, stream_work);
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ int cpu;
+
+ for_each_cpu(cpu, &s->stream_want) {
+ struct crypto_acomp_stream *ps;
+ void *ctx;
+
+ ps = per_cpu_ptr(streams, cpu);
+ if (ps->ctx)
+ continue;
+
+ ctx = s->alloc_ctx();
+ if (IS_ERR(ctx))
+ break;
+
+ spin_lock_bh(&ps->lock);
+ ps->ctx = ctx;
+ spin_unlock_bh(&ps->lock);
+
+ cpumask_clear_cpu(cpu, &s->stream_want);
+ }
+}
+
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
+{
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ void (*free_ctx)(void *);
+ int i;
+
+ s->streams = NULL;
+ if (!streams)
+ return;
+
+ cancel_work_sync(&s->stream_work);
+ free_ctx = s->free_ctx;
+
+ for_each_possible_cpu(i) {
+ struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i);
+
+ if (!ps->ctx)
+ continue;
+
+ free_ctx(ps->ctx);
+ }
+
+ free_percpu(streams);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_free_streams);
+
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
+{
+ struct crypto_acomp_stream __percpu *streams;
+ struct crypto_acomp_stream *ps;
+ unsigned int i;
+ void *ctx;
+
+ if (s->streams)
+ return 0;
+
+ streams = alloc_percpu(struct crypto_acomp_stream);
+ if (!streams)
+ return -ENOMEM;
+
+ ctx = s->alloc_ctx();
+ if (IS_ERR(ctx)) {
+ free_percpu(streams);
+ return PTR_ERR(ctx);
+ }
+
+ i = cpumask_first(cpu_possible_mask);
+ ps = per_cpu_ptr(streams, i);
+ ps->ctx = ctx;
+
+ for_each_possible_cpu(i) {
+ ps = per_cpu_ptr(streams, i);
+ spin_lock_init(&ps->lock);
+ }
+
+ s->streams = streams;
+
+ INIT_WORK(&s->stream_work, acomp_stream_workfn);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream)
+{
+ struct crypto_acomp_stream __percpu *streams = s->streams;
+ int cpu = raw_smp_processor_id();
+ struct crypto_acomp_stream *ps;
+
+ ps = per_cpu_ptr(streams, cpu);
+ spin_lock_bh(&ps->lock);
+ if (likely(ps->ctx))
+ return ps;
+ spin_unlock(&ps->lock);
+
+ cpumask_set_cpu(cpu, &s->stream_want);
+ schedule_work(&s->stream_work);
+
+ ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
+ spin_lock(&ps->lock);
+ return ps;
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_lock_stream_bh);
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used)
+{
+ walk->slen -= used;
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR))
+ scatterwalk_advance(&walk->in, used);
+ else
+ scatterwalk_done_src(&walk->in, used);
+
+ if ((walk->flags & ACOMP_WALK_SLEEP))
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_src);
+
+void acomp_walk_done_dst(struct acomp_walk *walk, int used)
+{
+ walk->dlen -= used;
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR))
+ scatterwalk_advance(&walk->out, used);
+ else
+ scatterwalk_done_dst(&walk->out, used);
+
+ if ((walk->flags & ACOMP_WALK_SLEEP))
+ cond_resched();
+}
+EXPORT_SYMBOL_GPL(acomp_walk_done_dst);
+
+int acomp_walk_next_src(struct acomp_walk *walk)
+{
+ unsigned int slen = walk->slen;
+ unsigned int max = UINT_MAX;
+
+ if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+ max = PAGE_SIZE;
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+ walk->in.__addr = (void *)(((u8 *)walk->in.sg) +
+ walk->in.offset);
+ return min(slen, max);
+ }
+
+ return slen ? scatterwalk_next(&walk->in, slen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_src);
+
+int acomp_walk_next_dst(struct acomp_walk *walk)
+{
+ unsigned int dlen = walk->dlen;
+ unsigned int max = UINT_MAX;
+
+ if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
+ max = PAGE_SIZE;
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+ walk->out.__addr = (void *)(((u8 *)walk->out.sg) +
+ walk->out.offset);
+ return min(dlen, max);
+ }
+
+ return dlen ? scatterwalk_next(&walk->out, dlen) : 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_next_dst);
+
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req, bool atomic)
+{
+ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
+
+ walk->slen = req->slen;
+ walk->dlen = req->dlen;
+
+ if (!walk->slen || !walk->dlen)
+ return -EINVAL;
+
+ walk->flags = 0;
+ if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
+ walk->flags |= ACOMP_WALK_SLEEP;
+ if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT))
+ walk->flags |= ACOMP_WALK_SRC_LINEAR;
+ if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT))
+ walk->flags |= ACOMP_WALK_DST_LINEAR;
+
+ if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
+ walk->in.sg = (void *)req->svirt;
+ walk->in.offset = 0;
+ } else
+ scatterwalk_start(&walk->in, src);
+ if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
+ walk->out.sg = (void *)req->dvirt;
+ walk->out.offset = 0;
+ } else
+ scatterwalk_start(&walk->out, dst);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acomp_walk_virt);
+
+struct acomp_req *acomp_request_clone(struct acomp_req *req,
+ size_t total, gfp_t gfp)
+{
+ struct acomp_req *nreq;
+
+ nreq = container_of(crypto_request_clone(&req->base, total, gfp),
+ struct acomp_req, base);
+ if (nreq == req)
+ return req;
+
+ if (req->src == &req->chain.ssg)
+ nreq->src = &nreq->chain.ssg;
+ if (req->dst == &req->chain.dsg)
+ nreq->dst = &nreq->chain.dsg;
+ return nreq;
+}
+EXPORT_SYMBOL_GPL(acomp_request_clone);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");
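
[Editor's note] The acompress.c changes above add per-CPU stream management (crypto_acomp_alloc_streams(), crypto_acomp_lock_stream_bh(), crypto_acomp_free_streams()) so compression drivers can share lazily allocated working memory per CPU. A hedged usage sketch built only from the calls and fields visible in this diff, with sw842 standing in as an example compressor:

#include <crypto/internal/acompress.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sw842.h>

static void *my_alloc_ctx(void)
{
	void *ctx = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);

	return ctx ?: ERR_PTR(-ENOMEM);
}

static void my_free_ctx(void *ctx)
{
	kfree(ctx);
}

static struct crypto_acomp_streams my_streams = {
	.alloc_ctx	= my_alloc_ctx,
	.free_ctx	= my_free_ctx,
};

static int my_compress_one(const u8 *src, unsigned int slen,
			   u8 *dst, unsigned int *dlen)
{
	struct crypto_acomp_stream *ps;
	int ret;

	/* Takes this CPU's stream; falls back to CPU0's if not allocated yet. */
	ps = crypto_acomp_lock_stream_bh(&my_streams);
	ret = sw842_compress(src, slen, dst, dlen, ps->ctx);
	spin_unlock_bh(&ps->lock);

	return ret;
}

/* Paired once in module init/exit:
 *	crypto_acomp_alloc_streams(&my_streams);
 *	crypto_acomp_free_streams(&my_streams);
 */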
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index c3ef583598b4..a6bca877c3c7 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -639,7 +639,7 @@ static void __exit adiantum_module_exit(void)
crypto_unregister_template(&adiantum_tmpl);
}
-subsys_initcall(adiantum_module_init);
+module_init(adiantum_module_init);
module_exit(adiantum_module_exit);
MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
diff --git a/crypto/aead.c b/crypto/aead.c
index cade532413bf..5d14b775036e 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <net/netlink.h>
#include "internal.h"
@@ -156,8 +157,8 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
struct aead_alg *aead = container_of(alg, struct aead_alg, base);
seq_printf(m, "type : aead\n");
- seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
- "yes" : "no");
+ seq_printf(m, "async : %s\n",
+ str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "ivsize : %u\n", aead->ivsize);
seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize);
@@ -185,6 +186,7 @@ static const struct crypto_type crypto_aead_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AEAD,
.tfmsize = offsetof(struct crypto_aead, base),
+ .algsize = offsetof(struct aead_alg, base),
};
int crypto_grab_aead(struct crypto_aead_spawn *spawn,
diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c
index 6cbff298722b..ca80d861345d 100644
--- a/crypto/aegis128-core.c
+++ b/crypto/aegis128-core.c
@@ -284,10 +284,9 @@ static void crypto_aegis128_process_ad(struct aegis_state *state,
scatterwalk_start(&walk, sg_src);
while (assoclen != 0) {
- unsigned int size = scatterwalk_clamp(&walk, assoclen);
+ unsigned int size = scatterwalk_next(&walk, assoclen);
+ const u8 *src = walk.addr;
unsigned int left = size;
- void *mapped = scatterwalk_map(&walk);
- const u8 *src = (const u8 *)mapped;
if (pos + size >= AEGIS_BLOCK_SIZE) {
if (pos > 0) {
@@ -308,9 +307,7 @@ static void crypto_aegis128_process_ad(struct aegis_state *state,
pos += left;
assoclen -= size;
- scatterwalk_unmap(mapped);
- scatterwalk_advance(&walk, size);
- scatterwalk_done(&walk, 0, assoclen);
+ scatterwalk_done_src(&walk, size);
}
if (pos > 0) {
@@ -569,7 +566,7 @@ static void __exit crypto_aegis128_module_exit(void)
crypto_unregister_aead(&crypto_aegis128_alg_generic);
}
-subsys_initcall(crypto_aegis128_module_init);
+module_init(crypto_aegis128_module_init);
module_exit(crypto_aegis128_module_exit);
MODULE_LICENSE("GPL");
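
[Editor's note] The aegis128 hunk above switches to the reworked scatterwalk API: scatterwalk_next() maps the next contiguous chunk and scatterwalk_done_src() unmaps and advances in a single call, replacing the old clamp/map/advance/done sequence. A minimal illustrative walker using only the calls visible in this diff (the consume() callback is hypothetical):

#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>

static void walk_src(struct scatterlist *sg, unsigned int len,
		     void (*consume)(const u8 *p, unsigned int n))
{
	struct scatter_walk walk;

	scatterwalk_start(&walk, sg);
	while (len) {
		/* Maps the next contiguous chunk, at most 'len' bytes. */
		unsigned int n = scatterwalk_next(&walk, len);

		consume(walk.addr, n);
		/* Unmaps and advances the walk in one call. */
		scatterwalk_done_src(&walk, n);
		len -= n;
	}
}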
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 3c66d425c97b..85d2e78c8ef2 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1311,7 +1311,7 @@ static void __exit aes_fini(void)
crypto_unregister_alg(&aes_alg);
}
-subsys_initcall(aes_init);
+module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
diff --git a/crypto/ahash.c b/crypto/ahash.c
index b08b89ec26ec..bc84a07c924c 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -16,11 +16,13 @@
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/sched.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <net/netlink.h>
#include "hash.h"
@@ -28,7 +30,7 @@
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
struct crypto_hash_walk {
- char *data;
+ const char *data;
unsigned int offset;
unsigned int flags;
@@ -40,6 +42,47 @@ struct crypto_hash_walk {
struct scatterlist *sg;
};
+static int ahash_def_finup(struct ahash_request *req);
+
+static inline bool crypto_ahash_block_only(struct crypto_ahash *tfm)
+{
+ return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+ CRYPTO_AHASH_ALG_BLOCK_ONLY;
+}
+
+static inline bool crypto_ahash_final_nonzero(struct crypto_ahash *tfm)
+{
+ return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
+}
+
+static inline bool crypto_ahash_need_fallback(struct crypto_ahash *tfm)
+{
+ return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+ CRYPTO_ALG_NEED_FALLBACK;
+}
+
+static inline void ahash_op_done(void *data, int err,
+ int (*finish)(struct ahash_request *, int))
+{
+ struct ahash_request *areq = data;
+ crypto_completion_t compl;
+
+ compl = areq->saved_complete;
+ data = areq->saved_data;
+ if (err == -EINPROGRESS)
+ goto out;
+
+ areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = finish(areq, err);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+
+out:
+ compl(data, err);
+}
+
static int hash_walk_next(struct crypto_hash_walk *walk)
{
unsigned int offset = walk->offset;
@@ -58,7 +101,7 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
sg = walk->sg;
walk->offset = sg->offset;
- walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+ walk->pg = nth_page(sg_page(walk->sg), (walk->offset >> PAGE_SHIFT));
walk->offset = offset_in_page(walk->offset);
walk->entrylen = sg->length;
@@ -73,20 +116,29 @@ static int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk)
{
walk->total = req->nbytes;
+ walk->entrylen = 0;
- if (!walk->total) {
- walk->entrylen = 0;
+ if (!walk->total)
return 0;
+
+ walk->flags = req->base.flags;
+
+ if (ahash_request_isvirt(req)) {
+ walk->data = req->svirt;
+ walk->total = 0;
+ return req->nbytes;
}
walk->sg = req->src;
- walk->flags = req->base.flags;
return hash_walk_new_entry(walk);
}
static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
+ if ((walk->flags & CRYPTO_AHASH_REQ_VIRT))
+ return err;
+
walk->data -= walk->offset;
kunmap_local(walk->data);
@@ -171,21 +223,36 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
unsigned int nbytes = req->nbytes;
struct scatterlist *sg;
unsigned int offset;
+ struct page *page;
+ const u8 *data;
int err;
- if (nbytes &&
- (sg = req->src, offset = sg->offset,
- nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
- void *data;
+ data = req->svirt;
+ if (!nbytes || ahash_request_isvirt(req))
+ return crypto_shash_digest(desc, data, nbytes, req->result);
- data = kmap_local_page(sg_page(sg));
- err = crypto_shash_digest(desc, data + offset, nbytes,
- req->result);
- kunmap_local(data);
- } else
- err = crypto_shash_init(desc) ?:
- shash_ahash_finup(req, desc);
+ sg = req->src;
+ if (nbytes > sg->length)
+ return crypto_shash_init(desc) ?:
+ shash_ahash_finup(req, desc);
+ page = sg_page(sg);
+ offset = sg->offset;
+ data = lowmem_page_address(page) + offset;
+ if (!IS_ENABLED(CONFIG_HIGHMEM))
+ return crypto_shash_digest(desc, data, nbytes, req->result);
+
+ page = nth_page(page, offset >> PAGE_SHIFT);
+ offset = offset_in_page(offset);
+
+ if (nbytes > (unsigned int)PAGE_SIZE - offset)
+ return crypto_shash_init(desc) ?:
+ shash_ahash_finup(req, desc);
+
+ data = kmap_local_page(page);
+ err = crypto_shash_digest(desc, data + offset, nbytes,
+ req->result);
+ kunmap_local(data);
return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_digest);
@@ -219,7 +286,6 @@ static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
CRYPTO_TFM_NEED_KEY);
- crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
return 0;
}
@@ -256,6 +322,9 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
int err;
err = alg->setkey(tfm, key, keylen);
+ if (!err && crypto_ahash_need_fallback(tfm))
+ err = crypto_ahash_setkey(crypto_ahash_fb(tfm),
+ key, keylen);
if (unlikely(err)) {
ahash_set_needkey(tfm, alg);
return err;
@@ -266,6 +335,49 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
+static int ahash_do_req_chain(struct ahash_request *req,
+ int (*const *op)(struct ahash_request *req))
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ int err;
+
+ if (crypto_ahash_req_virt(tfm) || !ahash_request_isvirt(req))
+ return (*op)(req);
+
+ if (crypto_ahash_statesize(tfm) > HASH_MAX_STATESIZE)
+ return -ENOSYS;
+
+ {
+ u8 state[HASH_MAX_STATESIZE];
+
+ if (op == &crypto_ahash_alg(tfm)->digest) {
+ ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
+ err = crypto_ahash_digest(req);
+ goto out_no_state;
+ }
+
+ err = crypto_ahash_export(req, state);
+ ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
+ err = err ?: crypto_ahash_import(req, state);
+
+ if (op == &crypto_ahash_alg(tfm)->finup) {
+ err = err ?: crypto_ahash_finup(req);
+ goto out_no_state;
+ }
+
+ err = err ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export(req, state);
+
+ ahash_request_set_tfm(req, tfm);
+ return err ?: crypto_ahash_import(req, state);
+
+out_no_state:
+ ahash_request_set_tfm(req, tfm);
+ return err;
+ }
+}
+
int crypto_ahash_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -274,103 +386,190 @@ int crypto_ahash_init(struct ahash_request *req)
return crypto_shash_init(prepare_shash_desc(req, tfm));
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
+ if (crypto_ahash_block_only(tfm)) {
+ u8 *buf = ahash_request_ctx(req);
+
+ buf += crypto_ahash_reqsize(tfm) - 1;
+ *buf = 0;
+ }
return crypto_ahash_alg(tfm)->init(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_init);
-static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
- bool has_state)
+static void ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- unsigned int ds = crypto_ahash_digestsize(tfm);
- struct ahash_request *subreq;
- unsigned int subreq_size;
- unsigned int reqsize;
- u8 *result;
- gfp_t gfp;
- u32 flags;
-
- subreq_size = sizeof(*subreq);
- reqsize = crypto_ahash_reqsize(tfm);
- reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
- subreq_size += reqsize;
- subreq_size += ds;
-
- flags = ahash_request_flags(req);
- gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
- subreq = kmalloc(subreq_size, gfp);
- if (!subreq)
- return -ENOMEM;
-
- ahash_request_set_tfm(subreq, tfm);
- ahash_request_set_callback(subreq, flags, cplt, req);
-
- result = (u8 *)(subreq + 1) + reqsize;
-
- ahash_request_set_crypt(subreq, req->src, result, req->nbytes);
-
- if (has_state) {
- void *state;
-
- state = kmalloc(crypto_ahash_statesize(tfm), gfp);
- if (!state) {
- kfree(subreq);
- return -ENOMEM;
- }
-
- crypto_ahash_export(req, state);
- crypto_ahash_import(subreq, state);
- kfree_sensitive(state);
- }
-
- req->priv = subreq;
+ req->saved_complete = req->base.complete;
+ req->saved_data = req->base.data;
+ req->base.complete = cplt;
+ req->base.data = req;
+}
- return 0;
+static void ahash_restore_req(struct ahash_request *req)
+{
+ req->base.complete = req->saved_complete;
+ req->base.data = req->saved_data;
}
-static void ahash_restore_req(struct ahash_request *req, int err)
+static int ahash_update_finish(struct ahash_request *req, int err)
{
- struct ahash_request *subreq = req->priv;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ bool nonzero = crypto_ahash_final_nonzero(tfm);
+ int bs = crypto_ahash_blocksize(tfm);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen;
+ u8 *buf;
+
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+ buf = blenp - bs;
+
+ if (blen) {
+ req->src = req->sg_head + 1;
+ if (sg_is_chain(req->src))
+ req->src = sg_chain_ptr(req->src);
+ }
+
+ req->nbytes += nonzero - blen;
- if (!err)
- memcpy(req->result, subreq->result,
- crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+ blen = err < 0 ? 0 : err + nonzero;
+ if (ahash_request_isvirt(req))
+ memcpy(buf, req->svirt + req->nbytes - blen, blen);
+ else
+ memcpy_from_sglist(buf, req->src, req->nbytes - blen, blen);
+ *blenp = blen;
- req->priv = NULL;
+ ahash_restore_req(req);
- kfree_sensitive(subreq);
+ return err;
+}
+
+static void ahash_update_done(void *data, int err)
+{
+ ahash_op_done(data, err, ahash_update_finish);
}
int crypto_ahash_update(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ bool nonzero = crypto_ahash_final_nonzero(tfm);
+ int bs = crypto_ahash_blocksize(tfm);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen, err;
+ u8 *buf;
if (likely(tfm->using_shash))
return shash_ahash_update(req, ahash_request_ctx(req));
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
+ if (!crypto_ahash_block_only(tfm))
+ return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update);
+
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+ buf = blenp - bs;
- return crypto_ahash_alg(tfm)->update(req);
+ if (blen + req->nbytes < bs + nonzero) {
+ if (ahash_request_isvirt(req))
+ memcpy(buf + blen, req->svirt, req->nbytes);
+ else
+ memcpy_from_sglist(buf + blen, req->src, 0,
+ req->nbytes);
+
+ *blenp += req->nbytes;
+ return 0;
+ }
+
+ if (blen) {
+ memset(req->sg_head, 0, sizeof(req->sg_head[0]));
+ sg_set_buf(req->sg_head, buf, blen);
+ if (req->src != req->sg_head + 1)
+ sg_chain(req->sg_head, 2, req->src);
+ req->src = req->sg_head;
+ req->nbytes += blen;
+ }
+ req->nbytes -= nonzero;
+
+ ahash_save_req(req, ahash_update_done);
+
+ err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return err;
+
+ return ahash_update_finish(req, err);
}
EXPORT_SYMBOL_GPL(crypto_ahash_update);
-int crypto_ahash_final(struct ahash_request *req)
+static int ahash_finup_finish(struct ahash_request *req, int err)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen;
+
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+
+ if (blen) {
+ if (sg_is_last(req->src))
+ req->src = NULL;
+ else {
+ req->src = req->sg_head + 1;
+ if (sg_is_chain(req->src))
+ req->src = sg_chain_ptr(req->src);
+ }
+ req->nbytes -= blen;
+ }
- if (likely(tfm->using_shash))
- return crypto_shash_final(ahash_request_ctx(req), req->result);
+ ahash_restore_req(req);
+
+ return err;
+}
- return crypto_ahash_alg(tfm)->final(req);
+static void ahash_finup_done(void *data, int err)
+{
+ ahash_op_done(data, err, ahash_finup_finish);
}
-EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ int bs = crypto_ahash_blocksize(tfm);
+ u8 *blenp = ahash_request_ctx(req);
+ int blen, err;
+ u8 *buf;
if (likely(tfm->using_shash))
return shash_ahash_finup(req, ahash_request_ctx(req));
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
+ if (!crypto_ahash_alg(tfm)->finup)
+ return ahash_def_finup(req);
+ if (!crypto_ahash_block_only(tfm))
+ return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup);
+
+ blenp += crypto_ahash_reqsize(tfm) - 1;
+ blen = *blenp;
+ buf = blenp - bs;
+
+ if (blen) {
+ memset(req->sg_head, 0, sizeof(req->sg_head[0]));
+ sg_set_buf(req->sg_head, buf, blen);
+ if (!req->src)
+ sg_mark_end(req->sg_head);
+ else if (req->src != req->sg_head + 1)
+ sg_chain(req->sg_head, 2, req->src);
+ req->src = req->sg_head;
+ req->nbytes += blen;
+ }
+
+ ahash_save_req(req, ahash_finup_done);
+
+ err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return err;
- return crypto_ahash_alg(tfm)->finup(req);
+ return ahash_finup_finish(req, err);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
@@ -380,11 +579,11 @@ int crypto_ahash_digest(struct ahash_request *req)
if (likely(tfm->using_shash))
return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
-
+ if (ahash_req_on_stack(req) && ahash_is_async(tfm))
+ return -EAGAIN;
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
-
- return crypto_ahash_alg(tfm)->digest(req);
+ return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -395,74 +594,87 @@ static void ahash_def_finup_done2(void *data, int err)
if (err == -EINPROGRESS)
return;
- ahash_restore_req(areq, err);
-
+ ahash_restore_req(areq);
ahash_request_complete(areq, err);
}
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
- struct ahash_request *subreq = req->priv;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
if (err)
goto out;
- subreq->base.complete = ahash_def_finup_done2;
+ req->base.complete = ahash_def_finup_done2;
- err = crypto_ahash_alg(crypto_ahash_reqtfm(req))->final(subreq);
+ err = crypto_ahash_alg(tfm)->final(req);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
out:
- ahash_restore_req(req, err);
+ ahash_restore_req(req);
return err;
}
static void ahash_def_finup_done1(void *data, int err)
{
- struct ahash_request *areq = data;
- struct ahash_request *subreq;
-
- if (err == -EINPROGRESS)
- goto out;
-
- subreq = areq->priv;
- subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
- err = ahash_def_finup_finish1(areq, err);
- if (err == -EINPROGRESS || err == -EBUSY)
- return;
-
-out:
- ahash_request_complete(areq, err);
+ ahash_op_done(data, err, ahash_def_finup_finish1);
}
static int ahash_def_finup(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
int err;
- err = ahash_save_req(req, ahash_def_finup_done1, true);
- if (err)
- return err;
+ ahash_save_req(req, ahash_def_finup_done1);
- err = crypto_ahash_alg(tfm)->update(req->priv);
+ err = crypto_ahash_update(req);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
return ahash_def_finup_finish1(req, err);
}
+int crypto_ahash_export_core(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (likely(tfm->using_shash))
+ return crypto_shash_export_core(ahash_request_ctx(req), out);
+ return crypto_ahash_alg(tfm)->export_core(req, out);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_export_core);
+
int crypto_ahash_export(struct ahash_request *req, void *out)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
if (likely(tfm->using_shash))
return crypto_shash_export(ahash_request_ctx(req), out);
+ if (crypto_ahash_block_only(tfm)) {
+ unsigned int plen = crypto_ahash_blocksize(tfm) + 1;
+ unsigned int reqsize = crypto_ahash_reqsize(tfm);
+ unsigned int ss = crypto_ahash_statesize(tfm);
+ u8 *buf = ahash_request_ctx(req);
+
+ memcpy(out + ss - plen, buf + reqsize - plen, plen);
+ }
return crypto_ahash_alg(tfm)->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_ahash_export);
+int crypto_ahash_import_core(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (likely(tfm->using_shash))
+ return crypto_shash_import_core(prepare_shash_desc(req, tfm),
+ in);
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+ return crypto_ahash_alg(tfm)->import_core(req, in);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_import_core);
+
int crypto_ahash_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -471,6 +683,12 @@ int crypto_ahash_import(struct ahash_request *req, const void *in)
return crypto_shash_import(prepare_shash_desc(req, tfm), in);
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
+ if (crypto_ahash_block_only(tfm)) {
+ unsigned int reqsize = crypto_ahash_reqsize(tfm);
+ u8 *buf = ahash_request_ctx(req);
+
+ buf[reqsize - 1] = 0;
+ }
return crypto_ahash_alg(tfm)->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_ahash_import);
@@ -480,25 +698,73 @@ static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
- alg->exit_tfm(hash);
+ if (alg->exit_tfm)
+ alg->exit_tfm(hash);
+ else if (tfm->__crt_alg->cra_exit)
+ tfm->__crt_alg->cra_exit(tfm);
+
+ if (crypto_ahash_need_fallback(hash))
+ crypto_free_ahash(crypto_ahash_fb(hash));
}
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
+ struct crypto_ahash *fb = NULL;
+ int err;
crypto_ahash_set_statesize(hash, alg->halg.statesize);
+ crypto_ahash_set_reqsize(hash, crypto_tfm_alg_reqsize(tfm));
if (tfm->__crt_alg->cra_type == &crypto_shash_type)
return crypto_init_ahash_using_shash(tfm);
+ if (crypto_ahash_need_fallback(hash)) {
+ fb = crypto_alloc_ahash(crypto_ahash_alg_name(hash),
+ CRYPTO_ALG_REQ_VIRT,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_REQ_VIRT |
+ CRYPTO_AHASH_ALG_NO_EXPORT_CORE);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+
+ tfm->fb = crypto_ahash_tfm(fb);
+ }
+
ahash_set_needkey(hash, alg);
- if (alg->exit_tfm)
- tfm->exit = crypto_ahash_exit_tfm;
+ tfm->exit = crypto_ahash_exit_tfm;
- return alg->init_tfm ? alg->init_tfm(hash) : 0;
+ if (alg->init_tfm)
+ err = alg->init_tfm(hash);
+ else if (tfm->__crt_alg->cra_init)
+ err = tfm->__crt_alg->cra_init(tfm);
+ else
+ return 0;
+
+ if (err)
+ goto out_free_sync_hash;
+
+ if (!ahash_is_async(hash) && crypto_ahash_reqsize(hash) >
+ MAX_SYNC_HASH_REQSIZE)
+ goto out_exit_tfm;
+
+ BUILD_BUG_ON(HASH_MAX_DESCSIZE > MAX_SYNC_HASH_REQSIZE);
+ if (crypto_ahash_reqsize(hash) < HASH_MAX_DESCSIZE)
+ crypto_ahash_set_reqsize(hash, HASH_MAX_DESCSIZE);
+
+ return 0;
+
+out_exit_tfm:
+ if (alg->exit_tfm)
+ alg->exit_tfm(hash);
+ else if (tfm->__crt_alg->cra_exit)
+ tfm->__crt_alg->cra_exit(tfm);
+ err = -EINVAL;
+out_free_sync_hash:
+ crypto_free_ahash(fb);
+ return err;
}
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
@@ -536,8 +802,8 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : ahash\n");
- seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
- "yes" : "no");
+ seq_printf(m, "async : %s\n",
+ str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n",
__crypto_hash_alg_common(alg)->digestsize);
@@ -557,6 +823,7 @@ static const struct crypto_type crypto_ahash_type = {
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AHASH,
.tfmsize = offsetof(struct crypto_ahash, base),
+ .algsize = offsetof(struct ahash_alg, halg.base),
};
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
@@ -581,7 +848,7 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
-static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
struct crypto_alg *alg = &halg->base;
@@ -590,11 +857,13 @@ static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
}
+EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
struct hash_alg_common *halg = crypto_hash_alg_common(hash);
struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
+ struct crypto_ahash *fb = NULL;
struct crypto_ahash *nhash;
struct ahash_alg *alg;
int err;
@@ -624,28 +893,52 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
err = PTR_ERR(shash);
goto out_free_nhash;
}
+ crypto_ahash_tfm(nhash)->exit = crypto_exit_ahash_using_shash;
nhash->using_shash = true;
*nctx = shash;
return nhash;
}
+ if (crypto_ahash_need_fallback(hash)) {
+ fb = crypto_clone_ahash(crypto_ahash_fb(hash));
+ err = PTR_ERR(fb);
+ if (IS_ERR(fb))
+ goto out_free_nhash;
+
+ crypto_ahash_tfm(nhash)->fb = crypto_ahash_tfm(fb);
+ }
+
err = -ENOSYS;
alg = crypto_ahash_alg(hash);
if (!alg->clone_tfm)
- goto out_free_nhash;
+ goto out_free_fb;
err = alg->clone_tfm(nhash, hash);
if (err)
- goto out_free_nhash;
+ goto out_free_fb;
+
+ crypto_ahash_tfm(nhash)->exit = crypto_ahash_exit_tfm;
return nhash;
+out_free_fb:
+ crypto_free_ahash(fb);
out_free_nhash:
crypto_free_ahash(nhash);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);
+static int ahash_default_export_core(struct ahash_request *req, void *out)
+{
+ return -ENOSYS;
+}
+
+static int ahash_default_import_core(struct ahash_request *req, const void *in)
+{
+ return -ENOSYS;
+}
+
static int ahash_prepare_alg(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
@@ -654,6 +947,13 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
if (alg->halg.statesize == 0)
return -EINVAL;
+ if (base->cra_reqsize && base->cra_reqsize < alg->halg.statesize)
+ return -EINVAL;
+
+ if (!(base->cra_flags & CRYPTO_ALG_ASYNC) &&
+ base->cra_reqsize > MAX_SYNC_HASH_REQSIZE)
+ return -EINVAL;
+
err = hash_prepare_alg(&alg->halg);
if (err)
return err;
@@ -661,11 +961,28 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
base->cra_type = &crypto_ahash_type;
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
- if (!alg->finup)
- alg->finup = ahash_def_finup;
+ if ((base->cra_flags ^ CRYPTO_ALG_REQ_VIRT) &
+ (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT))
+ base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
+
if (!alg->setkey)
alg->setkey = ahash_nosetkey;
+ if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) {
+ BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256);
+ if (!alg->finup)
+ return -EINVAL;
+
+ base->cra_reqsize += base->cra_blocksize + 1;
+ alg->halg.statesize += base->cra_blocksize + 1;
+ alg->export_core = alg->export;
+ alg->import_core = alg->import;
+ } else if (!alg->export_core || !alg->import_core) {
+ alg->export_core = ahash_default_export_core;
+ alg->import_core = ahash_default_import_core;
+ base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+ }
+
return 0;
}
@@ -733,5 +1050,42 @@ int ahash_register_instance(struct crypto_template *tmpl,
}
EXPORT_SYMBOL_GPL(ahash_register_instance);
+void ahash_request_free(struct ahash_request *req)
+{
+ if (unlikely(!req))
+ return;
+
+ if (!ahash_req_on_stack(req)) {
+ kfree(req);
+ return;
+ }
+
+ ahash_request_zero(req);
+}
+EXPORT_SYMBOL_GPL(ahash_request_free);
+
+int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ HASH_REQUEST_ON_STACK(req, crypto_ahash_fb(tfm));
+ int err;
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_virt(req, data, out, len);
+ err = crypto_ahash_digest(req);
+
+ ahash_request_zero(req);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(crypto_hash_digest);
+
+void ahash_free_singlespawn_instance(struct ahash_instance *inst)
+{
+ crypto_drop_spawn(ahash_instance_ctx(inst));
+ kfree(inst);
+}
+EXPORT_SYMBOL_GPL(ahash_free_singlespawn_instance);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 72c82d9aa077..a36f50c83827 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -97,6 +97,7 @@ static const struct crypto_type crypto_akcipher_type = {
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AKCIPHER,
.tfmsize = offsetof(struct crypto_akcipher, base),
+ .algsize = offsetof(struct akcipher_alg, base),
};
int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn,
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 5318c214debb..e604d0d8b7b4 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -71,12 +71,23 @@ static void crypto_free_instance(struct crypto_instance *inst)
static void crypto_destroy_instance_workfn(struct work_struct *w)
{
- struct crypto_instance *inst = container_of(w, struct crypto_instance,
+ struct crypto_template *tmpl = container_of(w, struct crypto_template,
free_work);
- struct crypto_template *tmpl = inst->tmpl;
+ struct crypto_instance *inst;
+ struct hlist_node *n;
+ HLIST_HEAD(list);
+
+ down_write(&crypto_alg_sem);
+ hlist_for_each_entry_safe(inst, n, &tmpl->dead, list) {
+ if (refcount_read(&inst->alg.cra_refcnt) != -1)
+ continue;
+ hlist_del(&inst->list);
+ hlist_add_head(&inst->list, &list);
+ }
+ up_write(&crypto_alg_sem);
- crypto_free_instance(inst);
- crypto_tmpl_put(tmpl);
+ hlist_for_each_entry_safe(inst, n, &list, list)
+ crypto_free_instance(inst);
}
static void crypto_destroy_instance(struct crypto_alg *alg)
@@ -84,9 +95,10 @@ static void crypto_destroy_instance(struct crypto_alg *alg)
struct crypto_instance *inst = container_of(alg,
struct crypto_instance,
alg);
+ struct crypto_template *tmpl = inst->tmpl;
- INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn);
- schedule_work(&inst->free_work);
+ refcount_set(&alg->cra_refcnt, -1);
+ schedule_work(&tmpl->free_work);
}
/*
@@ -132,14 +144,16 @@ static void crypto_remove_instance(struct crypto_instance *inst,
inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
- if (!tmpl || !crypto_tmpl_get(tmpl))
+ if (!tmpl)
return;
- list_move(&inst->alg.cra_list, list);
+ list_del_init(&inst->alg.cra_list);
hlist_del(&inst->list);
- inst->alg.cra_destroy = crypto_destroy_instance;
+ hlist_add_head(&inst->list, &tmpl->dead);
BUG_ON(!list_empty(&inst->alg.cra_users));
+
+ crypto_alg_put(&inst->alg);
}
/*
@@ -260,8 +274,7 @@ static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg)
{
struct crypto_larval *larval;
- if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER) ||
- IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) ||
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) ||
(alg->cra_flags & CRYPTO_ALG_INTERNAL))
return NULL; /* No self-test needed */
@@ -404,6 +417,15 @@ void crypto_remove_final(struct list_head *list)
}
EXPORT_SYMBOL_GPL(crypto_remove_final);
+static void crypto_free_alg(struct crypto_alg *alg)
+{
+ unsigned int algsize = alg->cra_type->algsize;
+ u8 *p = (u8 *)alg - algsize;
+
+ crypto_destroy_alg(alg);
+ kfree(p);
+}
+
int crypto_register_alg(struct crypto_alg *alg)
{
struct crypto_larval *larval;
@@ -416,6 +438,19 @@ int crypto_register_alg(struct crypto_alg *alg)
if (err)
return err;
+ if (alg->cra_flags & CRYPTO_ALG_DUP_FIRST &&
+ !WARN_ON_ONCE(alg->cra_destroy)) {
+ unsigned int algsize = alg->cra_type->algsize;
+ u8 *p = (u8 *)alg - algsize;
+
+ p = kmemdup(p, algsize + sizeof(*alg), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ alg = (void *)(p + algsize);
+ alg->cra_destroy = crypto_free_alg;
+ }
+
down_write(&crypto_alg_sem);
larval = __crypto_register_alg(alg, &algs_to_put);
if (!IS_ERR_OR_NULL(larval)) {
@@ -424,8 +459,10 @@ int crypto_register_alg(struct crypto_alg *alg)
}
up_write(&crypto_alg_sem);
- if (IS_ERR(larval))
+ if (IS_ERR(larval)) {
+ crypto_alg_put(alg);
return PTR_ERR(larval);
+ }
if (test_started)
crypto_schedule_test(larval);
@@ -461,12 +498,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
return;
- if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
- return;
-
- if (alg->cra_destroy)
- alg->cra_destroy(alg);
+ WARN_ON(!alg->cra_destroy && refcount_read(&alg->cra_refcnt) != 1);
+ list_add(&alg->cra_list, &list);
crypto_remove_final(&list);
}
EXPORT_SYMBOL_GPL(crypto_unregister_alg);
@@ -505,6 +539,8 @@ int crypto_register_template(struct crypto_template *tmpl)
struct crypto_template *q;
int err = -EEXIST;
+ INIT_WORK(&tmpl->free_work, crypto_destroy_instance_workfn);
+
down_write(&crypto_alg_sem);
crypto_check_module_sig(tmpl->module);
@@ -566,6 +602,8 @@ void crypto_unregister_template(struct crypto_template *tmpl)
crypto_free_instance(inst);
}
crypto_remove_final(&users);
+
+ flush_work(&tmpl->free_work);
}
EXPORT_SYMBOL_GPL(crypto_unregister_template);
@@ -619,6 +657,7 @@ int crypto_register_instance(struct crypto_template *tmpl,
inst->alg.cra_module = tmpl->module;
inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;
+ inst->alg.cra_destroy = crypto_destroy_instance;
down_write(&crypto_alg_sem);
@@ -884,20 +923,20 @@ const char *crypto_attr_alg_name(struct rtattr *rta)
}
EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
-int crypto_inst_setname(struct crypto_instance *inst, const char *name,
- struct crypto_alg *alg)
+int __crypto_inst_setname(struct crypto_instance *inst, const char *name,
+ const char *driver, struct crypto_alg *alg)
{
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
- name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ driver, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
return 0;
}
-EXPORT_SYMBOL_GPL(crypto_inst_setname);
+EXPORT_SYMBOL_GPL(__crypto_inst_setname);
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen)
{
@@ -955,7 +994,7 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
queue->backlog = queue->backlog->next;
request = queue->list.next;
- list_del(request);
+ list_del_init(request);
return list_entry(request, struct crypto_async_request, list);
}
@@ -1019,7 +1058,7 @@ static void __init crypto_start_tests(void)
if (!IS_BUILTIN(CONFIG_CRYPTO_ALGAPI))
return;
- if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
return;
set_crypto_boot_test_finished();
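
A rough illustration of the .algsize bookkeeping that the CRYPTO_ALG_DUP_FIRST path above relies on; struct foo_alg and foo_dup_alg() are hypothetical, only the pointer arithmetic mirrors the hunk, and it assumes the embedded struct crypto_alg is the last member of the containing algorithm type:

struct foo_alg {
        int (*do_foo)(struct crypto_tfm *tfm);
        struct crypto_alg base; /* cra_type->algsize == offsetof(struct foo_alg, base) */
};

static struct crypto_alg *foo_dup_alg(struct crypto_alg *alg, gfp_t gfp)
{
        unsigned int algsize = alg->cra_type->algsize;
        u8 *p = (u8 *)alg - algsize;    /* start of the containing struct foo_alg */

        /* copies the whole containing object because base is its last member */
        p = kmemdup(p, algsize + sizeof(*alg), gfp);
        if (!p)
                return NULL;

        return (struct crypto_alg *)(p + algsize);      /* the embedded copy */
}
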
diff --git a/crypto/algboss.c b/crypto/algboss.c
index a20926bfd34e..846f586889ee 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -189,7 +189,7 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
struct task_struct *thread;
struct crypto_test_param *param;
- if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
return NOTIFY_DONE;
if (!try_module_get(THIS_MODULE))
@@ -247,13 +247,7 @@ static void __exit cryptomgr_exit(void)
BUG_ON(err);
}
-/*
- * This is arch_initcall() so that the crypto self-tests are run on algorithms
- * registered early by subsys_initcall(). subsys_initcall() is needed for
- * generic implementations so that they're available for comparison tests when
- * other implementations are registered later by module_init().
- */
-arch_initcall(cryptomgr_init);
+module_init(cryptomgr_init);
module_exit(cryptomgr_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 7d58cbbce4af..79b016a899a1 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -27,7 +27,6 @@
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
-#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
@@ -36,19 +35,13 @@
#include <linux/net.h>
#include <net/sock.h>
-struct aead_tfm {
- struct crypto_aead *aead;
- struct crypto_sync_skcipher *null_tfm;
-};
-
static inline bool aead_sufficient_data(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
+ struct crypto_aead *tfm = pask->private;
unsigned int as = crypto_aead_authsize(tfm);
/*
@@ -64,27 +57,12 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
+ struct crypto_aead *tfm = pask->private;
unsigned int ivsize = crypto_aead_ivsize(tfm);
return af_alg_sendmsg(sock, msg, size, ivsize);
}
-static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
- struct scatterlist *src,
- struct scatterlist *dst, unsigned int len)
-{
- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
-
- skcipher_request_set_sync_tfm(skreq, null_tfm);
- skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
- NULL, NULL);
- skcipher_request_set_crypt(skreq, src, dst, len, NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-
static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
@@ -93,9 +71,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
- struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
+ struct crypto_aead *tfm = pask->private;
unsigned int i, as = crypto_aead_authsize(tfm);
struct af_alg_async_req *areq;
struct af_alg_tsgl *tsgl, *tmp;
@@ -223,11 +199,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* v v
* RX SGL: AAD || PT || Tag
*/
- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sgt.sgl,
- processed);
- if (err)
- goto free;
+ memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src,
+ processed);
af_alg_pull_tsgl(sk, processed, NULL, 0);
} else {
/*
@@ -241,12 +214,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* RX SGL: AAD || CT ----+
*/
- /* Copy AAD || CT to RX SGL buffer for in-place operation. */
- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sgt.sgl,
- outlen);
- if (err)
- goto free;
+ /* Copy AAD || CT to RX SGL buffer for in-place operation. */
+ memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, outlen);
/* Create TX SGL for tag and chain it to RX SGL. */
areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
@@ -379,7 +348,7 @@ static int aead_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
- struct aead_tfm *tfm;
+ struct crypto_aead *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -393,7 +362,7 @@ static int aead_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
+ if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
atomic_dec(&pask->nokey_refcnt);
@@ -454,54 +423,22 @@ static struct proto_ops algif_aead_ops_nokey = {
static void *aead_bind(const char *name, u32 type, u32 mask)
{
- struct aead_tfm *tfm;
- struct crypto_aead *aead;
- struct crypto_sync_skcipher *null_tfm;
-
- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
- if (!tfm)
- return ERR_PTR(-ENOMEM);
-
- aead = crypto_alloc_aead(name, type, mask);
- if (IS_ERR(aead)) {
- kfree(tfm);
- return ERR_CAST(aead);
- }
-
- null_tfm = crypto_get_default_null_skcipher();
- if (IS_ERR(null_tfm)) {
- crypto_free_aead(aead);
- kfree(tfm);
- return ERR_CAST(null_tfm);
- }
-
- tfm->aead = aead;
- tfm->null_tfm = null_tfm;
-
- return tfm;
+ return crypto_alloc_aead(name, type, mask);
}
static void aead_release(void *private)
{
- struct aead_tfm *tfm = private;
-
- crypto_free_aead(tfm->aead);
- crypto_put_default_null_skcipher();
- kfree(tfm);
+ crypto_free_aead(private);
}
static int aead_setauthsize(void *private, unsigned int authsize)
{
- struct aead_tfm *tfm = private;
-
- return crypto_aead_setauthsize(tfm->aead, authsize);
+ return crypto_aead_setauthsize(private, authsize);
}
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
- struct aead_tfm *tfm = private;
-
- return crypto_aead_setkey(tfm->aead, key, keylen);
+ return crypto_aead_setkey(private, key, keylen);
}
static void aead_sock_destruct(struct sock *sk)
@@ -510,8 +447,7 @@ static void aead_sock_destruct(struct sock *sk)
struct af_alg_ctx *ctx = ask->private;
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
+ struct crypto_aead *tfm = pask->private;
unsigned int ivlen = crypto_aead_ivsize(tfm);
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
@@ -524,10 +460,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- struct aead_tfm *tfm = private;
- struct crypto_aead *aead = tfm->aead;
+ struct crypto_aead *tfm = private;
unsigned int len = sizeof(*ctx);
- unsigned int ivlen = crypto_aead_ivsize(aead);
+ unsigned int ivlen = crypto_aead_ivsize(tfm);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -554,9 +489,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
static int aead_accept_parent(void *private, struct sock *sk)
{
- struct aead_tfm *tfm = private;
+ struct crypto_aead *tfm = private;
- if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
+ if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return aead_accept_parent_nokey(private, sk);
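
A small sketch of the memcpy_sglist() helper that replaces the null-skcipher copy above; the buffers are illustrative and only the helper's calling convention is taken from the hunk:

#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>

static void copy_buf_via_sglists(void *dst_buf, const void *src_buf,
                                 unsigned int len)
{
        struct scatterlist src, dst;

        sg_init_one(&src, src_buf, len);
        sg_init_one(&dst, dst_buf, len);

        /* plain data copy between scatterlists, no cipher involved */
        memcpy_sglist(&dst, &src, len);
}
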
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 5498a87249d3..e3f1a4852737 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -265,10 +265,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock,
goto out_free_state;
err = crypto_ahash_import(&ctx2->req, state);
- if (err) {
- sock_orphan(sk2);
- sock_put(sk2);
- }
out_free_state:
kfree_sensitive(state);
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 64f57c4c4b06..153523ce6076 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -467,7 +467,7 @@ MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
module_param(dbg, int, 0);
MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
-subsys_initcall(prng_mod_init);
+module_init(prng_mod_init);
module_exit(prng_mod_fini);
MODULE_ALIAS_CRYPTO("stdrng");
MODULE_ALIAS_CRYPTO("ansi_cprng");
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 886e7c913688..4268c3833baa 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -694,7 +694,7 @@ static void __exit anubis_mod_fini(void)
crypto_unregister_alg(&anubis_alg);
}
-subsys_initcall(anubis_mod_init);
+module_init(anubis_mod_init);
module_exit(anubis_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/api.c b/crypto/api.c
index bfd177a4313a..5724d62e9d07 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -31,12 +31,12 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);
-#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && \
- !IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)
+#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)
DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
#endif
-static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
+static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
+ u32 type, u32 mask);
static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
u32 mask);
@@ -145,7 +145,7 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
if (alg != &larval->alg) {
kfree(larval);
if (crypto_is_larval(alg))
- alg = crypto_larval_wait(alg);
+ alg = crypto_larval_wait(alg, type, mask);
}
return alg;
@@ -197,7 +197,8 @@ static void crypto_start_test(struct crypto_larval *larval)
crypto_schedule_test(larval);
}
-static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
+static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
+ u32 type, u32 mask)
{
struct crypto_larval *larval;
long time_left;
@@ -218,15 +219,19 @@ again:
if (crypto_is_test_larval(larval))
crypto_larval_kill(larval);
alg = ERR_PTR(-ETIMEDOUT);
- } else if (!alg) {
- u32 type;
- u32 mask;
-
+ } else if (!alg || PTR_ERR(alg) == -EEXIST) {
+ int err = alg ? -EEXIST : -EAGAIN;
+
+ /*
+ * EEXIST is expected because two probes can be scheduled
+ * at the same time with one using alg_name and the other
+ * using driver_name. Do a re-lookup but do not retry in
+ * case we hit a quirk like gcm_base(ctr(aes),...) which
+ * will never match.
+ */
alg = &larval->alg;
- type = alg->cra_flags & ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
- mask = larval->mask;
alg = crypto_alg_lookup(alg->cra_name, type, mask) ?:
- ERR_PTR(-EAGAIN);
+ ERR_PTR(err);
} else if (IS_ERR(alg))
;
else if (crypto_is_test_larval(larval) &&
@@ -304,7 +309,7 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
}
if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
- alg = crypto_larval_wait(alg);
+ alg = crypto_larval_wait(alg, type, mask);
else if (alg)
;
else if (!(mask & CRYPTO_ALG_TESTED))
@@ -352,7 +357,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);
if (ok == NOTIFY_STOP)
- alg = crypto_larval_wait(larval);
+ alg = crypto_larval_wait(larval, type, mask);
else {
crypto_mod_put(larval);
alg = ERR_PTR(-ENOENT);
@@ -386,10 +391,6 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
case CRYPTO_ALG_TYPE_CIPHER:
len += crypto_cipher_ctxsize(alg);
break;
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- len += crypto_compress_ctxsize(alg);
- break;
}
return len;
@@ -535,6 +536,7 @@ void *crypto_create_tfm_node(struct crypto_alg *alg,
goto out;
tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
+ tfm->fb = tfm;
err = frontend->init_tfm(tfm);
if (err)
@@ -576,7 +578,7 @@ void *crypto_clone_tfm(const struct crypto_type *frontend,
tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
tfm->crt_flags = otfm->crt_flags;
- tfm->exit = otfm->exit;
+ tfm->fb = tfm;
out:
return mem;
@@ -710,5 +712,31 @@ void crypto_req_done(void *data, int err)
}
EXPORT_SYMBOL_GPL(crypto_req_done);
+void crypto_destroy_alg(struct crypto_alg *alg)
+{
+ if (alg->cra_type && alg->cra_type->destroy)
+ alg->cra_type->destroy(alg);
+ if (alg->cra_destroy)
+ alg->cra_destroy(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_destroy_alg);
+
+struct crypto_async_request *crypto_request_clone(
+ struct crypto_async_request *req, size_t total, gfp_t gfp)
+{
+ struct crypto_tfm *tfm = req->tfm;
+ struct crypto_async_request *nreq;
+
+ nreq = kmemdup(req, total, gfp);
+ if (!nreq) {
+ req->tfm = tfm->fb;
+ return req;
+ }
+
+ nreq->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
+ return nreq;
+}
+EXPORT_SYMBOL_GPL(crypto_request_clone);
+
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 1a4825c97c5a..1608018111d0 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -73,7 +73,7 @@ static void __exit arc4_exit(void)
crypto_unregister_lskcipher(&arc4_alg);
}
-subsys_initcall(arc4_init);
+module_init(arc4_init);
module_exit(arc4_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c
index bd359d3313c2..faa7900383f6 100644
--- a/crypto/aria_generic.c
+++ b/crypto/aria_generic.c
@@ -304,7 +304,7 @@ static void __exit aria_fini(void)
crypto_unregister_alg(&aria_alg);
}
-subsys_initcall(aria_init);
+module_init(aria_init);
module_exit(aria_fini);
MODULE_DESCRIPTION("ARIA Cipher Algorithm");
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index bbd07a9022e6..e5b177c8e842 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -163,10 +163,8 @@ static u8 *pkey_pack_u32(u8 *dst, u32 val)
static int software_key_query(const struct kernel_pkey_params *params,
struct kernel_pkey_query *info)
{
- struct crypto_akcipher *tfm;
struct public_key *pkey = params->key->payload.data[asym_crypto];
char alg_name[CRYPTO_MAX_ALG_NAME];
- struct crypto_sig *sig;
u8 *key, *ptr;
int ret, len;
bool issig;
@@ -188,7 +186,11 @@ static int software_key_query(const struct kernel_pkey_params *params,
ptr = pkey_pack_u32(ptr, pkey->paramlen);
memcpy(ptr, pkey->params, pkey->paramlen);
+ memset(info, 0, sizeof(*info));
+
if (issig) {
+ struct crypto_sig *sig;
+
sig = crypto_alloc_sig(alg_name, 0, 0);
if (IS_ERR(sig)) {
ret = PTR_ERR(sig);
@@ -200,9 +202,10 @@ static int software_key_query(const struct kernel_pkey_params *params,
else
ret = crypto_sig_set_pubkey(sig, key, pkey->keylen);
if (ret < 0)
- goto error_free_tfm;
+ goto error_free_sig;
len = crypto_sig_keysize(sig);
+ info->key_size = len;
info->max_sig_size = crypto_sig_maxsize(sig);
info->max_data_size = crypto_sig_digestsize(sig);
@@ -211,11 +214,19 @@ static int software_key_query(const struct kernel_pkey_params *params,
info->supported_ops |= KEYCTL_SUPPORTS_SIGN;
if (strcmp(params->encoding, "pkcs1") == 0) {
+ info->max_enc_size = len / BITS_PER_BYTE;
+ info->max_dec_size = len / BITS_PER_BYTE;
+
info->supported_ops |= KEYCTL_SUPPORTS_ENCRYPT;
if (pkey->key_is_private)
info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT;
}
+
+error_free_sig:
+ crypto_free_sig(sig);
} else {
+ struct crypto_akcipher *tfm;
+
tfm = crypto_alloc_akcipher(alg_name, 0, 0);
if (IS_ERR(tfm)) {
ret = PTR_ERR(tfm);
@@ -227,28 +238,23 @@ static int software_key_query(const struct kernel_pkey_params *params,
else
ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
if (ret < 0)
- goto error_free_tfm;
+ goto error_free_akcipher;
len = crypto_akcipher_maxsize(tfm);
+ info->key_size = len * BITS_PER_BYTE;
info->max_sig_size = len;
info->max_data_size = len;
+ info->max_enc_size = len;
+ info->max_dec_size = len;
info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT;
if (pkey->key_is_private)
info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT;
- }
- info->key_size = len * 8;
- info->max_enc_size = len;
- info->max_dec_size = len;
-
- ret = 0;
-
-error_free_tfm:
- if (issig)
- crypto_free_sig(sig);
- else
+error_free_akcipher:
crypto_free_akcipher(tfm);
+ }
+
error_free_key:
kfree_sensitive(key);
pr_devel("<==%s() = %d\n", __func__, ret);
@@ -267,7 +273,6 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
struct crypto_sig *sig;
char *key, *ptr;
bool issig;
- int ksz;
int ret;
pr_devel("==>%s()\n", __func__);
@@ -302,8 +307,6 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
ret = crypto_sig_set_pubkey(sig, key, pkey->keylen);
if (ret)
goto error_free_tfm;
-
- ksz = crypto_sig_keysize(sig);
} else {
tfm = crypto_alloc_akcipher(alg_name, 0, 0);
if (IS_ERR(tfm)) {
@@ -317,8 +320,6 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
if (ret)
goto error_free_tfm;
-
- ksz = crypto_akcipher_maxsize(tfm);
}
ret = -EINVAL;
@@ -347,8 +348,8 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
BUG();
}
- if (ret == 0)
- ret = ksz;
+ if (!issig && ret == 0)
+ ret = crypto_akcipher_maxsize(tfm);
error_free_tfm:
if (issig)
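
As an illustrative sizing example for the query path above: a 2048-bit RSA key taken through the akcipher branch has crypto_akcipher_maxsize() return 256 bytes, so key_size becomes 256 * BITS_PER_BYTE = 2048 while max_sig_size, max_data_size, max_enc_size and max_dec_size all stay 256; in the signature branch crypto_sig_keysize() already reports bits, so the same key yields key_size = 2048 and max_enc_size = max_dec_size = 2048 / BITS_PER_BYTE = 256.
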
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index 2863984b6700..1f3b227ba7f2 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -40,13 +40,13 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
} while (0)
chkaddr(0, 0, sizeof(*mz));
- if (mz->magic != MZ_MAGIC)
+ if (mz->magic != IMAGE_DOS_SIGNATURE)
return -ELIBBAD;
cursor = sizeof(*mz);
chkaddr(cursor, mz->peaddr, sizeof(*pe));
pe = pebuf + mz->peaddr;
- if (pe->magic != PE_MAGIC)
+ if (pe->magic != IMAGE_NT_SIGNATURE)
return -ELIBBAD;
cursor = mz->peaddr + sizeof(*pe);
@@ -55,7 +55,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
pe64 = pebuf + cursor;
switch (pe32->magic) {
- case PE_OPT_MAGIC_PE32:
+ case IMAGE_NT_OPTIONAL_HDR32_MAGIC:
chkaddr(0, cursor, sizeof(*pe32));
ctx->image_checksum_offset =
(unsigned long)&pe32->csum - (unsigned long)pebuf;
@@ -64,7 +64,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
ctx->n_data_dirents = pe32->data_dirs;
break;
- case PE_OPT_MAGIC_PE32PLUS:
+ case IMAGE_NT_OPTIONAL_HDR64_MAGIC:
chkaddr(0, cursor, sizeof(*pe64));
ctx->image_checksum_offset =
(unsigned long)&pe64->csum - (unsigned long)pebuf;
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index ee2fdab42334..2ffe4ae90bea 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -372,10 +372,9 @@ static int x509_fabricate_name(struct x509_parse_context *ctx, size_t hdrlen,
/* Empty name string if no material */
if (!ctx->cn_size && !ctx->o_size && !ctx->email_size) {
- buffer = kmalloc(1, GFP_KERNEL);
+ buffer = kzalloc(1, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
- buffer[0] = 0;
goto done;
}
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 1a3855284091..2c499654a36c 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -389,32 +389,6 @@ async_xor_val_offs(struct page *dest, unsigned int offset,
}
EXPORT_SYMBOL_GPL(async_xor_val_offs);
-/**
- * async_xor_val - attempt a xor parity check with a dma engine.
- * @dest: destination page used if the xor is performed synchronously
- * @src_list: array of source pages
- * @offset: offset in pages to start transaction
- * @src_cnt: number of source pages
- * @len: length in bytes
- * @result: 0 if sum == 0 else non-zero
- * @submit: submission / completion modifiers
- *
- * honored flags: ASYNC_TX_ACK
- *
- * src_list note: if the dest is also a source it must be at index zero.
- * The contents of this array will be overwritten if a scribble region
- * is not specified.
- */
-struct dma_async_tx_descriptor *
-async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
- int src_cnt, size_t len, enum sum_check_flags *result,
- struct async_submit_ctl *submit)
-{
- return async_xor_val_offs(dest, offset, src_list, NULL, src_cnt,
- len, result, submit);
-}
-EXPORT_SYMBOL_GPL(async_xor_val);
-
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
MODULE_LICENSE("GPL");
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 3aaf3ab4e360..a723769c8777 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -9,7 +9,6 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
-#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -28,7 +27,6 @@ struct authenc_instance_ctx {
struct crypto_authenc_ctx {
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
};
struct authenc_request_ctx {
@@ -170,21 +168,6 @@ out:
authenc_request_complete(areq, err);
}
-static int crypto_authenc_copy_assoc(struct aead_request *req)
-{
- struct crypto_aead *authenc = crypto_aead_reqtfm(req);
- struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
-
- skcipher_request_set_sync_tfm(skreq, ctx->null);
- skcipher_request_set_callback(skreq, aead_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
- NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-
static int crypto_authenc_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
@@ -203,10 +186,7 @@ static int crypto_authenc_encrypt(struct aead_request *req)
dst = src;
if (req->src != req->dst) {
- err = crypto_authenc_copy_assoc(req);
- if (err)
- return err;
-
+ memcpy_sglist(req->dst, req->src, req->assoclen);
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
}
@@ -303,7 +283,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
@@ -315,14 +294,8 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher();
- err = PTR_ERR(null);
- if (IS_ERR(null))
- goto err_free_skcipher;
-
ctx->auth = auth;
ctx->enc = enc;
- ctx->null = null;
crypto_aead_set_reqsize(
tfm,
@@ -336,8 +309,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
return 0;
-err_free_skcipher:
- crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
@@ -349,7 +320,6 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher();
}
static void crypto_authenc_free(struct aead_instance *inst)
@@ -451,7 +421,7 @@ static void __exit crypto_authenc_module_exit(void)
crypto_unregister_template(&crypto_authenc_tmpl);
}
-subsys_initcall(crypto_authenc_module_init);
+module_init(crypto_authenc_module_init);
module_exit(crypto_authenc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 2cc933e2f790..d1bf0fda3f2e 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -12,7 +12,6 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
-#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -31,7 +30,6 @@ struct crypto_authenc_esn_ctx {
unsigned int reqoff;
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
};
struct authenc_esn_request_ctx {
@@ -158,20 +156,6 @@ static void crypto_authenc_esn_encrypt_done(void *data, int err)
authenc_esn_request_complete(areq, err);
}
-static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
-{
- struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
- struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
-
- skcipher_request_set_sync_tfm(skreq, ctx->null);
- skcipher_request_set_callback(skreq, aead_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-
static int crypto_authenc_esn_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
@@ -190,10 +174,7 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
dst = src;
if (req->src != req->dst) {
- err = crypto_authenc_esn_copy(req, assoclen);
- if (err)
- return err;
-
+ memcpy_sglist(req->dst, req->src, assoclen);
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
}
@@ -277,11 +258,8 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req)
cryptlen -= authsize;
- if (req->src != dst) {
- err = crypto_authenc_esn_copy(req, assoclen + cryptlen);
- if (err)
- return err;
- }
+ if (req->src != dst)
+ memcpy_sglist(dst, req->src, assoclen + cryptlen);
scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
authsize, 0);
@@ -317,7 +295,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_sync_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
@@ -329,14 +306,8 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher();
- err = PTR_ERR(null);
- if (IS_ERR(null))
- goto err_free_skcipher;
-
ctx->auth = auth;
ctx->enc = enc;
- ctx->null = null;
ctx->reqoff = 2 * crypto_ahash_digestsize(auth);
@@ -352,8 +323,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
return 0;
-err_free_skcipher:
- crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
@@ -365,7 +334,6 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher();
}
static void crypto_authenc_esn_free(struct aead_instance *inst)
@@ -465,7 +433,7 @@ static void __exit crypto_authenc_esn_module_exit(void)
crypto_unregister_template(&crypto_authenc_esn_tmpl);
}
-subsys_initcall(crypto_authenc_esn_module_init);
+module_init(crypto_authenc_esn_module_init);
module_exit(crypto_authenc_esn_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c
index 04a712ddfb43..60f056217510 100644
--- a/crypto/blake2b_generic.c
+++ b/crypto/blake2b_generic.c
@@ -15,12 +15,12 @@
* More information about BLAKE2 can be found at https://blake2.net.
*/
-#include <linux/unaligned.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
#include <crypto/internal/blake2b.h>
#include <crypto/internal/hash.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
static const u8 blake2b_sigma[12][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
@@ -111,8 +111,8 @@ static void blake2b_compress_one_generic(struct blake2b_state *S,
#undef G
#undef ROUND
-void blake2b_compress_generic(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc)
+static void blake2b_compress_generic(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc)
{
do {
blake2b_increment_counter(state, inc);
@@ -120,17 +120,19 @@ void blake2b_compress_generic(struct blake2b_state *state,
block += BLAKE2B_BLOCK_SIZE;
} while (--nblocks);
}
-EXPORT_SYMBOL(blake2b_compress_generic);
static int crypto_blake2b_update_generic(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
- return crypto_blake2b_update(desc, in, inlen, blake2b_compress_generic);
+ return crypto_blake2b_update_bo(desc, in, inlen,
+ blake2b_compress_generic);
}
-static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out)
+static int crypto_blake2b_finup_generic(struct shash_desc *desc, const u8 *in,
+ unsigned int inlen, u8 *out)
{
- return crypto_blake2b_final(desc, out, blake2b_compress_generic);
+ return crypto_blake2b_finup(desc, in, inlen, out,
+ blake2b_compress_generic);
}
#define BLAKE2B_ALG(name, driver_name, digest_size) \
@@ -138,7 +140,9 @@ static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out)
.base.cra_name = name, \
.base.cra_driver_name = driver_name, \
.base.cra_priority = 100, \
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY | \
+ CRYPTO_AHASH_ALG_BLOCK_ONLY | \
+ CRYPTO_AHASH_ALG_FINAL_NONZERO, \
.base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \
.base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \
.base.cra_module = THIS_MODULE, \
@@ -146,8 +150,9 @@ static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out)
.setkey = crypto_blake2b_setkey, \
.init = crypto_blake2b_init, \
.update = crypto_blake2b_update_generic, \
- .final = crypto_blake2b_final_generic, \
- .descsize = sizeof(struct blake2b_state), \
+ .finup = crypto_blake2b_finup_generic, \
+ .descsize = BLAKE2B_DESC_SIZE, \
+ .statesize = BLAKE2B_STATE_SIZE, \
}
static struct shash_alg blake2b_algs[] = {
@@ -171,7 +176,7 @@ static void __exit blake2b_mod_fini(void)
crypto_unregister_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs));
}
-subsys_initcall(blake2b_mod_init);
+module_init(blake2b_mod_init);
module_exit(blake2b_mod_fini);
MODULE_AUTHOR("David Sterba <kdave@kernel.org>");
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 0146bc762c09..f3c5f9b09850 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -124,7 +124,7 @@ static void __exit blowfish_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(blowfish_mod_init);
+module_init(blowfish_mod_init);
module_exit(blowfish_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/bpf_crypto_skcipher.c b/crypto/bpf_crypto_skcipher.c
index b5e657415770..a88798d3e8c8 100644
--- a/crypto/bpf_crypto_skcipher.c
+++ b/crypto/bpf_crypto_skcipher.c
@@ -80,3 +80,4 @@ static void __exit bpf_crypto_skcipher_exit(void)
module_init(bpf_crypto_skcipher_init);
module_exit(bpf_crypto_skcipher_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Symmetric key cipher support for BPF");
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 197fcf3abc89..ee4336a04b93 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -1064,7 +1064,7 @@ static void __exit camellia_fini(void)
crypto_unregister_alg(&camellia_alg);
}
-subsys_initcall(camellia_init);
+module_init(camellia_init);
module_exit(camellia_fini);
MODULE_DESCRIPTION("Camellia Cipher Algorithm");
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index f3e57775fa02..f68330793e0c 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -531,7 +531,7 @@ static void __exit cast5_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(cast5_mod_init);
+module_init(cast5_mod_init);
module_exit(cast5_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index 11b725b12f27..4c08c42646f0 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -271,7 +271,7 @@ static void __exit cast6_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(cast6_mod_init);
+module_init(cast6_mod_init);
module_exit(cast6_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/cbc.c b/crypto/cbc.c
index e81918ca68b7..ed3df6246765 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -179,7 +179,7 @@ static void __exit crypto_cbc_module_exit(void)
crypto_unregister_template(&crypto_cbc_tmpl);
}
-subsys_initcall(crypto_cbc_module_init);
+module_init(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 06476b53b491..2ae929ffdef8 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -10,11 +10,12 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
+#include <crypto/utils.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string.h>
struct ccm_instance_ctx {
struct crypto_skcipher_spawn ctr;
@@ -54,11 +55,6 @@ struct cbcmac_tfm_ctx {
struct crypto_cipher *child;
};
-struct cbcmac_desc_ctx {
- unsigned int len;
- u8 dg[];
-};
-
static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
struct aead_request *req)
{
@@ -783,12 +779,10 @@ static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent,
static int crypto_cbcmac_digest_init(struct shash_desc *pdesc)
{
- struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_digestsize(pdesc->tfm);
+ u8 *dg = shash_desc_ctx(pdesc);
- ctx->len = 0;
- memset(ctx->dg, 0, bs);
-
+ memset(dg, 0, bs);
return 0;
}
@@ -797,39 +791,34 @@ static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p,
{
struct crypto_shash *parent = pdesc->tfm;
struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_digestsize(parent);
-
- while (len > 0) {
- unsigned int l = min(len, bs - ctx->len);
-
- crypto_xor(&ctx->dg[ctx->len], p, l);
- ctx->len +=l;
- len -= l;
- p += l;
-
- if (ctx->len == bs) {
- crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg);
- ctx->len = 0;
- }
- }
-
- return 0;
+ u8 *dg = shash_desc_ctx(pdesc);
+
+ do {
+ crypto_xor(dg, p, bs);
+ crypto_cipher_encrypt_one(tfm, dg, dg);
+ p += bs;
+ len -= bs;
+ } while (len >= bs);
+ return len;
}
-static int crypto_cbcmac_digest_final(struct shash_desc *pdesc, u8 *out)
+static int crypto_cbcmac_digest_finup(struct shash_desc *pdesc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_digestsize(parent);
+ u8 *dg = shash_desc_ctx(pdesc);
- if (ctx->len)
- crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg);
-
- memcpy(out, ctx->dg, bs);
+ if (len) {
+ crypto_xor(dg, src, len);
+ crypto_cipher_encrypt_one(tfm, out, dg);
+ return 0;
+ }
+ memcpy(out, dg, bs);
return 0;
}
@@ -883,19 +872,19 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
goto err_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
- inst->alg.base.cra_blocksize = 1;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize = sizeof(struct cbcmac_desc_ctx) +
- alg->cra_blocksize;
+ inst->alg.descsize = alg->cra_blocksize;
+ inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY;
inst->alg.base.cra_ctxsize = sizeof(struct cbcmac_tfm_ctx);
inst->alg.base.cra_init = cbcmac_init_tfm;
inst->alg.base.cra_exit = cbcmac_exit_tfm;
inst->alg.init = crypto_cbcmac_digest_init;
inst->alg.update = crypto_cbcmac_digest_update;
- inst->alg.final = crypto_cbcmac_digest_final;
+ inst->alg.finup = crypto_cbcmac_digest_finup;
inst->alg.setkey = crypto_cbcmac_digest_setkey;
inst->free = shash_free_singlespawn_instance;
@@ -940,7 +929,7 @@ static void __exit crypto_ccm_module_exit(void)
ARRAY_SIZE(crypto_ccm_tmpls));
}
-subsys_initcall(crypto_ccm_module_init);
+module_init(crypto_ccm_module_init);
module_exit(crypto_ccm_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/chacha.c b/crypto/chacha.c
new file mode 100644
index 000000000000..c3a11f4e2d13
--- /dev/null
+++ b/crypto/chacha.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers
+ *
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2018 Google LLC
+ */
+
+#include <linux/unaligned.h>
+#include <crypto/algapi.h>
+#include <crypto/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/module.h>
+
+struct chacha_ctx {
+ u32 key[8];
+ int nrounds;
+};
+
+static int chacha_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize, int nrounds)
+{
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int i;
+
+ if (keysize != CHACHA_KEY_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
+ ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
+
+ ctx->nrounds = nrounds;
+ return 0;
+}
+
+static int chacha20_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 20);
+}
+
+static int chacha12_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 12);
+}
+
+static int chacha_stream_xor(struct skcipher_request *req,
+ const struct chacha_ctx *ctx,
+ const u8 iv[CHACHA_IV_SIZE], bool arch)
+{
+ struct skcipher_walk walk;
+ struct chacha_state state;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ chacha_init(&state, ctx->key, iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
+
+ if (arch)
+ chacha_crypt(&state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes, ctx->nrounds);
+ else
+ chacha_crypt_generic(&state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes,
+ ctx->nrounds);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ return err;
+}
+
+static int crypto_chacha_crypt_generic(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return chacha_stream_xor(req, ctx, req->iv, false);
+}
+
+static int crypto_chacha_crypt_arch(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return chacha_stream_xor(req, ctx, req->iv, true);
+}
+
+static int crypto_xchacha_crypt(struct skcipher_request *req, bool arch)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha_ctx subctx;
+ struct chacha_state state;
+ u8 real_iv[16];
+
+ /* Compute the subkey given the original key and first 128 nonce bits */
+ chacha_init(&state, ctx->key, req->iv);
+ if (arch)
+ hchacha_block(&state, subctx.key, ctx->nrounds);
+ else
+ hchacha_block_generic(&state, subctx.key, ctx->nrounds);
+ subctx.nrounds = ctx->nrounds;
+
+ /* Build the real IV */
+ memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */
+ memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */
+
+ /* Generate the stream and XOR it with the data */
+ return chacha_stream_xor(req, &subctx, real_iv, arch);
+}
+
+static int crypto_xchacha_crypt_generic(struct skcipher_request *req)
+{
+ return crypto_xchacha_crypt(req, false);
+}
+
+static int crypto_xchacha_crypt_arch(struct skcipher_request *req)
+{
+ return crypto_xchacha_crypt(req, true);
+}
+
+static struct skcipher_alg algs[] = {
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_chacha_crypt_generic,
+ .decrypt = crypto_chacha_crypt_generic,
+ },
+ {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_xchacha_crypt_generic,
+ .decrypt = crypto_xchacha_crypt_generic,
+ },
+ {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha12_setkey,
+ .encrypt = crypto_xchacha_crypt_generic,
+ .decrypt = crypto_xchacha_crypt_generic,
+ },
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_chacha_crypt_arch,
+ .decrypt = crypto_chacha_crypt_arch,
+ },
+ {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = crypto_xchacha_crypt_arch,
+ .decrypt = crypto_xchacha_crypt_arch,
+ },
+ {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha12_setkey,
+ .encrypt = crypto_xchacha_crypt_arch,
+ .decrypt = crypto_xchacha_crypt_arch,
+ }
+};
+
+static unsigned int num_algs;
+
+static int __init crypto_chacha_mod_init(void)
+{
+ /* register the arch flavours only if they differ from generic */
+ num_algs = ARRAY_SIZE(algs);
+ BUILD_BUG_ON(ARRAY_SIZE(algs) % 2 != 0);
+ if (!chacha_is_arch_optimized())
+ num_algs /= 2;
+
+ return crypto_register_skciphers(algs, num_algs);
+}
+
+static void __exit crypto_chacha_mod_fini(void)
+{
+ crypto_unregister_skciphers(algs, num_algs);
+}
+
+module_init(crypto_chacha_mod_init);
+module_exit(crypto_chacha_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("Crypto API wrappers for the ChaCha20, XChaCha20, and XChaCha12 stream ciphers");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-generic");
+MODULE_ALIAS_CRYPTO("chacha20-" __stringify(ARCH));
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-generic");
+MODULE_ALIAS_CRYPTO("xchacha20-" __stringify(ARCH));
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-generic");
+MODULE_ALIAS_CRYPTO("xchacha12-" __stringify(ARCH));
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index d740849f1c19..b4b5a7198d84 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -12,36 +12,23 @@
#include <crypto/chacha.h>
#include <crypto/poly1305.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/string.h>
struct chachapoly_instance_ctx {
struct crypto_skcipher_spawn chacha;
- struct crypto_ahash_spawn poly;
unsigned int saltlen;
};
struct chachapoly_ctx {
struct crypto_skcipher *chacha;
- struct crypto_ahash *poly;
/* key bytes we use for the ChaCha20 IV */
unsigned int saltlen;
u8 salt[] __counted_by(saltlen);
};
-struct poly_req {
- /* zero byte padding for AD/ciphertext, as needed */
- u8 pad[POLY1305_BLOCK_SIZE];
- /* tail data with AD/ciphertext lengths */
- struct {
- __le64 assoclen;
- __le64 cryptlen;
- } tail;
- struct scatterlist src[1];
- struct ahash_request req; /* must be last member */
-};
-
struct chacha_req {
u8 iv[CHACHA_IV_SIZE];
struct scatterlist src[1];
@@ -62,7 +49,6 @@ struct chachapoly_req_ctx {
/* request flags, with MAY_SLEEP cleared if needed */
u32 flags;
union {
- struct poly_req poly;
struct chacha_req chacha;
} u;
};
@@ -105,16 +91,6 @@ static int poly_verify_tag(struct aead_request *req)
return 0;
}
-static int poly_copy_tag(struct aead_request *req)
-{
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
-
- scatterwalk_map_and_copy(rctx->tag, req->dst,
- req->assoclen + rctx->cryptlen,
- sizeof(rctx->tag), 1);
- return 0;
-}
-
static void chacha_decrypt_done(void *data, int err)
{
async_done_continue(data, err, poly_verify_tag);
@@ -151,210 +127,76 @@ skip:
return poly_verify_tag(req);
}
-static int poly_tail_continue(struct aead_request *req)
-{
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
-
- if (rctx->cryptlen == req->cryptlen) /* encrypting */
- return poly_copy_tag(req);
-
- return chacha_decrypt(req);
-}
-
-static void poly_tail_done(void *data, int err)
-{
- async_done_continue(data, err, poly_tail_continue);
-}
-
-static int poly_tail(struct aead_request *req)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
-
- preq->tail.assoclen = cpu_to_le64(rctx->assoclen);
- preq->tail.cryptlen = cpu_to_le64(rctx->cryptlen);
- sg_init_one(preq->src, &preq->tail, sizeof(preq->tail));
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_tail_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src,
- rctx->tag, sizeof(preq->tail));
-
- err = crypto_ahash_finup(&preq->req);
- if (err)
- return err;
-
- return poly_tail_continue(req);
-}
-
-static void poly_cipherpad_done(void *data, int err)
-{
- async_done_continue(data, err, poly_tail);
-}
-
-static int poly_cipherpad(struct aead_request *req)
+static int poly_hash(struct aead_request *req)
{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
+ const void *zp = page_address(ZERO_PAGE(0));
+ struct scatterlist *sg = req->src;
+ struct poly1305_desc_ctx desc;
+ struct scatter_walk walk;
+ struct {
+ union {
+ struct {
+ __le64 assoclen;
+ __le64 cryptlen;
+ };
+ u8 u8[16];
+ };
+ } tail;
unsigned int padlen;
- int err;
-
- padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE;
- memset(preq->pad, 0, sizeof(preq->pad));
- sg_init_one(preq->src, preq->pad, padlen);
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_cipherpad_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
+ unsigned int total;
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_tail(req);
-}
-
-static void poly_cipher_done(void *data, int err)
-{
- async_done_continue(data, err, poly_cipherpad);
-}
-
-static int poly_cipher(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- struct scatterlist *crypt = req->src;
- int err;
+ if (sg != req->dst)
+ memcpy_sglist(req->dst, sg, req->assoclen);
if (rctx->cryptlen == req->cryptlen) /* encrypting */
- crypt = req->dst;
-
- crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_cipher_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
+ sg = req->dst;
- return poly_cipherpad(req);
-}
+ poly1305_init(&desc, rctx->key);
+ scatterwalk_start(&walk, sg);
-static void poly_adpad_done(void *data, int err)
-{
- async_done_continue(data, err, poly_cipher);
-}
+ total = rctx->assoclen;
+ while (total) {
+ unsigned int n = scatterwalk_next(&walk, total);
-static int poly_adpad(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- unsigned int padlen;
- int err;
+ poly1305_update(&desc, walk.addr, n);
+ scatterwalk_done_src(&walk, n);
+ total -= n;
+ }
padlen = -rctx->assoclen % POLY1305_BLOCK_SIZE;
- memset(preq->pad, 0, sizeof(preq->pad));
- sg_init_one(preq->src, preq->pad, padlen);
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_adpad_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_cipher(req);
-}
-
-static void poly_ad_done(void *data, int err)
-{
- async_done_continue(data, err, poly_adpad);
-}
-
-static int poly_ad(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
-
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_ad_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_adpad(req);
-}
-
-static void poly_setkey_done(void *data, int err)
-{
- async_done_continue(data, err, poly_ad);
-}
+ poly1305_update(&desc, zp, padlen);
-static int poly_setkey(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
+ scatterwalk_skip(&walk, req->assoclen - rctx->assoclen);
- sg_init_one(preq->src, rctx->key, sizeof(rctx->key));
+ total = rctx->cryptlen;
+ while (total) {
+ unsigned int n = scatterwalk_next(&walk, total);
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_setkey_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
- ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
-
- err = crypto_ahash_update(&preq->req);
- if (err)
- return err;
-
- return poly_ad(req);
-}
-
-static void poly_init_done(void *data, int err)
-{
- async_done_continue(data, err, poly_setkey);
-}
+ poly1305_update(&desc, walk.addr, n);
+ scatterwalk_done_src(&walk, n);
+ total -= n;
+ }
-static int poly_init(struct aead_request *req)
-{
- struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
- struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
- struct poly_req *preq = &rctx->u.poly;
- int err;
+ padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE;
+ poly1305_update(&desc, zp, padlen);
- ahash_request_set_callback(&preq->req, rctx->flags,
- poly_init_done, req);
- ahash_request_set_tfm(&preq->req, ctx->poly);
+ tail.assoclen = cpu_to_le64(rctx->assoclen);
+ tail.cryptlen = cpu_to_le64(rctx->cryptlen);
+ poly1305_update(&desc, tail.u8, sizeof(tail));
+ memzero_explicit(&tail, sizeof(tail));
+ poly1305_final(&desc, rctx->tag);
- err = crypto_ahash_init(&preq->req);
- if (err)
- return err;
+ if (rctx->cryptlen != req->cryptlen)
+ return chacha_decrypt(req);
- return poly_setkey(req);
+ memcpy_to_scatterwalk(&walk, rctx->tag, sizeof(rctx->tag));
+ return 0;
}
static void poly_genkey_done(void *data, int err)
{
- async_done_continue(data, err, poly_init);
+ async_done_continue(data, err, poly_hash);
}
static int poly_genkey(struct aead_request *req)
@@ -388,7 +230,7 @@ static int poly_genkey(struct aead_request *req)
if (err)
return err;
- return poly_init(req);
+ return poly_hash(req);
}
static void chacha_encrypt_done(void *data, int err)
@@ -437,14 +279,7 @@ static int chachapoly_encrypt(struct aead_request *req)
/* encrypt call chain:
* - chacha_encrypt/done()
* - poly_genkey/done()
- * - poly_init/done()
- * - poly_setkey/done()
- * - poly_ad/done()
- * - poly_adpad/done()
- * - poly_cipher/done()
- * - poly_cipherpad/done()
- * - poly_tail/done/continue()
- * - poly_copy_tag()
+ * - poly_hash()
*/
return chacha_encrypt(req);
}
@@ -458,13 +293,7 @@ static int chachapoly_decrypt(struct aead_request *req)
/* decrypt call chain:
* - poly_genkey/done()
- * - poly_init/done()
- * - poly_setkey/done()
- * - poly_ad/done()
- * - poly_adpad/done()
- * - poly_cipher/done()
- * - poly_cipherpad/done()
- * - poly_tail/done/continue()
+ * - poly_hash()
* - chacha_decrypt/done()
* - poly_verify_tag()
*/
@@ -503,21 +332,13 @@ static int chachapoly_init(struct crypto_aead *tfm)
struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst);
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_skcipher *chacha;
- struct crypto_ahash *poly;
unsigned long align;
- poly = crypto_spawn_ahash(&ictx->poly);
- if (IS_ERR(poly))
- return PTR_ERR(poly);
-
chacha = crypto_spawn_skcipher(&ictx->chacha);
- if (IS_ERR(chacha)) {
- crypto_free_ahash(poly);
+ if (IS_ERR(chacha))
return PTR_ERR(chacha);
- }
ctx->chacha = chacha;
- ctx->poly = poly;
ctx->saltlen = ictx->saltlen;
align = crypto_aead_alignmask(tfm);
@@ -525,12 +346,9 @@ static int chachapoly_init(struct crypto_aead *tfm)
crypto_aead_set_reqsize(
tfm,
align + offsetof(struct chachapoly_req_ctx, u) +
- max(offsetof(struct chacha_req, req) +
- sizeof(struct skcipher_request) +
- crypto_skcipher_reqsize(chacha),
- offsetof(struct poly_req, req) +
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(poly)));
+ offsetof(struct chacha_req, req) +
+ sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(chacha));
return 0;
}
@@ -539,7 +357,6 @@ static void chachapoly_exit(struct crypto_aead *tfm)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
- crypto_free_ahash(ctx->poly);
crypto_free_skcipher(ctx->chacha);
}
@@ -548,7 +365,6 @@ static void chachapoly_free(struct aead_instance *inst)
struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst);
crypto_drop_skcipher(&ctx->chacha);
- crypto_drop_ahash(&ctx->poly);
kfree(inst);
}
@@ -559,7 +375,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
struct aead_instance *inst;
struct chachapoly_instance_ctx *ctx;
struct skcipher_alg_common *chacha;
- struct hash_alg_common *poly;
int err;
if (ivsize > CHACHAPOLY_IV_SIZE)
@@ -581,14 +396,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
goto err_free_inst;
chacha = crypto_spawn_skcipher_alg_common(&ctx->chacha);
- err = crypto_grab_ahash(&ctx->poly, aead_crypto_instance(inst),
- crypto_attr_alg_name(tb[2]), 0, mask);
- if (err)
- goto err_free_inst;
- poly = crypto_spawn_ahash_alg(&ctx->poly);
-
err = -EINVAL;
- if (poly->digestsize != POLY1305_DIGEST_SIZE)
+ if (strcmp(crypto_attr_alg_name(tb[2]), "poly1305") &&
+ strcmp(crypto_attr_alg_name(tb[2]), "poly1305-generic"))
goto err_free_inst;
/* Need 16-byte IV size, including Initial Block Counter value */
if (chacha->ivsize != CHACHA_IV_SIZE)
@@ -599,16 +409,15 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s,%s)", name, chacha->base.cra_name,
- poly->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
+ "%s(%s,poly1305)", name,
+ chacha->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s,%s)", name, chacha->base.cra_driver_name,
- poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ "%s(%s,poly1305-generic)", name,
+ chacha->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_priority = (chacha->base.cra_priority +
- poly->base.cra_priority) / 2;
+ inst->alg.base.cra_priority = chacha->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = chacha->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
@@ -667,7 +476,7 @@ static void __exit chacha20poly1305_module_exit(void)
ARRAY_SIZE(rfc7539_tmpls));
}
-subsys_initcall(chacha20poly1305_module_init);
+module_init(chacha20poly1305_module_init);
module_exit(chacha20poly1305_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c
deleted file mode 100644
index ba7fcb47f9aa..000000000000
--- a/crypto/chacha_generic.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * ChaCha and XChaCha stream ciphers, including ChaCha20 (RFC7539)
- *
- * Copyright (C) 2015 Martin Willi
- * Copyright (C) 2018 Google LLC
- */
-
-#include <linux/unaligned.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/module.h>
-
-static int chacha_stream_xor(struct skcipher_request *req,
- const struct chacha_ctx *ctx, const u8 *iv)
-{
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- chacha_init_generic(state, ctx->key, iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
-
- chacha_crypt_generic(state, walk.dst.virt.addr,
- walk.src.virt.addr, nbytes, ctx->nrounds);
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static int crypto_chacha_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return chacha_stream_xor(req, ctx, req->iv);
-}
-
-static int crypto_xchacha_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct chacha_ctx subctx;
- u32 state[16];
- u8 real_iv[16];
-
- /* Compute the subkey given the original key and first 128 nonce bits */
- chacha_init_generic(state, ctx->key, req->iv);
- hchacha_block_generic(state, subctx.key, ctx->nrounds);
- subctx.nrounds = ctx->nrounds;
-
- /* Build the real IV */
- memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */
- memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */
-
- /* Generate the stream and XOR it with the data */
- return chacha_stream_xor(req, &subctx, real_iv);
-}
-
-static struct skcipher_alg algs[] = {
- {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = CHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = crypto_chacha_crypt,
- .decrypt = crypto_chacha_crypt,
- }, {
- .base.cra_name = "xchacha20",
- .base.cra_driver_name = "xchacha20-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha20_setkey,
- .encrypt = crypto_xchacha_crypt,
- .decrypt = crypto_xchacha_crypt,
- }, {
- .base.cra_name = "xchacha12",
- .base.cra_driver_name = "xchacha12-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA_KEY_SIZE,
- .max_keysize = CHACHA_KEY_SIZE,
- .ivsize = XCHACHA_IV_SIZE,
- .chunksize = CHACHA_BLOCK_SIZE,
- .setkey = chacha12_setkey,
- .encrypt = crypto_xchacha_crypt,
- .decrypt = crypto_xchacha_crypt,
- }
-};
-
-static int __init chacha_generic_mod_init(void)
-{
- return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit chacha_generic_mod_fini(void)
-{
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
-}
-
-subsys_initcall(chacha_generic_mod_init);
-module_exit(chacha_generic_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
-MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (generic)");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-generic");
-MODULE_ALIAS_CRYPTO("xchacha20");
-MODULE_ALIAS_CRYPTO("xchacha20-generic");
-MODULE_ALIAS_CRYPTO("xchacha12");
-MODULE_ALIAS_CRYPTO("xchacha12-generic");
diff --git a/crypto/cmac.c b/crypto/cmac.c
index c66a0f4d8808..1b03964abe00 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -13,9 +13,12 @@
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
+#include <crypto/utils.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
/*
* +------------------------
@@ -31,22 +34,6 @@ struct cmac_tfm_ctx {
__be64 consts[];
};
-/*
- * +------------------------
- * | <shash desc>
- * +------------------------
- * | cmac_desc_ctx
- * +------------------------
- * | odds (block size)
- * +------------------------
- * | prev (block size)
- * +------------------------
- */
-struct cmac_desc_ctx {
- unsigned int len;
- u8 odds[];
-};
-
static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
@@ -102,13 +89,10 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
static int crypto_cmac_digest_init(struct shash_desc *pdesc)
{
- struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_blocksize(pdesc->tfm);
- u8 *prev = &ctx->odds[bs];
+ u8 *prev = shash_desc_ctx(pdesc);
- ctx->len = 0;
memset(prev, 0, bs);
-
return 0;
}
@@ -117,77 +101,36 @@ static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p,
{
struct crypto_shash *parent = pdesc->tfm;
struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = ctx->odds;
- u8 *prev = odds + bs;
-
- /* checking the data can fill the block */
- if ((ctx->len + len) <= bs) {
- memcpy(odds + ctx->len, p, len);
- ctx->len += len;
- return 0;
- }
-
- /* filling odds with new data and encrypting it */
- memcpy(odds + ctx->len, p, bs - ctx->len);
- len -= bs - ctx->len;
- p += bs - ctx->len;
-
- crypto_xor(prev, odds, bs);
- crypto_cipher_encrypt_one(tfm, prev, prev);
+ u8 *prev = shash_desc_ctx(pdesc);
- /* clearing the length */
- ctx->len = 0;
-
- /* encrypting the rest of data */
- while (len > bs) {
+ do {
crypto_xor(prev, p, bs);
crypto_cipher_encrypt_one(tfm, prev, prev);
p += bs;
len -= bs;
- }
-
- /* keeping the surplus of blocksize */
- if (len) {
- memcpy(odds, p, len);
- ctx->len = len;
- }
-
- return 0;
+ } while (len >= bs);
+ return len;
}
-static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out)
+static int crypto_cmac_digest_finup(struct shash_desc *pdesc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = ctx->odds;
- u8 *prev = odds + bs;
+ u8 *prev = shash_desc_ctx(pdesc);
unsigned int offset = 0;
- if (ctx->len != bs) {
- unsigned int rlen;
- u8 *p = odds + ctx->len;
-
- *p = 0x80;
- p++;
-
- rlen = bs - ctx->len - 1;
- if (rlen)
- memset(p, 0, rlen);
-
+ crypto_xor(prev, src, len);
+ if (len != bs) {
+ prev[len] ^= 0x80;
offset += bs;
}
-
- crypto_xor(prev, odds, bs);
crypto_xor(prev, (const u8 *)tctx->consts + offset, bs);
-
crypto_cipher_encrypt_one(tfm, out, prev);
-
return 0;
}
@@ -269,13 +212,14 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_ctxsize = sizeof(struct cmac_tfm_ctx) +
alg->cra_blocksize * 2;
+ inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize = sizeof(struct cmac_desc_ctx) +
- alg->cra_blocksize * 2;
+ inst->alg.descsize = alg->cra_blocksize;
inst->alg.init = crypto_cmac_digest_init;
inst->alg.update = crypto_cmac_digest_update;
- inst->alg.final = crypto_cmac_digest_final;
+ inst->alg.finup = crypto_cmac_digest_finup;
inst->alg.setkey = crypto_cmac_digest_setkey;
inst->alg.init_tfm = cmac_init_tfm;
inst->alg.clone_tfm = cmac_clone_tfm;
@@ -307,7 +251,7 @@ static void __exit crypto_cmac_module_exit(void)
crypto_unregister_template(&crypto_cmac_tmpl);
}
-subsys_initcall(crypto_cmac_module_init);
+module_init(crypto_cmac_module_init);
module_exit(crypto_cmac_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/compress.c b/crypto/compress.c
deleted file mode 100644
index 9048fe390c46..000000000000
--- a/crypto/compress.c
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cryptographic API.
- *
- * Compression operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- */
-#include <linux/crypto.h>
-#include "internal.h"
-
-int crypto_comp_compress(struct crypto_comp *comp,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct crypto_tfm *tfm = crypto_comp_tfm(comp);
-
- return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst,
- dlen);
-}
-EXPORT_SYMBOL_GPL(crypto_comp_compress);
-
-int crypto_comp_decompress(struct crypto_comp *comp,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct crypto_tfm *tfm = crypto_comp_tfm(comp);
-
- return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst,
- dlen);
-}
-EXPORT_SYMBOL_GPL(crypto_comp_decompress);
diff --git a/crypto/compress.h b/crypto/compress.h
index c3cedfb5e606..f7737a1fcbbd 100644
--- a/crypto/compress.h
+++ b/crypto/compress.h
@@ -15,8 +15,6 @@ struct acomp_req;
struct comp_alg_common;
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
-struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
-void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
void comp_prepare_alg(struct comp_alg_common *alg);
diff --git a/crypto/crc32_generic.c b/crypto/crc32.c
index 783a30b27398..cc371d42601f 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32.c
@@ -172,7 +172,7 @@ static void __exit crc32_mod_fini(void)
crypto_unregister_shashes(algs, num_algs);
}
-subsys_initcall(crc32_mod_init);
+module_init(crc32_mod_init);
module_exit(crc32_mod_fini);
MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c.c
index 985da981d6e2..e5377898414a 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c.c
@@ -85,7 +85,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- ctx->crc = crc32c_le_base(ctx->crc, data, length);
+ ctx->crc = crc32c_base(ctx->crc, data, length);
return 0;
}
@@ -94,7 +94,7 @@ static int chksum_update_arch(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- ctx->crc = __crc32c_le(ctx->crc, data, length);
+ ctx->crc = crc32c(ctx->crc, data, length);
return 0;
}
@@ -108,14 +108,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out)
{
- put_unaligned_le32(~crc32c_le_base(*crcp, data, len), out);
+ put_unaligned_le32(~crc32c_base(*crcp, data, len), out);
return 0;
}
static int __chksum_finup_arch(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
- put_unaligned_le32(~__crc32c_le(*crcp, data, len), out);
+ put_unaligned_le32(~crc32c(*crcp, data, len), out);
return 0;
}
@@ -212,7 +212,7 @@ static void __exit crc32c_mod_fini(void)
crypto_unregister_shashes(algs, num_algs);
}
-subsys_initcall(crc32c_mod_init);
+module_init(crc32c_mod_init);
module_exit(crc32c_mod_fini);
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
diff --git a/crypto/crc64_rocksoft_generic.c b/crypto/crc64_rocksoft_generic.c
deleted file mode 100644
index ce0f3059b912..000000000000
--- a/crypto/crc64_rocksoft_generic.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/crc64.h>
-#include <linux/module.h>
-#include <crypto/internal/hash.h>
-#include <linux/unaligned.h>
-
-static int chksum_init(struct shash_desc *desc)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- *crc = 0;
-
- return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- *crc = crc64_rocksoft_generic(*crc, data, length);
-
- return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- put_unaligned_le64(*crc, out);
- return 0;
-}
-
-static int __chksum_finup(u64 crc, const u8 *data, unsigned int len, u8 *out)
-{
- crc = crc64_rocksoft_generic(crc, data, len);
- put_unaligned_le64(crc, out);
- return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- return __chksum_finup(*crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return __chksum_finup(0, data, length, out);
-}
-
-static struct shash_alg alg = {
- .digestsize = sizeof(u64),
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(u64),
- .base = {
- .cra_name = CRC64_ROCKSOFT_STRING,
- .cra_driver_name = "crc64-rocksoft-generic",
- .cra_priority = 200,
- .cra_blocksize = 1,
- .cra_module = THIS_MODULE,
- }
-};
-
-static int __init crc64_rocksoft_init(void)
-{
- return crypto_register_shash(&alg);
-}
-
-static void __exit crc64_rocksoft_exit(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-module_init(crc64_rocksoft_init);
-module_exit(crc64_rocksoft_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Rocksoft model CRC64 calculation.");
-MODULE_ALIAS_CRYPTO("crc64-rocksoft");
-MODULE_ALIAS_CRYPTO("crc64-rocksoft-generic");
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
deleted file mode 100644
index 259cb01932cb..000000000000
--- a/crypto/crct10dif_generic.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Cryptographic API.
- *
- * T10 Data Integrity Field CRC16 Crypto Transform
- *
- * Copyright (c) 2007 Oracle Corporation. All rights reserved.
- * Written by Martin K. Petersen <martin.petersen@oracle.com>
- * Copyright (C) 2013 Intel Corporation
- * Author: Tim Chen <tim.c.chen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/crc-t10dif.h>
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-struct chksum_desc_ctx {
- __u16 crc;
-};
-
-/*
- * Steps through buffer one byte at a time, calculates reflected
- * crc using table.
- */
-
-static int chksum_init(struct shash_desc *desc)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = 0;
-
- return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
- return 0;
-}
-
-static int chksum_update_arch(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc_t10dif_update(ctx->crc, data, length);
- return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- *(__u16 *)out = ctx->crc;
- return 0;
-}
-
-static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
-{
- *(__u16 *)out = crc_t10dif_generic(crc, data, len);
- return 0;
-}
-
-static int __chksum_finup_arch(__u16 crc, const u8 *data, unsigned int len,
- u8 *out)
-{
- *(__u16 *)out = crc_t10dif_update(crc, data, len);
- return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(ctx->crc, data, len, out);
-}
-
-static int chksum_finup_arch(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup_arch(ctx->crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return __chksum_finup(0, data, length, out);
-}
-
-static int chksum_digest_arch(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return __chksum_finup_arch(0, data, length, out);
-}
-
-static struct shash_alg algs[] = {{
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base.cra_name = "crct10dif",
- .base.cra_driver_name = "crct10dif-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}, {
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = chksum_init,
- .update = chksum_update_arch,
- .final = chksum_final,
- .finup = chksum_finup_arch,
- .digest = chksum_digest_arch,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base.cra_name = "crct10dif",
- .base.cra_driver_name = "crct10dif-" __stringify(ARCH),
- .base.cra_priority = 150,
- .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}};
-
-static int num_algs;
-
-static int __init crct10dif_mod_init(void)
-{
- /* register the arch flavor only if it differs from the generic one */
- num_algs = 1 + crc_t10dif_is_optimized();
-
- return crypto_register_shashes(algs, num_algs);
-}
-
-static void __exit crct10dif_mod_fini(void)
-{
- crypto_unregister_shashes(algs, num_algs);
-}
-
-subsys_initcall(crct10dif_mod_init);
-module_exit(crct10dif_mod_fini);
-
-MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
-MODULE_DESCRIPTION("T10 DIF CRC calculation.");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("crct10dif");
-MODULE_ALIAS_CRYPTO("crct10dif-generic");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 31d022d47f7a..5bb6f8d88cc2 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -1138,7 +1138,7 @@ static void __exit cryptd_exit(void)
crypto_unregister_template(&cryptd_tmpl);
}
-subsys_initcall(cryptd_init);
+module_init(cryptd_init);
module_exit(cryptd_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index c7c16da5e649..445d3c113ee1 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -23,9 +23,6 @@
#define CRYPTO_ENGINE_MAX_QLEN 10
-/* Temporary algorithm flag used to indicate an updated driver. */
-#define CRYPTO_ALG_ENGINE 0x200
-
struct crypto_engine_alg {
struct crypto_alg base;
struct crypto_engine_op op;
@@ -148,16 +145,9 @@ start_request:
}
}
- if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
- alg = container_of(async_req->tfm->__crt_alg,
- struct crypto_engine_alg, base);
- op = &alg->op;
- } else {
- dev_err(engine->dev, "failed to do request\n");
- ret = -EINVAL;
- goto req_err_1;
- }
-
+ alg = container_of(async_req->tfm->__crt_alg,
+ struct crypto_engine_alg, base);
+ op = &alg->op;
ret = op->do_one_request(engine, async_req);
/* Request unsuccessfully executed by hardware */
@@ -569,9 +559,6 @@ int crypto_engine_register_aead(struct aead_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aead);
@@ -614,9 +601,6 @@ int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);
@@ -660,9 +644,6 @@ int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);
@@ -677,9 +658,6 @@ int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);
@@ -694,9 +672,6 @@ int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
-
- alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
return crypto_register_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 5b84b0f7cc17..34588f39fdfc 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -15,25 +15,11 @@
#include <crypto/null.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/mm.h>
#include <linux/string.h>
-static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
-static struct crypto_sync_skcipher *crypto_default_null_skcipher;
-static int crypto_default_null_skcipher_refcnt;
-
-static int null_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- if (slen > *dlen)
- return -EINVAL;
- memcpy(dst, src, slen);
- *dlen = slen;
- return 0;
-}
-
static int null_init(struct shash_desc *desc)
{
return 0;
@@ -75,19 +61,9 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
static int null_skcipher_crypt(struct skcipher_request *req)
{
- struct skcipher_walk walk;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes) {
- if (walk.src.virt.addr != walk.dst.virt.addr)
- memcpy(walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes);
- err = skcipher_walk_done(&walk, 0);
- }
-
- return err;
+ if (req->src != req->dst)
+ memcpy_sglist(req->dst, req->src, req->cryptlen);
+ return 0;
}
static struct shash_alg digest_null = {
@@ -121,7 +97,7 @@ static struct skcipher_alg skcipher_null = {
.decrypt = null_skcipher_crypt,
};
-static struct crypto_alg null_algs[] = { {
+static struct crypto_alg cipher_null = {
.cra_name = "cipher_null",
.cra_driver_name = "cipher_null-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
@@ -134,62 +110,16 @@ static struct crypto_alg null_algs[] = { {
.cia_setkey = null_setkey,
.cia_encrypt = null_crypt,
.cia_decrypt = null_crypt } }
-}, {
- .cra_name = "compress_null",
- .cra_driver_name = "compress_null-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_blocksize = NULL_BLOCK_SIZE,
- .cra_ctxsize = 0,
- .cra_module = THIS_MODULE,
- .cra_u = { .compress = {
- .coa_compress = null_compress,
- .coa_decompress = null_compress } }
-} };
+};
-MODULE_ALIAS_CRYPTO("compress_null");
MODULE_ALIAS_CRYPTO("digest_null");
MODULE_ALIAS_CRYPTO("cipher_null");
-struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
-{
- struct crypto_sync_skcipher *tfm;
-
- mutex_lock(&crypto_default_null_skcipher_lock);
- tfm = crypto_default_null_skcipher;
-
- if (!tfm) {
- tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
- if (IS_ERR(tfm))
- goto unlock;
-
- crypto_default_null_skcipher = tfm;
- }
-
- crypto_default_null_skcipher_refcnt++;
-
-unlock:
- mutex_unlock(&crypto_default_null_skcipher_lock);
-
- return tfm;
-}
-EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher);
-
-void crypto_put_default_null_skcipher(void)
-{
- mutex_lock(&crypto_default_null_skcipher_lock);
- if (!--crypto_default_null_skcipher_refcnt) {
- crypto_free_sync_skcipher(crypto_default_null_skcipher);
- crypto_default_null_skcipher = NULL;
- }
- mutex_unlock(&crypto_default_null_skcipher_lock);
-}
-EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher);
-
static int __init crypto_null_mod_init(void)
{
int ret = 0;
- ret = crypto_register_algs(null_algs, ARRAY_SIZE(null_algs));
+ ret = crypto_register_alg(&cipher_null);
if (ret < 0)
goto out;
@@ -206,19 +136,19 @@ static int __init crypto_null_mod_init(void)
out_unregister_shash:
crypto_unregister_shash(&digest_null);
out_unregister_algs:
- crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
+ crypto_unregister_alg(&cipher_null);
out:
return ret;
}
static void __exit crypto_null_mod_fini(void)
{
- crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
+ crypto_unregister_alg(&cipher_null);
crypto_unregister_shash(&digest_null);
crypto_unregister_skcipher(&skcipher_null);
}
-subsys_initcall(crypto_null_mod_init);
+module_init(crypto_null_mod_init);
module_exit(crypto_null_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 6c571834e86a..aad429bef03e 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -84,17 +84,6 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
sizeof(rcipher), &rcipher);
}
-static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_report_comp rcomp;
-
- memset(&rcomp, 0, sizeof(rcomp));
-
- strscpy(rcomp.type, "compression", sizeof(rcomp.type));
-
- return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp);
-}
-
static int crypto_report_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg, struct sk_buff *skb)
{
@@ -136,11 +125,6 @@ static int crypto_report_one(struct crypto_alg *alg,
goto nla_put_failure;
break;
- case CRYPTO_ALG_TYPE_COMPRESS:
- if (crypto_report_comp(skb, alg))
- goto nla_put_failure;
-
- break;
}
out:
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 73c0d6e53b2f..a388f0ceb3a0 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -33,7 +33,7 @@ static void crypto_ctr_crypt_final(struct skcipher_walk *walk,
u8 *ctrblk = walk->iv;
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
- u8 *src = walk->src.virt.addr;
+ const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
@@ -50,7 +50,7 @@ static int crypto_ctr_crypt_segment(struct skcipher_walk *walk,
crypto_cipher_alg(tfm)->cia_encrypt;
unsigned int bsize = crypto_cipher_blocksize(tfm);
u8 *ctrblk = walk->iv;
- u8 *src = walk->src.virt.addr;
+ const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
@@ -77,20 +77,20 @@ static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk,
unsigned int bsize = crypto_cipher_blocksize(tfm);
unsigned long alignmask = crypto_cipher_alignmask(tfm);
unsigned int nbytes = walk->nbytes;
+ u8 *dst = walk->dst.virt.addr;
u8 *ctrblk = walk->iv;
- u8 *src = walk->src.virt.addr;
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
do {
/* create keystream */
fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
- crypto_xor(src, keystream, bsize);
+ crypto_xor(dst, keystream, bsize);
/* increment counter in counterblock */
crypto_inc(ctrblk, bsize);
- src += bsize;
+ dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
@@ -350,7 +350,7 @@ static void __exit crypto_ctr_module_exit(void)
ARRAY_SIZE(crypto_ctr_tmpls));
}
-subsys_initcall(crypto_ctr_module_init);
+module_init(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/cts.c b/crypto/cts.c
index f5b42156b6c7..48898d5e24ff 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -402,7 +402,7 @@ static void __exit crypto_cts_module_exit(void)
crypto_unregister_template(&crypto_cts_tmpl);
}
-subsys_initcall(crypto_cts_module_init);
+module_init(crypto_cts_module_init);
module_exit(crypto_cts_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/curve25519-generic.c b/crypto/curve25519-generic.c
index 68a673262e04..f3e56e73c66c 100644
--- a/crypto/curve25519-generic.c
+++ b/crypto/curve25519-generic.c
@@ -82,7 +82,7 @@ static void __exit curve25519_exit(void)
crypto_unregister_kpp(&curve25519_alg);
}
-subsys_initcall(curve25519_init);
+module_init(curve25519_init);
module_exit(curve25519_exit);
MODULE_ALIAS_CRYPTO("curve25519");
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 98e8bcb81a6a..fe8e4ad0fee1 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -6,309 +6,250 @@
* by IPCOMP (RFC 3173 & RFC 2394).
*
* Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
- *
- * FIXME: deflate transforms will require up to a total of about 436k of kernel
- * memory on i386 (390k for compression, the rest for decompression), as the
- * current zlib kernel code uses a worst case pre-allocation system by default.
- * This needs to be fixed so that the amount of memory required is properly
- * related to the winbits and memlevel parameters.
- *
- * The default winbits of 11 should suit most packets, and it may be something
- * to configure on a per-tfm basis in the future.
- *
- * Currently, compression history is not maintained between tfm calls, as
- * it is not needed for IPCOMP and keeps the code simpler. It can be
- * implemented if someone wants it.
+ * Copyright (c) 2023 Google, LLC. <ardb@kernel.org>
+ * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au>
*/
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/crypto.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/zlib.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-#include <crypto/internal/scompress.h>
#define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION
#define DEFLATE_DEF_WINBITS 11
#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
-struct deflate_ctx {
- struct z_stream_s comp_stream;
- struct z_stream_s decomp_stream;
+struct deflate_stream {
+ struct z_stream_s stream;
+ u8 workspace[];
};
-static int deflate_comp_init(struct deflate_ctx *ctx)
-{
- int ret = 0;
- struct z_stream_s *stream = &ctx->comp_stream;
-
- stream->workspace = vzalloc(zlib_deflate_workspacesize(
- -DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL));
- if (!stream->workspace) {
- ret = -ENOMEM;
- goto out;
- }
- ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
- -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
- Z_DEFAULT_STRATEGY);
- if (ret != Z_OK) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(stream->workspace);
- goto out;
-}
+static DEFINE_MUTEX(deflate_stream_lock);
-static int deflate_decomp_init(struct deflate_ctx *ctx)
+static void *deflate_alloc_stream(void)
{
- int ret = 0;
- struct z_stream_s *stream = &ctx->decomp_stream;
+ size_t size = max(zlib_inflate_workspacesize(),
+ zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS,
+ DEFLATE_DEF_MEMLEVEL));
+ struct deflate_stream *ctx;
- stream->workspace = vzalloc(zlib_inflate_workspacesize());
- if (!stream->workspace) {
- ret = -ENOMEM;
- goto out;
- }
- ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS);
- if (ret != Z_OK) {
- ret = -EINVAL;
- goto out_free;
- }
-out:
- return ret;
-out_free:
- vfree(stream->workspace);
- goto out;
-}
+ ctx = kvmalloc(sizeof(*ctx) + size, GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
-static void deflate_comp_exit(struct deflate_ctx *ctx)
-{
- zlib_deflateEnd(&ctx->comp_stream);
- vfree(ctx->comp_stream.workspace);
-}
+ ctx->stream.workspace = ctx->workspace;
-static void deflate_decomp_exit(struct deflate_ctx *ctx)
-{
- zlib_inflateEnd(&ctx->decomp_stream);
- vfree(ctx->decomp_stream.workspace);
+ return ctx;
}
-static int __deflate_init(void *ctx)
+static struct crypto_acomp_streams deflate_streams = {
+ .alloc_ctx = deflate_alloc_stream,
+ .cfree_ctx = kvfree,
+};
+
+static int deflate_compress_one(struct acomp_req *req,
+ struct deflate_stream *ds)
{
+ struct z_stream_s *stream = &ds->stream;
+ struct acomp_walk walk;
int ret;
- ret = deflate_comp_init(ctx);
+ ret = acomp_walk_virt(&walk, req, true);
if (ret)
- goto out;
- ret = deflate_decomp_init(ctx);
- if (ret)
- deflate_comp_exit(ctx);
-out:
- return ret;
-}
+ return ret;
-static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
-{
- struct deflate_ctx *ctx;
- int ret;
+ do {
+ unsigned int dcur;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
+ dcur = acomp_walk_next_dst(&walk);
+ if (!dcur)
+ return -ENOSPC;
- ret = __deflate_init(ctx);
- if (ret) {
- kfree(ctx);
- return ERR_PTR(ret);
- }
+ stream->avail_out = dcur;
+ stream->next_out = walk.dst.virt.addr;
- return ctx;
-}
+ do {
+ int flush = Z_FINISH;
+ unsigned int scur;
-static int deflate_init(struct crypto_tfm *tfm)
-{
- struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
+ stream->avail_in = 0;
+ stream->next_in = NULL;
- return __deflate_init(ctx);
-}
+ scur = acomp_walk_next_src(&walk);
+ if (scur) {
+ if (acomp_walk_more_src(&walk, scur))
+ flush = Z_NO_FLUSH;
+ stream->avail_in = scur;
+ stream->next_in = walk.src.virt.addr;
+ }
-static void __deflate_exit(void *ctx)
-{
- deflate_comp_exit(ctx);
- deflate_decomp_exit(ctx);
-}
+ ret = zlib_deflate(stream, flush);
-static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx)
-{
- __deflate_exit(ctx);
- kfree_sensitive(ctx);
-}
+ if (scur) {
+ scur -= stream->avail_in;
+ acomp_walk_done_src(&walk, scur);
+ }
+ } while (ret == Z_OK && stream->avail_out);
-static void deflate_exit(struct crypto_tfm *tfm)
-{
- struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
+ acomp_walk_done_dst(&walk, dcur);
+ } while (ret == Z_OK);
- __deflate_exit(ctx);
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ req->dlen = stream->total_out;
+ return 0;
}
-static int __deflate_compress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
+static int deflate_compress(struct acomp_req *req)
{
- int ret = 0;
- struct deflate_ctx *dctx = ctx;
- struct z_stream_s *stream = &dctx->comp_stream;
+ struct crypto_acomp_stream *s;
+ struct deflate_stream *ds;
+ int err;
+
+ s = crypto_acomp_lock_stream_bh(&deflate_streams);
+ ds = s->ctx;
- ret = zlib_deflateReset(stream);
- if (ret != Z_OK) {
- ret = -EINVAL;
+ err = zlib_deflateInit2(&ds->stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
+ -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
+ Z_DEFAULT_STRATEGY);
+ if (err != Z_OK) {
+ err = -EINVAL;
goto out;
}
- stream->next_in = (u8 *)src;
- stream->avail_in = slen;
- stream->next_out = (u8 *)dst;
- stream->avail_out = *dlen;
+ err = deflate_compress_one(req, ds);
- ret = zlib_deflate(stream, Z_FINISH);
- if (ret != Z_STREAM_END) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *dlen = stream->total_out;
out:
- return ret;
+ crypto_acomp_unlock_stream_bh(s);
+
+ return err;
}
-static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int deflate_decompress_one(struct acomp_req *req,
+ struct deflate_stream *ds)
{
- struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct z_stream_s *stream = &ds->stream;
+ bool out_of_space = false;
+ struct acomp_walk walk;
+ int ret;
- return __deflate_compress(src, slen, dst, dlen, dctx);
-}
+ ret = acomp_walk_virt(&walk, req, true);
+ if (ret)
+ return ret;
-static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
-{
- return __deflate_compress(src, slen, dst, dlen, ctx);
+ do {
+ unsigned int scur;
+
+ stream->avail_in = 0;
+ stream->next_in = NULL;
+
+ scur = acomp_walk_next_src(&walk);
+ if (scur) {
+ stream->avail_in = scur;
+ stream->next_in = walk.src.virt.addr;
+ }
+
+ do {
+ unsigned int dcur;
+
+ dcur = acomp_walk_next_dst(&walk);
+ if (!dcur) {
+ out_of_space = true;
+ break;
+ }
+
+ stream->avail_out = dcur;
+ stream->next_out = walk.dst.virt.addr;
+
+ ret = zlib_inflate(stream, Z_NO_FLUSH);
+
+ dcur -= stream->avail_out;
+ acomp_walk_done_dst(&walk, dcur);
+ } while (ret == Z_OK && stream->avail_in);
+
+ if (scur)
+ acomp_walk_done_src(&walk, scur);
+
+ if (out_of_space)
+ return -ENOSPC;
+ } while (ret == Z_OK);
+
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ req->dlen = stream->total_out;
+ return 0;
}
-static int __deflate_decompress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
+static int deflate_decompress(struct acomp_req *req)
{
+ struct crypto_acomp_stream *s;
+ struct deflate_stream *ds;
+ int err;
- int ret = 0;
- struct deflate_ctx *dctx = ctx;
- struct z_stream_s *stream = &dctx->decomp_stream;
+ s = crypto_acomp_lock_stream_bh(&deflate_streams);
+ ds = s->ctx;
- ret = zlib_inflateReset(stream);
- if (ret != Z_OK) {
- ret = -EINVAL;
+ err = zlib_inflateInit2(&ds->stream, -DEFLATE_DEF_WINBITS);
+ if (err != Z_OK) {
+ err = -EINVAL;
goto out;
}
- stream->next_in = (u8 *)src;
- stream->avail_in = slen;
- stream->next_out = (u8 *)dst;
- stream->avail_out = *dlen;
-
- ret = zlib_inflate(stream, Z_SYNC_FLUSH);
- /*
- * Work around a bug in zlib, which sometimes wants to taste an extra
- * byte when being used in the (undocumented) raw deflate mode.
- * (From USAGI).
- */
- if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
- u8 zerostuff = 0;
- stream->next_in = &zerostuff;
- stream->avail_in = 1;
- ret = zlib_inflate(stream, Z_FINISH);
- }
- if (ret != Z_STREAM_END) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *dlen = stream->total_out;
+ err = deflate_decompress_one(req, ds);
+
out:
- return ret;
+ crypto_acomp_unlock_stream_bh(s);
+
+ return err;
}
-static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
+static int deflate_init(struct crypto_acomp *tfm)
{
- struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+ int ret;
- return __deflate_decompress(src, slen, dst, dlen, dctx);
-}
+ mutex_lock(&deflate_stream_lock);
+ ret = crypto_acomp_alloc_streams(&deflate_streams);
+ mutex_unlock(&deflate_stream_lock);
-static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen,
- void *ctx)
-{
- return __deflate_decompress(src, slen, dst, dlen, ctx);
+ return ret;
}
-static struct crypto_alg alg = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct deflate_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = deflate_init,
- .cra_exit = deflate_exit,
- .cra_u = { .compress = {
- .coa_compress = deflate_compress,
- .coa_decompress = deflate_decompress } }
-};
-
-static struct scomp_alg scomp = {
- .alloc_ctx = deflate_alloc_ctx,
- .free_ctx = deflate_free_ctx,
- .compress = deflate_scompress,
- .decompress = deflate_sdecompress,
- .base = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-scomp",
- .cra_module = THIS_MODULE,
- }
+static struct acomp_alg acomp = {
+ .compress = deflate_compress,
+ .decompress = deflate_decompress,
+ .init = deflate_init,
+ .base.cra_name = "deflate",
+ .base.cra_driver_name = "deflate-generic",
+ .base.cra_flags = CRYPTO_ALG_REQ_VIRT,
+ .base.cra_module = THIS_MODULE,
};
static int __init deflate_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg);
- return ret;
- }
-
- return ret;
+ return crypto_register_acomp(&acomp);
}
static void __exit deflate_mod_fini(void)
{
- crypto_unregister_alg(&alg);
- crypto_unregister_scomp(&scomp);
+ crypto_unregister_acomp(&acomp);
+ crypto_acomp_free_streams(&deflate_streams);
}
-subsys_initcall(deflate_mod_init);
+module_init(deflate_mod_init);
module_exit(deflate_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
+MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
+MODULE_AUTHOR("Herbert Xu <herbert@gondor.apana.org.au>");
MODULE_ALIAS_CRYPTO("deflate");
MODULE_ALIAS_CRYPTO("deflate-generic");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 1274e18d3eb9..fce341400914 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -122,7 +122,7 @@ static void __exit des_generic_mod_fini(void)
crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs));
}
-subsys_initcall(des_generic_mod_init);
+module_init(des_generic_mod_init);
module_exit(des_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/dh.c b/crypto/dh.c
index afc0fd847761..8250eeeebd0f 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -920,7 +920,7 @@ static void __exit dh_exit(void)
crypto_unregister_kpp(&dh);
}
-subsys_initcall(dh_init);
+module_init(dh_init);
module_exit(dh_exit);
MODULE_ALIAS_CRYPTO("dh");
MODULE_LICENSE("GPL");
diff --git a/crypto/drbg.c b/crypto/drbg.c
index f28dfc2511a2..dbe4c8bb5ceb 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -2132,7 +2132,7 @@ static void __exit drbg_exit(void)
crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}
-subsys_initcall(drbg_init);
+module_init(drbg_init);
module_exit(drbg_exit);
#ifndef CRYPTO_DRBG_HASH_STRING
#define CRYPTO_DRBG_HASH_STRING ""
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 95d7e972865a..cd1b20456dad 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -219,7 +219,7 @@ static void __exit crypto_ecb_module_exit(void)
crypto_unregister_template(&crypto_ecb_tmpl);
}
-subsys_initcall(crypto_ecb_module_init);
+module_init(crypto_ecb_module_init);
module_exit(crypto_ecb_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 50ad2d4ed672..6cf9a945fc6c 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(ecc_get_curve);
void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
u64 *out, unsigned int ndigits)
{
- int diff = ndigits - DIV_ROUND_UP(nbytes, sizeof(u64));
+ int diff = ndigits - DIV_ROUND_UP_POW2(nbytes, sizeof(u64));
unsigned int o = nbytes & 7;
__be64 msd = 0;
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 72cfd1590156..9f0b93b3166d 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -240,7 +240,7 @@ static void __exit ecdh_exit(void)
crypto_unregister_kpp(&ecdh_nist_p384);
}
-subsys_initcall(ecdh_init);
+module_init(ecdh_init);
module_exit(ecdh_exit);
MODULE_ALIAS_CRYPTO("ecdh");
MODULE_LICENSE("GPL");
diff --git a/crypto/ecdsa-p1363.c b/crypto/ecdsa-p1363.c
index eaae7214d69b..e0c55c64711c 100644
--- a/crypto/ecdsa-p1363.c
+++ b/crypto/ecdsa-p1363.c
@@ -21,8 +21,9 @@ static int ecdsa_p1363_verify(struct crypto_sig *tfm,
const void *digest, unsigned int dlen)
{
struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
- unsigned int keylen = crypto_sig_keysize(ctx->child);
- unsigned int ndigits = DIV_ROUND_UP(keylen, sizeof(u64));
+ unsigned int keylen = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ BITS_PER_BYTE);
+ unsigned int ndigits = DIV_ROUND_UP_POW2(keylen, sizeof(u64));
struct ecdsa_raw_sig sig;
if (slen != 2 * keylen)
@@ -45,7 +46,8 @@ static unsigned int ecdsa_p1363_max_size(struct crypto_sig *tfm)
{
struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
- return 2 * crypto_sig_keysize(ctx->child);
+ return 2 * DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ BITS_PER_BYTE);
}
static unsigned int ecdsa_p1363_digest_size(struct crypto_sig *tfm)
diff --git a/crypto/ecdsa-x962.c b/crypto/ecdsa-x962.c
index 6a77c13e192b..ee71594d10a0 100644
--- a/crypto/ecdsa-x962.c
+++ b/crypto/ecdsa-x962.c
@@ -81,8 +81,8 @@ static int ecdsa_x962_verify(struct crypto_sig *tfm,
struct ecdsa_x962_signature_ctx sig_ctx;
int err;
- sig_ctx.ndigits = DIV_ROUND_UP(crypto_sig_keysize(ctx->child),
- sizeof(u64));
+ sig_ctx.ndigits = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ sizeof(u64) * BITS_PER_BYTE);
err = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, src, slen);
if (err < 0)
@@ -103,7 +103,8 @@ static unsigned int ecdsa_x962_max_size(struct crypto_sig *tfm)
{
struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
struct sig_alg *alg = crypto_sig_alg(ctx->child);
- int slen = crypto_sig_keysize(ctx->child);
+ int slen = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ BITS_PER_BYTE);
/*
* Verify takes ECDSA-Sig-Value (described in RFC 5480) as input,
diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
index 117526d15dde..ce8e4364842f 100644
--- a/crypto/ecdsa.c
+++ b/crypto/ecdsa.c
@@ -167,7 +167,7 @@ static unsigned int ecdsa_key_size(struct crypto_sig *tfm)
{
struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
- return DIV_ROUND_UP(ctx->curve->nbits, 8);
+ return ctx->curve->nbits;
}
static unsigned int ecdsa_digest_size(struct crypto_sig *tfm)
@@ -334,7 +334,7 @@ static void __exit ecdsa_exit(void)
crypto_unregister_sig(&ecdsa_nist_p521);
}
-subsys_initcall(ecdsa_init);
+module_init(ecdsa_init);
module_exit(ecdsa_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 69686668625e..e0a2d3209938 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -32,7 +32,6 @@ static int echainiv_encrypt(struct aead_request *req)
u64 seqno;
u8 *info;
unsigned int ivsize = crypto_aead_ivsize(geniv);
- int err;
if (req->cryptlen < ivsize)
return -EINVAL;
@@ -41,20 +40,9 @@ static int echainiv_encrypt(struct aead_request *req)
info = req->iv;
- if (req->src != req->dst) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
-
- skcipher_request_set_sync_tfm(nreq, ctx->sknull);
- skcipher_request_set_callback(nreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(nreq, req->src, req->dst,
- req->assoclen + req->cryptlen,
- NULL);
-
- err = crypto_skcipher_encrypt(nreq);
- if (err)
- return err;
- }
+ if (req->src != req->dst)
+ memcpy_sglist(req->dst, req->src,
+ req->assoclen + req->cryptlen);
aead_request_set_callback(subreq, req->base.flags,
req->base.complete, req->base.data);
@@ -157,7 +145,7 @@ static void __exit echainiv_module_exit(void)
crypto_unregister_template(&echainiv_tmpl);
}
-subsys_initcall(echainiv_module_init);
+module_init(echainiv_module_init);
module_exit(echainiv_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
index b3dd8a3ddeb7..2c0602f0cd40 100644
--- a/crypto/ecrdsa.c
+++ b/crypto/ecrdsa.c
@@ -249,7 +249,7 @@ static unsigned int ecrdsa_key_size(struct crypto_sig *tfm)
* Verify doesn't need any output, so it's just informational
* for keyctl to determine the key bit size.
*/
- return ctx->pub_key.ndigits * sizeof(u64);
+ return ctx->pub_key.ndigits * sizeof(u64) * BITS_PER_BYTE;
}
static unsigned int ecrdsa_max_size(struct crypto_sig *tfm)
diff --git a/crypto/essiv.c b/crypto/essiv.c
index 1c00c3324058..d003b78fcd85 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -405,8 +405,7 @@ static bool parse_cipher_name(char *essiv_cipher_name, const char *cra_name)
if (len >= CRYPTO_MAX_ALG_NAME)
return false;
- memcpy(essiv_cipher_name, p, len);
- essiv_cipher_name[len] = '\0';
+ strscpy(essiv_cipher_name, p, len + 1);
return true;
}
@@ -549,8 +548,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
}
/* record the driver name so we can instantiate this exact algo later */
- strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name,
- CRYPTO_MAX_ALG_NAME);
+ strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name);
/* Instance fields */
@@ -643,7 +641,7 @@ static void __exit essiv_module_exit(void)
crypto_unregister_template(&essiv_tmpl);
}
-subsys_initcall(essiv_module_init);
+module_init(essiv_module_init);
module_exit(essiv_module_exit);
MODULE_DESCRIPTION("ESSIV skcipher/aead wrapper for block encryption");
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index 95a16e88899b..80036835cec5 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -411,7 +411,7 @@ static void __exit fcrypt_mod_fini(void)
crypto_unregister_alg(&fcrypt_alg);
}
-subsys_initcall(fcrypt_mod_init);
+module_init(fcrypt_mod_init);
module_exit(fcrypt_mod_fini);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/fips.c b/crypto/fips.c
index 2fa3a9ee61a1..e88a604cb42b 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -95,5 +95,5 @@ static void __exit fips_exit(void)
crypto_proc_fips_exit();
}
-subsys_initcall(fips_init);
+module_init(fips_init);
module_exit(fips_exit);
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 84f7c23d14e4..97716482bed0 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -9,7 +9,6 @@
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
-#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <crypto/gcm.h>
#include <crypto/hash.h>
@@ -46,7 +45,6 @@ struct crypto_rfc4543_instance_ctx {
struct crypto_rfc4543_ctx {
struct crypto_aead *child;
- struct crypto_sync_skcipher *null;
u8 nonce[4];
};
@@ -79,8 +77,6 @@ static struct {
struct scatterlist sg;
} *gcm_zeroes;
-static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc);
-
static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
struct aead_request *req)
{
@@ -930,12 +926,12 @@ static int crypto_rfc4543_crypt(struct aead_request *req, bool enc)
unsigned int authsize = crypto_aead_authsize(aead);
u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
crypto_aead_alignmask(ctx->child) + 1);
- int err;
if (req->src != req->dst) {
- err = crypto_rfc4543_copy_src_to_dst(req, enc);
- if (err)
- return err;
+ unsigned int nbytes = req->assoclen + req->cryptlen -
+ (enc ? 0 : authsize);
+
+ memcpy_sglist(req->dst, req->src, nbytes);
}
memcpy(iv, ctx->nonce, 4);
@@ -952,22 +948,6 @@ static int crypto_rfc4543_crypt(struct aead_request *req, bool enc)
return enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq);
}
-static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
- unsigned int authsize = crypto_aead_authsize(aead);
- unsigned int nbytes = req->assoclen + req->cryptlen -
- (enc ? 0 : authsize);
- SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
-
- skcipher_request_set_sync_tfm(nreq, ctx->null);
- skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
- skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
-
- return crypto_skcipher_encrypt(nreq);
-}
-
static int crypto_rfc4543_encrypt(struct aead_request *req)
{
return crypto_ipsec_check_assoclen(req->assoclen) ?:
@@ -987,21 +967,13 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
struct crypto_aead_spawn *spawn = &ictx->aead;
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *aead;
- struct crypto_sync_skcipher *null;
unsigned long align;
- int err = 0;
aead = crypto_spawn_aead(spawn);
if (IS_ERR(aead))
return PTR_ERR(aead);
- null = crypto_get_default_null_skcipher();
- err = PTR_ERR(null);
- if (IS_ERR(null))
- goto err_free_aead;
-
ctx->child = aead;
- ctx->null = null;
align = crypto_aead_alignmask(aead);
align &= ~(crypto_tfm_ctx_alignment() - 1);
@@ -1012,10 +984,6 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
align + GCM_AES_IV_SIZE);
return 0;
-
-err_free_aead:
- crypto_free_aead(aead);
- return err;
}
static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
@@ -1023,7 +991,6 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher();
}
static void crypto_rfc4543_free(struct aead_instance *inst)
@@ -1152,7 +1119,7 @@ static void __exit crypto_gcm_module_exit(void)
ARRAY_SIZE(crypto_gcm_tmpls));
}
-subsys_initcall(crypto_gcm_module_init);
+module_init(crypto_gcm_module_init);
module_exit(crypto_gcm_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/geniv.c b/crypto/geniv.c
index bee4621b4f12..42eff6a7387c 100644
--- a/crypto/geniv.c
+++ b/crypto/geniv.c
@@ -9,7 +9,6 @@
#include <crypto/internal/geniv.h>
#include <crypto/internal/rng.h>
-#include <crypto/null.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -125,15 +124,10 @@ int aead_init_geniv(struct crypto_aead *aead)
if (err)
goto out;
- ctx->sknull = crypto_get_default_null_skcipher();
- err = PTR_ERR(ctx->sknull);
- if (IS_ERR(ctx->sknull))
- goto out;
-
child = crypto_spawn_aead(aead_instance_ctx(inst));
err = PTR_ERR(child);
if (IS_ERR(child))
- goto drop_null;
+ goto out;
ctx->child = child;
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) +
@@ -143,10 +137,6 @@ int aead_init_geniv(struct crypto_aead *aead)
out:
return err;
-
-drop_null:
- crypto_put_default_null_skcipher();
- goto out;
}
EXPORT_SYMBOL_GPL(aead_init_geniv);
@@ -155,7 +145,6 @@ void aead_exit_geniv(struct crypto_aead *tfm)
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher();
}
EXPORT_SYMBOL_GPL(aead_exit_geniv);
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index c70d163c1ac9..e5803c249c12 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -34,14 +34,14 @@
* (https://csrc.nist.gov/publications/detail/sp/800-38d/final)
*/
-#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
#include <crypto/ghash.h>
#include <crypto/internal/hash.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
+#include <crypto/utils.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
static int ghash_init(struct shash_desc *desc)
{
@@ -82,59 +82,36 @@ static int ghash_update(struct shash_desc *desc,
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
- if (dctx->bytes) {
- int n = min(srclen, dctx->bytes);
- u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
-
- dctx->bytes -= n;
- srclen -= n;
-
- while (n--)
- *pos++ ^= *src++;
-
- if (!dctx->bytes)
- gf128mul_4k_lle((be128 *)dst, ctx->gf128);
- }
-
- while (srclen >= GHASH_BLOCK_SIZE) {
+ do {
crypto_xor(dst, src, GHASH_BLOCK_SIZE);
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
src += GHASH_BLOCK_SIZE;
srclen -= GHASH_BLOCK_SIZE;
- }
-
- if (srclen) {
- dctx->bytes = GHASH_BLOCK_SIZE - srclen;
- while (srclen--)
- *dst++ ^= *src++;
- }
+ } while (srclen >= GHASH_BLOCK_SIZE);
- return 0;
+ return srclen;
}
-static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static void ghash_flush(struct shash_desc *desc, const u8 *src,
+ unsigned int len)
{
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
u8 *dst = dctx->buffer;
- if (dctx->bytes) {
- u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
-
- while (dctx->bytes--)
- *tmp++ ^= 0;
-
+ if (len) {
+ crypto_xor(dst, src, len);
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
}
-
- dctx->bytes = 0;
}
-static int ghash_final(struct shash_desc *desc, u8 *dst)
+static int ghash_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
- ghash_flush(ctx, dctx);
+ ghash_flush(desc, src, len);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
return 0;
@@ -151,13 +128,14 @@ static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
.update = ghash_update,
- .final = ghash_final,
+ .finup = ghash_finup,
.setkey = ghash_setkey,
.descsize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
@@ -175,7 +153,7 @@ static void __exit ghash_mod_exit(void)
crypto_unregister_shash(&ghash_alg);
}
-subsys_initcall(ghash_mod_init);
+module_init(ghash_mod_init);
module_exit(ghash_mod_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/hctr2.c b/crypto/hctr2.c
index cbcd673be481..c8932777bba8 100644
--- a/crypto/hctr2.c
+++ b/crypto/hctr2.c
@@ -570,7 +570,7 @@ static void __exit hctr2_module_exit(void)
ARRAY_SIZE(hctr2_tmpls));
}
-subsys_initcall(hctr2_module_init);
+module_init(hctr2_module_init);
module_exit(hctr2_module_exit);
MODULE_DESCRIPTION("HCTR2 length-preserving encryption mode");
diff --git a/crypto/hkdf.c b/crypto/hkdf.c
new file mode 100644
index 000000000000..82d1b32ca6ce
--- /dev/null
+++ b/crypto/hkdf.c
@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of HKDF ("HMAC-based Extract-and-Expand Key Derivation
+ * Function"), aka RFC 5869. See also the original paper (Krawczyk 2010):
+ * "Cryptographic Extraction and Key Derivation: The HKDF Scheme".
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha2.h>
+#include <crypto/hkdf.h>
+#include <linux/module.h>
+
+/*
+ * HKDF consists of two steps:
+ *
+ * 1. HKDF-Extract: extract a pseudorandom key from the input keying material
+ * and optional salt.
+ * 2. HKDF-Expand: expand the pseudorandom key into output keying material of
+ * any length, parameterized by an application-specific info string.
+ *
+ */
+
+/**
+ * hkdf_extract - HKDF-Extract (RFC 5869 section 2.2)
+ * @hmac_tfm: an HMAC transform using the hash function desired for HKDF. The
+ * caller is responsible for setting the @prk afterwards.
+ * @ikm: input keying material
+ * @ikmlen: length of @ikm
+ * @salt: input salt value
+ * @saltlen: length of @salt
+ * @prk: resulting pseudorandom key
+ *
+ * Extracts a pseudorandom key @prk from the input keying material
+ * @ikm with length @ikmlen and salt @salt with length @saltlen.
+ * The length of @prk is given by the digest size of @hmac_tfm.
+ * For an 'unsalted' version of HKDF-Extract @salt must be set
+ * to all zeroes and @saltlen must be set to the length of @prk.
+ *
+ * Returns 0 on success with the pseudorandom key stored in @prk,
+ * or a negative errno value otherwise.
+ */
+int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
+ unsigned int ikmlen, const u8 *salt, unsigned int saltlen,
+ u8 *prk)
+{
+ int err;
+
+ err = crypto_shash_setkey(hmac_tfm, salt, saltlen);
+ if (!err)
+ err = crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(hkdf_extract);
+
+/**
+ * hkdf_expand - HKDF-Expand (RFC 5869 section 2.3)
+ * @hmac_tfm: hash context keyed with pseudorandom key
+ * @info: application-specific information
+ * @infolen: length of @info
+ * @okm: output keying material
+ * @okmlen: length of @okm
+ *
+ * This expands the pseudorandom key, which was already keyed into @hmac_tfm,
+ * into @okmlen bytes of output keying material parameterized by the
+ * application-specific @info of length @infolen bytes.
+ * This is thread-safe and may be called by multiple threads in parallel.
+ *
+ * Returns 0 on success with output keying material stored in @okm,
+ * or a negative errno value otherwise.
+ */
+int hkdf_expand(struct crypto_shash *hmac_tfm,
+ const u8 *info, unsigned int infolen,
+ u8 *okm, unsigned int okmlen)
+{
+ SHASH_DESC_ON_STACK(desc, hmac_tfm);
+ unsigned int i, hashlen = crypto_shash_digestsize(hmac_tfm);
+ int err;
+ const u8 *prev = NULL;
+ u8 counter = 1;
+ u8 tmp[HASH_MAX_DIGESTSIZE] = {};
+
+ if (WARN_ON(okmlen > 255 * hashlen))
+ return -EINVAL;
+
+ desc->tfm = hmac_tfm;
+
+ for (i = 0; i < okmlen; i += hashlen) {
+ err = crypto_shash_init(desc);
+ if (err)
+ goto out;
+
+ if (prev) {
+ err = crypto_shash_update(desc, prev, hashlen);
+ if (err)
+ goto out;
+ }
+
+ if (infolen) {
+ err = crypto_shash_update(desc, info, infolen);
+ if (err)
+ goto out;
+ }
+
+ BUILD_BUG_ON(sizeof(counter) != 1);
+ if (okmlen - i < hashlen) {
+ err = crypto_shash_finup(desc, &counter, 1, tmp);
+ if (err)
+ goto out;
+ memcpy(&okm[i], tmp, okmlen - i);
+ memzero_explicit(tmp, sizeof(tmp));
+ } else {
+ err = crypto_shash_finup(desc, &counter, 1, &okm[i]);
+ if (err)
+ goto out;
+ }
+ counter++;
+ prev = &okm[i];
+ }
+ err = 0;
+out:
+ if (unlikely(err))
+ memzero_explicit(okm, okmlen); /* so caller doesn't need to */
+ shash_desc_zero(desc);
+ memzero_explicit(tmp, HASH_MAX_DIGESTSIZE);
+ return err;
+}
+EXPORT_SYMBOL_GPL(hkdf_expand);
+
+struct hkdf_testvec {
+ const char *test;
+ const u8 *ikm;
+ const u8 *salt;
+ const u8 *info;
+ const u8 *prk;
+ const u8 *okm;
+ u16 ikm_size;
+ u16 salt_size;
+ u16 info_size;
+ u16 prk_size;
+ u16 okm_size;
+};
+
+/*
+ * HKDF test vectors from RFC5869
+ *
+ * Additional HKDF test vectors from
+ * https://github.com/brycx/Test-Vector-Generation/blob/master/HKDF/hkdf-hmac-sha2-test-vectors.md
+ */
+static const struct hkdf_testvec hkdf_sha256_tv[] = {
+ {
+ .test = "basic hdkf test",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x07\x77\x09\x36\x2c\x2e\x32\xdf\x0d\xdc\x3f\x0d\xc4\x7b\xba\x63"
+ "\x90\xb6\xc7\x3b\xb5\x0f\x9c\x31\x22\xec\x84\x4a\xd7\xc2\xb3\xe5",
+ .prk_size = 32,
+ .okm = "\x3c\xb2\x5f\x25\xfa\xac\xd5\x7a\x90\x43\x4f\x64\xd0\x36\x2f\x2a"
+ "\x2d\x2d\x0a\x90\xcf\x1a\x5a\x4c\x5d\xb0\x2d\x56\xec\xc4\xc5\xbf"
+ "\x34\x00\x72\x08\xd5\xb8\x87\x18\x58\x65",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with long input",
+ .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
+ .ikm_size = 80,
+ .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
+ .salt_size = 80,
+ .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .info_size = 80,
+ .prk = "\x06\xa6\xb8\x8c\x58\x53\x36\x1a\x06\x10\x4c\x9c\xeb\x35\xb4\x5c"
+ "\xef\x76\x00\x14\x90\x46\x71\x01\x4a\x19\x3f\x40\xc1\x5f\xc2\x44",
+ .prk_size = 32,
+ .okm = "\xb1\x1e\x39\x8d\xc8\x03\x27\xa1\xc8\xe7\xf7\x8c\x59\x6a\x49\x34"
+ "\x4f\x01\x2e\xda\x2d\x4e\xfa\xd8\xa0\x50\xcc\x4c\x19\xaf\xa9\x7c"
+ "\x59\x04\x5a\x99\xca\xc7\x82\x72\x71\xcb\x41\xc6\x5e\x59\x0e\x09"
+ "\xda\x32\x75\x60\x0c\x2f\x09\xb8\x36\x77\x93\xa9\xac\xa3\xdb\x71"
+ "\xcc\x30\xc5\x81\x79\xec\x3e\x87\xc1\x4c\x01\xd5\xc1\xf3\x43\x4f"
+ "\x1d\x87",
+ .okm_size = 82,
+ }, {
+ .test = "hkdf test with zero salt and info",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = NULL,
+ .salt_size = 0,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x19\xef\x24\xa3\x2c\x71\x7b\x16\x7f\x33\xa9\x1d\x6f\x64\x8b\xdf"
+ "\x96\x59\x67\x76\xaf\xdb\x63\x77\xac\x43\x4c\x1c\x29\x3c\xcb\x04",
+ .prk_size = 32,
+ .okm = "\x8d\xa4\xe7\x75\xa5\x63\xc1\x8f\x71\x5f\x80\x2a\x06\x3c\x5a\x31"
+ "\xb8\xa1\x1f\x5c\x5e\xe1\x87\x9e\xc3\x45\x4e\x5f\x3c\x73\x8d\x2d"
+ "\x9d\x20\x13\x95\xfa\xa4\xb6\x1a\x96\xc8",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with short input",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 11,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x82\x65\xf6\x9d\x7f\xf7\xe5\x01\x37\x93\x01\x5c\xa0\xef\x92\x0c"
+ "\xb1\x68\x21\x99\xc8\xbc\x3a\x00\xda\x0c\xab\x47\xb7\xb0\x0f\xdf",
+ .prk_size = 32,
+ .okm = "\x58\xdc\xe1\x0d\x58\x01\xcd\xfd\xa8\x31\x72\x6b\xfe\xbc\xb7\x43"
+ "\xd1\x4a\x7e\xe8\x3a\xa0\x57\xa9\x3d\x59\xb0\xa1\x31\x7f\xf0\x9d"
+ "\x10\x5c\xce\xcf\x53\x56\x92\xb1\x4d\xd5",
+ .okm_size = 42,
+ }, {
+ .test = "unsalted hkdf test with zero info",
+ .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
+ "\x0c\x0c\x0c\x0c\x0c\x0c",
+ .ikm_size = 22,
+ .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+ .salt_size = 32,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\xaa\x84\x1e\x1f\x35\x74\xf3\x2d\x13\xfb\xa8\x00\x5f\xcd\x9b\x8d"
+ "\x77\x67\x82\xa5\xdf\xa1\x92\x38\x92\xfd\x8b\x63\x5d\x3a\x89\xdf",
+ .prk_size = 32,
+ .okm = "\x59\x68\x99\x17\x9a\xb1\xbc\x00\xa7\xc0\x37\x86\xff\x43\xee\x53"
+ "\x50\x04\xbe\x2b\xb9\xbe\x68\xbc\x14\x06\x63\x6f\x54\xbd\x33\x8a"
+ "\x66\xa2\x37\xba\x2a\xcb\xce\xe3\xc9\xa7",
+ .okm_size = 42,
+ }
+};
+
+static const struct hkdf_testvec hkdf_sha384_tv[] = {
+ {
+ .test = "basic hkdf test",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x70\x4b\x39\x99\x07\x79\xce\x1d\xc5\x48\x05\x2c\x7d\xc3\x9f\x30"
+ "\x35\x70\xdd\x13\xfb\x39\xf7\xac\xc5\x64\x68\x0b\xef\x80\xe8\xde"
+ "\xc7\x0e\xe9\xa7\xe1\xf3\xe2\x93\xef\x68\xec\xeb\x07\x2a\x5a\xde",
+ .prk_size = 48,
+ .okm = "\x9b\x50\x97\xa8\x60\x38\xb8\x05\x30\x90\x76\xa4\x4b\x3a\x9f\x38"
+ "\x06\x3e\x25\xb5\x16\xdc\xbf\x36\x9f\x39\x4c\xfa\xb4\x36\x85\xf7"
+ "\x48\xb6\x45\x77\x63\xe4\xf0\x20\x4f\xc5",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with long input",
+ .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
+ .ikm_size = 80,
+ .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
+ .salt_size = 80,
+ .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .info_size = 80,
+ .prk = "\xb3\x19\xf6\x83\x1d\xff\x93\x14\xef\xb6\x43\xba\xa2\x92\x63\xb3"
+ "\x0e\x4a\x8d\x77\x9f\xe3\x1e\x9c\x90\x1e\xfd\x7d\xe7\x37\xc8\x5b"
+ "\x62\xe6\x76\xd4\xdc\x87\xb0\x89\x5c\x6a\x7d\xc9\x7b\x52\xce\xbb",
+ .prk_size = 48,
+ .okm = "\x48\x4c\xa0\x52\xb8\xcc\x72\x4f\xd1\xc4\xec\x64\xd5\x7b\x4e\x81"
+ "\x8c\x7e\x25\xa8\xe0\xf4\x56\x9e\xd7\x2a\x6a\x05\xfe\x06\x49\xee"
+ "\xbf\x69\xf8\xd5\xc8\x32\x85\x6b\xf4\xe4\xfb\xc1\x79\x67\xd5\x49"
+ "\x75\x32\x4a\x94\x98\x7f\x7f\x41\x83\x58\x17\xd8\x99\x4f\xdb\xd6"
+ "\xf4\xc0\x9c\x55\x00\xdc\xa2\x4a\x56\x22\x2f\xea\x53\xd8\x96\x7a"
+ "\x8b\x2e",
+ .okm_size = 82,
+ }, {
+ .test = "hkdf test with zero salt and info",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = NULL,
+ .salt_size = 0,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x10\xe4\x0c\xf0\x72\xa4\xc5\x62\x6e\x43\xdd\x22\xc1\xcf\x72\x7d"
+ "\x4b\xb1\x40\x97\x5c\x9a\xd0\xcb\xc8\xe4\x5b\x40\x06\x8f\x8f\x0b"
+ "\xa5\x7c\xdb\x59\x8a\xf9\xdf\xa6\x96\x3a\x96\x89\x9a\xf0\x47\xe5",
+ .prk_size = 48,
+ .okm = "\xc8\xc9\x6e\x71\x0f\x89\xb0\xd7\x99\x0b\xca\x68\xbc\xde\xc8\xcf"
+ "\x85\x40\x62\xe5\x4c\x73\xa7\xab\xc7\x43\xfa\xde\x9b\x24\x2d\xaa"
+ "\xcc\x1c\xea\x56\x70\x41\x5b\x52\x84\x9c",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with short input",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 11,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x6d\x31\x69\x98\x28\x79\x80\x88\xb3\x59\xda\xd5\x0b\x8f\x01\xb0"
+ "\x15\xf1\x7a\xa3\xbd\x4e\x27\xa6\xe9\xf8\x73\xb7\x15\x85\xca\x6a"
+ "\x00\xd1\xf0\x82\x12\x8a\xdb\x3c\xf0\x53\x0b\x57\xc0\xf9\xac\x72",
+ .prk_size = 48,
+ .okm = "\xfb\x7e\x67\x43\xeb\x42\xcd\xe9\x6f\x1b\x70\x77\x89\x52\xab\x75"
+ "\x48\xca\xfe\x53\x24\x9f\x7f\xfe\x14\x97\xa1\x63\x5b\x20\x1f\xf1"
+ "\x85\xb9\x3e\x95\x19\x92\xd8\x58\xf1\x1a",
+ .okm_size = 42,
+ }, {
+ .test = "unsalted hkdf test with zero info",
+ .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
+ "\x0c\x0c\x0c\x0c\x0c\x0c",
+ .ikm_size = 22,
+ .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+ .salt_size = 48,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x9d\x2d\xa5\x06\x6f\x05\xd1\x6c\x59\xfe\xdf\x6c\x5f\x32\xc7\x5e"
+ "\xda\x9a\x47\xa7\x9c\x93\x6a\xa4\x4c\xb7\x63\xa8\xe2\x2f\xfb\xfc"
+ "\xd8\xfe\x55\x43\x58\x53\x47\x21\x90\x39\xd1\x68\x28\x36\x33\xf5",
+ .prk_size = 48,
+ .okm = "\x6a\xd7\xc7\x26\xc8\x40\x09\x54\x6a\x76\xe0\x54\x5d\xf2\x66\x78"
+ "\x7e\x2b\x2c\xd6\xca\x43\x73\xa1\xf3\x14\x50\xa7\xbd\xf9\x48\x2b"
+ "\xfa\xb8\x11\xf5\x54\x20\x0e\xad\x8f\x53",
+ .okm_size = 42,
+ }
+};
+
+static const struct hkdf_testvec hkdf_sha512_tv[] = {
+ {
+ .test = "basic hkdf test",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x66\x57\x99\x82\x37\x37\xde\xd0\x4a\x88\xe4\x7e\x54\xa5\x89\x0b"
+ "\xb2\xc3\xd2\x47\xc7\xa4\x25\x4a\x8e\x61\x35\x07\x23\x59\x0a\x26"
+ "\xc3\x62\x38\x12\x7d\x86\x61\xb8\x8c\xf8\x0e\xf8\x02\xd5\x7e\x2f"
+ "\x7c\xeb\xcf\x1e\x00\xe0\x83\x84\x8b\xe1\x99\x29\xc6\x1b\x42\x37",
+ .prk_size = 64,
+ .okm = "\x83\x23\x90\x08\x6c\xda\x71\xfb\x47\x62\x5b\xb5\xce\xb1\x68\xe4"
+ "\xc8\xe2\x6a\x1a\x16\xed\x34\xd9\xfc\x7f\xe9\x2c\x14\x81\x57\x93"
+ "\x38\xda\x36\x2c\xb8\xd9\xf9\x25\xd7\xcb",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with long input",
+ .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
+ .ikm_size = 80,
+ .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
+ .salt_size = 80,
+ .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .info_size = 80,
+ .prk = "\x35\x67\x25\x42\x90\x7d\x4e\x14\x2c\x00\xe8\x44\x99\xe7\x4e\x1d"
+ "\xe0\x8b\xe8\x65\x35\xf9\x24\xe0\x22\x80\x4a\xd7\x75\xdd\xe2\x7e"
+ "\xc8\x6c\xd1\xe5\xb7\xd1\x78\xc7\x44\x89\xbd\xbe\xb3\x07\x12\xbe"
+ "\xb8\x2d\x4f\x97\x41\x6c\x5a\x94\xea\x81\xeb\xdf\x3e\x62\x9e\x4a",
+ .prk_size = 64,
+ .okm = "\xce\x6c\x97\x19\x28\x05\xb3\x46\xe6\x16\x1e\x82\x1e\xd1\x65\x67"
+ "\x3b\x84\xf4\x00\xa2\xb5\x14\xb2\xfe\x23\xd8\x4c\xd1\x89\xdd\xf1"
+ "\xb6\x95\xb4\x8c\xbd\x1c\x83\x88\x44\x11\x37\xb3\xce\x28\xf1\x6a"
+ "\xa6\x4b\xa3\x3b\xa4\x66\xb2\x4d\xf6\xcf\xcb\x02\x1e\xcf\xf2\x35"
+ "\xf6\xa2\x05\x6c\xe3\xaf\x1d\xe4\x4d\x57\x20\x97\xa8\x50\x5d\x9e"
+ "\x7a\x93",
+ .okm_size = 82,
+ }, {
+ .test = "hkdf test with zero salt and info",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = NULL,
+ .salt_size = 0,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\xfd\x20\x0c\x49\x87\xac\x49\x13\x13\xbd\x4a\x2a\x13\x28\x71\x21"
+ "\x24\x72\x39\xe1\x1c\x9e\xf8\x28\x02\x04\x4b\x66\xef\x35\x7e\x5b"
+ "\x19\x44\x98\xd0\x68\x26\x11\x38\x23\x48\x57\x2a\x7b\x16\x11\xde"
+ "\x54\x76\x40\x94\x28\x63\x20\x57\x8a\x86\x3f\x36\x56\x2b\x0d\xf6",
+ .prk_size = 64,
+ .okm = "\xf5\xfa\x02\xb1\x82\x98\xa7\x2a\x8c\x23\x89\x8a\x87\x03\x47\x2c"
+ "\x6e\xb1\x79\xdc\x20\x4c\x03\x42\x5c\x97\x0e\x3b\x16\x4b\xf9\x0f"
+ "\xff\x22\xd0\x48\x36\xd0\xe2\x34\x3b\xac",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with short input",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 11,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x67\x40\x9c\x9c\xac\x28\xb5\x2e\xe9\xfa\xd9\x1c\x2f\xda\x99\x9f"
+ "\x7c\xa2\x2e\x34\x34\xf0\xae\x77\x28\x63\x83\x65\x68\xad\x6a\x7f"
+ "\x10\xcf\x11\x3b\xfd\xdd\x56\x01\x29\xa5\x94\xa8\xf5\x23\x85\xc2"
+ "\xd6\x61\xd7\x85\xd2\x9c\xe9\x3a\x11\x40\x0c\x92\x06\x83\x18\x1d",
+ .prk_size = 64,
+ .okm = "\x74\x13\xe8\x99\x7e\x02\x06\x10\xfb\xf6\x82\x3f\x2c\xe1\x4b\xff"
+ "\x01\x87\x5d\xb1\xca\x55\xf6\x8c\xfc\xf3\x95\x4d\xc8\xaf\xf5\x35"
+ "\x59\xbd\x5e\x30\x28\xb0\x80\xf7\xc0\x68",
+ .okm_size = 42,
+ }, {
+ .test = "unsalted hkdf test with zero info",
+ .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
+ "\x0c\x0c\x0c\x0c\x0c\x0c",
+ .ikm_size = 22,
+ .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+ .salt_size = 64,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x53\x46\xb3\x76\xbf\x3a\xa9\xf8\x4f\x8f\x6e\xd5\xb1\xc4\xf4\x89"
+ "\x17\x2e\x24\x4d\xac\x30\x3d\x12\xf6\x8e\xcc\x76\x6e\xa6\x00\xaa"
+ "\x88\x49\x5e\x7f\xb6\x05\x80\x31\x22\xfa\x13\x69\x24\xa8\x40\xb1"
+ "\xf0\x71\x9d\x2d\x5f\x68\xe2\x9b\x24\x22\x99\xd7\x58\xed\x68\x0c",
+ .prk_size = 64,
+ .okm = "\x14\x07\xd4\x60\x13\xd9\x8b\xc6\xde\xce\xfc\xfe\xe5\x5f\x0f\x90"
+ "\xb0\xc7\xf6\x3d\x68\xeb\x1a\x80\xea\xf0\x7e\x95\x3c\xfc\x0a\x3a"
+ "\x52\x40\xa1\x55\xd6\xe4\xda\xa9\x65\xbb",
+ .okm_size = 42,
+ }
+};
+
+static int hkdf_test(const char *shash, const struct hkdf_testvec *tv)
+{
+ struct crypto_shash *tfm = NULL;
+ u8 *prk = NULL, *okm = NULL;
+ unsigned int prk_size;
+ const char *driver;
+ int err;
+
+ tfm = crypto_alloc_shash(shash, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("%s(%s): failed to allocate transform: %ld\n",
+ tv->test, shash, PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+ driver = crypto_shash_driver_name(tfm);
+
+ prk_size = crypto_shash_digestsize(tfm);
+ prk = kzalloc(prk_size, GFP_KERNEL);
+ if (!prk) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ if (tv->prk_size != prk_size) {
+ pr_err("%s(%s): prk size mismatch (vec %u, digest %u\n",
+ tv->test, driver, tv->prk_size, prk_size);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ err = hkdf_extract(tfm, tv->ikm, tv->ikm_size,
+ tv->salt, tv->salt_size, prk);
+ if (err) {
+ pr_err("%s(%s): hkdf_extract failed with %d\n",
+ tv->test, driver, err);
+ goto out_free;
+ }
+
+ if (memcmp(prk, tv->prk, tv->prk_size)) {
+ pr_err("%s(%s): hkdf_extract prk mismatch\n",
+ tv->test, driver);
+ print_hex_dump(KERN_ERR, "prk: ", DUMP_PREFIX_NONE,
+ 16, 1, prk, tv->prk_size, false);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ okm = kzalloc(tv->okm_size, GFP_KERNEL);
+ if (!okm) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = crypto_shash_setkey(tfm, tv->prk, tv->prk_size);
+ if (err) {
+ pr_err("%s(%s): failed to set prk, error %d\n",
+ tv->test, driver, err);
+ goto out_free;
+ }
+
+ err = hkdf_expand(tfm, tv->info, tv->info_size,
+ okm, tv->okm_size);
+ if (err) {
+ pr_err("%s(%s): hkdf_expand() failed with %d\n",
+ tv->test, driver, err);
+ } else if (memcmp(okm, tv->okm, tv->okm_size)) {
+ pr_err("%s(%s): hkdf_expand() okm mismatch\n",
+ tv->test, driver);
+ print_hex_dump(KERN_ERR, "okm: ", DUMP_PREFIX_NONE,
+ 16, 1, okm, tv->okm_size, false);
+ err = -EINVAL;
+ }
+out_free:
+ kfree(okm);
+ kfree(prk);
+ crypto_free_shash(tfm);
+ return err;
+}
+
+static int __init crypto_hkdf_module_init(void)
+{
+ int ret = 0, i;
+
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(hkdf_sha256_tv); i++) {
+ ret = hkdf_test("hmac(sha256)", &hkdf_sha256_tv[i]);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < ARRAY_SIZE(hkdf_sha384_tv); i++) {
+ ret = hkdf_test("hmac(sha384)", &hkdf_sha384_tv[i]);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < ARRAY_SIZE(hkdf_sha512_tv); i++) {
+ ret = hkdf_test("hmac(sha512)", &hkdf_sha512_tv[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit crypto_hkdf_module_exit(void) {}
+
+late_initcall(crypto_hkdf_module_init);
+module_exit(crypto_hkdf_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HMAC-based Key Derivation Function (HKDF)");
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 7cec25ff9889..148af460ae97 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -13,13 +13,11 @@
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
-#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/fips.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/string.h>
struct hmac_ctx {
@@ -28,6 +26,12 @@ struct hmac_ctx {
u8 pads[];
};
+struct ahash_hmac_ctx {
+ struct crypto_ahash *hash;
+ /* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */
+ u8 pads[];
+};
+
static int hmac_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
@@ -39,7 +43,7 @@ static int hmac_setkey(struct crypto_shash *parent,
u8 *ipad = &tctx->pads[0];
u8 *opad = &tctx->pads[ss];
SHASH_DESC_ON_STACK(shash, hash);
- unsigned int i;
+ int err, i;
if (fips_enabled && (keylen < 112 / 8))
return -EINVAL;
@@ -65,12 +69,14 @@ static int hmac_setkey(struct crypto_shash *parent,
opad[i] ^= HMAC_OPAD_VALUE;
}
- return crypto_shash_init(shash) ?:
- crypto_shash_update(shash, ipad, bs) ?:
- crypto_shash_export(shash, ipad) ?:
- crypto_shash_init(shash) ?:
- crypto_shash_update(shash, opad, bs) ?:
- crypto_shash_export(shash, opad);
+ err = crypto_shash_init(shash) ?:
+ crypto_shash_update(shash, ipad, bs) ?:
+ crypto_shash_export(shash, ipad) ?:
+ crypto_shash_init(shash) ?:
+ crypto_shash_update(shash, opad, bs) ?:
+ crypto_shash_export(shash, opad);
+ shash_desc_zero(shash);
+ return err;
}
static int hmac_export(struct shash_desc *pdesc, void *out)
@@ -90,6 +96,22 @@ static int hmac_import(struct shash_desc *pdesc, const void *in)
return crypto_shash_import(desc, in);
}
+static int hmac_export_core(struct shash_desc *pdesc, void *out)
+{
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
+
+ return crypto_shash_export_core(desc, out);
+}
+
+static int hmac_import_core(struct shash_desc *pdesc, const void *in)
+{
+ const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm);
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
+
+ desc->tfm = tctx->hash;
+ return crypto_shash_import_core(desc, in);
+}
+
static int hmac_init(struct shash_desc *pdesc)
{
const struct hmac_ctx *tctx = crypto_shash_ctx(pdesc->tfm);
@@ -105,20 +127,6 @@ static int hmac_update(struct shash_desc *pdesc,
return crypto_shash_update(desc, data, nbytes);
}
-static int hmac_final(struct shash_desc *pdesc, u8 *out)
-{
- struct crypto_shash *parent = pdesc->tfm;
- int ds = crypto_shash_digestsize(parent);
- int ss = crypto_shash_statesize(parent);
- const struct hmac_ctx *tctx = crypto_shash_ctx(parent);
- const u8 *opad = &tctx->pads[ss];
- struct shash_desc *desc = shash_desc_ctx(pdesc);
-
- return crypto_shash_final(desc, out) ?:
- crypto_shash_import(desc, opad) ?:
- crypto_shash_finup(desc, out, ds, out);
-}
-
static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
unsigned int nbytes, u8 *out)
{
@@ -146,9 +154,6 @@ static int hmac_init_tfm(struct crypto_shash *parent)
if (IS_ERR(hash))
return PTR_ERR(hash);
- parent->descsize = sizeof(struct shash_desc) +
- crypto_shash_descsize(hash);
-
tctx->hash = hash;
return 0;
}
@@ -174,26 +179,23 @@ static void hmac_exit_tfm(struct crypto_shash *parent)
crypto_free_shash(tctx->hash);
}
-static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+static int __hmac_create_shash(struct crypto_template *tmpl,
+ struct rtattr **tb, u32 mask)
{
struct shash_instance *inst;
struct crypto_shash_spawn *spawn;
struct crypto_alg *alg;
struct shash_alg *salg;
- u32 mask;
int err;
int ds;
int ss;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
- if (err)
- return err;
-
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = shash_instance_ctx(inst);
+ mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
err = crypto_grab_shash(spawn, shash_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
@@ -212,7 +214,8 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
ss < alg->cra_blocksize)
goto err_free_inst;
- err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
+ err = crypto_inst_setname(shash_crypto_instance(inst), "hmac",
+ "hmac-shash", alg);
if (err)
goto err_free_inst;
@@ -222,12 +225,14 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.digestsize = ds;
inst->alg.statesize = ss;
+ inst->alg.descsize = sizeof(struct shash_desc) + salg->descsize;
inst->alg.init = hmac_init;
inst->alg.update = hmac_update;
- inst->alg.final = hmac_final;
inst->alg.finup = hmac_finup;
inst->alg.export = hmac_export;
inst->alg.import = hmac_import;
+ inst->alg.export_core = hmac_export_core;
+ inst->alg.import_core = hmac_import_core;
inst->alg.setkey = hmac_setkey;
inst->alg.init_tfm = hmac_init_tfm;
inst->alg.clone_tfm = hmac_clone_tfm;
@@ -243,23 +248,332 @@ err_free_inst:
return err;
}
-static struct crypto_template hmac_tmpl = {
- .name = "hmac",
- .create = hmac_create,
- .module = THIS_MODULE,
+static int hmac_setkey_ahash(struct crypto_ahash *parent,
+ const u8 *inkey, unsigned int keylen)
+{
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+ struct crypto_ahash *fb = crypto_ahash_fb(tctx->hash);
+ int ds = crypto_ahash_digestsize(parent);
+ int bs = crypto_ahash_blocksize(parent);
+ int ss = crypto_ahash_statesize(parent);
+ HASH_REQUEST_ON_STACK(req, fb);
+ u8 *opad = &tctx->pads[ss];
+ u8 *ipad = &tctx->pads[0];
+ int err, i;
+
+ if (fips_enabled && (keylen < 112 / 8))
+ return -EINVAL;
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+
+ if (keylen > bs) {
+ ahash_request_set_virt(req, inkey, ipad, keylen);
+ err = crypto_ahash_digest(req);
+ if (err)
+ goto out_zero_req;
+
+ keylen = ds;
+ } else
+ memcpy(ipad, inkey, keylen);
+
+ memset(ipad + keylen, 0, bs - keylen);
+ memcpy(opad, ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ ipad[i] ^= HMAC_IPAD_VALUE;
+ opad[i] ^= HMAC_OPAD_VALUE;
+ }
+
+ ahash_request_set_virt(req, ipad, NULL, bs);
+ err = crypto_ahash_init(req) ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export(req, ipad);
+
+ ahash_request_set_virt(req, opad, NULL, bs);
+ err = err ?:
+ crypto_ahash_init(req) ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export(req, opad);
+
+out_zero_req:
+ HASH_REQUEST_ZERO(req);
+ return err;
+}
+
+static int hmac_export_ahash(struct ahash_request *preq, void *out)
+{
+ return crypto_ahash_export(ahash_request_ctx(preq), out);
+}
+
+static int hmac_import_ahash(struct ahash_request *preq, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_tfm(req, tctx->hash);
+ return crypto_ahash_import(req, in);
+}
+
+static int hmac_export_core_ahash(struct ahash_request *preq, void *out)
+{
+ return crypto_ahash_export_core(ahash_request_ctx(preq), out);
+}
+
+static int hmac_import_core_ahash(struct ahash_request *preq, const void *in)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_tfm(req, tctx->hash);
+ return crypto_ahash_import_core(req, in);
+}
+
+static int hmac_init_ahash(struct ahash_request *preq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ return hmac_import_ahash(preq, &tctx->pads[0]);
+}
+
+static int hmac_update_ahash(struct ahash_request *preq)
+{
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_callback(req, ahash_request_flags(preq),
+ preq->base.complete, preq->base.data);
+ if (ahash_request_isvirt(preq))
+ ahash_request_set_virt(req, preq->svirt, NULL, preq->nbytes);
+ else
+ ahash_request_set_crypt(req, preq->src, NULL, preq->nbytes);
+ return crypto_ahash_update(req);
+}
+
+static int hmac_finup_finish(struct ahash_request *preq, unsigned int mask)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+ struct ahash_request *req = ahash_request_ctx(preq);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+ int ds = crypto_ahash_digestsize(tfm);
+ int ss = crypto_ahash_statesize(tfm);
+ const u8 *opad = &tctx->pads[ss];
+
+ ahash_request_set_callback(req, ahash_request_flags(preq) & ~mask,
+ preq->base.complete, preq->base.data);
+ ahash_request_set_virt(req, preq->result, preq->result, ds);
+ return crypto_ahash_import(req, opad) ?:
+ crypto_ahash_finup(req);
+
+}
+
+static void hmac_finup_done(void *data, int err)
+{
+ struct ahash_request *preq = data;
+
+ if (err)
+ goto out;
+
+ err = hmac_finup_finish(preq, CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+
+out:
+ ahash_request_complete(preq, err);
+}
+
+static int hmac_finup_ahash(struct ahash_request *preq)
+{
+ struct ahash_request *req = ahash_request_ctx(preq);
+
+ ahash_request_set_callback(req, ahash_request_flags(preq),
+ hmac_finup_done, preq);
+ if (ahash_request_isvirt(preq))
+ ahash_request_set_virt(req, preq->svirt, preq->result,
+ preq->nbytes);
+ else
+ ahash_request_set_crypt(req, preq->src, preq->result,
+ preq->nbytes);
+ return crypto_ahash_finup(req) ?:
+ hmac_finup_finish(preq, 0);
+}
+
+static int hmac_digest_ahash(struct ahash_request *preq)
+{
+ return hmac_init_ahash(preq) ?:
+ hmac_finup_ahash(preq);
+}
+
+static int hmac_init_ahash_tfm(struct crypto_ahash *parent)
+{
+ struct ahash_instance *inst = ahash_alg_instance(parent);
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+ struct crypto_ahash *hash;
+
+ hash = crypto_spawn_ahash(ahash_instance_ctx(inst));
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ if (crypto_ahash_reqsize(parent) < sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(hash))
+ return -EINVAL;
+
+ tctx->hash = hash;
+ return 0;
+}
+
+static int hmac_clone_ahash_tfm(struct crypto_ahash *dst,
+ struct crypto_ahash *src)
+{
+ struct ahash_hmac_ctx *sctx = crypto_ahash_ctx(src);
+ struct ahash_hmac_ctx *dctx = crypto_ahash_ctx(dst);
+ struct crypto_ahash *hash;
+
+ hash = crypto_clone_ahash(sctx->hash);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ dctx->hash = hash;
+ return 0;
+}
+
+static void hmac_exit_ahash_tfm(struct crypto_ahash *parent)
+{
+ struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+
+ crypto_free_ahash(tctx->hash);
+}
+
+static int hmac_create_ahash(struct crypto_template *tmpl, struct rtattr **tb,
+ u32 mask)
+{
+ struct crypto_ahash_spawn *spawn;
+ struct ahash_instance *inst;
+ struct crypto_alg *alg;
+ struct hash_alg_common *halg;
+ int ds, ss, err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ spawn = ahash_instance_ctx(inst);
+
+ mask |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+ err = crypto_grab_ahash(spawn, ahash_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
+ if (err)
+ goto err_free_inst;
+ halg = crypto_spawn_ahash_alg(spawn);
+ alg = &halg->base;
+
+ /* The underlying hash algorithm must not require a key */
+ err = -EINVAL;
+ if (crypto_hash_alg_needs_key(halg))
+ goto err_free_inst;
+
+ ds = halg->digestsize;
+ ss = halg->statesize;
+ if (ds > alg->cra_blocksize || ss < alg->cra_blocksize)
+ goto err_free_inst;
+
+ err = crypto_inst_setname(ahash_crypto_instance(inst), tmpl->name, alg);
+ if (err)
+ goto err_free_inst;
+
+ inst->alg.halg.base.cra_flags = alg->cra_flags &
+ CRYPTO_ALG_INHERITED_FLAGS;
+ inst->alg.halg.base.cra_flags |= CRYPTO_ALG_REQ_VIRT;
+ inst->alg.halg.base.cra_priority = alg->cra_priority + 100;
+ inst->alg.halg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.halg.base.cra_ctxsize = sizeof(struct ahash_hmac_ctx) +
+ (ss * 2);
+ inst->alg.halg.base.cra_reqsize = sizeof(struct ahash_request) +
+ alg->cra_reqsize;
+
+ inst->alg.halg.digestsize = ds;
+ inst->alg.halg.statesize = ss;
+ inst->alg.init = hmac_init_ahash;
+ inst->alg.update = hmac_update_ahash;
+ inst->alg.finup = hmac_finup_ahash;
+ inst->alg.digest = hmac_digest_ahash;
+ inst->alg.export = hmac_export_ahash;
+ inst->alg.import = hmac_import_ahash;
+ inst->alg.export_core = hmac_export_core_ahash;
+ inst->alg.import_core = hmac_import_core_ahash;
+ inst->alg.setkey = hmac_setkey_ahash;
+ inst->alg.init_tfm = hmac_init_ahash_tfm;
+ inst->alg.clone_tfm = hmac_clone_ahash_tfm;
+ inst->alg.exit_tfm = hmac_exit_ahash_tfm;
+
+ inst->free = ahash_free_singlespawn_instance;
+
+ err = ahash_register_instance(tmpl, inst);
+ if (err) {
+err_free_inst:
+ ahash_free_singlespawn_instance(inst);
+ }
+ return err;
+}
+
+static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_attr_type *algt;
+ u32 mask;
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ mask = crypto_algt_inherited_mask(algt);
+
+ if (!((algt->type ^ CRYPTO_ALG_TYPE_AHASH) &
+ algt->mask & CRYPTO_ALG_TYPE_MASK))
+ return hmac_create_ahash(tmpl, tb, mask);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_SHASH) &
+ algt->mask & CRYPTO_ALG_TYPE_MASK)
+ return -EINVAL;
+
+ return __hmac_create_shash(tmpl, tb, mask);
+}
+
+static int hmac_create_shash(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
+ if (err)
+ return err == -EINVAL ? -ENOENT : err;
+
+ return __hmac_create_shash(tmpl, tb, mask);
+}
+
+static struct crypto_template hmac_tmpls[] = {
+ {
+ .name = "hmac",
+ .create = hmac_create,
+ .module = THIS_MODULE,
+ },
+ {
+ .name = "hmac-shash",
+ .create = hmac_create_shash,
+ .module = THIS_MODULE,
+ },
};
static int __init hmac_module_init(void)
{
- return crypto_register_template(&hmac_tmpl);
+ return crypto_register_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls));
}
static void __exit hmac_module_exit(void)
{
- crypto_unregister_template(&hmac_tmpl);
+ crypto_unregister_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls));
}
-subsys_initcall(hmac_module_init);
+module_init(hmac_module_init);
module_exit(hmac_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/internal.h b/crypto/internal.h
index 46b661be0f90..b9afd68767c1 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -33,6 +33,22 @@ struct crypto_larval {
bool test_started;
};
+struct crypto_type {
+ unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
+ unsigned int (*extsize)(struct crypto_alg *alg);
+ int (*init_tfm)(struct crypto_tfm *tfm);
+ void (*show)(struct seq_file *m, struct crypto_alg *alg);
+ int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
+ void (*free)(struct crypto_instance *inst);
+ void (*destroy)(struct crypto_alg *alg);
+
+ unsigned int type;
+ unsigned int maskclear;
+ unsigned int maskset;
+ unsigned int tfmsize;
+ unsigned int algsize;
+};
+
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
@@ -51,8 +67,7 @@ extern struct blocking_notifier_head crypto_chain;
int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
-#if !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || \
- IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)
+#if !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || !IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)
static inline bool crypto_boot_test_finished(void)
{
return true;
@@ -71,7 +86,7 @@ static inline void set_crypto_boot_test_finished(void)
static_branch_enable(&__crypto_boot_test_finished);
}
#endif /* !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) ||
- * IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)
+ * !IS_ENABLED(CONFIG_CRYPTO_SELFTESTS)
*/
#ifdef CONFIG_PROC_FS
@@ -147,10 +162,12 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
return alg;
}
+void crypto_destroy_alg(struct crypto_alg *alg);
+
static inline void crypto_alg_put(struct crypto_alg *alg)
{
- if (refcount_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
- alg->cra_destroy(alg);
+ if (refcount_dec_and_test(&alg->cra_refcnt))
+ crypto_destroy_alg(alg);
}
static inline int crypto_tmpl_get(struct crypto_template *tmpl)
diff --git a/crypto/kdf_sp800108.c b/crypto/kdf_sp800108.c
index c3f9938e1ad2..b7a6bf9da773 100644
--- a/crypto/kdf_sp800108.c
+++ b/crypto/kdf_sp800108.c
@@ -127,7 +127,7 @@ static int __init crypto_kdf108_init(void)
{
int ret;
- if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ if (!IS_ENABLED(CONFIG_CRYPTO_SELFTESTS))
return 0;
ret = kdf_test(&kdf_ctr_hmac_sha256_tv_template[0], "hmac(sha256)",
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 7ad338ca2c18..024264ee9cd1 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -871,7 +871,7 @@ static void __exit khazad_mod_fini(void)
}
-subsys_initcall(khazad_mod_init);
+module_init(khazad_mod_init);
module_exit(khazad_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/kpp.c b/crypto/kpp.c
index ecc63a1a948d..2e0cefe7a25f 100644
--- a/crypto/kpp.c
+++ b/crypto/kpp.c
@@ -80,6 +80,7 @@ static const struct crypto_type crypto_kpp_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_KPP,
.tfmsize = offsetof(struct crypto_kpp, base),
+ .algsize = offsetof(struct kpp_alg, base),
};
struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask)
diff --git a/crypto/krb5/Kconfig b/crypto/krb5/Kconfig
new file mode 100644
index 000000000000..4d0476e13f3c
--- /dev/null
+++ b/crypto/krb5/Kconfig
@@ -0,0 +1,26 @@
+config CRYPTO_KRB5
+ tristate "Kerberos 5 crypto"
+ select CRYPTO_MANAGER
+ select CRYPTO_KRB5ENC
+ select CRYPTO_AUTHENC
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH_INFO
+ select CRYPTO_HMAC
+ select CRYPTO_CMAC
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_CBC
+ select CRYPTO_CTS
+ select CRYPTO_AES
+ select CRYPTO_CAMELLIA
+ help
+ Provide a library of Kerberos-5-based crypto routines. This is
+ intended for network filesystems to use.
+
+config CRYPTO_KRB5_SELFTESTS
+ bool "Kerberos 5 crypto selftests"
+ depends on CRYPTO_KRB5
+ help
+ Turn on some self-testing for the Kerberos 5 crypto functions. These
+ will be performed on module load or boot, if compiled in.
diff --git a/crypto/krb5/Makefile b/crypto/krb5/Makefile
new file mode 100644
index 000000000000..d38890c0b247
--- /dev/null
+++ b/crypto/krb5/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Kerberos 5 crypto
+#
+
+krb5-y += \
+ krb5_kdf.o \
+ krb5_api.o \
+ rfc3961_simplified.o \
+ rfc3962_aes.o \
+ rfc6803_camellia.o \
+ rfc8009_aes2.o
+
+krb5-$(CONFIG_CRYPTO_KRB5_SELFTESTS) += \
+ selftest.o \
+ selftest_data.o
+
+obj-$(CONFIG_CRYPTO_KRB5) += krb5.o
diff --git a/crypto/krb5/internal.h b/crypto/krb5/internal.h
new file mode 100644
index 000000000000..a59084ffafe8
--- /dev/null
+++ b/crypto/krb5/internal.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Kerberos5 crypto internals
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/scatterlist.h>
+#include <crypto/krb5.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+
+/*
+ * Profile used for key derivation and encryption.
+ */
+struct krb5_crypto_profile {
+ /* Pseudo-random function */
+ int (*calc_PRF)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *protocol_key,
+ const struct krb5_buffer *octet_string,
+ struct krb5_buffer *result,
+ gfp_t gfp);
+
+ /* Checksum key derivation */
+ int (*calc_Kc)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *Kc,
+ gfp_t gfp);
+
+ /* Encryption key derivation */
+ int (*calc_Ke)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *Ke,
+ gfp_t gfp);
+
+ /* Integrity key derivation */
+ int (*calc_Ki)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *Ki,
+ gfp_t gfp);
+
+ /* Derive the keys needed for an encryption AEAD object. */
+ int (*derive_encrypt_keys)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+
+ /* Directly load the keys needed for an encryption AEAD object. */
+ int (*load_encrypt_keys)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Ke,
+ const struct krb5_buffer *Ki,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+
+ /* Derive the key needed for a checksum hash object. */
+ int (*derive_checksum_key)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+
+ /* Directly load the keys needed for a checksum hash object. */
+ int (*load_checksum_key)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+
+ /* Encrypt data in-place, inserting confounder and checksum. */
+ ssize_t (*encrypt)(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded);
+
+ /* Decrypt data in-place, removing confounder and checksum */
+ int (*decrypt)(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+
+ /* Generate a MIC on part of a packet, inserting the checksum */
+ ssize_t (*get_mic)(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len);
+
+ /* Verify the MIC on a piece of data, removing the checksum */
+ int (*verify_mic)(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+};
+
+/*
+ * Crypto size/alignment rounding convenience macros.
+ */
+#define crypto_roundup(X) ((unsigned int)round_up((X), CRYPTO_MINALIGN))
+
+#define krb5_aead_size(TFM) \
+ crypto_roundup(sizeof(struct aead_request) + crypto_aead_reqsize(TFM))
+#define krb5_aead_ivsize(TFM) \
+ crypto_roundup(crypto_aead_ivsize(TFM))
+#define krb5_shash_size(TFM) \
+ crypto_roundup(sizeof(struct shash_desc) + crypto_shash_descsize(TFM))
+#define krb5_digest_size(TFM) \
+ crypto_roundup(crypto_shash_digestsize(TFM))
+#define round16(x) (((x) + 15) & ~15)
+
+/*
+ * Self-testing data.
+ */
+struct krb5_prf_test {
+ u32 etype;
+ const char *name, *key, *octet, *prf;
+};
+
+struct krb5_key_test_one {
+ u32 use;
+ const char *key;
+};
+
+struct krb5_key_test {
+ u32 etype;
+ const char *name, *key;
+ struct krb5_key_test_one Kc, Ke, Ki;
+};
+
+struct krb5_enc_test {
+ u32 etype;
+ u32 usage;
+ const char *name, *plain, *conf, *K0, *Ke, *Ki, *ct;
+};
+
+struct krb5_mic_test {
+ u32 etype;
+ u32 usage;
+ const char *name, *plain, *K0, *Kc, *mic;
+};
+
+/*
+ * krb5_api.c
+ */
+struct crypto_aead *krb5_prepare_encryption(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *keys,
+ gfp_t gfp);
+struct crypto_shash *krb5_prepare_checksum(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ gfp_t gfp);
+
+/*
+ * krb5_kdf.c
+ */
+int krb5_derive_Kc(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp);
+int krb5_derive_Ke(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp);
+int krb5_derive_Ki(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp);
+
+/*
+ * rfc3961_simplified.c
+ */
+extern const struct krb5_crypto_profile rfc3961_simplified_profile;
+
+int crypto_shash_update_sg(struct shash_desc *desc, struct scatterlist *sg,
+ size_t offset, size_t len);
+int authenc_derive_encrypt_keys(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+int authenc_load_encrypt_keys(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Ke,
+ const struct krb5_buffer *Ki,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+int rfc3961_derive_checksum_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+int rfc3961_load_checksum_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+ssize_t krb5_aead_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded);
+int krb5_aead_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+ssize_t rfc3961_get_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len);
+int rfc3961_verify_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+
+/*
+ * rfc3962_aes.c
+ */
+extern const struct krb5_enctype krb5_aes128_cts_hmac_sha1_96;
+extern const struct krb5_enctype krb5_aes256_cts_hmac_sha1_96;
+
+/*
+ * rfc6803_camellia.c
+ */
+extern const struct krb5_enctype krb5_camellia128_cts_cmac;
+extern const struct krb5_enctype krb5_camellia256_cts_cmac;
+
+/*
+ * rfc8009_aes2.c
+ */
+extern const struct krb5_enctype krb5_aes128_cts_hmac_sha256_128;
+extern const struct krb5_enctype krb5_aes256_cts_hmac_sha384_192;
+
+/*
+ * selftest.c
+ */
+#ifdef CONFIG_CRYPTO_KRB5_SELFTESTS
+int krb5_selftest(void);
+#else
+static inline int krb5_selftest(void) { return 0; }
+#endif
+
+/*
+ * selftest_data.c
+ */
+extern const struct krb5_prf_test krb5_prf_tests[];
+extern const struct krb5_key_test krb5_key_tests[];
+extern const struct krb5_enc_test krb5_enc_tests[];
+extern const struct krb5_mic_test krb5_mic_tests[];
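A hedged sketch of how a consumer such as a network filesystem might combine the enctype lookup and buffer-sizing helpers declared below in krb5_api.c; the example_krb5_alloc_enc_buffer() helper and its allocation strategy are assumptions for illustration, not part of the library (needs <crypto/krb5.h>, <linux/slab.h>):

static void *example_krb5_alloc_enc_buffer(u32 enctype, size_t data_size,
					   size_t *buf_size, size_t *data_offset)
{
	const struct krb5_enctype *krb5;

	/* Map the wire enctype number onto the supported-type table. */
	krb5 = crypto_krb5_find_enctype(enctype);
	if (!krb5)
		return NULL;

	/* Room for confounder + data + checksum; the data offset is returned. */
	*buf_size = crypto_krb5_how_much_buffer(krb5, KRB5_ENCRYPT_MODE,
						data_size, data_offset);
	return kzalloc(*buf_size, GFP_KERNEL);
}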
diff --git a/crypto/krb5/krb5_api.c b/crypto/krb5/krb5_api.c
new file mode 100644
index 000000000000..23026d4206c8
--- /dev/null
+++ b/crypto/krb5/krb5_api.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Kerberos 5 crypto library.
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include "internal.h"
+
+MODULE_DESCRIPTION("Kerberos 5 crypto");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+static const struct krb5_enctype *const krb5_supported_enctypes[] = {
+ &krb5_aes128_cts_hmac_sha1_96,
+ &krb5_aes256_cts_hmac_sha1_96,
+ &krb5_aes128_cts_hmac_sha256_128,
+ &krb5_aes256_cts_hmac_sha384_192,
+ &krb5_camellia128_cts_cmac,
+ &krb5_camellia256_cts_cmac,
+};
+
+/**
+ * crypto_krb5_find_enctype - Find the handler for a Kerberos5 encryption type
+ * @enctype: The standard Kerberos encryption type number
+ *
+ * Look up a Kerberos encryption type by number. If successful, returns a
+ * pointer to the type tables; returns NULL otherwise.
+ */
+const struct krb5_enctype *crypto_krb5_find_enctype(u32 enctype)
+{
+ const struct krb5_enctype *krb5;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(krb5_supported_enctypes); i++) {
+ krb5 = krb5_supported_enctypes[i];
+ if (krb5->etype == enctype)
+ return krb5;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(crypto_krb5_find_enctype);
+
+/**
+ * crypto_krb5_how_much_buffer - Work out how much buffer is required for an amount of data
+ * @krb5: The encoding to use.
+ * @mode: The mode in which to operate (checksum/encrypt)
+ * @data_size: How much data we want to allow for
+ * @_offset: Where to place the offset into the buffer
+ *
+ * Calculate how much buffer space is required to wrap a given amount of data.
+ * This allows for a confounder, padding and checksum as appropriate. The
+ * amount of buffer required is returned and the offset into the buffer at
+ * which the data will start is placed in *_offset.
+ */
+size_t crypto_krb5_how_much_buffer(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t data_size, size_t *_offset)
+{
+ switch (mode) {
+ case KRB5_CHECKSUM_MODE:
+ *_offset = krb5->cksum_len;
+ return krb5->cksum_len + data_size;
+
+ case KRB5_ENCRYPT_MODE:
+ *_offset = krb5->conf_len;
+ return krb5->conf_len + data_size + krb5->cksum_len;
+
+ default:
+ WARN_ON(1);
+ *_offset = 0;
+ return 0;
+ }
+}
+EXPORT_SYMBOL(crypto_krb5_how_much_buffer);
+
+/**
+ * crypto_krb5_how_much_data - Work out how much data can fit in an amount of buffer
+ * @krb5: The encoding to use.
+ * @mode: The mode in which to operate (checksum/encrypt)
+ * @_buffer_size: How much buffer we want to allow for (may be reduced)
+ * @_offset: Where to place the offset into the buffer
+ *
+ * Calculate how much data can be fitted into a given amount of buffer. This
+ * allows for a confounder, padding and checksum as appropriate. The amount of
+ * data that will fit is returned, the amount of buffer required is shrunk to
+ * allow for alignment and the offset into the buffer at which the data will
+ * start is placed in *_offset.
+ */
+size_t crypto_krb5_how_much_data(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t *_buffer_size, size_t *_offset)
+{
+ size_t buffer_size = *_buffer_size, data_size;
+
+ switch (mode) {
+ case KRB5_CHECKSUM_MODE:
+ if (WARN_ON(buffer_size < krb5->cksum_len + 1))
+ goto bad;
+ *_offset = krb5->cksum_len;
+ return buffer_size - krb5->cksum_len;
+
+ case KRB5_ENCRYPT_MODE:
+ if (WARN_ON(buffer_size < krb5->conf_len + 1 + krb5->cksum_len))
+ goto bad;
+ data_size = buffer_size - krb5->cksum_len;
+ *_offset = krb5->conf_len;
+ return data_size - krb5->conf_len;
+
+ default:
+ WARN_ON(1);
+ goto bad;
+ }
+
+bad:
+ *_offset = 0;
+ return 0;
+}
+EXPORT_SYMBOL(crypto_krb5_how_much_data);
+
+/**
+ * crypto_krb5_where_is_the_data - Find the data in a decrypted message
+ * @krb5: The encoding to use.
+ * @mode: Mode of operation
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Find the offset and size of the data in a secure message so that this
+ * information can be used in the metadata buffer which will get added to the
+ * digest by crypto_krb5_verify_mic().
+ */
+void crypto_krb5_where_is_the_data(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t *_offset, size_t *_len)
+{
+ switch (mode) {
+ case KRB5_CHECKSUM_MODE:
+ *_offset += krb5->cksum_len;
+ *_len -= krb5->cksum_len;
+ return;
+ case KRB5_ENCRYPT_MODE:
+ *_offset += krb5->conf_len;
+ *_len -= krb5->conf_len + krb5->cksum_len;
+ return;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+}
+EXPORT_SYMBOL(crypto_krb5_where_is_the_data);
+
+/*
+ * Prepare the encryption with derived key data.
+ */
+struct crypto_aead *krb5_prepare_encryption(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *keys,
+ gfp_t gfp)
+{
+ struct crypto_aead *ci = NULL;
+ int ret = -ENOMEM;
+
+ ci = crypto_alloc_aead(krb5->encrypt_name, 0, 0);
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ if (ret == -ENOENT)
+ ret = -ENOPKG;
+ goto err;
+ }
+
+ ret = crypto_aead_setkey(ci, keys->data, keys->len);
+ if (ret < 0) {
+ pr_err("Couldn't set AEAD key %s: %d\n", krb5->encrypt_name, ret);
+ goto err_ci;
+ }
+
+ ret = crypto_aead_setauthsize(ci, krb5->cksum_len);
+ if (ret < 0) {
+ pr_err("Couldn't set AEAD authsize %s: %d\n", krb5->encrypt_name, ret);
+ goto err_ci;
+ }
+
+ return ci;
+err_ci:
+ crypto_free_aead(ci);
+err:
+ return ERR_PTR(ret);
+}
+
+/**
+ * crypto_krb5_prepare_encryption - Prepare AEAD crypto object for encryption-mode
+ * @krb5: The encoding to use.
+ * @TK: The transport key to use.
+ * @usage: The usage constant for key derivation.
+ * @gfp: Allocation flags.
+ *
+ * Allocate a crypto object that does all the necessary crypto, key it, set
+ * its parameters and return a handle to it. This can then be used to
+ * dispatch encrypt and decrypt operations.
+ */
+struct crypto_aead *crypto_krb5_prepare_encryption(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ u32 usage, gfp_t gfp)
+{
+ struct crypto_aead *ci = NULL;
+ struct krb5_buffer keys = {};
+ int ret;
+
+ ret = krb5->profile->derive_encrypt_keys(krb5, TK, usage, &keys, gfp);
+ if (ret < 0)
+ goto err;
+
+ ci = krb5_prepare_encryption(krb5, &keys, gfp);
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ goto err;
+ }
+
+ kfree(keys.data);
+ return ci;
+err:
+ kfree(keys.data);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(crypto_krb5_prepare_encryption);
+
+/*
+ * Prepare the checksum with derived key data.
+ */
+struct crypto_shash *krb5_prepare_checksum(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ gfp_t gfp)
+{
+ struct crypto_shash *ci = NULL;
+ int ret = -ENOMEM;
+
+ ci = crypto_alloc_shash(krb5->cksum_name, 0, 0);
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ if (ret == -ENOENT)
+ ret = -ENOPKG;
+ goto err;
+ }
+
+ ret = crypto_shash_setkey(ci, Kc->data, Kc->len);
+ if (ret < 0) {
+ pr_err("Couldn't set shash key %s: %d\n", krb5->cksum_name, ret);
+ goto err_ci;
+ }
+
+ return ci;
+err_ci:
+ crypto_free_shash(ci);
+err:
+ return ERR_PTR(ret);
+}
+
+/**
+ * crypto_krb5_prepare_checksum - Prepare hash crypto object for checksum-mode
+ * @krb5: The encoding to use.
+ * @TK: The transport key to use.
+ * @usage: The usage constant for key derivation.
+ * @gfp: Allocation flags.
+ *
+ * Allocate a crypto object that does all the necessary crypto, key it, set
+ * its parameters and return a handle to it. This can then be used to
+ * dispatch get_mic and verify_mic operations.
+ */
+struct crypto_shash *crypto_krb5_prepare_checksum(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ u32 usage, gfp_t gfp)
+{
+ struct crypto_shash *ci = NULL;
+ struct krb5_buffer keys = {};
+ int ret;
+
+ ret = krb5->profile->derive_checksum_key(krb5, TK, usage, &keys, gfp);
+ if (ret < 0) {
+ pr_err("get_Kc failed %d\n", ret);
+ goto err;
+ }
+
+ ci = krb5_prepare_checksum(krb5, &keys, gfp);
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ goto err;
+ }
+
+ kfree(keys.data);
+ return ci;
+err:
+ kfree(keys.data);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(crypto_krb5_prepare_checksum);
+
+/**
+ * crypto_krb5_encrypt - Apply Kerberos encryption and integrity.
+ * @krb5: The encoding to use.
+ * @aead: The keyed crypto object to use.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @sg_len: The size of the buffer.
+ * @data_offset: The offset of the data in the @sg buffer.
+ * @data_len: The length of the data.
+ * @preconfounded: True if the confounder is already inserted.
+ *
+ * Using the specified Kerberos encoding, insert a confounder and padding as
+ * needed, encrypt this and the data in place and insert an integrity checksum
+ * into the buffer.
+ *
+ * The buffer must include space for the confounder, the checksum and any
+ * padding required. The caller can preinsert the confounder into the buffer
+ * (for testing, for example).
+ *
+ * The resulting secured blob may be less than the size of the buffer.
+ *
+ * Returns the size of the secure blob if successful, -ENOMEM on an allocation
+ * failure, -EFAULT if there is insufficient space, -EMSGSIZE if the confounder
+ * is too short or the data is misaligned. Other errors may also be returned
+ * from the crypto layer.
+ */
+ssize_t crypto_krb5_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded)
+{
+ if (WARN_ON(data_offset > sg_len ||
+ data_len > sg_len ||
+ data_offset > sg_len - data_len))
+ return -EMSGSIZE;
+ return krb5->profile->encrypt(krb5, aead, sg, nr_sg, sg_len,
+ data_offset, data_len, preconfounded);
+}
+EXPORT_SYMBOL(crypto_krb5_encrypt);
+
+/**
+ * crypto_krb5_decrypt - Validate and remove Kerberos encryption and integrity.
+ * @krb5: The encoding to use.
+ * @aead: The keyed crypto object to use.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Using the specified Kerberos encoding, check and remove the integrity
+ * checksum and decrypt the secure region, stripping off the confounder.
+ *
+ * If successful, @_offset and @_len are updated to outline the region in which
+ * the data plus the trailing padding are stored. The caller is responsible
+ * for working out how much padding there is and removing it.
+ *
+ * Returns 0 if successful, -ENOMEM on an allocation failure, -EPROTO if the
+ * data cannot be parsed or -EBADMSG if the integrity checksum doesn't match.
+ * Other errors may also be returned from the crypto layer.
+ */
+int crypto_krb5_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ return krb5->profile->decrypt(krb5, aead, sg, nr_sg, _offset, _len);
+}
+EXPORT_SYMBOL(crypto_krb5_decrypt);
+
+/**
+ * crypto_krb5_get_mic - Apply Kerberos integrity checksum.
+ * @krb5: The encoding to use.
+ * @shash: The keyed hash to use.
+ * @metadata: Metadata to add into the hash before adding the data.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @sg_len: The size of the buffer.
+ * @data_offset: The offset of the data in the @sg buffer.
+ * @data_len: The length of the data.
+ *
+ * Using the specified Kerberos encoding, calculate and insert an integrity
+ * checksum into the buffer.
+ *
+ * The buffer must include space for the checksum at the front.
+ *
+ * Returns the size of the secure blob if successful, -ENOMEM on an allocation
+ * failure, -EFAULT if there is insufficient space, -EMSGSIZE if the gap for
+ * the checksum is too short. Other errors may also be returned from the
+ * crypto layer.
+ */
+ssize_t crypto_krb5_get_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len)
+{
+ if (WARN_ON(data_offset > sg_len ||
+ data_len > sg_len ||
+ data_offset > sg_len - data_len))
+ return -EMSGSIZE;
+ return krb5->profile->get_mic(krb5, shash, metadata, sg, nr_sg, sg_len,
+ data_offset, data_len);
+}
+EXPORT_SYMBOL(crypto_krb5_get_mic);
+
+/**
+ * crypto_krb5_verify_mic - Validate and remove Kerberos integrity checksum.
+ * @krb5: The encoding to use.
+ * @shash: The keyed hash to use.
+ * @metadata: Metadata to add into the hash before adding the data.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Using the specified Kerberos encoding, check and remove the integrity
+ * checksum.
+ *
+ * If successful, @_offset and @_len are updated to outline the region in which
+ * the data is stored.
+ *
+ * Returns 0 if successful, -ENOMEM on an allocation failure, -EPROTO if the
+ * data cannot be parsed or -EBADMSG if the checksum doesn't match. Other
+ * errors may also be returned from the crypto layer.
+ */
+int crypto_krb5_verify_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ return krb5->profile->verify_mic(krb5, shash, metadata, sg, nr_sg,
+ _offset, _len);
+}
+EXPORT_SYMBOL(crypto_krb5_verify_mic);
+
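As a companion to the kernel-doc above, here is a minimal, hypothetical checksum-mode sketch (not part of this patch): it inserts a MIC into a caller-supplied buffer and immediately verifies it. The function name is illustrative, and the buffer is assumed to have been laid out with crypto_krb5_how_much_buffer(krb5, KRB5_CHECKSUM_MODE, ...) so that the data sits @data_offset bytes in, leaving room for the checksum in front.

static int example_mic(const struct krb5_enctype *krb5,
		       const struct krb5_buffer *tk, u32 usage,
		       void *buf, size_t buf_len,
		       size_t data_offset, size_t data_len)
{
	struct crypto_shash *shash;
	struct scatterlist sg[1];
	size_t offset = 0, len;
	ssize_t secured;
	int ret;

	shash = crypto_krb5_prepare_checksum(krb5, tk, usage, GFP_KERNEL);
	if (IS_ERR(shash))
		return PTR_ERR(shash);

	sg_init_one(sg, buf, buf_len);

	/* Calculate the MIC and insert it in the gap before the data. */
	secured = crypto_krb5_get_mic(krb5, shash, NULL, sg, 1, buf_len,
				      data_offset, data_len);
	if (secured < 0) {
		ret = secured;
		goto out;
	}

	/* Check it again; on success, offset/len then bound the data. */
	len = secured;
	ret = crypto_krb5_verify_mic(krb5, shash, NULL, sg, 1, &offset, &len);
out:
	crypto_free_shash(shash);
	return ret;
}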
+static int __init crypto_krb5_init(void)
+{
+ return krb5_selftest();
+}
+module_init(crypto_krb5_init);
+
+static void __exit crypto_krb5_exit(void)
+{
+}
+module_exit(crypto_krb5_exit);
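For illustration, a minimal caller-side sketch of the encrypt path above (not part of this patch): it wraps a linear plaintext blob with aes256-cts-hmac-sha384-192. The function name, the choice of enctype and the usage constant are illustrative, and the transport key @tk is assumed to have been agreed beforehand.

static ssize_t example_wrap(const struct krb5_buffer *tk, u32 usage,
			    const void *data, size_t data_len)
{
	const struct krb5_enctype *krb5;
	struct crypto_aead *aead;
	struct scatterlist sg[1];
	size_t buf_len, offset;
	ssize_t secured;
	void *buf;

	krb5 = crypto_krb5_find_enctype(KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192);
	if (!krb5)
		return -ENOPKG;

	aead = crypto_krb5_prepare_encryption(krb5, tk, usage, GFP_KERNEL);
	if (IS_ERR(aead))
		return PTR_ERR(aead);

	/* Size the buffer and find out where the plaintext must be placed. */
	buf_len = crypto_krb5_how_much_buffer(krb5, KRB5_ENCRYPT_MODE,
					      data_len, &offset);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		crypto_free_aead(aead);
		return -ENOMEM;
	}
	memcpy(buf + offset, data, data_len);

	/* Confounder, encryption and checksum are applied in place. */
	sg_init_one(sg, buf, buf_len);
	secured = crypto_krb5_encrypt(krb5, aead, sg, 1, buf_len,
				      offset, data_len, false);

	/* A real caller would transmit buf[0..secured) before freeing it. */
	crypto_free_aead(aead);
	kfree_sensitive(buf);
	return secured;
}

On the receive side, crypto_krb5_decrypt() is called on the same scatterlist with *_offset = 0 and *_len set to the size of the secured blob; on success they are updated to bound the plaintext plus any trailing padding.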
diff --git a/crypto/krb5/krb5_kdf.c b/crypto/krb5/krb5_kdf.c
new file mode 100644
index 000000000000..6699e5469d1b
--- /dev/null
+++ b/crypto/krb5/krb5_kdf.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Kerberos key derivation.
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include "internal.h"
+
+/**
+ * crypto_krb5_calc_PRFplus - Calculate PRF+ [RFC4402]
+ * @krb5: The encryption type to use
+ * @K: The protocol key for the pseudo-random function
+ * @L: The length of the output
+ * @S: The input octet string
+ * @result: Result buffer, at least @L bytes in size
+ * @gfp: Allocation restrictions
+ *
+ * Calculate the Kerberos pseudo-random function PRF+() by the following
+ * method:
+ *
+ * PRF+(K, L, S) = truncate(L, T1 || T2 || .. || Tn)
+ * Tn = PRF(K, n || S)
+ * [rfc4402 sec 2]
+ */
+int crypto_krb5_calc_PRFplus(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *K,
+ unsigned int L,
+ const struct krb5_buffer *S,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ struct krb5_buffer T_series, Tn, n_S;
+ void *buffer;
+ int ret, n = 1;
+
+ Tn.len = krb5->prf_len;
+ T_series.len = 0;
+ n_S.len = 4 + S->len;
+
+ buffer = kzalloc(round16(L + Tn.len) + round16(n_S.len), gfp);
+ if (!buffer)
+ return -ENOMEM;
+
+ T_series.data = buffer;
+ n_S.data = buffer + round16(L + Tn.len);
+ memcpy(n_S.data + 4, S->data, S->len);
+
+ while (T_series.len < L) {
+ *(__be32 *)(n_S.data) = htonl(n);
+ Tn.data = T_series.data + Tn.len * (n - 1);
+ ret = krb5->profile->calc_PRF(krb5, K, &n_S, &Tn, gfp);
+ if (ret < 0)
+ goto err;
+ T_series.len += Tn.len;
+ n++;
+ }
+
+ /* Truncate to L */
+ memcpy(result->data, T_series.data, L);
+ ret = 0;
+
+err:
+ kfree_sensitive(buffer);
+ return ret;
+}
+EXPORT_SYMBOL(crypto_krb5_calc_PRFplus);
+
+/**
+ * krb5_derive_Kc - Derive the Kc checksum key
+ * @krb5: The encryption type to use
+ * @TK: The base key
+ * @usage: The key usage number
+ * @key: Prepped buffer to store the key into
+ * @gfp: Allocation restrictions
+ *
+ * Derive the Kerberos Kc checksumming key. The key is stored into the
+ * prepared buffer.
+ */
+int krb5_derive_Kc(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp)
+{
+ u8 buf[5] __aligned(CRYPTO_MINALIGN);
+ struct krb5_buffer usage_constant = { .len = 5, .data = buf };
+
+ *(__be32 *)buf = cpu_to_be32(usage);
+ buf[4] = KEY_USAGE_SEED_CHECKSUM;
+
+ key->len = krb5->Kc_len;
+ return krb5->profile->calc_Kc(krb5, TK, &usage_constant, key, gfp);
+}
+
+/**
+ * krb5_derive_Ke - Derive the Ke encryption key
+ * @krb5: The encryption type to use
+ * @TK: The base key
+ * @usage: The key usage number
+ * @key: Prepped buffer to store the key into
+ * @gfp: Allocation restrictions
+ *
+ * Derive the Kerberos Ke encryption key. The key is stored into the prepared
+ * buffer.
+ */
+int krb5_derive_Ke(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp)
+{
+ u8 buf[5] __aligned(CRYPTO_MINALIGN);
+ struct krb5_buffer usage_constant = { .len = 5, .data = buf };
+
+ *(__be32 *)buf = cpu_to_be32(usage);
+ buf[4] = KEY_USAGE_SEED_ENCRYPTION;
+
+ key->len = krb5->Ke_len;
+ return krb5->profile->calc_Ke(krb5, TK, &usage_constant, key, gfp);
+}
+
+/**
+ * krb5_derive_Ki - Derive the Ki integrity key
+ * @krb5: The encryption type to use
+ * @TK: The base key
+ * @usage: The key usage number
+ * @key: Prepped buffer to store the key into
+ * @gfp: Allocation restrictions
+ *
+ * Derive the Kerberos Ki integrity checksum key. The key is stored into the
+ * prepared buffer.
+ */
+int krb5_derive_Ki(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp)
+{
+ u8 buf[5] __aligned(CRYPTO_MINALIGN);
+ struct krb5_buffer usage_constant = { .len = 5, .data = buf };
+
+ *(__be32 *)buf = cpu_to_be32(usage);
+ buf[4] = KEY_USAGE_SEED_INTEGRITY;
+
+ key->len = krb5->Ki_len;
+ return krb5->profile->calc_Ki(krb5, TK, &usage_constant, key, gfp);
+}
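A minimal, hypothetical use of crypto_krb5_calc_PRFplus() (not part of this patch), deriving @want bytes of output keying material from a base key and an application-supplied octet string; the function name is illustrative.

static int example_prfplus(const struct krb5_enctype *krb5,
			   const struct krb5_buffer *base_key,
			   const struct krb5_buffer *input, size_t want)
{
	struct krb5_buffer out = { .len = want };
	int ret;

	out.data = kzalloc(want, GFP_KERNEL);
	if (!out.data)
		return -ENOMEM;

	/* T1 || T2 || ... is generated krb5->prf_len bytes at a time and the
	 * concatenation is then truncated to @want bytes.
	 */
	ret = crypto_krb5_calc_PRFplus(krb5, base_key, want, input, &out,
				       GFP_KERNEL);
	if (ret == 0)
		pr_info("PRF+ output: %*phN\n", (int)want, out.data);

	kfree_sensitive(out.data);
	return ret;
}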
diff --git a/crypto/krb5/rfc3961_simplified.c b/crypto/krb5/rfc3961_simplified.c
new file mode 100644
index 000000000000..e49cbdec7c40
--- /dev/null
+++ b/crypto/krb5/rfc3961_simplified.c
@@ -0,0 +1,793 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* rfc3961 Kerberos 5 simplified crypto profile.
+ *
+ * Parts borrowed from net/sunrpc/auth_gss/.
+ */
+/*
+ * COPYRIGHT (c) 2008
+ * The Regents of the University of Michigan
+ * ALL RIGHTS RESERVED
+ *
+ * Permission is granted to use, copy, create derivative works
+ * and redistribute this software and such derivative works
+ * for any purpose, so long as the name of The University of
+ * Michigan is not used in any advertising or publicity
+ * pertaining to the use of distribution of this software
+ * without specific, written prior authorization. If the
+ * above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any
+ * portion of this software, then the disclaimer below must
+ * also be included.
+ *
+ * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
+ * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
+ * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
+ * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
+ * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
+ * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
+ * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
+ * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGES.
+ */
+
+/*
+ * Copyright (C) 1998 by the FundsXpress, INC.
+ *
+ * All rights reserved.
+ *
+ * Export of this software from the United States of America may require
+ * a specific license from the United States Government. It is the
+ * responsibility of any person or organization contemplating export to
+ * obtain such a license before exporting.
+ *
+ * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
+ * distribute this software and its documentation for any purpose and
+ * without fee is hereby granted, provided that the above copyright
+ * notice appear in all copies and that both that copyright notice and
+ * this permission notice appear in supporting documentation, and that
+ * the name of FundsXpress. not be used in advertising or publicity pertaining
+ * to distribution of the software without specific, written prior
+ * permission. FundsXpress makes no representations about the suitability of
+ * this software for any purpose. It is provided "as is" without express
+ * or implied warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/lcm.h>
+#include <linux/rtnetlink.h>
+#include <crypto/authenc.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include "internal.h"
+
+/* Maximum blocksize for the supported crypto algorithms */
+#define KRB5_MAX_BLOCKSIZE (16)
+
+int crypto_shash_update_sg(struct shash_desc *desc, struct scatterlist *sg,
+ size_t offset, size_t len)
+{
+ struct sg_mapping_iter miter;
+ size_t i, n;
+ int ret = 0;
+
+ sg_miter_start(&miter, sg, sg_nents(sg),
+ SG_MITER_FROM_SG | SG_MITER_LOCAL);
+ sg_miter_skip(&miter, offset);
+ for (i = 0; i < len; i += n) {
+ sg_miter_next(&miter);
+ n = min(miter.length, len - i);
+ ret = crypto_shash_update(desc, miter.addr, n);
+ if (ret < 0)
+ break;
+ }
+ sg_miter_stop(&miter);
+ return ret;
+}
+
+static int rfc3961_do_encrypt(struct crypto_sync_skcipher *tfm, void *iv,
+ const struct krb5_buffer *in, struct krb5_buffer *out)
+{
+ struct scatterlist sg[1];
+ u8 local_iv[KRB5_MAX_BLOCKSIZE] __aligned(KRB5_MAX_BLOCKSIZE) = {0};
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ int ret;
+
+ if (WARN_ON(in->len != out->len))
+ return -EINVAL;
+ if (out->len % crypto_sync_skcipher_blocksize(tfm) != 0)
+ return -EINVAL;
+
+ if (crypto_sync_skcipher_ivsize(tfm) > KRB5_MAX_BLOCKSIZE)
+ return -EINVAL;
+
+ if (iv)
+ memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
+
+ memcpy(out->data, in->data, out->len);
+ sg_init_one(sg, out->data, out->len);
+
+ skcipher_request_set_sync_tfm(req, tfm);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, sg, sg, out->len, local_iv);
+
+ ret = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
+ return ret;
+}
+
+/*
+ * Calculate an unkeyed basic hash.
+ */
+static int rfc3961_calc_H(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *data,
+ struct krb5_buffer *digest,
+ gfp_t gfp)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *desc;
+ size_t desc_size;
+ int ret = -ENOMEM;
+
+ tfm = crypto_alloc_shash(krb5->hash_name, 0, 0);
+ if (IS_ERR(tfm))
+ return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);
+
+ desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
+
+ desc = kzalloc(desc_size, gfp);
+ if (!desc)
+ goto error_tfm;
+
+ digest->len = crypto_shash_digestsize(tfm);
+ digest->data = kzalloc(digest->len, gfp);
+ if (!digest->data)
+ goto error_desc;
+
+ desc->tfm = tfm;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error_digest;
+
+ ret = crypto_shash_finup(desc, data->data, data->len, digest->data);
+ if (ret < 0)
+ goto error_digest;
+
+ goto error_desc;
+
+error_digest:
+ kfree_sensitive(digest->data);
+error_desc:
+ kfree_sensitive(desc);
+error_tfm:
+ crypto_free_shash(tfm);
+ return ret;
+}
+
+/*
+ * This is the n-fold function as described in rfc3961, sec 5.1
+ * Taken from MIT Kerberos and modified.
+ */
+static void rfc3961_nfold(const struct krb5_buffer *source, struct krb5_buffer *result)
+{
+ const u8 *in = source->data;
+ u8 *out = result->data;
+ unsigned long ulcm;
+ unsigned int inbits, outbits;
+ int byte, i, msbit;
+
+ /* the code below is more readable if I make these bytes instead of bits */
+ inbits = source->len;
+ outbits = result->len;
+
+ /* first compute lcm(n,k) */
+ ulcm = lcm(inbits, outbits);
+
+ /* now do the real work */
+ memset(out, 0, outbits);
+ byte = 0;
+
+ /* this will end up cycling through k lcm(k,n)/k times, which
+ * is correct.
+ */
+ for (i = ulcm-1; i >= 0; i--) {
+ /* compute the msbit in k which gets added into this byte */
+ msbit = (
+ /* first, start with the msbit in the first,
+ * unrotated byte
+ */
+ ((inbits << 3) - 1) +
+ /* then, for each byte, shift to the right
+ * for each repetition
+ */
+ (((inbits << 3) + 13) * (i/inbits)) +
+ /* last, pick out the correct byte within
+ * that shifted repetition
+ */
+ ((inbits - (i % inbits)) << 3)
+ ) % (inbits << 3);
+
+ /* pull out the byte value itself */
+ byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8) |
+ (in[((inbits) - (msbit >> 3)) % inbits]))
+ >> ((msbit & 7) + 1)) & 0xff;
+
+ /* do the addition */
+ byte += out[i % outbits];
+ out[i % outbits] = byte & 0xff;
+
+ /* keep around the carry bit, if any */
+ byte >>= 8;
+ }
+
+ /* if there's a carry bit left over, add it back in */
+ if (byte) {
+ for (i = outbits - 1; i >= 0; i--) {
+ /* do the addition */
+ byte += out[i];
+ out[i] = byte & 0xff;
+
+ /* keep around the carry bit, if any */
+ byte >>= 8;
+ }
+ }
+}
+
+/*
+ * Calculate a derived key, DK(Base Key, Well-Known Constant)
+ *
+ * DK(Key, Constant) = random-to-key(DR(Key, Constant))
+ * DR(Key, Constant) = k-truncate(E(Key, Constant, initial-cipher-state))
+ * K1 = E(Key, n-fold(Constant), initial-cipher-state)
+ * K2 = E(Key, K1, initial-cipher-state)
+ * K3 = E(Key, K2, initial-cipher-state)
+ * K4 = ...
+ * DR(Key, Constant) = k-truncate(K1 | K2 | K3 | K4 ...)
+ * [rfc3961 sec 5.1]
+ */
+static int rfc3961_calc_DK(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *inkey,
+ const struct krb5_buffer *in_constant,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ unsigned int blocksize, keybytes, keylength, n;
+ struct krb5_buffer inblock, outblock, rawkey;
+ struct crypto_sync_skcipher *cipher;
+ int ret = -EINVAL;
+
+ blocksize = krb5->block_len;
+ keybytes = krb5->key_bytes;
+ keylength = krb5->key_len;
+
+ if (inkey->len != keylength || result->len != keylength)
+ return -EINVAL;
+ if (!krb5->random_to_key && result->len != keybytes)
+ return -EINVAL;
+
+ cipher = crypto_alloc_sync_skcipher(krb5->derivation_enc, 0, 0);
+ if (IS_ERR(cipher)) {
+ ret = (PTR_ERR(cipher) == -ENOENT) ? -ENOPKG : PTR_ERR(cipher);
+ goto err_return;
+ }
+ ret = crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len);
+ if (ret < 0)
+ goto err_free_cipher;
+
+ ret = -ENOMEM;
+ inblock.data = kzalloc(blocksize * 2 + keybytes, gfp);
+ if (!inblock.data)
+ goto err_free_cipher;
+
+ inblock.len = blocksize;
+ outblock.data = inblock.data + blocksize;
+ outblock.len = blocksize;
+ rawkey.data = outblock.data + blocksize;
+ rawkey.len = keybytes;
+
+ /* initialize the input block */
+
+ if (in_constant->len == inblock.len)
+ memcpy(inblock.data, in_constant->data, inblock.len);
+ else
+ rfc3961_nfold(in_constant, &inblock);
+
+ /* loop encrypting the blocks until enough key bytes are generated */
+	n = 0;
+	while (n < rawkey.len) {
+		ret = rfc3961_do_encrypt(cipher, NULL, &inblock, &outblock);
+		if (ret < 0)
+			goto err_free_inblock;
+
+ if (keybytes - n <= outblock.len) {
+ memcpy(rawkey.data + n, outblock.data, keybytes - n);
+ break;
+ }
+
+ memcpy(rawkey.data + n, outblock.data, outblock.len);
+ memcpy(inblock.data, outblock.data, outblock.len);
+ n += outblock.len;
+ }
+
+ /* postprocess the key */
+ if (!krb5->random_to_key) {
+ /* Identity random-to-key function. */
+ memcpy(result->data, rawkey.data, rawkey.len);
+ ret = 0;
+ } else {
+ ret = krb5->random_to_key(krb5, &rawkey, result);
+ }
+
+err_free_inblock:
+	kfree_sensitive(inblock.data);
+err_free_cipher:
+ crypto_free_sync_skcipher(cipher);
+err_return:
+ return ret;
+}
+
+/*
+ * Calculate single encryption, E()
+ *
+ * E(Key, octets)
+ */
+static int rfc3961_calc_E(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *key,
+ const struct krb5_buffer *in_data,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ struct crypto_sync_skcipher *cipher;
+ int ret;
+
+ cipher = crypto_alloc_sync_skcipher(krb5->derivation_enc, 0, 0);
+ if (IS_ERR(cipher)) {
+ ret = (PTR_ERR(cipher) == -ENOENT) ? -ENOPKG : PTR_ERR(cipher);
+ goto err;
+ }
+
+ ret = crypto_sync_skcipher_setkey(cipher, key->data, key->len);
+ if (ret < 0)
+ goto err_free;
+
+ ret = rfc3961_do_encrypt(cipher, NULL, in_data, result);
+
+err_free:
+ crypto_free_sync_skcipher(cipher);
+err:
+ return ret;
+}
+
+/*
+ * Calculate the pseudo-random function, PRF().
+ *
+ * tmp1 = H(octet-string)
+ * tmp2 = truncate tmp1 to multiple of m
+ * PRF = E(DK(protocol-key, prfconstant), tmp2, initial-cipher-state)
+ *
+ * The "prfconstant" used in the PRF operation is the three-octet string
+ * "prf".
+ * [rfc3961 sec 5.3]
+ */
+static int rfc3961_calc_PRF(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *protocol_key,
+ const struct krb5_buffer *octet_string,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ static const struct krb5_buffer prfconstant = { 3, "prf" };
+ struct krb5_buffer derived_key;
+ struct krb5_buffer tmp1, tmp2;
+ unsigned int m = krb5->block_len;
+ void *buffer;
+ int ret;
+
+ if (result->len != krb5->prf_len)
+ return -EINVAL;
+
+ tmp1.len = krb5->hash_len;
+ derived_key.len = krb5->key_bytes;
+ buffer = kzalloc(round16(tmp1.len) + round16(derived_key.len), gfp);
+ if (!buffer)
+ return -ENOMEM;
+
+ tmp1.data = buffer;
+ derived_key.data = buffer + round16(tmp1.len);
+
+ ret = rfc3961_calc_H(krb5, octet_string, &tmp1, gfp);
+ if (ret < 0)
+ goto err;
+
+ tmp2.len = tmp1.len & ~(m - 1);
+ tmp2.data = tmp1.data;
+
+ ret = rfc3961_calc_DK(krb5, protocol_key, &prfconstant, &derived_key, gfp);
+ if (ret < 0)
+ goto err;
+
+ ret = rfc3961_calc_E(krb5, &derived_key, &tmp2, result, gfp);
+
+err:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+/*
+ * Derive the Ke and Ki keys and package them into a key parameter that can be
+ * given to the setkey of an authenc AEAD crypto object.
+ */
+int authenc_derive_encrypt_keys(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp)
+{
+ struct crypto_authenc_key_param *param;
+ struct krb5_buffer Ke, Ki;
+ struct rtattr *rta;
+ int ret;
+
+ Ke.len = krb5->Ke_len;
+ Ki.len = krb5->Ki_len;
+ setkey->len = RTA_LENGTH(sizeof(*param)) + Ke.len + Ki.len;
+	setkey->data = kzalloc(setkey->len, gfp);
+ if (!setkey->data)
+ return -ENOMEM;
+
+ rta = setkey->data;
+ rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+ rta->rta_len = RTA_LENGTH(sizeof(*param));
+ param = RTA_DATA(rta);
+ param->enckeylen = htonl(Ke.len);
+
+ Ki.data = (void *)(param + 1);
+ Ke.data = Ki.data + Ki.len;
+
+ ret = krb5_derive_Ke(krb5, TK, usage, &Ke, gfp);
+ if (ret < 0) {
+ pr_err("get_Ke failed %d\n", ret);
+ return ret;
+ }
+ ret = krb5_derive_Ki(krb5, TK, usage, &Ki, gfp);
+ if (ret < 0)
+ pr_err("get_Ki failed %d\n", ret);
+ return ret;
+}
+
+/*
+ * Package predefined Ke and Ki keys into a key parameter that can be given
+ * to the setkey of an authenc AEAD crypto object.
+ */
+int authenc_load_encrypt_keys(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Ke,
+ const struct krb5_buffer *Ki,
+ struct krb5_buffer *setkey,
+ gfp_t gfp)
+{
+ struct crypto_authenc_key_param *param;
+ struct rtattr *rta;
+
+ setkey->len = RTA_LENGTH(sizeof(*param)) + Ke->len + Ki->len;
+	setkey->data = kzalloc(setkey->len, gfp);
+ if (!setkey->data)
+ return -ENOMEM;
+
+ rta = setkey->data;
+ rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+ rta->rta_len = RTA_LENGTH(sizeof(*param));
+ param = RTA_DATA(rta);
+ param->enckeylen = htonl(Ke->len);
+ memcpy((void *)(param + 1), Ki->data, Ki->len);
+ memcpy((void *)(param + 1) + Ki->len, Ke->data, Ke->len);
+ return 0;
+}
+
+/*
+ * Derive the Kc key for checksum-only mode and package it into a key parameter
+ * that can be given to the setkey of a hash crypto object.
+ */
+int rfc3961_derive_checksum_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp)
+{
+ int ret;
+
+ setkey->len = krb5->Kc_len;
+	setkey->data = kzalloc(setkey->len, gfp);
+ if (!setkey->data)
+ return -ENOMEM;
+
+ ret = krb5_derive_Kc(krb5, TK, usage, setkey, gfp);
+ if (ret < 0)
+ pr_err("get_Kc failed %d\n", ret);
+ return ret;
+}
+
+/*
+ * Package a predefined Kc key for checksum-only mode into a key parameter that
+ * can be given to the setkey of a hash crypto object.
+ */
+int rfc3961_load_checksum_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ struct krb5_buffer *setkey,
+ gfp_t gfp)
+{
+ setkey->len = krb5->Kc_len;
+	setkey->data = kmemdup(Kc->data, Kc->len, gfp);
+ if (!setkey->data)
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * Apply encryption and checksumming functions to part of a scatterlist.
+ */
+ssize_t krb5_aead_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded)
+{
+ struct aead_request *req;
+ ssize_t ret, done;
+ size_t bsize, base_len, secure_offset, secure_len, pad_len, cksum_offset;
+ void *buffer;
+ u8 *iv;
+
+ if (WARN_ON(data_offset != krb5->conf_len))
+ return -EINVAL; /* Data is in wrong place */
+
+ secure_offset = 0;
+ base_len = krb5->conf_len + data_len;
+ pad_len = 0;
+ secure_len = base_len + pad_len;
+ cksum_offset = secure_len;
+ if (WARN_ON(cksum_offset + krb5->cksum_len > sg_len))
+ return -EFAULT;
+
+ bsize = krb5_aead_size(aead) +
+ krb5_aead_ivsize(aead);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* Insert the confounder into the buffer */
+ ret = -EFAULT;
+ if (!preconfounded) {
+ get_random_bytes(buffer, krb5->conf_len);
+ done = sg_pcopy_from_buffer(sg, nr_sg, buffer, krb5->conf_len,
+ secure_offset);
+ if (done != krb5->conf_len)
+ goto error;
+ }
+
+ /* We may need to pad out to the crypto blocksize. */
+ if (pad_len) {
+ done = sg_zero_buffer(sg, nr_sg, pad_len, data_offset + data_len);
+ if (done != pad_len)
+ goto error;
+ }
+
+ /* Hash and encrypt the message. */
+ req = buffer;
+ iv = buffer + krb5_aead_size(aead);
+
+ aead_request_set_tfm(req, aead);
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_crypt(req, sg, sg, secure_len, iv);
+ ret = crypto_aead_encrypt(req);
+ if (ret < 0)
+ goto error;
+
+ ret = secure_len + krb5->cksum_len;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+/*
+ * Apply decryption and checksumming functions to a message. The offset and
+ * length are updated to reflect the actual content of the encrypted region.
+ */
+int krb5_aead_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ struct aead_request *req;
+ size_t bsize;
+ void *buffer;
+ int ret;
+ u8 *iv;
+
+ if (WARN_ON(*_offset != 0))
+ return -EINVAL; /* Can't set offset on aead */
+
+ if (*_len < krb5->conf_len + krb5->cksum_len)
+ return -EPROTO;
+
+ bsize = krb5_aead_size(aead) +
+ krb5_aead_ivsize(aead);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* Decrypt the message and verify its checksum. */
+ req = buffer;
+ iv = buffer + krb5_aead_size(aead);
+
+ aead_request_set_tfm(req, aead);
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_crypt(req, sg, sg, *_len, iv);
+ ret = crypto_aead_decrypt(req);
+ if (ret < 0)
+ goto error;
+
+ /* Adjust the boundaries of the data. */
+ *_offset += krb5->conf_len;
+ *_len -= krb5->conf_len + krb5->cksum_len;
+ ret = 0;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+/*
+ * Generate a checksum over some metadata and part of a buffer described by a
+ * scatterlist and insert the MIC into the buffer immediately prior to the data.
+ */
+ssize_t rfc3961_get_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len)
+{
+ struct shash_desc *desc;
+ ssize_t ret, done;
+ size_t bsize;
+ void *buffer, *digest;
+
+ if (WARN_ON(data_offset != krb5->cksum_len))
+ return -EMSGSIZE;
+
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* Calculate the MIC with key Kc and store it into the skb */
+ desc = buffer;
+ desc->tfm = shash;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error;
+
+ if (metadata) {
+ ret = crypto_shash_update(desc, metadata->data, metadata->len);
+ if (ret < 0)
+ goto error;
+ }
+
+ ret = crypto_shash_update_sg(desc, sg, data_offset, data_len);
+ if (ret < 0)
+ goto error;
+
+ digest = buffer + krb5_shash_size(shash);
+ ret = crypto_shash_final(desc, digest);
+ if (ret < 0)
+ goto error;
+
+ ret = -EFAULT;
+ done = sg_pcopy_from_buffer(sg, nr_sg, digest, krb5->cksum_len,
+ data_offset - krb5->cksum_len);
+ if (done != krb5->cksum_len)
+ goto error;
+
+ ret = krb5->cksum_len + data_len;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+/*
+ * Check the MIC on a region of a buffer described by a scatterlist. The
+ * offset and length are updated to reflect the actual content of the secure
+ * region.
+ */
+int rfc3961_verify_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ struct shash_desc *desc;
+ ssize_t done;
+ size_t bsize, data_offset, data_len, offset = *_offset, len = *_len;
+ void *buffer = NULL;
+ int ret;
+ u8 *cksum, *cksum2;
+
+ if (len < krb5->cksum_len)
+ return -EPROTO;
+ data_offset = offset + krb5->cksum_len;
+ data_len = len - krb5->cksum_len;
+
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash) * 2;
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ cksum = buffer +
+ krb5_shash_size(shash);
+ cksum2 = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+
+ /* Calculate the MIC */
+ desc = buffer;
+ desc->tfm = shash;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error;
+
+ if (metadata) {
+ ret = crypto_shash_update(desc, metadata->data, metadata->len);
+ if (ret < 0)
+ goto error;
+	}
+
+	ret = crypto_shash_update_sg(desc, sg, data_offset, data_len);
+	if (ret < 0)
+		goto error;
+	ret = crypto_shash_final(desc, cksum);
+	if (ret < 0)
+		goto error;
+
+ ret = -EFAULT;
+ done = sg_pcopy_to_buffer(sg, nr_sg, cksum2, krb5->cksum_len, offset);
+ if (done != krb5->cksum_len)
+ goto error;
+
+ if (memcmp(cksum, cksum2, krb5->cksum_len) != 0) {
+ ret = -EBADMSG;
+ goto error;
+ }
+
+ *_offset += krb5->cksum_len;
+ *_len -= krb5->cksum_len;
+ ret = 0;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+const struct krb5_crypto_profile rfc3961_simplified_profile = {
+ .calc_PRF = rfc3961_calc_PRF,
+ .calc_Kc = rfc3961_calc_DK,
+ .calc_Ke = rfc3961_calc_DK,
+ .calc_Ki = rfc3961_calc_DK,
+ .derive_encrypt_keys = authenc_derive_encrypt_keys,
+ .load_encrypt_keys = authenc_load_encrypt_keys,
+ .derive_checksum_key = rfc3961_derive_checksum_key,
+ .load_checksum_key = rfc3961_load_checksum_key,
+ .encrypt = krb5_aead_encrypt,
+ .decrypt = krb5_aead_decrypt,
+ .get_mic = rfc3961_get_mic,
+ .verify_mic = rfc3961_verify_mic,
+};
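To make the key-parameter format used by the simplified profile concrete, here is a hypothetical sketch (not part of this patch) of keying an AEAD from predefined Ke and Ki. The blob built by authenc_load_encrypt_keys() follows the standard authenc layout (an rtattr-wrapped crypto_authenc_key_param giving the cipher-key length, then Ki, then Ke) and is handed to crypto_aead_setkey() via krb5_prepare_encryption(); the function name below is illustrative.

static struct crypto_aead *example_key_aead(const struct krb5_enctype *krb5,
					    const struct krb5_buffer *Ke,
					    const struct krb5_buffer *Ki)
{
	struct krb5_buffer setkey = {};
	struct crypto_aead *aead;
	int ret;

	/* Blob: RTA(CRYPTO_AUTHENC_KEYA_PARAM){enckeylen = Ke->len} | Ki | Ke */
	ret = authenc_load_encrypt_keys(krb5, Ke, Ki, &setkey, GFP_KERNEL);
	if (ret < 0)
		return ERR_PTR(ret);

	aead = krb5_prepare_encryption(krb5, &setkey, GFP_KERNEL);
	kfree_sensitive(setkey.data);
	return aead;
}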
diff --git a/crypto/krb5/rfc3962_aes.c b/crypto/krb5/rfc3962_aes.c
new file mode 100644
index 000000000000..5cbf8f4638b9
--- /dev/null
+++ b/crypto/krb5/rfc3962_aes.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* rfc3962 Advanced Encryption Standard (AES) Encryption for Kerberos 5
+ *
+ * Parts borrowed from net/sunrpc/auth_gss/.
+ */
+/*
+ * COPYRIGHT (c) 2008
+ * The Regents of the University of Michigan
+ * ALL RIGHTS RESERVED
+ *
+ * Permission is granted to use, copy, create derivative works
+ * and redistribute this software and such derivative works
+ * for any purpose, so long as the name of The University of
+ * Michigan is not used in any advertising or publicity
+ * pertaining to the use of distribution of this software
+ * without specific, written prior authorization. If the
+ * above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any
+ * portion of this software, then the disclaimer below must
+ * also be included.
+ *
+ * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
+ * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
+ * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
+ * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
+ * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
+ * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
+ * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
+ * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGES.
+ */
+
+/*
+ * Copyright (C) 1998 by the FundsXpress, INC.
+ *
+ * All rights reserved.
+ *
+ * Export of this software from the United States of America may require
+ * a specific license from the United States Government. It is the
+ * responsibility of any person or organization contemplating export to
+ * obtain such a license before exporting.
+ *
+ * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
+ * distribute this software and its documentation for any purpose and
+ * without fee is hereby granted, provided that the above copyright
+ * notice appear in all copies and that both that copyright notice and
+ * this permission notice appear in supporting documentation, and that
+ * the name of FundsXpress. not be used in advertising or publicity pertaining
+ * to distribution of the software without specific, written prior
+ * permission. FundsXpress makes no representations about the suitability of
+ * this software for any purpose. It is provided "as is" without express
+ * or implied warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "internal.h"
+
+const struct krb5_enctype krb5_aes128_cts_hmac_sha1_96 = {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96,
+ .ctype = KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128,
+ .name = "aes128-cts-hmac-sha1-96",
+ .encrypt_name = "krb5enc(hmac(sha1),cts(cbc(aes)))",
+ .cksum_name = "hmac(sha1)",
+ .hash_name = "sha1",
+ .derivation_enc = "cts(cbc(aes))",
+ .key_bytes = 16,
+ .key_len = 16,
+ .Kc_len = 16,
+ .Ke_len = 16,
+ .Ki_len = 16,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 12,
+ .hash_len = 20,
+ .prf_len = 16,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc3961_simplified_profile,
+};
+
+const struct krb5_enctype krb5_aes256_cts_hmac_sha1_96 = {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96,
+ .ctype = KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256,
+ .name = "aes256-cts-hmac-sha1-96",
+ .encrypt_name = "krb5enc(hmac(sha1),cts(cbc(aes)))",
+ .cksum_name = "hmac(sha1)",
+ .hash_name = "sha1",
+ .derivation_enc = "cts(cbc(aes))",
+ .key_bytes = 32,
+ .key_len = 32,
+ .Kc_len = 32,
+ .Ke_len = 32,
+ .Ki_len = 32,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 12,
+ .hash_len = 20,
+ .prf_len = 16,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc3961_simplified_profile,
+};
diff --git a/crypto/krb5/rfc6803_camellia.c b/crypto/krb5/rfc6803_camellia.c
new file mode 100644
index 000000000000..77cd4ce023f1
--- /dev/null
+++ b/crypto/krb5/rfc6803_camellia.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* rfc6803 Camellia Encryption for Kerberos 5
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * Calculate the key derivation function KDF-FEEDBACK-CMAC(key, constant)
+ *
+ * n = ceiling(k / 128)
+ * K(0) = zeros
+ * K(i) = CMAC(key, K(i-1) | i | constant | 0x00 | k)
+ * DR(key, constant) = k-truncate(K(1) | K(2) | ... | K(n))
+ * KDF-FEEDBACK-CMAC(key, constant) = random-to-key(DR(key, constant))
+ *
+ * [rfc6803 sec 3]
+ */
+static int rfc6803_calc_KDF_FEEDBACK_CMAC(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *key,
+ const struct krb5_buffer *constant,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ struct crypto_shash *shash;
+ struct krb5_buffer K, data;
+ struct shash_desc *desc;
+ __be32 tmp;
+ size_t bsize, offset, seg;
+ void *buffer;
+ u32 i = 0, k = result->len * 8;
+ u8 *p;
+ int ret = -ENOMEM;
+
+ shash = crypto_alloc_shash(krb5->cksum_name, 0, 0);
+ if (IS_ERR(shash))
+ return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash);
+ ret = crypto_shash_setkey(shash, key->data, key->len);
+ if (ret < 0)
+ goto error_shash;
+
+ ret = -ENOMEM;
+ K.len = crypto_shash_digestsize(shash);
+ data.len = K.len + 4 + constant->len + 1 + 4;
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash) +
+ crypto_roundup(K.len) +
+ crypto_roundup(data.len);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ goto error_shash;
+
+ desc = buffer;
+ desc->tfm = shash;
+
+ K.data = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+ data.data = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash) +
+ crypto_roundup(K.len);
+
+ p = data.data + K.len + 4;
+ memcpy(p, constant->data, constant->len);
+ p += constant->len;
+ *p++ = 0x00;
+ tmp = htonl(k);
+ memcpy(p, &tmp, 4);
+ p += 4;
+
+ ret = -EINVAL;
+ if (WARN_ON(p - (u8 *)data.data != data.len))
+ goto error;
+
+ offset = 0;
+ do {
+ i++;
+ p = data.data;
+ memcpy(p, K.data, K.len);
+ p += K.len;
+ *(__be32 *)p = htonl(i);
+
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error;
+ ret = crypto_shash_finup(desc, data.data, data.len, K.data);
+ if (ret < 0)
+ goto error;
+
+ seg = min_t(size_t, result->len - offset, K.len);
+ memcpy(result->data + offset, K.data, seg);
+ offset += seg;
+ } while (offset < result->len);
+
+error:
+ kfree_sensitive(buffer);
+error_shash:
+ crypto_free_shash(shash);
+ return ret;
+}
+
+/*
+ * Calculate the pseudo-random function, PRF().
+ *
+ * Kp = KDF-FEEDBACK-CMAC(protocol-key, "prf")
+ * PRF = CMAC(Kp, octet-string)
+ * [rfc6803 sec 6]
+ */
+static int rfc6803_calc_PRF(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *protocol_key,
+ const struct krb5_buffer *octet_string,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ static const struct krb5_buffer prfconstant = { 3, "prf" };
+ struct crypto_shash *shash;
+ struct krb5_buffer Kp;
+ struct shash_desc *desc;
+ size_t bsize;
+ void *buffer;
+ int ret;
+
+ Kp.len = krb5->prf_len;
+
+ shash = crypto_alloc_shash(krb5->cksum_name, 0, 0);
+ if (IS_ERR(shash))
+ return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash);
+
+ ret = -EINVAL;
+ if (result->len != crypto_shash_digestsize(shash))
+ goto out_shash;
+
+ ret = -ENOMEM;
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash) +
+ crypto_roundup(Kp.len);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ goto out_shash;
+
+ Kp.data = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+
+ ret = rfc6803_calc_KDF_FEEDBACK_CMAC(krb5, protocol_key, &prfconstant,
+ &Kp, gfp);
+ if (ret < 0)
+ goto out;
+
+ ret = crypto_shash_setkey(shash, Kp.data, Kp.len);
+ if (ret < 0)
+ goto out;
+
+ desc = buffer;
+ desc->tfm = shash;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto out;
+
+ ret = crypto_shash_finup(desc, octet_string->data, octet_string->len, result->data);
+ if (ret < 0)
+ goto out;
+
+out:
+ kfree_sensitive(buffer);
+out_shash:
+ crypto_free_shash(shash);
+ return ret;
+}
+
+
+static const struct krb5_crypto_profile rfc6803_crypto_profile = {
+ .calc_PRF = rfc6803_calc_PRF,
+ .calc_Kc = rfc6803_calc_KDF_FEEDBACK_CMAC,
+ .calc_Ke = rfc6803_calc_KDF_FEEDBACK_CMAC,
+ .calc_Ki = rfc6803_calc_KDF_FEEDBACK_CMAC,
+ .derive_encrypt_keys = authenc_derive_encrypt_keys,
+ .load_encrypt_keys = authenc_load_encrypt_keys,
+ .derive_checksum_key = rfc3961_derive_checksum_key,
+ .load_checksum_key = rfc3961_load_checksum_key,
+ .encrypt = krb5_aead_encrypt,
+ .decrypt = krb5_aead_decrypt,
+ .get_mic = rfc3961_get_mic,
+ .verify_mic = rfc3961_verify_mic,
+};
+
+const struct krb5_enctype krb5_camellia128_cts_cmac = {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .ctype = KRB5_CKSUMTYPE_CMAC_CAMELLIA128,
+ .name = "camellia128-cts-cmac",
+ .encrypt_name = "krb5enc(cmac(camellia),cts(cbc(camellia)))",
+ .cksum_name = "cmac(camellia)",
+ .hash_name = NULL,
+ .derivation_enc = "cts(cbc(camellia))",
+ .key_bytes = 16,
+ .key_len = 16,
+ .Kc_len = 16,
+ .Ke_len = 16,
+ .Ki_len = 16,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 16,
+ .hash_len = 16,
+ .prf_len = 16,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc6803_crypto_profile,
+};
+
+const struct krb5_enctype krb5_camellia256_cts_cmac = {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .ctype = KRB5_CKSUMTYPE_CMAC_CAMELLIA256,
+ .name = "camellia256-cts-cmac",
+ .encrypt_name = "krb5enc(cmac(camellia),cts(cbc(camellia)))",
+ .cksum_name = "cmac(camellia)",
+ .hash_name = NULL,
+ .derivation_enc = "cts(cbc(camellia))",
+ .key_bytes = 32,
+ .key_len = 32,
+ .Kc_len = 32,
+ .Ke_len = 32,
+ .Ki_len = 32,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 16,
+ .hash_len = 16,
+ .prf_len = 16,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc6803_crypto_profile,
+};
diff --git a/crypto/krb5/rfc8009_aes2.c b/crypto/krb5/rfc8009_aes2.c
new file mode 100644
index 000000000000..d39851fc3a4e
--- /dev/null
+++ b/crypto/krb5/rfc8009_aes2.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* rfc8009 AES Encryption with HMAC-SHA2 for Kerberos 5
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <crypto/authenc.h>
+#include "internal.h"
+
+static const struct krb5_buffer rfc8009_no_context = { .len = 0, .data = "" };
+
+/*
+ * Calculate the key derivation function KDF-HMAC-SHA2(key, label, [context,] k)
+ *
+ * KDF-HMAC-SHA2(key, label, [context,] k) = k-truncate(K1)
+ *
+ * Using the appropriate one of:
+ * K1 = HMAC-SHA-256(key, 0x00000001 | label | 0x00 | k)
+ * K1 = HMAC-SHA-384(key, 0x00000001 | label | 0x00 | k)
+ * K1 = HMAC-SHA-256(key, 0x00000001 | label | 0x00 | context | k)
+ * K1 = HMAC-SHA-384(key, 0x00000001 | label | 0x00 | context | k)
+ * [rfc8009 sec 3]
+ */
+static int rfc8009_calc_KDF_HMAC_SHA2(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *key,
+ const struct krb5_buffer *label,
+ const struct krb5_buffer *context,
+ unsigned int k,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ struct crypto_shash *shash;
+ struct krb5_buffer K1, data;
+ struct shash_desc *desc;
+ __be32 tmp;
+ size_t bsize;
+ void *buffer;
+ u8 *p;
+ int ret = -ENOMEM;
+
+ if (WARN_ON(result->len != k / 8))
+ return -EINVAL;
+
+ shash = crypto_alloc_shash(krb5->cksum_name, 0, 0);
+ if (IS_ERR(shash))
+ return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash);
+ ret = crypto_shash_setkey(shash, key->data, key->len);
+ if (ret < 0)
+ goto error_shash;
+
+ ret = -EINVAL;
+ if (WARN_ON(crypto_shash_digestsize(shash) * 8 < k))
+ goto error_shash;
+
+ ret = -ENOMEM;
+ data.len = 4 + label->len + 1 + context->len + 4;
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash) +
+ crypto_roundup(data.len);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ goto error_shash;
+
+ desc = buffer;
+ desc->tfm = shash;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error;
+
+ p = data.data = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+ *(__be32 *)p = htonl(0x00000001);
+ p += 4;
+ memcpy(p, label->data, label->len);
+ p += label->len;
+ *p++ = 0;
+ memcpy(p, context->data, context->len);
+ p += context->len;
+ tmp = htonl(k);
+ memcpy(p, &tmp, 4);
+ p += 4;
+
+ ret = -EINVAL;
+ if (WARN_ON(p - (u8 *)data.data != data.len))
+ goto error;
+
+ K1.len = crypto_shash_digestsize(shash);
+ K1.data = buffer +
+ krb5_shash_size(shash);
+
+ ret = crypto_shash_finup(desc, data.data, data.len, K1.data);
+ if (ret < 0)
+ goto error;
+
+ memcpy(result->data, K1.data, result->len);
+
+error:
+ kfree_sensitive(buffer);
+error_shash:
+ crypto_free_shash(shash);
+ return ret;
+}
+
+/*
+ * Calculate the pseudo-random function, PRF().
+ *
+ * PRF = KDF-HMAC-SHA2(input-key, "prf", octet-string, 256)
+ * PRF = KDF-HMAC-SHA2(input-key, "prf", octet-string, 384)
+ *
+ * The "prfconstant" used in the PRF operation is the three-octet string
+ * "prf".
+ * [rfc8009 sec 5]
+ */
+static int rfc8009_calc_PRF(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *input_key,
+ const struct krb5_buffer *octet_string,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ static const struct krb5_buffer prfconstant = { 3, "prf" };
+
+ return rfc8009_calc_KDF_HMAC_SHA2(krb5, input_key, &prfconstant,
+ octet_string, krb5->prf_len * 8,
+ result, gfp);
+}
+
+/*
+ * Derive Ke.
+ * Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 128)
+ * Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 256)
+ * [rfc8009 sec 5]
+ */
+static int rfc8009_calc_Ke(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *base_key,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ return rfc8009_calc_KDF_HMAC_SHA2(krb5, base_key, usage_constant,
+ &rfc8009_no_context, krb5->key_bytes * 8,
+ result, gfp);
+}
+
+/*
+ * Derive Kc/Ki
+ * Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 128)
+ * Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 128)
+ * Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 192)
+ * Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 192)
+ * [rfc8009 sec 5]
+ */
+static int rfc8009_calc_Ki(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *base_key,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ return rfc8009_calc_KDF_HMAC_SHA2(krb5, base_key, usage_constant,
+ &rfc8009_no_context, krb5->cksum_len * 8,
+ result, gfp);
+}
+
+/*
+ * Apply encryption and checksumming functions to a message. Unlike RFC3961,
+ * RFC8009 requires the starting IV to be fed into the hash first.
+ */
+static ssize_t rfc8009_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded)
+{
+ struct aead_request *req;
+ struct scatterlist bsg[2];
+ ssize_t ret, done;
+ size_t bsize, base_len, secure_offset, secure_len, pad_len, cksum_offset;
+ void *buffer;
+ u8 *iv, *ad;
+
+ if (WARN_ON(data_offset != krb5->conf_len))
+ return -EINVAL; /* Data is in wrong place */
+
+ secure_offset = 0;
+ base_len = krb5->conf_len + data_len;
+ pad_len = 0;
+ secure_len = base_len + pad_len;
+ cksum_offset = secure_len;
+ if (WARN_ON(cksum_offset + krb5->cksum_len > sg_len))
+ return -EFAULT;
+
+ bsize = krb5_aead_size(aead) +
+ krb5_aead_ivsize(aead) * 2;
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ req = buffer;
+ iv = buffer + krb5_aead_size(aead);
+ ad = buffer + krb5_aead_size(aead) + krb5_aead_ivsize(aead);
+
+ /* Insert the confounder into the buffer */
+ ret = -EFAULT;
+ if (!preconfounded) {
+ get_random_bytes(buffer, krb5->conf_len);
+ done = sg_pcopy_from_buffer(sg, nr_sg, buffer, krb5->conf_len,
+ secure_offset);
+ if (done != krb5->conf_len)
+ goto error;
+ }
+
+ /* We may need to pad out to the crypto blocksize. */
+ if (pad_len) {
+ done = sg_zero_buffer(sg, nr_sg, pad_len, data_offset + data_len);
+ if (done != pad_len)
+ goto error;
+ }
+
+ /* We need to include the starting IV in the hash. */
+ sg_init_table(bsg, 2);
+ sg_set_buf(&bsg[0], ad, krb5_aead_ivsize(aead));
+ sg_chain(bsg, 2, sg);
+
+ /* Hash and encrypt the message. */
+ aead_request_set_tfm(req, aead);
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_ad(req, krb5_aead_ivsize(aead));
+ aead_request_set_crypt(req, bsg, bsg, secure_len, iv);
+ ret = crypto_aead_encrypt(req);
+ if (ret < 0)
+ goto error;
+
+ ret = secure_len + krb5->cksum_len;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+/*
+ * Apply decryption and checksumming functions to a message. Unlike RFC3961,
+ * RFC8009 requires the starting IV to be fed into the hash first.
+ *
+ * The offset and length are updated to reflect the actual content of the
+ * encrypted region.
+ */
+static int rfc8009_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ struct aead_request *req;
+ struct scatterlist bsg[2];
+ size_t bsize;
+ void *buffer;
+ int ret;
+ u8 *iv, *ad;
+
+ if (WARN_ON(*_offset != 0))
+ return -EINVAL; /* Can't set offset on aead */
+
+ if (*_len < krb5->conf_len + krb5->cksum_len)
+ return -EPROTO;
+
+ bsize = krb5_aead_size(aead) +
+ krb5_aead_ivsize(aead) * 2;
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ req = buffer;
+ iv = buffer + krb5_aead_size(aead);
+ ad = buffer + krb5_aead_size(aead) + krb5_aead_ivsize(aead);
+
+ /* We need to include the starting IV in the hash. */
+ sg_init_table(bsg, 2);
+ sg_set_buf(&bsg[0], ad, krb5_aead_ivsize(aead));
+ sg_chain(bsg, 2, sg);
+
+ /* Decrypt the message and verify its checksum. */
+ aead_request_set_tfm(req, aead);
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_ad(req, krb5_aead_ivsize(aead));
+ aead_request_set_crypt(req, bsg, bsg, *_len, iv);
+ ret = crypto_aead_decrypt(req);
+ if (ret < 0)
+ goto error;
+
+ /* Adjust the boundaries of the data. */
+ *_offset += krb5->conf_len;
+ *_len -= krb5->conf_len + krb5->cksum_len;
+ ret = 0;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
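For orientation, the message layout that rfc8009_encrypt() and rfc8009_decrypt() operate on, per the lengths computed above, is:

    | confounder (conf_len) | data (data_len) | checksum (cksum_len) |

The first two fields form the encrypted region, and the checksum produced by the authenc construction covers the starting IV followed by that ciphertext. On a successful decrypt, *_offset is advanced past the confounder and *_len is reduced by conf_len + cksum_len, leaving the caller with just the plaintext.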
+
+static const struct krb5_crypto_profile rfc8009_crypto_profile = {
+ .calc_PRF = rfc8009_calc_PRF,
+ .calc_Kc = rfc8009_calc_Ki,
+ .calc_Ke = rfc8009_calc_Ke,
+ .calc_Ki = rfc8009_calc_Ki,
+ .derive_encrypt_keys = authenc_derive_encrypt_keys,
+ .load_encrypt_keys = authenc_load_encrypt_keys,
+ .derive_checksum_key = rfc3961_derive_checksum_key,
+ .load_checksum_key = rfc3961_load_checksum_key,
+ .encrypt = rfc8009_encrypt,
+ .decrypt = rfc8009_decrypt,
+ .get_mic = rfc3961_get_mic,
+ .verify_mic = rfc3961_verify_mic,
+};
+
+const struct krb5_enctype krb5_aes128_cts_hmac_sha256_128 = {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .ctype = KRB5_CKSUMTYPE_HMAC_SHA256_128_AES128,
+ .name = "aes128-cts-hmac-sha256-128",
+ .encrypt_name = "authenc(hmac(sha256),cts(cbc(aes)))",
+ .cksum_name = "hmac(sha256)",
+ .hash_name = "sha256",
+ .derivation_enc = "cts(cbc(aes))",
+ .key_bytes = 16,
+ .key_len = 16,
+ .Kc_len = 16,
+ .Ke_len = 16,
+ .Ki_len = 16,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 16,
+ .hash_len = 20,
+ .prf_len = 32,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc8009_crypto_profile,
+};
+
+const struct krb5_enctype krb5_aes256_cts_hmac_sha384_192 = {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .ctype = KRB5_CKSUMTYPE_HMAC_SHA384_192_AES256,
+ .name = "aes256-cts-hmac-sha384-192",
+ .encrypt_name = "authenc(hmac(sha384),cts(cbc(aes)))",
+ .cksum_name = "hmac(sha384)",
+ .hash_name = "sha384",
+ .derivation_enc = "cts(cbc(aes))",
+ .key_bytes = 32,
+ .key_len = 32,
+ .Kc_len = 24,
+ .Ke_len = 32,
+ .Ki_len = 24,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 24,
+ .hash_len = 20,
+ .prf_len = 48,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc8009_crypto_profile,
+};
diff --git a/crypto/krb5/selftest.c b/crypto/krb5/selftest.c
new file mode 100644
index 000000000000..2a81a6315a0d
--- /dev/null
+++ b/crypto/krb5/selftest.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Kerberos library self-testing
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include "internal.h"
+
+#define VALID(X) \
+ ({ \
+ bool __x = (X); \
+ if (__x) { \
+ pr_warn("!!! TESTINVAL %s:%u\n", __FILE__, __LINE__); \
+ ret = -EBADMSG; \
+ } \
+ __x; \
+ })
+
+#define CHECK(X) \
+ ({ \
+ bool __x = (X); \
+ if (__x) { \
+ pr_warn("!!! TESTFAIL %s:%u\n", __FILE__, __LINE__); \
+ ret = -EBADMSG; \
+ } \
+ __x; \
+ })
+
+enum which_key {
+ TEST_KC, TEST_KE, TEST_KI,
+};
+
+#if 0
+static void dump_sg(struct scatterlist *sg, unsigned int limit)
+{
+ unsigned int index = 0, n = 0;
+
+ for (; sg && limit > 0; sg = sg_next(sg)) {
+ unsigned int off = sg->offset, len = umin(sg->length, limit);
+ const void *p = kmap_local_page(sg_page(sg));
+
+ limit -= len;
+ while (len > 0) {
+ unsigned int part = umin(len, 32);
+
+ pr_notice("[%x] %04x: %*phN\n", n, index, part, p + off);
+ index += part;
+ off += part;
+ len -= part;
+ }
+
+ kunmap_local(p);
+ n++;
+ }
+}
+#endif
+
+static int prep_buf(struct krb5_buffer *buf)
+{
+ buf->data = kmalloc(buf->len, GFP_KERNEL);
+ if (!buf->data)
+ return -ENOMEM;
+ return 0;
+}
+
+#define PREP_BUF(BUF, LEN) \
+ do { \
+ (BUF)->len = (LEN); \
+ ret = prep_buf((BUF)); \
+ if (ret < 0) \
+ goto out; \
+ } while (0)
+
+static int load_buf(struct krb5_buffer *buf, const char *from)
+{
+ size_t len = strlen(from);
+ int ret;
+
+ if (len > 1 && from[0] == '\'') {
+ PREP_BUF(buf, len - 1);
+ memcpy(buf->data, from + 1, len - 1);
+ ret = 0;
+ goto out;
+ }
+
+ if (VALID(len & 1))
+ return -EINVAL;
+
+ PREP_BUF(buf, len / 2);
+ ret = hex2bin(buf->data, from, buf->len);
+ if (ret < 0) {
+ VALID(1);
+ goto out;
+ }
+out:
+ return ret;
+}
+
+#define LOAD_BUF(BUF, FROM) do { ret = load_buf(BUF, FROM); if (ret < 0) goto out; } while (0)
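load_buf() gives the test vectors a compact convention: a string beginning with an apostrophe is taken as literal ASCII (minus the apostrophe), and anything else is hex-decoded. Two examples drawn from the vectors in selftest_data.c below:

    .plain = "'9 bytesss"                          /* nine literal bytes "9 bytesss" */
    .key   = "57D0297298FFD9D35DE5A47FB4BDE24B"    /* 32 hex digits -> 16 key bytes */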
+
+static void clear_buf(struct krb5_buffer *buf)
+{
+ kfree(buf->data);
+ buf->len = 0;
+ buf->data = NULL;
+}
+
+/*
+ * Perform a pseudo-random function check.
+ */
+static int krb5_test_one_prf(const struct krb5_prf_test *test)
+{
+ const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype);
+ struct krb5_buffer key = {}, octet = {}, result = {}, prf = {};
+ int ret;
+
+ if (!krb5)
+ return -EOPNOTSUPP;
+
+ pr_notice("Running %s %s\n", krb5->name, test->name);
+
+ LOAD_BUF(&key, test->key);
+ LOAD_BUF(&octet, test->octet);
+ LOAD_BUF(&prf, test->prf);
+ PREP_BUF(&result, krb5->prf_len);
+
+ if (VALID(result.len != prf.len)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = krb5->profile->calc_PRF(krb5, &key, &octet, &result, GFP_KERNEL);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("PRF calculation failed %d\n", ret);
+ goto out;
+ }
+
+ if (memcmp(result.data, prf.data, result.len) != 0) {
+ CHECK(1);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ clear_buf(&result);
+ clear_buf(&octet);
+ clear_buf(&key);
+ return ret;
+}
+
+/*
+ * Perform a key derivation check.
+ */
+static int krb5_test_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *base_key,
+ const struct krb5_key_test_one *test,
+ enum which_key which)
+{
+ struct krb5_buffer key = {}, result = {};
+ int ret;
+
+ LOAD_BUF(&key, test->key);
+ PREP_BUF(&result, key.len);
+
+ switch (which) {
+ case TEST_KC:
+ ret = krb5_derive_Kc(krb5, base_key, test->use, &result, GFP_KERNEL);
+ break;
+ case TEST_KE:
+ ret = krb5_derive_Ke(krb5, base_key, test->use, &result, GFP_KERNEL);
+ break;
+ case TEST_KI:
+ ret = krb5_derive_Ki(krb5, base_key, test->use, &result, GFP_KERNEL);
+ break;
+ default:
+ VALID(1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Key derivation failed %d\n", ret);
+ goto out;
+ }
+
+ if (memcmp(result.data, key.data, result.len) != 0) {
+ CHECK(1);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+out:
+ clear_buf(&key);
+ clear_buf(&result);
+ return ret;
+}
+
+static int krb5_test_one_key(const struct krb5_key_test *test)
+{
+ const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype);
+ struct krb5_buffer base_key = {};
+ int ret;
+
+ if (!krb5)
+ return -EOPNOTSUPP;
+
+ pr_notice("Running %s %s\n", krb5->name, test->name);
+
+ LOAD_BUF(&base_key, test->key);
+
+ ret = krb5_test_key(krb5, &base_key, &test->Kc, TEST_KC);
+ if (ret < 0)
+ goto out;
+ ret = krb5_test_key(krb5, &base_key, &test->Ke, TEST_KE);
+ if (ret < 0)
+ goto out;
+ ret = krb5_test_key(krb5, &base_key, &test->Ki, TEST_KI);
+ if (ret < 0)
+ goto out;
+
+out:
+ clear_buf(&base_key);
+ return ret;
+}
+
+/*
+ * Perform an encryption test.
+ */
+static int krb5_test_one_enc(const struct krb5_enc_test *test, void *buf)
+{
+ const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype);
+ struct crypto_aead *ci = NULL;
+ struct krb5_buffer K0 = {}, Ke = {}, Ki = {}, keys = {};
+ struct krb5_buffer conf = {}, plain = {}, ct = {};
+ struct scatterlist sg[1];
+ size_t data_len, data_offset, message_len;
+ int ret;
+
+ if (!krb5)
+ return -EOPNOTSUPP;
+
+ pr_notice("Running %s %s\n", krb5->name, test->name);
+
+ /* Load the test data into binary buffers. */
+ LOAD_BUF(&conf, test->conf);
+ LOAD_BUF(&plain, test->plain);
+ LOAD_BUF(&ct, test->ct);
+
+ if (test->K0) {
+ LOAD_BUF(&K0, test->K0);
+ } else {
+ LOAD_BUF(&Ke, test->Ke);
+ LOAD_BUF(&Ki, test->Ki);
+
+ ret = krb5->profile->load_encrypt_keys(krb5, &Ke, &Ki, &keys, GFP_KERNEL);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (VALID(conf.len != krb5->conf_len) ||
+ VALID(ct.len != krb5->conf_len + plain.len + krb5->cksum_len))
+ goto out;
+
+ data_len = plain.len;
+ message_len = crypto_krb5_how_much_buffer(krb5, KRB5_ENCRYPT_MODE,
+ data_len, &data_offset);
+
+ if (CHECK(message_len != ct.len)) {
+ pr_warn("Encrypted length mismatch %zu != %u\n", message_len, ct.len);
+ goto out;
+ }
+ if (CHECK(data_offset != conf.len)) {
+ pr_warn("Data offset mismatch %zu != %u\n", data_offset, conf.len);
+ goto out;
+ }
+
+ memcpy(buf, conf.data, conf.len);
+ memcpy(buf + data_offset, plain.data, plain.len);
+
+ /* Allocate a crypto object and set its key. */
+ if (test->K0)
+ ci = crypto_krb5_prepare_encryption(krb5, &K0, test->usage, GFP_KERNEL);
+ else
+ ci = krb5_prepare_encryption(krb5, &keys, GFP_KERNEL);
+
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ ci = NULL;
+ pr_err("Couldn't alloc AEAD %s: %d\n", krb5->encrypt_name, ret);
+ goto out;
+ }
+
+ /* Encrypt the message. */
+ sg_init_one(sg, buf, message_len);
+ ret = crypto_krb5_encrypt(krb5, ci, sg, 1, message_len,
+ data_offset, data_len, true);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Encryption failed %d\n", ret);
+ goto out;
+ }
+ if (ret != message_len) {
+ CHECK(1);
+ pr_warn("Encrypted message wrong size %x != %zx\n", ret, message_len);
+ goto out;
+ }
+
+ if (memcmp(buf, ct.data, ct.len) != 0) {
+ CHECK(1);
+ pr_warn("Ciphertext mismatch\n");
+ pr_warn("BUF %*phN\n", ct.len, buf);
+ pr_warn("CT %*phN\n", ct.len, ct.data);
+ pr_warn("PT %*phN%*phN\n", conf.len, conf.data, plain.len, plain.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ /* Decrypt the encrypted message. */
+ data_offset = 0;
+ data_len = message_len;
+ ret = crypto_krb5_decrypt(krb5, ci, sg, 1, &data_offset, &data_len);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Decryption failed %d\n", ret);
+ goto out;
+ }
+
+ if (CHECK(data_offset != conf.len) ||
+ CHECK(data_len != plain.len))
+ goto out;
+
+ if (memcmp(buf, conf.data, conf.len) != 0) {
+ CHECK(1);
+ pr_warn("Confounder mismatch\n");
+ pr_warn("ENC %*phN\n", conf.len, buf);
+ pr_warn("DEC %*phN\n", conf.len, conf.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ if (memcmp(buf + conf.len, plain.data, plain.len) != 0) {
+ CHECK(1);
+ pr_warn("Plaintext mismatch\n");
+ pr_warn("BUF %*phN\n", plain.len, buf + conf.len);
+ pr_warn("PT %*phN\n", plain.len, plain.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ clear_buf(&ct);
+ clear_buf(&plain);
+ clear_buf(&conf);
+ clear_buf(&keys);
+ clear_buf(&Ki);
+ clear_buf(&Ke);
+ clear_buf(&K0);
+ if (ci)
+ crypto_free_aead(ci);
+ return ret;
+}
+
+/*
+ * Perform a checksum test.
+ */
+static int krb5_test_one_mic(const struct krb5_mic_test *test, void *buf)
+{
+ const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype);
+ struct crypto_shash *ci = NULL;
+ struct scatterlist sg[1];
+ struct krb5_buffer K0 = {}, Kc = {}, keys = {}, plain = {}, mic = {};
+ size_t offset, len, message_len;
+ int ret;
+
+ if (!krb5)
+ return -EOPNOTSUPP;
+
+ pr_notice("Running %s %s\n", krb5->name, test->name);
+
+ /* Allocate a crypto object and set its key. */
+ if (test->K0) {
+ LOAD_BUF(&K0, test->K0);
+ ci = crypto_krb5_prepare_checksum(krb5, &K0, test->usage, GFP_KERNEL);
+ } else {
+ LOAD_BUF(&Kc, test->Kc);
+
+ ret = krb5->profile->load_checksum_key(krb5, &Kc, &keys, GFP_KERNEL);
+ if (ret < 0)
+ goto out;
+
+ ci = krb5_prepare_checksum(krb5, &Kc, GFP_KERNEL);
+ }
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ ci = NULL;
+ pr_err("Couldn't alloc shash %s: %d\n", krb5->cksum_name, ret);
+ goto out;
+ }
+
+ /* Load the test data into binary buffers. */
+ LOAD_BUF(&plain, test->plain);
+ LOAD_BUF(&mic, test->mic);
+
+ len = plain.len;
+ message_len = crypto_krb5_how_much_buffer(krb5, KRB5_CHECKSUM_MODE,
+ len, &offset);
+
+ if (CHECK(message_len != mic.len + plain.len)) {
+ pr_warn("MIC length mismatch %zu != %u\n",
+ message_len, mic.len + plain.len);
+ goto out;
+ }
+
+ memcpy(buf + offset, plain.data, plain.len);
+
+ /* Generate the MIC. */
+ sg_init_one(sg, buf, 1024);
+
+ ret = crypto_krb5_get_mic(krb5, ci, NULL, sg, 1, 1024,
+ krb5->cksum_len, plain.len);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Get MIC failed %d\n", ret);
+ goto out;
+ }
+ len = ret;
+
+ if (CHECK(len != plain.len + mic.len)) {
+ pr_warn("MIC length mismatch %zu != %u\n", len, plain.len + mic.len);
+ goto out;
+ }
+
+ if (memcmp(buf, mic.data, mic.len) != 0) {
+ CHECK(1);
+ pr_warn("MIC mismatch\n");
+ pr_warn("BUF %*phN\n", mic.len, buf);
+ pr_warn("MIC %*phN\n", mic.len, mic.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ /* Verify the MIC. */
+ offset = 0;
+ ret = crypto_krb5_verify_mic(krb5, ci, NULL, sg, 1, &offset, &len);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Verify MIC failed %d\n", ret);
+ goto out;
+ }
+
+ if (CHECK(offset != mic.len) ||
+ CHECK(len != plain.len))
+ goto out;
+
+ if (memcmp(buf + offset, plain.data, plain.len) != 0) {
+ CHECK(1);
+ pr_warn("Plaintext mismatch\n");
+ pr_warn("BUF %*phN\n", plain.len, buf + offset);
+ pr_warn("PT %*phN\n", plain.len, plain.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ clear_buf(&mic);
+ clear_buf(&plain);
+ clear_buf(&keys);
+ clear_buf(&K0);
+ clear_buf(&Kc);
+ if (ci)
+ crypto_free_shash(ci);
+ return ret;
+}
+
+int krb5_selftest(void)
+{
+ void *buf;
+ int ret = 0, i;
+
+ buf = kmalloc(4096, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pr_notice("\n");
+ pr_notice("Running selftests\n");
+
+ for (i = 0; krb5_prf_tests[i].name; i++) {
+ ret = krb5_test_one_prf(&krb5_prf_tests[i]);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ goto out;
+ pr_notice("Skipping %s\n", krb5_prf_tests[i].name);
+ }
+ }
+
+ for (i = 0; krb5_key_tests[i].name; i++) {
+ ret = krb5_test_one_key(&krb5_key_tests[i]);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ goto out;
+ pr_notice("Skipping %s\n", krb5_key_tests[i].name);
+ }
+ }
+
+ for (i = 0; krb5_enc_tests[i].name; i++) {
+ memset(buf, 0x5a, 4096);
+ ret = krb5_test_one_enc(&krb5_enc_tests[i], buf);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ goto out;
+ pr_notice("Skipping %s\n", krb5_enc_tests[i].name);
+ }
+ }
+
+ for (i = 0; krb5_mic_tests[i].name; i++) {
+ memset(buf, 0x5a, 4096);
+ ret = krb5_test_one_mic(&krb5_mic_tests[i], buf);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ goto out;
+ pr_notice("Skipping %s\n", krb5_mic_tests[i].name);
+ }
+ }
+
+ ret = 0;
+out:
+ pr_notice("Selftests %s\n", ret == 0 ? "succeeded" : "failed");
+ kfree(buf);
+ return ret;
+}
diff --git a/crypto/krb5/selftest_data.c b/crypto/krb5/selftest_data.c
new file mode 100644
index 000000000000..24447ee8bf07
--- /dev/null
+++ b/crypto/krb5/selftest_data.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Data for Kerberos library self-testing
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "internal.h"
+
+/*
+ * Pseudo-random function tests.
+ */
+const struct krb5_prf_test krb5_prf_tests[] = {
+ /* rfc8009 Appendix A */
+ {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "prf",
+ .key = "3705D96080C17728A0E800EAB6E0D23C",
+ .octet = "74657374",
+ .prf = "9D188616F63852FE86915BB840B4A886FF3E6BB0F819B49B893393D393854295",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "prf",
+ .key = "6D404D37FAF79F9DF0D33568D320669800EB4836472EA8A026D16B7182460C52",
+ .octet = "74657374",
+ .prf =
+ "9801F69A368C2BF675E59521E177D9A07F67EFE1CFDE8D3C8D6F6A0256E3B17D"
+ "B3C1B62AD1B8553360D17367EB1514D2",
+ },
+ {/* END */}
+};
+
+/*
+ * Key derivation tests.
+ */
+const struct krb5_key_test krb5_key_tests[] = {
+ /* rfc8009 Appendix A */
+ {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "key",
+ .key = "3705D96080C17728A0E800EAB6E0D23C",
+ .Kc.use = 0x00000002,
+ .Kc.key = "B31A018A48F54776F403E9A396325DC3",
+ .Ke.use = 0x00000002,
+ .Ke.key = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki.use = 0x00000002,
+ .Ki.key = "9FDA0E56AB2D85E1569A688696C26A6C",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "key",
+ .key = "6D404D37FAF79F9DF0D33568D320669800EB4836472EA8A026D16B7182460C52",
+ .Kc.use = 0x00000002,
+ .Kc.key = "EF5718BE86CC84963D8BBB5031E9F5C4BA41F28FAF69E73D",
+ .Ke.use = 0x00000002,
+ .Ke.key = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki.use = 0x00000002,
+ .Ki.key = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ },
+ /* rfc6803 sec 10 */
+ {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "key",
+ .key = "57D0297298FFD9D35DE5A47FB4BDE24B",
+ .Kc.use = 0x00000002,
+ .Kc.key = "D155775A209D05F02B38D42A389E5A56",
+ .Ke.use = 0x00000002,
+ .Ke.key = "64DF83F85A532F17577D8C37035796AB",
+ .Ki.use = 0x00000002,
+ .Ki.key = "3E4FBDF30FB8259C425CB6C96F1F4635",
+ },
+ {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "key",
+ .key = "B9D6828B2056B7BE656D88A123B1FAC68214AC2B727ECF5F69AFE0C4DF2A6D2C",
+ .Kc.use = 0x00000002,
+ .Kc.key = "E467F9A9552BC7D3155A6220AF9C19220EEED4FF78B0D1E6A1544991461A9E50",
+ .Ke.use = 0x00000002,
+ .Ke.key = "412AEFC362A7285FC3966C6A5181E7605AE675235B6D549FBFC9AB6630A4C604",
+ .Ki.use = 0x00000002,
+ .Ki.key = "FA624FA0E523993FA388AEFDC67E67EBCD8C08E8A0246B1D73B0D1DD9FC582B0",
+ },
+ {/* END */}
+};
+
+/*
+ * Encryption tests.
+ */
+const struct krb5_enc_test krb5_enc_tests[] = {
+ /* rfc8009 Appendix A */
+ {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "enc no plain",
+ .plain = "",
+ .conf = "7E5895EAF2672435BAD817F545A37148",
+ .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki = "9FDA0E56AB2D85E1569A688696C26A6C",
+ .ct = "EF85FB890BB8472F4DAB20394DCA781DAD877EDA39D50C870C0D5A0A8E48C718",
+ }, {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "enc plain<block",
+ .plain = "000102030405",
+ .conf = "7BCA285E2FD4130FB55B1A5C83BC5B24",
+ .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki = "9FDA0E56AB2D85E1569A688696C26A6C",
+ .ct = "84D7F30754ED987BAB0BF3506BEB09CFB55402CEF7E6877CE99E247E52D16ED4421DFDF8976C",
+ }, {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "enc plain==block",
+ .plain = "000102030405060708090A0B0C0D0E0F",
+ .conf = "56AB21713FF62C0A1457200F6FA9948F",
+ .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki = "9FDA0E56AB2D85E1569A688696C26A6C",
+ .ct = "3517D640F50DDC8AD3628722B3569D2AE07493FA8263254080EA65C1008E8FC295FB4852E7D83E1E7C48C37EEBE6B0D3",
+ }, {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "enc plain>block",
+ .plain = "000102030405060708090A0B0C0D0E0F1011121314",
+ .conf = "A7A4E29A4728CE10664FB64E49AD3FAC",
+ .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki = "9FDA0E56AB2D85E1569A688696C26A6C",
+ .ct = "720F73B18D9859CD6CCB4346115CD336C70F58EDC0C4437C5573544C31C813BCE1E6D072C186B39A413C2F92CA9B8334A287FFCBFC",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "enc no plain",
+ .plain = "",
+ .conf = "F764E9FA15C276478B2C7D0C4E5F58E4",
+ .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ .ct = "41F53FA5BFE7026D91FAF9BE959195A058707273A96A40F0A01960621AC612748B9BBFBE7EB4CE3C",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "enc plain<block",
+ .plain = "000102030405",
+ .conf = "B80D3251C1F6471494256FFE712D0B9A",
+ .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ .ct = "4ED7B37C2BCAC8F74F23C1CF07E62BC7B75FB3F637B9F559C7F664F69EAB7B6092237526EA0D1F61CB20D69D10F2",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "enc plain==block",
+ .plain = "000102030405060708090A0B0C0D0E0F",
+ .conf = "53BF8A0D105265D4E276428624CE5E63",
+ .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ .ct = "BC47FFEC7998EB91E8115CF8D19DAC4BBBE2E163E87DD37F49BECA92027764F68CF51F14D798C2273F35DF574D1F932E40C4FF255B36A266",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "enc plain>block",
+ .plain = "000102030405060708090A0B0C0D0E0F1011121314",
+ .conf = "763E65367E864F02F55153C7E3B58AF1",
+ .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ .ct = "40013E2DF58E8751957D2878BCD2D6FE101CCFD556CB1EAE79DB3C3EE86429F2B2A602AC86FEF6ECB647D6295FAE077A1FEB517508D2C16B4192E01F62",
+ },
+ /* rfc6803 sec 10 */
+ {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc no plain",
+ .plain = "",
+ .conf = "B69822A19A6B09C0EBC8557D1F1B6C0A",
+ .K0 = "1DC46A8D763F4F93742BCBA3387576C3",
+ .usage = 0,
+ .ct = "C466F1871069921EDB7C6FDE244A52DB0BA10EDC197BDB8006658CA3CCCE6EB8",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc 1 plain",
+ .plain = "'1",
+ .conf = "6F2FC3C2A166FD8898967A83DE9596D9",
+ .K0 = "5027BC231D0F3A9D23333F1CA6FDBE7C",
+ .usage = 1,
+ .ct = "842D21FD950311C0DD464A3F4BE8D6DA88A56D559C9B47D3F9A85067AF661559B8",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc 9 plain",
+ .plain = "'9 bytesss",
+ .conf = "A5B4A71E077AEEF93C8763C18FDB1F10",
+ .K0 = "A1BB61E805F9BA6DDE8FDBDDC05CDEA0",
+ .usage = 2,
+ .ct = "619FF072E36286FF0A28DEB3A352EC0D0EDF5C5160D663C901758CCF9D1ED33D71DB8F23AABF8348A0",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc 13 plain",
+ .plain = "'13 bytes byte",
+ .conf = "19FEE40D810C524B5B22F01874C693DA",
+ .K0 = "2CA27A5FAF5532244506434E1CEF6676",
+ .usage = 3,
+ .ct = "B8ECA3167AE6315512E59F98A7C500205E5F63FF3BB389AF1C41A21D640D8615C9ED3FBEB05AB6ACB67689B5EA",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc 30 plain",
+ .plain = "'30 bytes bytes bytes bytes byt",
+ .conf = "CA7A7AB4BE192DABD603506DB19C39E2",
+ .K0 = "7824F8C16F83FF354C6BF7515B973F43",
+ .usage = 4,
+ .ct = "A26A3905A4FFD5816B7B1E27380D08090C8EC1F304496E1ABDCD2BDCD1DFFC660989E117A713DDBB57A4146C1587CBA4356665591D2240282F5842B105A5",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc no plain",
+ .plain = "",
+ .conf = "3CBBD2B45917941067F96599BB98926C",
+ .K0 = "B61C86CC4E5D2757545AD423399FB7031ECAB913CBB900BD7A3C6DD8BF92015B",
+ .usage = 0,
+ .ct = "03886D03310B47A6D8F06D7B94D1DD837ECCE315EF652AFF620859D94A259266",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc 1 plain",
+ .plain = "'1",
+ .conf = "DEF487FCEBE6DE6346D4DA4521BBA2D2",
+ .K0 = "1B97FE0A190E2021EB30753E1B6E1E77B0754B1D684610355864104963463833",
+ .usage = 1,
+ .ct = "2C9C1570133C99BF6A34BC1B0212002FD194338749DB4135497A347CFCD9D18A12",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc 9 plain",
+ .plain = "'9 bytesss",
+ .conf = "AD4FF904D34E555384B14100FC465F88",
+ .K0 = "32164C5B434D1D1538E4CFD9BE8040FE8C4AC7ACC4B93D3314D2133668147A05",
+ .usage = 2,
+ .ct = "9C6DE75F812DE7ED0D28B2963557A115640998275B0AF5152709913FF52A2A9C8E63B872F92E64C839",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc 13 plain",
+ .plain = "'13 bytes byte",
+ .conf = "CF9BCA6DF1144E0C0AF9B8F34C90D514",
+ .K0 = "B038B132CD8E06612267FAB7170066D88AECCBA0B744BFC60DC89BCA182D0715",
+ .usage = 3,
+ .ct = "EEEC85A9813CDC536772AB9B42DEFC5706F726E975DDE05A87EB5406EA324CA185C9986B42AABE794B84821BEE",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc 30 plain",
+ .plain = "'30 bytes bytes bytes bytes byt",
+ .conf = "644DEF38DA35007275878D216855E228",
+ .K0 = "CCFCD349BF4C6677E86E4B02B8EAB924A546AC731CF9BF6989B996E7D6BFBBA7",
+ .usage = 4,
+ .ct = "0E44680985855F2D1F1812529CA83BFD8E349DE6FD9ADA0BAAA048D68E265FEBF34AD1255A344999AD37146887A6C6845731AC7F46376A0504CD06571474",
+ },
+ {/* END */}
+};
+
+/*
+ * Checksum generation tests.
+ */
+const struct krb5_mic_test krb5_mic_tests[] = {
+ /* rfc8009 Appendix A */
+ {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "mic",
+ .plain = "000102030405060708090A0B0C0D0E0F1011121314",
+ .Kc = "B31A018A48F54776F403E9A396325DC3",
+ .mic = "D78367186643D67B411CBA9139FC1DEE",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "mic",
+ .plain = "000102030405060708090A0B0C0D0E0F1011121314",
+ .Kc = "EF5718BE86CC84963D8BBB5031E9F5C4BA41F28FAF69E73D",
+ .mic = "45EE791567EEFCA37F4AC1E0222DE80D43C3BFA06699672A",
+ },
+ /* rfc6803 sec 10 */
+ {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "mic abc",
+ .plain = "'abcdefghijk",
+ .K0 = "1DC46A8D763F4F93742BCBA3387576C3",
+ .usage = 7,
+ .mic = "1178E6C5C47A8C1AE0C4B9C7D4EB7B6B",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "mic ABC",
+ .plain = "'ABCDEFGHIJKLMNOPQRSTUVWXYZ",
+ .K0 = "5027BC231D0F3A9D23333F1CA6FDBE7C",
+ .usage = 8,
+ .mic = "D1B34F7004A731F23A0C00BF6C3F753A",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "mic 123",
+ .plain = "'123456789",
+ .K0 = "B61C86CC4E5D2757545AD423399FB7031ECAB913CBB900BD7A3C6DD8BF92015B",
+ .usage = 9,
+ .mic = "87A12CFD2B96214810F01C826E7744B1",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "mic !@#",
+ .plain = "'!@#$%^&*()!@#$%^&*()!@#$%^&*()",
+ .K0 = "32164C5B434D1D1538E4CFD9BE8040FE8C4AC7ACC4B93D3314D2133668147A05",
+ .usage = 10,
+ .mic = "3FA0B42355E52B189187294AA252AB64",
+ },
+ {/* END */}
+};
diff --git a/crypto/krb5enc.c b/crypto/krb5enc.c
new file mode 100644
index 000000000000..a1de55994d92
--- /dev/null
+++ b/crypto/krb5enc.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AEAD wrapper for Kerberos 5 RFC3961 simplified profile.
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Derived from authenc:
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct krb5enc_instance_ctx {
+ struct crypto_ahash_spawn auth;
+ struct crypto_skcipher_spawn enc;
+ unsigned int reqoff;
+};
+
+struct krb5enc_ctx {
+ struct crypto_ahash *auth;
+ struct crypto_skcipher *enc;
+};
+
+struct krb5enc_request_ctx {
+ struct scatterlist src[2];
+ struct scatterlist dst[2];
+ char tail[];
+};
+
+static void krb5enc_request_complete(struct aead_request *req, int err)
+{
+ if (err != -EINPROGRESS)
+ aead_request_complete(req, err);
+}
+
+/**
+ * crypto_krb5enc_extractkeys - Extract Ke and Ki keys from the key blob.
+ * @keys: Where to put the key sizes and pointers
+ * @key: Encoded key material
+ * @keylen: Amount of key material
+ *
+ * Decode the key blob we're given. It starts with an rtattr that indicates
+ * the format and the length. Format CRYPTO_AUTHENC_KEYA_PARAM is:
+ *
+ * rtattr || __be32 enckeylen || authkey || enckey
+ *
+ * Note that the rtattr is in cpu-endian form, unlike enckeylen. This must be
+ * handled correctly in static testmgr data.
+ */
+int crypto_krb5enc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ unsigned int keylen)
+{
+ struct rtattr *rta = (struct rtattr *)key;
+ struct crypto_authenc_key_param *param;
+
+ if (!RTA_OK(rta, keylen))
+ return -EINVAL;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ return -EINVAL;
+
+ /*
+ * RTA_OK() didn't align the rtattr's payload when validating that it
+ * fits in the buffer. Yet, the keys should start on the next 4-byte
+ * aligned boundary. To avoid confusion, require that the rtattr
+ * payload be exactly the param struct, which has a 4-byte aligned size.
+ */
+ if (RTA_PAYLOAD(rta) != sizeof(*param))
+ return -EINVAL;
+ BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
+
+ param = RTA_DATA(rta);
+ keys->enckeylen = be32_to_cpu(param->enckeylen);
+
+ key += rta->rta_len;
+ keylen -= rta->rta_len;
+
+ if (keylen < keys->enckeylen)
+ return -EINVAL;
+
+ keys->authkeylen = keylen - keys->enckeylen;
+ keys->authkey = key;
+ keys->enckey = key + keys->authkeylen;
+ return 0;
+}
+EXPORT_SYMBOL(crypto_krb5enc_extractkeys);
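As an illustration of that layout, here is a minimal sketch of how a caller could pack Ki and Ke into the blob consumed by the setkey path below. The helper name is hypothetical; it relies only on the rtattr/crypto_authenc_key_param layout documented above (the krb5 library's authenc_load_encrypt_keys is presumably the real producer of this blob).

    /* Hypothetical sketch: build rtattr || __be32 enckeylen || authkey || enckey. */
    static int krb5enc_pack_keys_sketch(u8 *blob, unsigned int bloblen,
                                        const u8 *ki, unsigned int ki_len,
                                        const u8 *ke, unsigned int ke_len)
    {
            struct rtattr *rta = (struct rtattr *)blob;
            struct crypto_authenc_key_param *param;
            unsigned int hdr = RTA_LENGTH(sizeof(*param));

            if (bloblen < hdr + ki_len + ke_len)
                    return -EINVAL;

            rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
            rta->rta_len  = hdr;
            param = RTA_DATA(rta);
            param->enckeylen = cpu_to_be32(ke_len);

            memcpy(blob + hdr, ki, ki_len);          /* authkey (Ki) comes first */
            memcpy(blob + hdr + ki_len, ke, ke_len); /* enckey (Ke) follows */
            return hdr + ki_len + ke_len;            /* keylen to pass to setkey */
    }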
+
+static int krb5enc_setkey(struct crypto_aead *krb5enc, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct crypto_skcipher *enc = ctx->enc;
+ struct crypto_ahash *auth = ctx->auth;
+ unsigned int flags = crypto_aead_get_flags(krb5enc);
+ int err = -EINVAL;
+
+ if (crypto_krb5enc_extractkeys(&keys, key, keylen) != 0)
+ goto out;
+
+ crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(auth, flags & CRYPTO_TFM_REQ_MASK);
+ err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
+ if (err)
+ goto out;
+
+ crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(enc, flags & CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+}
+
+static void krb5enc_encrypt_done(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ krb5enc_request_complete(req, err);
+}
+
+/*
+ * Start the encryption of the plaintext. We skip over the associated data as
+ * that only gets included in the hash.
+ */
+static int krb5enc_dispatch_encrypt(struct aead_request *req,
+ unsigned int flags)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct crypto_skcipher *enc = ctx->enc;
+ struct skcipher_request *skreq = (void *)(areq_ctx->tail +
+ ictx->reqoff);
+ struct scatterlist *src, *dst;
+
+ src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
+ if (req->src == req->dst)
+ dst = src;
+ else
+ dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
+
+ skcipher_request_set_tfm(skreq, enc);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ krb5enc_encrypt_done, req);
+ skcipher_request_set_crypt(skreq, src, dst, req->cryptlen, req->iv);
+
+ return crypto_skcipher_encrypt(skreq);
+}
+
+/*
+ * Insert the hash into the checksum field in the destination buffer directly
+ * after the encrypted region.
+ */
+static void krb5enc_insert_checksum(struct aead_request *req, u8 *hash)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+
+ scatterwalk_map_and_copy(hash, req->dst,
+ req->assoclen + req->cryptlen,
+ crypto_aead_authsize(krb5enc), 1);
+}
+
+/*
+ * Upon completion of an asynchronous digest, transfer the hash to the checksum
+ * field.
+ */
+static void krb5enc_encrypt_ahash_done(void *data, int err)
+{
+ struct aead_request *req = data;
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+
+ if (err)
+ return krb5enc_request_complete(req, err);
+
+ krb5enc_insert_checksum(req, ahreq->result);
+
+ err = krb5enc_dispatch_encrypt(req, 0);
+ if (err != -EINPROGRESS)
+ aead_request_complete(req, err);
+}
+
+/*
+ * Start the digest of the plaintext for encryption. In theory, this could be
+ * run in parallel with the encryption, provided the src and dst buffers don't
+ * overlap.
+ */
+static int krb5enc_dispatch_encrypt_hash(struct aead_request *req)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct crypto_ahash *auth = ctx->auth;
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+ u8 *hash = areq_ctx->tail;
+ int err;
+
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ krb5enc_encrypt_ahash_done, req);
+ ahash_request_set_tfm(ahreq, auth);
+ ahash_request_set_crypt(ahreq, req->src, hash, req->assoclen + req->cryptlen);
+
+ err = crypto_ahash_digest(ahreq);
+ if (err)
+ return err;
+
+ krb5enc_insert_checksum(req, hash);
+ return 0;
+}
+
+/*
+ * Process an encryption operation. We can perform the cipher and the hash in
+ * parallel, provided the src and dst buffers are separate.
+ */
+static int krb5enc_encrypt(struct aead_request *req)
+{
+ int err;
+
+ err = krb5enc_dispatch_encrypt_hash(req);
+ if (err < 0)
+ return err;
+
+ return krb5enc_dispatch_encrypt(req, aead_request_flags(req));
+}
+
+static int krb5enc_verify_hash(struct aead_request *req)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+ unsigned int authsize = crypto_aead_authsize(krb5enc);
+ u8 *calc_hash = areq_ctx->tail;
+ u8 *msg_hash = areq_ctx->tail + authsize;
+
+ scatterwalk_map_and_copy(msg_hash, req->src, ahreq->nbytes, authsize, 0);
+
+ if (crypto_memneq(msg_hash, calc_hash, authsize))
+ return -EBADMSG;
+ return 0;
+}
+
+static void krb5enc_decrypt_hash_done(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ if (err)
+ return krb5enc_request_complete(req, err);
+
+ err = krb5enc_verify_hash(req);
+ krb5enc_request_complete(req, err);
+}
+
+/*
+ * Dispatch the hashing of the plaintext after we've done the decryption.
+ */
+static int krb5enc_dispatch_decrypt_hash(struct aead_request *req)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+ struct crypto_ahash *auth = ctx->auth;
+ unsigned int authsize = crypto_aead_authsize(krb5enc);
+ u8 *hash = areq_ctx->tail;
+ int err;
+
+ ahash_request_set_tfm(ahreq, auth);
+ ahash_request_set_crypt(ahreq, req->dst, hash,
+ req->assoclen + req->cryptlen - authsize);
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ krb5enc_decrypt_hash_done, req);
+
+ err = crypto_ahash_digest(ahreq);
+ if (err < 0)
+ return err;
+
+ return krb5enc_verify_hash(req);
+}
+
+/*
+ * Dispatch the decryption of the ciphertext.
+ */
+static int krb5enc_dispatch_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct skcipher_request *skreq = (void *)(areq_ctx->tail +
+ ictx->reqoff);
+ unsigned int authsize = crypto_aead_authsize(krb5enc);
+ struct scatterlist *src, *dst;
+
+ src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
+ dst = src;
+
+ if (req->src != req->dst)
+ dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
+
+ skcipher_request_set_tfm(skreq, ctx->enc);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(skreq, src, dst,
+ req->cryptlen - authsize, req->iv);
+
+ return crypto_skcipher_decrypt(skreq);
+}
+
+static int krb5enc_decrypt(struct aead_request *req)
+{
+ int err;
+
+ err = krb5enc_dispatch_decrypt(req);
+ if (err < 0)
+ return err;
+
+ return krb5enc_dispatch_decrypt_hash(req);
+}
+
+static int krb5enc_init_tfm(struct crypto_aead *tfm)
+{
+ struct aead_instance *inst = aead_alg_instance(tfm);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_ahash *auth;
+ struct crypto_skcipher *enc;
+ int err;
+
+ auth = crypto_spawn_ahash(&ictx->auth);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ enc = crypto_spawn_skcipher(&ictx->enc);
+ err = PTR_ERR(enc);
+ if (IS_ERR(enc))
+ goto err_free_ahash;
+
+ ctx->auth = auth;
+ ctx->enc = enc;
+
+ crypto_aead_set_reqsize(
+ tfm,
+ sizeof(struct krb5enc_request_ctx) +
+ ictx->reqoff + /* Space for two checksums */
+ umax(sizeof(struct ahash_request) + crypto_ahash_reqsize(auth),
+ sizeof(struct skcipher_request) + crypto_skcipher_reqsize(enc)));
+
+ return 0;
+
+err_free_ahash:
+ crypto_free_ahash(auth);
+ return err;
+}
+
+static void krb5enc_exit_tfm(struct crypto_aead *tfm)
+{
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_ahash(ctx->auth);
+ crypto_free_skcipher(ctx->enc);
+}
+
+static void krb5enc_free(struct aead_instance *inst)
+{
+ struct krb5enc_instance_ctx *ctx = aead_instance_ctx(inst);
+
+ crypto_drop_skcipher(&ctx->enc);
+ crypto_drop_ahash(&ctx->auth);
+ kfree(inst);
+}
+
+/*
+ * Create an instance of a template for a specific hash and cipher pair.
+ */
+static int krb5enc_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct krb5enc_instance_ctx *ictx;
+ struct skcipher_alg_common *enc;
+ struct hash_alg_common *auth;
+ struct aead_instance *inst;
+ struct crypto_alg *auth_base;
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err) {
+ pr_err("attr_type failed\n");
+ return err;
+ }
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ ictx = aead_instance_ctx(inst);
+
+ err = crypto_grab_ahash(&ictx->auth, aead_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
+ if (err) {
+ pr_err("grab ahash failed\n");
+ goto err_free_inst;
+ }
+ auth = crypto_spawn_ahash_alg(&ictx->auth);
+ auth_base = &auth->base;
+
+ err = crypto_grab_skcipher(&ictx->enc, aead_crypto_instance(inst),
+ crypto_attr_alg_name(tb[2]), 0, mask);
+ if (err) {
+ pr_err("grab skcipher failed\n");
+ goto err_free_inst;
+ }
+ enc = crypto_spawn_skcipher_alg_common(&ictx->enc);
+
+ ictx->reqoff = 2 * auth->digestsize;
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "krb5enc(%s,%s)", auth_base->cra_name,
+ enc->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "krb5enc(%s,%s)", auth_base->cra_driver_name,
+ enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
+ auth_base->cra_priority;
+ inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
+ inst->alg.base.cra_alignmask = enc->base.cra_alignmask;
+ inst->alg.base.cra_ctxsize = sizeof(struct krb5enc_ctx);
+
+ inst->alg.ivsize = enc->ivsize;
+ inst->alg.chunksize = enc->chunksize;
+ inst->alg.maxauthsize = auth->digestsize;
+
+ inst->alg.init = krb5enc_init_tfm;
+ inst->alg.exit = krb5enc_exit_tfm;
+
+ inst->alg.setkey = krb5enc_setkey;
+ inst->alg.encrypt = krb5enc_encrypt;
+ inst->alg.decrypt = krb5enc_decrypt;
+
+ inst->free = krb5enc_free;
+
+ err = aead_register_instance(tmpl, inst);
+ if (err) {
+ pr_err("ref failed\n");
+ goto err_free_inst;
+ }
+
+ return 0;
+
+err_free_inst:
+ krb5enc_free(inst);
+ return err;
+}
+
+static struct crypto_template crypto_krb5enc_tmpl = {
+ .name = "krb5enc",
+ .create = krb5enc_create,
+ .module = THIS_MODULE,
+};
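A usage sketch, assuming the inner hmac(sha256) and cts(cbc(aes)) algorithms are available and that key/keylen hold a blob in the layout parsed by crypto_krb5enc_extractkeys():

    struct crypto_aead *tfm;
    int err;

    /* The name follows the "krb5enc(%s,%s)" cra_name format built above. */
    tfm = crypto_alloc_aead("krb5enc(hmac(sha256),cts(cbc(aes)))", 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    err = crypto_aead_setkey(tfm, key, keylen);
    if (err)
            crypto_free_aead(tfm);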
+
+static int __init crypto_krb5enc_module_init(void)
+{
+ return crypto_register_template(&crypto_krb5enc_tmpl);
+}
+
+static void __exit crypto_krb5enc_module_exit(void)
+{
+ crypto_unregister_template(&crypto_krb5enc_tmpl);
+}
+
+module_init(crypto_krb5enc_module_init);
+module_exit(crypto_krb5enc_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Simple AEAD wrapper for Kerberos 5 RFC3961");
+MODULE_ALIAS_CRYPTO("krb5enc");
diff --git a/crypto/lrw.c b/crypto/lrw.c
index e216fbf2b786..dd403b800513 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -167,7 +167,7 @@ static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
while (w.nbytes) {
unsigned int avail = w.nbytes;
- be128 *wsrc;
+ const be128 *wsrc;
be128 *wdst;
wsrc = w.src.virt.addr;
@@ -322,7 +322,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
cipher_name, 0, mask);
- if (err == -ENOENT) {
+ if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) {
err = -ENAMETOOLONG;
if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
@@ -356,7 +356,7 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
/* Alas we screwed up the naming so we have to mangle the
* cipher name.
*/
- if (!strncmp(cipher_name, "ecb(", 4)) {
+ if (!memcmp(cipher_name, "ecb(", 4)) {
int len;
len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
@@ -420,7 +420,7 @@ static void __exit lrw_module_exit(void)
crypto_unregister_template(&lrw_tmpl);
}
-subsys_initcall(lrw_module_init);
+module_init(lrw_module_init);
module_exit(lrw_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
index cdb4897c63e6..c2e2c38b5aa8 100644
--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -294,6 +294,7 @@ static const struct crypto_type crypto_lskcipher_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_LSKCIPHER,
.tfmsize = offsetof(struct crypto_lskcipher, base),
+ .algsize = offsetof(struct lskcipher_alg, co.base),
};
static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm)
diff --git a/crypto/lz4.c b/crypto/lz4.c
index 0606f8862e78..7a984ae5ae52 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -12,11 +12,7 @@
#include <linux/lz4.h>
#include <crypto/internal/scompress.h>
-struct lz4_ctx {
- void *lz4_comp_mem;
-};
-
-static void *lz4_alloc_ctx(struct crypto_scomp *tfm)
+static void *lz4_alloc_ctx(void)
{
void *ctx;
@@ -27,29 +23,11 @@ static void *lz4_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int lz4_init(struct crypto_tfm *tfm)
-{
- struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->lz4_comp_mem = lz4_alloc_ctx(NULL);
- if (IS_ERR(ctx->lz4_comp_mem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void lz4_free_ctx(void *ctx)
{
vfree(ctx);
}
-static void lz4_exit(struct crypto_tfm *tfm)
-{
- struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lz4_free_ctx(NULL, ctx->lz4_comp_mem);
-}
-
static int __lz4_compress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -70,14 +48,6 @@ static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src,
return __lz4_compress_crypto(src, slen, dst, dlen, ctx);
}
-static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem);
-}
-
static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -97,26 +67,6 @@ static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
}
-static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst,
- unsigned int *dlen)
-{
- return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
-}
-
-static struct crypto_alg alg_lz4 = {
- .cra_name = "lz4",
- .cra_driver_name = "lz4-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct lz4_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = lz4_init,
- .cra_exit = lz4_exit,
- .cra_u = { .compress = {
- .coa_compress = lz4_compress_crypto,
- .coa_decompress = lz4_decompress_crypto } }
-};
-
static struct scomp_alg scomp = {
.alloc_ctx = lz4_alloc_ctx,
.free_ctx = lz4_free_ctx,
@@ -131,28 +81,15 @@ static struct scomp_alg scomp = {
static int __init lz4_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg_lz4);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg_lz4);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit lz4_mod_fini(void)
{
- crypto_unregister_alg(&alg_lz4);
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lz4_mod_init);
+module_init(lz4_mod_init);
module_exit(lz4_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index d7cc94aa2fcf..9c61d05b6214 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -4,18 +4,13 @@
*
* Copyright (c) 2013 Chanho Min <chanho.min@lge.com>
*/
+#include <crypto/internal/scompress.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <linux/lz4.h>
-#include <crypto/internal/scompress.h>
-
-struct lz4hc_ctx {
- void *lz4hc_comp_mem;
-};
-static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm)
+static void *lz4hc_alloc_ctx(void)
{
void *ctx;
@@ -26,29 +21,11 @@ static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int lz4hc_init(struct crypto_tfm *tfm)
-{
- struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL);
- if (IS_ERR(ctx->lz4hc_comp_mem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void lz4hc_free_ctx(void *ctx)
{
vfree(ctx);
}
-static void lz4hc_exit(struct crypto_tfm *tfm)
-{
- struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem);
-}
-
static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -69,16 +46,6 @@ static int lz4hc_scompress(struct crypto_scomp *tfm, const u8 *src,
return __lz4hc_compress_crypto(src, slen, dst, dlen, ctx);
}
-static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst,
- unsigned int *dlen)
-{
- struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __lz4hc_compress_crypto(src, slen, dst, dlen,
- ctx->lz4hc_comp_mem);
-}
-
static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -98,26 +65,6 @@ static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
}
-static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst,
- unsigned int *dlen)
-{
- return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
-}
-
-static struct crypto_alg alg_lz4hc = {
- .cra_name = "lz4hc",
- .cra_driver_name = "lz4hc-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct lz4hc_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = lz4hc_init,
- .cra_exit = lz4hc_exit,
- .cra_u = { .compress = {
- .coa_compress = lz4hc_compress_crypto,
- .coa_decompress = lz4hc_decompress_crypto } }
-};
-
static struct scomp_alg scomp = {
.alloc_ctx = lz4hc_alloc_ctx,
.free_ctx = lz4hc_free_ctx,
@@ -132,28 +79,15 @@ static struct scomp_alg scomp = {
static int __init lz4hc_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg_lz4hc);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg_lz4hc);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit lz4hc_mod_fini(void)
{
- crypto_unregister_alg(&alg_lz4hc);
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lz4hc_mod_init);
+module_init(lz4hc_mod_init);
module_exit(lz4hc_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c
index 0631d975bfac..ba013f2d5090 100644
--- a/crypto/lzo-rle.c
+++ b/crypto/lzo-rle.c
@@ -3,19 +3,13 @@
* Cryptographic API.
*/
+#include <crypto/internal/scompress.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
#include <linux/lzo.h>
-#include <crypto/internal/scompress.h>
-
-struct lzorle_ctx {
- void *lzorle_comp_mem;
-};
+#include <linux/module.h>
+#include <linux/slab.h>
-static void *lzorle_alloc_ctx(struct crypto_scomp *tfm)
+static void *lzorle_alloc_ctx(void)
{
void *ctx;
@@ -26,36 +20,18 @@ static void *lzorle_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int lzorle_init(struct crypto_tfm *tfm)
-{
- struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->lzorle_comp_mem = lzorle_alloc_ctx(NULL);
- if (IS_ERR(ctx->lzorle_comp_mem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void lzorle_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void lzorle_free_ctx(void *ctx)
{
kvfree(ctx);
}
-static void lzorle_exit(struct crypto_tfm *tfm)
-{
- struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lzorle_free_ctx(NULL, ctx->lzorle_comp_mem);
-}
-
static int __lzorle_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
- err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx);
+ err = lzorle1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK)
return -EINVAL;
@@ -64,14 +40,6 @@ static int __lzorle_compress(const u8 *src, unsigned int slen,
return 0;
}
-static int lzorle_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __lzorle_compress(src, slen, dst, dlen, ctx->lzorle_comp_mem);
-}
-
static int lzorle_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -94,12 +62,6 @@ static int __lzorle_decompress(const u8 *src, unsigned int slen,
return 0;
}
-static int lzorle_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- return __lzorle_decompress(src, slen, dst, dlen);
-}
-
static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -107,19 +69,6 @@ static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __lzorle_decompress(src, slen, dst, dlen);
}
-static struct crypto_alg alg = {
- .cra_name = "lzo-rle",
- .cra_driver_name = "lzo-rle-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct lzorle_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = lzorle_init,
- .cra_exit = lzorle_exit,
- .cra_u = { .compress = {
- .coa_compress = lzorle_compress,
- .coa_decompress = lzorle_decompress } }
-};
-
static struct scomp_alg scomp = {
.alloc_ctx = lzorle_alloc_ctx,
.free_ctx = lzorle_free_ctx,
@@ -134,28 +83,15 @@ static struct scomp_alg scomp = {
static int __init lzorle_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit lzorle_mod_fini(void)
{
- crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lzorle_mod_init);
+module_init(lzorle_mod_init);
module_exit(lzorle_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lzo.c b/crypto/lzo.c
index ebda132dd22b..7867e2c67c4e 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -3,19 +3,13 @@
* Cryptographic API.
*/
+#include <crypto/internal/scompress.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
#include <linux/lzo.h>
-#include <crypto/internal/scompress.h>
-
-struct lzo_ctx {
- void *lzo_comp_mem;
-};
+#include <linux/module.h>
+#include <linux/slab.h>
-static void *lzo_alloc_ctx(struct crypto_scomp *tfm)
+static void *lzo_alloc_ctx(void)
{
void *ctx;
@@ -26,36 +20,18 @@ static void *lzo_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int lzo_init(struct crypto_tfm *tfm)
-{
- struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->lzo_comp_mem = lzo_alloc_ctx(NULL);
- if (IS_ERR(ctx->lzo_comp_mem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void lzo_free_ctx(void *ctx)
{
kvfree(ctx);
}
-static void lzo_exit(struct crypto_tfm *tfm)
-{
- struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lzo_free_ctx(NULL, ctx->lzo_comp_mem);
-}
-
static int __lzo_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
- err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx);
+ err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK)
return -EINVAL;
@@ -64,14 +40,6 @@ static int __lzo_compress(const u8 *src, unsigned int slen,
return 0;
}
-static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem);
-}
-
static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -94,12 +62,6 @@ static int __lzo_decompress(const u8 *src, unsigned int slen,
return 0;
}
-static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- return __lzo_decompress(src, slen, dst, dlen);
-}
-
static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -107,19 +69,6 @@ static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __lzo_decompress(src, slen, dst, dlen);
}
-static struct crypto_alg alg = {
- .cra_name = "lzo",
- .cra_driver_name = "lzo-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct lzo_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = lzo_init,
- .cra_exit = lzo_exit,
- .cra_u = { .compress = {
- .coa_compress = lzo_compress,
- .coa_decompress = lzo_decompress } }
-};
-
static struct scomp_alg scomp = {
.alloc_ctx = lzo_alloc_ctx,
.free_ctx = lzo_free_ctx,
@@ -134,28 +83,15 @@ static struct scomp_alg scomp = {
static int __init lzo_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit lzo_mod_fini(void)
{
- crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(lzo_mod_init);
+module_init(lzo_mod_init);
module_exit(lzo_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/md4.c b/crypto/md4.c
index 2e7f2f319f95..55bf47e23c13 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -233,7 +233,7 @@ static void __exit md4_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(md4_mod_init);
+module_init(md4_mod_init);
module_exit(md4_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/md5.c b/crypto/md5.c
index 72c0c46fb5ee..32c0819f5118 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -17,11 +17,9 @@
*/
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
-#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
@@ -120,10 +118,11 @@ static void md5_transform(__u32 *hash, __u32 const *in)
hash[3] += d;
}
-static inline void md5_transform_helper(struct md5_state *ctx)
+static inline void md5_transform_helper(struct md5_state *ctx,
+ u32 block[MD5_BLOCK_WORDS])
{
- le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
- md5_transform(ctx->hash, ctx->block);
+ le32_to_cpu_array(block, MD5_BLOCK_WORDS);
+ md5_transform(ctx->hash, block);
}
static int md5_init(struct shash_desc *desc)
@@ -142,76 +141,53 @@ static int md5_init(struct shash_desc *desc)
static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
struct md5_state *mctx = shash_desc_ctx(desc);
- const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
+ u32 block[MD5_BLOCK_WORDS];
mctx->byte_count += len;
-
- if (avail > len) {
- memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
- data, len);
- return 0;
- }
-
- memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
- data, avail);
-
- md5_transform_helper(mctx);
- data += avail;
- len -= avail;
-
- while (len >= sizeof(mctx->block)) {
- memcpy(mctx->block, data, sizeof(mctx->block));
- md5_transform_helper(mctx);
- data += sizeof(mctx->block);
- len -= sizeof(mctx->block);
- }
-
- memcpy(mctx->block, data, len);
-
- return 0;
+ do {
+ memcpy(block, data, sizeof(block));
+ md5_transform_helper(mctx, block);
+ data += sizeof(block);
+ len -= sizeof(block);
+ } while (len >= sizeof(block));
+ memzero_explicit(block, sizeof(block));
+ mctx->byte_count -= len;
+ return len;
}
-static int md5_final(struct shash_desc *desc, u8 *out)
+static int md5_finup(struct shash_desc *desc, const u8 *data, unsigned int len,
+ u8 *out)
{
struct md5_state *mctx = shash_desc_ctx(desc);
- const unsigned int offset = mctx->byte_count & 0x3f;
- char *p = (char *)mctx->block + offset;
- int padding = 56 - (offset + 1);
+ u32 block[MD5_BLOCK_WORDS];
+ unsigned int offset;
+ int padding;
+ char *p;
+
+ memcpy(block, data, len);
+
+ offset = len;
+ p = (char *)block + offset;
+ padding = 56 - (offset + 1);
*p++ = 0x80;
if (padding < 0) {
memset(p, 0x00, padding + sizeof (u64));
- md5_transform_helper(mctx);
- p = (char *)mctx->block;
+ md5_transform_helper(mctx, block);
+ p = (char *)block;
padding = 56;
}
memset(p, 0, padding);
- mctx->block[14] = mctx->byte_count << 3;
- mctx->block[15] = mctx->byte_count >> 29;
- le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
- sizeof(u64)) / sizeof(u32));
- md5_transform(mctx->hash, mctx->block);
+ mctx->byte_count += len;
+ block[14] = mctx->byte_count << 3;
+ block[15] = mctx->byte_count >> 29;
+ le32_to_cpu_array(block, (sizeof(block) - sizeof(u64)) / sizeof(u32));
+ md5_transform(mctx->hash, block);
+ memzero_explicit(block, sizeof(block));
cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32));
memcpy(out, mctx->hash, sizeof(mctx->hash));
- memset(mctx, 0, sizeof(*mctx));
-
- return 0;
-}
-
-static int md5_export(struct shash_desc *desc, void *out)
-{
- struct md5_state *ctx = shash_desc_ctx(desc);
-
- memcpy(out, ctx, sizeof(*ctx));
- return 0;
-}
-
-static int md5_import(struct shash_desc *desc, const void *in)
-{
- struct md5_state *ctx = shash_desc_ctx(desc);
- memcpy(ctx, in, sizeof(*ctx));
return 0;
}
@@ -219,14 +195,12 @@ static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = md5_init,
.update = md5_update,
- .final = md5_final,
- .export = md5_export,
- .import = md5_import,
- .descsize = sizeof(struct md5_state),
- .statesize = sizeof(struct md5_state),
+ .finup = md5_finup,
+ .descsize = MD5_STATE_SIZE,
.base = {
.cra_name = "md5",
.cra_driver_name = "md5-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -242,7 +216,7 @@ static void __exit md5_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(md5_mod_init);
+module_init(md5_mod_init);
module_exit(md5_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index 0d14e980d4d6..69ad35f524d7 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -167,7 +167,7 @@ static void __exit michael_mic_exit(void)
}
-subsys_initcall(michael_mic_init);
+module_init(michael_mic_init);
module_exit(michael_mic_exit);
MODULE_LICENSE("GPL v2");
diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c
index a661d4f667cd..2b648615b5ec 100644
--- a/crypto/nhpoly1305.c
+++ b/crypto/nhpoly1305.c
@@ -245,7 +245,7 @@ static void __exit nhpoly1305_mod_exit(void)
crypto_unregister_shash(&nhpoly1305_alg);
}
-subsys_initcall(nhpoly1305_mod_init);
+module_init(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function");
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index cbfb3ac14b3a..d092717ea4fc 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -22,8 +22,8 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
struct crypto_cipher *tfm)
{
int bsize = crypto_cipher_blocksize(tfm);
+ const u8 *src = walk->src.virt.addr;
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
@@ -45,17 +45,17 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
{
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
do {
- memcpy(tmpbuf, src, bsize);
- crypto_xor(iv, src, bsize);
- crypto_cipher_encrypt_one(tfm, src, iv);
- crypto_xor_cpy(iv, tmpbuf, src, bsize);
+ memcpy(tmpbuf, dst, bsize);
+ crypto_xor(iv, dst, bsize);
+ crypto_cipher_encrypt_one(tfm, dst, iv);
+ crypto_xor_cpy(iv, tmpbuf, dst, bsize);
- src += bsize;
+ dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
@@ -89,8 +89,8 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
struct crypto_cipher *tfm)
{
int bsize = crypto_cipher_blocksize(tfm);
+ const u8 *src = walk->src.virt.addr;
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
@@ -112,17 +112,17 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
{
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
do {
- memcpy(tmpbuf, src, bsize);
- crypto_cipher_decrypt_one(tfm, src, src);
- crypto_xor(src, iv, bsize);
- crypto_xor_cpy(iv, src, tmpbuf, bsize);
+ memcpy(tmpbuf, dst, bsize);
+ crypto_cipher_decrypt_one(tfm, dst, dst);
+ crypto_xor(dst, iv, bsize);
+ crypto_xor_cpy(iv, dst, tmpbuf, bsize);
- src += bsize;
+ dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
@@ -186,7 +186,7 @@ static void __exit crypto_pcbc_module_exit(void)
crypto_unregister_template(&crypto_pcbc_tmpl);
}
-subsys_initcall(crypto_pcbc_module_init);
+module_init(crypto_pcbc_module_init);
module_exit(crypto_pcbc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 7fc79e7dce44..c33d29a523e0 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -381,7 +381,7 @@ static void __exit pcrypt_exit(void)
kset_unregister(pcrypt_kset);
}
-subsys_initcall(pcrypt_init);
+module_init(pcrypt_init);
module_exit(pcrypt_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
deleted file mode 100644
index e6f29a98725a..000000000000
--- a/crypto/poly1305_generic.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Poly1305 authenticator algorithm, RFC7539
- *
- * Copyright (C) 2015 Martin Willi
- *
- * Based on public domain code by Andrew Moon and Daniel J. Bernstein.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/poly1305.h>
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/unaligned.h>
-
-static int crypto_poly1305_init(struct shash_desc *desc)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- poly1305_core_init(&dctx->h);
- dctx->buflen = 0;
- dctx->rset = 0;
- dctx->sset = false;
-
- return 0;
-}
-
-static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
- const u8 *src, unsigned int srclen)
-{
- if (!dctx->sset) {
- if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
- poly1305_core_setkey(&dctx->core_r, src);
- src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- dctx->rset = 2;
- }
- if (srclen >= POLY1305_BLOCK_SIZE) {
- dctx->s[0] = get_unaligned_le32(src + 0);
- dctx->s[1] = get_unaligned_le32(src + 4);
- dctx->s[2] = get_unaligned_le32(src + 8);
- dctx->s[3] = get_unaligned_le32(src + 12);
- src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- dctx->sset = true;
- }
- }
- return srclen;
-}
-
-static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
- unsigned int srclen)
-{
- unsigned int datalen;
-
- if (unlikely(!dctx->sset)) {
- datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
- src += srclen - datalen;
- srclen = datalen;
- }
-
- poly1305_core_blocks(&dctx->h, &dctx->core_r, src,
- srclen / POLY1305_BLOCK_SIZE, 1);
-}
-
-static int crypto_poly1305_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- unsigned int bytes;
-
- if (unlikely(dctx->buflen)) {
- bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
- memcpy(dctx->buf + dctx->buflen, src, bytes);
- src += bytes;
- srclen -= bytes;
- dctx->buflen += bytes;
-
- if (dctx->buflen == POLY1305_BLOCK_SIZE) {
- poly1305_blocks(dctx, dctx->buf,
- POLY1305_BLOCK_SIZE);
- dctx->buflen = 0;
- }
- }
-
- if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
- poly1305_blocks(dctx, src, srclen);
- src += srclen - (srclen % POLY1305_BLOCK_SIZE);
- srclen %= POLY1305_BLOCK_SIZE;
- }
-
- if (unlikely(srclen)) {
- dctx->buflen = srclen;
- memcpy(dctx->buf, src, srclen);
- }
-
- return 0;
-}
-
-static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
-{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
-
- if (unlikely(!dctx->sset))
- return -ENOKEY;
-
- poly1305_final_generic(dctx, dst);
- return 0;
-}
-
-static struct shash_alg poly1305_alg = {
- .digestsize = POLY1305_DIGEST_SIZE,
- .init = crypto_poly1305_init,
- .update = crypto_poly1305_update,
- .final = crypto_poly1305_final,
- .descsize = sizeof(struct poly1305_desc_ctx),
- .base = {
- .cra_name = "poly1305",
- .cra_driver_name = "poly1305-generic",
- .cra_priority = 100,
- .cra_blocksize = POLY1305_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- },
-};
-
-static int __init poly1305_mod_init(void)
-{
- return crypto_register_shash(&poly1305_alg);
-}
-
-static void __exit poly1305_mod_exit(void)
-{
- crypto_unregister_shash(&poly1305_alg);
-}
-
-subsys_initcall(poly1305_mod_init);
-module_exit(poly1305_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
-MODULE_DESCRIPTION("Poly1305 authenticator");
-MODULE_ALIAS_CRYPTO("poly1305");
-MODULE_ALIAS_CRYPTO("poly1305-generic");
diff --git a/crypto/polyval-generic.c b/crypto/polyval-generic.c
index 4f98910bcdb5..db8adb56e4ca 100644
--- a/crypto/polyval-generic.c
+++ b/crypto/polyval-generic.c
@@ -44,15 +44,15 @@
*
*/
-#include <linux/unaligned.h>
-#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
-#include <crypto/polyval.h>
#include <crypto/internal/hash.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
+#include <crypto/polyval.h>
+#include <crypto/utils.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
struct polyval_tfm_ctx {
struct gf128mul_4k *gf128;
@@ -63,7 +63,6 @@ struct polyval_desc_ctx {
u8 buffer[POLYVAL_BLOCK_SIZE];
be128 buffer128;
};
- u32 bytes;
};
static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE],
@@ -76,46 +75,6 @@ static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE],
put_unaligned(swab64(b), (u64 *)&dst[0]);
}
-/*
- * Performs multiplication in the POLYVAL field using the GHASH field as a
- * subroutine. This function is used as a fallback for hardware accelerated
- * implementations when simd registers are unavailable.
- *
- * Note: This function is not used for polyval-generic, instead we use the 4k
- * lookup table implementation for finite field multiplication.
- */
-void polyval_mul_non4k(u8 *op1, const u8 *op2)
-{
- be128 a, b;
-
- // Assume one argument is in Montgomery form and one is not.
- copy_and_reverse((u8 *)&a, op1);
- copy_and_reverse((u8 *)&b, op2);
- gf128mul_x_lle(&a, &a);
- gf128mul_lle(&a, &b);
- copy_and_reverse(op1, (u8 *)&a);
-}
-EXPORT_SYMBOL_GPL(polyval_mul_non4k);
-
-/*
- * Perform a POLYVAL update using non4k multiplication. This function is used
- * as a fallback for hardware accelerated implementations when simd registers
- * are unavailable.
- *
- * Note: This function is not used for polyval-generic, instead we use the 4k
- * lookup table implementation of finite field multiplication.
- */
-void polyval_update_non4k(const u8 *key, const u8 *in,
- size_t nblocks, u8 *accumulator)
-{
- while (nblocks--) {
- crypto_xor(accumulator, in, POLYVAL_BLOCK_SIZE);
- polyval_mul_non4k(accumulator, key);
- in += POLYVAL_BLOCK_SIZE;
- }
-}
-EXPORT_SYMBOL_GPL(polyval_update_non4k);
-
static int polyval_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
@@ -154,56 +113,53 @@ static int polyval_update(struct shash_desc *desc,
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm);
- u8 *pos;
u8 tmp[POLYVAL_BLOCK_SIZE];
- int n;
-
- if (dctx->bytes) {
- n = min(srclen, dctx->bytes);
- pos = dctx->buffer + dctx->bytes - 1;
-
- dctx->bytes -= n;
- srclen -= n;
-
- while (n--)
- *pos-- ^= *src++;
- if (!dctx->bytes)
- gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
- }
-
- while (srclen >= POLYVAL_BLOCK_SIZE) {
+ do {
copy_and_reverse(tmp, src);
crypto_xor(dctx->buffer, tmp, POLYVAL_BLOCK_SIZE);
gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
src += POLYVAL_BLOCK_SIZE;
srclen -= POLYVAL_BLOCK_SIZE;
- }
+ } while (srclen >= POLYVAL_BLOCK_SIZE);
+
+ return srclen;
+}
- if (srclen) {
- dctx->bytes = POLYVAL_BLOCK_SIZE - srclen;
- pos = dctx->buffer + POLYVAL_BLOCK_SIZE - 1;
- while (srclen--)
- *pos-- ^= *src++;
+static int polyval_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *dst)
+{
+ struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (len) {
+ u8 tmp[POLYVAL_BLOCK_SIZE] = {};
+
+ memcpy(tmp, src, len);
+ polyval_update(desc, tmp, POLYVAL_BLOCK_SIZE);
}
+ copy_and_reverse(dst, dctx->buffer);
+ return 0;
+}
+
+static int polyval_export(struct shash_desc *desc, void *out)
+{
+ struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
+ copy_and_reverse(out, dctx->buffer);
return 0;
}
-static int polyval_final(struct shash_desc *desc, u8 *dst)
+static int polyval_import(struct shash_desc *desc, const void *in)
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
- const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm);
- if (dctx->bytes)
- gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
- copy_and_reverse(dst, dctx->buffer);
+ copy_and_reverse(dctx->buffer, in);
return 0;
}
-static void polyval_exit_tfm(struct crypto_tfm *tfm)
+static void polyval_exit_tfm(struct crypto_shash *tfm)
{
- struct polyval_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct polyval_tfm_ctx *ctx = crypto_shash_ctx(tfm);
gf128mul_free_4k(ctx->gf128);
}
@@ -212,17 +168,21 @@ static struct shash_alg polyval_alg = {
.digestsize = POLYVAL_DIGEST_SIZE,
.init = polyval_init,
.update = polyval_update,
- .final = polyval_final,
+ .finup = polyval_finup,
.setkey = polyval_setkey,
+ .export = polyval_export,
+ .import = polyval_import,
+ .exit_tfm = polyval_exit_tfm,
+ .statesize = sizeof(struct polyval_desc_ctx),
.descsize = sizeof(struct polyval_desc_ctx),
.base = {
.cra_name = "polyval",
.cra_driver_name = "polyval-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = POLYVAL_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct polyval_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_exit = polyval_exit_tfm,
},
};
@@ -236,7 +196,7 @@ static void __exit polyval_mod_exit(void)
crypto_unregister_shash(&polyval_alg);
}
-subsys_initcall(polyval_mod_init);
+module_init(polyval_mod_init);
module_exit(polyval_mod_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/proc.c b/crypto/proc.c
index 522b27d90d29..82f15b967e85 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -72,9 +72,6 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "max keysize : %u\n",
alg->cra_cipher.cia_max_keysize);
break;
- case CRYPTO_ALG_TYPE_COMPRESS:
- seq_printf(m, "type : compression\n");
- break;
default:
seq_printf(m, "type : unknown\n");
break;
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index c5fe4034b153..9860b60c9be4 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -9,18 +9,14 @@
* Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
+#include <linux/string.h>
#include "ripemd.h"
struct rmd160_ctx {
u64 byte_count;
u32 state[5];
- __le32 buffer[16];
};
#define K1 RMD_K1
@@ -265,72 +261,59 @@ static int rmd160_init(struct shash_desc *desc)
rctx->state[3] = RMD_H3;
rctx->state[4] = RMD_H4;
- memset(rctx->buffer, 0, sizeof(rctx->buffer));
-
return 0;
}
static int rmd160_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
+ int remain = len - round_down(len, RMD160_BLOCK_SIZE);
struct rmd160_ctx *rctx = shash_desc_ctx(desc);
- const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
-
- rctx->byte_count += len;
+ __le32 buffer[RMD160_BLOCK_SIZE / 4];
- /* Enough space in buffer? If so copy and we're done */
- if (avail > len) {
- memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
- data, len);
- goto out;
- }
-
- memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
- data, avail);
+ rctx->byte_count += len - remain;
- rmd160_transform(rctx->state, rctx->buffer);
- data += avail;
- len -= avail;
-
- while (len >= sizeof(rctx->buffer)) {
- memcpy(rctx->buffer, data, sizeof(rctx->buffer));
- rmd160_transform(rctx->state, rctx->buffer);
- data += sizeof(rctx->buffer);
- len -= sizeof(rctx->buffer);
- }
+ do {
+ memcpy(buffer, data, sizeof(buffer));
+ rmd160_transform(rctx->state, buffer);
+ data += sizeof(buffer);
+ len -= sizeof(buffer);
+ } while (len >= sizeof(buffer));
- memcpy(rctx->buffer, data, len);
-
-out:
- return 0;
+ memzero_explicit(buffer, sizeof(buffer));
+ return remain;
}
/* Add padding and return the message digest. */
-static int rmd160_final(struct shash_desc *desc, u8 *out)
+static int rmd160_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
+ unsigned int bit_offset = RMD160_BLOCK_SIZE / 8 - 1;
struct rmd160_ctx *rctx = shash_desc_ctx(desc);
- u32 i, index, padlen;
- __le64 bits;
+ union {
+ __le64 l64[RMD160_BLOCK_SIZE / 4];
+ __le32 l32[RMD160_BLOCK_SIZE / 2];
+ u8 u8[RMD160_BLOCK_SIZE * 2];
+ } block = {};
__le32 *dst = (__le32 *)out;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_le64(rctx->byte_count << 3);
-
- /* Pad out to 56 mod 64 */
- index = rctx->byte_count & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- rmd160_update(desc, padding, padlen);
+ u32 i;
- /* Append length */
- rmd160_update(desc, (const u8 *)&bits, sizeof(bits));
+ rctx->byte_count += len;
+ if (len >= bit_offset * 8)
+ bit_offset += RMD160_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ block.l64[bit_offset] = cpu_to_le64(rctx->byte_count << 3);
+
+ rmd160_transform(rctx->state, block.l32);
+ if (bit_offset > RMD160_BLOCK_SIZE / 8)
+ rmd160_transform(rctx->state,
+ block.l32 + RMD160_BLOCK_SIZE / 4);
+ memzero_explicit(&block, sizeof(block));
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_le32p(&rctx->state[i]);
-
- /* Wipe context */
- memset(rctx, 0, sizeof(*rctx));
-
return 0;
}
@@ -338,11 +321,12 @@ static struct shash_alg alg = {
.digestsize = RMD160_DIGEST_SIZE,
.init = rmd160_init,
.update = rmd160_update,
- .final = rmd160_final,
+ .finup = rmd160_finup,
.descsize = sizeof(struct rmd160_ctx),
.base = {
.cra_name = "rmd160",
.cra_driver_name = "rmd160-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = RMD160_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -358,7 +342,7 @@ static void __exit rmd160_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(rmd160_mod_init);
+module_init(rmd160_mod_init);
module_exit(rmd160_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/rng.c b/crypto/rng.c
index 9d8804e46422..b8ae6ebc091d 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -98,6 +98,7 @@ static const struct crypto_type crypto_rng_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_RNG,
.tfmsize = offsetof(struct crypto_rng, base),
+ .algsize = offsetof(struct rng_alg, base),
};
struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask)
diff --git a/crypto/rsa.c b/crypto/rsa.c
index b7d21529c552..6c7734083c98 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -430,7 +430,7 @@ static void __exit rsa_exit(void)
crypto_unregister_akcipher(&rsa);
}
-subsys_initcall(rsa_init);
+module_init(rsa_init);
module_exit(rsa_exit);
MODULE_ALIAS_CRYPTO("rsa");
MODULE_LICENSE("GPL");
diff --git a/crypto/rsassa-pkcs1.c b/crypto/rsassa-pkcs1.c
index f68ffd338f48..94fa5e9600e7 100644
--- a/crypto/rsassa-pkcs1.c
+++ b/crypto/rsassa-pkcs1.c
@@ -210,7 +210,7 @@ static int rsassa_pkcs1_sign(struct crypto_sig *tfm,
memset(dst, 0, pad_len);
}
- return 0;
+ return ctx->key_size;
}
static int rsassa_pkcs1_verify(struct crypto_sig *tfm,
@@ -301,7 +301,7 @@ static unsigned int rsassa_pkcs1_key_size(struct crypto_sig *tfm)
{
struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
- return ctx->key_size;
+ return ctx->key_size * BITS_PER_BYTE;
}
static int rsassa_pkcs1_set_pub_key(struct crypto_sig *tfm,
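Taken together, the two rsassa-pkcs1 changes make the sig API self-describing: rsassa_pkcs1_sign() now returns the number of signature bytes written instead of 0, and rsassa_pkcs1_key_size() reports the key size in bits rather than bytes. As a worked example, for a 2048-bit RSA key ctx->key_size is 256, so sign() returns 256 while key_size() returns 256 * BITS_PER_BYTE = 2048.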
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 16f6ba896fb6..1d010e2a1b1a 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -10,64 +10,119 @@
*/
#include <crypto/scatterwalk.h>
+#include <linux/crypto.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
-static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
+enum {
+ SKCIPHER_WALK_SLOW = 1 << 0,
+ SKCIPHER_WALK_COPY = 1 << 1,
+ SKCIPHER_WALK_DIFF = 1 << 2,
+ SKCIPHER_WALK_SLEEP = 1 << 3,
+};
+
+static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
+{
+ return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
+void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes)
{
- void *src = out ? buf : sgdata;
- void *dst = out ? sgdata : buf;
+ struct scatterlist *sg = walk->sg;
+
+ nbytes += walk->offset - sg->offset;
- memcpy(dst, src, nbytes);
+ while (nbytes > sg->length) {
+ nbytes -= sg->length;
+ sg = sg_next(sg);
+ }
+ walk->sg = sg;
+ walk->offset = sg->offset + nbytes;
}
+EXPORT_SYMBOL_GPL(scatterwalk_skip);
-void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
- size_t nbytes, int out)
+inline void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk,
+ unsigned int nbytes)
{
- for (;;) {
- unsigned int len_this_page = scatterwalk_pagelen(walk);
- u8 *vaddr;
+ do {
+ unsigned int to_copy;
- if (len_this_page > nbytes)
- len_this_page = nbytes;
+ to_copy = scatterwalk_next(walk, nbytes);
+ memcpy(buf, walk->addr, to_copy);
+ scatterwalk_done_src(walk, to_copy);
+ buf += to_copy;
+ nbytes -= to_copy;
+ } while (nbytes);
+}
+EXPORT_SYMBOL_GPL(memcpy_from_scatterwalk);
- if (out != 2) {
- vaddr = scatterwalk_map(walk);
- memcpy_dir(buf, vaddr, len_this_page, out);
- scatterwalk_unmap(vaddr);
- }
+inline void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf,
+ unsigned int nbytes)
+{
+ do {
+ unsigned int to_copy;
- scatterwalk_advance(walk, len_this_page);
+ to_copy = scatterwalk_next(walk, nbytes);
+ memcpy(walk->addr, buf, to_copy);
+ scatterwalk_done_dst(walk, to_copy);
+ buf += to_copy;
+ nbytes -= to_copy;
+ } while (nbytes);
+}
+EXPORT_SYMBOL_GPL(memcpy_to_scatterwalk);
- if (nbytes == len_this_page)
- break;
+void memcpy_from_sglist(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes)
+{
+ struct scatter_walk walk;
- buf += len_this_page;
- nbytes -= len_this_page;
+ if (unlikely(nbytes == 0)) /* in case sg == NULL */
+ return;
- scatterwalk_pagedone(walk, out & 1, 1);
- }
+ scatterwalk_start_at_pos(&walk, sg, start);
+ memcpy_from_scatterwalk(buf, &walk, nbytes);
}
-EXPORT_SYMBOL_GPL(scatterwalk_copychunks);
+EXPORT_SYMBOL_GPL(memcpy_from_sglist);
-void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
- unsigned int start, unsigned int nbytes, int out)
+void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
+ const void *buf, unsigned int nbytes)
{
struct scatter_walk walk;
- struct scatterlist tmp[2];
- if (!nbytes)
+ if (unlikely(nbytes == 0)) /* in case sg == NULL */
return;
- sg = scatterwalk_ffwd(tmp, sg, start);
+ scatterwalk_start_at_pos(&walk, sg, start);
+ memcpy_to_scatterwalk(&walk, buf, nbytes);
+}
+EXPORT_SYMBOL_GPL(memcpy_to_sglist);
+
+void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct skcipher_walk walk = {};
+
+ if (unlikely(nbytes == 0)) /* in case sg == NULL */
+ return;
+
+ walk.total = nbytes;
+
+ scatterwalk_start(&walk.in, src);
+ scatterwalk_start(&walk.out, dst);
- scatterwalk_start(&walk, sg);
- scatterwalk_copychunks(buf, &walk, nbytes, out);
- scatterwalk_done(&walk, out, 0);
+ skcipher_walk_first(&walk, true);
+ do {
+ if (walk.src.virt.addr != walk.dst.virt.addr)
+ memcpy(walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes);
+ skcipher_walk_done(&walk, 0);
+ } while (walk.nbytes);
}
-EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
+EXPORT_SYMBOL_GPL(memcpy_sglist);
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
@@ -91,3 +146,236 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
return dst;
}
EXPORT_SYMBOL_GPL(scatterwalk_ffwd);
+
+static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
+{
+ unsigned alignmask = walk->alignmask;
+ unsigned n;
+ void *buffer;
+
+ if (!walk->buffer)
+ walk->buffer = walk->page;
+ buffer = walk->buffer;
+ if (!buffer) {
+ /* Min size for a buffer of bsize bytes aligned to alignmask */
+ n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ buffer = kzalloc(n, skcipher_walk_gfp(walk));
+ if (!buffer)
+ return skcipher_walk_done(walk, -ENOMEM);
+ walk->buffer = buffer;
+ }
+
+ buffer = PTR_ALIGN(buffer, alignmask + 1);
+ memcpy_from_scatterwalk(buffer, &walk->in, bsize);
+ walk->out.__addr = buffer;
+ walk->in.__addr = walk->out.addr;
+
+ walk->nbytes = bsize;
+ walk->flags |= SKCIPHER_WALK_SLOW;
+
+ return 0;
+}
+
+static int skcipher_next_copy(struct skcipher_walk *walk)
+{
+ void *tmp = walk->page;
+
+ scatterwalk_map(&walk->in);
+ memcpy(tmp, walk->in.addr, walk->nbytes);
+ scatterwalk_unmap(&walk->in);
+ /*
+ * walk->in is advanced later when the number of bytes actually
+ * processed (which might be less than walk->nbytes) is known.
+ */
+
+ walk->in.__addr = tmp;
+ walk->out.__addr = tmp;
+ return 0;
+}
+
+static int skcipher_next_fast(struct skcipher_walk *walk)
+{
+ unsigned long diff;
+
+ diff = offset_in_page(walk->in.offset) -
+ offset_in_page(walk->out.offset);
+ diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
+ (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
+
+ scatterwalk_map(&walk->out);
+ walk->in.__addr = walk->out.__addr;
+
+ if (diff) {
+ walk->flags |= SKCIPHER_WALK_DIFF;
+ scatterwalk_map(&walk->in);
+ }
+
+ return 0;
+}
+
+static int skcipher_walk_next(struct skcipher_walk *walk)
+{
+ unsigned int bsize;
+ unsigned int n;
+
+ n = walk->total;
+ bsize = min(walk->stride, max(n, walk->blocksize));
+ n = scatterwalk_clamp(&walk->in, n);
+ n = scatterwalk_clamp(&walk->out, n);
+
+ if (unlikely(n < bsize)) {
+ if (unlikely(walk->total < walk->blocksize))
+ return skcipher_walk_done(walk, -EINVAL);
+
+slow_path:
+ return skcipher_next_slow(walk, bsize);
+ }
+ walk->nbytes = n;
+
+ if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
+ if (!walk->page) {
+ gfp_t gfp = skcipher_walk_gfp(walk);
+
+ walk->page = (void *)__get_free_page(gfp);
+ if (!walk->page)
+ goto slow_path;
+ }
+ walk->flags |= SKCIPHER_WALK_COPY;
+ return skcipher_next_copy(walk);
+ }
+
+ return skcipher_next_fast(walk);
+}
+
+static int skcipher_copy_iv(struct skcipher_walk *walk)
+{
+ unsigned alignmask = walk->alignmask;
+ unsigned ivsize = walk->ivsize;
+ unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
+ unsigned size;
+ u8 *iv;
+
+ /* Min size for a buffer of stride + ivsize, aligned to alignmask */
+ size = aligned_stride + ivsize +
+ (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+ walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
+ if (!walk->buffer)
+ return -ENOMEM;
+
+ iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
+
+ walk->iv = memcpy(iv, walk->iv, walk->ivsize);
+ return 0;
+}
+
+int skcipher_walk_first(struct skcipher_walk *walk, bool atomic)
+{
+ if (WARN_ON_ONCE(in_hardirq()))
+ return -EDEADLK;
+
+ walk->flags = atomic ? 0 : SKCIPHER_WALK_SLEEP;
+
+ walk->buffer = NULL;
+ if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+ int err = skcipher_copy_iv(walk);
+ if (err)
+ return err;
+ }
+
+ walk->page = NULL;
+
+ return skcipher_walk_next(walk);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_first);
+
+/**
+ * skcipher_walk_done() - finish one step of a skcipher_walk
+ * @walk: the skcipher_walk
+ * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
+ * or a -errno value to terminate the walk due to an error
+ *
+ * This function cleans up after one step of walking through the source and
+ * destination scatterlists, and advances to the next step if applicable.
+ * walk->nbytes is set to the number of bytes available in the next step,
+ * walk->total is set to the new total number of bytes remaining, and
+ * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
+ * is no more data, or if an error occurred (i.e. -errno return), then
+ * walk->nbytes and walk->total are set to 0 and all resources owned by the
+ * skcipher_walk are freed.
+ *
+ * Return: 0 or a -errno value. If @res was a -errno value then it will be
+ * returned, but other errors may occur too.
+ */
+int skcipher_walk_done(struct skcipher_walk *walk, int res)
+{
+ unsigned int n = walk->nbytes; /* num bytes processed this step */
+ unsigned int total = 0; /* new total remaining */
+
+ if (!n)
+ goto finish;
+
+ if (likely(res >= 0)) {
+ n -= res; /* subtract num bytes *not* processed */
+ total = walk->total - n;
+ }
+
+ if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
+ SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF)))) {
+ scatterwalk_advance(&walk->in, n);
+ } else if (walk->flags & SKCIPHER_WALK_DIFF) {
+ scatterwalk_done_src(&walk->in, n);
+ } else if (walk->flags & SKCIPHER_WALK_COPY) {
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_map(&walk->out);
+ memcpy(walk->out.addr, walk->page, n);
+ } else { /* SKCIPHER_WALK_SLOW */
+ if (res > 0) {
+ /*
+ * Didn't process all bytes. Either the algorithm is
+ * broken, or this was the last step and it turned out
+ * the message wasn't evenly divisible into blocks but
+ * the algorithm requires it.
+ */
+ res = -EINVAL;
+ total = 0;
+ } else
+ memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
+ goto dst_done;
+ }
+
+ scatterwalk_done_dst(&walk->out, n);
+dst_done:
+
+ if (res > 0)
+ res = 0;
+
+ walk->total = total;
+ walk->nbytes = 0;
+
+ if (total) {
+ if (walk->flags & SKCIPHER_WALK_SLEEP)
+ cond_resched();
+ walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF);
+ return skcipher_walk_next(walk);
+ }
+
+finish:
+ /* Short-circuit for the common/fast path. */
+ if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
+ goto out;
+
+ if (walk->iv != walk->oiv)
+ memcpy(walk->oiv, walk->iv, walk->ivsize);
+ if (walk->buffer != walk->page)
+ kfree(walk->buffer);
+ if (walk->page)
+ free_page((unsigned long)walk->page);
+
+out:
+ return res;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_done);
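scatterwalk_copychunks()/scatterwalk_map_and_copy() and their in/out direction flag are replaced by direction-explicit helpers; memcpy_sglist() (used by seqiv below) copies directly between two scatterlists via an skcipher walk. A minimal usage sketch of the sglist helpers, with illustrative names:

#include <crypto/scatterwalk.h>

/* Copy a 16-byte tag out of, then back into, a scatterlist at offset off. */
static void sg_tag_roundtrip_example(struct scatterlist *sg, unsigned int off)
{
	u8 tag[16];

	memcpy_from_sglist(tag, sg, off, sizeof(tag));	/* sg -> linear buffer */
	/* ... inspect or rewrite tag here ... */
	memcpy_to_sglist(sg, off, tag, sizeof(tag));	/* linear buffer -> sg */
}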
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 1cef6bb06a81..c651e7f2197a 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -7,26 +7,30 @@
* Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
-#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
+#include <linux/cpumask.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
+#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
#include <net/netlink.h>
#include "compress.h"
struct scomp_scratch {
spinlock_t lock;
- void *src;
- void *dst;
+ union {
+ void *src;
+ unsigned long saddr;
+ };
};
static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
@@ -37,6 +41,10 @@ static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
+static cpumask_t scomp_scratch_want;
+static void scomp_scratch_workfn(struct work_struct *work);
+static DECLARE_WORK(scomp_scratch_work, scomp_scratch_workfn);
+
static int __maybe_unused crypto_scomp_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
@@ -66,119 +74,206 @@ static void crypto_scomp_free_scratches(void)
for_each_possible_cpu(i) {
scratch = per_cpu_ptr(&scomp_scratch, i);
- vfree(scratch->src);
- vfree(scratch->dst);
+ free_page(scratch->saddr);
scratch->src = NULL;
- scratch->dst = NULL;
}
}
-static int crypto_scomp_alloc_scratches(void)
+static int scomp_alloc_scratch(struct scomp_scratch *scratch, int cpu)
{
- struct scomp_scratch *scratch;
- int i;
+ int node = cpu_to_node(cpu);
+ struct page *page;
+
+ page = alloc_pages_node(node, GFP_KERNEL, 0);
+ if (!page)
+ return -ENOMEM;
+ spin_lock_bh(&scratch->lock);
+ scratch->src = page_address(page);
+ spin_unlock_bh(&scratch->lock);
+ return 0;
+}
- for_each_possible_cpu(i) {
- void *mem;
+static void scomp_scratch_workfn(struct work_struct *work)
+{
+ int cpu;
- scratch = per_cpu_ptr(&scomp_scratch, i);
+ for_each_cpu(cpu, &scomp_scratch_want) {
+ struct scomp_scratch *scratch;
+
+ scratch = per_cpu_ptr(&scomp_scratch, cpu);
+ if (scratch->src)
+ continue;
+ if (scomp_alloc_scratch(scratch, cpu))
+ break;
- mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
- if (!mem)
- goto error;
- scratch->src = mem;
- mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
- if (!mem)
- goto error;
- scratch->dst = mem;
+ cpumask_clear_cpu(cpu, &scomp_scratch_want);
}
- return 0;
-error:
- crypto_scomp_free_scratches();
- return -ENOMEM;
+}
+
+static int crypto_scomp_alloc_scratches(void)
+{
+ unsigned int i = cpumask_first(cpu_possible_mask);
+ struct scomp_scratch *scratch;
+
+ scratch = per_cpu_ptr(&scomp_scratch, i);
+ return scomp_alloc_scratch(scratch, i);
}
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
+ struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
int ret = 0;
mutex_lock(&scomp_lock);
- if (!scomp_scratch_users++)
+ ret = crypto_acomp_alloc_streams(&alg->streams);
+ if (ret)
+ goto unlock;
+ if (!scomp_scratch_users++) {
ret = crypto_scomp_alloc_scratches();
+ if (ret)
+ scomp_scratch_users--;
+ }
+unlock:
mutex_unlock(&scomp_lock);
return ret;
}
+static struct scomp_scratch *scomp_lock_scratch(void) __acquires(scratch)
+{
+ int cpu = raw_smp_processor_id();
+ struct scomp_scratch *scratch;
+
+ scratch = per_cpu_ptr(&scomp_scratch, cpu);
+ spin_lock(&scratch->lock);
+ if (likely(scratch->src))
+ return scratch;
+ spin_unlock(&scratch->lock);
+
+ cpumask_set_cpu(cpu, &scomp_scratch_want);
+ schedule_work(&scomp_scratch_work);
+
+ scratch = per_cpu_ptr(&scomp_scratch, cpumask_first(cpu_possible_mask));
+ spin_lock(&scratch->lock);
+ return scratch;
+}
+
+static inline void scomp_unlock_scratch(struct scomp_scratch *scratch)
+ __releases(scratch)
+{
+ spin_unlock(&scratch->lock);
+}
+
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- void **tfm_ctx = acomp_tfm_ctx(tfm);
+ struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
+ bool src_isvirt = acomp_request_src_isvirt(req);
+ bool dst_isvirt = acomp_request_dst_isvirt(req);
struct crypto_scomp *scomp = *tfm_ctx;
- void **ctx = acomp_request_ctx(req);
+ struct crypto_acomp_stream *stream;
struct scomp_scratch *scratch;
- void *src, *dst;
- unsigned int dlen;
+ unsigned int slen = req->slen;
+ unsigned int dlen = req->dlen;
+ struct page *spage, *dpage;
+ unsigned int n;
+ const u8 *src;
+ size_t soff;
+ size_t doff;
+ u8 *dst;
int ret;
- if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
+ if (!req->src || !slen)
return -EINVAL;
- if (req->dst && !req->dlen)
+ if (!req->dst || !dlen)
return -EINVAL;
- if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
- req->dlen = SCOMP_SCRATCH_SIZE;
-
- dlen = req->dlen;
-
- scratch = raw_cpu_ptr(&scomp_scratch);
- spin_lock(&scratch->lock);
+ if (dst_isvirt)
+ dst = req->dvirt;
+ else {
+ if (dlen <= req->dst->length) {
+ dpage = sg_page(req->dst);
+ doff = req->dst->offset;
+ } else
+ return -ENOSYS;
+
+ dpage = nth_page(dpage, doff / PAGE_SIZE);
+ doff = offset_in_page(doff);
+
+ n = (dlen - 1) / PAGE_SIZE;
+ n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
+ if (PageHighMem(dpage + n) &&
+ size_add(doff, dlen) > PAGE_SIZE)
+ return -ENOSYS;
+ dst = kmap_local_page(dpage) + doff;
+ }
- if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
- src = page_to_virt(sg_page(req->src)) + req->src->offset;
- } else {
- scatterwalk_map_and_copy(scratch->src, req->src, 0,
- req->slen, 0);
- src = scratch->src;
+ if (src_isvirt)
+ src = req->svirt;
+ else {
+ src = NULL;
+ do {
+ if (slen <= req->src->length) {
+ spage = sg_page(req->src);
+ soff = req->src->offset;
+ } else
+ break;
+
+ spage = nth_page(spage, soff / PAGE_SIZE);
+ soff = offset_in_page(soff);
+
+ n = (slen - 1) / PAGE_SIZE;
+ n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
+ if (PageHighMem(nth_page(spage, n)) &&
+ size_add(soff, slen) > PAGE_SIZE)
+ break;
+ src = kmap_local_page(spage) + soff;
+ } while (0);
}
- if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst)))
- dst = page_to_virt(sg_page(req->dst)) + req->dst->offset;
- else
- dst = scratch->dst;
+ stream = crypto_acomp_lock_stream_bh(&crypto_scomp_alg(scomp)->streams);
- if (dir)
- ret = crypto_scomp_compress(scomp, src, req->slen,
- dst, &req->dlen, *ctx);
+ if (!src_isvirt && !src) {
+ const u8 *src;
+
+ scratch = scomp_lock_scratch();
+ src = scratch->src;
+ memcpy_from_sglist(scratch->src, req->src, 0, slen);
+
+ if (dir)
+ ret = crypto_scomp_compress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
+ else
+ ret = crypto_scomp_decompress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
+
+ scomp_unlock_scratch(scratch);
+ } else if (dir)
+ ret = crypto_scomp_compress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
else
- ret = crypto_scomp_decompress(scomp, src, req->slen,
- dst, &req->dlen, *ctx);
- if (!ret) {
- if (!req->dst) {
- req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
- if (!req->dst) {
- ret = -ENOMEM;
- goto out;
- }
- } else if (req->dlen > dlen) {
- ret = -ENOSPC;
- goto out;
- }
- if (dst == scratch->dst) {
- scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
- req->dlen, 1);
- } else {
- int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE);
- int i;
- struct page *dst_page = sg_page(req->dst);
-
- for (i = 0; i < nr_pages; i++)
- flush_dcache_page(dst_page + i);
+ ret = crypto_scomp_decompress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
+
+ crypto_acomp_unlock_stream_bh(stream);
+
+ req->dlen = dlen;
+
+ if (!src_isvirt && src)
+ kunmap_local(src);
+ if (!dst_isvirt) {
+ kunmap_local(dst);
+ dlen += doff;
+ for (;;) {
+ flush_dcache_page(dpage);
+ if (dlen <= PAGE_SIZE)
+ break;
+ dlen -= PAGE_SIZE;
+ dpage = nth_page(dpage, 1);
}
}
-out:
- spin_unlock(&scratch->lock);
+
return ret;
}
@@ -198,6 +293,7 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
crypto_free_scomp(*ctx);
+ flush_work(&scomp_scratch_work);
mutex_lock(&scomp_lock);
if (!--scomp_scratch_users)
crypto_scomp_free_scratches();
@@ -225,46 +321,21 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
crt->compress = scomp_acomp_compress;
crt->decompress = scomp_acomp_decompress;
- crt->dst_free = sgl_free;
- crt->reqsize = sizeof(void *);
return 0;
}
-struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
-{
- struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
- struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
- struct crypto_scomp *scomp = *tfm_ctx;
- void *ctx;
-
- ctx = crypto_scomp_alloc_ctx(scomp);
- if (IS_ERR(ctx)) {
- kfree(req);
- return NULL;
- }
-
- *req->__ctx = ctx;
-
- return req;
-}
-
-void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
+static void crypto_scomp_destroy(struct crypto_alg *alg)
{
- struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
- struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
- struct crypto_scomp *scomp = *tfm_ctx;
- void *ctx = *req->__ctx;
+ struct scomp_alg *scomp = __crypto_scomp_alg(alg);
- if (ctx)
- crypto_scomp_free_ctx(scomp, ctx);
+ crypto_acomp_free_streams(&scomp->streams);
}
static const struct crypto_type crypto_scomp_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_scomp_init_tfm,
+ .destroy = crypto_scomp_destroy,
#ifdef CONFIG_PROC_FS
.show = crypto_scomp_show,
#endif
@@ -275,14 +346,24 @@ static const struct crypto_type crypto_scomp_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SCOMPRESS,
.tfmsize = offsetof(struct crypto_scomp, base),
+ .algsize = offsetof(struct scomp_alg, base),
};
-int crypto_register_scomp(struct scomp_alg *alg)
+static void scomp_prepare_alg(struct scomp_alg *alg)
{
struct crypto_alg *base = &alg->calg.base;
comp_prepare_alg(&alg->calg);
+ base->cra_flags |= CRYPTO_ALG_REQ_VIRT;
+}
+
+int crypto_register_scomp(struct scomp_alg *alg)
+{
+ struct crypto_alg *base = &alg->calg.base;
+
+ scomp_prepare_alg(alg);
+
base->cra_type = &crypto_scomp_type;
base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
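In the reworked scomp_acomp_comp_decomp(), the n computed for each side is the index of the page holding the buffer's last byte, and it decides whether the scatterlist can be mapped directly with kmap_local_page(). As a worked example with 4 KiB pages, doff = 4000 and dlen = 200 give n = 0 + (199 + 4000) / 4096 = 1: the output spans two pages, so if that second page is in highmem the direct mapping is refused (-ENOSYS for the destination, while a source in the same situation falls back to the per-CPU scratch page filled via memcpy_from_sglist(), with scratch pages for other CPUs allocated lazily from the workqueue).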
diff --git a/crypto/seed.c b/crypto/seed.c
index d05d8ed909fa..815391f213de 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -460,7 +460,7 @@ static void __exit seed_fini(void)
crypto_unregister_alg(&seed_alg);
}
-subsys_initcall(seed_init);
+module_init(seed_init);
module_exit(seed_fini);
MODULE_DESCRIPTION("SEED Cipher Algorithm");
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 17e11d51ddc3..2bae99e33526 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -64,20 +64,9 @@ static int seqiv_aead_encrypt(struct aead_request *req)
data = req->base.data;
info = req->iv;
- if (req->src != req->dst) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
-
- skcipher_request_set_sync_tfm(nreq, ctx->sknull);
- skcipher_request_set_callback(nreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(nreq, req->src, req->dst,
- req->assoclen + req->cryptlen,
- NULL);
-
- err = crypto_skcipher_encrypt(nreq);
- if (err)
- return err;
- }
+ if (req->src != req->dst)
+ memcpy_sglist(req->dst, req->src,
+ req->assoclen + req->cryptlen);
if (unlikely(!IS_ALIGNED((unsigned long)info,
crypto_aead_alignmask(geniv) + 1))) {
@@ -179,7 +168,7 @@ static void __exit seqiv_module_exit(void)
crypto_unregister_template(&seqiv_tmpl);
}
-subsys_initcall(seqiv_module_init);
+module_init(seqiv_module_init);
module_exit(seqiv_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index f6ef187be6fe..b21e7606c652 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -599,7 +599,7 @@ static void __exit serpent_mod_fini(void)
crypto_unregister_alg(&srp_alg);
}
-subsys_initcall(serpent_mod_init);
+module_init(serpent_mod_init);
module_exit(serpent_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 325b57fe28dc..024e8043bab0 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -12,13 +12,11 @@
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
-#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
@@ -39,38 +37,31 @@ static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
memzero_explicit(temp, sizeof(temp));
}
-int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- return sha1_base_do_update(desc, data, len, sha1_generic_block_fn);
+ return sha1_base_do_update_blocks(desc, data, len,
+ sha1_generic_block_fn);
}
-EXPORT_SYMBOL(crypto_sha1_update);
-static int sha1_final(struct shash_desc *desc, u8 *out)
+static int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
- sha1_base_do_finalize(desc, sha1_generic_block_fn);
+ sha1_base_do_finup(desc, data, len, sha1_generic_block_fn);
return sha1_base_finish(desc, out);
}
-int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- sha1_base_do_update(desc, data, len, sha1_generic_block_fn);
- return sha1_final(desc, out);
-}
-EXPORT_SYMBOL(crypto_sha1_finup);
-
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = crypto_sha1_update,
- .final = sha1_final,
.finup = crypto_sha1_finup,
- .descsize = sizeof(struct sha1_state),
+ .descsize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -86,7 +77,7 @@ static void __exit sha1_generic_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(sha1_generic_mod_init);
+module_init(sha1_generic_mod_init);
module_exit(sha1_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha256.c b/crypto/sha256.c
new file mode 100644
index 000000000000..4aeb213bab11
--- /dev/null
+++ b/crypto/sha256.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto API wrapper for the SHA-256 and SHA-224 library functions
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
+ */
+#include <crypto/internal/hash.h>
+#include <crypto/internal/sha2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+ 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+ 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+ 0x2f
+};
+EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
+
+const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
+
+static int crypto_sha256_init(struct shash_desc *desc)
+{
+ sha256_block_init(shash_desc_ctx(desc));
+ return 0;
+}
+
+static inline int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len, bool force_generic)
+{
+ struct crypto_sha256_state *sctx = shash_desc_ctx(desc);
+ int remain = len % SHA256_BLOCK_SIZE;
+
+ sctx->count += len - remain;
+ sha256_choose_blocks(sctx->state, data, len / SHA256_BLOCK_SIZE,
+ force_generic, !force_generic);
+ return remain;
+}
+
+static int crypto_sha256_update_generic(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return crypto_sha256_update(desc, data, len, true);
+}
+
+static int crypto_sha256_update_lib(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ sha256_update(shash_desc_ctx(desc), data, len);
+ return 0;
+}
+
+static int crypto_sha256_update_arch(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return crypto_sha256_update(desc, data, len, false);
+}
+
+static int crypto_sha256_final_lib(struct shash_desc *desc, u8 *out)
+{
+ sha256_final(shash_desc_ctx(desc), out);
+ return 0;
+}
+
+static __always_inline int crypto_sha256_finup(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len, u8 *out,
+ bool force_generic)
+{
+ struct crypto_sha256_state *sctx = shash_desc_ctx(desc);
+ unsigned int remain = len;
+ u8 *buf;
+
+ if (len >= SHA256_BLOCK_SIZE)
+ remain = crypto_sha256_update(desc, data, len, force_generic);
+ sctx->count += remain;
+ buf = memcpy(sctx + 1, data + len - remain, remain);
+ sha256_finup(sctx, buf, remain, out,
+ crypto_shash_digestsize(desc->tfm), force_generic,
+ !force_generic);
+ return 0;
+}
+
+static int crypto_sha256_finup_generic(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return crypto_sha256_finup(desc, data, len, out, true);
+}
+
+static int crypto_sha256_finup_arch(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return crypto_sha256_finup(desc, data, len, out, false);
+}
+
+static int crypto_sha256_digest_generic(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ crypto_sha256_init(desc);
+ return crypto_sha256_finup_generic(desc, data, len, out);
+}
+
+static int crypto_sha256_digest_lib(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ sha256(data, len, out);
+ return 0;
+}
+
+static int crypto_sha256_digest_arch(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ crypto_sha256_init(desc);
+ return crypto_sha256_finup_arch(desc, data, len, out);
+}
+
+static int crypto_sha224_init(struct shash_desc *desc)
+{
+ sha224_block_init(shash_desc_ctx(desc));
+ return 0;
+}
+
+static int crypto_sha224_final_lib(struct shash_desc *desc, u8 *out)
+{
+ sha224_final(shash_desc_ctx(desc), out);
+ return 0;
+}
+
+static int crypto_sha256_import_lib(struct shash_desc *desc, const void *in)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ const u8 *p = in;
+
+ memcpy(sctx, p, sizeof(*sctx));
+ p += sizeof(*sctx);
+ sctx->count += *p;
+ return 0;
+}
+
+static int crypto_sha256_export_lib(struct shash_desc *desc, void *out)
+{
+ struct sha256_state *sctx0 = shash_desc_ctx(desc);
+ struct sha256_state sctx = *sctx0;
+ unsigned int partial;
+ u8 *p = out;
+
+ partial = sctx.count % SHA256_BLOCK_SIZE;
+ sctx.count -= partial;
+ memcpy(p, &sctx, sizeof(sctx));
+ p += sizeof(sctx);
+ *p = partial;
+ return 0;
+}
+
+static struct shash_alg algs[] = {
+ {
+ .base.cra_name = "sha256",
+ .base.cra_driver_name = "sha256-generic",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
+ .base.cra_blocksize = SHA256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = crypto_sha256_init,
+ .update = crypto_sha256_update_generic,
+ .finup = crypto_sha256_finup_generic,
+ .digest = crypto_sha256_digest_generic,
+ .descsize = sizeof(struct crypto_sha256_state),
+ },
+ {
+ .base.cra_name = "sha224",
+ .base.cra_driver_name = "sha224-generic",
+ .base.cra_priority = 100,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
+ .base.cra_blocksize = SHA224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = crypto_sha224_init,
+ .update = crypto_sha256_update_generic,
+ .finup = crypto_sha256_finup_generic,
+ .descsize = sizeof(struct crypto_sha256_state),
+ },
+ {
+ .base.cra_name = "sha256",
+ .base.cra_driver_name = "sha256-lib",
+ .base.cra_blocksize = SHA256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = crypto_sha256_init,
+ .update = crypto_sha256_update_lib,
+ .final = crypto_sha256_final_lib,
+ .digest = crypto_sha256_digest_lib,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct crypto_sha256_state) +
+ SHA256_BLOCK_SIZE + 1,
+ .import = crypto_sha256_import_lib,
+ .export = crypto_sha256_export_lib,
+ },
+ {
+ .base.cra_name = "sha224",
+ .base.cra_driver_name = "sha224-lib",
+ .base.cra_blocksize = SHA224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = crypto_sha224_init,
+ .update = crypto_sha256_update_lib,
+ .final = crypto_sha224_final_lib,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct crypto_sha256_state) +
+ SHA256_BLOCK_SIZE + 1,
+ .import = crypto_sha256_import_lib,
+ .export = crypto_sha256_export_lib,
+ },
+ {
+ .base.cra_name = "sha256",
+ .base.cra_driver_name = "sha256-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
+ .base.cra_blocksize = SHA256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = crypto_sha256_init,
+ .update = crypto_sha256_update_arch,
+ .finup = crypto_sha256_finup_arch,
+ .digest = crypto_sha256_digest_arch,
+ .descsize = sizeof(struct crypto_sha256_state),
+ },
+ {
+ .base.cra_name = "sha224",
+ .base.cra_driver_name = "sha224-" __stringify(ARCH),
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
+ .base.cra_blocksize = SHA224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = crypto_sha224_init,
+ .update = crypto_sha256_update_arch,
+ .finup = crypto_sha256_finup_arch,
+ .descsize = sizeof(struct crypto_sha256_state),
+ },
+};
+
+static unsigned int num_algs;
+
+static int __init crypto_sha256_mod_init(void)
+{
+ /* register the arch flavours only if they differ from generic */
+ num_algs = ARRAY_SIZE(algs);
+ BUILD_BUG_ON(ARRAY_SIZE(algs) <= 2);
+ if (!sha256_is_arch_optimized())
+ num_algs -= 2;
+	return crypto_register_shashes(algs, num_algs);
+}
+module_init(crypto_sha256_mod_init);
+
+static void __exit crypto_sha256_mod_exit(void)
+{
+ crypto_unregister_shashes(algs, num_algs);
+}
+module_exit(crypto_sha256_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crypto API wrapper for the SHA-256 and SHA-224 library functions");
+
+MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha256-generic");
+MODULE_ALIAS_CRYPTO("sha256-" __stringify(ARCH));
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha224-generic");
+MODULE_ALIAS_CRYPTO("sha224-" __stringify(ARCH));
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
deleted file mode 100644
index b00521f1a6d4..000000000000
--- a/crypto/sha256_generic.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Crypto API wrapper for the generic SHA256 code from lib/crypto/sha256.c
- *
- * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
- * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
- */
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <crypto/sha2.h>
-#include <crypto/sha256_base.h>
-#include <asm/byteorder.h>
-#include <linux/unaligned.h>
-
-const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
- 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
- 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
- 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
- 0x2f
-};
-EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
-
-const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
- 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
- 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
- 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
- 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
-};
-EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
-
-int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- sha256_update(shash_desc_ctx(desc), data, len);
- return 0;
-}
-EXPORT_SYMBOL(crypto_sha256_update);
-
-static int crypto_sha256_final(struct shash_desc *desc, u8 *out)
-{
- if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE)
- sha224_final(shash_desc_ctx(desc), out);
- else
- sha256_final(shash_desc_ctx(desc), out);
- return 0;
-}
-
-int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash)
-{
- sha256_update(shash_desc_ctx(desc), data, len);
- return crypto_sha256_final(desc, hash);
-}
-EXPORT_SYMBOL(crypto_sha256_finup);
-
-static struct shash_alg sha256_algs[2] = { {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = sha256_base_init,
- .update = crypto_sha256_update,
- .final = crypto_sha256_final,
- .finup = crypto_sha256_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name= "sha256-generic",
- .cra_priority = 100,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA224_DIGEST_SIZE,
- .init = sha224_base_init,
- .update = crypto_sha256_update,
- .final = crypto_sha256_final,
- .finup = crypto_sha256_finup,
- .descsize = sizeof(struct sha256_state),
- .base = {
- .cra_name = "sha224",
- .cra_driver_name= "sha224-generic",
- .cra_priority = 100,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init sha256_generic_mod_init(void)
-{
- return crypto_register_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
-}
-
-static void __exit sha256_generic_mod_fini(void)
-{
- crypto_unregister_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
-}
-
-subsys_initcall(sha256_generic_mod_init);
-module_exit(sha256_generic_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
-
-MODULE_ALIAS_CRYPTO("sha224");
-MODULE_ALIAS_CRYPTO("sha224-generic");
-MODULE_ALIAS_CRYPTO("sha256");
-MODULE_ALIAS_CRYPTO("sha256-generic");
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index b103642b56ea..41d1e506e6de 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -9,10 +9,10 @@
* Ard Biesheuvel <ard.biesheuvel@linaro.org>
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
#include <crypto/sha3.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
#include <linux/unaligned.h>
/*
@@ -161,68 +161,51 @@ static void keccakf(u64 st[25])
int crypto_sha3_init(struct shash_desc *desc)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
-
- sctx->rsiz = 200 - 2 * digest_size;
- sctx->rsizw = sctx->rsiz / 8;
- sctx->partial = 0;
memset(sctx->st, 0, sizeof(sctx->st));
return 0;
}
EXPORT_SYMBOL(crypto_sha3_init);
-int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
+ unsigned int rsiz = crypto_shash_blocksize(desc->tfm);
struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int done;
- const u8 *src;
-
- done = 0;
- src = data;
-
- if ((sctx->partial + len) > (sctx->rsiz - 1)) {
- if (sctx->partial) {
- done = -sctx->partial;
- memcpy(sctx->buf + sctx->partial, data,
- done + sctx->rsiz);
- src = sctx->buf;
- }
+ unsigned int rsizw = rsiz / 8;
- do {
- unsigned int i;
+ do {
+ int i;
- for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
- keccakf(sctx->st);
+ for (i = 0; i < rsizw; i++)
+ sctx->st[i] ^= get_unaligned_le64(data + 8 * i);
+ keccakf(sctx->st);
- done += sctx->rsiz;
- src = data + done;
- } while (done + (sctx->rsiz - 1) < len);
-
- sctx->partial = 0;
- }
- memcpy(sctx->buf + sctx->partial, src, len - done);
- sctx->partial += (len - done);
-
- return 0;
+ data += rsiz;
+ len -= rsiz;
+ } while (len >= rsiz);
+ return len;
}
-EXPORT_SYMBOL(crypto_sha3_update);
-int crypto_sha3_final(struct shash_desc *desc, u8 *out)
+static int crypto_sha3_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
- struct sha3_state *sctx = shash_desc_ctx(desc);
- unsigned int i, inlen = sctx->partial;
unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ unsigned int rsiz = crypto_shash_blocksize(desc->tfm);
+ struct sha3_state *sctx = shash_desc_ctx(desc);
+ __le64 block[SHA3_224_BLOCK_SIZE / 8] = {};
__le64 *digest = (__le64 *)out;
+ unsigned int rsizw = rsiz / 8;
+ u8 *p;
+ int i;
- sctx->buf[inlen++] = 0x06;
- memset(sctx->buf + inlen, 0, sctx->rsiz - inlen);
- sctx->buf[sctx->rsiz - 1] |= 0x80;
+ p = memcpy(block, src, len);
+ p[len++] = 0x06;
+ p[rsiz - 1] |= 0x80;
- for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);
+ for (i = 0; i < rsizw; i++)
+ sctx->st[i] ^= le64_to_cpu(block[i]);
+ memzero_explicit(block, sizeof(block));
keccakf(sctx->st);
@@ -232,49 +215,51 @@ int crypto_sha3_final(struct shash_desc *desc, u8 *out)
if (digest_size & 4)
put_unaligned_le32(sctx->st[i], (__le32 *)digest);
- memset(sctx, 0, sizeof(*sctx));
return 0;
}
-EXPORT_SYMBOL(crypto_sha3_final);
static struct shash_alg algs[] = { {
.digestsize = SHA3_224_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = crypto_sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-224",
.base.cra_driver_name = "sha3-224-generic",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = SHA3_256_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = crypto_sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-256",
.base.cra_driver_name = "sha3-256-generic",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = SHA3_384_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = crypto_sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-384",
.base.cra_driver_name = "sha3-384-generic",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = SHA3_512_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
- .final = crypto_sha3_final,
- .descsize = sizeof(struct sha3_state),
+ .finup = crypto_sha3_finup,
+ .descsize = SHA3_STATE_SIZE,
.base.cra_name = "sha3-512",
.base.cra_driver_name = "sha3-512-generic",
+ .base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.base.cra_blocksize = SHA3_512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
@@ -289,7 +274,7 @@ static void __exit sha3_generic_mod_fini(void)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
-subsys_initcall(sha3_generic_mod_init);
+module_init(sha3_generic_mod_init);
module_exit(sha3_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index ed81813bd420..7368173f545e 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -6,16 +6,10 @@
* Copyright (c) 2003 Kyle McMartin <kyle@debian.org>
*/
#include <crypto/internal/hash.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/crypto.h>
-#include <linux/types.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
-#include <linux/percpu.h>
-#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/unaligned.h>
const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = {
@@ -145,47 +139,42 @@ sha512_transform(u64 *state, const u8 *input)
state[4] += e; state[5] += f; state[6] += g; state[7] += h;
}
-static void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
- int blocks)
+void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
+ int blocks)
{
- while (blocks--) {
+ do {
sha512_transform(sst->state, src);
src += SHA512_BLOCK_SIZE;
- }
+ } while (--blocks);
}
+EXPORT_SYMBOL_GPL(sha512_generic_block_fn);
-int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
- return sha512_base_do_update(desc, data, len, sha512_generic_block_fn);
+ return sha512_base_do_update_blocks(desc, data, len,
+ sha512_generic_block_fn);
}
-EXPORT_SYMBOL(crypto_sha512_update);
-static int sha512_final(struct shash_desc *desc, u8 *hash)
+static int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash)
{
- sha512_base_do_finalize(desc, sha512_generic_block_fn);
+ sha512_base_do_finup(desc, data, len, sha512_generic_block_fn);
return sha512_base_finish(desc, hash);
}
-int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash)
-{
- sha512_base_do_update(desc, data, len, sha512_generic_block_fn);
- return sha512_final(desc, hash);
-}
-EXPORT_SYMBOL(crypto_sha512_finup);
-
static struct shash_alg sha512_algs[2] = { {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_base_init,
.update = crypto_sha512_update,
- .final = sha512_final,
.finup = crypto_sha512_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -193,13 +182,14 @@ static struct shash_alg sha512_algs[2] = { {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_base_init,
.update = crypto_sha512_update,
- .final = sha512_final,
.finup = crypto_sha512_finup,
- .descsize = sizeof(struct sha512_state),
+ .descsize = SHA512_STATE_SIZE,
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -215,7 +205,7 @@ static void __exit sha512_generic_mod_fini(void)
crypto_unregister_shashes(sha512_algs, ARRAY_SIZE(sha512_algs));
}
-subsys_initcall(sha512_generic_mod_init);
+module_init(sha512_generic_mod_init);
module_exit(sha512_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/shash.c b/crypto/shash.c
index 301ab42bf849..4721f5f134f4 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -16,6 +16,24 @@
#include "hash.h"
+static inline bool crypto_shash_block_only(struct crypto_shash *tfm)
+{
+ return crypto_shash_alg(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_BLOCK_ONLY;
+}
+
+static inline bool crypto_shash_final_nonzero(struct crypto_shash *tfm)
+{
+ return crypto_shash_alg(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
+}
+
+static inline bool crypto_shash_finup_max(struct crypto_shash *tfm)
+{
+ return crypto_shash_alg(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_FINUP_MAX;
+}
+
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -46,18 +64,27 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);
-int crypto_shash_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int __crypto_shash_init(struct shash_desc *desc)
{
- return crypto_shash_alg(desc->tfm)->update(desc, data, len);
+ struct crypto_shash *tfm = desc->tfm;
+
+ if (crypto_shash_block_only(tfm)) {
+ u8 *buf = shash_desc_ctx(desc);
+
+ buf += crypto_shash_descsize(tfm) - 1;
+ *buf = 0;
+ }
+
+ return crypto_shash_alg(tfm)->init(desc);
}
-EXPORT_SYMBOL_GPL(crypto_shash_update);
-int crypto_shash_final(struct shash_desc *desc, u8 *out)
+int crypto_shash_init(struct shash_desc *desc)
{
- return crypto_shash_alg(desc->tfm)->final(desc, out);
+ if (crypto_shash_get_flags(desc->tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+ return __crypto_shash_init(desc);
}
-EXPORT_SYMBOL_GPL(crypto_shash_final);
+EXPORT_SYMBOL_GPL(crypto_shash_init);
static int shash_default_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
@@ -68,20 +95,89 @@ static int shash_default_finup(struct shash_desc *desc, const u8 *data,
shash->final(desc, out);
}
-int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int crypto_shash_op_and_zero(
+ int (*op)(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out),
+ struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
{
- return crypto_shash_alg(desc->tfm)->finup(desc, data, len, out);
+ int err;
+
+ err = op(desc, data, len, out);
+ memset(shash_desc_ctx(desc), 0, crypto_shash_descsize(desc->tfm));
+ return err;
+}
+
+int crypto_shash_finup(struct shash_desc *restrict desc, const u8 *data,
+ unsigned int len, u8 *restrict out)
+{
+ struct crypto_shash *tfm = desc->tfm;
+ u8 *blenp = shash_desc_ctx(desc);
+ bool finup_max, nonzero;
+ unsigned int bs;
+ int err;
+ u8 *buf;
+
+ if (!crypto_shash_block_only(tfm)) {
+ if (out)
+ goto finup;
+ return crypto_shash_alg(tfm)->update(desc, data, len);
+ }
+
+ finup_max = out && crypto_shash_finup_max(tfm);
+
+ /* Retain extra block for final nonzero algorithms. */
+ nonzero = crypto_shash_final_nonzero(tfm);
+
+ /*
+ * The partial block buffer follows the algorithm desc context.
+ * The byte following that contains the length.
+ */
+ blenp += crypto_shash_descsize(tfm) - 1;
+ bs = crypto_shash_blocksize(tfm);
+ buf = blenp - bs;
+
+ if (likely(!*blenp && finup_max))
+ goto finup;
+
+ while ((*blenp + len) >= bs + nonzero) {
+ unsigned int nbytes = len - nonzero;
+ const u8 *src = data;
+
+ if (*blenp) {
+ memcpy(buf + *blenp, data, bs - *blenp);
+ nbytes = bs;
+ src = buf;
+ }
+
+ err = crypto_shash_alg(tfm)->update(desc, src, nbytes);
+ if (err < 0)
+ return err;
+
+ data += nbytes - err - *blenp;
+ len -= nbytes - err - *blenp;
+ *blenp = 0;
+ }
+
+ if (*blenp || !out) {
+ memcpy(buf + *blenp, data, len);
+ *blenp += len;
+ if (!out)
+ return 0;
+ data = buf;
+ len = *blenp;
+ }
+
+finup:
+ return crypto_shash_op_and_zero(crypto_shash_alg(tfm)->finup, desc,
+ data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_finup);
static int shash_default_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- struct shash_alg *shash = crypto_shash_alg(desc->tfm);
-
- return shash->init(desc) ?:
- shash->finup(desc, data, len, out);
+ return __crypto_shash_init(desc) ?:
+ crypto_shash_finup(desc, data, len, out);
}
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
@@ -92,7 +188,8 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
- return crypto_shash_alg(tfm)->digest(desc, data, len, out);
+ return crypto_shash_op_and_zero(crypto_shash_alg(tfm)->digest, desc,
+ data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
@@ -100,44 +197,105 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
unsigned int len, u8 *out)
{
SHASH_DESC_ON_STACK(desc, tfm);
- int err;
desc->tfm = tfm;
+ return crypto_shash_digest(desc, data, len, out);
+}
+EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest);
- err = crypto_shash_digest(desc, data, len, out);
+static int __crypto_shash_export(struct shash_desc *desc, void *out,
+ int (*export)(struct shash_desc *desc,
+ void *out))
+{
+ struct crypto_shash *tfm = desc->tfm;
+ u8 *buf = shash_desc_ctx(desc);
+ unsigned int plen, ss;
+
+ plen = crypto_shash_blocksize(tfm) + 1;
+ ss = crypto_shash_statesize(tfm);
+ if (crypto_shash_block_only(tfm))
+ ss -= plen;
+ if (!export) {
+ memcpy(out, buf, ss);
+ return 0;
+ }
- shash_desc_zero(desc);
+ return export(desc, out);
+}
- return err;
+int crypto_shash_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_shash_export(desc, out,
+ crypto_shash_alg(desc->tfm)->export_core);
}
-EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest);
+EXPORT_SYMBOL_GPL(crypto_shash_export_core);
int crypto_shash_export(struct shash_desc *desc, void *out)
{
struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
- if (shash->export)
- return shash->export(desc, out);
+ if (crypto_shash_block_only(tfm)) {
+ unsigned int plen = crypto_shash_blocksize(tfm) + 1;
+ unsigned int descsize = crypto_shash_descsize(tfm);
+ unsigned int ss = crypto_shash_statesize(tfm);
+ u8 *buf = shash_desc_ctx(desc);
- memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(tfm));
- return 0;
+ memcpy(out + ss - plen, buf + descsize - plen, plen);
+ }
+ return __crypto_shash_export(desc, out, crypto_shash_alg(tfm)->export);
}
EXPORT_SYMBOL_GPL(crypto_shash_export);
-int crypto_shash_import(struct shash_desc *desc, const void *in)
+static int __crypto_shash_import(struct shash_desc *desc, const void *in,
+ int (*import)(struct shash_desc *desc,
+ const void *in))
{
struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *shash = crypto_shash_alg(tfm);
+ unsigned int descsize, plen, ss;
+ u8 *buf = shash_desc_ctx(desc);
if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
- if (shash->import)
- return shash->import(desc, in);
+ ss = crypto_shash_statesize(tfm);
+ if (crypto_shash_block_only(tfm)) {
+ plen = crypto_shash_blocksize(tfm) + 1;
+ ss -= plen;
+ descsize = crypto_shash_descsize(tfm);
+ buf[descsize - 1] = 0;
+ }
+ if (!import) {
+ memcpy(buf, in, ss);
+ return 0;
+ }
- memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm));
- return 0;
+ return import(desc, in);
+}
+
+int crypto_shash_import_core(struct shash_desc *desc, const void *in)
+{
+ return __crypto_shash_import(desc, in,
+ crypto_shash_alg(desc->tfm)->import_core);
+}
+EXPORT_SYMBOL_GPL(crypto_shash_import_core);
+
+int crypto_shash_import(struct shash_desc *desc, const void *in)
+{
+ struct crypto_shash *tfm = desc->tfm;
+ int err;
+
+ err = __crypto_shash_import(desc, in, crypto_shash_alg(tfm)->import);
+ if (crypto_shash_block_only(tfm)) {
+ unsigned int plen = crypto_shash_blocksize(tfm) + 1;
+ unsigned int descsize = crypto_shash_descsize(tfm);
+ unsigned int ss = crypto_shash_statesize(tfm);
+ u8 *buf = shash_desc_ctx(desc);
+
+ memcpy(buf + descsize - plen, in + ss - plen, plen);
+ if (buf[descsize - 1] >= plen)
+ err = -EOVERFLOW;
+ }
+ return err;
}
EXPORT_SYMBOL_GPL(crypto_shash_import);
@@ -153,9 +311,6 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
struct shash_alg *alg = crypto_shash_alg(hash);
- int err;
-
- hash->descsize = alg->descsize;
shash_set_needkey(hash, alg);
@@ -165,18 +320,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
if (!alg->init_tfm)
return 0;
- err = alg->init_tfm(hash);
- if (err)
- return err;
-
- /* ->init_tfm() may have increased the descsize. */
- if (WARN_ON_ONCE(hash->descsize > HASH_MAX_DESCSIZE)) {
- if (alg->exit_tfm)
- alg->exit_tfm(hash);
- return -EINVAL;
- }
-
- return 0;
+ return alg->init_tfm(hash);
}
static void crypto_shash_free_instance(struct crypto_instance *inst)
@@ -227,6 +371,7 @@ const struct crypto_type crypto_shash_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SHASH,
.tfmsize = offsetof(struct crypto_shash, base),
+ .algsize = offsetof(struct shash_alg, base),
};
int crypto_grab_shash(struct crypto_shash_spawn *spawn,
@@ -273,8 +418,6 @@ struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
if (IS_ERR(nhash))
return nhash;
- nhash->descsize = hash->descsize;
-
if (alg->clone_tfm) {
err = alg->clone_tfm(nhash, hash);
if (err) {
@@ -283,6 +426,9 @@ struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
}
}
+ if (alg->exit_tfm)
+ crypto_shash_tfm(nhash)->exit = crypto_shash_exit_tfm;
+
return nhash;
}
EXPORT_SYMBOL_GPL(crypto_clone_shash);
@@ -303,14 +449,21 @@ int hash_prepare_alg(struct hash_alg_common *alg)
return 0;
}
+static int shash_default_export_core(struct shash_desc *desc, void *out)
+{
+ return -ENOSYS;
+}
+
+static int shash_default_import_core(struct shash_desc *desc, const void *in)
+{
+ return -ENOSYS;
+}
+
static int shash_prepare_alg(struct shash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
int err;
- if (alg->descsize > HASH_MAX_DESCSIZE)
- return -EINVAL;
-
if ((alg->export && !alg->import) || (alg->import && !alg->export))
return -EINVAL;
@@ -320,6 +473,7 @@ static int shash_prepare_alg(struct shash_alg *alg)
base->cra_type = &crypto_shash_type;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
+ base->cra_flags |= CRYPTO_ALG_REQ_VIRT;
/*
* Handle missing optional functions. For each one we can either
@@ -336,11 +490,30 @@ static int shash_prepare_alg(struct shash_alg *alg)
alg->finup = shash_default_finup;
if (!alg->digest)
alg->digest = shash_default_digest;
- if (!alg->export)
+ if (!alg->export && !alg->halg.statesize)
alg->halg.statesize = alg->descsize;
if (!alg->setkey)
alg->setkey = shash_no_setkey;
+ if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) {
+ BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256);
+ alg->descsize += base->cra_blocksize + 1;
+ alg->statesize += base->cra_blocksize + 1;
+ alg->export_core = alg->export;
+ alg->import_core = alg->import;
+ } else if (!alg->export_core || !alg->import_core) {
+ alg->export_core = shash_default_export_core;
+ alg->import_core = shash_default_import_core;
+ base->cra_flags |= CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+ }
+
+ if (alg->descsize > HASH_MAX_DESCSIZE)
+ return -EINVAL;
+ if (alg->statesize > HASH_MAX_STATESIZE)
+ return -EINVAL;
+
+ base->cra_reqsize = sizeof(struct shash_desc) + alg->descsize;
+
return 0;
}
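Assuming the layout set up by shash_prepare_alg() above, the extra blocksize + 1 bytes appended to descsize hold the buffered partial block followed by a one-byte count, which is how crypto_shash_finup() finds them. An illustrative helper (hypothetical name) showing that addressing:

static u8 *example_partial_block(struct shash_desc *desc, u8 **countp)
{
	struct crypto_shash *tfm = desc->tfm;
	u8 *blenp = shash_desc_ctx(desc);

	/* The count byte sits at the very end of the descriptor context. */
	blenp += crypto_shash_descsize(tfm) - 1;
	*countp = blenp;
	/* The partial block immediately precedes the count byte. */
	return blenp - crypto_shash_blocksize(tfm);
}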
diff --git a/crypto/sig.c b/crypto/sig.c
index dfc7cae90802..beba745b6405 100644
--- a/crypto/sig.c
+++ b/crypto/sig.c
@@ -74,6 +74,7 @@ static const struct crypto_type crypto_sig_type = {
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SIG,
.tfmsize = offsetof(struct crypto_sig, base),
+ .algsize = offsetof(struct sig_alg, base),
};
struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask)
@@ -102,6 +103,11 @@ static int sig_default_set_key(struct crypto_sig *tfm,
return -ENOSYS;
}
+static unsigned int sig_default_size(struct crypto_sig *tfm)
+{
+ return DIV_ROUND_UP_POW2(crypto_sig_keysize(tfm), BITS_PER_BYTE);
+}
+
static int sig_prepare_alg(struct sig_alg *alg)
{
struct crypto_alg *base = &alg->base;
@@ -117,9 +123,9 @@ static int sig_prepare_alg(struct sig_alg *alg)
if (!alg->key_size)
return -EINVAL;
if (!alg->max_size)
- alg->max_size = alg->key_size;
+ alg->max_size = sig_default_size;
if (!alg->digest_size)
- alg->digest_size = alg->key_size;
+ alg->digest_size = sig_default_size;
base->cra_type = &crypto_sig_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index a9eb2dcf2898..de5fc91bba26 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -17,310 +17,40 @@
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <net/netlink.h>
#include "skcipher.h"
#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e
-enum {
- SKCIPHER_WALK_SLOW = 1 << 0,
- SKCIPHER_WALK_COPY = 1 << 1,
- SKCIPHER_WALK_DIFF = 1 << 2,
- SKCIPHER_WALK_SLEEP = 1 << 3,
-};
-
static const struct crypto_type crypto_skcipher_type;
-static int skcipher_walk_next(struct skcipher_walk *walk);
-
-static inline void skcipher_map_src(struct skcipher_walk *walk)
-{
- walk->src.virt.addr = scatterwalk_map(&walk->in);
-}
-
-static inline void skcipher_map_dst(struct skcipher_walk *walk)
-{
- walk->dst.virt.addr = scatterwalk_map(&walk->out);
-}
-
-static inline void skcipher_unmap_src(struct skcipher_walk *walk)
-{
- scatterwalk_unmap(walk->src.virt.addr);
-}
-
-static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
-{
- scatterwalk_unmap(walk->dst.virt.addr);
-}
-
-static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
-{
- return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
-}
-
static inline struct skcipher_alg *__crypto_skcipher_alg(
struct crypto_alg *alg)
{
return container_of(alg, struct skcipher_alg, base);
}
-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
-{
- u8 *addr = PTR_ALIGN(walk->buffer, walk->alignmask + 1);
-
- scatterwalk_copychunks(addr, &walk->out, bsize, 1);
- return 0;
-}
-
-/**
- * skcipher_walk_done() - finish one step of a skcipher_walk
- * @walk: the skcipher_walk
- * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
- * or a -errno value to terminate the walk due to an error
- *
- * This function cleans up after one step of walking through the source and
- * destination scatterlists, and advances to the next step if applicable.
- * walk->nbytes is set to the number of bytes available in the next step,
- * walk->total is set to the new total number of bytes remaining, and
- * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
- * is no more data, or if an error occurred (i.e. -errno return), then
- * walk->nbytes and walk->total are set to 0 and all resources owned by the
- * skcipher_walk are freed.
- *
- * Return: 0 or a -errno value. If @res was a -errno value then it will be
- * returned, but other errors may occur too.
- */
-int skcipher_walk_done(struct skcipher_walk *walk, int res)
-{
- unsigned int n = walk->nbytes; /* num bytes processed this step */
- unsigned int total = 0; /* new total remaining */
-
- if (!n)
- goto finish;
-
- if (likely(res >= 0)) {
- n -= res; /* subtract num bytes *not* processed */
- total = walk->total - n;
- }
-
- if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
- SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF)))) {
-unmap_src:
- skcipher_unmap_src(walk);
- } else if (walk->flags & SKCIPHER_WALK_DIFF) {
- skcipher_unmap_dst(walk);
- goto unmap_src;
- } else if (walk->flags & SKCIPHER_WALK_COPY) {
- skcipher_map_dst(walk);
- memcpy(walk->dst.virt.addr, walk->page, n);
- skcipher_unmap_dst(walk);
- } else { /* SKCIPHER_WALK_SLOW */
- if (res > 0) {
- /*
- * Didn't process all bytes. Either the algorithm is
- * broken, or this was the last step and it turned out
- * the message wasn't evenly divisible into blocks but
- * the algorithm requires it.
- */
- res = -EINVAL;
- total = 0;
- } else
- n = skcipher_done_slow(walk, n);
- }
-
- if (res > 0)
- res = 0;
-
- walk->total = total;
- walk->nbytes = 0;
-
- scatterwalk_advance(&walk->in, n);
- scatterwalk_advance(&walk->out, n);
- scatterwalk_done(&walk->in, 0, total);
- scatterwalk_done(&walk->out, 1, total);
-
- if (total) {
- if (walk->flags & SKCIPHER_WALK_SLEEP)
- cond_resched();
- walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF);
- return skcipher_walk_next(walk);
- }
-
-finish:
- /* Short-circuit for the common/fast path. */
- if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
- goto out;
-
- if (walk->iv != walk->oiv)
- memcpy(walk->oiv, walk->iv, walk->ivsize);
- if (walk->buffer != walk->page)
- kfree(walk->buffer);
- if (walk->page)
- free_page((unsigned long)walk->page);
-
-out:
- return res;
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_done);
-
-static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
-{
- unsigned alignmask = walk->alignmask;
- unsigned n;
- u8 *buffer;
-
- if (!walk->buffer)
- walk->buffer = walk->page;
- buffer = walk->buffer;
- if (!buffer) {
- /* Min size for a buffer of bsize bytes aligned to alignmask */
- n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-
- buffer = kzalloc(n, skcipher_walk_gfp(walk));
- if (!buffer)
- return skcipher_walk_done(walk, -ENOMEM);
- walk->buffer = buffer;
- }
- walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
- walk->src.virt.addr = walk->dst.virt.addr;
-
- scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
-
- walk->nbytes = bsize;
- walk->flags |= SKCIPHER_WALK_SLOW;
-
- return 0;
-}
-
-static int skcipher_next_copy(struct skcipher_walk *walk)
-{
- u8 *tmp = walk->page;
-
- skcipher_map_src(walk);
- memcpy(tmp, walk->src.virt.addr, walk->nbytes);
- skcipher_unmap_src(walk);
-
- walk->src.virt.addr = tmp;
- walk->dst.virt.addr = tmp;
- return 0;
-}
-
-static int skcipher_next_fast(struct skcipher_walk *walk)
-{
- unsigned long diff;
-
- diff = offset_in_page(walk->in.offset) -
- offset_in_page(walk->out.offset);
- diff |= (u8 *)scatterwalk_page(&walk->in) -
- (u8 *)scatterwalk_page(&walk->out);
-
- skcipher_map_src(walk);
- walk->dst.virt.addr = walk->src.virt.addr;
-
- if (diff) {
- walk->flags |= SKCIPHER_WALK_DIFF;
- skcipher_map_dst(walk);
- }
-
- return 0;
-}
-
-static int skcipher_walk_next(struct skcipher_walk *walk)
-{
- unsigned int bsize;
- unsigned int n;
-
- n = walk->total;
- bsize = min(walk->stride, max(n, walk->blocksize));
- n = scatterwalk_clamp(&walk->in, n);
- n = scatterwalk_clamp(&walk->out, n);
-
- if (unlikely(n < bsize)) {
- if (unlikely(walk->total < walk->blocksize))
- return skcipher_walk_done(walk, -EINVAL);
-
-slow_path:
- return skcipher_next_slow(walk, bsize);
- }
- walk->nbytes = n;
-
- if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
- if (!walk->page) {
- gfp_t gfp = skcipher_walk_gfp(walk);
-
- walk->page = (void *)__get_free_page(gfp);
- if (!walk->page)
- goto slow_path;
- }
- walk->flags |= SKCIPHER_WALK_COPY;
- return skcipher_next_copy(walk);
- }
-
- return skcipher_next_fast(walk);
-}
-
-static int skcipher_copy_iv(struct skcipher_walk *walk)
-{
- unsigned alignmask = walk->alignmask;
- unsigned ivsize = walk->ivsize;
- unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
- unsigned size;
- u8 *iv;
-
- /* Min size for a buffer of stride + ivsize, aligned to alignmask */
- size = aligned_stride + ivsize +
- (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-
- walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
- if (!walk->buffer)
- return -ENOMEM;
-
- iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
-
- walk->iv = memcpy(iv, walk->iv, walk->ivsize);
- return 0;
-}
-
-static int skcipher_walk_first(struct skcipher_walk *walk)
-{
- if (WARN_ON_ONCE(in_hardirq()))
- return -EDEADLK;
-
- walk->buffer = NULL;
- if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
- int err = skcipher_copy_iv(walk);
- if (err)
- return err;
- }
-
- walk->page = NULL;
-
- return skcipher_walk_next(walk);
-}
-
-int skcipher_walk_virt(struct skcipher_walk *walk,
- struct skcipher_request *req, bool atomic)
+int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
+ struct skcipher_request *__restrict req, bool atomic)
{
- const struct skcipher_alg *alg =
- crypto_skcipher_alg(crypto_skcipher_reqtfm(req));
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg;
might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ alg = crypto_skcipher_alg(tfm);
+
walk->total = req->cryptlen;
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
- if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
- walk->flags = SKCIPHER_WALK_SLEEP;
- else
- walk->flags = 0;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
+ atomic = true;
if (unlikely(!walk->total))
return 0;
@@ -328,64 +58,48 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
- /*
- * Accessing 'alg' directly generates better code than using the
- * crypto_skcipher_blocksize() and similar helper functions here, as it
- * prevents the algorithm pointer from being repeatedly reloaded.
- */
- walk->blocksize = alg->base.cra_blocksize;
- walk->ivsize = alg->co.ivsize;
- walk->alignmask = alg->base.cra_alignmask;
+ walk->blocksize = crypto_skcipher_blocksize(tfm);
+ walk->ivsize = crypto_skcipher_ivsize(tfm);
+ walk->alignmask = crypto_skcipher_alignmask(tfm);
if (alg->co.base.cra_type != &crypto_skcipher_type)
walk->stride = alg->co.chunksize;
else
walk->stride = alg->walksize;
- return skcipher_walk_first(walk);
+ return skcipher_walk_first(walk, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
-static int skcipher_walk_aead_common(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
- const struct aead_alg *alg = crypto_aead_alg(crypto_aead_reqtfm(req));
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
- if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
- walk->flags = SKCIPHER_WALK_SLEEP;
- else
- walk->flags = 0;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP))
+ atomic = true;
if (unlikely(!walk->total))
return 0;
- scatterwalk_start(&walk->in, req->src);
- scatterwalk_start(&walk->out, req->dst);
-
- scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
- scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
+ scatterwalk_start_at_pos(&walk->in, req->src, req->assoclen);
+ scatterwalk_start_at_pos(&walk->out, req->dst, req->assoclen);
- scatterwalk_done(&walk->in, 0, walk->total);
- scatterwalk_done(&walk->out, 0, walk->total);
-
- /*
- * Accessing 'alg' directly generates better code than using the
- * crypto_aead_blocksize() and similar helper functions here, as it
- * prevents the algorithm pointer from being repeatedly reloaded.
- */
- walk->blocksize = alg->base.cra_blocksize;
- walk->stride = alg->chunksize;
- walk->ivsize = alg->ivsize;
- walk->alignmask = alg->base.cra_alignmask;
+ walk->blocksize = crypto_aead_blocksize(tfm);
+ walk->stride = crypto_aead_chunksize(tfm);
+ walk->ivsize = crypto_aead_ivsize(tfm);
+ walk->alignmask = crypto_aead_alignmask(tfm);
- return skcipher_walk_first(walk);
+ return skcipher_walk_first(walk, atomic);
}
-int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
walk->total = req->cryptlen;
@@ -393,8 +107,9 @@ int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
-int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -612,7 +327,7 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "type : skcipher\n");
seq_printf(m, "async : %s\n",
- alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
+ str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "min keysize : %u\n", skcipher->min_keysize);
seq_printf(m, "max keysize : %u\n", skcipher->max_keysize);
@@ -656,6 +371,7 @@ static const struct crypto_type crypto_skcipher_type = {
.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.tfmsize = offsetof(struct crypto_skcipher, base),
+ .algsize = offsetof(struct skcipher_alg, base),
};
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
@@ -681,6 +397,7 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
/* Only sync algorithms allowed. */
mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;
+ type &= ~(CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE);
tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
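For context (not part of the patch), cipher implementations still consume the walk API through the usual loop; a trimmed, illustrative caller under those assumptions:

static int example_block_cipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int bsize = crypto_skcipher_blocksize(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes) {
		/* encrypt walk.nbytes - (walk.nbytes % bsize) bytes from
		 * walk.src.virt.addr into walk.dst.virt.addr here */
		err = skcipher_walk_done(&walk, walk.nbytes % bsize);
	}
	return err;
}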
diff --git a/crypto/sm3.c b/crypto/sm3.c
deleted file mode 100644
index 18c2fb73ba16..000000000000
--- a/crypto/sm3.c
+++ /dev/null
@@ -1,246 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described
- * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
- *
- * Copyright (C) 2017 ARM Limited or its affiliates.
- * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
- * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
- */
-
-#include <linux/module.h>
-#include <linux/unaligned.h>
-#include <crypto/sm3.h>
-
-static const u32 ____cacheline_aligned K[64] = {
- 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb,
- 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc,
- 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce,
- 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6,
- 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
- 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
- 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
- 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5,
- 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53,
- 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d,
- 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4,
- 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43,
- 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
- 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
- 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
- 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
-};
-
-/*
- * Transform the message X which consists of 16 32-bit-words. See
- * GM/T 004-2012 for details.
- */
-#define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \
- do { \
- ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \
- ss2 = ss1 ^ rol32((a), 12); \
- d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \
- h += GG ## i(e, f, g) + ss1 + (w1); \
- b = rol32((b), 9); \
- f = rol32((f), 19); \
- h = P0((h)); \
- } while (0)
-
-#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \
- R(1, a, b, c, d, e, f, g, h, t, w1, w2)
-#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \
- R(2, a, b, c, d, e, f, g, h, t, w1, w2)
-
-#define FF1(x, y, z) (x ^ y ^ z)
-#define FF2(x, y, z) ((x & y) | (x & z) | (y & z))
-
-#define GG1(x, y, z) FF1(x, y, z)
-#define GG2(x, y, z) ((x & y) | (~x & z))
-
-/* Message expansion */
-#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17))
-#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23))
-#define I(i) (W[i] = get_unaligned_be32(data + i * 4))
-#define W1(i) (W[i & 0x0f])
-#define W2(i) (W[i & 0x0f] = \
- P1(W[i & 0x0f] \
- ^ W[(i-9) & 0x0f] \
- ^ rol32(W[(i-3) & 0x0f], 15)) \
- ^ rol32(W[(i-13) & 0x0f], 7) \
- ^ W[(i-6) & 0x0f])
-
-static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16])
-{
- u32 a, b, c, d, e, f, g, h, ss1, ss2;
-
- a = sctx->state[0];
- b = sctx->state[1];
- c = sctx->state[2];
- d = sctx->state[3];
- e = sctx->state[4];
- f = sctx->state[5];
- g = sctx->state[6];
- h = sctx->state[7];
-
- R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4));
- R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5));
- R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6));
- R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7));
- R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8));
- R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9));
- R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10));
- R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11));
- R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12));
- R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13));
- R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14));
- R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15));
- R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16));
- R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17));
- R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18));
- R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19));
-
- R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20));
- R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21));
- R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22));
- R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23));
- R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24));
- R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25));
- R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26));
- R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27));
- R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28));
- R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29));
- R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30));
- R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31));
- R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32));
- R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33));
- R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34));
- R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35));
-
- R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36));
- R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37));
- R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38));
- R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39));
- R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40));
- R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41));
- R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42));
- R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43));
- R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44));
- R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45));
- R2(c, d, a, b, g, h, e, f, K[42], W1(42), W2(46));
- R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47));
- R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48));
- R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49));
- R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50));
- R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51));
-
- R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52));
- R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53));
- R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54));
- R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55));
- R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56));
- R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57));
- R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58));
- R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59));
- R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60));
- R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61));
- R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62));
- R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63));
- R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64));
- R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65));
- R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66));
- R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67));
-
- sctx->state[0] ^= a;
- sctx->state[1] ^= b;
- sctx->state[2] ^= c;
- sctx->state[3] ^= d;
- sctx->state[4] ^= e;
- sctx->state[5] ^= f;
- sctx->state[6] ^= g;
- sctx->state[7] ^= h;
-}
-#undef R
-#undef R1
-#undef R2
-#undef I
-#undef W1
-#undef W2
-
-static inline void sm3_block(struct sm3_state *sctx,
- u8 const *data, int blocks, u32 W[16])
-{
- while (blocks--) {
- sm3_transform(sctx, data, W);
- data += SM3_BLOCK_SIZE;
- }
-}
-
-void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len)
-{
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
- u32 W[16];
-
- sctx->count += len;
-
- if ((partial + len) >= SM3_BLOCK_SIZE) {
- int blocks;
-
- if (partial) {
- int p = SM3_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
-
- sm3_block(sctx, sctx->buffer, 1, W);
- }
-
- blocks = len / SM3_BLOCK_SIZE;
- len %= SM3_BLOCK_SIZE;
-
- if (blocks) {
- sm3_block(sctx, data, blocks, W);
- data += blocks * SM3_BLOCK_SIZE;
- }
-
- memzero_explicit(W, sizeof(W));
-
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-}
-EXPORT_SYMBOL_GPL(sm3_update);
-
-void sm3_final(struct sm3_state *sctx, u8 *out)
-{
- const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- __be32 *digest = (__be32 *)out;
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
- u32 W[16];
- int i;
-
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial);
- partial = 0;
-
- sm3_block(sctx, sctx->buffer, 1, W);
- }
-
- memset(sctx->buffer + partial, 0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- sm3_block(sctx, sctx->buffer, 1, W);
-
- for (i = 0; i < 8; i++)
- put_unaligned_be32(sctx->state[i], digest++);
-
- /* Zeroize sensitive information. */
- memzero_explicit(W, sizeof(W));
- memzero_explicit(sctx, sizeof(*sctx));
-}
-EXPORT_SYMBOL_GPL(sm3_final);
-
-MODULE_DESCRIPTION("Generic SM3 library");
-MODULE_LICENSE("GPL v2");
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index a2d23a46924e..7529139fcc96 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -9,15 +9,10 @@
*/
#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
-#include <linux/bitops.h>
-#include <asm/byteorder.h>
-#include <linux/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
@@ -30,38 +25,28 @@ EXPORT_SYMBOL_GPL(sm3_zero_message_hash);
static int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- sm3_update(shash_desc_ctx(desc), data, len);
- return 0;
-}
-
-static int crypto_sm3_final(struct shash_desc *desc, u8 *out)
-{
- sm3_final(shash_desc_ctx(desc), out);
- return 0;
+ return sm3_base_do_update_blocks(desc, data, len, sm3_block_generic);
}
static int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash)
{
- struct sm3_state *sctx = shash_desc_ctx(desc);
-
- if (len)
- sm3_update(sctx, data, len);
- sm3_final(sctx, hash);
- return 0;
+ sm3_base_do_finup(desc, data, len, sm3_block_generic);
+ return sm3_base_finish(desc, hash);
}
static struct shash_alg sm3_alg = {
.digestsize = SM3_DIGEST_SIZE,
.init = sm3_base_init,
.update = crypto_sm3_update,
- .final = crypto_sm3_final,
.finup = crypto_sm3_finup,
- .descsize = sizeof(struct sm3_state),
+ .descsize = SM3_STATE_SIZE,
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -77,7 +62,7 @@ static void __exit sm3_generic_mod_fini(void)
crypto_unregister_shash(&sm3_alg);
}
-subsys_initcall(sm3_generic_mod_init);
+module_init(sm3_generic_mod_init);
module_exit(sm3_generic_mod_fini);
MODULE_LICENSE("GPL v2");
diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c
index 7df86369ac00..d57444e8428c 100644
--- a/crypto/sm4_generic.c
+++ b/crypto/sm4_generic.c
@@ -83,7 +83,7 @@ static void __exit sm4_fini(void)
crypto_unregister_alg(&sm4_alg);
}
-subsys_initcall(sm4_init);
+module_init(sm4_init);
module_exit(sm4_fini);
MODULE_DESCRIPTION("SM4 Cipher Algorithm");
diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c
index dc625ffc54ad..57bbf70f4c22 100644
--- a/crypto/streebog_generic.c
+++ b/crypto/streebog_generic.c
@@ -13,9 +13,10 @@
*/
#include <crypto/internal/hash.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
#include <crypto/streebog.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
static const struct streebog_uint512 buffer0 = { {
0, 0, 0, 0, 0, 0, 0, 0
@@ -919,17 +920,6 @@ static int streebog_init(struct shash_desc *desc)
return 0;
}
-static void streebog_pad(struct streebog_state *ctx)
-{
- if (ctx->fillsize >= STREEBOG_BLOCK_SIZE)
- return;
-
- memset(ctx->buffer + ctx->fillsize, 0,
- sizeof(ctx->buffer) - ctx->fillsize);
-
- ctx->buffer[ctx->fillsize] = 1;
-}
-
static void streebog_add512(const struct streebog_uint512 *x,
const struct streebog_uint512 *y,
struct streebog_uint512 *r)
@@ -984,16 +974,23 @@ static void streebog_stage2(struct streebog_state *ctx, const u8 *data)
streebog_add512(&ctx->Sigma, &m, &ctx->Sigma);
}
-static void streebog_stage3(struct streebog_state *ctx)
+static void streebog_stage3(struct streebog_state *ctx, const u8 *src,
+ unsigned int len)
{
struct streebog_uint512 buf = { { 0 } };
+ union {
+ u8 buffer[STREEBOG_BLOCK_SIZE];
+ struct streebog_uint512 m;
+ } u = {};
- buf.qword[0] = cpu_to_le64(ctx->fillsize << 3);
- streebog_pad(ctx);
+ buf.qword[0] = cpu_to_le64(len << 3);
+ memcpy(u.buffer, src, len);
+ u.buffer[len] = 1;
- streebog_g(&ctx->h, &ctx->N, &ctx->m);
+ streebog_g(&ctx->h, &ctx->N, &u.m);
streebog_add512(&ctx->N, &buf, &ctx->N);
- streebog_add512(&ctx->Sigma, &ctx->m, &ctx->Sigma);
+ streebog_add512(&ctx->Sigma, &u.m, &ctx->Sigma);
+ memzero_explicit(&u, sizeof(u));
streebog_g(&ctx->h, &buffer0, &ctx->N);
streebog_g(&ctx->h, &buffer0, &ctx->Sigma);
memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512));
@@ -1003,42 +1000,22 @@ static int streebog_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct streebog_state *ctx = shash_desc_ctx(desc);
- size_t chunksize;
- if (ctx->fillsize) {
- chunksize = STREEBOG_BLOCK_SIZE - ctx->fillsize;
- if (chunksize > len)
- chunksize = len;
- memcpy(&ctx->buffer[ctx->fillsize], data, chunksize);
- ctx->fillsize += chunksize;
- len -= chunksize;
- data += chunksize;
-
- if (ctx->fillsize == STREEBOG_BLOCK_SIZE) {
- streebog_stage2(ctx, ctx->buffer);
- ctx->fillsize = 0;
- }
- }
-
- while (len >= STREEBOG_BLOCK_SIZE) {
+ do {
streebog_stage2(ctx, data);
data += STREEBOG_BLOCK_SIZE;
len -= STREEBOG_BLOCK_SIZE;
- }
+ } while (len >= STREEBOG_BLOCK_SIZE);
- if (len) {
- memcpy(&ctx->buffer, data, len);
- ctx->fillsize = len;
- }
- return 0;
+ return len;
}
-static int streebog_final(struct shash_desc *desc, u8 *digest)
+static int streebog_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *digest)
{
struct streebog_state *ctx = shash_desc_ctx(desc);
- streebog_stage3(ctx);
- ctx->fillsize = 0;
+ streebog_stage3(ctx, src, len);
if (crypto_shash_digestsize(desc->tfm) == STREEBOG256_DIGEST_SIZE)
memcpy(digest, &ctx->hash.qword[4], STREEBOG256_DIGEST_SIZE);
else
@@ -1050,11 +1027,12 @@ static struct shash_alg algs[2] = { {
.digestsize = STREEBOG256_DIGEST_SIZE,
.init = streebog_init,
.update = streebog_update,
- .final = streebog_final,
+ .finup = streebog_finup,
.descsize = sizeof(struct streebog_state),
.base = {
.cra_name = "streebog256",
.cra_driver_name = "streebog256-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = STREEBOG_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
@@ -1062,11 +1040,12 @@ static struct shash_alg algs[2] = { {
.digestsize = STREEBOG512_DIGEST_SIZE,
.init = streebog_init,
.update = streebog_update,
- .final = streebog_final,
+ .finup = streebog_finup,
.descsize = sizeof(struct streebog_state),
.base = {
.cra_name = "streebog512",
.cra_driver_name = "streebog512-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = STREEBOG_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1082,7 +1061,7 @@ static void __exit streebog_mod_fini(void)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
-subsys_initcall(streebog_mod_init);
+module_init(streebog_mod_init);
module_exit(streebog_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index e1a74cb2cfbe..d1d88debbd71 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Quick & dirty crypto testing module.
+ * Quick & dirty crypto benchmarking module.
*
- * This will only exist until we have a better testing mechanism
+ * This will only exist until we have a better benchmarking mechanism
* (e.g. a char device).
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
@@ -39,7 +39,7 @@
#include "tcrypt.h"
/*
- * Need slab memory for testing (size in number of pages).
+ * Need slab memory for benchmarking (size in number of pages).
*/
#define TVMEMSIZE 4
@@ -1654,10 +1654,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret = min(ret, tcrypt_test("ghash"));
break;
- case 47:
- ret = min(ret, tcrypt_test("crct10dif"));
- break;
-
case 48:
ret = min(ret, tcrypt_test("sha3-224"));
break;
@@ -2272,10 +2268,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_hash_speed("crc32c", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
- case 320:
- test_hash_speed("crct10dif", sec, generic_hash_speed_template);
- if (mode > 300 && mode < 400) break;
- fallthrough;
case 321:
test_hash_speed("poly1305", sec, poly1305_speed_template);
if (mode > 300 && mode < 400) break;
@@ -2876,5 +2868,5 @@ module_param(klen, uint, 0);
MODULE_PARM_DESC(klen, "Key length (defaults to 0)");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Quick & dirty crypto testing module");
+MODULE_DESCRIPTION("Quick & dirty crypto benchmarking module");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 96c843a24607..7f938ac93e58 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Quick & dirty crypto testing module.
+ * Quick & dirty crypto benchmarking module.
*
- * This will only exist until we have a better testing mechanism
+ * This will only exist until we have a better benchmarking mechanism
* (e.g. a char device).
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
diff --git a/crypto/tea.c b/crypto/tea.c
index b315da8c89eb..cb05140e3470 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -255,7 +255,7 @@ MODULE_ALIAS_CRYPTO("tea");
MODULE_ALIAS_CRYPTO("xtea");
MODULE_ALIAS_CRYPTO("xeta");
-subsys_initcall(tea_mod_init);
+module_init(tea_mod_init);
module_exit(tea_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index e61490ba4095..32f753d6c430 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -43,22 +43,22 @@ MODULE_IMPORT_NS("CRYPTO_INTERNAL");
static bool notests;
module_param(notests, bool, 0644);
-MODULE_PARM_DESC(notests, "disable crypto self-tests");
+MODULE_PARM_DESC(notests, "disable all crypto self-tests");
-static bool panic_on_fail;
-module_param(panic_on_fail, bool, 0444);
-
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
-static bool noextratests;
-module_param(noextratests, bool, 0644);
-MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests");
+#ifdef CONFIG_CRYPTO_SELFTESTS_FULL
+static bool noslowtests;
+module_param(noslowtests, bool, 0644);
+MODULE_PARM_DESC(noslowtests, "disable slow crypto self-tests");
static unsigned int fuzz_iterations = 100;
module_param(fuzz_iterations, uint, 0644);
MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
+#else
+#define noslowtests 1
+#define fuzz_iterations 0
#endif
-#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+#ifndef CONFIG_CRYPTO_SELFTESTS
/* a perfect nop */
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
@@ -324,10 +324,9 @@ struct testvec_config {
/*
* The following are the lists of testvec_configs to test for each algorithm
- * type when the basic crypto self-tests are enabled, i.e. when
- * CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is unset. They aim to provide good test
- * coverage, while keeping the test time much shorter than the full fuzz tests
- * so that the basic tests can be enabled in a wider range of circumstances.
+ * type when the "fast" crypto self-tests are enabled. They aim to provide good
+ * test coverage, while keeping the test time much shorter than the "full" tests
+ * so that the "fast" tests can be enabled in a wider range of circumstances.
*/
/* Configs for skciphers and aeads */
@@ -876,8 +875,6 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
err; \
})
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
-
/*
* The fuzz tests use prandom instead of the normal Linux RNG since they don't
* need cryptographically secure random numbers. This greatly improves the
@@ -1191,14 +1188,18 @@ static void generate_random_testvec_config(struct rnd_state *rng,
static void crypto_disable_simd_for_test(void)
{
+#ifdef CONFIG_CRYPTO_SELFTESTS_FULL
migrate_disable();
__this_cpu_write(crypto_simd_disabled_for_test, true);
+#endif
}
static void crypto_reenable_simd_for_test(void)
{
+#ifdef CONFIG_CRYPTO_SELFTESTS_FULL
__this_cpu_write(crypto_simd_disabled_for_test, false);
migrate_enable();
+#endif
}
/*
@@ -1242,15 +1243,6 @@ too_long:
algname);
return -ENAMETOOLONG;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static void crypto_disable_simd_for_test(void)
-{
-}
-
-static void crypto_reenable_simd_for_test(void)
-{
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int build_hash_sglist(struct test_sglist *tsgl,
const struct hash_testvec *vec,
@@ -1691,8 +1683,7 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
return err;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- if (!noextratests) {
+ if (!noslowtests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
@@ -1709,17 +1700,15 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
cond_resched();
}
}
-#endif
return 0;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
/*
* Generate a hash test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
*/
static void generate_random_hash_testvec(struct rnd_state *rng,
- struct shash_desc *desc,
+ struct ahash_request *req,
struct hash_testvec *vec,
unsigned int maxkeysize,
unsigned int maxdatasize,
@@ -1741,16 +1730,17 @@ static void generate_random_hash_testvec(struct rnd_state *rng,
vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize);
generate_random_bytes(rng, (u8 *)vec->key, vec->ksize);
- vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
- vec->ksize);
+ vec->setkey_error = crypto_ahash_setkey(
+ crypto_ahash_reqtfm(req), vec->key, vec->ksize);
/* If the key couldn't be set, no need to continue to digest. */
if (vec->setkey_error)
goto done;
}
/* Digest */
- vec->digest_error = crypto_shash_digest(desc, vec->plaintext,
- vec->psize, (u8 *)vec->digest);
+ vec->digest_error = crypto_hash_digest(
+ crypto_ahash_reqtfm(req), vec->plaintext,
+ vec->psize, (u8 *)vec->digest);
done:
snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"",
vec->psize, vec->ksize);
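A small sketch of the shash-to-ahash conversion performed in this hunk: a one-shot keyed digest through the ahash interface, assuming the crypto_ahash_setkey()/crypto_hash_digest() helpers used above. The wrapper itself is illustrative and not part of the patch.

/* Illustrative helper: one-shot keyed digest via the ahash API, mirroring
 * what generate_random_hash_testvec() now does. */
static int ahash_digest_once(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int klen, const u8 *data,
			     unsigned int len, u8 *out)
{
	int err;

	if (klen) {
		err = crypto_ahash_setkey(tfm, key, klen);
		if (err)
			return err;
	}
	return crypto_hash_digest(tfm, data, len, out);
}
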
@@ -1775,8 +1765,8 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
const char *driver = crypto_ahash_driver_name(tfm);
struct rnd_state rng;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
- struct crypto_shash *generic_tfm = NULL;
- struct shash_desc *generic_desc = NULL;
+ struct ahash_request *generic_req = NULL;
+ struct crypto_ahash *generic_tfm = NULL;
unsigned int i;
struct hash_testvec vec = { 0 };
char vec_name[64];
@@ -1784,7 +1774,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
char cfgname[TESTVEC_CONFIG_NAMELEN];
int err;
- if (noextratests)
+ if (noslowtests)
return 0;
init_rnd_state(&rng);
@@ -1799,7 +1789,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
return 0;
- generic_tfm = crypto_alloc_shash(generic_driver, 0, 0);
+ generic_tfm = crypto_alloc_ahash(generic_driver, 0, 0);
if (IS_ERR(generic_tfm)) {
err = PTR_ERR(generic_tfm);
if (err == -ENOENT) {
@@ -1818,27 +1808,25 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
goto out;
}
- generic_desc = kzalloc(sizeof(*desc) +
- crypto_shash_descsize(generic_tfm), GFP_KERNEL);
- if (!generic_desc) {
+ generic_req = ahash_request_alloc(generic_tfm, GFP_KERNEL);
+ if (!generic_req) {
err = -ENOMEM;
goto out;
}
- generic_desc->tfm = generic_tfm;
/* Check the algorithm properties for consistency. */
- if (digestsize != crypto_shash_digestsize(generic_tfm)) {
+ if (digestsize != crypto_ahash_digestsize(generic_tfm)) {
pr_err("alg: hash: digestsize for %s (%u) doesn't match generic impl (%u)\n",
driver, digestsize,
- crypto_shash_digestsize(generic_tfm));
+ crypto_ahash_digestsize(generic_tfm));
err = -EINVAL;
goto out;
}
- if (blocksize != crypto_shash_blocksize(generic_tfm)) {
+ if (blocksize != crypto_ahash_blocksize(generic_tfm)) {
pr_err("alg: hash: blocksize for %s (%u) doesn't match generic impl (%u)\n",
- driver, blocksize, crypto_shash_blocksize(generic_tfm));
+ driver, blocksize, crypto_ahash_blocksize(generic_tfm));
err = -EINVAL;
goto out;
}
@@ -1857,7 +1845,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
}
for (i = 0; i < fuzz_iterations * 8; i++) {
- generate_random_hash_testvec(&rng, generic_desc, &vec,
+ generate_random_hash_testvec(&rng, generic_req, &vec,
maxkeysize, maxdatasize,
vec_name, sizeof(vec_name));
generate_random_testvec_config(&rng, cfg, cfgname,
@@ -1875,21 +1863,10 @@ out:
kfree(vec.key);
kfree(vec.plaintext);
kfree(vec.digest);
- crypto_free_shash(generic_tfm);
- kfree_sensitive(generic_desc);
+ ahash_request_free(generic_req);
+ crypto_free_ahash(generic_tfm);
return err;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static int test_hash_vs_generic_impl(const char *generic_driver,
- unsigned int maxkeysize,
- struct ahash_request *req,
- struct shash_desc *desc,
- struct test_sglist *tsgl,
- u8 *hashstate)
-{
- return 0;
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int alloc_shash(const char *driver, u32 type, u32 mask,
struct crypto_shash **tfm_ret,
@@ -1900,7 +1877,7 @@ static int alloc_shash(const char *driver, u32 type, u32 mask,
tfm = crypto_alloc_shash(driver, type, mask);
if (IS_ERR(tfm)) {
- if (PTR_ERR(tfm) == -ENOENT) {
+ if (PTR_ERR(tfm) == -ENOENT || PTR_ERR(tfm) == -EEXIST) {
/*
* This algorithm is only available through the ahash
* API, not the shash API, so skip the shash tests.
@@ -2263,8 +2240,7 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
return err;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- if (!noextratests) {
+ if (!noslowtests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
@@ -2281,13 +2257,10 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
cond_resched();
}
}
-#endif
return 0;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
-
-struct aead_extra_tests_ctx {
+struct aead_slow_tests_ctx {
struct rnd_state rng;
struct aead_request *req;
struct crypto_aead *tfm;
@@ -2462,8 +2435,7 @@ static void generate_random_aead_testvec(struct rnd_state *rng,
vec->alen, vec->plen, authsize, vec->klen, vec->novrfy);
}
-static void try_to_generate_inauthentic_testvec(
- struct aead_extra_tests_ctx *ctx)
+static void try_to_generate_inauthentic_testvec(struct aead_slow_tests_ctx *ctx)
{
int i;
@@ -2482,7 +2454,7 @@ static void try_to_generate_inauthentic_testvec(
* Generate inauthentic test vectors (i.e. ciphertext, AAD pairs that aren't the
* result of an encryption with the key) and verify that decryption fails.
*/
-static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
+static int test_aead_inauthentic_inputs(struct aead_slow_tests_ctx *ctx)
{
unsigned int i;
int err;
@@ -2517,7 +2489,7 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
* Test the AEAD algorithm against the corresponding generic implementation, if
* one is available.
*/
-static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
+static int test_aead_vs_generic_impl(struct aead_slow_tests_ctx *ctx)
{
struct crypto_aead *tfm = ctx->tfm;
const char *algname = crypto_aead_alg(tfm)->base.cra_name;
@@ -2621,15 +2593,15 @@ out:
return err;
}
-static int test_aead_extra(const struct alg_test_desc *test_desc,
- struct aead_request *req,
- struct cipher_test_sglists *tsgls)
+static int test_aead_slow(const struct alg_test_desc *test_desc,
+ struct aead_request *req,
+ struct cipher_test_sglists *tsgls)
{
- struct aead_extra_tests_ctx *ctx;
+ struct aead_slow_tests_ctx *ctx;
unsigned int i;
int err;
- if (noextratests)
+ if (noslowtests)
return 0;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -2671,14 +2643,6 @@ out:
kfree(ctx);
return err;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static int test_aead_extra(const struct alg_test_desc *test_desc,
- struct aead_request *req,
- struct cipher_test_sglists *tsgls)
-{
- return 0;
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_aead(int enc, const struct aead_test_suite *suite,
struct aead_request *req,
@@ -2744,7 +2708,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
if (err)
goto out;
- err = test_aead_extra(desc, req, tsgls);
+ err = test_aead_slow(desc, req, tsgls);
out:
free_cipher_test_sglists(tsgls);
aead_request_free(req);
@@ -3018,8 +2982,7 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
return err;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- if (!noextratests) {
+ if (!noslowtests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
@@ -3036,11 +2999,9 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
cond_resched();
}
}
-#endif
return 0;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
/*
* Generate a symmetric cipher test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
@@ -3123,7 +3084,7 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
char cfgname[TESTVEC_CONFIG_NAMELEN];
int err;
- if (noextratests)
+ if (noslowtests)
return 0;
init_rnd_state(&rng);
@@ -3239,14 +3200,6 @@ out:
skcipher_request_free(generic_req);
return err;
}
-#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
-static int test_skcipher_vs_generic_impl(const char *generic_driver,
- struct skcipher_request *req,
- struct cipher_test_sglists *tsgls)
-{
- return 0;
-}
-#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_skcipher(int enc, const struct cipher_test_suite *suite,
struct skcipher_request *req,
@@ -3320,112 +3273,6 @@ out:
return err;
}
-static int test_comp(struct crypto_comp *tfm,
- const struct comp_testvec *ctemplate,
- const struct comp_testvec *dtemplate,
- int ctcount, int dtcount)
-{
- const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
- char *output, *decomp_output;
- unsigned int i;
- int ret;
-
- output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
- if (!output)
- return -ENOMEM;
-
- decomp_output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
- if (!decomp_output) {
- kfree(output);
- return -ENOMEM;
- }
-
- for (i = 0; i < ctcount; i++) {
- int ilen;
- unsigned int dlen = COMP_BUF_SIZE;
-
- memset(output, 0, COMP_BUF_SIZE);
- memset(decomp_output, 0, COMP_BUF_SIZE);
-
- ilen = ctemplate[i].inlen;
- ret = crypto_comp_compress(tfm, ctemplate[i].input,
- ilen, output, &dlen);
- if (ret) {
- printk(KERN_ERR "alg: comp: compression failed "
- "on test %d for %s: ret=%d\n", i + 1, algo,
- -ret);
- goto out;
- }
-
- ilen = dlen;
- dlen = COMP_BUF_SIZE;
- ret = crypto_comp_decompress(tfm, output,
- ilen, decomp_output, &dlen);
- if (ret) {
- pr_err("alg: comp: compression failed: decompress: on test %d for %s failed: ret=%d\n",
- i + 1, algo, -ret);
- goto out;
- }
-
- if (dlen != ctemplate[i].inlen) {
- printk(KERN_ERR "alg: comp: Compression test %d "
- "failed for %s: output len = %d\n", i + 1, algo,
- dlen);
- ret = -EINVAL;
- goto out;
- }
-
- if (memcmp(decomp_output, ctemplate[i].input,
- ctemplate[i].inlen)) {
- pr_err("alg: comp: compression failed: output differs: on test %d for %s\n",
- i + 1, algo);
- hexdump(decomp_output, dlen);
- ret = -EINVAL;
- goto out;
- }
- }
-
- for (i = 0; i < dtcount; i++) {
- int ilen;
- unsigned int dlen = COMP_BUF_SIZE;
-
- memset(decomp_output, 0, COMP_BUF_SIZE);
-
- ilen = dtemplate[i].inlen;
- ret = crypto_comp_decompress(tfm, dtemplate[i].input,
- ilen, decomp_output, &dlen);
- if (ret) {
- printk(KERN_ERR "alg: comp: decompression failed "
- "on test %d for %s: ret=%d\n", i + 1, algo,
- -ret);
- goto out;
- }
-
- if (dlen != dtemplate[i].outlen) {
- printk(KERN_ERR "alg: comp: Decompression test %d "
- "failed for %s: output len = %d\n", i + 1, algo,
- dlen);
- ret = -EINVAL;
- goto out;
- }
-
- if (memcmp(decomp_output, dtemplate[i].output, dlen)) {
- printk(KERN_ERR "alg: comp: Decompression test %d "
- "failed for %s\n", i + 1, algo);
- hexdump(decomp_output, dlen);
- ret = -EINVAL;
- goto out;
- }
- }
-
- ret = 0;
-
-out:
- kfree(decomp_output);
- kfree(output);
- return ret;
-}
-
static int test_acomp(struct crypto_acomp *tfm,
const struct comp_testvec *ctemplate,
const struct comp_testvec *dtemplate,
@@ -3522,21 +3369,6 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- crypto_init_wait(&wait);
- sg_init_one(&src, input_vec, ilen);
- acomp_request_set_params(req, &src, NULL, ilen, 0);
-
- ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
- if (ret) {
- pr_err("alg: acomp: compression failed on NULL dst buffer test %d for %s: ret=%d\n",
- i + 1, algo, -ret);
- kfree(input_vec);
- acomp_request_free(req);
- goto out;
- }
-#endif
-
kfree(input_vec);
acomp_request_free(req);
}
@@ -3598,20 +3430,6 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- crypto_init_wait(&wait);
- acomp_request_set_params(req, &src, NULL, ilen, 0);
-
- ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
- if (ret) {
- pr_err("alg: acomp: decompression failed on NULL dst buffer test %d for %s: ret=%d\n",
- i + 1, algo, -ret);
- kfree(input_vec);
- acomp_request_free(req);
- goto out;
- }
-#endif
-
kfree(input_vec);
acomp_request_free(req);
}
@@ -3713,42 +3531,22 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
- struct crypto_comp *comp;
struct crypto_acomp *acomp;
int err;
- u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
-
- if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
- acomp = crypto_alloc_acomp(driver, type, mask);
- if (IS_ERR(acomp)) {
- if (PTR_ERR(acomp) == -ENOENT)
- return 0;
- pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
- driver, PTR_ERR(acomp));
- return PTR_ERR(acomp);
- }
- err = test_acomp(acomp, desc->suite.comp.comp.vecs,
- desc->suite.comp.decomp.vecs,
- desc->suite.comp.comp.count,
- desc->suite.comp.decomp.count);
- crypto_free_acomp(acomp);
- } else {
- comp = crypto_alloc_comp(driver, type, mask);
- if (IS_ERR(comp)) {
- if (PTR_ERR(comp) == -ENOENT)
- return 0;
- pr_err("alg: comp: Failed to load transform for %s: %ld\n",
- driver, PTR_ERR(comp));
- return PTR_ERR(comp);
- }
-
- err = test_comp(comp, desc->suite.comp.comp.vecs,
- desc->suite.comp.decomp.vecs,
- desc->suite.comp.comp.count,
- desc->suite.comp.decomp.count);
- crypto_free_comp(comp);
- }
+ acomp = crypto_alloc_acomp(driver, type, mask);
+ if (IS_ERR(acomp)) {
+ if (PTR_ERR(acomp) == -ENOENT)
+ return 0;
+ pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
+ driver, PTR_ERR(acomp));
+ return PTR_ERR(acomp);
+ }
+ err = test_acomp(acomp, desc->suite.comp.comp.vecs,
+ desc->suite.comp.decomp.vecs,
+ desc->suite.comp.comp.count,
+ desc->suite.comp.decomp.count);
+ crypto_free_acomp(acomp);
return err;
}
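For reference, a minimal sketch of the acomp-only path that alg_test_comp() now always takes. The buffer handling and the synchronous-wait setup are illustrative and assume the standard acomp request API; this helper does not exist in the patch.

#include <crypto/acompress.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative sketch: allocate an acomp transform, compress one buffer
 * synchronously, and release everything. */
static int compress_once(const char *driver, const void *in, unsigned int ilen,
			 void *out, unsigned int olen)
{
	struct scatterlist src, dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_acomp(driver, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&src, in, ilen);
	sg_init_one(&dst, out, olen);
	acomp_request_set_params(req, &src, &dst, ilen, olen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	err = crypto_wait_req(crypto_acomp_compress(req), &wait);

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}
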
@@ -4328,7 +4126,7 @@ static int test_sig_one(struct crypto_sig *tfm, const struct sig_testvec *vecs)
if (vecs->public_key_vec)
return 0;
- sig_size = crypto_sig_keysize(tfm);
+ sig_size = crypto_sig_maxsize(tfm);
if (sig_size < vecs->c_size) {
pr_err("alg: sig: invalid maxsize %u\n", sig_size);
return -EINVAL;
@@ -4340,13 +4138,14 @@ static int test_sig_one(struct crypto_sig *tfm, const struct sig_testvec *vecs)
/* Run asymmetric signature generation */
err = crypto_sig_sign(tfm, vecs->m, vecs->m_size, sig, sig_size);
- if (err) {
+ if (err < 0) {
pr_err("alg: sig: sign test failed: err %d\n", err);
return err;
}
/* Verify that generated signature equals cooked signature */
- if (memcmp(sig, vecs->c, vecs->c_size) ||
+ if (err != vecs->c_size ||
+ memcmp(sig, vecs->c, vecs->c_size) ||
memchr_inv(sig + vecs->c_size, 0, sig_size - vecs->c_size)) {
pr_err("alg: sig: sign test failed: invalid output\n");
hexdump(sig, sig_size);
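The updated check treats a non-negative crypto_sig_sign() return value as the length of the signature actually produced. A hedged sketch of that calling convention as this hunk assumes it (helper and variable names are illustrative):

/* Sketch only: interpret crypto_sig_sign()'s return value as the produced
 * signature length, as the updated test above assumes. */
static int sign_and_check_len(struct crypto_sig *tfm, const void *msg,
			      unsigned int mlen, void *sig,
			      unsigned int sig_size, unsigned int want_len)
{
	int len = crypto_sig_sign(tfm, msg, mlen, sig, sig_size);

	if (len < 0)
		return len;		/* signing failed */
	if (len != want_len)
		return -EINVAL;		/* unexpected signature size */
	return 0;
}
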
@@ -4505,6 +4304,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
+ .alg = "authenc(hmac(sha256),cts(cbc(aes)))",
+ .test = alg_test_aead,
+ .suite = {
+ .aead = __VECS(krb5_test_aes128_cts_hmac_sha256_128)
+ }
+ }, {
.alg = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
.test = alg_test_null,
.fips_allowed = 1,
@@ -4525,6 +4330,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
+ .alg = "authenc(hmac(sha384),cts(cbc(aes)))",
+ .test = alg_test_aead,
+ .suite = {
+ .aead = __VECS(krb5_test_aes256_cts_hmac_sha384_192)
+ }
+ }, {
.alg = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
.test = alg_test_null,
.fips_allowed = 1,
@@ -4743,9 +4554,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(sm4_cmac128_tv_template)
}
}, {
- .alg = "compress_null",
- .test = alg_test_null,
- }, {
.alg = "crc32",
.test = alg_test_hash,
.fips_allowed = 1,
@@ -4760,20 +4568,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(crc32c_tv_template)
}
}, {
- .alg = "crc64-rocksoft",
- .test = alg_test_hash,
- .fips_allowed = 1,
- .suite = {
- .hash = __VECS(crc64_rocksoft_tv_template)
- }
- }, {
- .alg = "crct10dif",
- .test = alg_test_hash,
- .fips_allowed = 1,
- .suite = {
- .hash = __VECS(crct10dif_tv_template)
- }
- }, {
.alg = "ctr(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
@@ -5398,6 +5192,10 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
.test = alg_test_null,
}, {
+ .alg = "krb5enc(cmac(camellia),cts(cbc(camellia)))",
+ .test = alg_test_aead,
+ .suite.aead = __VECS(krb5_test_camellia_cts_cmac)
+ }, {
.alg = "lrw(aes)",
.generic_driver = "lrw(ecb(aes-generic))",
.test = alg_test_skcipher,
@@ -5562,12 +5360,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
- .alg = "poly1305",
- .test = alg_test_hash,
- .suite = {
- .hash = __VECS(poly1305_tv_template)
- }
- }, {
.alg = "polyval",
.test = alg_test_hash,
.suite = {
@@ -5924,9 +5716,8 @@ static void testmgr_onetime_init(void)
alg_check_test_descs_order();
alg_check_testvec_configs();
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- pr_warn("alg: extra crypto tests enabled. This is intended for developer use only.\n");
-#endif
+ if (!noslowtests)
+ pr_warn("alg: full crypto tests enabled. This is intended for developer use only.\n");
}
static int alg_find_test(const char *alg)
@@ -6015,11 +5806,10 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
test_done:
if (rc) {
- if (fips_enabled || panic_on_fail) {
+ if (fips_enabled) {
fips_fail_notify();
- panic("alg: self-tests for %s (%s) failed in %s mode!\n",
- driver, alg,
- fips_enabled ? "fips" : "panic_on_fail");
+ panic("alg: self-tests for %s (%s) failed in fips mode!\n",
+ driver, alg);
}
pr_warn("alg: self-tests for %s using %s failed (rc=%d)",
alg, driver, rc);
@@ -6064,6 +5854,6 @@ non_fips_alg:
return alg_fips_disabled(driver, alg);
}
-#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
+#endif /* CONFIG_CRYPTO_SELFTESTS */
EXPORT_SYMBOL_GPL(alg_test);
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index d754ab997186..32d099ac9e73 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -6017,309 +6017,6 @@ static const struct hash_testvec rmd160_tv_template[] = {
}
};
-static const u8 zeroes[4096] = { [0 ... 4095] = 0 };
-static const u8 ones[4096] = { [0 ... 4095] = 0xff };
-
-static const struct hash_testvec crc64_rocksoft_tv_template[] = {
- {
- .plaintext = zeroes,
- .psize = 4096,
- .digest = "\x4e\xb6\x22\xeb\x67\xd3\x82\x64",
- }, {
- .plaintext = ones,
- .psize = 4096,
- .digest = "\xac\xa3\xec\x02\x73\xba\xdd\xc0",
- }
-};
-
-static const struct hash_testvec crct10dif_tv_template[] = {
- {
- .plaintext = "abc",
- .psize = 3,
- .digest = (u8 *)(u16 []){ 0x443b },
- }, {
- .plaintext = "1234567890123456789012345678901234567890"
- "123456789012345678901234567890123456789",
- .psize = 79,
- .digest = (u8 *)(u16 []){ 0x4b70 },
- }, {
- .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd"
- "ddddddddddddd",
- .psize = 56,
- .digest = (u8 *)(u16 []){ 0x9ce3 },
- }, {
- .plaintext = "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "123456789012345678901234567890123456789",
- .psize = 319,
- .digest = (u8 *)(u16 []){ 0x44c6 },
- }, {
- .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49"
- "\xe0\x54\xeb\x82\x19\x8d\x24\xbb"
- "\x2f\xc6\x5d\xf4\x68\xff\x96\x0a"
- "\xa1\x38\xcf\x43\xda\x71\x08\x7c"
- "\x13\xaa\x1e\xb5\x4c\xe3\x57\xee"
- "\x85\x1c\x90\x27\xbe\x32\xc9\x60"
- "\xf7\x6b\x02\x99\x0d\xa4\x3b\xd2"
- "\x46\xdd\x74\x0b\x7f\x16\xad\x21"
- "\xb8\x4f\xe6\x5a\xf1\x88\x1f\x93"
- "\x2a\xc1\x35\xcc\x63\xfa\x6e\x05"
- "\x9c\x10\xa7\x3e\xd5\x49\xe0\x77"
- "\x0e\x82\x19\xb0\x24\xbb\x52\xe9"
- "\x5d\xf4\x8b\x22\x96\x2d\xc4\x38"
- "\xcf\x66\xfd\x71\x08\x9f\x13\xaa"
- "\x41\xd8\x4c\xe3\x7a\x11\x85\x1c"
- "\xb3\x27\xbe\x55\xec\x60\xf7\x8e"
- "\x02\x99\x30\xc7\x3b\xd2\x69\x00"
- "\x74\x0b\xa2\x16\xad\x44\xdb\x4f"
- "\xe6\x7d\x14\x88\x1f\xb6\x2a\xc1"
- "\x58\xef\x63\xfa\x91\x05\x9c\x33"
- "\xca\x3e\xd5\x6c\x03\x77\x0e\xa5"
- "\x19\xb0\x47\xde\x52\xe9\x80\x17"
- "\x8b\x22\xb9\x2d\xc4\x5b\xf2\x66"
- "\xfd\x94\x08\x9f\x36\xcd\x41\xd8"
- "\x6f\x06\x7a\x11\xa8\x1c\xb3\x4a"
- "\xe1\x55\xec\x83\x1a\x8e\x25\xbc"
- "\x30\xc7\x5e\xf5\x69\x00\x97\x0b"
- "\xa2\x39\xd0\x44\xdb\x72\x09\x7d"
- "\x14\xab\x1f\xb6\x4d\xe4\x58\xef"
- "\x86\x1d\x91\x28\xbf\x33\xca\x61"
- "\xf8\x6c\x03\x9a\x0e\xa5\x3c\xd3"
- "\x47\xde\x75\x0c\x80\x17\xae\x22"
- "\xb9\x50\xe7\x5b\xf2\x89\x20\x94"
- "\x2b\xc2\x36\xcd\x64\xfb\x6f\x06"
- "\x9d\x11\xa8\x3f\xd6\x4a\xe1\x78"
- "\x0f\x83\x1a\xb1\x25\xbc\x53\xea"
- "\x5e\xf5\x8c\x00\x97\x2e\xc5\x39"
- "\xd0\x67\xfe\x72\x09\xa0\x14\xab"
- "\x42\xd9\x4d\xe4\x7b\x12\x86\x1d"
- "\xb4\x28\xbf\x56\xed\x61\xf8\x8f"
- "\x03\x9a\x31\xc8\x3c\xd3\x6a\x01"
- "\x75\x0c\xa3\x17\xae\x45\xdc\x50"
- "\xe7\x7e\x15\x89\x20\xb7\x2b\xc2"
- "\x59\xf0\x64\xfb\x92\x06\x9d\x34"
- "\xcb\x3f\xd6\x6d\x04\x78\x0f\xa6"
- "\x1a\xb1\x48\xdf\x53\xea\x81\x18"
- "\x8c\x23\xba\x2e\xc5\x5c\xf3\x67"
- "\xfe\x95\x09\xa0\x37\xce\x42\xd9"
- "\x70\x07\x7b\x12\xa9\x1d\xb4\x4b"
- "\xe2\x56\xed\x84\x1b\x8f\x26\xbd"
- "\x31\xc8\x5f\xf6\x6a\x01\x98\x0c"
- "\xa3\x3a\xd1\x45\xdc\x73\x0a\x7e"
- "\x15\xac\x20\xb7\x4e\xe5\x59\xf0"
- "\x87\x1e\x92\x29\xc0\x34\xcb\x62"
- "\xf9\x6d\x04\x9b\x0f\xa6\x3d\xd4"
- "\x48\xdf\x76\x0d\x81\x18\xaf\x23"
- "\xba\x51\xe8\x5c\xf3\x8a\x21\x95"
- "\x2c\xc3\x37\xce\x65\xfc\x70\x07"
- "\x9e\x12\xa9\x40\xd7\x4b\xe2\x79"
- "\x10\x84\x1b\xb2\x26\xbd\x54\xeb"
- "\x5f\xf6\x8d\x01\x98\x2f\xc6\x3a"
- "\xd1\x68\xff\x73\x0a\xa1\x15\xac"
- "\x43\xda\x4e\xe5\x7c\x13\x87\x1e"
- "\xb5\x29\xc0\x57\xee\x62\xf9\x90"
- "\x04\x9b\x32\xc9\x3d\xd4\x6b\x02"
- "\x76\x0d\xa4\x18\xaf\x46\xdd\x51"
- "\xe8\x7f\x16\x8a\x21\xb8\x2c\xc3"
- "\x5a\xf1\x65\xfc\x93\x07\x9e\x35"
- "\xcc\x40\xd7\x6e\x05\x79\x10\xa7"
- "\x1b\xb2\x49\xe0\x54\xeb\x82\x19"
- "\x8d\x24\xbb\x2f\xc6\x5d\xf4\x68"
- "\xff\x96\x0a\xa1\x38\xcf\x43\xda"
- "\x71\x08\x7c\x13\xaa\x1e\xb5\x4c"
- "\xe3\x57\xee\x85\x1c\x90\x27\xbe"
- "\x32\xc9\x60\xf7\x6b\x02\x99\x0d"
- "\xa4\x3b\xd2\x46\xdd\x74\x0b\x7f"
- "\x16\xad\x21\xb8\x4f\xe6\x5a\xf1"
- "\x88\x1f\x93\x2a\xc1\x35\xcc\x63"
- "\xfa\x6e\x05\x9c\x10\xa7\x3e\xd5"
- "\x49\xe0\x77\x0e\x82\x19\xb0\x24"
- "\xbb\x52\xe9\x5d\xf4\x8b\x22\x96"
- "\x2d\xc4\x38\xcf\x66\xfd\x71\x08"
- "\x9f\x13\xaa\x41\xd8\x4c\xe3\x7a"
- "\x11\x85\x1c\xb3\x27\xbe\x55\xec"
- "\x60\xf7\x8e\x02\x99\x30\xc7\x3b"
- "\xd2\x69\x00\x74\x0b\xa2\x16\xad"
- "\x44\xdb\x4f\xe6\x7d\x14\x88\x1f"
- "\xb6\x2a\xc1\x58\xef\x63\xfa\x91"
- "\x05\x9c\x33\xca\x3e\xd5\x6c\x03"
- "\x77\x0e\xa5\x19\xb0\x47\xde\x52"
- "\xe9\x80\x17\x8b\x22\xb9\x2d\xc4"
- "\x5b\xf2\x66\xfd\x94\x08\x9f\x36"
- "\xcd\x41\xd8\x6f\x06\x7a\x11\xa8"
- "\x1c\xb3\x4a\xe1\x55\xec\x83\x1a"
- "\x8e\x25\xbc\x30\xc7\x5e\xf5\x69"
- "\x00\x97\x0b\xa2\x39\xd0\x44\xdb"
- "\x72\x09\x7d\x14\xab\x1f\xb6\x4d"
- "\xe4\x58\xef\x86\x1d\x91\x28\xbf"
- "\x33\xca\x61\xf8\x6c\x03\x9a\x0e"
- "\xa5\x3c\xd3\x47\xde\x75\x0c\x80"
- "\x17\xae\x22\xb9\x50\xe7\x5b\xf2"
- "\x89\x20\x94\x2b\xc2\x36\xcd\x64"
- "\xfb\x6f\x06\x9d\x11\xa8\x3f\xd6"
- "\x4a\xe1\x78\x0f\x83\x1a\xb1\x25"
- "\xbc\x53\xea\x5e\xf5\x8c\x00\x97"
- "\x2e\xc5\x39\xd0\x67\xfe\x72\x09"
- "\xa0\x14\xab\x42\xd9\x4d\xe4\x7b"
- "\x12\x86\x1d\xb4\x28\xbf\x56\xed"
- "\x61\xf8\x8f\x03\x9a\x31\xc8\x3c"
- "\xd3\x6a\x01\x75\x0c\xa3\x17\xae"
- "\x45\xdc\x50\xe7\x7e\x15\x89\x20"
- "\xb7\x2b\xc2\x59\xf0\x64\xfb\x92"
- "\x06\x9d\x34\xcb\x3f\xd6\x6d\x04"
- "\x78\x0f\xa6\x1a\xb1\x48\xdf\x53"
- "\xea\x81\x18\x8c\x23\xba\x2e\xc5"
- "\x5c\xf3\x67\xfe\x95\x09\xa0\x37"
- "\xce\x42\xd9\x70\x07\x7b\x12\xa9"
- "\x1d\xb4\x4b\xe2\x56\xed\x84\x1b"
- "\x8f\x26\xbd\x31\xc8\x5f\xf6\x6a"
- "\x01\x98\x0c\xa3\x3a\xd1\x45\xdc"
- "\x73\x0a\x7e\x15\xac\x20\xb7\x4e"
- "\xe5\x59\xf0\x87\x1e\x92\x29\xc0"
- "\x34\xcb\x62\xf9\x6d\x04\x9b\x0f"
- "\xa6\x3d\xd4\x48\xdf\x76\x0d\x81"
- "\x18\xaf\x23\xba\x51\xe8\x5c\xf3"
- "\x8a\x21\x95\x2c\xc3\x37\xce\x65"
- "\xfc\x70\x07\x9e\x12\xa9\x40\xd7"
- "\x4b\xe2\x79\x10\x84\x1b\xb2\x26"
- "\xbd\x54\xeb\x5f\xf6\x8d\x01\x98"
- "\x2f\xc6\x3a\xd1\x68\xff\x73\x0a"
- "\xa1\x15\xac\x43\xda\x4e\xe5\x7c"
- "\x13\x87\x1e\xb5\x29\xc0\x57\xee"
- "\x62\xf9\x90\x04\x9b\x32\xc9\x3d"
- "\xd4\x6b\x02\x76\x0d\xa4\x18\xaf"
- "\x46\xdd\x51\xe8\x7f\x16\x8a\x21"
- "\xb8\x2c\xc3\x5a\xf1\x65\xfc\x93"
- "\x07\x9e\x35\xcc\x40\xd7\x6e\x05"
- "\x79\x10\xa7\x1b\xb2\x49\xe0\x54"
- "\xeb\x82\x19\x8d\x24\xbb\x2f\xc6"
- "\x5d\xf4\x68\xff\x96\x0a\xa1\x38"
- "\xcf\x43\xda\x71\x08\x7c\x13\xaa"
- "\x1e\xb5\x4c\xe3\x57\xee\x85\x1c"
- "\x90\x27\xbe\x32\xc9\x60\xf7\x6b"
- "\x02\x99\x0d\xa4\x3b\xd2\x46\xdd"
- "\x74\x0b\x7f\x16\xad\x21\xb8\x4f"
- "\xe6\x5a\xf1\x88\x1f\x93\x2a\xc1"
- "\x35\xcc\x63\xfa\x6e\x05\x9c\x10"
- "\xa7\x3e\xd5\x49\xe0\x77\x0e\x82"
- "\x19\xb0\x24\xbb\x52\xe9\x5d\xf4"
- "\x8b\x22\x96\x2d\xc4\x38\xcf\x66"
- "\xfd\x71\x08\x9f\x13\xaa\x41\xd8"
- "\x4c\xe3\x7a\x11\x85\x1c\xb3\x27"
- "\xbe\x55\xec\x60\xf7\x8e\x02\x99"
- "\x30\xc7\x3b\xd2\x69\x00\x74\x0b"
- "\xa2\x16\xad\x44\xdb\x4f\xe6\x7d"
- "\x14\x88\x1f\xb6\x2a\xc1\x58\xef"
- "\x63\xfa\x91\x05\x9c\x33\xca\x3e"
- "\xd5\x6c\x03\x77\x0e\xa5\x19\xb0"
- "\x47\xde\x52\xe9\x80\x17\x8b\x22"
- "\xb9\x2d\xc4\x5b\xf2\x66\xfd\x94"
- "\x08\x9f\x36\xcd\x41\xd8\x6f\x06"
- "\x7a\x11\xa8\x1c\xb3\x4a\xe1\x55"
- "\xec\x83\x1a\x8e\x25\xbc\x30\xc7"
- "\x5e\xf5\x69\x00\x97\x0b\xa2\x39"
- "\xd0\x44\xdb\x72\x09\x7d\x14\xab"
- "\x1f\xb6\x4d\xe4\x58\xef\x86\x1d"
- "\x91\x28\xbf\x33\xca\x61\xf8\x6c"
- "\x03\x9a\x0e\xa5\x3c\xd3\x47\xde"
- "\x75\x0c\x80\x17\xae\x22\xb9\x50"
- "\xe7\x5b\xf2\x89\x20\x94\x2b\xc2"
- "\x36\xcd\x64\xfb\x6f\x06\x9d\x11"
- "\xa8\x3f\xd6\x4a\xe1\x78\x0f\x83"
- "\x1a\xb1\x25\xbc\x53\xea\x5e\xf5"
- "\x8c\x00\x97\x2e\xc5\x39\xd0\x67"
- "\xfe\x72\x09\xa0\x14\xab\x42\xd9"
- "\x4d\xe4\x7b\x12\x86\x1d\xb4\x28"
- "\xbf\x56\xed\x61\xf8\x8f\x03\x9a"
- "\x31\xc8\x3c\xd3\x6a\x01\x75\x0c"
- "\xa3\x17\xae\x45\xdc\x50\xe7\x7e"
- "\x15\x89\x20\xb7\x2b\xc2\x59\xf0"
- "\x64\xfb\x92\x06\x9d\x34\xcb\x3f"
- "\xd6\x6d\x04\x78\x0f\xa6\x1a\xb1"
- "\x48\xdf\x53\xea\x81\x18\x8c\x23"
- "\xba\x2e\xc5\x5c\xf3\x67\xfe\x95"
- "\x09\xa0\x37\xce\x42\xd9\x70\x07"
- "\x7b\x12\xa9\x1d\xb4\x4b\xe2\x56"
- "\xed\x84\x1b\x8f\x26\xbd\x31\xc8"
- "\x5f\xf6\x6a\x01\x98\x0c\xa3\x3a"
- "\xd1\x45\xdc\x73\x0a\x7e\x15\xac"
- "\x20\xb7\x4e\xe5\x59\xf0\x87\x1e"
- "\x92\x29\xc0\x34\xcb\x62\xf9\x6d"
- "\x04\x9b\x0f\xa6\x3d\xd4\x48\xdf"
- "\x76\x0d\x81\x18\xaf\x23\xba\x51"
- "\xe8\x5c\xf3\x8a\x21\x95\x2c\xc3"
- "\x37\xce\x65\xfc\x70\x07\x9e\x12"
- "\xa9\x40\xd7\x4b\xe2\x79\x10\x84"
- "\x1b\xb2\x26\xbd\x54\xeb\x5f\xf6"
- "\x8d\x01\x98\x2f\xc6\x3a\xd1\x68"
- "\xff\x73\x0a\xa1\x15\xac\x43\xda"
- "\x4e\xe5\x7c\x13\x87\x1e\xb5\x29"
- "\xc0\x57\xee\x62\xf9\x90\x04\x9b"
- "\x32\xc9\x3d\xd4\x6b\x02\x76\x0d"
- "\xa4\x18\xaf\x46\xdd\x51\xe8\x7f"
- "\x16\x8a\x21\xb8\x2c\xc3\x5a\xf1"
- "\x65\xfc\x93\x07\x9e\x35\xcc\x40"
- "\xd7\x6e\x05\x79\x10\xa7\x1b\xb2"
- "\x49\xe0\x54\xeb\x82\x19\x8d\x24"
- "\xbb\x2f\xc6\x5d\xf4\x68\xff\x96"
- "\x0a\xa1\x38\xcf\x43\xda\x71\x08"
- "\x7c\x13\xaa\x1e\xb5\x4c\xe3\x57"
- "\xee\x85\x1c\x90\x27\xbe\x32\xc9"
- "\x60\xf7\x6b\x02\x99\x0d\xa4\x3b"
- "\xd2\x46\xdd\x74\x0b\x7f\x16\xad"
- "\x21\xb8\x4f\xe6\x5a\xf1\x88\x1f"
- "\x93\x2a\xc1\x35\xcc\x63\xfa\x6e"
- "\x05\x9c\x10\xa7\x3e\xd5\x49\xe0"
- "\x77\x0e\x82\x19\xb0\x24\xbb\x52"
- "\xe9\x5d\xf4\x8b\x22\x96\x2d\xc4"
- "\x38\xcf\x66\xfd\x71\x08\x9f\x13"
- "\xaa\x41\xd8\x4c\xe3\x7a\x11\x85"
- "\x1c\xb3\x27\xbe\x55\xec\x60\xf7"
- "\x8e\x02\x99\x30\xc7\x3b\xd2\x69"
- "\x00\x74\x0b\xa2\x16\xad\x44\xdb"
- "\x4f\xe6\x7d\x14\x88\x1f\xb6\x2a"
- "\xc1\x58\xef\x63\xfa\x91\x05\x9c"
- "\x33\xca\x3e\xd5\x6c\x03\x77\x0e"
- "\xa5\x19\xb0\x47\xde\x52\xe9\x80"
- "\x17\x8b\x22\xb9\x2d\xc4\x5b\xf2"
- "\x66\xfd\x94\x08\x9f\x36\xcd\x41"
- "\xd8\x6f\x06\x7a\x11\xa8\x1c\xb3"
- "\x4a\xe1\x55\xec\x83\x1a\x8e\x25"
- "\xbc\x30\xc7\x5e\xf5\x69\x00\x97"
- "\x0b\xa2\x39\xd0\x44\xdb\x72\x09"
- "\x7d\x14\xab\x1f\xb6\x4d\xe4\x58"
- "\xef\x86\x1d\x91\x28\xbf\x33\xca"
- "\x61\xf8\x6c\x03\x9a\x0e\xa5\x3c"
- "\xd3\x47\xde\x75\x0c\x80\x17\xae"
- "\x22\xb9\x50\xe7\x5b\xf2\x89\x20"
- "\x94\x2b\xc2\x36\xcd\x64\xfb\x6f"
- "\x06\x9d\x11\xa8\x3f\xd6\x4a\xe1"
- "\x78\x0f\x83\x1a\xb1\x25\xbc\x53"
- "\xea\x5e\xf5\x8c\x00\x97\x2e\xc5"
- "\x39\xd0\x67\xfe\x72\x09\xa0\x14"
- "\xab\x42\xd9\x4d\xe4\x7b\x12\x86"
- "\x1d\xb4\x28\xbf\x56\xed\x61\xf8"
- "\x8f\x03\x9a\x31\xc8\x3c\xd3\x6a"
- "\x01\x75\x0c\xa3\x17\xae\x45\xdc"
- "\x50\xe7\x7e\x15\x89\x20\xb7\x2b"
- "\xc2\x59\xf0\x64\xfb\x92\x06\x9d"
- "\x34\xcb\x3f\xd6\x6d\x04\x78\x0f"
- "\xa6\x1a\xb1\x48\xdf\x53\xea\x81"
- "\x18\x8c\x23\xba\x2e\xc5\x5c\xf3"
- "\x67\xfe\x95\x09\xa0\x37\xce\x42"
- "\xd9\x70\x07\x7b\x12\xa9\x1d\xb4"
- "\x4b\xe2\x56\xed\x84\x1b\x8f\x26"
- "\xbd\x31\xc8\x5f\xf6\x6a\x01\x98",
- .psize = 2048,
- .digest = (u8 *)(u16 []){ 0x23ca },
- }
-};
-
/*
* Streebog test vectors from RFC 6986 and GOST R 34.11-2012
*/
@@ -9139,294 +8836,6 @@ static const struct hash_testvec hmac_sha3_512_tv_template[] = {
},
};
-/*
- * Poly1305 test vectors from RFC7539 A.3.
- */
-
-static const struct hash_testvec poly1305_tv_template[] = {
- { /* Test Vector #1 */
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 96,
- .digest = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #2 */
- .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70"
- "\xf0\xef\xca\x96\x22\x7a\x86\x3e"
- "\x41\x6e\x79\x20\x73\x75\x62\x6d"
- "\x69\x73\x73\x69\x6f\x6e\x20\x74"
- "\x6f\x20\x74\x68\x65\x20\x49\x45"
- "\x54\x46\x20\x69\x6e\x74\x65\x6e"
- "\x64\x65\x64\x20\x62\x79\x20\x74"
- "\x68\x65\x20\x43\x6f\x6e\x74\x72"
- "\x69\x62\x75\x74\x6f\x72\x20\x66"
- "\x6f\x72\x20\x70\x75\x62\x6c\x69"
- "\x63\x61\x74\x69\x6f\x6e\x20\x61"
- "\x73\x20\x61\x6c\x6c\x20\x6f\x72"
- "\x20\x70\x61\x72\x74\x20\x6f\x66"
- "\x20\x61\x6e\x20\x49\x45\x54\x46"
- "\x20\x49\x6e\x74\x65\x72\x6e\x65"
- "\x74\x2d\x44\x72\x61\x66\x74\x20"
- "\x6f\x72\x20\x52\x46\x43\x20\x61"
- "\x6e\x64\x20\x61\x6e\x79\x20\x73"
- "\x74\x61\x74\x65\x6d\x65\x6e\x74"
- "\x20\x6d\x61\x64\x65\x20\x77\x69"
- "\x74\x68\x69\x6e\x20\x74\x68\x65"
- "\x20\x63\x6f\x6e\x74\x65\x78\x74"
- "\x20\x6f\x66\x20\x61\x6e\x20\x49"
- "\x45\x54\x46\x20\x61\x63\x74\x69"
- "\x76\x69\x74\x79\x20\x69\x73\x20"
- "\x63\x6f\x6e\x73\x69\x64\x65\x72"
- "\x65\x64\x20\x61\x6e\x20\x22\x49"
- "\x45\x54\x46\x20\x43\x6f\x6e\x74"
- "\x72\x69\x62\x75\x74\x69\x6f\x6e"
- "\x22\x2e\x20\x53\x75\x63\x68\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x63\x6c\x75"
- "\x64\x65\x20\x6f\x72\x61\x6c\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x20\x49\x45"
- "\x54\x46\x20\x73\x65\x73\x73\x69"
- "\x6f\x6e\x73\x2c\x20\x61\x73\x20"
- "\x77\x65\x6c\x6c\x20\x61\x73\x20"
- "\x77\x72\x69\x74\x74\x65\x6e\x20"
- "\x61\x6e\x64\x20\x65\x6c\x65\x63"
- "\x74\x72\x6f\x6e\x69\x63\x20\x63"
- "\x6f\x6d\x6d\x75\x6e\x69\x63\x61"
- "\x74\x69\x6f\x6e\x73\x20\x6d\x61"
- "\x64\x65\x20\x61\x74\x20\x61\x6e"
- "\x79\x20\x74\x69\x6d\x65\x20\x6f"
- "\x72\x20\x70\x6c\x61\x63\x65\x2c"
- "\x20\x77\x68\x69\x63\x68\x20\x61"
- "\x72\x65\x20\x61\x64\x64\x72\x65"
- "\x73\x73\x65\x64\x20\x74\x6f",
- .psize = 407,
- .digest = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70"
- "\xf0\xef\xca\x96\x22\x7a\x86\x3e",
- }, { /* Test Vector #3 */
- .plaintext = "\x36\xe5\xf6\xb5\xc5\xe0\x60\x70"
- "\xf0\xef\xca\x96\x22\x7a\x86\x3e"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x41\x6e\x79\x20\x73\x75\x62\x6d"
- "\x69\x73\x73\x69\x6f\x6e\x20\x74"
- "\x6f\x20\x74\x68\x65\x20\x49\x45"
- "\x54\x46\x20\x69\x6e\x74\x65\x6e"
- "\x64\x65\x64\x20\x62\x79\x20\x74"
- "\x68\x65\x20\x43\x6f\x6e\x74\x72"
- "\x69\x62\x75\x74\x6f\x72\x20\x66"
- "\x6f\x72\x20\x70\x75\x62\x6c\x69"
- "\x63\x61\x74\x69\x6f\x6e\x20\x61"
- "\x73\x20\x61\x6c\x6c\x20\x6f\x72"
- "\x20\x70\x61\x72\x74\x20\x6f\x66"
- "\x20\x61\x6e\x20\x49\x45\x54\x46"
- "\x20\x49\x6e\x74\x65\x72\x6e\x65"
- "\x74\x2d\x44\x72\x61\x66\x74\x20"
- "\x6f\x72\x20\x52\x46\x43\x20\x61"
- "\x6e\x64\x20\x61\x6e\x79\x20\x73"
- "\x74\x61\x74\x65\x6d\x65\x6e\x74"
- "\x20\x6d\x61\x64\x65\x20\x77\x69"
- "\x74\x68\x69\x6e\x20\x74\x68\x65"
- "\x20\x63\x6f\x6e\x74\x65\x78\x74"
- "\x20\x6f\x66\x20\x61\x6e\x20\x49"
- "\x45\x54\x46\x20\x61\x63\x74\x69"
- "\x76\x69\x74\x79\x20\x69\x73\x20"
- "\x63\x6f\x6e\x73\x69\x64\x65\x72"
- "\x65\x64\x20\x61\x6e\x20\x22\x49"
- "\x45\x54\x46\x20\x43\x6f\x6e\x74"
- "\x72\x69\x62\x75\x74\x69\x6f\x6e"
- "\x22\x2e\x20\x53\x75\x63\x68\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x63\x6c\x75"
- "\x64\x65\x20\x6f\x72\x61\x6c\x20"
- "\x73\x74\x61\x74\x65\x6d\x65\x6e"
- "\x74\x73\x20\x69\x6e\x20\x49\x45"
- "\x54\x46\x20\x73\x65\x73\x73\x69"
- "\x6f\x6e\x73\x2c\x20\x61\x73\x20"
- "\x77\x65\x6c\x6c\x20\x61\x73\x20"
- "\x77\x72\x69\x74\x74\x65\x6e\x20"
- "\x61\x6e\x64\x20\x65\x6c\x65\x63"
- "\x74\x72\x6f\x6e\x69\x63\x20\x63"
- "\x6f\x6d\x6d\x75\x6e\x69\x63\x61"
- "\x74\x69\x6f\x6e\x73\x20\x6d\x61"
- "\x64\x65\x20\x61\x74\x20\x61\x6e"
- "\x79\x20\x74\x69\x6d\x65\x20\x6f"
- "\x72\x20\x70\x6c\x61\x63\x65\x2c"
- "\x20\x77\x68\x69\x63\x68\x20\x61"
- "\x72\x65\x20\x61\x64\x64\x72\x65"
- "\x73\x73\x65\x64\x20\x74\x6f",
- .psize = 407,
- .digest = "\xf3\x47\x7e\x7c\xd9\x54\x17\xaf"
- "\x89\xa6\xb8\x79\x4c\x31\x0c\xf0",
- }, { /* Test Vector #4 */
- .plaintext = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
- "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
- "\x47\x39\x17\xc1\x40\x2b\x80\x09"
- "\x9d\xca\x5c\xbc\x20\x70\x75\xc0"
- "\x27\x54\x77\x61\x73\x20\x62\x72"
- "\x69\x6c\x6c\x69\x67\x2c\x20\x61"
- "\x6e\x64\x20\x74\x68\x65\x20\x73"
- "\x6c\x69\x74\x68\x79\x20\x74\x6f"
- "\x76\x65\x73\x0a\x44\x69\x64\x20"
- "\x67\x79\x72\x65\x20\x61\x6e\x64"
- "\x20\x67\x69\x6d\x62\x6c\x65\x20"
- "\x69\x6e\x20\x74\x68\x65\x20\x77"
- "\x61\x62\x65\x3a\x0a\x41\x6c\x6c"
- "\x20\x6d\x69\x6d\x73\x79\x20\x77"
- "\x65\x72\x65\x20\x74\x68\x65\x20"
- "\x62\x6f\x72\x6f\x67\x6f\x76\x65"
- "\x73\x2c\x0a\x41\x6e\x64\x20\x74"
- "\x68\x65\x20\x6d\x6f\x6d\x65\x20"
- "\x72\x61\x74\x68\x73\x20\x6f\x75"
- "\x74\x67\x72\x61\x62\x65\x2e",
- .psize = 159,
- .digest = "\x45\x41\x66\x9a\x7e\xaa\xee\x61"
- "\xe7\x08\xdc\x7c\xbc\xc5\xeb\x62",
- }, { /* Test Vector #5 */
- .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .psize = 48,
- .digest = "\x03\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #6 */
- .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 48,
- .digest = "\x03\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #7 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xf0\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\x11\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 80,
- .digest = "\x05\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #8 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xfb\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
- "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
- "\x01\x01\x01\x01\x01\x01\x01\x01"
- "\x01\x01\x01\x01\x01\x01\x01\x01",
- .psize = 80,
- .digest = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #9 */
- .plaintext = "\x02\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xfd\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- .psize = 48,
- .digest = "\xfa\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff",
- }, { /* Test Vector #10 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x04\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xe3\x35\x94\xd7\x50\x5e\x43\xb9"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x33\x94\xd7\x50\x5e\x43\x79\xcd"
- "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 96,
- .digest = "\x14\x00\x00\x00\x00\x00\x00\x00"
- "\x55\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Test Vector #11 */
- .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x04\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\xe3\x35\x94\xd7\x50\x5e\x43\xb9"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x33\x94\xd7\x50\x5e\x43\x79\xcd"
- "\x01\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .psize = 80,
- .digest = "\x13\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- }, { /* Regression test for overflow in AVX2 implementation */
- .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff\xff\xff\xff\xff"
- "\xff\xff\xff\xff",
- .psize = 300,
- .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
- "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
- }
-};
-
/* NHPoly1305 test vectors from https://github.com/google/adiantum */
static const struct hash_testvec nhpoly1305_tv_template[] = {
{
@@ -38894,4 +38303,355 @@ static const struct cipher_testvec aes_hctr2_tv_template[] = {
};
+#ifdef __LITTLE_ENDIAN
+#define AUTHENC_KEY_HEADER(enckeylen) \
+ "\x08\x00\x01\x00" /* LE rtattr */ \
+ enckeylen /* crypto_authenc_key_param */
+#else
+#define AUTHENC_KEY_HEADER(enckeylen) \
+ "\x00\x08\x00\x01" /* BE rtattr */ \
+ enckeylen /* crypto_authenc_key_param */
+#endif
+
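The hand-encoded keys in the vectors below follow the authenc key-blob layout: a 4-byte rtattr header, a 4-byte big-endian enckeylen (hence the "4 + 4" in each .klen), then the authentication key (Ki) followed by the encryption key (Ke). A hedged sketch of building such a blob programmatically, assuming the usual rtattr and crypto_authenc_key_param definitions; the helper is illustrative and not part of the patch.

#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>

/* Illustrative sketch: assemble an authenc() key blob matching the layout
 * the vectors below spell out by hand.  Returns the total blob length. */
static unsigned int build_authenc_key(u8 *buf, const u8 *authkey,
				      unsigned int authkeylen,
				      const u8 *enckey,
				      unsigned int enckeylen)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta = (void *)buf;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	memcpy(buf + RTA_LENGTH(sizeof(*param)), authkey, authkeylen);
	memcpy(buf + RTA_LENGTH(sizeof(*param)) + authkeylen,
	       enckey, enckeylen);
	return RTA_LENGTH(sizeof(*param)) + authkeylen + enckeylen;
}
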
+static const struct aead_testvec krb5_test_aes128_cts_hmac_sha256_128[] = {
+ /* rfc8009 Appendix A */
+ {
+ /* "enc no plain" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki
+ "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke
+ .klen = 4 + 4 + 16 + 16,
+ .ptext =
+ "\x7E\x58\x95\xEA\xF2\x67\x24\x35\xBA\xD8\x17\xF5\x45\xA3\x71\x48" // Confounder
+ "", // Plain
+ .plen = 16 + 0,
+ .ctext =
+ "\xEF\x85\xFB\x89\x0B\xB8\x47\x2F\x4D\xAB\x20\x39\x4D\xCA\x78\x1D"
+ "\xAD\x87\x7E\xDA\x39\xD5\x0C\x87\x0C\x0D\x5A\x0A\x8E\x48\xC7\x18",
+ .clen = 16 + 0 + 16,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain<block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki
+ "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke
+ .klen = 4 + 4 + 16 + 16,
+ .ptext =
+ "\x7B\xCA\x28\x5E\x2F\xD4\x13\x0F\xB5\x5B\x1A\x5C\x83\xBC\x5B\x24" // Confounder
+ "\x00\x01\x02\x03\x04\x05", // Plain
+ .plen = 16 + 6,
+ .ctext =
+ "\x84\xD7\xF3\x07\x54\xED\x98\x7B\xAB\x0B\xF3\x50\x6B\xEB\x09\xCF"
+ "\xB5\x54\x02\xCE\xF7\xE6\x87\x7C\xE9\x9E\x24\x7E\x52\xD1\x6E\xD4"
+ "\x42\x1D\xFD\xF8\x97\x6C",
+ .clen = 16 + 6 + 16,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain==block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki
+ "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke
+ .klen = 4 + 4 + 16 + 16,
+ .ptext =
+ "\x56\xAB\x21\x71\x3F\xF6\x2C\x0A\x14\x57\x20\x0F\x6F\xA9\x94\x8F" // Confounder
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", // Plain
+ .plen = 16 + 16,
+ .ctext =
+ "\x35\x17\xD6\x40\xF5\x0D\xDC\x8A\xD3\x62\x87\x22\xB3\x56\x9D\x2A"
+ "\xE0\x74\x93\xFA\x82\x63\x25\x40\x80\xEA\x65\xC1\x00\x8E\x8F\xC2"
+ "\x95\xFB\x48\x52\xE7\xD8\x3E\x1E\x7C\x48\xC3\x7E\xEB\xE6\xB0\xD3",
+ .clen = 16 + 16 + 16,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain>block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki
+ "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke
+ .klen = 4 + 4 + 16 + 16,
+ .ptext =
+ "\xA7\xA4\xE2\x9A\x47\x28\xCE\x10\x66\x4F\xB6\x4E\x49\xAD\x3F\xAC" // Confounder
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
+ "\x10\x11\x12\x13\x14", // Plain
+ .plen = 16 + 21,
+ .ctext =
+ "\x72\x0F\x73\xB1\x8D\x98\x59\xCD\x6C\xCB\x43\x46\x11\x5C\xD3\x36"
+ "\xC7\x0F\x58\xED\xC0\xC4\x43\x7C\x55\x73\x54\x4C\x31\xC8\x13\xBC"
+ "\xE1\xE6\xD0\x72\xC1\x86\xB3\x9A\x41\x3C\x2F\x92\xCA\x9B\x83\x34"
+ "\xA2\x87\xFF\xCB\xFC",
+ .clen = 16 + 21 + 16,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ },
+};
+
+static const struct aead_testvec krb5_test_aes256_cts_hmac_sha384_192[] = {
+ /* rfc8009 Appendix A */
+ {
+ /* "enc no plain" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6"
+ "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki
+ "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7"
+ "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke
+ .klen = 4 + 4 + 32 + 24,
+ .ptext =
+ "\xF7\x64\xE9\xFA\x15\xC2\x76\x47\x8B\x2C\x7D\x0C\x4E\x5F\x58\xE4" // Confounder
+ "", // Plain
+ .plen = 16 + 0,
+ .ctext =
+ "\x41\xF5\x3F\xA5\xBF\xE7\x02\x6D\x91\xFA\xF9\xBE\x95\x91\x95\xA0"
+ "\x58\x70\x72\x73\xA9\x6A\x40\xF0\xA0\x19\x60\x62\x1A\xC6\x12\x74"
+ "\x8B\x9B\xBF\xBE\x7E\xB4\xCE\x3C",
+ .clen = 16 + 0 + 24,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain<block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6"
+ "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki
+ "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7"
+ "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke
+ .klen = 4 + 4 + 32 + 24,
+ .ptext =
+ "\xB8\x0D\x32\x51\xC1\xF6\x47\x14\x94\x25\x6F\xFE\x71\x2D\x0B\x9A" // Confounder
+ "\x00\x01\x02\x03\x04\x05", // Plain
+ .plen = 16 + 6,
+ .ctext =
+ "\x4E\xD7\xB3\x7C\x2B\xCA\xC8\xF7\x4F\x23\xC1\xCF\x07\xE6\x2B\xC7"
+ "\xB7\x5F\xB3\xF6\x37\xB9\xF5\x59\xC7\xF6\x64\xF6\x9E\xAB\x7B\x60"
+ "\x92\x23\x75\x26\xEA\x0D\x1F\x61\xCB\x20\xD6\x9D\x10\xF2",
+ .clen = 16 + 6 + 24,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain==block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6"
+ "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki
+ "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7"
+ "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke
+ .klen = 4 + 4 + 32 + 24,
+ .ptext =
+ "\x53\xBF\x8A\x0D\x10\x52\x65\xD4\xE2\x76\x42\x86\x24\xCE\x5E\x63" // Confounder
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", // Plain
+ .plen = 16 + 16,
+ .ctext =
+ "\xBC\x47\xFF\xEC\x79\x98\xEB\x91\xE8\x11\x5C\xF8\xD1\x9D\xAC\x4B"
+ "\xBB\xE2\xE1\x63\xE8\x7D\xD3\x7F\x49\xBE\xCA\x92\x02\x77\x64\xF6"
+ "\x8C\xF5\x1F\x14\xD7\x98\xC2\x27\x3F\x35\xDF\x57\x4D\x1F\x93\x2E"
+ "\x40\xC4\xFF\x25\x5B\x36\xA2\x66",
+ .clen = 16 + 16 + 24,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain>block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6"
+ "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki
+ "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7"
+ "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke
+ .klen = 4 + 4 + 32 + 24,
+ .ptext =
+ "\x76\x3E\x65\x36\x7E\x86\x4F\x02\xF5\x51\x53\xC7\xE3\xB5\x8A\xF1" // Confounder
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
+ "\x10\x11\x12\x13\x14", // Plain
+ .plen = 16 + 21,
+ .ctext =
+ "\x40\x01\x3E\x2D\xF5\x8E\x87\x51\x95\x7D\x28\x78\xBC\xD2\xD6\xFE"
+ "\x10\x1C\xCF\xD5\x56\xCB\x1E\xAE\x79\xDB\x3C\x3E\xE8\x64\x29\xF2"
+ "\xB2\xA6\x02\xAC\x86\xFE\xF6\xEC\xB6\x47\xD6\x29\x5F\xAE\x07\x7A"
+ "\x1F\xEB\x51\x75\x08\xD2\xC1\x6B\x41\x92\xE0\x1F\x62",
+ .clen = 16 + 21 + 24,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ },
+};
+
+static const struct aead_testvec krb5_test_camellia_cts_cmac[] = {
+ /* rfc6803 sec 10 */
+ {
+ // "enc no plain"
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x45\xeb\x66\xe2\xef\xa8\x77\x8f\x7d\xf1\x46\x54\x53\x05\x98\x06" // Ki
+ "\xe9\x9b\x82\xb3\x6c\x4a\xe8\xea\x19\xe9\x5d\xfa\x9e\xde\x88\x2c", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\xB6\x98\x22\xA1\x9A\x6B\x09\xC0\xEB\xC8\x55\x7D\x1F\x1B\x6C\x0A" // Confounder
+ "", // Plain
+ .plen = 16 + 0,
+ .ctext =
+ "\xC4\x66\xF1\x87\x10\x69\x92\x1E\xDB\x7C\x6F\xDE\x24\x4A\x52\xDB"
+ "\x0B\xA1\x0E\xDC\x19\x7B\xDB\x80\x06\x65\x8C\xA3\xCC\xCE\x6E\xB8",
+ .clen = 16 + 0 + 16,
+ }, {
+ // "enc 1 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x13\x5f\xe7\x11\x6f\x53\xc2\xaa\x36\x12\xb7\xea\xe0\xf2\x84\xaa" // Ki
+ "\xa7\xed\xcd\x53\x97\xea\x6d\x12\xb0\xaf\xf4\xcb\x8d\xaa\x57\xad", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\x6F\x2F\xC3\xC2\xA1\x66\xFD\x88\x98\x96\x7A\x83\xDE\x95\x96\xD9" // Confounder
+ "1", // Plain
+ .plen = 16 + 1,
+ .ctext =
+ "\x84\x2D\x21\xFD\x95\x03\x11\xC0\xDD\x46\x4A\x3F\x4B\xE8\xD6\xDA"
+ "\x88\xA5\x6D\x55\x9C\x9B\x47\xD3\xF9\xA8\x50\x67\xAF\x66\x15\x59"
+ "\xB8",
+ .clen = 16 + 1 + 16,
+ }, {
+ // "enc 9 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x10\x2c\x34\xd0\x75\x74\x9f\x77\x8a\x15\xca\xd1\xe9\x7d\xa9\x86" // Ki
+ "\xdd\xe4\x2e\xca\x7c\xd9\x86\x3f\xc3\xce\x89\xcb\xc9\x43\x62\xd7", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\xA5\xB4\xA7\x1E\x07\x7A\xEE\xF9\x3C\x87\x63\xC1\x8F\xDB\x1F\x10" // Confounder
+ "9 bytesss", // Plain
+ .plen = 16 + 9,
+ .ctext =
+ "\x61\x9F\xF0\x72\xE3\x62\x86\xFF\x0A\x28\xDE\xB3\xA3\x52\xEC\x0D"
+ "\x0E\xDF\x5C\x51\x60\xD6\x63\xC9\x01\x75\x8C\xCF\x9D\x1E\xD3\x3D"
+ "\x71\xDB\x8F\x23\xAA\xBF\x83\x48\xA0",
+ .clen = 16 + 9 + 16,
+ }, {
+ // "enc 13 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\xb8\xc4\x38\xcc\x1a\x00\x60\xfc\x91\x3a\x8e\x07\x16\x96\xbd\x08" // Ki
+ "\xc3\x11\x3a\x25\x85\x90\xb9\xae\xbf\x72\x1b\x1a\xf6\xb0\xcb\xf8", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\x19\xFE\xE4\x0D\x81\x0C\x52\x4B\x5B\x22\xF0\x18\x74\xC6\x93\xDA" // Confounder
+ "13 bytes byte", // Plain
+ .plen = 16 + 13,
+ .ctext =
+ "\xB8\xEC\xA3\x16\x7A\xE6\x31\x55\x12\xE5\x9F\x98\xA7\xC5\x00\x20"
+ "\x5E\x5F\x63\xFF\x3B\xB3\x89\xAF\x1C\x41\xA2\x1D\x64\x0D\x86\x15"
+ "\xC9\xED\x3F\xBE\xB0\x5A\xB6\xAC\xB6\x76\x89\xB5\xEA",
+ .clen = 16 + 13 + 16,
+ }, {
+ // "enc 30 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x18\xaf\x19\xb0\x23\x74\x44\xfd\x75\x04\xad\x7d\xbd\x48\xad\xd3" // Ki
+ "\x8b\x07\xee\xd3\x01\x49\x91\x6a\xa2\x0d\xb3\xf5\xce\xd8\xaf\xad", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\xCA\x7A\x7A\xB4\xBE\x19\x2D\xAB\xD6\x03\x50\x6D\xB1\x9C\x39\xE2" // Confounder
+ "30 bytes bytes bytes bytes byt", // Plain
+ .plen = 16 + 30,
+ .ctext =
+ "\xA2\x6A\x39\x05\xA4\xFF\xD5\x81\x6B\x7B\x1E\x27\x38\x0D\x08\x09"
+ "\x0C\x8E\xC1\xF3\x04\x49\x6E\x1A\xBD\xCD\x2B\xDC\xD1\xDF\xFC\x66"
+ "\x09\x89\xE1\x17\xA7\x13\xDD\xBB\x57\xA4\x14\x6C\x15\x87\xCB\xA4"
+ "\x35\x66\x65\x59\x1D\x22\x40\x28\x2F\x58\x42\xB1\x05\xA5",
+ .clen = 16 + 30 + 16,
+ }, {
+ // "enc no plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\xa2\xb8\x33\xe9\x43\xbb\x10\xee\x53\xb4\xa1\x9b\xc2\xbb\xc7\xe1"
+ "\x9b\x87\xad\x5d\xe9\x21\x22\xa4\x33\x8b\xe6\xf7\x32\xfd\x8a\x0e" // Ki
+ "\x6c\xcb\x3f\x25\xd8\xae\x57\xf4\xe8\xf6\xca\x47\x4b\xdd\xef\xf1"
+ "\x16\xce\x13\x1b\x3f\x71\x01\x2e\x75\x6d\x6b\x1e\x3f\x70\xa7\xf1", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\x3C\xBB\xD2\xB4\x59\x17\x94\x10\x67\xF9\x65\x99\xBB\x98\x92\x6C" // Confounder
+ "", // Plain
+ .plen = 16 + 0,
+ .ctext =
+ "\x03\x88\x6D\x03\x31\x0B\x47\xA6\xD8\xF0\x6D\x7B\x94\xD1\xDD\x83"
+ "\x7E\xCC\xE3\x15\xEF\x65\x2A\xFF\x62\x08\x59\xD9\x4A\x25\x92\x66",
+ .clen = 16 + 0 + 16,
+ }, {
+ // "enc 1 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x84\x61\x4b\xfa\x98\xf1\x74\x8a\xa4\xaf\x99\x2b\x8c\x26\x28\x0d"
+ "\xc8\x98\x73\x29\xdf\x77\x5c\x1d\xb0\x4a\x43\xf1\x21\xaa\x86\x65" // Ki
+ "\xe9\x31\x73\xaa\x01\xeb\x3c\x24\x62\x31\xda\xfc\x78\x02\xee\x32"
+ "\xaf\x24\x85\x1d\x8c\x73\x87\xd1\x8c\xb9\xb2\xc5\xb7\xf5\x70\xb8", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\xDE\xF4\x87\xFC\xEB\xE6\xDE\x63\x46\xD4\xDA\x45\x21\xBB\xA2\xD2" // Confounder
+ "1", // Plain
+ .plen = 16 + 1,
+ .ctext =
+ "\x2C\x9C\x15\x70\x13\x3C\x99\xBF\x6A\x34\xBC\x1B\x02\x12\x00\x2F"
+ "\xD1\x94\x33\x87\x49\xDB\x41\x35\x49\x7A\x34\x7C\xFC\xD9\xD1\x8A"
+ "\x12",
+ .clen = 16 + 1 + 16,
+ }, {
+ // "enc 9 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x47\xb9\xf5\xba\xd7\x63\x00\x58\x2a\x54\x45\xfa\x0c\x1b\x29\xc3"
+ "\xaa\x83\xec\x63\xb9\x0b\x4a\xb0\x08\x48\xc1\x85\x67\x4f\x44\xa7" // Ki
+ "\xcd\xa2\xd3\x9a\x9b\x24\x3f\xfe\xb5\x6e\x8d\x5f\x4b\xd5\x28\x74"
+ "\x1e\xcb\x52\x0c\x62\x12\x3f\xb0\x40\xb8\x41\x8b\x15\xc7\xd7\x0c", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\xAD\x4F\xF9\x04\xD3\x4E\x55\x53\x84\xB1\x41\x00\xFC\x46\x5F\x88" // Confounder
+ "9 bytesss", // Plain
+ .plen = 16 + 9,
+ .ctext =
+ "\x9C\x6D\xE7\x5F\x81\x2D\xE7\xED\x0D\x28\xB2\x96\x35\x57\xA1\x15"
+ "\x64\x09\x98\x27\x5B\x0A\xF5\x15\x27\x09\x91\x3F\xF5\x2A\x2A\x9C"
+ "\x8E\x63\xB8\x72\xF9\x2E\x64\xC8\x39",
+ .clen = 16 + 9 + 16,
+ }, {
+ // "enc 13 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x15\x2f\x8c\x9d\xc9\x85\x79\x6e\xb1\x94\xed\x14\xc5\x9e\xac\xdd"
+ "\x41\x8a\x33\x32\x36\xb7\x8f\xaf\xa7\xc7\x9b\x04\xe0\xac\xe7\xbf" // Ki
+ "\xcd\x8a\x10\xe2\x79\xda\xdd\xb6\x90\x1e\xc3\x0b\xdf\x98\x73\x25"
+ "\x0f\x6e\xfc\x6a\x77\x36\x7d\x74\xdc\x3e\xe7\xf7\x4b\xc7\x77\x4e", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\xCF\x9B\xCA\x6D\xF1\x14\x4E\x0C\x0A\xF9\xB8\xF3\x4C\x90\xD5\x14" // Confounder
+ "13 bytes byte",
+ .plen = 16 + 13,
+ .ctext =
+ "\xEE\xEC\x85\xA9\x81\x3C\xDC\x53\x67\x72\xAB\x9B\x42\xDE\xFC\x57"
+ "\x06\xF7\x26\xE9\x75\xDD\xE0\x5A\x87\xEB\x54\x06\xEA\x32\x4C\xA1"
+ "\x85\xC9\x98\x6B\x42\xAA\xBE\x79\x4B\x84\x82\x1B\xEE",
+ .clen = 16 + 13 + 16,
+ }, {
+ // "enc 30 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x04\x8d\xeb\xf7\xb1\x2c\x09\x32\xe8\xb2\x96\x99\x6c\x23\xf8\xb7"
+ "\x9d\x59\xb9\x7e\xa1\x19\xfc\x0c\x15\x6b\xf7\x88\xdc\x8c\x85\xe8" // Ki
+ "\x1d\x51\x47\xf3\x4b\xb0\x01\xa0\x4a\x68\xa7\x13\x46\xe7\x65\x4e"
+ "\x02\x23\xa6\x0d\x90\xbc\x2b\x79\xb4\xd8\x79\x56\xd4\x7c\xd4\x2a", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\x64\x4D\xEF\x38\xDA\x35\x00\x72\x75\x87\x8D\x21\x68\x55\xE2\x28" // Confounder
+ "30 bytes bytes bytes bytes byt", // Plain
+ .plen = 16 + 30,
+ .ctext =
+ "\x0E\x44\x68\x09\x85\x85\x5F\x2D\x1F\x18\x12\x52\x9C\xA8\x3B\xFD"
+ "\x8E\x34\x9D\xE6\xFD\x9A\xDA\x0B\xAA\xA0\x48\xD6\x8E\x26\x5F\xEB"
+ "\xF3\x4A\xD1\x25\x5A\x34\x49\x99\xAD\x37\x14\x68\x87\xA6\xC6\x84"
+ "\x57\x31\xAC\x7F\x46\x37\x6A\x05\x04\xCD\x06\x57\x14\x74",
+ .clen = 16 + 30 + 16,
+ },
+};
+
#endif /* _CRYPTO_TESTMGR_H */
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 19f2b365e140..368018cfa9bf 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -187,7 +187,7 @@ static void __exit twofish_mod_fini(void)
crypto_unregister_alg(&alg);
}
-subsys_initcall(twofish_mod_init);
+module_init(twofish_mod_init);
module_exit(twofish_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 07994e5ebf4e..229b189a7988 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -21,10 +21,10 @@
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mm.h>
-#include <asm/byteorder.h>
-#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
#define WP512_DIGEST_SIZE 64
#define WP384_DIGEST_SIZE 48
@@ -37,9 +37,6 @@
struct wp512_ctx {
u8 bitLength[WP512_LENGTHBYTES];
- u8 buffer[WP512_BLOCK_SIZE];
- int bufferBits;
- int bufferPos;
u64 hash[WP512_DIGEST_SIZE/8];
};
@@ -779,16 +776,16 @@ static const u64 rc[WHIRLPOOL_ROUNDS] = {
* The core Whirlpool transform.
*/
-static __no_kmsan_checks void wp512_process_buffer(struct wp512_ctx *wctx) {
+static __no_kmsan_checks void wp512_process_buffer(struct wp512_ctx *wctx,
+ const u8 *buffer) {
int i, r;
u64 K[8]; /* the round key */
u64 block[8]; /* mu(buffer) */
u64 state[8]; /* the cipher state */
u64 L[8];
- const __be64 *buffer = (const __be64 *)wctx->buffer;
for (i = 0; i < 8; i++)
- block[i] = be64_to_cpu(buffer[i]);
+ block[i] = get_unaligned_be64(buffer + i * 8);
state[0] = block[0] ^ (K[0] = wctx->hash[0]);
state[1] = block[1] ^ (K[1] = wctx->hash[1]);
@@ -991,8 +988,6 @@ static int wp512_init(struct shash_desc *desc) {
int i;
memset(wctx->bitLength, 0, 32);
- wctx->bufferBits = wctx->bufferPos = 0;
- wctx->buffer[0] = 0;
for (i = 0; i < 8; i++) {
wctx->hash[i] = 0L;
}
@@ -1000,84 +995,54 @@ static int wp512_init(struct shash_desc *desc) {
return 0;
}
-static int wp512_update(struct shash_desc *desc, const u8 *source,
- unsigned int len)
+static void wp512_add_length(u8 *bitLength, u64 value)
{
- struct wp512_ctx *wctx = shash_desc_ctx(desc);
- int sourcePos = 0;
- unsigned int bits_len = len * 8; // convert to number of bits
- int sourceGap = (8 - ((int)bits_len & 7)) & 7;
- int bufferRem = wctx->bufferBits & 7;
+ u32 carry;
int i;
- u32 b, carry;
- u8 *buffer = wctx->buffer;
- u8 *bitLength = wctx->bitLength;
- int bufferBits = wctx->bufferBits;
- int bufferPos = wctx->bufferPos;
- u64 value = bits_len;
for (i = 31, carry = 0; i >= 0 && (carry != 0 || value != 0ULL); i--) {
carry += bitLength[i] + ((u32)value & 0xff);
bitLength[i] = (u8)carry;
carry >>= 8;
value >>= 8;
}
- while (bits_len > 8) {
- b = ((source[sourcePos] << sourceGap) & 0xff) |
- ((source[sourcePos + 1] & 0xff) >> (8 - sourceGap));
- buffer[bufferPos++] |= (u8)(b >> bufferRem);
- bufferBits += 8 - bufferRem;
- if (bufferBits == WP512_BLOCK_SIZE * 8) {
- wp512_process_buffer(wctx);
- bufferBits = bufferPos = 0;
- }
- buffer[bufferPos] = b << (8 - bufferRem);
- bufferBits += bufferRem;
- bits_len -= 8;
- sourcePos++;
- }
- if (bits_len > 0) {
- b = (source[sourcePos] << sourceGap) & 0xff;
- buffer[bufferPos] |= b >> bufferRem;
- } else {
- b = 0;
- }
- if (bufferRem + bits_len < 8) {
- bufferBits += bits_len;
- } else {
- bufferPos++;
- bufferBits += 8 - bufferRem;
- bits_len -= 8 - bufferRem;
- if (bufferBits == WP512_BLOCK_SIZE * 8) {
- wp512_process_buffer(wctx);
- bufferBits = bufferPos = 0;
- }
- buffer[bufferPos] = b << (8 - bufferRem);
- bufferBits += (int)bits_len;
- }
+}
- wctx->bufferBits = bufferBits;
- wctx->bufferPos = bufferPos;
+static int wp512_update(struct shash_desc *desc, const u8 *source,
+ unsigned int len)
+{
+ struct wp512_ctx *wctx = shash_desc_ctx(desc);
+ unsigned int remain = len % WP512_BLOCK_SIZE;
+ u64 bits_len = (len - remain) * 8ull;
+ u8 *bitLength = wctx->bitLength;
- return 0;
+ wp512_add_length(bitLength, bits_len);
+ do {
+ wp512_process_buffer(wctx, source);
+ source += WP512_BLOCK_SIZE;
+ bits_len -= WP512_BLOCK_SIZE * 8;
+ } while (bits_len);
+
+ return remain;
}
-static int wp512_final(struct shash_desc *desc, u8 *out)
+static int wp512_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int bufferPos, u8 *out)
{
struct wp512_ctx *wctx = shash_desc_ctx(desc);
int i;
- u8 *buffer = wctx->buffer;
u8 *bitLength = wctx->bitLength;
- int bufferBits = wctx->bufferBits;
- int bufferPos = wctx->bufferPos;
__be64 *digest = (__be64 *)out;
+ u8 buffer[WP512_BLOCK_SIZE];
- buffer[bufferPos] |= 0x80U >> (bufferBits & 7);
+ wp512_add_length(bitLength, bufferPos * 8);
+ memcpy(buffer, src, bufferPos);
+ buffer[bufferPos] = 0x80U;
bufferPos++;
if (bufferPos > WP512_BLOCK_SIZE - WP512_LENGTHBYTES) {
if (bufferPos < WP512_BLOCK_SIZE)
memset(&buffer[bufferPos], 0, WP512_BLOCK_SIZE - bufferPos);
- wp512_process_buffer(wctx);
+ wp512_process_buffer(wctx, buffer);
bufferPos = 0;
}
if (bufferPos < WP512_BLOCK_SIZE - WP512_LENGTHBYTES)
@@ -1086,31 +1051,32 @@ static int wp512_final(struct shash_desc *desc, u8 *out)
bufferPos = WP512_BLOCK_SIZE - WP512_LENGTHBYTES;
memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES],
bitLength, WP512_LENGTHBYTES);
- wp512_process_buffer(wctx);
+ wp512_process_buffer(wctx, buffer);
+ memzero_explicit(buffer, sizeof(buffer));
for (i = 0; i < WP512_DIGEST_SIZE/8; i++)
digest[i] = cpu_to_be64(wctx->hash[i]);
- wctx->bufferBits = bufferBits;
- wctx->bufferPos = bufferPos;
return 0;
}
-static int wp384_final(struct shash_desc *desc, u8 *out)
+static int wp384_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
u8 D[64];
- wp512_final(desc, D);
+ wp512_finup(desc, src, len, D);
memcpy(out, D, WP384_DIGEST_SIZE);
memzero_explicit(D, WP512_DIGEST_SIZE);
return 0;
}
-static int wp256_final(struct shash_desc *desc, u8 *out)
+static int wp256_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len, u8 *out)
{
u8 D[64];
- wp512_final(desc, D);
+ wp512_finup(desc, src, len, D);
memcpy(out, D, WP256_DIGEST_SIZE);
memzero_explicit(D, WP512_DIGEST_SIZE);
@@ -1121,11 +1087,12 @@ static struct shash_alg wp_algs[3] = { {
.digestsize = WP512_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
- .final = wp512_final,
+ .finup = wp512_finup,
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp512",
.cra_driver_name = "wp512-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1133,11 +1100,12 @@ static struct shash_alg wp_algs[3] = { {
.digestsize = WP384_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
- .final = wp384_final,
+ .finup = wp384_finup,
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp384",
.cra_driver_name = "wp384-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1145,11 +1113,12 @@ static struct shash_alg wp_algs[3] = { {
.digestsize = WP256_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
- .final = wp256_final,
+ .finup = wp256_finup,
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp256",
.cra_driver_name = "wp256-generic",
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1169,7 +1138,7 @@ MODULE_ALIAS_CRYPTO("wp512");
MODULE_ALIAS_CRYPTO("wp384");
MODULE_ALIAS_CRYPTO("wp256");
-subsys_initcall(wp512_mod_init);
+module_init(wp512_mod_init);
module_exit(wp512_mod_fini);
MODULE_LICENSE("GPL");
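
The wp512.c hunks above move Whirlpool to the block-only hash model: wp512_update() now consumes whole 64-byte blocks and returns the number of leftover bytes, and wp512_finup() receives that residue on the stack, pads it with 0x80, folds in the bit-length counter, and runs the final block(s). The standalone C sketch below only illustrates that update/finup contract; the names, the toy compression function, and the omission of the length field are all simplifications, and it assumes the residue handed to finup is shorter than one block.

/*
 * Minimal sketch of a "block-only" update/finup split, with hypothetical
 * names; not the kernel shash API and not real Whirlpool.
 */
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64

/* Stand-in for the real compression function (wp512_process_buffer). */
static void process_block(unsigned long long *state, const unsigned char *block)
{
        for (int i = 0; i < BLOCK_SIZE; i++)
                state[i % 8] += block[i];       /* placeholder mixing only */
}

/* Block-only update: consume whole blocks, report the leftover byte count. */
static unsigned int demo_update(unsigned long long *state,
                                const unsigned char *src, unsigned int len)
{
        unsigned int remain = len % BLOCK_SIZE;

        for (; len >= BLOCK_SIZE; len -= BLOCK_SIZE, src += BLOCK_SIZE)
                process_block(state, src);
        return remain;  /* caller keeps these bytes for finup() */
}

/* finup: copy the residue (< BLOCK_SIZE here), pad with 0x80, finish. */
static void demo_finup(unsigned long long *state,
                       const unsigned char *src, unsigned int len)
{
        unsigned char buffer[BLOCK_SIZE] = { 0 };

        memcpy(buffer, src, len);
        buffer[len] = 0x80;     /* same padding byte wp512_finup() uses */
        process_block(state, buffer);
}

int main(void)
{
        unsigned long long state[8] = { 0 };
        unsigned char msg[100] = { 1, 2, 3 };
        unsigned int remain = demo_update(state, msg, sizeof(msg));

        demo_finup(state, msg + sizeof(msg) - remain, remain);
        printf("%llx\n", state[0]);
        return 0;
}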
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index fc785667b134..6c5f6766fdd6 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -8,9 +8,12 @@
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
+#include <crypto/utils.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
0x02020202, 0x02020202, 0x02020202, 0x02020202,
@@ -30,22 +33,6 @@ struct xcbc_tfm_ctx {
u8 consts[];
};
-/*
- * +------------------------
- * | <shash desc>
- * +------------------------
- * | xcbc_desc_ctx
- * +------------------------
- * | odds (block size)
- * +------------------------
- * | prev (block size)
- * +------------------------
- */
-struct xcbc_desc_ctx {
- unsigned int len;
- u8 odds[];
-};
-
#define XCBC_BLOCKSIZE 16
static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
@@ -70,13 +57,10 @@ static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
static int crypto_xcbc_digest_init(struct shash_desc *pdesc)
{
- struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_blocksize(pdesc->tfm);
- u8 *prev = &ctx->odds[bs];
+ u8 *prev = shash_desc_ctx(pdesc);
- ctx->len = 0;
memset(prev, 0, bs);
-
return 0;
}
@@ -85,77 +69,36 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p,
{
struct crypto_shash *parent = pdesc->tfm;
struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = ctx->odds;
- u8 *prev = odds + bs;
-
- /* checking the data can fill the block */
- if ((ctx->len + len) <= bs) {
- memcpy(odds + ctx->len, p, len);
- ctx->len += len;
- return 0;
- }
-
- /* filling odds with new data and encrypting it */
- memcpy(odds + ctx->len, p, bs - ctx->len);
- len -= bs - ctx->len;
- p += bs - ctx->len;
-
- crypto_xor(prev, odds, bs);
- crypto_cipher_encrypt_one(tfm, prev, prev);
+ u8 *prev = shash_desc_ctx(pdesc);
- /* clearing the length */
- ctx->len = 0;
-
- /* encrypting the rest of data */
- while (len > bs) {
+ do {
crypto_xor(prev, p, bs);
crypto_cipher_encrypt_one(tfm, prev, prev);
p += bs;
len -= bs;
- }
-
- /* keeping the surplus of blocksize */
- if (len) {
- memcpy(odds, p, len);
- ctx->len = len;
- }
-
- return 0;
+ } while (len >= bs);
+ return len;
}
-static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out)
+static int crypto_xcbc_digest_finup(struct shash_desc *pdesc, const u8 *src,
+ unsigned int len, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
- struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
- u8 *odds = ctx->odds;
- u8 *prev = odds + bs;
+ u8 *prev = shash_desc_ctx(pdesc);
unsigned int offset = 0;
- if (ctx->len != bs) {
- unsigned int rlen;
- u8 *p = odds + ctx->len;
-
- *p = 0x80;
- p++;
-
- rlen = bs - ctx->len -1;
- if (rlen)
- memset(p, 0, rlen);
-
+ crypto_xor(prev, src, len);
+ if (len != bs) {
+ prev[len] ^= 0x80;
offset += bs;
}
-
- crypto_xor(prev, odds, bs);
crypto_xor(prev, &tctx->consts[offset], bs);
-
crypto_cipher_encrypt_one(tfm, out, prev);
-
return 0;
}
@@ -216,17 +159,18 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_ctxsize = sizeof(struct xcbc_tfm_ctx) +
alg->cra_blocksize * 2;
+ inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO;
inst->alg.digestsize = alg->cra_blocksize;
- inst->alg.descsize = sizeof(struct xcbc_desc_ctx) +
- alg->cra_blocksize * 2;
+ inst->alg.descsize = alg->cra_blocksize;
inst->alg.base.cra_init = xcbc_init_tfm;
inst->alg.base.cra_exit = xcbc_exit_tfm;
inst->alg.init = crypto_xcbc_digest_init;
inst->alg.update = crypto_xcbc_digest_update;
- inst->alg.final = crypto_xcbc_digest_final;
+ inst->alg.finup = crypto_xcbc_digest_finup;
inst->alg.setkey = crypto_xcbc_digest_setkey;
inst->free = shash_free_singlespawn_instance;
@@ -255,7 +199,7 @@ static void __exit crypto_xcbc_module_exit(void)
crypto_unregister_template(&crypto_xcbc_tmpl);
}
-subsys_initcall(crypto_xcbc_module_init);
+module_init(crypto_xcbc_module_init);
module_exit(crypto_xcbc_module_exit);
MODULE_LICENSE("GPL");
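
The xcbc.c conversion follows the same pattern: the per-request buffer goes away, update() chains full blocks and returns the remainder, and crypto_xcbc_digest_finup() XORs the residual bytes into the chaining value, adds the 0x80 padding byte when the final chunk is short, and uses the offset to pick which of the two stored constants is folded in before the last encryption. The sketch below mirrors only that finup flow; the block cipher is a toy stand-in and the constant layout (full-block constant first, padded-block constant second) is an assumption taken from the offset logic above.

/*
 * Standalone illustration of the XCBC-style finup step; toy cipher, not AES.
 */
#include <stdio.h>
#include <string.h>

#define BS 16

static void toy_encrypt(unsigned char *out, const unsigned char *in)
{
        for (int i = 0; i < BS; i++)
                out[i] = in[i] ^ 0x5a;  /* placeholder for the block cipher */
}

static void xor_block(unsigned char *dst, const unsigned char *src, unsigned int n)
{
        for (unsigned int i = 0; i < n; i++)
                dst[i] ^= src[i];
}

/* consts[0..BS-1]: full final block; consts[BS..2*BS-1]: padded final block. */
static void xcbc_like_finup(unsigned char *prev, const unsigned char *src,
                            unsigned int len, const unsigned char *consts,
                            unsigned char *out)
{
        unsigned int offset = 0;

        xor_block(prev, src, len);
        if (len != BS) {
                prev[len] ^= 0x80;      /* 10* padding, as in the finup above */
                offset += BS;           /* switch to the padded-block constant */
        }
        xor_block(prev, consts + offset, BS);
        toy_encrypt(out, prev);
}

int main(void)
{
        unsigned char prev[BS] = { 0 }, out[BS];
        unsigned char consts[2 * BS] = { 0 };
        unsigned char tail[5] = { 1, 2, 3, 4, 5 };

        xcbc_like_finup(prev, tail, sizeof(tail), consts, out);
        printf("%02x\n", out[0]);
        return 0;
}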
diff --git a/crypto/xctr.c b/crypto/xctr.c
index 6ed9c85ededa..607ab82cb19b 100644
--- a/crypto/xctr.c
+++ b/crypto/xctr.c
@@ -78,7 +78,7 @@ static int crypto_xctr_crypt_inplace(struct skcipher_walk *walk,
crypto_cipher_alg(tfm)->cia_encrypt;
unsigned long alignmask = crypto_cipher_alignmask(tfm);
unsigned int nbytes = walk->nbytes;
- u8 *data = walk->src.virt.addr;
+ u8 *data = walk->dst.virt.addr;
u8 tmp[XCTR_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
__le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1);
@@ -182,7 +182,7 @@ static void __exit crypto_xctr_module_exit(void)
crypto_unregister_template(&crypto_xctr_tmpl);
}
-subsys_initcall(crypto_xctr_module_init);
+module_init(crypto_xctr_module_init);
module_exit(crypto_xctr_module_exit);
MODULE_LICENSE("GPL");
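
The xctr.c hunk switches the in-place walk to read the data pointer from walk->dst.virt.addr, while the counter handling is unchanged: as the visible ctr32 line shows, each keystream block is produced by XORing a little-endian block counter (starting at 1) into the IV before encrypting. The self-contained sketch below shows that counter/XOR flow for an in-place buffer; the cipher is a toy stand-in and the helper names are invented for illustration.

/*
 * Sketch of an XCTR-style in-place pass: XOR the LE counter into the IV,
 * encrypt to get a keystream block, undo the XOR, then XOR into the data.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLK 16

static void toy_encrypt(uint8_t *out, const uint8_t *in)
{
        for (int i = 0; i < BLK; i++)
                out[i] = in[i] ^ 0xa5;  /* placeholder for AES */
}

static void xctr_like_inplace(uint8_t *data, size_t len, uint8_t iv[BLK])
{
        uint8_t keystream[BLK];

        for (size_t off = 0; off < len; off += BLK) {
                uint32_t ctr = (uint32_t)(off / BLK + 1);
                size_t n = len - off < BLK ? len - off : BLK;

                for (int i = 0; i < 4; i++)
                        iv[i] ^= (uint8_t)(ctr >> (8 * i));
                toy_encrypt(keystream, iv);
                for (int i = 0; i < 4; i++)
                        iv[i] ^= (uint8_t)(ctr >> (8 * i));

                for (size_t i = 0; i < n; i++)
                        data[off + i] ^= keystream[i];  /* dst == src */
        }
}

int main(void)
{
        uint8_t iv[BLK] = { 0 }, buf[20] = "attack at dawn";

        xctr_like_inplace(buf, sizeof(buf), iv);
        xctr_like_inplace(buf, sizeof(buf), iv);        /* decrypt = re-encrypt */
        printf("%s\n", buf);
        return 0;
}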
diff --git a/crypto/xts.c b/crypto/xts.c
index 821060ede2cf..3da8f5e053d6 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -99,7 +99,7 @@ static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
while (w.nbytes) {
unsigned int avail = w.nbytes;
- le128 *wsrc;
+ const le128 *wsrc;
le128 *wdst;
wsrc = w.src.virt.addr;
@@ -363,7 +363,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
cipher_name, 0, mask);
- if (err == -ENOENT) {
+ if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) {
err = -ENAMETOOLONG;
if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
@@ -397,7 +397,7 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
/* Alas we screwed up the naming so we have to mangle the
* cipher name.
*/
- if (!strncmp(cipher_name, "ecb(", 4)) {
+ if (!memcmp(cipher_name, "ecb(", 4)) {
int len;
len = strscpy(name, cipher_name + 4, sizeof(name));
@@ -466,7 +466,7 @@ static void __exit xts_module_exit(void)
crypto_unregister_template(&xts_tmpl);
}
-subsys_initcall(xts_module_init);
+module_init(xts_module_init);
module_exit(xts_module_exit);
MODULE_LICENSE("GPL");
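
The xts_create() change above narrows the -ENOENT fallback: the "ecb(<name>)" retry is attempted only when the requested cipher name is not already an ecb(...) spelling, and the later driver-name mangling uses memcmp for the same fixed 4-byte prefix check. A small standalone illustration of that wrap-or-skip decision follows; MAX_NAME stands in for CRYPTO_MAX_ALG_NAME and the function name is hypothetical.

/*
 * Only wrap a cipher name in "ecb(...)" when it is not already one, and
 * reject results that would not fit the fixed-size name buffer.
 */
#include <stdio.h>
#include <string.h>

#define MAX_NAME 128

static int wrap_in_ecb(char *dst, const char *cipher_name)
{
        if (!memcmp(cipher_name, "ecb(", 4))
                return -1;      /* already ecb(...): no second lookup */
        if (snprintf(dst, MAX_NAME, "ecb(%s)", cipher_name) >= MAX_NAME)
                return -1;      /* would be truncated: name too long */
        return 0;
}

int main(void)
{
        char name[MAX_NAME];

        if (!wrap_in_ecb(name, "aes"))
                printf("retry lookup with %s\n", name); /* ecb(aes) */
        if (wrap_in_ecb(name, "ecb(aes)"))
                printf("no retry for ecb(aes)\n");
        return 0;
}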
diff --git a/crypto/xxhash_generic.c b/crypto/xxhash_generic.c
index ac206ad4184d..175bb7ae0fcd 100644
--- a/crypto/xxhash_generic.c
+++ b/crypto/xxhash_generic.c
@@ -96,7 +96,7 @@ static void __exit xxhash_mod_fini(void)
crypto_unregister_shash(&alg);
}
-subsys_initcall(xxhash_mod_init);
+module_init(xxhash_mod_init);
module_exit(xxhash_mod_fini);
MODULE_AUTHOR("Nikolay Borisov <nborisov@suse.com>");
diff --git a/crypto/zstd.c b/crypto/zstd.c
index 154a969c83a8..7570e11b4ee6 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -103,7 +103,7 @@ static int __zstd_init(void *ctx)
return ret;
}
-static void *zstd_alloc_ctx(struct crypto_scomp *tfm)
+static void *zstd_alloc_ctx(void)
{
int ret;
struct zstd_ctx *ctx;
@@ -121,32 +121,18 @@ static void *zstd_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int zstd_init(struct crypto_tfm *tfm)
-{
- struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __zstd_init(ctx);
-}
-
static void __zstd_exit(void *ctx)
{
zstd_comp_exit(ctx);
zstd_decomp_exit(ctx);
}
-static void zstd_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void zstd_free_ctx(void *ctx)
{
__zstd_exit(ctx);
kfree_sensitive(ctx);
}
-static void zstd_exit(struct crypto_tfm *tfm)
-{
- struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
-
- __zstd_exit(ctx);
-}
-
static int __zstd_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -161,14 +147,6 @@ static int __zstd_compress(const u8 *src, unsigned int slen,
return 0;
}
-static int zstd_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __zstd_compress(src, slen, dst, dlen, ctx);
-}
-
static int zstd_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -189,14 +167,6 @@ static int __zstd_decompress(const u8 *src, unsigned int slen,
return 0;
}
-static int zstd_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __zstd_decompress(src, slen, dst, dlen, ctx);
-}
-
static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -204,19 +174,6 @@ static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __zstd_decompress(src, slen, dst, dlen, ctx);
}
-static struct crypto_alg alg = {
- .cra_name = "zstd",
- .cra_driver_name = "zstd-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct zstd_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = zstd_init,
- .cra_exit = zstd_exit,
- .cra_u = { .compress = {
- .coa_compress = zstd_compress,
- .coa_decompress = zstd_decompress } }
-};
-
static struct scomp_alg scomp = {
.alloc_ctx = zstd_alloc_ctx,
.free_ctx = zstd_free_ctx,
@@ -231,26 +188,15 @@ static struct scomp_alg scomp = {
static int __init zstd_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret)
- crypto_unregister_alg(&alg);
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit zstd_mod_fini(void)
{
- crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
-subsys_initcall(zstd_mod_init);
+module_init(zstd_mod_init);
module_exit(zstd_mod_fini);
MODULE_LICENSE("GPL");
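
With the legacy crypto_alg compression entry points removed, zstd.c now registers only the scomp_alg, whose callbacks take a caller-provided context: alloc_ctx() with no tfm argument, free_ctx(void *), and compress/decompress functions that thread the same ctx pointer through. The standalone sketch below mirrors that shape with invented names and a pass-through "compressor"; it is meant only to show the ctx-threading pattern, not the kernel scomp interface or libzstd itself.

/*
 * Sketch of a ctx-based compression backend shape (hypothetical names).
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

struct demo_ctx { unsigned char *workspace; };

static void *demo_alloc_ctx(void)
{
        struct demo_ctx *ctx = calloc(1, sizeof(*ctx));

        if (ctx)
                ctx->workspace = malloc(4096);
        return ctx;
}

static void demo_free_ctx(void *p)
{
        struct demo_ctx *ctx = p;

        if (ctx) {
                free(ctx->workspace);
                free(ctx);
        }
}

/* Trivial pass-through "compression": the point is the ctx plumbing only. */
static int demo_compress(const unsigned char *src, unsigned int slen,
                         unsigned char *dst, unsigned int *dlen, void *ctx)
{
        (void)ctx;
        if (*dlen < slen)
                return -1;
        memcpy(dst, src, slen);
        *dlen = slen;
        return 0;
}

int main(void)
{
        void *ctx = demo_alloc_ctx();
        unsigned char out[16];
        unsigned int outlen = sizeof(out);

        if (ctx && !demo_compress((const unsigned char *)"hello", 5,
                                  out, &outlen, ctx))
                printf("compressed %u bytes\n", outlen);
        demo_free_ctx(ctx);
        return 0;
}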