From 5e99a0a7a9468a18efec66ee97f1e34886e55550 Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Mon, 20 May 2019 09:55:15 -0700
Subject: crypto: algapi - remove crypto_tfm_in_queue()

Remove the crypto_tfm_in_queue() function, which is unused.

Signed-off-by: Eric Biggers
Signed-off-by: Herbert Xu
---
 include/crypto/internal/hash.h | 6 ------
 1 file changed, 6 deletions(-)

(limited to 'include/crypto/internal')

diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index e355fdb642a9..669b4ee5c16d 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -201,12 +201,6 @@ static inline struct ahash_request *ahash_dequeue_request(
 	return ahash_request_cast(crypto_dequeue_request(queue));
 }
 
-static inline int ahash_tfm_in_queue(struct crypto_queue *queue,
-				     struct crypto_ahash *tfm)
-{
-	return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm));
-}
-
 static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
 {
 	return crypto_tfm_ctx(&tfm->base);
--
cgit v1.2.3


From 314d0f0ea69f2fb8e6fc4cb5d5ae6b1e3418e36c Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Sun, 2 Jun 2019 22:46:11 -0700
Subject: crypto: skcipher - make chunksize and walksize accessors internal

The 'chunksize' and 'walksize' properties of skcipher algorithms are
implementation details that users of the skcipher API should not be
looking at. So move their accessor functions from <crypto/skcipher.h>
to <crypto/internal/skcipher.h>.

Signed-off-by: Eric Biggers
Acked-by: Ard Biesheuvel
Signed-off-by: Herbert Xu
---
 include/crypto/internal/skcipher.h | 60 ++++++++++++++++++++++++++++++++++++++
 include/crypto/skcipher.h          | 60 --------------------------------------
 2 files changed, 60 insertions(+), 60 deletions(-)

(limited to 'include/crypto/internal')

diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 9de6032209cb..abb1096495c2 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -205,6 +205,66 @@ static inline unsigned int crypto_skcipher_alg_max_keysize(
 	return alg->max_keysize;
 }
 
+static inline unsigned int crypto_skcipher_alg_chunksize(
+	struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blocksize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_blocksize;
+
+	return alg->chunksize;
+}
+
+static inline unsigned int crypto_skcipher_alg_walksize(
+	struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blocksize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_blocksize;
+
+	return alg->walksize;
+}
+
+/**
+ * crypto_skcipher_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for ciphers such as CTR. However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity. This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_skcipher_chunksize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
+}
+
+/**
+ * crypto_skcipher_walksize() - obtain walk size
+ * @tfm: cipher handle
+ *
+ * In some cases, algorithms can only perform optimally when operating on
+ * multiple blocks in parallel. This is reflected by the walksize, which
+ * must be a multiple of the chunksize (or equal if the concern does not
+ * apply).
+ *
+ * Return: walk size in bytes
+ */
+static inline unsigned int crypto_skcipher_walksize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
+}
+
 /* Helpers for simple block cipher modes of operation */
 struct skcipher_ctx_simple {
 	struct crypto_cipher *cipher;	/* underlying block cipher */
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 98547d1f18c5..694397fb0faa 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -293,66 +293,6 @@ static inline unsigned int crypto_sync_skcipher_ivsize(
 	return crypto_skcipher_ivsize(&tfm->base);
 }
 
-static inline unsigned int crypto_skcipher_alg_chunksize(
-	struct skcipher_alg *alg)
-{
-	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	    CRYPTO_ALG_TYPE_BLKCIPHER)
-		return alg->base.cra_blocksize;
-
-	if (alg->base.cra_ablkcipher.encrypt)
-		return alg->base.cra_blocksize;
-
-	return alg->chunksize;
-}
-
-static inline unsigned int crypto_skcipher_alg_walksize(
-	struct skcipher_alg *alg)
-{
-	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	    CRYPTO_ALG_TYPE_BLKCIPHER)
-		return alg->base.cra_blocksize;
-
-	if (alg->base.cra_ablkcipher.encrypt)
-		return alg->base.cra_blocksize;
-
-	return alg->walksize;
-}
-
-/**
- * crypto_skcipher_chunksize() - obtain chunk size
- * @tfm: cipher handle
- *
- * The block size is set to one for ciphers such as CTR. However,
- * you still need to provide incremental updates in multiples of
- * the underlying block size as the IV does not have sub-block
- * granularity. This is known in this API as the chunk size.
- *
- * Return: chunk size in bytes
- */
-static inline unsigned int crypto_skcipher_chunksize(
-	struct crypto_skcipher *tfm)
-{
-	return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
-}
-
-/**
- * crypto_skcipher_walksize() - obtain walk size
- * @tfm: cipher handle
- *
- * In some cases, algorithms can only perform optimally when operating on
- * multiple blocks in parallel. This is reflected by the walksize, which
- * must be a multiple of the chunksize (or equal if the concern does not
- * apply).
- *
- * Return: walk size in bytes
- */
-static inline unsigned int crypto_skcipher_walksize(
-	struct crypto_skcipher *tfm)
-{
-	return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
-}
-
 /**
  * crypto_skcipher_blocksize() - obtain block size of cipher
  * @tfm: cipher handle
--
cgit v1.2.3
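For illustration, here is a minimal sketch of how crypto-internal code (a mode
template or driver) might consume these accessors once they live in
<crypto/internal/skcipher.h>; callers of the public skcipher API are precisely
the users that should no longer see them. The helpers my_check_update_len()
and my_optimal_stride() are hypothetical names invented for this sketch, not
part of either patch; only crypto_skcipher_chunksize() and
crypto_skcipher_walksize() come from the header above.

/* Illustrative sketch only; not part of the patches above. */
#include <linux/errno.h>
#include <crypto/internal/skcipher.h>

/*
 * Hypothetical helper: reject partial updates that do not fall on a
 * chunksize boundary. For CTR-like modes cra_blocksize is 1, but the
 * chunksize is the underlying block size (e.g. 16 for AES-CTR) and
 * the IV has no sub-chunk granularity.
 */
static int my_check_update_len(struct crypto_skcipher *tfm,
			       unsigned int len)
{
	if (len % crypto_skcipher_chunksize(tfm))
		return -EINVAL;
	return 0;
}

/*
 * Hypothetical helper: choose a processing stride at which the
 * algorithm can run at its preferred parallelism. The walksize is
 * always a multiple of the chunksize, so a buffer of this length
 * also passes my_check_update_len().
 */
static unsigned int my_optimal_stride(struct crypto_skcipher *tfm)
{
	return crypto_skcipher_walksize(tfm);
}

Because both helpers build only on the accessors moved by the second patch,
after it is applied they could live only inside the crypto subsystem, which is
exactly the boundary the patch draws.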