From d6104733178293b40044525b06d6a26356934da3 Mon Sep 17 00:00:00 2001 From: Christian Marangi Date: Tue, 14 Jan 2025 13:36:34 +0100 Subject: spinlock: extend guard with spinlock_bh variants Extend guard APIs with missing raw/spinlock_bh variants. Signed-off-by: Christian Marangi Acked-by: Peter Zijlstra (Intel) Signed-off-by: Herbert Xu --- include/linux/spinlock.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include/linux') diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 63dd8cf3c3c2..d3561c4a080e 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -548,6 +548,12 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t, DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock)) +DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t, + raw_spin_lock_bh(_T->lock), + raw_spin_unlock_bh(_T->lock)) + +DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock)) + DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t, raw_spin_lock_irqsave(_T->lock, _T->flags), raw_spin_unlock_irqrestore(_T->lock, _T->flags), @@ -569,6 +575,13 @@ DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t, DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try, spin_trylock_irq(_T->lock)) +DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t, + spin_lock_bh(_T->lock), + spin_unlock_bh(_T->lock)) + +DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try, + spin_trylock_bh(_T->lock)) + DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t, spin_lock_irqsave(_T->lock, _T->flags), spin_unlock_irqrestore(_T->lock, _T->flags), -- cgit v1.2.3 From b16510a530d1e6ab9683f04f8fb34f2e0f538275 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sun, 2 Feb 2025 20:00:52 +0100 Subject: crypto: ecdsa - Harden against integer overflows in DIV_ROUND_UP() Herbert notes that DIV_ROUND_UP() may overflow unnecessarily if an ecdsa implementation's ->key_size() callback returns an unusually large value. Herbert instead suggests (for a division by 8): X / 8 + !!(X & 7) Based on this formula, introduce a generic DIV_ROUND_UP_POW2() macro and use it in lieu of DIV_ROUND_UP() for ->key_size() return values. Additionally, use the macro in ecc_digits_from_bytes(), whose "nbytes" parameter is a ->key_size() return value in some instances, or a user-specified ASN.1 length in the case of ecdsa_get_signature_rs(). 
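As an illustration of the failure mode being fixed, here is a minimal userspace sketch (not part of the patch; the test harness around the two macros is hypothetical). With a numerator close to UINT_MAX, the classic (n + d - 1) / d form wraps around, while n / d + !!(n & (d - 1)) cannot overflow:

#include <stdio.h>
#include <limits.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))	/* classic form */
#define DIV_ROUND_UP_POW2(n, d)	((n) / (d) + !!((n) & ((d) - 1)))

int main(void)
{
	unsigned int n = UINT_MAX - 2;	/* e.g. a bogus ->key_size() return */

	/* n + 7 wraps to 4, so the classic form yields 0. */
	printf("DIV_ROUND_UP:      %u\n", DIV_ROUND_UP(n, 8u));
	/* 536870912, the mathematically correct ceiling. */
	printf("DIV_ROUND_UP_POW2: %u\n", DIV_ROUND_UP_POW2(n, 8u));
	return 0;
}

For a power-of-two divisor the compiler reduces both the division and the mask to shifts, which is why the two forms perform roughly the same.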
Link: https://lore.kernel.org/r/Z3iElsILmoSu6FuC@gondor.apana.org.au/ Signed-off-by: Lukas Wunner Signed-off-by: Lukas Wunner Signed-off-by: Herbert Xu --- crypto/ecc.c | 2 +- crypto/ecdsa-p1363.c | 2 +- crypto/ecdsa-x962.c | 4 ++-- include/linux/math.h | 12 ++++++++++++ 4 files changed, 16 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/crypto/ecc.c b/crypto/ecc.c index 50ad2d4ed672..6cf9a945fc6c 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -71,7 +71,7 @@ EXPORT_SYMBOL(ecc_get_curve); void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes, u64 *out, unsigned int ndigits) { - int diff = ndigits - DIV_ROUND_UP(nbytes, sizeof(u64)); + int diff = ndigits - DIV_ROUND_UP_POW2(nbytes, sizeof(u64)); unsigned int o = nbytes & 7; __be64 msd = 0; diff --git a/crypto/ecdsa-p1363.c b/crypto/ecdsa-p1363.c index eaae7214d69b..4454f1f8f33f 100644 --- a/crypto/ecdsa-p1363.c +++ b/crypto/ecdsa-p1363.c @@ -22,7 +22,7 @@ static int ecdsa_p1363_verify(struct crypto_sig *tfm, { struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); unsigned int keylen = crypto_sig_keysize(ctx->child); - unsigned int ndigits = DIV_ROUND_UP(keylen, sizeof(u64)); + unsigned int ndigits = DIV_ROUND_UP_POW2(keylen, sizeof(u64)); struct ecdsa_raw_sig sig; if (slen != 2 * keylen) diff --git a/crypto/ecdsa-x962.c b/crypto/ecdsa-x962.c index 6a77c13e192b..90a04f4b9a2f 100644 --- a/crypto/ecdsa-x962.c +++ b/crypto/ecdsa-x962.c @@ -81,8 +81,8 @@ static int ecdsa_x962_verify(struct crypto_sig *tfm, struct ecdsa_x962_signature_ctx sig_ctx; int err; - sig_ctx.ndigits = DIV_ROUND_UP(crypto_sig_keysize(ctx->child), - sizeof(u64)); + sig_ctx.ndigits = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child), + sizeof(u64)); err = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, src, slen); if (err < 0) diff --git a/include/linux/math.h b/include/linux/math.h index f5f18dc3616b..0198c92cbe3e 100644 --- a/include/linux/math.h +++ b/include/linux/math.h @@ -34,6 +34,18 @@ */ #define round_down(x, y) ((x) & ~__round_mask(x, y)) +/** + * DIV_ROUND_UP_POW2 - divide and round up + * @n: numerator + * @d: denominator (must be a power of 2) + * + * Divides @n by @d and rounds up to next multiple of @d (which must be a power + * of 2). Avoids integer overflows that may occur with __KERNEL_DIV_ROUND_UP(). + * Performance is roughly equivalent to __KERNEL_DIV_ROUND_UP(). + */ +#define DIV_ROUND_UP_POW2(n, d) \ + ((n) / (d) + !!((n) & ((d) - 1))) + #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP #define DIV_ROUND_DOWN_ULL(ll, d) \ -- cgit v1.2.3 From f2ffe5a9183d22eec718edac03e8bfcedf4dee70 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 16 Feb 2025 11:07:17 +0800 Subject: crypto: hash - Add request chaining API This adds request chaining to the ahash interface. Request chaining allows multiple requests to be submitted in one shot. An algorithm can elect to receive chained requests by setting the flag CRYPTO_ALG_REQ_CHAIN. If this bit is not set, the API will break up chained requests and submit them one-by-one. A new err field is added to struct crypto_async_request to record the return value for each individual request. 
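For illustration, a hedged usage sketch of the chaining API (not part of the patch): two buffers are hashed with a single submission. The tfm, scatterlists, result buffers and completion callback are assumed to be set up by the caller.

#include <crypto/hash.h>

static int chained_digest(struct crypto_ahash *tfm,
			  struct scatterlist *sg1, unsigned int len1, u8 *out1,
			  struct scatterlist *sg2, unsigned int len2, u8 *out2,
			  crypto_completion_t done_cb, void *cb_data)
{
	struct ahash_request *req1, *req2;
	int err;

	req1 = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req1)
		return -ENOMEM;
	/* set_callback also initialises the request's chain list head. */
	ahash_request_set_callback(req1, CRYPTO_TFM_REQ_MAY_SLEEP, done_cb, cb_data);
	ahash_request_set_crypt(req1, sg1, out1, len1);

	req2 = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req2) {
		ahash_request_free(req1);
		return -ENOMEM;
	}
	/* req2 completes through req1's chain; its own callback is unused. */
	ahash_request_set_callback(req2, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	ahash_request_set_crypt(req2, sg2, out2, len2);
	ahash_request_chain(req2, req1);

	err = crypto_ahash_digest(req1);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;	/* done_cb runs later; free the chain there */

	/* Per-request status is recorded in the new base.err field. */
	err = ahash_request_err(req1) ?: ahash_request_err(req2);
	ahash_request_free(req1);	/* frees the chained req2 as well */
	return err;
}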
Signed-off-by: Herbert Xu --- crypto/ahash.c | 261 +++++++++++++++++++++++++++++++++++++---- crypto/algapi.c | 2 +- include/crypto/algapi.h | 11 ++ include/crypto/hash.h | 28 +++-- include/crypto/internal/hash.h | 10 ++ include/linux/crypto.h | 24 ++++ 6 files changed, 299 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/crypto/ahash.c b/crypto/ahash.c index e54e06dd76f5..e6bdb5ae2dac 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -42,11 +42,19 @@ struct crypto_hash_walk { }; struct ahash_save_req_state { - struct ahash_request *req; + struct list_head head; + struct ahash_request *req0; + struct ahash_request *cur; + int (*op)(struct ahash_request *req); crypto_completion_t compl; void *data; }; +static void ahash_reqchain_done(void *data, int err); +static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt); +static void ahash_restore_req(struct ahash_request *req); +static int ahash_def_finup(struct ahash_request *req); + static int hash_walk_next(struct crypto_hash_walk *walk) { unsigned int offset = walk->offset; @@ -273,24 +281,145 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); +static int ahash_reqchain_finish(struct ahash_save_req_state *state, + int err, u32 mask) +{ + struct ahash_request *req0 = state->req0; + struct ahash_request *req = state->cur; + struct ahash_request *n; + + req->base.err = err; + + if (req != req0) + list_add_tail(&req->base.list, &req0->base.list); + + list_for_each_entry_safe(req, n, &state->head, base.list) { + list_del_init(&req->base.list); + + req->base.flags &= mask; + req->base.complete = ahash_reqchain_done; + req->base.data = state; + state->cur = req; + err = state->op(req); + + if (err == -EINPROGRESS) { + if (!list_empty(&state->head)) + err = -EBUSY; + goto out; + } + + if (err == -EBUSY) + goto out; + + req->base.err = err; + list_add_tail(&req->base.list, &req0->base.list); + } + + ahash_restore_req(req0); + +out: + return err; +} + +static void ahash_reqchain_done(void *data, int err) +{ + struct ahash_save_req_state *state = data; + crypto_completion_t compl = state->compl; + + data = state->data; + + if (err == -EINPROGRESS) { + if (!list_empty(&state->head)) + return; + goto notify; + } + + err = ahash_reqchain_finish(state, err, CRYPTO_TFM_REQ_MAY_BACKLOG); + if (err == -EBUSY) + return; + +notify: + compl(data, err); +} + +static int ahash_do_req_chain(struct ahash_request *req, + int (*op)(struct ahash_request *req)) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ahash_save_req_state *state; + struct ahash_save_req_state state0; + int err; + + if (!ahash_request_chained(req) || crypto_ahash_req_chain(tfm)) + return op(req); + + state = &state0; + + if (ahash_is_async(tfm)) { + err = ahash_save_req(req, ahash_reqchain_done); + if (err) { + struct ahash_request *r2; + + req->base.err = err; + list_for_each_entry(r2, &req->base.list, base.list) + r2->base.err = err; + + return err; + } + + state = req->base.data; + } + + state->op = op; + state->cur = req; + INIT_LIST_HEAD(&state->head); + list_splice_init(&req->base.list, &state->head); + + err = op(req); + if (err == -EBUSY || err == -EINPROGRESS) + return -EBUSY; + + return ahash_reqchain_finish(state, err, ~0); +} + int crypto_ahash_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - if (likely(tfm->using_shash)) - return crypto_shash_init(prepare_shash_desc(req, tfm)); + if (likely(tfm->using_shash)) { + struct 
ahash_request *r2; + int err; + + err = crypto_shash_init(prepare_shash_desc(req, tfm)); + req->base.err = err; + + list_for_each_entry(r2, &req->base.list, base.list) { + struct shash_desc *desc; + + desc = prepare_shash_desc(r2, tfm); + r2->base.err = crypto_shash_init(desc); + } + + return err; + } + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; - return crypto_ahash_alg(tfm)->init(req); + + return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init); } EXPORT_SYMBOL_GPL(crypto_ahash_init); static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ahash_save_req_state *state; gfp_t gfp; u32 flags; + if (!ahash_is_async(tfm)) + return 0; + flags = ahash_request_flags(req); gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; state = kmalloc(sizeof(*state), gfp); @@ -301,14 +430,20 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) state->data = req->base.data; req->base.complete = cplt; req->base.data = state; - state->req = req; + state->req0 = req; return 0; } static void ahash_restore_req(struct ahash_request *req) { - struct ahash_save_req_state *state = req->base.data; + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ahash_save_req_state *state; + + if (!ahash_is_async(tfm)) + return; + + state = req->base.data; req->base.complete = state->compl; req->base.data = state->data; @@ -319,10 +454,24 @@ int crypto_ahash_update(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - if (likely(tfm->using_shash)) - return shash_ahash_update(req, ahash_request_ctx(req)); + if (likely(tfm->using_shash)) { + struct ahash_request *r2; + int err; + + err = shash_ahash_update(req, ahash_request_ctx(req)); + req->base.err = err; + + list_for_each_entry(r2, &req->base.list, base.list) { + struct shash_desc *desc; - return crypto_ahash_alg(tfm)->update(req); + desc = ahash_request_ctx(r2); + r2->base.err = shash_ahash_update(r2, desc); + } + + return err; + } + + return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update); } EXPORT_SYMBOL_GPL(crypto_ahash_update); @@ -330,10 +479,24 @@ int crypto_ahash_final(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - if (likely(tfm->using_shash)) - return crypto_shash_final(ahash_request_ctx(req), req->result); + if (likely(tfm->using_shash)) { + struct ahash_request *r2; + int err; + + err = crypto_shash_final(ahash_request_ctx(req), req->result); + req->base.err = err; + + list_for_each_entry(r2, &req->base.list, base.list) { + struct shash_desc *desc; - return crypto_ahash_alg(tfm)->final(req); + desc = ahash_request_ctx(r2); + r2->base.err = crypto_shash_final(desc, r2->result); + } + + return err; + } + + return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final); } EXPORT_SYMBOL_GPL(crypto_ahash_final); @@ -341,10 +504,27 @@ int crypto_ahash_finup(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - if (likely(tfm->using_shash)) - return shash_ahash_finup(req, ahash_request_ctx(req)); + if (likely(tfm->using_shash)) { + struct ahash_request *r2; + int err; + + err = shash_ahash_finup(req, ahash_request_ctx(req)); + req->base.err = err; + + list_for_each_entry(r2, &req->base.list, base.list) { + struct shash_desc *desc; + + desc = ahash_request_ctx(r2); + r2->base.err = shash_ahash_finup(r2, desc); + } + + return err; + } - return crypto_ahash_alg(tfm)->finup(req); + if 
(!crypto_ahash_alg(tfm)->finup) + return ahash_def_finup(req); + + return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup); } EXPORT_SYMBOL_GPL(crypto_ahash_finup); @@ -352,20 +532,34 @@ int crypto_ahash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - if (likely(tfm->using_shash)) - return shash_ahash_digest(req, prepare_shash_desc(req, tfm)); + if (likely(tfm->using_shash)) { + struct ahash_request *r2; + int err; + + err = shash_ahash_digest(req, prepare_shash_desc(req, tfm)); + req->base.err = err; + + list_for_each_entry(r2, &req->base.list, base.list) { + struct shash_desc *desc; + + desc = prepare_shash_desc(r2, tfm); + r2->base.err = shash_ahash_digest(r2, desc); + } + + return err; + } if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; - return crypto_ahash_alg(tfm)->digest(req); + return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest); } EXPORT_SYMBOL_GPL(crypto_ahash_digest); static void ahash_def_finup_done2(void *data, int err) { struct ahash_save_req_state *state = data; - struct ahash_request *areq = state->req; + struct ahash_request *areq = state->req0; if (err == -EINPROGRESS) return; @@ -376,12 +570,15 @@ static void ahash_def_finup_done2(void *data, int err) static int ahash_def_finup_finish1(struct ahash_request *req, int err) { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + if (err) goto out; - req->base.complete = ahash_def_finup_done2; + if (ahash_is_async(tfm)) + req->base.complete = ahash_def_finup_done2; - err = crypto_ahash_alg(crypto_ahash_reqtfm(req))->final(req); + err = crypto_ahash_final(req); if (err == -EINPROGRESS || err == -EBUSY) return err; @@ -397,7 +594,7 @@ static void ahash_def_finup_done1(void *data, int err) struct ahash_request *areq; state = *state0; - areq = state.req; + areq = state.req0; if (err == -EINPROGRESS) goto out; @@ -413,14 +610,13 @@ out: static int ahash_def_finup(struct ahash_request *req) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int err; err = ahash_save_req(req, ahash_def_finup_done1); if (err) return err; - err = crypto_ahash_alg(tfm)->update(req); + err = crypto_ahash_update(req); if (err == -EINPROGRESS || err == -EBUSY) return err; @@ -635,8 +831,6 @@ static int ahash_prepare_alg(struct ahash_alg *alg) base->cra_type = &crypto_ahash_type; base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; - if (!alg->finup) - alg->finup = ahash_def_finup; if (!alg->setkey) alg->setkey = ahash_nosetkey; @@ -707,5 +901,20 @@ int ahash_register_instance(struct crypto_template *tmpl, } EXPORT_SYMBOL_GPL(ahash_register_instance); +void ahash_request_free(struct ahash_request *req) +{ + struct ahash_request *tmp; + struct ahash_request *r2; + + if (unlikely(!req)) + return; + + list_for_each_entry_safe(r2, tmp, &req->base.list, base.list) + kfree_sensitive(r2); + + kfree_sensitive(req); +} +EXPORT_SYMBOL_GPL(ahash_request_free); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); diff --git a/crypto/algapi.c b/crypto/algapi.c index 5318c214debb..e7a9a2ada2cf 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -955,7 +955,7 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) queue->backlog = queue->backlog->next; request = queue->list.next; - list_del(request); + list_del_init(request); return list_entry(request, struct crypto_async_request, list); } diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 156de41ca760..11065978d360 100644 --- a/include/crypto/algapi.h +++ 
b/include/crypto/algapi.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -271,4 +272,14 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm) return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK; } +static inline bool crypto_request_chained(struct crypto_async_request *req) +{ + return !list_empty(&req->list); +} + +static inline bool crypto_tfm_req_chain(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_CHAIN; +} + #endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 9c1f8ca59a77..0a6f744ce4a1 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -572,16 +572,7 @@ static inline struct ahash_request *ahash_request_alloc_noprof( * ahash_request_free() - zeroize and free the request data structure * @req: request data structure cipher handle to be freed */ -static inline void ahash_request_free(struct ahash_request *req) -{ - kfree_sensitive(req); -} - -static inline void ahash_request_zero(struct ahash_request *req) -{ - memzero_explicit(req, sizeof(*req) + - crypto_ahash_reqsize(crypto_ahash_reqtfm(req))); -} +void ahash_request_free(struct ahash_request *req); static inline struct ahash_request *ahash_request_cast( struct crypto_async_request *req) @@ -622,6 +613,7 @@ static inline void ahash_request_set_callback(struct ahash_request *req, req->base.complete = compl; req->base.data = data; req->base.flags = flags; + crypto_reqchain_init(&req->base); } /** @@ -646,6 +638,12 @@ static inline void ahash_request_set_crypt(struct ahash_request *req, req->result = result; } +static inline void ahash_request_chain(struct ahash_request *req, + struct ahash_request *head) +{ + crypto_request_chain(&req->base, &head->base); +} + /** * DOC: Synchronous Message Digest API * @@ -947,4 +945,14 @@ static inline void shash_desc_zero(struct shash_desc *desc) sizeof(*desc) + crypto_shash_descsize(desc->tfm)); } +static inline int ahash_request_err(struct ahash_request *req) +{ + return req->base.err; +} + +static inline bool ahash_is_async(struct crypto_ahash *tfm) +{ + return crypto_tfm_is_async(&tfm->base); +} + #endif /* _CRYPTO_HASH_H */ diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 84da3424decc..36425ecd2c37 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -247,5 +247,15 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) return container_of(tfm, struct crypto_shash, base); } +static inline bool ahash_request_chained(struct ahash_request *req) +{ + return crypto_request_chained(&req->base); +} + +static inline bool crypto_ahash_req_chain(struct crypto_ahash *tfm) +{ + return crypto_tfm_req_chain(&tfm->base); +} + #endif /* _CRYPTO_INTERNAL_HASH_H */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index b164da5e129e..1d2a6c515d58 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -13,6 +13,8 @@ #define _LINUX_CRYPTO_H #include +#include +#include #include #include #include @@ -124,6 +126,9 @@ */ #define CRYPTO_ALG_FIPS_INTERNAL 0x00020000 +/* Set if the algorithm supports request chains. */ +#define CRYPTO_ALG_REQ_CHAIN 0x00040000 + /* * Transform masks and values (for crt_flags). 
*/ @@ -174,6 +179,7 @@ struct crypto_async_request { struct crypto_tfm *tfm; u32 flags; + int err; }; /** @@ -540,5 +546,23 @@ int crypto_comp_decompress(struct crypto_comp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen); +static inline void crypto_reqchain_init(struct crypto_async_request *req) +{ + req->err = -EINPROGRESS; + INIT_LIST_HEAD(&req->list); +} + +static inline void crypto_request_chain(struct crypto_async_request *req, + struct crypto_async_request *head) +{ + req->err = -EINPROGRESS; + list_add_tail(&req->list, &head->list); +} + +static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC; +} + #endif /* _LINUX_CRYPTO_H */ -- cgit v1.2.3 From 439963cdc3aa447dfd3b5abc05fcd25cef4d22dc Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 16 Feb 2025 11:07:22 +0800 Subject: crypto: ahash - Add virtual address support This patch adds virtual address support to ahash. Virtual addresses were previously only supported through shash. The user may choose to use virtual addresses with ahash by calling ahash_request_set_virt instead of ahash_request_set_crypt. The API will take care of translating this to an SG list if necessary, unless the algorithm declares that it supports chaining. Therefore in order for an ahash algorithm to support chaining, it must also support virtual addresses directly. Signed-off-by: Herbert Xu --- crypto/ahash.c | 282 ++++++++++++++++++++++++++++++++++++----- include/crypto/hash.h | 38 +++++- include/crypto/internal/hash.h | 5 + include/linux/crypto.h | 2 +- 4 files changed, 293 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/crypto/ahash.c b/crypto/ahash.c index e6bdb5ae2dac..208aa4c8d725 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -29,7 +29,7 @@ #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e struct crypto_hash_walk { - char *data; + const char *data; unsigned int offset; unsigned int flags; @@ -48,11 +48,17 @@ struct ahash_save_req_state { int (*op)(struct ahash_request *req); crypto_completion_t compl; void *data; + struct scatterlist sg; + const u8 *src; + u8 *page; + unsigned int offset; + unsigned int nbytes; }; static void ahash_reqchain_done(void *data, int err); static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt); -static void ahash_restore_req(struct ahash_request *req); +static void ahash_restore_req(struct ahash_save_req_state *state); +static void ahash_def_finup_done1(void *data, int err); static int ahash_def_finup(struct ahash_request *req); static int hash_walk_next(struct crypto_hash_walk *walk) @@ -88,20 +94,29 @@ static int crypto_hash_walk_first(struct ahash_request *req, struct crypto_hash_walk *walk) { walk->total = req->nbytes; + walk->entrylen = 0; - if (!walk->total) { - walk->entrylen = 0; + if (!walk->total) return 0; + + walk->flags = req->base.flags; + + if (ahash_request_isvirt(req)) { + walk->data = req->svirt; + walk->total = 0; + return req->nbytes; } walk->sg = req->src; - walk->flags = req->base.flags; return hash_walk_new_entry(walk); } static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) { + if ((walk->flags & CRYPTO_AHASH_REQ_VIRT)) + return err; + walk->data -= walk->offset; kunmap_local(walk->data); @@ -188,6 +203,10 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) unsigned int offset; int err; + if (ahash_request_isvirt(req)) + return crypto_shash_digest(desc, req->svirt, nbytes, + req->result); + if (nbytes && (sg = req->src, 
offset = sg->offset, nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) { @@ -281,18 +300,82 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); +static bool ahash_request_hasvirt(struct ahash_request *req) +{ + struct ahash_request *r2; + + if (ahash_request_isvirt(req)) + return true; + + list_for_each_entry(r2, &req->base.list, base.list) + if (ahash_request_isvirt(r2)) + return true; + + return false; +} + +static int ahash_reqchain_virt(struct ahash_save_req_state *state, + int err, u32 mask) +{ + struct ahash_request *req = state->cur; + + for (;;) { + unsigned len = state->nbytes; + + req->base.err = err; + + if (!state->offset) + break; + + if (state->offset == len || err) { + u8 *result = req->result; + + ahash_request_set_virt(req, state->src, result, len); + state->offset = 0; + break; + } + + len -= state->offset; + + len = min(PAGE_SIZE, len); + memcpy(state->page, state->src + state->offset, len); + state->offset += len; + req->nbytes = len; + + err = state->op(req); + if (err == -EINPROGRESS) { + if (!list_empty(&state->head) || + state->offset < state->nbytes) + err = -EBUSY; + break; + } + + if (err == -EBUSY) + break; + } + + return err; +} + static int ahash_reqchain_finish(struct ahash_save_req_state *state, int err, u32 mask) { struct ahash_request *req0 = state->req0; struct ahash_request *req = state->cur; + struct crypto_ahash *tfm; struct ahash_request *n; + bool update; - req->base.err = err; + err = ahash_reqchain_virt(state, err, mask); + if (err == -EINPROGRESS || err == -EBUSY) + goto out; if (req != req0) list_add_tail(&req->base.list, &req0->base.list); + tfm = crypto_ahash_reqtfm(req); + update = state->op == crypto_ahash_alg(tfm)->update; + list_for_each_entry_safe(req, n, &state->head, base.list) { list_del_init(&req->base.list); @@ -300,10 +383,27 @@ static int ahash_reqchain_finish(struct ahash_save_req_state *state, req->base.complete = ahash_reqchain_done; req->base.data = state; state->cur = req; + + if (update && ahash_request_isvirt(req) && req->nbytes) { + unsigned len = req->nbytes; + u8 *result = req->result; + + state->src = req->svirt; + state->nbytes = len; + + len = min(PAGE_SIZE, len); + + memcpy(state->page, req->svirt, len); + state->offset = len; + + ahash_request_set_crypt(req, &state->sg, result, len); + } + err = state->op(req); if (err == -EINPROGRESS) { - if (!list_empty(&state->head)) + if (!list_empty(&state->head) || + state->offset < state->nbytes) err = -EBUSY; goto out; } @@ -311,11 +411,14 @@ static int ahash_reqchain_finish(struct ahash_save_req_state *state, if (err == -EBUSY) goto out; - req->base.err = err; + err = ahash_reqchain_virt(state, err, mask); + if (err == -EINPROGRESS || err == -EBUSY) + goto out; + list_add_tail(&req->base.list, &req0->base.list); } - ahash_restore_req(req0); + ahash_restore_req(state); out: return err; @@ -329,7 +432,7 @@ static void ahash_reqchain_done(void *data, int err) data = state->data; if (err == -EINPROGRESS) { - if (!list_empty(&state->head)) + if (!list_empty(&state->head) || state->offset < state->nbytes) return; goto notify; } @@ -346,40 +449,84 @@ static int ahash_do_req_chain(struct ahash_request *req, int (*op)(struct ahash_request *req)) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + bool update = op == crypto_ahash_alg(tfm)->update; struct ahash_save_req_state *state; struct ahash_save_req_state state0; + struct ahash_request *r2; + u8 *page = NULL; int err; - if (!ahash_request_chained(req) 
|| crypto_ahash_req_chain(tfm)) + if (crypto_ahash_req_chain(tfm) || + (!ahash_request_chained(req) && + (!update || !ahash_request_isvirt(req)))) return op(req); - state = &state0; + if (update && ahash_request_hasvirt(req)) { + gfp_t gfp; + u32 flags; + + flags = ahash_request_flags(req); + gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + page = (void *)__get_free_page(gfp); + err = -ENOMEM; + if (!page) + goto out_set_chain; + } + state = &state0; if (ahash_is_async(tfm)) { err = ahash_save_req(req, ahash_reqchain_done); - if (err) { - struct ahash_request *r2; - - req->base.err = err; - list_for_each_entry(r2, &req->base.list, base.list) - r2->base.err = err; - - return err; - } + if (err) + goto out_free_page; state = req->base.data; } state->op = op; state->cur = req; + state->page = page; + state->offset = 0; + state->nbytes = 0; INIT_LIST_HEAD(&state->head); list_splice_init(&req->base.list, &state->head); + if (page) + sg_init_one(&state->sg, page, PAGE_SIZE); + + if (update && ahash_request_isvirt(req) && req->nbytes) { + unsigned len = req->nbytes; + u8 *result = req->result; + + state->src = req->svirt; + state->nbytes = len; + + len = min(PAGE_SIZE, len); + + memcpy(page, req->svirt, len); + state->offset = len; + + ahash_request_set_crypt(req, &state->sg, result, len); + } + err = op(req); if (err == -EBUSY || err == -EINPROGRESS) return -EBUSY; return ahash_reqchain_finish(state, err, ~0); + +out_free_page: + if (page) { + memset(page, 0, PAGE_SIZE); + free_page((unsigned long)page); + } + +out_set_chain: + req->base.err = err; + list_for_each_entry(r2, &req->base.list, base.list) + r2->base.err = err; + + return err; } int crypto_ahash_init(struct ahash_request *req) @@ -431,15 +578,19 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) req->base.complete = cplt; req->base.data = state; state->req0 = req; + state->page = NULL; return 0; } -static void ahash_restore_req(struct ahash_request *req) +static void ahash_restore_req(struct ahash_save_req_state *state) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ahash_save_req_state *state; + struct ahash_request *req = state->req0; + struct crypto_ahash *tfm; + free_page((unsigned long)state->page); + + tfm = crypto_ahash_reqtfm(req); if (!ahash_is_async(tfm)) return; @@ -521,13 +672,74 @@ int crypto_ahash_finup(struct ahash_request *req) return err; } - if (!crypto_ahash_alg(tfm)->finup) + if (!crypto_ahash_alg(tfm)->finup || + (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req))) return ahash_def_finup(req); return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup); } EXPORT_SYMBOL_GPL(crypto_ahash_finup); +static int ahash_def_digest_finish(struct ahash_save_req_state *state, int err) +{ + struct ahash_request *req = state->req0; + struct crypto_ahash *tfm; + + if (err) + goto out; + + tfm = crypto_ahash_reqtfm(req); + if (ahash_is_async(tfm)) + req->base.complete = ahash_def_finup_done1; + + err = crypto_ahash_update(req); + if (err == -EINPROGRESS || err == -EBUSY) + return err; + +out: + ahash_restore_req(state); + return err; +} + +static void ahash_def_digest_done(void *data, int err) +{ + struct ahash_save_req_state *state0 = data; + struct ahash_save_req_state state; + struct ahash_request *areq; + + state = *state0; + areq = state.req0; + if (err == -EINPROGRESS) + goto out; + + areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + err = ahash_def_digest_finish(state0, err); + if (err == -EINPROGRESS || err == -EBUSY) + return; + +out: + 
state.compl(state.data, err); +} + +static int ahash_def_digest(struct ahash_request *req) +{ + struct ahash_save_req_state *state; + int err; + + err = ahash_save_req(req, ahash_def_digest_done); + if (err) + return err; + + state = req->base.data; + + err = crypto_ahash_init(req); + if (err == -EINPROGRESS || err == -EBUSY) + return err; + + return ahash_def_digest_finish(state, err); +} + int crypto_ahash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -549,6 +761,9 @@ int crypto_ahash_digest(struct ahash_request *req) return err; } + if (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req)) + return ahash_def_digest(req); + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; @@ -564,17 +779,19 @@ static void ahash_def_finup_done2(void *data, int err) if (err == -EINPROGRESS) return; - ahash_restore_req(areq); + ahash_restore_req(state); ahash_request_complete(areq, err); } -static int ahash_def_finup_finish1(struct ahash_request *req, int err) +static int ahash_def_finup_finish1(struct ahash_save_req_state *state, int err) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ahash_request *req = state->req0; + struct crypto_ahash *tfm; if (err) goto out; + tfm = crypto_ahash_reqtfm(req); if (ahash_is_async(tfm)) req->base.complete = ahash_def_finup_done2; @@ -583,7 +800,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err) return err; out: - ahash_restore_req(req); + ahash_restore_req(state); return err; } @@ -600,7 +817,7 @@ static void ahash_def_finup_done1(void *data, int err) areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = ahash_def_finup_finish1(areq, err); + err = ahash_def_finup_finish1(state0, err); if (err == -EINPROGRESS || err == -EBUSY) return; @@ -610,17 +827,20 @@ out: static int ahash_def_finup(struct ahash_request *req) { + struct ahash_save_req_state *state; int err; err = ahash_save_req(req, ahash_def_finup_done1); if (err) return err; + state = req->base.data; + err = crypto_ahash_update(req); if (err == -EINPROGRESS || err == -EBUSY) return err; - return ahash_def_finup_finish1(req, err); + return ahash_def_finup_finish1(state, err); } int crypto_ahash_export(struct ahash_request *req, void *out) diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 0a6f744ce4a1..4e87e39679cb 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -12,6 +12,9 @@ #include #include +/* Set this bit for virtual address instead of SG list. 
*/ +#define CRYPTO_AHASH_REQ_VIRT 0x00000001 + struct crypto_ahash; /** @@ -52,7 +55,10 @@ struct ahash_request { struct crypto_async_request base; unsigned int nbytes; - struct scatterlist *src; + union { + struct scatterlist *src; + const u8 *svirt; + }; u8 *result; void *__ctx[] CRYPTO_MINALIGN_ATTR; @@ -610,9 +616,13 @@ static inline void ahash_request_set_callback(struct ahash_request *req, crypto_completion_t compl, void *data) { + u32 keep = CRYPTO_AHASH_REQ_VIRT; + req->base.complete = compl; req->base.data = data; - req->base.flags = flags; + flags &= ~keep; + req->base.flags &= keep; + req->base.flags |= flags; crypto_reqchain_init(&req->base); } @@ -636,6 +646,30 @@ static inline void ahash_request_set_crypt(struct ahash_request *req, req->src = src; req->nbytes = nbytes; req->result = result; + req->base.flags &= ~CRYPTO_AHASH_REQ_VIRT; +} + +/** + * ahash_request_set_virt() - set virtual address data buffers + * @req: ahash_request handle to be updated + * @src: source virtual address + * @result: buffer that is filled with the message digest -- the caller must + * ensure that the buffer has sufficient space by, for example, calling + * crypto_ahash_digestsize() + * @nbytes: number of bytes to process from the source virtual address + * + * By using this call, the caller references the source virtual address. + * The source virtual address points to the data the message digest is to + * be calculated for. + */ +static inline void ahash_request_set_virt(struct ahash_request *req, + const u8 *src, u8 *result, + unsigned int nbytes) +{ + req->svirt = src; + req->nbytes = nbytes; + req->result = result; + req->base.flags |= CRYPTO_AHASH_REQ_VIRT; } static inline void ahash_request_chain(struct ahash_request *req, diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 36425ecd2c37..485e22cf517e 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -252,6 +252,11 @@ static inline bool ahash_request_chained(struct ahash_request *req) return crypto_request_chained(&req->base); } +static inline bool ahash_request_isvirt(struct ahash_request *req) +{ + return req->base.flags & CRYPTO_AHASH_REQ_VIRT; +} + static inline bool crypto_ahash_req_chain(struct crypto_ahash *tfm) { return crypto_tfm_req_chain(&tfm->base); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 1d2a6c515d58..61ac11226638 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -126,7 +126,7 @@ */ #define CRYPTO_ALG_FIPS_INTERNAL 0x00020000 -/* Set if the algorithm supports request chains. */ +/* Set if the algorithm supports request chains and virtual addresses. */ #define CRYPTO_ALG_REQ_CHAIN 0x00040000 /* -- cgit v1.2.3 From cc47f07234f72cbd8e2c973cdbf2a6730660a463 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 27 Feb 2025 17:04:46 +0800 Subject: crypto: lzo - Fix compression buffer overrun Unlike the decompression code, the compression code in LZO never checked for output overruns. It instead assumes that the caller always provides enough buffer space, disregarding the buffer length provided by the caller. Add a safe compression interface that checks for the end of buffer before each write. Use the safe interface in crypto/lzo. 
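For illustration, a minimal sketch of a caller using the bounded interface (not part of the patch; the workmem allocation strategy and the errno mapping are assumptions):

#include <linux/lzo.h>
#include <linux/vmalloc.h>

static int compress_bounded(const unsigned char *src, size_t src_len,
			    unsigned char *dst, size_t dst_size,
			    size_t *out_len)
{
	void *wrkmem;
	int err;

	wrkmem = vmalloc(LZO1X_1_MEM_COMPRESS);
	if (!wrkmem)
		return -ENOMEM;

	/* The _safe variant treats *out_len as a hard output bound. */
	*out_len = dst_size;
	err = lzo1x_1_compress_safe(src, src_len, dst, out_len, wrkmem);
	vfree(wrkmem);

	if (err == LZO_E_OUTPUT_OVERRUN)
		return -ENOSPC;	/* dst too small: reported, not overrun */
	return err == LZO_E_OK ? 0 : -EINVAL;
}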
Signed-off-by: Herbert Xu Reviewed-by: David Sterba Signed-off-by: Herbert Xu --- crypto/lzo-rle.c | 2 +- crypto/lzo.c | 2 +- include/linux/lzo.h | 8 ++++ lib/lzo/Makefile | 2 +- lib/lzo/lzo1x_compress.c | 102 +++++++++++++++++++++++++++++++----------- lib/lzo/lzo1x_compress_safe.c | 18 ++++++++ 6 files changed, 106 insertions(+), 28 deletions(-) create mode 100644 lib/lzo/lzo1x_compress_safe.c (limited to 'include/linux') diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c index 0631d975bfac..0abc2d87f042 100644 --- a/crypto/lzo-rle.c +++ b/crypto/lzo-rle.c @@ -55,7 +55,7 @@ static int __lzorle_compress(const u8 *src, unsigned int slen, size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ int err; - err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx); + err = lzorle1x_1_compress_safe(src, slen, dst, &tmp_len, ctx); if (err != LZO_E_OK) return -EINVAL; diff --git a/crypto/lzo.c b/crypto/lzo.c index ebda132dd22b..8338851c7406 100644 --- a/crypto/lzo.c +++ b/crypto/lzo.c @@ -55,7 +55,7 @@ static int __lzo_compress(const u8 *src, unsigned int slen, size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ int err; - err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx); + err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, ctx); if (err != LZO_E_OK) return -EINVAL; diff --git a/include/linux/lzo.h b/include/linux/lzo.h index e95c7d1092b2..4d30e3624acd 100644 --- a/include/linux/lzo.h +++ b/include/linux/lzo.h @@ -24,10 +24,18 @@ int lzo1x_1_compress(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len, void *wrkmem); +/* Same as above but does not write more than dst_len to dst. */ +int lzo1x_1_compress_safe(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + /* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */ int lzorle1x_1_compress(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len, void *wrkmem); +/* Same as above but does not write more than dst_len to dst. 
*/ +int lzorle1x_1_compress_safe(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + /* safe decompression with overrun testing */ int lzo1x_decompress_safe(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len); diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile index 2f58fafbbddd..fc7b2b7ef4b2 100644 --- a/lib/lzo/Makefile +++ b/lib/lzo/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -lzo_compress-objs := lzo1x_compress.o +lzo_compress-objs := lzo1x_compress.o lzo1x_compress_safe.o lzo_decompress-objs := lzo1x_decompress_safe.o obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c index 47d6d43ea957..7b10ca86a893 100644 --- a/lib/lzo/lzo1x_compress.c +++ b/lib/lzo/lzo1x_compress.c @@ -18,11 +18,22 @@ #include #include "lzodefs.h" -static noinline size_t -lzo1x_1_do_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - size_t ti, void *wrkmem, signed char *state_offset, - const unsigned char bitstream_version) +#undef LZO_UNSAFE + +#ifndef LZO_SAFE +#define LZO_UNSAFE 1 +#define LZO_SAFE(name) name +#define HAVE_OP(x) 1 +#endif + +#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun + +static noinline int +LZO_SAFE(lzo1x_1_do_compress)(const unsigned char *in, size_t in_len, + unsigned char **out, unsigned char *op_end, + size_t *tp, void *wrkmem, + signed char *state_offset, + const unsigned char bitstream_version) { const unsigned char *ip; unsigned char *op; @@ -30,8 +41,9 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, const unsigned char * const ip_end = in + in_len - 20; const unsigned char *ii; lzo_dict_t * const dict = (lzo_dict_t *) wrkmem; + size_t ti = *tp; - op = out; + op = *out; ip = in; ii = ip; ip += ti < 4 ? 
4 - ti : 0; @@ -116,25 +128,32 @@ next: if (t != 0) { if (t <= 3) { op[*state_offset] |= t; + NEED_OP(4); COPY4(op, ii); op += t; } else if (t <= 16) { + NEED_OP(17); *op++ = (t - 3); COPY8(op, ii); COPY8(op + 8, ii + 8); op += t; } else { if (t <= 18) { + NEED_OP(1); *op++ = (t - 3); } else { size_t tt = t - 18; + NEED_OP(1); *op++ = 0; while (unlikely(tt > 255)) { tt -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = tt; } + NEED_OP(t); do { COPY8(op, ii); COPY8(op + 8, ii + 8); @@ -151,6 +170,7 @@ next: if (unlikely(run_length)) { ip += run_length; run_length -= MIN_ZERO_RUN_LENGTH; + NEED_OP(4); put_unaligned_le32((run_length << 21) | 0xfffc18 | (run_length & 0x7), op); op += 4; @@ -243,10 +263,12 @@ m_len_done: ip += m_len; if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { m_off -= 1; + NEED_OP(2); *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2)); *op++ = (m_off >> 3); } else if (m_off <= M3_MAX_OFFSET) { m_off -= 1; + NEED_OP(1); if (m_len <= M3_MAX_LEN) *op++ = (M3_MARKER | (m_len - 2)); else { @@ -254,14 +276,18 @@ m_len_done: *op++ = M3_MARKER | 0; while (unlikely(m_len > 255)) { m_len -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = (m_len); } + NEED_OP(2); *op++ = (m_off << 2); *op++ = (m_off >> 6); } else { m_off -= 0x4000; + NEED_OP(1); if (m_len <= M4_MAX_LEN) *op++ = (M4_MARKER | ((m_off >> 11) & 8) | (m_len - 2)); @@ -282,11 +308,14 @@ m_len_done: m_len -= M4_MAX_LEN; *op++ = (M4_MARKER | ((m_off >> 11) & 8)); while (unlikely(m_len > 255)) { + NEED_OP(1); m_len -= 255; *op++ = 0; } + NEED_OP(1); *op++ = (m_len); } + NEED_OP(2); *op++ = (m_off << 2); *op++ = (m_off >> 6); } @@ -295,14 +324,20 @@ finished_writing_instruction: ii = ip; goto next; } - *out_len = op - out; - return in_end - (ii - ti); + *out = op; + *tp = in_end - (ii - ti); + return LZO_E_OK; + +output_overrun: + return LZO_E_OUTPUT_OVERRUN; } -static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem, const unsigned char bitstream_version) +static int LZO_SAFE(lzogeneric1x_1_compress)( + const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem, const unsigned char bitstream_version) { + unsigned char * const op_end = out + *out_len; const unsigned char *ip = in; unsigned char *op = out; unsigned char *data_start; @@ -326,14 +361,18 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, while (l > 20) { size_t ll = min_t(size_t, l, m4_max_offset + 1); uintptr_t ll_end = (uintptr_t) ip + ll; + int err; + if ((ll_end + ((t + ll) >> 5)) <= ll_end) break; BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS); memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t)); - t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem, - &state_offset, bitstream_version); + err = LZO_SAFE(lzo1x_1_do_compress)( + ip, ll, &op, op_end, &t, wrkmem, + &state_offset, bitstream_version); + if (err != LZO_E_OK) + return err; ip += ll; - op += *out_len; l -= ll; } t += l; @@ -342,20 +381,26 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, const unsigned char *ii = in + in_len - t; if (op == data_start && t <= 238) { + NEED_OP(1); *op++ = (17 + t); } else if (t <= 3) { op[state_offset] |= t; } else if (t <= 18) { + NEED_OP(1); *op++ = (t - 3); } else { size_t tt = t - 18; + NEED_OP(1); *op++ = 0; while (tt > 255) { tt -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = tt; } + NEED_OP(t); if (t >= 16) do { COPY8(op, ii); COPY8(op + 8, ii + 8); @@ -368,31 
+413,38 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, } while (--t > 0); } + NEED_OP(3); *op++ = M4_MARKER | 1; *op++ = 0; *op++ = 0; *out_len = op - out; return LZO_E_OK; + +output_overrun: + return LZO_E_OUTPUT_OVERRUN; } -int lzo1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem) +int LZO_SAFE(lzo1x_1_compress)(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) { - return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0); + return LZO_SAFE(lzogeneric1x_1_compress)( + in, in_len, out, out_len, wrkmem, 0); } -int lzorle1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem) +int LZO_SAFE(lzorle1x_1_compress)(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) { - return lzogeneric1x_1_compress(in, in_len, out, out_len, - wrkmem, LZO_VERSION); + return LZO_SAFE(lzogeneric1x_1_compress)( + in, in_len, out, out_len, wrkmem, LZO_VERSION); } -EXPORT_SYMBOL_GPL(lzo1x_1_compress); -EXPORT_SYMBOL_GPL(lzorle1x_1_compress); +EXPORT_SYMBOL_GPL(LZO_SAFE(lzo1x_1_compress)); +EXPORT_SYMBOL_GPL(LZO_SAFE(lzorle1x_1_compress)); +#ifndef LZO_UNSAFE MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO1X-1 Compressor"); +#endif diff --git a/lib/lzo/lzo1x_compress_safe.c b/lib/lzo/lzo1x_compress_safe.c new file mode 100644 index 000000000000..371c9f849492 --- /dev/null +++ b/lib/lzo/lzo1x_compress_safe.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * LZO1X Compressor from LZO + * + * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer + * + * The full LZO package can be found at: + * http://www.oberhumer.com/opensource/lzo/ + * + * Changed for Linux kernel use by: + * Nitin Gupta + * Richard Purdie + */ + +#define LZO_SAFE(name) name##_safe +#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) + +#include "lzo1x_compress.c" -- cgit v1.2.3 From 20238d49448cdb406da2b9bd3e50f892b26da318 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Sun, 29 Sep 2024 14:21:48 +0100 Subject: async_xor: Remove unused 'async_xor_val' async_xor_val has been unused since commit a7c224a820c3 ("md/raid5: convert to new xor compution interface") Remove it. Signed-off-by: Dr. David Alan Gilbert Signed-off-by: Herbert Xu --- crypto/async_tx/async_xor.c | 26 -------------------------- include/linux/async_tx.h | 5 ----- 2 files changed, 31 deletions(-) (limited to 'include/linux') diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 1a3855284091..2c499654a36c 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -389,32 +389,6 @@ async_xor_val_offs(struct page *dest, unsigned int offset, } EXPORT_SYMBOL_GPL(async_xor_val_offs); -/** - * async_xor_val - attempt a xor parity check with a dma engine. - * @dest: destination page used if the xor is performed synchronously - * @src_list: array of source pages - * @offset: offset in pages to start transaction - * @src_cnt: number of source pages - * @len: length in bytes - * @result: 0 if sum == 0 else non-zero - * @submit: submission / completion modifiers - * - * honored flags: ASYNC_TX_ACK - * - * src_list note: if the dest is also a source it must be at index zero. - * The contents of this array will be overwritten if a scribble region - * is not specified. 
- */ -struct dma_async_tx_descriptor * -async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, enum sum_check_flags *result, - struct async_submit_ctl *submit) -{ - return async_xor_val_offs(dest, offset, src_list, NULL, src_cnt, - len, result, submit); -} -EXPORT_SYMBOL_GPL(async_xor_val); - MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api"); MODULE_LICENSE("GPL"); diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 5cc73d7e5b52..1ca9f9e05f4f 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -167,11 +167,6 @@ async_xor_offs(struct page *dest, unsigned int offset, struct page **src_list, unsigned int *src_offset, int src_cnt, size_t len, struct async_submit_ctl *submit); -struct dma_async_tx_descriptor * -async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, enum sum_check_flags *result, - struct async_submit_ctl *submit); - struct dma_async_tx_descriptor * async_xor_val_offs(struct page *dest, unsigned int offset, struct page **src_list, unsigned int *src_offset, -- cgit v1.2.3 From fc8d5bba61ad8087af9a56337a7a297af6b46129 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 13 Mar 2025 13:14:53 +0800 Subject: lib/scatterlist: Add SG_MITER_LOCAL and use it Add kmap_local support to the scatterlist iterator. Use it for all the helper functions in lib/scatterlist. Signed-off-by: Herbert Xu --- include/linux/scatterlist.h | 1 + lib/scatterlist.c | 12 ++++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index d836e7440ee8..138e2f1bd08f 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -671,6 +671,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) #define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */ #define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */ #define SG_MITER_FROM_SG (1 << 2) /* nop */ +#define SG_MITER_LOCAL (1 << 3) /* use kmap_local */ struct sg_mapping_iter { /* the following three fields can be accessed directly */ diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 5bb6b8aff232..b58d5ef1a34b 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -879,7 +879,7 @@ EXPORT_SYMBOL(sg_miter_skip); * @miter->addr and @miter->length point to the current mapping. * * Context: - * May sleep if !SG_MITER_ATOMIC. + * May sleep if !SG_MITER_ATOMIC && !SG_MITER_LOCAL. * * Returns: * true if @miter contains the next mapping. 
false if end of sg @@ -901,6 +901,8 @@ bool sg_miter_next(struct sg_mapping_iter *miter) if (miter->__flags & SG_MITER_ATOMIC) miter->addr = kmap_atomic(miter->page) + miter->__offset; + else if (miter->__flags & SG_MITER_LOCAL) + miter->addr = kmap_local_page(miter->page) + miter->__offset; else miter->addr = kmap(miter->page) + miter->__offset; @@ -936,7 +938,9 @@ void sg_miter_stop(struct sg_mapping_iter *miter) if (miter->__flags & SG_MITER_ATOMIC) { WARN_ON_ONCE(!pagefault_disabled()); kunmap_atomic(miter->addr); - } else + } else if (miter->__flags & SG_MITER_LOCAL) + kunmap_local(miter->addr); + else kunmap(miter->page); miter->page = NULL; @@ -965,7 +969,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, { unsigned int offset = 0; struct sg_mapping_iter miter; - unsigned int sg_flags = SG_MITER_ATOMIC; + unsigned int sg_flags = SG_MITER_LOCAL; if (to_buffer) sg_flags |= SG_MITER_FROM_SG; @@ -1080,7 +1084,7 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents, { unsigned int offset = 0; struct sg_mapping_iter miter; - unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; + unsigned int sg_flags = SG_MITER_LOCAL | SG_MITER_TO_SG; sg_miter_start(&miter, sgl, nents, sg_flags); -- cgit v1.2.3 From 5416b8a741d6d09369b973cd9d4dacb1887c24df Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 15 Mar 2025 18:30:34 +0800 Subject: crypto: acomp - Add ACOMP_REQUEST_ALLOC and acomp_request_alloc_extra Add ACOMP_REQUEST_ALLOC which is a wrapper around acomp_request_alloc that falls back to a synchronous stack request if the allocation fails. Also add ACOMP_REQUEST_ON_STACK which stores the request on the stack only. The request should be freed with acomp_request_free. Finally add acomp_request_alloc_extra which gives the user extra memory to use in conjunction with the request.
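For illustration, a hedged sketch of the new allocation helper (not part of the patch): the tfm and scatterlist setup are assumed, and a synchronous algorithm is assumed so that no completion callback is needed.

#include <crypto/acompress.h>

static int compress_one(struct crypto_acomp *tfm,
			struct scatterlist *src, unsigned int slen,
			struct scatterlist *dst, unsigned int dlen)
{
	/* Declares stack backing and falls back to it if kmalloc fails. */
	ACOMP_REQUEST_ALLOC(req, tfm, GFP_KERNEL);
	int err;

	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	acomp_request_set_params(req, src, dst, slen, dlen);

	err = crypto_acomp_compress(req);

	/* A no-op for the on-stack fallback, kfree_sensitive otherwise. */
	acomp_request_free(req);
	return err;
}

On the fallback path the request is bound to the synchronous fallback tfm (tfm->fb), so the calling code is identical whether the heap allocation succeeded or not.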
Signed-off-by: Herbert Xu --- crypto/acompress.c | 38 +++++++++++++++--- include/crypto/acompress.h | 80 +++++++++++++++++++++++++++++++++++-- include/crypto/internal/acompress.h | 6 +++ include/linux/crypto.h | 1 + 4 files changed, 117 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/crypto/acompress.c b/crypto/acompress.c index 194a4b36f97f..9da033ded193 100644 --- a/crypto/acompress.c +++ b/crypto/acompress.c @@ -60,28 +60,56 @@ static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm) struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm); struct acomp_alg *alg = crypto_acomp_alg(acomp); - alg->exit(acomp); + if (alg->exit) + alg->exit(acomp); + + if (acomp_is_async(acomp)) + crypto_free_acomp(acomp->fb); } static int crypto_acomp_init_tfm(struct crypto_tfm *tfm) { struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm); struct acomp_alg *alg = crypto_acomp_alg(acomp); + struct crypto_acomp *fb = NULL; + int err; + + acomp->fb = acomp; if (tfm->__crt_alg->cra_type != &crypto_acomp_type) return crypto_init_scomp_ops_async(tfm); + if (acomp_is_async(acomp)) { + fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(fb)) + return PTR_ERR(fb); + + err = -EINVAL; + if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE) + goto out_free_fb; + + acomp->fb = fb; + } + acomp->compress = alg->compress; acomp->decompress = alg->decompress; acomp->reqsize = alg->reqsize; - if (alg->exit) - acomp->base.exit = crypto_acomp_exit_tfm; + acomp->base.exit = crypto_acomp_exit_tfm; - if (alg->init) - return alg->init(acomp); + if (!alg->init) + return 0; + + err = alg->init(acomp); + if (err) + goto out_free_fb; return 0; + +out_free_fb: + crypto_free_acomp(fb); + return err; } static unsigned int crypto_acomp_extsize(struct crypto_alg *alg) diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index 53c9e632862b..03cb381c2c54 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -10,9 +10,11 @@ #define _CRYPTO_ACOMP_H #include +#include #include #include #include +#include #include #include #include @@ -32,6 +34,14 @@ #define CRYPTO_ACOMP_DST_MAX 131072 +#define MAX_SYNC_COMP_REQSIZE 0 + +#define ACOMP_REQUEST_ALLOC(name, tfm, gfp) \ + char __##name##_req[sizeof(struct acomp_req) + \ + MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \ + struct acomp_req *name = acomp_request_on_stack_init( \ + __##name##_req, (tfm), (gfp), false) + struct acomp_req; struct acomp_req_chain { @@ -83,12 +93,14 @@ struct acomp_req { * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation * @reqsize: Context size for (de)compression requests + * @fb: Synchronous fallback tfm * @base: Common crypto API algorithm data structure */ struct crypto_acomp { int (*compress)(struct acomp_req *req); int (*decompress)(struct acomp_req *req); unsigned int reqsize; + struct crypto_acomp *fb; struct crypto_tfm base; }; @@ -210,24 +222,68 @@ static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask) return crypto_has_alg(alg_name, type, mask); } +static inline const char *crypto_acomp_alg_name(struct crypto_acomp *tfm) +{ + return crypto_tfm_alg_name(crypto_acomp_tfm(tfm)); +} + +static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)); +} + /** * acomp_request_alloc() -- allocates asynchronous (de)compression request * * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * @gfp: gfp to pass 
to kzalloc (defaults to GFP_KERNEL) * * Return: allocated handle in case of success or NULL in case of an error */ -static inline struct acomp_req *acomp_request_alloc_noprof(struct crypto_acomp *tfm) +static inline struct acomp_req *acomp_request_alloc_extra_noprof( + struct crypto_acomp *tfm, size_t extra, gfp_t gfp) { struct acomp_req *req; + size_t len; + + len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN); + if (check_add_overflow(len, extra, &len)) + return NULL; - req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); + req = kzalloc_noprof(len, gfp); if (likely(req)) acomp_request_set_tfm(req, tfm); return req; } +#define acomp_request_alloc_noprof(tfm, ...) \ + CONCATENATE(acomp_request_alloc_noprof_, COUNT_ARGS(__VA_ARGS__))( \ + tfm, ##__VA_ARGS__) +#define acomp_request_alloc_noprof_0(tfm) \ + acomp_request_alloc_noprof_1(tfm, GFP_KERNEL) +#define acomp_request_alloc_noprof_1(tfm, gfp) \ + acomp_request_alloc_extra_noprof(tfm, 0, gfp) #define acomp_request_alloc(...) alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__)) +/** + * acomp_request_alloc_extra() -- allocate acomp request with extra memory + * + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * @extra: amount of extra memory + * @gfp: gfp to pass to kzalloc + * + * Return: allocated handle in case of success or NULL in case of an error + */ +#define acomp_request_alloc_extra(...) alloc_hooks(acomp_request_alloc_extra_noprof(__VA_ARGS__)) + +static inline void *acomp_request_extra(struct acomp_req *req) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + size_t len; + + len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN); + return (void *)((char *)req + len); +} + /** * acomp_request_free() -- zeroize and free asynchronous (de)compression * request as well as the output buffer if allocated @@ -237,6 +293,8 @@ static inline struct acomp_req *acomp_request_alloc_noprof(struct crypto_acomp * */ static inline void acomp_request_free(struct acomp_req *req) { + if (!req || (req->base.flags & CRYPTO_TFM_REQ_ON_STACK)) + return; kfree_sensitive(req); } @@ -257,7 +315,8 @@ static inline void acomp_request_set_callback(struct acomp_req *req, void *data) { u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | - CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA; + CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA | + CRYPTO_TFM_REQ_ON_STACK; req->base.complete = cmpl; req->base.data = data; @@ -446,4 +505,19 @@ int crypto_acomp_compress(struct acomp_req *req); */ int crypto_acomp_decompress(struct acomp_req *req); +static inline struct acomp_req *acomp_request_on_stack_init( + char *buf, struct crypto_acomp *tfm, gfp_t gfp, bool stackonly) +{ + struct acomp_req *req; + + if (!stackonly && (req = acomp_request_alloc(tfm, gfp))) + return req; + + req = (void *)buf; + acomp_request_set_tfm(req, tfm->fb); + req->base.flags = CRYPTO_TFM_REQ_ON_STACK; + + return req; +} + #endif diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index 575dbcbc0df4..c1ed55a0e3bf 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -12,6 +12,12 @@ #include #include +#define ACOMP_REQUEST_ON_STACK(name, tfm) \ + char __##name##_req[sizeof(struct acomp_req) + \ + MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \ + struct acomp_req *name = acomp_request_on_stack_init( \ + __##name##_req, (tfm), 0, true) + /** * struct acomp_alg - asynchronous compression algorithm * diff 
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 61ac11226638..ea3b95bdbde3 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -138,6 +138,7 @@
 #define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS	0x00000100
 #define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
 #define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400
+#define CRYPTO_TFM_REQ_ON_STACK		0x00000800
 
 /*
  * Miscellaneous stuff.
-- 
cgit v1.2.3
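
The request allocation helpers added above can be illustrated with a short
sketch (struct example_state and example_alloc() are hypothetical, purely for
illustration): a caller reserves private memory behind the request with
acomp_request_alloc_extra() and finds it again with acomp_request_extra().

#include <crypto/acompress.h>
#include <linux/types.h>

struct example_state {			/* hypothetical per-request state */
	u32 crc;
};

static struct acomp_req *example_alloc(struct crypto_acomp *tfm)
{
	struct example_state *state;
	struct acomp_req *req;

	/* One allocation: the request, the tfm's reqsize, then our state. */
	req = acomp_request_alloc_extra(tfm, sizeof(*state), GFP_KERNEL);
	if (!req)
		return NULL;

	state = acomp_request_extra(req);	/* memory past the request */
	state->crc = 0;

	return req;
}

On-stack users would go through ACOMP_REQUEST_ALLOC(name, tfm, gfp) instead:
if the heap allocation fails, the request is set up in the on-stack buffer
against the tfm's synchronous fallback (tfm->fb) and flagged
CRYPTO_TFM_REQ_ON_STACK, which in turn makes acomp_request_free() a no-op.
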
From fce8b8d5986b76a4fdd062e3eec1bb6420fee6c5 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Sun, 16 Mar 2025 09:21:27 +0800
Subject: crypto: remove obsolete 'comp' compression API

The 'comp' compression API has been superseded by the acomp API, which
is a bit more cumbersome to use, but ultimately more flexible when it
comes to hardware implementations.

Now that all the users and implementations have been removed, let's
remove the core plumbing of the 'comp' API as well.

Signed-off-by: Ard Biesheuvel
Signed-off-by: Herbert Xu
---
 Documentation/crypto/architecture.rst |   2 -
 crypto/Makefile                       |   2 +-
 crypto/api.c                          |   4 -
 crypto/compress.c                     |  32 -------
 crypto/crypto_user.c                  |  16 ----
 crypto/proc.c                         |   3 -
 crypto/testmgr.c                      | 152 +++------------------------------
 include/linux/crypto.h                |  76 +----------------
 8 files changed, 15 insertions(+), 272 deletions(-)
 delete mode 100644 crypto/compress.c

(limited to 'include/linux')

diff --git a/Documentation/crypto/architecture.rst b/Documentation/crypto/architecture.rst
index 15dcd62fd22f..249b54d0849f 100644
--- a/Documentation/crypto/architecture.rst
+++ b/Documentation/crypto/architecture.rst
@@ -196,8 +196,6 @@ the aforementioned cipher types:
 
 -  CRYPTO_ALG_TYPE_CIPHER Single block cipher
 
--  CRYPTO_ALG_TYPE_COMPRESS Compression
-
 -  CRYPTO_ALG_TYPE_AEAD Authenticated Encryption with Associated Data
    (MAC)
 
diff --git a/crypto/Makefile b/crypto/Makefile
index d1e422249af6..f22ebd6fb221 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -4,7 +4,7 @@
 #
 
 obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-y := api.o cipher.o compress.o
+crypto-y := api.o cipher.o
 
 obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o
 obj-$(CONFIG_CRYPTO_FIPS) += fips.o
diff --git a/crypto/api.c b/crypto/api.c
index 91957bb52f3f..3416e98128a0 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -383,10 +383,6 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
 	case CRYPTO_ALG_TYPE_CIPHER:
 		len += crypto_cipher_ctxsize(alg);
 		break;
-
-	case CRYPTO_ALG_TYPE_COMPRESS:
-		len += crypto_compress_ctxsize(alg);
-		break;
 	}
 
 	return len;
diff --git a/crypto/compress.c b/crypto/compress.c
deleted file mode 100644
index 9048fe390c46..000000000000
--- a/crypto/compress.c
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cryptographic API.
- *
- * Compression operations.
- *
- * Copyright (c) 2002 James Morris
- */
-#include
-#include "internal.h"
-
-int crypto_comp_compress(struct crypto_comp *comp,
-			 const u8 *src, unsigned int slen,
-			 u8 *dst, unsigned int *dlen)
-{
-	struct crypto_tfm *tfm = crypto_comp_tfm(comp);
-
-	return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst,
-							 dlen);
-}
-EXPORT_SYMBOL_GPL(crypto_comp_compress);
-
-int crypto_comp_decompress(struct crypto_comp *comp,
-			   const u8 *src, unsigned int slen,
-			   u8 *dst, unsigned int *dlen)
-{
-	struct crypto_tfm *tfm = crypto_comp_tfm(comp);
-
-	return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst,
-							   dlen);
-}
-EXPORT_SYMBOL_GPL(crypto_comp_decompress);
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 6c571834e86a..aad429bef03e 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -84,17 +84,6 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
 		       sizeof(rcipher), &rcipher);
 }
 
-static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct crypto_report_comp rcomp;
-
-	memset(&rcomp, 0, sizeof(rcomp));
-
-	strscpy(rcomp.type, "compression", sizeof(rcomp.type));
-
-	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp);
-}
-
 static int crypto_report_one(struct crypto_alg *alg,
 			     struct crypto_user_alg *ualg, struct sk_buff *skb)
 {
@@ -135,11 +124,6 @@ static int crypto_report_one(struct crypto_alg *alg,
 		if (crypto_report_cipher(skb, alg))
 			goto nla_put_failure;
 
-		break;
-	case CRYPTO_ALG_TYPE_COMPRESS:
-		if (crypto_report_comp(skb, alg))
-			goto nla_put_failure;
-
 		break;
 	}
 
diff --git a/crypto/proc.c b/crypto/proc.c
index 522b27d90d29..82f15b967e85 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -72,9 +72,6 @@ static int c_show(struct seq_file *m, void *p)
 		seq_printf(m, "max keysize  : %u\n",
 			   alg->cra_cipher.cia_max_keysize);
 		break;
-	case CRYPTO_ALG_TYPE_COMPRESS:
-		seq_printf(m, "type         : compression\n");
-		break;
 	default:
 		seq_printf(m, "type         : unknown\n");
 		break;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 9c5648c45ff0..1b2387291787 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -3320,112 +3320,6 @@ out:
 	return err;
 }
 
-static int test_comp(struct crypto_comp *tfm,
-		     const struct comp_testvec *ctemplate,
-		     const struct comp_testvec *dtemplate,
-		     int ctcount, int dtcount)
-{
-	const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
-	char *output, *decomp_output;
-	unsigned int i;
-	int ret;
-
-	output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
-	if (!output)
-		return -ENOMEM;
-
-	decomp_output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
-	if (!decomp_output) {
-		kfree(output);
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < ctcount; i++) {
-		int ilen;
-		unsigned int dlen = COMP_BUF_SIZE;
-
-		memset(output, 0, COMP_BUF_SIZE);
-		memset(decomp_output, 0, COMP_BUF_SIZE);
-
-		ilen = ctemplate[i].inlen;
-		ret = crypto_comp_compress(tfm, ctemplate[i].input,
-					   ilen, output, &dlen);
-		if (ret) {
-			printk(KERN_ERR "alg: comp: compression failed "
-			       "on test %d for %s: ret=%d\n", i + 1, algo,
-			       -ret);
-			goto out;
-		}
-
-		ilen = dlen;
-		dlen = COMP_BUF_SIZE;
-		ret = crypto_comp_decompress(tfm, output,
-					     ilen, decomp_output, &dlen);
-		if (ret) {
-			pr_err("alg: comp: compression failed: decompress: on test %d for %s failed: ret=%d\n",
-			       i + 1, algo, -ret);
-			goto out;
-		}
-
-		if (dlen != ctemplate[i].inlen) {
-			printk(KERN_ERR "alg: comp: Compression test %d "
-			       "failed for %s: output len = %d\n", i + 1, algo,
-			       dlen);
-			ret = -EINVAL;
-			goto out;
-		}
-
-		if (memcmp(decomp_output, ctemplate[i].input,
-			   ctemplate[i].inlen)) {
-			pr_err("alg: comp: compression failed: output differs: on test %d for %s\n",
-			       i + 1, algo);
-			hexdump(decomp_output, dlen);
-			ret = -EINVAL;
-			goto out;
-		}
-	}
-
-	for (i = 0; i < dtcount; i++) {
-		int ilen;
-		unsigned int dlen = COMP_BUF_SIZE;
-
-		memset(decomp_output, 0, COMP_BUF_SIZE);
-
-		ilen = dtemplate[i].inlen;
-		ret = crypto_comp_decompress(tfm, dtemplate[i].input,
-					     ilen, decomp_output, &dlen);
-		if (ret) {
-			printk(KERN_ERR "alg: comp: decompression failed "
-			       "on test %d for %s: ret=%d\n", i + 1, algo,
-			       -ret);
-			goto out;
-		}
-
-		if (dlen != dtemplate[i].outlen) {
-			printk(KERN_ERR "alg: comp: Decompression test %d "
-			       "failed for %s: output len = %d\n", i + 1, algo,
-			       dlen);
-			ret = -EINVAL;
-			goto out;
-		}
-
-		if (memcmp(decomp_output, dtemplate[i].output, dlen)) {
-			printk(KERN_ERR "alg: comp: Decompression test %d "
-			       "failed for %s\n", i + 1, algo);
-			hexdump(decomp_output, dlen);
-			ret = -EINVAL;
-			goto out;
-		}
-	}
-
-	ret = 0;
-
-out:
-	kfree(decomp_output);
-	kfree(output);
-	return ret;
-}
-
 static int test_acomp(struct crypto_acomp *tfm,
 		      const struct comp_testvec *ctemplate,
 		      const struct comp_testvec *dtemplate,
@@ -3684,42 +3578,22 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
 static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
 			 u32 type, u32 mask)
 {
-	struct crypto_comp *comp;
 	struct crypto_acomp *acomp;
 	int err;
-	u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
-
-	if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
-		acomp = crypto_alloc_acomp(driver, type, mask);
-		if (IS_ERR(acomp)) {
-			if (PTR_ERR(acomp) == -ENOENT)
-				return 0;
-			pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
-			       driver, PTR_ERR(acomp));
-			return PTR_ERR(acomp);
-		}
-		err = test_acomp(acomp, desc->suite.comp.comp.vecs,
-				 desc->suite.comp.decomp.vecs,
-				 desc->suite.comp.comp.count,
-				 desc->suite.comp.decomp.count);
-		crypto_free_acomp(acomp);
-	} else {
-		comp = crypto_alloc_comp(driver, type, mask);
-		if (IS_ERR(comp)) {
-			if (PTR_ERR(comp) == -ENOENT)
-				return 0;
-			pr_err("alg: comp: Failed to load transform for %s: %ld\n",
-			       driver, PTR_ERR(comp));
-			return PTR_ERR(comp);
-		}
-
-		err = test_comp(comp, desc->suite.comp.comp.vecs,
-				desc->suite.comp.decomp.vecs,
-				desc->suite.comp.comp.count,
-				desc->suite.comp.decomp.count);
-
-		crypto_free_comp(comp);
-	}
+	acomp = crypto_alloc_acomp(driver, type, mask);
+	if (IS_ERR(acomp)) {
+		if (PTR_ERR(acomp) == -ENOENT)
+			return 0;
+		pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
+		       driver, PTR_ERR(acomp));
+		return PTR_ERR(acomp);
+	}
+	err = test_acomp(acomp, desc->suite.comp.comp.vecs,
+			 desc->suite.comp.decomp.vecs,
+			 desc->suite.comp.comp.count,
+			 desc->suite.comp.decomp.count);
+	crypto_free_acomp(acomp);
 
 	return err;
 }
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index ea3b95bdbde3..1e3809d28abd 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -24,7 +24,6 @@
  */
 #define CRYPTO_ALG_TYPE_MASK		0x0000000f
 #define CRYPTO_ALG_TYPE_CIPHER		0x00000001
-#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
 #define CRYPTO_ALG_TYPE_AEAD		0x00000003
 #define CRYPTO_ALG_TYPE_LSKCIPHER	0x00000004
 #define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
@@ -246,26 +245,7 @@ struct cipher_alg {
 	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
 };
 
-/**
- * struct compress_alg - compression/decompression algorithm
- * @coa_compress: Compress a buffer of specified length, storing the resulting
- *		  data in the specified buffer. Return the length of the
- *		  compressed data in dlen.
- * @coa_decompress: Decompress the source buffer, storing the uncompressed
- *		    data in the specified buffer. The length of the data is
- *		    returned in dlen.
- *
- * All fields are mandatory.
- */
-struct compress_alg {
-	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
-			    unsigned int slen, u8 *dst, unsigned int *dlen);
-	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
-			      unsigned int slen, u8 *dst, unsigned int *dlen);
-};
-
 #define cra_cipher	cra_u.cipher
-#define cra_compress	cra_u.compress
 
 /**
  * struct crypto_alg - definition of a cryptograpic cipher algorithm
@@ -316,7 +296,7 @@ struct compress_alg {
  *	      transformation types. There are multiple options, such as
  *	      &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
  *	      This field might be empty. In that case, there are no common
- *	      callbacks. This is the case for: cipher, compress, shash.
+ *	      callbacks. This is the case for: cipher.
  * @cra_u: Callbacks implementing the transformation. This is a union of
  *	   multiple structures. Depending on the type of transformation selected
  *	   by @cra_type and @cra_flags above, the associated structure must be
@@ -335,8 +315,6 @@
  *	      @cra_init.
  * @cra_u.cipher: Union member which contains a single-block symmetric cipher
  *		  definition. See @struct @cipher_alg.
- * @cra_u.compress: Union member which contains a (de)compression algorithm.
- *		    See @struct @compress_alg.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
 * @cra_list: internally used
 * @cra_users: internally used
@@ -366,7 +344,6 @@ struct crypto_alg {
 	union {
 		struct cipher_alg cipher;
-		struct compress_alg compress;
 	} cra_u;
 
 	int (*cra_init)(struct crypto_tfm *tfm);
@@ -440,10 +417,6 @@ struct crypto_tfm {
 	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
-struct crypto_comp {
-	struct crypto_tfm base;
-};
-
 /*
  * Transform user interface.
  */
@@ -500,53 +473,6 @@ static inline unsigned int crypto_tfm_ctx_alignment(void)
 	return __alignof__(tfm->__crt_ctx);
 }
 
-static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
-{
-	return (struct crypto_comp *)tfm;
-}
-
-static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
-						    u32 type, u32 mask)
-{
-	type &= ~CRYPTO_ALG_TYPE_MASK;
-	type |= CRYPTO_ALG_TYPE_COMPRESS;
-	mask |= CRYPTO_ALG_TYPE_MASK;
-
-	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
-{
-	return &tfm->base;
-}
-
-static inline void crypto_free_comp(struct crypto_comp *tfm)
-{
-	crypto_free_tfm(crypto_comp_tfm(tfm));
-}
-
-static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
-{
-	type &= ~CRYPTO_ALG_TYPE_MASK;
-	type |= CRYPTO_ALG_TYPE_COMPRESS;
-	mask |= CRYPTO_ALG_TYPE_MASK;
-
-	return crypto_has_alg(alg_name, type, mask);
-}
-
-static inline const char *crypto_comp_name(struct crypto_comp *tfm)
-{
-	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
-}
-
-int crypto_comp_compress(struct crypto_comp *tfm,
-			 const u8 *src, unsigned int slen,
-			 u8 *dst, unsigned int *dlen);
-
-int crypto_comp_decompress(struct crypto_comp *tfm,
-			   const u8 *src, unsigned int slen,
-			   u8 *dst, unsigned int *dlen);
-
 static inline void crypto_reqchain_init(struct crypto_async_request *req)
 {
 	req->err = -EINPROGRESS;
-- 
cgit v1.2.3
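
With the 'comp' API removed, former crypto_comp users go through the
request-based acomp interface instead. A minimal sketch of the equivalent of
the removed crypto_comp_compress() helper (example_acomp_compress() is
hypothetical; it assumes linearly mapped buffers and waits synchronously via
the generic crypto_req_done()/crypto_wait_req() helpers):

#include <crypto/acompress.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_acomp_compress(const char *alg, const u8 *src,
				  unsigned int slen, u8 *dst,
				  unsigned int *dlen)
{
	struct scatterlist sg_src, sg_dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_acomp(alg, 0, 0);	/* e.g. "deflate" */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg_src, src, slen);
	sg_init_one(&sg_dst, dst, *dlen);
	acomp_request_set_params(req, &sg_src, &sg_dst, slen, *dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
	if (!err)
		*dlen = req->dlen;	/* bytes written to dst */

	acomp_request_free(req);
out_free_tfm:
	crypto_free_acomp(tfm);
	return err;
}

Decompression is symmetrical via crypto_acomp_decompress(); because completion
is signalled through the callback, the same code drives synchronous software
implementations and asynchronous hardware offload alike.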