Diffstat (limited to 'include/crypto')
-rw-r--r--  include/crypto/acompress.h           | 109
-rw-r--r--  include/crypto/algapi.h              |  37
-rw-r--r--  include/crypto/blake2b.h             |  31
-rw-r--r--  include/crypto/chacha.h              |  89
-rw-r--r--  include/crypto/ctr.h                 |  50
-rw-r--r--  include/crypto/ghash.h               |   4
-rw-r--r--  include/crypto/hash.h                | 176
-rw-r--r--  include/crypto/internal/acompress.h  | 128
-rw-r--r--  include/crypto/internal/blake2b.h    |  92
-rw-r--r--  include/crypto/internal/blockhash.h  |  52
-rw-r--r--  include/crypto/internal/chacha.h     |  43
-rw-r--r--  include/crypto/internal/engine.h     |   5
-rw-r--r--  include/crypto/internal/geniv.h      |   1
-rw-r--r--  include/crypto/internal/hash.h       | 117
-rw-r--r--  include/crypto/internal/poly1305.h   |  28
-rw-r--r--  include/crypto/internal/scompress.h  |  17
-rw-r--r--  include/crypto/internal/sha2.h       |  66
-rw-r--r--  include/crypto/internal/simd.h       |  10
-rw-r--r--  include/crypto/internal/skcipher.h   |  49
-rw-r--r--  include/crypto/md5.h                 |   3
-rw-r--r--  include/crypto/null.h                |   3
-rw-r--r--  include/crypto/poly1305.h            |  67
-rw-r--r--  include/crypto/polyval.h             |   8
-rw-r--r--  include/crypto/rng.h                 |   8
-rw-r--r--  include/crypto/scatterwalk.h         |  65
-rw-r--r--  include/crypto/sha1.h                |   9
-rw-r--r--  include/crypto/sha1_base.h           |  81
-rw-r--r--  include/crypto/sha2.h                |  62
-rw-r--r--  include/crypto/sha256_base.h         | 135
-rw-r--r--  include/crypto/sha3.h                |  20
-rw-r--r--  include/crypto/sha512_base.h         |  88
-rw-r--r--  include/crypto/sig.h                 |   2
-rw-r--r--  include/crypto/sm3.h                 |   4
-rw-r--r--  include/crypto/sm3_base.h            |  92
-rw-r--r--  include/crypto/streebog.h            |   5
35 files changed, 877 insertions, 879 deletions
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index c497c73baf13..9eacb9fa375d 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -32,30 +32,28 @@
/* Set this bit if virtual address destination cannot be used for DMA. */
#define CRYPTO_ACOMP_REQ_DST_NONDMA 0x00000010
-/* Set this bit if source is a folio. */
-#define CRYPTO_ACOMP_REQ_SRC_FOLIO 0x00000020
-
-/* Set this bit if destination is a folio. */
-#define CRYPTO_ACOMP_REQ_DST_FOLIO 0x00000040
+/* Private flags that should not be touched by the user. */
+#define CRYPTO_ACOMP_REQ_PRIVATE \
+ (CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | \
+ CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA)
#define CRYPTO_ACOMP_DST_MAX 131072
#define MAX_SYNC_COMP_REQSIZE 0
-#define ACOMP_REQUEST_ALLOC(name, tfm, gfp) \
+#define ACOMP_REQUEST_ON_STACK(name, tfm) \
char __##name##_req[sizeof(struct acomp_req) + \
MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
struct acomp_req *name = acomp_request_on_stack_init( \
- __##name##_req, (tfm), (gfp), false)
+ __##name##_req, (tfm))
+
+#define ACOMP_REQUEST_CLONE(name, gfp) \
+ acomp_request_clone(name, sizeof(__##name##_req), gfp)
struct acomp_req;
struct folio;
struct acomp_req_chain {
- struct list_head head;
- struct acomp_req *req0;
- struct acomp_req *cur;
- int (*op)(struct acomp_req *req);
crypto_completion_t compl;
void *data;
struct scatterlist ssg;
@@ -68,8 +66,6 @@ struct acomp_req_chain {
u8 *dst;
struct folio *dfolio;
};
- size_t soff;
- size_t doff;
u32 flags;
};
@@ -81,10 +77,6 @@ struct acomp_req_chain {
* @dst: Destination scatterlist
* @svirt: Source virtual address
* @dvirt: Destination virtual address
- * @sfolio: Source folio
- * @soff: Source folio offset
- * @dfolio: Destination folio
- * @doff: Destination folio offset
* @slen: Size of the input buffer
* @dlen: Size of the output buffer and number of bytes produced
* @chain: Private API code data, do not use
@@ -95,15 +87,11 @@ struct acomp_req {
union {
struct scatterlist *src;
const u8 *svirt;
- struct folio *sfolio;
};
union {
struct scatterlist *dst;
u8 *dvirt;
- struct folio *dfolio;
};
- size_t soff;
- size_t doff;
unsigned int slen;
unsigned int dlen;
@@ -126,18 +114,11 @@ struct crypto_acomp {
int (*compress)(struct acomp_req *req);
int (*decompress)(struct acomp_req *req);
unsigned int reqsize;
- struct crypto_acomp *fb;
struct crypto_tfm base;
};
-struct crypto_acomp_stream {
- spinlock_t lock;
- void *ctx;
-};
-
#define COMP_ALG_COMMON { \
struct crypto_alg base; \
- struct crypto_acomp_stream __percpu *stream; \
}
struct comp_alg_common COMP_ALG_COMMON;
@@ -213,7 +194,7 @@ static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
static inline void acomp_request_set_tfm(struct acomp_req *req,
struct crypto_acomp *tfm)
{
- req->base.tfm = crypto_acomp_tfm(tfm);
+ crypto_request_set_tfm(&req->base, crypto_acomp_tfm(tfm));
}
static inline bool acomp_is_async(struct crypto_acomp *tfm)
@@ -310,6 +291,11 @@ static inline void *acomp_request_extra(struct acomp_req *req)
return (void *)((char *)req + len);
}
+static inline bool acomp_req_on_stack(struct acomp_req *req)
+{
+ return crypto_req_on_stack(&req->base);
+}
+
/**
* acomp_request_free() -- zeroize and free asynchronous (de)compression
* request as well as the output buffer if allocated
@@ -319,7 +305,7 @@ static inline void *acomp_request_extra(struct acomp_req *req)
*/
static inline void acomp_request_free(struct acomp_req *req)
{
- if (!req || (req->base.flags & CRYPTO_TFM_REQ_ON_STACK))
+ if (!req || acomp_req_on_stack(req))
return;
kfree_sensitive(req);
}
@@ -340,17 +326,9 @@ static inline void acomp_request_set_callback(struct acomp_req *req,
crypto_completion_t cmpl,
void *data)
{
- u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA |
- CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA |
- CRYPTO_ACOMP_REQ_SRC_FOLIO | CRYPTO_ACOMP_REQ_DST_FOLIO |
- CRYPTO_TFM_REQ_ON_STACK;
-
- req->base.complete = cmpl;
- req->base.data = data;
- req->base.flags &= keep;
- req->base.flags |= flgs & ~keep;
-
- crypto_reqchain_init(&req->base);
+ flgs &= ~CRYPTO_ACOMP_REQ_PRIVATE;
+ flgs |= req->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
+ crypto_request_set_callback(&req->base, flgs, cmpl, data);
}
/**
@@ -379,8 +357,6 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT |
CRYPTO_ACOMP_REQ_SRC_NONDMA |
- CRYPTO_ACOMP_REQ_SRC_FOLIO |
- CRYPTO_ACOMP_REQ_DST_FOLIO |
CRYPTO_ACOMP_REQ_DST_VIRT |
CRYPTO_ACOMP_REQ_DST_NONDMA);
}
@@ -403,7 +379,6 @@ static inline void acomp_request_set_src_sg(struct acomp_req *req,
req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
}
/**
@@ -423,7 +398,6 @@ static inline void acomp_request_set_src_dma(struct acomp_req *req,
req->slen = slen;
req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
}
@@ -444,7 +418,6 @@ static inline void acomp_request_set_src_nondma(struct acomp_req *req,
req->svirt = src;
req->slen = slen;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA;
req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
}
@@ -463,13 +436,9 @@ static inline void acomp_request_set_src_folio(struct acomp_req *req,
struct folio *folio, size_t off,
unsigned int len)
{
- req->sfolio = folio;
- req->soff = off;
- req->slen = len;
-
- req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
- req->base.flags |= CRYPTO_ACOMP_REQ_SRC_FOLIO;
+ sg_init_table(&req->chain.ssg, 1);
+ sg_set_folio(&req->chain.ssg, folio, len, off);
+ acomp_request_set_src_sg(req, &req->chain.ssg, len);
}
/**
@@ -490,7 +459,6 @@ static inline void acomp_request_set_dst_sg(struct acomp_req *req,
req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
}
/**
@@ -510,7 +478,6 @@ static inline void acomp_request_set_dst_dma(struct acomp_req *req,
req->dlen = dlen;
req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
}
@@ -530,7 +497,6 @@ static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
req->dvirt = dst;
req->dlen = dlen;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
}
@@ -549,19 +515,9 @@ static inline void acomp_request_set_dst_folio(struct acomp_req *req,
struct folio *folio, size_t off,
unsigned int len)
{
- req->dfolio = folio;
- req->doff = off;
- req->dlen = len;
-
- req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
- req->base.flags |= CRYPTO_ACOMP_REQ_DST_FOLIO;
-}
-
-static inline void acomp_request_chain(struct acomp_req *req,
- struct acomp_req *head)
-{
- crypto_request_chain(&req->base, &head->base);
+ sg_init_table(&req->chain.dsg, 1);
+ sg_set_folio(&req->chain.dsg, folio, len, off);
+ acomp_request_set_dst_sg(req, &req->chain.dsg, len);
}
/**
@@ -587,18 +543,15 @@ int crypto_acomp_compress(struct acomp_req *req);
int crypto_acomp_decompress(struct acomp_req *req);
static inline struct acomp_req *acomp_request_on_stack_init(
- char *buf, struct crypto_acomp *tfm, gfp_t gfp, bool stackonly)
+ char *buf, struct crypto_acomp *tfm)
{
- struct acomp_req *req;
-
- if (!stackonly && (req = acomp_request_alloc(tfm, gfp)))
- return req;
-
- req = (void *)buf;
- acomp_request_set_tfm(req, tfm->fb);
- req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
+ struct acomp_req *req = (void *)buf;
+ crypto_stack_request_init(&req->base, crypto_acomp_tfm(tfm));
return req;
}
+struct acomp_req *acomp_request_clone(struct acomp_req *req,
+ size_t total, gfp_t gfp);
+
#endif
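
The reworked ACOMP_REQUEST_ON_STACK() above drops the gfp_t argument and the heap-allocation fallback. A minimal sketch of the resulting caller pattern, assuming a synchronous tfm and eliding error handling (example_compress() is illustrative, not part of the API):

static int example_compress(struct crypto_acomp *tfm,
			    struct scatterlist *src, unsigned int slen,
			    struct scatterlist *dst, unsigned int dlen)
{
	ACOMP_REQUEST_ON_STACK(req, tfm); /* was ACOMP_REQUEST_ALLOC(name, tfm, gfp) */

	/* Flags in CRYPTO_ACOMP_REQ_PRIVATE are preserved across this call. */
	acomp_request_set_callback(req, 0, NULL, NULL);
	acomp_request_set_params(req, src, dst, slen, dlen);
	return crypto_acomp_compress(req);
}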
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 6e07bbc04089..188eface0a11 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -68,16 +68,17 @@ struct crypto_instance {
struct crypto_spawn *spawns;
};
- struct work_struct free_work;
-
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
struct crypto_template {
struct list_head list;
struct hlist_head instances;
+ struct hlist_head dead;
struct module *module;
+ struct work_struct free_work;
+
int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
char name[CRYPTO_MAX_ALG_NAME];
@@ -106,18 +107,6 @@ struct crypto_queue {
unsigned int max_qlen;
};
-struct scatter_walk {
- /* Must be the first member, see struct skcipher_walk. */
- union {
- void *const addr;
-
- /* Private API field, do not touch. */
- union crypto_no_such_thing *__addr;
- };
- struct scatterlist *sg;
- unsigned int offset;
-};
-
struct crypto_attr_alg {
char name[CRYPTO_MAX_ALG_NAME];
};
@@ -157,8 +146,16 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
-int crypto_inst_setname(struct crypto_instance *inst, const char *name,
- struct crypto_alg *alg);
+int __crypto_inst_setname(struct crypto_instance *inst, const char *name,
+ const char *driver, struct crypto_alg *alg);
+
+#define crypto_inst_setname(inst, name, ...) \
+ CONCATENATE(crypto_inst_setname_, COUNT_ARGS(__VA_ARGS__))( \
+ inst, name, ##__VA_ARGS__)
+#define crypto_inst_setname_1(inst, name, alg) \
+ __crypto_inst_setname(inst, name, name, alg)
+#define crypto_inst_setname_2(inst, name, driver, alg) \
+ __crypto_inst_setname(inst, name, driver, alg)
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
@@ -266,14 +263,14 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}
-static inline bool crypto_request_chained(struct crypto_async_request *req)
+static inline bool crypto_tfm_req_virt(struct crypto_tfm *tfm)
{
- return !list_empty(&req->list);
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_VIRT;
}
-static inline bool crypto_tfm_req_chain(struct crypto_tfm *tfm)
+static inline u32 crypto_request_flags(struct crypto_async_request *req)
{
- return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_CHAIN;
+ return req->flags & ~CRYPTO_TFM_REQ_ON_STACK;
}
#endif /* _CRYPTO_ALGAPI_H */
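
The COUNT_ARGS()/CONCATENATE() dispatch above keeps existing two-argument callers of crypto_inst_setname() working while allowing a separate driver name. Inside a template ->create() implementation, both spellings are therefore valid (the "xts"/"xts-hw" strings are illustrative):

	/* driver name defaults to the algorithm name */
	err = crypto_inst_setname(inst, "xts", alg);
	/* or spell out a distinct driver name */
	err = crypto_inst_setname(inst, "xts", "xts-hw", alg);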
diff --git a/include/crypto/blake2b.h b/include/crypto/blake2b.h
index 0c0176285349..dd7694477e50 100644
--- a/include/crypto/blake2b.h
+++ b/include/crypto/blake2b.h
@@ -7,10 +7,20 @@
#include <linux/types.h>
#include <linux/string.h>
+struct blake2b_state {
+ /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
+ u64 h[8];
+ u64 t[2];
+ /* The true state ends here. The rest is temporary storage. */
+ u64 f[2];
+};
+
enum blake2b_lengths {
BLAKE2B_BLOCK_SIZE = 128,
BLAKE2B_HASH_SIZE = 64,
BLAKE2B_KEY_SIZE = 64,
+ BLAKE2B_STATE_SIZE = offsetof(struct blake2b_state, f),
+ BLAKE2B_DESC_SIZE = sizeof(struct blake2b_state),
BLAKE2B_160_HASH_SIZE = 20,
BLAKE2B_256_HASH_SIZE = 32,
@@ -18,16 +28,6 @@ enum blake2b_lengths {
BLAKE2B_512_HASH_SIZE = 64,
};
-struct blake2b_state {
- /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
- u64 h[8];
- u64 t[2];
- u64 f[2];
- u8 buf[BLAKE2B_BLOCK_SIZE];
- unsigned int buflen;
- unsigned int outlen;
-};
-
enum blake2b_iv {
BLAKE2B_IV0 = 0x6A09E667F3BCC908ULL,
BLAKE2B_IV1 = 0xBB67AE8584CAA73BULL,
@@ -40,7 +40,7 @@ enum blake2b_iv {
};
static inline void __blake2b_init(struct blake2b_state *state, size_t outlen,
- const void *key, size_t keylen)
+ size_t keylen)
{
state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen);
state->h[1] = BLAKE2B_IV1;
@@ -52,15 +52,6 @@ static inline void __blake2b_init(struct blake2b_state *state, size_t outlen,
state->h[7] = BLAKE2B_IV7;
state->t[0] = 0;
state->t[1] = 0;
- state->f[0] = 0;
- state->f[1] = 0;
- state->buflen = 0;
- state->outlen = outlen;
- if (keylen) {
- memcpy(state->buf, key, keylen);
- memset(&state->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen);
- state->buflen = BLAKE2B_BLOCK_SIZE;
- }
}
#endif /* _CRYPTO_BLAKE2B_H */
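
With the buffer fields gone from struct blake2b_state, __blake2b_init() now only folds the parameter word 0x01010000 | keylen << 8 | outlen into h[0]; keyed users feed one zero-padded key block through the normal update path instead (see crypto_blake2b_init() in the internal header below). A hedged sketch of an unkeyed setup:

	struct blake2b_state state;

	__blake2b_init(&state, BLAKE2B_512_HASH_SIZE, 0);
	/* t[] starts at zero; f[] is now set per compression call via the
	 * blake2b_set_lastblock()/blake2b_set_nonlast() helpers */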
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index f8cc073bba41..91f6b4cf561c 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -16,6 +16,7 @@
#define _CRYPTO_CHACHA_H
#include <linux/unaligned.h>
+#include <linux/string.h>
#include <linux/types.h>
/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */
@@ -25,21 +26,32 @@
#define CHACHA_BLOCK_SIZE 64
#define CHACHAPOLY_IV_SIZE 12
-#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32))
+#define CHACHA_KEY_WORDS 8
+#define CHACHA_STATE_WORDS 16
+#define HCHACHA_OUT_WORDS 8
/* 192-bit nonce, then 64-bit stream position */
#define XCHACHA_IV_SIZE 32
-void chacha_block_generic(u32 *state, u8 *stream, int nrounds);
-static inline void chacha20_block(u32 *state, u8 *stream)
+struct chacha_state {
+ u32 x[CHACHA_STATE_WORDS];
+};
+
+void chacha_block_generic(struct chacha_state *state,
+ u8 out[CHACHA_BLOCK_SIZE], int nrounds);
+static inline void chacha20_block(struct chacha_state *state,
+ u8 out[CHACHA_BLOCK_SIZE])
{
- chacha_block_generic(state, stream, 20);
+ chacha_block_generic(state, out, 20);
}
-void hchacha_block_arch(const u32 *state, u32 *out, int nrounds);
-void hchacha_block_generic(const u32 *state, u32 *out, int nrounds);
+void hchacha_block_arch(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds);
+void hchacha_block_generic(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds);
-static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
+static inline void hchacha_block(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds)
{
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
hchacha_block_arch(state, out, nrounds);
@@ -54,37 +66,40 @@ enum chacha_constants { /* expand 32-byte k */
CHACHA_CONSTANT_TE_K = 0x6b206574U
};
-static inline void chacha_init_consts(u32 *state)
+static inline void chacha_init_consts(struct chacha_state *state)
{
- state[0] = CHACHA_CONSTANT_EXPA;
- state[1] = CHACHA_CONSTANT_ND_3;
- state[2] = CHACHA_CONSTANT_2_BY;
- state[3] = CHACHA_CONSTANT_TE_K;
+ state->x[0] = CHACHA_CONSTANT_EXPA;
+ state->x[1] = CHACHA_CONSTANT_ND_3;
+ state->x[2] = CHACHA_CONSTANT_2_BY;
+ state->x[3] = CHACHA_CONSTANT_TE_K;
}
-static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv)
+static inline void chacha_init(struct chacha_state *state,
+ const u32 key[CHACHA_KEY_WORDS],
+ const u8 iv[CHACHA_IV_SIZE])
{
chacha_init_consts(state);
- state[4] = key[0];
- state[5] = key[1];
- state[6] = key[2];
- state[7] = key[3];
- state[8] = key[4];
- state[9] = key[5];
- state[10] = key[6];
- state[11] = key[7];
- state[12] = get_unaligned_le32(iv + 0);
- state[13] = get_unaligned_le32(iv + 4);
- state[14] = get_unaligned_le32(iv + 8);
- state[15] = get_unaligned_le32(iv + 12);
+ state->x[4] = key[0];
+ state->x[5] = key[1];
+ state->x[6] = key[2];
+ state->x[7] = key[3];
+ state->x[8] = key[4];
+ state->x[9] = key[5];
+ state->x[10] = key[6];
+ state->x[11] = key[7];
+ state->x[12] = get_unaligned_le32(iv + 0);
+ state->x[13] = get_unaligned_le32(iv + 4);
+ state->x[14] = get_unaligned_le32(iv + 8);
+ state->x[15] = get_unaligned_le32(iv + 12);
}
-void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
+void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds);
-void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
+void chacha_crypt_generic(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds);
-static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src,
+static inline void chacha_crypt(struct chacha_state *state,
+ u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
@@ -93,10 +108,24 @@ static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src,
chacha_crypt_generic(state, dst, src, bytes, nrounds);
}
-static inline void chacha20_crypt(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes)
+static inline void chacha20_crypt(struct chacha_state *state,
+ u8 *dst, const u8 *src, unsigned int bytes)
{
chacha_crypt(state, dst, src, bytes, 20);
}
+static inline void chacha_zeroize_state(struct chacha_state *state)
+{
+ memzero_explicit(state, sizeof(*state));
+}
+
+#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)
+bool chacha_is_arch_optimized(void);
+#else
+static inline bool chacha_is_arch_optimized(void)
+{
+ return false;
+}
+#endif
+
#endif /* _CRYPTO_CHACHA_H */
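
A minimal sketch of the new struct-based interface, assuming the key words are already in CPU order as chacha_init() expects (example_chacha20() is illustrative):

static void example_chacha20(const u32 key[CHACHA_KEY_WORDS],
			     const u8 iv[CHACHA_IV_SIZE],
			     u8 *dst, const u8 *src, unsigned int len)
{
	struct chacha_state state;

	chacha_init(&state, key, iv);
	chacha20_crypt(&state, dst, src, len);
	chacha_zeroize_state(&state);	/* wipe key-derived state */
}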
diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h
index da1ee73e9ce9..06984a26c8cf 100644
--- a/include/crypto/ctr.h
+++ b/include/crypto/ctr.h
@@ -8,58 +8,8 @@
#ifndef _CRYPTO_CTR_H
#define _CRYPTO_CTR_H
-#include <crypto/algapi.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
#define CTR_RFC3686_NONCE_SIZE 4
#define CTR_RFC3686_IV_SIZE 8
#define CTR_RFC3686_BLOCK_SIZE 16
-static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req,
- void (*fn)(struct crypto_skcipher *,
- const u8 *, u8 *))
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- int blocksize = crypto_skcipher_chunksize(tfm);
- u8 buf[MAX_CIPHER_BLOCKSIZE];
- struct skcipher_walk walk;
- int err;
-
- /* avoid integer division due to variable blocksize parameter */
- if (WARN_ON_ONCE(!is_power_of_2(blocksize)))
- return -EINVAL;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes > 0) {
- const u8 *src = walk.src.virt.addr;
- u8 *dst = walk.dst.virt.addr;
- int nbytes = walk.nbytes;
- int tail = 0;
-
- if (nbytes < walk.total) {
- tail = walk.nbytes & (blocksize - 1);
- nbytes -= tail;
- }
-
- do {
- int bsize = min(nbytes, blocksize);
-
- fn(tfm, walk.iv, buf);
-
- crypto_xor_cpy(dst, src, buf, bsize);
- crypto_inc(walk.iv, blocksize);
-
- dst += bsize;
- src += bsize;
- nbytes -= bsize;
- } while (nbytes > 0);
-
- err = skcipher_walk_done(&walk, tail);
- }
- return err;
-}
-
#endif /* _CRYPTO_CTR_H */
diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h
index f832c9f2aca3..043d938e9a2c 100644
--- a/include/crypto/ghash.h
+++ b/include/crypto/ghash.h
@@ -7,18 +7,18 @@
#define __CRYPTO_GHASH_H__
#include <linux/types.h>
-#include <crypto/gf128mul.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
+struct gf128mul_4k;
+
struct ghash_ctx {
struct gf128mul_4k *gf128;
};
struct ghash_desc_ctx {
u8 buffer[GHASH_BLOCK_SIZE];
- u32 bytes;
};
#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 2aa83ee0ec98..6f6b9de12cd3 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -8,13 +8,17 @@
#ifndef _CRYPTO_HASH_H
#define _CRYPTO_HASH_H
-#include <linux/atomic.h>
#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/string.h>
/* Set this bit for virtual address instead of SG list. */
#define CRYPTO_AHASH_REQ_VIRT 0x00000001
+#define CRYPTO_AHASH_REQ_PRIVATE \
+ CRYPTO_AHASH_REQ_VIRT
+
struct crypto_ahash;
/**
@@ -61,6 +65,10 @@ struct ahash_request {
};
u8 *result;
+ struct scatterlist sg_head[2];
+ crypto_completion_t saved_complete;
+ void *saved_data;
+
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -81,6 +89,8 @@ struct ahash_request {
* transformation object. Data processing can happen synchronously
* [SHASH] or asynchronously [AHASH] at this point. Driver must not use
* req->result.
+ * For block-only algorithms, @update must return the number
+ * of bytes to store in the API partial block buffer.
* @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
* transformation and retrieves the resulting hash from the driver and
* pushes it back to upper layers. No data processing happens at this
@@ -123,6 +133,10 @@ struct ahash_request {
* data so the transformation can continue from this point onward. No
* data processing happens at this point. Driver must not use
* req->result.
+ * @export_core: Export partial state without partial block. Only defined
+ * for algorithms that are not block-only.
+ * @import_core: Import partial state without partial block. Only defined
+ * for algorithms that are not block-only.
* @init_tfm: Initialize the cryptographic transformation object.
* This function is called only once at the instantiation
* time, right after the transformation context was
@@ -135,7 +149,6 @@ struct ahash_request {
* This is a counterpart to @init_tfm, used to remove
* various changes set in @init_tfm.
* @clone_tfm: Copy transform into new object, may allocate memory.
- * @reqsize: Size of the request context.
* @halg: see struct hash_alg_common
*/
struct ahash_alg {
@@ -146,14 +159,14 @@ struct ahash_alg {
int (*digest)(struct ahash_request *req);
int (*export)(struct ahash_request *req, void *out);
int (*import)(struct ahash_request *req, const void *in);
+ int (*export_core)(struct ahash_request *req, void *out);
+ int (*import_core)(struct ahash_request *req, const void *in);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
int (*init_tfm)(struct crypto_ahash *tfm);
void (*exit_tfm)(struct crypto_ahash *tfm);
int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src);
- unsigned int reqsize;
-
struct hash_alg_common halg;
};
@@ -164,17 +177,31 @@ struct shash_desc {
#define HASH_MAX_DIGESTSIZE 64
+/* Worst case is sha3-224. */
+#define HASH_MAX_STATESIZE (200 + 144 + 1)
+
/*
- * Worst case is hmac(sha3-224-generic). Its context is a nested 'shash_desc'
- * containing a 'struct sha3_state'.
+ * Worst case is hmac(sha3-224-s390). Its context is a nested 'shash_desc'
+ * containing a 'struct s390_sha_ctx'.
*/
#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360)
+#define MAX_SYNC_HASH_REQSIZE (sizeof(struct ahash_request) + \
+ HASH_MAX_DESCSIZE)
#define SHASH_DESC_ON_STACK(shash, ctx) \
char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
__aligned(__alignof__(struct shash_desc)); \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
+#define HASH_REQUEST_ON_STACK(name, _tfm) \
+ char __##name##_req[sizeof(struct ahash_request) + \
+ MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct ahash_request *name = \
+ ahash_request_on_stack_init(__##name##_req, (_tfm))
+
+#define HASH_REQUEST_CLONE(name, gfp) \
+ hash_request_clone(name, sizeof(__##name##_req), gfp)
+
/**
* struct shash_alg - synchronous message digest definition
* @init: see struct ahash_alg
@@ -184,6 +211,8 @@ struct shash_desc {
* @digest: see struct ahash_alg
* @export: see struct ahash_alg
* @import: see struct ahash_alg
+ * @export_core: see struct ahash_alg
+ * @import_core: see struct ahash_alg
* @setkey: see struct ahash_alg
* @init_tfm: Initialize the cryptographic transformation object.
* This function is called only once at the instantiation
@@ -214,6 +243,8 @@ struct shash_alg {
unsigned int len, u8 *out);
int (*export)(struct shash_desc *desc, void *out);
int (*import)(struct shash_desc *desc, const void *in);
+ int (*export_core)(struct shash_desc *desc, void *out);
+ int (*import_core)(struct shash_desc *desc, const void *in);
int (*setkey)(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
int (*init_tfm)(struct crypto_shash *tfm);
@@ -237,7 +268,6 @@ struct crypto_ahash {
};
struct crypto_shash {
- unsigned int descsize;
struct crypto_tfm base;
};
@@ -251,6 +281,11 @@ struct crypto_shash {
* CRYPTO_ALG_TYPE_SKCIPHER API applies here as well.
*/
+static inline bool ahash_req_on_stack(struct ahash_request *req)
+{
+ return crypto_req_on_stack(&req->base);
+}
+
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_ahash, base);
@@ -458,7 +493,11 @@ int crypto_ahash_finup(struct ahash_request *req);
* -EBUSY if queue is full and request should be resubmitted later;
* other < 0 if an error occurred
*/
-int crypto_ahash_final(struct ahash_request *req);
+static inline int crypto_ahash_final(struct ahash_request *req)
+{
+ req->nbytes = 0;
+ return crypto_ahash_finup(req);
+}
/**
* crypto_ahash_digest() - calculate message digest for a buffer
@@ -547,7 +586,7 @@ int crypto_ahash_update(struct ahash_request *req);
static inline void ahash_request_set_tfm(struct ahash_request *req,
struct crypto_ahash *tfm)
{
- req->base.tfm = crypto_ahash_tfm(tfm);
+ crypto_request_set_tfm(&req->base, crypto_ahash_tfm(tfm));
}
/**
@@ -583,6 +622,12 @@ static inline struct ahash_request *ahash_request_alloc_noprof(
*/
void ahash_request_free(struct ahash_request *req);
+static inline void ahash_request_zero(struct ahash_request *req)
+{
+ memzero_explicit(req, sizeof(*req) +
+ crypto_ahash_reqsize(crypto_ahash_reqtfm(req)));
+}
+
static inline struct ahash_request *ahash_request_cast(
struct crypto_async_request *req)
{
@@ -619,14 +664,9 @@ static inline void ahash_request_set_callback(struct ahash_request *req,
crypto_completion_t compl,
void *data)
{
- u32 keep = CRYPTO_AHASH_REQ_VIRT;
-
- req->base.complete = compl;
- req->base.data = data;
- flags &= ~keep;
- req->base.flags &= keep;
- req->base.flags |= flags;
- crypto_reqchain_init(&req->base);
+ flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
+ flags |= req->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
+ crypto_request_set_callback(&req->base, flags, compl, data);
}
/**
@@ -675,12 +715,6 @@ static inline void ahash_request_set_virt(struct ahash_request *req,
req->base.flags |= CRYPTO_AHASH_REQ_VIRT;
}
-static inline void ahash_request_chain(struct ahash_request *req,
- struct ahash_request *head)
-{
- crypto_request_chain(&req->base, &head->base);
-}
-
/**
* DOC: Synchronous Message Digest API
*
@@ -816,7 +850,7 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
*/
static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
{
- return tfm->descsize;
+ return crypto_shash_alg(tfm)->descsize;
}
static inline void *shash_desc_ctx(struct shash_desc *desc)
@@ -834,7 +868,7 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
* cipher handle must point to a keyed message digest cipher in order for this
* function to succeed.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -851,7 +885,7 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
* crypto_shash_update and crypto_shash_final. The parameters have the same
* meaning as discussed for those separate three functions.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
@@ -871,12 +905,15 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
* directly, and it allocates a hash descriptor on the stack internally.
* Note that this stack allocation may be fairly large.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 on success; < 0 if an error occurred.
*/
int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
unsigned int len, u8 *out);
+int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
+ unsigned int len, u8 *out);
+
/**
* crypto_shash_export() - extract operational state for message digest
* @desc: reference to the operational state handle whose state is exported
@@ -886,7 +923,7 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
* caller-allocated output buffer out which must have sufficient size (e.g. by
* calling crypto_shash_descsize).
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the export creation was successful; < 0 if an error occurred
*/
int crypto_shash_export(struct shash_desc *desc, void *out);
@@ -900,7 +937,7 @@ int crypto_shash_export(struct shash_desc *desc, void *out);
* the input buffer. That buffer should have been generated with the
* crypto_ahash_export function.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the import was successful; < 0 if an error occurred
*/
int crypto_shash_import(struct shash_desc *desc, const void *in);
@@ -913,19 +950,29 @@ int crypto_shash_import(struct shash_desc *desc, const void *in);
* operational state handle. Any potentially existing state created by
* previous operations is discarded.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest initialization was successful; < 0 if an
* error occurred
*/
-static inline int crypto_shash_init(struct shash_desc *desc)
-{
- struct crypto_shash *tfm = desc->tfm;
+int crypto_shash_init(struct shash_desc *desc);
- if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return crypto_shash_alg(tfm)->init(desc);
-}
+/**
+ * crypto_shash_finup() - calculate message digest of buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
+int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out);
/**
* crypto_shash_update() - add data to message digest for processing
@@ -935,12 +982,15 @@ static inline int crypto_shash_init(struct shash_desc *desc)
*
* Updates the message digest state of the operational state handle.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest update was successful; < 0 if an error
* occurred
*/
-int crypto_shash_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+static inline int crypto_shash_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return crypto_shash_finup(desc, data, len, NULL);
+}
/**
* crypto_shash_final() - calculate message digest
@@ -952,29 +1002,14 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
* into the output buffer. The caller must ensure that the output buffer is
* large enough by using crypto_shash_digestsize.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
-int crypto_shash_final(struct shash_desc *desc, u8 *out);
-
-/**
- * crypto_shash_finup() - calculate message digest of buffer
- * @desc: see crypto_shash_final()
- * @data: see crypto_shash_update()
- * @len: see crypto_shash_update()
- * @out: see crypto_shash_final()
- *
- * This function is a "short-hand" for the function calls of
- * crypto_shash_update and crypto_shash_final. The parameters have the same
- * meaning as discussed for those separate functions.
- *
- * Context: Any context.
- * Return: 0 if the message digest creation was successful; < 0 if an error
- * occurred
- */
-int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
+static inline int crypto_shash_final(struct shash_desc *desc, u8 *out)
+{
+ return crypto_shash_finup(desc, NULL, 0, out);
+}
static inline void shash_desc_zero(struct shash_desc *desc)
{
@@ -982,14 +1017,25 @@ static inline void shash_desc_zero(struct shash_desc *desc)
sizeof(*desc) + crypto_shash_descsize(desc->tfm));
}
-static inline int ahash_request_err(struct ahash_request *req)
+static inline bool ahash_is_async(struct crypto_ahash *tfm)
{
- return req->base.err;
+ return crypto_tfm_is_async(&tfm->base);
}
-static inline bool ahash_is_async(struct crypto_ahash *tfm)
+static inline struct ahash_request *ahash_request_on_stack_init(
+ char *buf, struct crypto_ahash *tfm)
{
- return crypto_tfm_is_async(&tfm->base);
+ struct ahash_request *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base, crypto_ahash_tfm(tfm));
+ return req;
+}
+
+static inline struct ahash_request *ahash_request_clone(
+ struct ahash_request *req, size_t total, gfp_t gfp)
+{
+ return container_of(crypto_request_clone(&req->base, total, gfp),
+ struct ahash_request, base);
}
#endif /* _CRYPTO_HASH_H */
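
Since crypto_shash_update() and crypto_shash_final() are now thin wrappers around crypto_shash_finup(), the familiar three-step flow is unchanged for callers; a minimal sketch with an on-stack descriptor (example_shash_digest() is illustrative):

static int example_shash_digest(struct crypto_shash *tfm, const u8 *data,
				unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	err = crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, data, len) ?:	/* finup(data, len, NULL) */
	      crypto_shash_final(desc, out);		/* finup(NULL, 0, out) */
	shash_desc_zero(desc);
	return err;
}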
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index aaf59f3236fa..ffffd88bbbad 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -11,12 +11,17 @@
#include <crypto/acompress.h>
#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/compiler_types.h>
+#include <linux/cpumask_types.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue_types.h>
-#define ACOMP_REQUEST_ON_STACK(name, tfm) \
+#define ACOMP_FBREQ_ON_STACK(name, req) \
char __##name##_req[sizeof(struct acomp_req) + \
MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
- struct acomp_req *name = acomp_request_on_stack_init( \
- __##name##_req, (tfm), 0, true)
+ struct acomp_req *name = acomp_fbreq_on_stack_init( \
+ __##name##_req, (req))
/**
* struct acomp_alg - asynchronous compression algorithm
@@ -35,9 +40,7 @@
* counterpart to @init, used to remove various changes set in
* @init.
*
- * @reqsize: Context size for (de)compression requests
* @base: Common crypto API algorithm data structure
- * @stream: Per-cpu memory for algorithm
 * @calg: Common algorithm data structure shared with scomp
*/
struct acomp_alg {
@@ -46,14 +49,61 @@ struct acomp_alg {
int (*init)(struct crypto_acomp *tfm);
void (*exit)(struct crypto_acomp *tfm);
- unsigned int reqsize;
-
union {
struct COMP_ALG_COMMON;
struct comp_alg_common calg;
};
};
+struct crypto_acomp_stream {
+ spinlock_t lock;
+ void *ctx;
+};
+
+struct crypto_acomp_streams {
+ /* These must come first because of struct scomp_alg. */
+ void *(*alloc_ctx)(void);
+ union {
+ void (*free_ctx)(void *);
+ void (*cfree_ctx)(const void *);
+ };
+
+ struct crypto_acomp_stream __percpu *streams;
+ struct work_struct stream_work;
+ cpumask_t stream_want;
+};
+
+struct acomp_walk {
+ union {
+ /* Virtual address of the source. */
+ struct {
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
+ struct {
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
+
+ unsigned int slen;
+ unsigned int dlen;
+
+ int flags;
+};
+
/*
* Transform internal helpers.
*/
@@ -98,17 +148,10 @@ void crypto_unregister_acomp(struct acomp_alg *alg);
int crypto_register_acomps(struct acomp_alg *algs, int count);
void crypto_unregister_acomps(struct acomp_alg *algs, int count);
-static inline bool acomp_request_chained(struct acomp_req *req)
-{
- return crypto_request_chained(&req->base);
-}
-
static inline bool acomp_request_issg(struct acomp_req *req)
{
return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
- CRYPTO_ACOMP_REQ_DST_VIRT |
- CRYPTO_ACOMP_REQ_SRC_FOLIO |
- CRYPTO_ACOMP_REQ_DST_FOLIO));
+ CRYPTO_ACOMP_REQ_DST_VIRT));
}
static inline bool acomp_request_src_isvirt(struct acomp_req *req)
@@ -143,19 +186,62 @@ static inline bool acomp_request_isnondma(struct acomp_req *req)
CRYPTO_ACOMP_REQ_DST_NONDMA);
}
-static inline bool acomp_request_src_isfolio(struct acomp_req *req)
+static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm)
+{
+ return crypto_tfm_req_virt(&tfm->base);
+}
+
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream);
+
+static inline void crypto_acomp_unlock_stream_bh(
+ struct crypto_acomp_stream *stream) __releases(stream)
+{
+ spin_unlock_bh(&stream->lock);
+}
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used);
+void acomp_walk_done_dst(struct acomp_walk *walk, int used);
+int acomp_walk_next_src(struct acomp_walk *walk);
+int acomp_walk_next_dst(struct acomp_walk *walk);
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req, bool atomic);
+
+static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur)
+{
+ return walk->slen != cur;
+}
+
+static inline u32 acomp_request_flags(struct acomp_req *req)
{
- return req->base.flags & CRYPTO_ACOMP_REQ_SRC_FOLIO;
+ return crypto_request_flags(&req->base) & ~CRYPTO_ACOMP_REQ_PRIVATE;
}
-static inline bool acomp_request_dst_isfolio(struct acomp_req *req)
+static inline struct crypto_acomp *crypto_acomp_fb(struct crypto_acomp *tfm)
{
- return req->base.flags & CRYPTO_ACOMP_REQ_DST_FOLIO;
+ return __crypto_acomp_tfm(crypto_acomp_tfm(tfm)->fb);
}
-static inline bool crypto_acomp_req_chain(struct crypto_acomp *tfm)
+static inline struct acomp_req *acomp_fbreq_on_stack_init(
+ char *buf, struct acomp_req *old)
{
- return crypto_tfm_req_chain(&tfm->base);
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(old);
+ struct acomp_req *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base,
+ crypto_acomp_tfm(crypto_acomp_fb(tfm)));
+ acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL);
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE;
+ req->base.flags |= old->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
+ req->src = old->src;
+ req->dst = old->dst;
+ req->slen = old->slen;
+ req->dlen = old->dlen;
+
+ return req;
}
#endif
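
The per-cpu stream context that used to live off comp_alg_common is now reached through the lock/unlock helpers declared above; the expected shape of a user, with the actual (de)compression elided and alg->streams naming a struct crypto_acomp_streams instance, is roughly:

	struct crypto_acomp_stream *stream;

	stream = crypto_acomp_lock_stream_bh(&alg->streams);
	/* ... run the (de)compression using stream->ctx ... */
	crypto_acomp_unlock_stream_bh(stream);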
diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h
index 982fe5e8471c..3e09e2485306 100644
--- a/include/crypto/internal/blake2b.h
+++ b/include/crypto/internal/blake2b.h
@@ -7,65 +7,36 @@
#ifndef _CRYPTO_INTERNAL_BLAKE2B_H
#define _CRYPTO_INTERNAL_BLAKE2B_H
+#include <asm/byteorder.h>
#include <crypto/blake2b.h>
#include <crypto/internal/hash.h>
+#include <linux/array_size.h>
+#include <linux/compiler.h>
+#include <linux/build_bug.h>
+#include <linux/errno.h>
+#include <linux/math.h>
#include <linux/string.h>
-
-void blake2b_compress_generic(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc);
+#include <linux/types.h>
static inline void blake2b_set_lastblock(struct blake2b_state *state)
{
state->f[0] = -1;
+ state->f[1] = 0;
}
-typedef void (*blake2b_compress_t)(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc);
-
-static inline void __blake2b_update(struct blake2b_state *state,
- const u8 *in, size_t inlen,
- blake2b_compress_t compress)
+static inline void blake2b_set_nonlast(struct blake2b_state *state)
{
- const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen;
-
- if (unlikely(!inlen))
- return;
- if (inlen > fill) {
- memcpy(state->buf + state->buflen, in, fill);
- (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE);
- state->buflen = 0;
- in += fill;
- inlen -= fill;
- }
- if (inlen > BLAKE2B_BLOCK_SIZE) {
- const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE);
- /* Hash one less (full) block than strictly possible */
- (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE);
- in += BLAKE2B_BLOCK_SIZE * (nblocks - 1);
- inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1);
- }
- memcpy(state->buf + state->buflen, in, inlen);
- state->buflen += inlen;
+ state->f[0] = 0;
+ state->f[1] = 0;
}
-static inline void __blake2b_final(struct blake2b_state *state, u8 *out,
- blake2b_compress_t compress)
-{
- int i;
-
- blake2b_set_lastblock(state);
- memset(state->buf + state->buflen, 0,
- BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */
- (*compress)(state, state->buf, 1, state->buflen);
- for (i = 0; i < ARRAY_SIZE(state->h); i++)
- __cpu_to_le64s(&state->h[i]);
- memcpy(out, state->h, state->outlen);
-}
+typedef void (*blake2b_compress_t)(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc);
/* Helper functions for shash implementations of BLAKE2b */
struct blake2b_tfm_ctx {
- u8 key[BLAKE2B_KEY_SIZE];
+ u8 key[BLAKE2B_BLOCK_SIZE];
unsigned int keylen;
};
@@ -74,10 +45,13 @@ static inline int crypto_blake2b_setkey(struct crypto_shash *tfm,
{
struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
- if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE)
+ if (keylen > BLAKE2B_KEY_SIZE)
return -EINVAL;
+ BUILD_BUG_ON(BLAKE2B_KEY_SIZE > BLAKE2B_BLOCK_SIZE);
+
memcpy(tctx->key, key, keylen);
+ memset(tctx->key + keylen, 0, BLAKE2B_BLOCK_SIZE - keylen);
tctx->keylen = keylen;
return 0;
@@ -89,26 +63,38 @@ static inline int crypto_blake2b_init(struct shash_desc *desc)
struct blake2b_state *state = shash_desc_ctx(desc);
unsigned int outlen = crypto_shash_digestsize(desc->tfm);
- __blake2b_init(state, outlen, tctx->key, tctx->keylen);
- return 0;
+ __blake2b_init(state, outlen, tctx->keylen);
+ return tctx->keylen ?
+ crypto_shash_update(desc, tctx->key, BLAKE2B_BLOCK_SIZE) : 0;
}
-static inline int crypto_blake2b_update(struct shash_desc *desc,
- const u8 *in, unsigned int inlen,
- blake2b_compress_t compress)
+static inline int crypto_blake2b_update_bo(struct shash_desc *desc,
+ const u8 *in, unsigned int inlen,
+ blake2b_compress_t compress)
{
struct blake2b_state *state = shash_desc_ctx(desc);
- __blake2b_update(state, in, inlen, compress);
- return 0;
+ blake2b_set_nonlast(state);
+ compress(state, in, inlen / BLAKE2B_BLOCK_SIZE, BLAKE2B_BLOCK_SIZE);
+ return inlen - round_down(inlen, BLAKE2B_BLOCK_SIZE);
}
-static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out,
+static inline int crypto_blake2b_finup(struct shash_desc *desc, const u8 *in,
+ unsigned int inlen, u8 *out,
blake2b_compress_t compress)
{
struct blake2b_state *state = shash_desc_ctx(desc);
+ u8 buf[BLAKE2B_BLOCK_SIZE];
+ int i;
- __blake2b_final(state, out, compress);
+ memcpy(buf, in, inlen);
+ memset(buf + inlen, 0, BLAKE2B_BLOCK_SIZE - inlen);
+ blake2b_set_lastblock(state);
+ compress(state, buf, 1, inlen);
+ for (i = 0; i < ARRAY_SIZE(state->h); i++)
+ __cpu_to_le64s(&state->h[i]);
+ memcpy(out, state->h, crypto_shash_digestsize(desc->tfm));
+ memzero_explicit(buf, sizeof(buf));
return 0;
}
diff --git a/include/crypto/internal/blockhash.h b/include/crypto/internal/blockhash.h
new file mode 100644
index 000000000000..52d9d4c82493
--- /dev/null
+++ b/include/crypto/internal/blockhash.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Handle partial blocks for block hash.
+ *
+ * Copyright (c) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_BLOCKHASH_H
+#define _CRYPTO_INTERNAL_BLOCKHASH_H
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define BLOCK_HASH_UPDATE_BASE(block_fn, state, src, nbytes, bs, dv, \
+ buf, buflen) \
+ ({ \
+ typeof(block_fn) *_block_fn = &(block_fn); \
+ typeof(state + 0) _state = (state); \
+ unsigned int _buflen = (buflen); \
+ size_t _nbytes = (nbytes); \
+ unsigned int _bs = (bs); \
+ const u8 *_src = (src); \
+ u8 *_buf = (buf); \
+ while ((_buflen + _nbytes) >= _bs) { \
+ const u8 *data = _src; \
+ size_t len = _nbytes; \
+ size_t blocks; \
+ int remain; \
+ if (_buflen) { \
+ remain = _bs - _buflen; \
+ memcpy(_buf + _buflen, _src, remain); \
+ data = _buf; \
+ len = _bs; \
+ } \
		remain = len % _bs;					\
+ blocks = (len - remain) / (dv); \
+ (*_block_fn)(_state, data, blocks); \
+ _src += len - remain - _buflen; \
+ _nbytes -= len - remain - _buflen; \
+ _buflen = 0; \
+ } \
+ memcpy(_buf + _buflen, _src, _nbytes); \
+ _buflen += _nbytes; \
+ })
+
+#define BLOCK_HASH_UPDATE(block, state, src, nbytes, bs, buf, buflen) \
+ BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, 1, buf, buflen)
+#define BLOCK_HASH_UPDATE_BLOCKS(block, state, src, nbytes, bs, buf, buflen) \
+ BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, bs, buf, buflen)
+
+#endif /* _CRYPTO_INTERNAL_BLOCKHASH_H */
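
An illustrative caller of the macro above, assuming a sha256-style compression function that consumes whole 64-byte blocks (all names here are hypothetical; the statement expression evaluates to the number of bytes left buffered):

static void example_block(u32 state[8], const u8 *data, size_t nblocks)
{
	/* ...compress nblocks 64-byte blocks into state... */
}

static unsigned int example_update(u32 state[8], u8 buf[64],
				   unsigned int buflen,
				   const u8 *src, size_t nbytes)
{
	/* dv == bs, so example_block() receives a block count */
	return BLOCK_HASH_UPDATE_BLOCKS(example_block, state, src, nbytes,
					64, buf, buflen);
}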
diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h
deleted file mode 100644
index b085dc1ac151..000000000000
--- a/include/crypto/internal/chacha.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _CRYPTO_INTERNAL_CHACHA_H
-#define _CRYPTO_INTERNAL_CHACHA_H
-
-#include <crypto/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/crypto.h>
-
-struct chacha_ctx {
- u32 key[8];
- int nrounds;
-};
-
-static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize, int nrounds)
-{
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- int i;
-
- if (keysize != CHACHA_KEY_SIZE)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
- ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
-
- ctx->nrounds = nrounds;
- return 0;
-}
-
-static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 20);
-}
-
-static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 12);
-}
-
-#endif /* _CRYPTO_CHACHA_H */
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
index fbf4be56cf12..b6a4ea2240fc 100644
--- a/include/crypto/internal/engine.h
+++ b/include/crypto/internal/engine.h
@@ -27,10 +27,10 @@ struct device;
* @retry_support: indication that the hardware allows re-execution
* of a failed backlog request
+ * @rt: whether this queue is set to run as a realtime task
* @list: link with the global crypto engine list
* @queue_lock: spinlock to synchronise access to request queue
* @queue: the crypto queue of the engine
- * @rt: whether this queue is set to run as a realtime task
* @prepare_crypt_hardware: a request will soon arrive from the queue
* so the subsystem requests the driver to prepare the hardware
* by issuing this call
@@ -51,14 +51,13 @@ struct crypto_engine {
bool running;
bool retry_support;
+ bool rt;
struct list_head list;
spinlock_t queue_lock;
struct crypto_queue queue;
struct device *dev;
- bool rt;
-
int (*prepare_crypt_hardware)(struct crypto_engine *engine);
int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
int (*do_batch_requests)(struct crypto_engine *engine);
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 7fd7126f593a..012f5fb22d43 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -15,7 +15,6 @@
struct aead_geniv_ctx {
spinlock_t lock;
struct crypto_aead *child;
- struct crypto_sync_skcipher *sknull;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 485e22cf517e..0f85c543f80b 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -11,6 +11,24 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
+/* Set this bit to handle partial blocks in the API. */
+#define CRYPTO_AHASH_ALG_BLOCK_ONLY 0x01000000
+
+/* Set this bit if final requires at least one byte. */
+#define CRYPTO_AHASH_ALG_FINAL_NONZERO 0x02000000
+
+/* Set this bit if finup can deal with multiple blocks. */
+#define CRYPTO_AHASH_ALG_FINUP_MAX 0x04000000
+
+/* This bit is set by the Crypto API if export_core is not supported. */
+#define CRYPTO_AHASH_ALG_NO_EXPORT_CORE 0x08000000
+
+#define HASH_FBREQ_ON_STACK(name, req) \
+ char __##name##_req[sizeof(struct ahash_request) + \
+ MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct ahash_request *name = ahash_fbreq_on_stack_init( \
+ __##name##_req, (req))
+
struct ahash_request;
struct ahash_instance {
@@ -49,6 +67,7 @@ int crypto_register_ahashes(struct ahash_alg *algs, int count);
void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst);
+void ahash_free_singlespawn_instance(struct ahash_instance *inst);
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
@@ -58,12 +77,20 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
return alg->setkey != shash_no_setkey;
}
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
{
return crypto_shash_alg_has_setkey(alg) &&
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
}
+static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg)
+{
+ return crypto_hash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
+}
+
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask);
@@ -187,7 +214,7 @@ static inline void ahash_request_complete(struct ahash_request *req, int err)
static inline u32 ahash_request_flags(struct ahash_request *req)
{
- return req->base.flags;
+ return crypto_request_flags(&req->base) & ~CRYPTO_AHASH_REQ_PRIVATE;
}
static inline struct crypto_ahash *crypto_spawn_ahash(
@@ -247,20 +274,96 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
return container_of(tfm, struct crypto_shash, base);
}
-static inline bool ahash_request_chained(struct ahash_request *req)
+static inline bool ahash_request_isvirt(struct ahash_request *req)
{
- return crypto_request_chained(&req->base);
+ return req->base.flags & CRYPTO_AHASH_REQ_VIRT;
}
-static inline bool ahash_request_isvirt(struct ahash_request *req)
+static inline bool crypto_ahash_req_virt(struct crypto_ahash *tfm)
{
- return req->base.flags & CRYPTO_AHASH_REQ_VIRT;
+ return crypto_tfm_req_virt(&tfm->base);
}
-static inline bool crypto_ahash_req_chain(struct crypto_ahash *tfm)
+static inline struct crypto_ahash *crypto_ahash_fb(struct crypto_ahash *tfm)
{
- return crypto_tfm_req_chain(&tfm->base);
+ return __crypto_ahash_cast(crypto_ahash_tfm(tfm)->fb);
}
+static inline struct ahash_request *ahash_fbreq_on_stack_init(
+ char *buf, struct ahash_request *old)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(old);
+ struct ahash_request *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base,
+ crypto_ahash_tfm(crypto_ahash_fb(tfm)));
+ ahash_request_set_callback(req, ahash_request_flags(old), NULL, NULL);
+ req->base.flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
+ req->base.flags |= old->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
+ req->src = old->src;
+ req->result = old->result;
+ req->nbytes = old->nbytes;
+
+ return req;
+}
+
+/* Return the state size without partial block for block-only algorithms. */
+static inline unsigned int crypto_shash_coresize(struct crypto_shash *tfm)
+{
+ return crypto_shash_statesize(tfm) - crypto_shash_blocksize(tfm) - 1;
+}
+
+/* This can only be used if the request was never cloned. */
+#define HASH_REQUEST_ZERO(name) \
+ memzero_explicit(__##name##_req, sizeof(__##name##_req))
+
+/**
+ * crypto_ahash_export_core() - extract core state for message digest
+ * @req: reference to the ahash_request handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_ahash_export_core(struct ahash_request *req, void *out);
+
+/**
+ * crypto_ahash_import_core() - import core state
+ * @req: reference to ahash_request handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_ahash_import_core(struct ahash_request *req, const void *in);
+
+/**
+ * crypto_shash_export_core() - extract core state for message digest
+ * @desc: reference to the operational state handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_shash_export_core(struct shash_desc *desc, void *out);
+
+/**
+ * crypto_shash_import_core() - import core state
+ * @desc: reference to the operational state handle the state imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_shash_import_core(struct shash_desc *desc, const void *in);
+
#endif /* _CRYPTO_INTERNAL_HASH_H */
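
A hedged sketch of the core-state round trip for a block-only algorithm; desc and desc2 are illustrative descriptors backed by the same tfm, and the buffer is generously sized with HASH_MAX_STATESIZE:

	u8 core[HASH_MAX_STATESIZE];
	int err;

	err = crypto_shash_export_core(desc, core);	/* partial block excluded */
	if (!err)
		err = crypto_shash_import_core(desc2, core);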
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
index e614594f88c1..c60315f47562 100644
--- a/include/crypto/internal/poly1305.h
+++ b/include/crypto/internal/poly1305.h
@@ -6,9 +6,8 @@
#ifndef _CRYPTO_INTERNAL_POLY1305_H
#define _CRYPTO_INTERNAL_POLY1305_H
-#include <linux/unaligned.h>
-#include <linux/types.h>
#include <crypto/poly1305.h>
+#include <linux/types.h>
/*
* Poly1305 core functions. These only accept whole blocks; the caller must
@@ -31,4 +30,29 @@ void poly1305_core_blocks(struct poly1305_state *state,
void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
void *dst);
+void poly1305_block_init_arch(struct poly1305_block_state *state,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
+void poly1305_block_init_generic(struct poly1305_block_state *state,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
+void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
+ unsigned int len, u32 padbit);
+
+static inline void poly1305_blocks_generic(struct poly1305_block_state *state,
+ const u8 *src, unsigned int len,
+ u32 padbit)
+{
+ poly1305_core_blocks(&state->h, &state->core_r, src,
+ len / POLY1305_BLOCK_SIZE, padbit);
+}
+
+void poly1305_emit_arch(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE], const u32 nonce[4]);
+
+static inline void poly1305_emit_generic(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4])
+{
+ poly1305_core_emit(state, nonce, digest);
+}
+
#endif
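
The _arch/_generic pairs above share one calling convention; ignoring partial-block handling, a generic one-shot computation looks roughly like this (raw_key is the first 16 key bytes, nonce the last 16 interpreted as four u32s, len a multiple of POLY1305_BLOCK_SIZE):

	struct poly1305_block_state bstate;

	poly1305_block_init_generic(&bstate, raw_key);
	poly1305_blocks_generic(&bstate, src, len, 1);	/* padbit = 1 for full blocks */
	poly1305_emit_generic(&bstate.h, digest, nonce);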
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index f25aa2ea3b48..533d6c16a491 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -9,10 +9,7 @@
#ifndef _CRYPTO_SCOMP_INT_H
#define _CRYPTO_SCOMP_INT_H
-#include <crypto/acompress.h>
-#include <crypto/algapi.h>
-
-struct acomp_req;
+#include <crypto/internal/acompress.h>
struct crypto_scomp {
struct crypto_tfm base;
@@ -26,12 +23,10 @@ struct crypto_scomp {
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
* @base: Common crypto API algorithm data structure
- * @stream: Per-cpu memory for algorithm
+ * @streams: Per-cpu memory for algorithm
 * @calg: Common algorithm data structure shared with acomp
*/
struct scomp_alg {
- void *(*alloc_ctx)(void);
- void (*free_ctx)(void *ctx);
int (*compress)(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
@@ -40,6 +35,14 @@ struct scomp_alg {
void *ctx);
union {
+ struct {
+ void *(*alloc_ctx)(void);
+ void (*free_ctx)(void *ctx);
+ };
+ struct crypto_acomp_streams streams;
+ };
+
+ union {
struct COMP_ALG_COMMON;
struct comp_alg_common calg;
};
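
Note: the anonymous union lets legacy drivers keep the alloc_ctx/free_ctx pair
while the core overlays the same storage with per-CPU streams. A hypothetical
legacy-style registration; EXAMPLE_SCRATCH_SIZE and the compress/decompress
bodies are placeholders, not part of this patch:

#include <crypto/internal/scompress.h>
#include <linux/slab.h>

#define EXAMPLE_SCRATCH_SIZE	4096	/* assumed scratch size */

static int example_compress(struct crypto_scomp *tfm, const u8 *src,
			    unsigned int slen, u8 *dst, unsigned int *dlen,
			    void *ctx);
static int example_decompress(struct crypto_scomp *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen,
			      void *ctx);

static void *example_alloc_ctx(void)
{
	return kmalloc(EXAMPLE_SCRATCH_SIZE, GFP_KERNEL);
}

static void example_free_ctx(void *ctx)
{
	kfree(ctx);
}

static struct scomp_alg example_alg = {
	.alloc_ctx	= example_alloc_ctx,	/* overlays .streams */
	.free_ctx	= example_free_ctx,
	.compress	= example_compress,
	.decompress	= example_decompress,
	.base.cra_name	= "example",
};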
diff --git a/include/crypto/internal/sha2.h b/include/crypto/internal/sha2.h
new file mode 100644
index 000000000000..b9bccd3ff57f
--- /dev/null
+++ b/include/crypto/internal/sha2.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _CRYPTO_INTERNAL_SHA2_H
+#define _CRYPTO_INTERNAL_SHA2_H
+
+#include <crypto/internal/simd.h>
+#include <crypto/sha2.h>
+#include <linux/compiler_attributes.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+
+#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256)
+bool sha256_is_arch_optimized(void);
+#else
+static inline bool sha256_is_arch_optimized(void)
+{
+ return false;
+}
+#endif
+void sha256_blocks_generic(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+
+static inline void sha256_choose_blocks(
+ u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks,
+ bool force_generic, bool force_simd)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) || force_generic)
+ sha256_blocks_generic(state, data, nblocks);
+ else if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD) &&
+ (force_simd || crypto_simd_usable()))
+ sha256_blocks_simd(state, data, nblocks);
+ else
+ sha256_blocks_arch(state, data, nblocks);
+}
+
+static __always_inline void sha256_finup(
+ struct crypto_sha256_state *sctx, u8 buf[SHA256_BLOCK_SIZE],
+ size_t len, u8 out[SHA256_DIGEST_SIZE], size_t digest_size,
+ bool force_generic, bool force_simd)
+{
+ const size_t bit_offset = SHA256_BLOCK_SIZE - 8;
+ __be64 *bits = (__be64 *)&buf[bit_offset];
+ int i;
+
+ buf[len++] = 0x80;
+ if (len > bit_offset) {
+ memset(&buf[len], 0, SHA256_BLOCK_SIZE - len);
+ sha256_choose_blocks(sctx->state, buf, 1, force_generic,
+ force_simd);
+ len = 0;
+ }
+
+ memset(&buf[len], 0, bit_offset - len);
+ *bits = cpu_to_be64(sctx->count << 3);
+ sha256_choose_blocks(sctx->state, buf, 1, force_generic, force_simd);
+
+ for (i = 0; i < digest_size; i += 4)
+ put_unaligned_be32(sctx->state[i / 4], out + i);
+}
+
+#endif /* _CRYPTO_INTERNAL_SHA2_H */
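
Note: sha256_finup() assumes sctx->count already includes the bytes buffered in
buf. A minimal generic-only finalisation built on the helpers above
(illustrative):

#include <crypto/internal/sha2.h>

static void example_sha256_finish(struct crypto_sha256_state *sctx,
				  u8 buf[SHA256_BLOCK_SIZE], size_t partial,
				  u8 out[SHA256_DIGEST_SIZE])
{
	/* force_generic = true: bypass the arch and SIMD code paths. */
	sha256_finup(sctx, buf, partial, out, SHA256_DIGEST_SIZE,
		     true, false);
}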
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index be97b97a75dd..7e7f1ac3b7fd 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -6,6 +6,7 @@
#ifndef _CRYPTO_INTERNAL_SIMD_H
#define _CRYPTO_INTERNAL_SIMD_H
+#include <asm/simd.h>
#include <linux/percpu.h>
#include <linux/types.h>
@@ -43,14 +44,9 @@ void simd_unregister_aeads(struct aead_alg *algs, int count,
*
* This delegates to may_use_simd(), except that this also returns false if SIMD
* in crypto code has been temporarily disabled on this CPU by the crypto
- * self-tests, in order to test the no-SIMD fallback code. This override is
- * currently limited to configurations where the extra self-tests are enabled,
- * because it might be a bit too invasive to be part of the regular self-tests.
- *
- * This is a macro so that <asm/simd.h>, which some architectures don't have,
- * doesn't have to be included directly here.
+ * self-tests, in order to test the no-SIMD fallback code.
*/
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+#ifdef CONFIG_CRYPTO_SELFTESTS
DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);
#define crypto_simd_usable() \
(may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
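
Note: the predicate is typically used to pick between a SIMD body and the
generic fallback. A sketch of x86-style glue; kernel_fpu_begin/end and the two
block functions are assumptions chosen for illustration:

#include <asm/fpu/api.h>
#include <crypto/internal/simd.h>

void example_blocks_simd(u32 *state, const u8 *data, size_t nblocks);
void example_blocks_generic(u32 *state, const u8 *data, size_t nblocks);

static void example_blocks(u32 *state, const u8 *data, size_t nblocks)
{
	if (crypto_simd_usable()) {
		kernel_fpu_begin();
		example_blocks_simd(state, data, nblocks);
		kernel_fpu_end();
	} else {
		example_blocks_generic(state, data, nblocks);
	}
}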
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index a958ab0636ad..d5aa535263f6 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -10,6 +10,7 @@
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
+#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/types.h>
@@ -54,48 +55,6 @@ struct crypto_lskcipher_spawn {
struct crypto_spawn base;
};
-struct skcipher_walk {
- union {
- /* Virtual address of the source. */
- struct {
- struct {
- const void *const addr;
- } virt;
- } src;
-
- /* Private field for the API, do not use. */
- struct scatter_walk in;
- };
-
- unsigned int nbytes;
-
- union {
- /* Virtual address of the destination. */
- struct {
- struct {
- void *const addr;
- } virt;
- } dst;
-
- /* Private field for the API, do not use. */
- struct scatter_walk out;
- };
-
- unsigned int total;
-
- u8 *page;
- u8 *buffer;
- u8 *oiv;
- void *iv;
-
- unsigned int ivsize;
-
- int flags;
- unsigned int blocksize;
- unsigned int stride;
- unsigned int alignmask;
-};
-
static inline struct crypto_instance *skcipher_crypto_instance(
struct skcipher_instance *inst)
{
@@ -212,7 +171,6 @@ void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count);
int lskcipher_register_instance(struct crypto_template *tmpl,
struct lskcipher_instance *inst);
-int skcipher_walk_done(struct skcipher_walk *walk, int res);
int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
struct skcipher_request *__restrict req,
bool atomic);
@@ -223,11 +181,6 @@ int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
struct aead_request *__restrict req,
bool atomic);
-static inline void skcipher_walk_abort(struct skcipher_walk *walk)
-{
- skcipher_walk_done(walk, -ECANCELED);
-}
-
static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
return crypto_tfm_ctx(&tfm->base);
diff --git a/include/crypto/md5.h b/include/crypto/md5.h
index cf9e9dec3d21..198b5d69b92f 100644
--- a/include/crypto/md5.h
+++ b/include/crypto/md5.h
@@ -8,6 +8,7 @@
#define MD5_HMAC_BLOCK_SIZE 64
#define MD5_BLOCK_WORDS 16
#define MD5_HASH_WORDS 4
+#define MD5_STATE_SIZE 24
#define MD5_H0 0x67452301UL
#define MD5_H1 0xefcdab89UL
@@ -18,8 +19,8 @@ extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE];
struct md5_state {
u32 hash[MD5_HASH_WORDS];
- u32 block[MD5_BLOCK_WORDS];
u64 byte_count;
+ u32 block[MD5_BLOCK_WORDS];
};
#endif
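
Note: moving block[] behind byte_count makes the leading MD5_STATE_SIZE bytes
(4 * 4 hash words + 8 count bytes = 24) a self-contained core state. An
illustrative compile-time check, not part of the patch:

#include <crypto/md5.h>
#include <linux/build_bug.h>
#include <linux/stddef.h>

static_assert(offsetof(struct md5_state, block) == MD5_STATE_SIZE);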
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 0ef577cc00e3..1c66abf9de3b 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -9,7 +9,4 @@
#define NULL_DIGEST_SIZE 0
#define NULL_IV_SIZE 0
-struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void);
-void crypto_put_default_null_skcipher(void);
-
#endif
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index 090692ec3bc7..e54abda8cfe9 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -7,7 +7,6 @@
#define _CRYPTO_POLY1305_H
#include <linux/types.h>
-#include <linux/crypto.h>
#define POLY1305_BLOCK_SIZE 16
#define POLY1305_KEY_SIZE 32
@@ -38,17 +37,8 @@ struct poly1305_state {
};
};
-struct poly1305_desc_ctx {
- /* partial buffer */
- u8 buf[POLY1305_BLOCK_SIZE];
- /* bytes used in partial buffer */
- unsigned int buflen;
- /* how many keys have been set in r[] */
- unsigned short rset;
- /* whether s[] has been set */
- bool sset;
- /* finalize key */
- u32 s[4];
+/* Combined state for block function. */
+struct poly1305_block_state {
/* accumulator */
struct poly1305_state h;
/* key */
@@ -58,42 +48,29 @@ struct poly1305_desc_ctx {
};
};
-void poly1305_init_arch(struct poly1305_desc_ctx *desc,
- const u8 key[POLY1305_KEY_SIZE]);
-void poly1305_init_generic(struct poly1305_desc_ctx *desc,
- const u8 key[POLY1305_KEY_SIZE]);
-
-static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_init_arch(desc, key);
- else
- poly1305_init_generic(desc, key);
-}
-
-void poly1305_update_arch(struct poly1305_desc_ctx *desc, const u8 *src,
- unsigned int nbytes);
-void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src,
- unsigned int nbytes);
-
-static inline void poly1305_update(struct poly1305_desc_ctx *desc,
- const u8 *src, unsigned int nbytes)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_update_arch(desc, src, nbytes);
- else
- poly1305_update_generic(desc, src, nbytes);
-}
+struct poly1305_desc_ctx {
+ /* partial buffer */
+ u8 buf[POLY1305_BLOCK_SIZE];
+ /* bytes used in partial buffer */
+ unsigned int buflen;
+ /* finalize key */
+ u32 s[4];
+ struct poly1305_block_state state;
+};
-void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest);
-void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *digest);
+void poly1305_init(struct poly1305_desc_ctx *desc,
+ const u8 key[POLY1305_KEY_SIZE]);
+void poly1305_update(struct poly1305_desc_ctx *desc,
+ const u8 *src, unsigned int nbytes);
+void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest);
-static inline void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest)
+#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)
+bool poly1305_is_arch_optimized(void);
+#else
+static inline bool poly1305_is_arch_optimized(void)
{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_final_arch(desc, digest);
- else
- poly1305_final_generic(desc, digest);
+ return false;
}
+#endif
#endif
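
Note: with the arch dispatch folded into the library, callers keep the plain
init/update/final sequence; a minimal sketch:

#include <crypto/poly1305.h>

static void example_poly1305_mac(const u8 key[POLY1305_KEY_SIZE],
				 const u8 *msg, unsigned int len,
				 u8 mac[POLY1305_DIGEST_SIZE])
{
	struct poly1305_desc_ctx desc;

	poly1305_init(&desc, key);
	poly1305_update(&desc, msg, len);
	poly1305_final(&desc, mac);
}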
diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h
index 1d630f371f77..d2e63743e592 100644
--- a/include/crypto/polyval.h
+++ b/include/crypto/polyval.h
@@ -8,15 +8,7 @@
#ifndef _CRYPTO_POLYVAL_H
#define _CRYPTO_POLYVAL_H
-#include <linux/types.h>
-#include <linux/crypto.h>
-
#define POLYVAL_BLOCK_SIZE 16
#define POLYVAL_DIGEST_SIZE 16
-void polyval_mul_non4k(u8 *op1, const u8 *op2);
-
-void polyval_update_non4k(const u8 *key, const u8 *in,
- size_t nblocks, u8 *accumulator);
-
#endif
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 5ac4388f50e1..f8224cc390f8 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -102,12 +102,10 @@ static inline struct rng_alg *__crypto_rng_alg(struct crypto_alg *alg)
}
/**
- * crypto_rng_alg - obtain name of RNG
- * @tfm: cipher handle
- *
- * Return the generic name (cra_name) of the initialized random number generator
+ * crypto_rng_alg() - obtain 'struct rng_alg' pointer from RNG handle
+ * @tfm: RNG handle
*
- * Return: generic name string
+ * Return: Pointer to 'struct rng_alg', derived from @tfm RNG handle
*/
static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
{
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 94a8585f26b2..15ab743f68c8 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -11,11 +11,64 @@
#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H
-#include <crypto/algapi.h>
-
+#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+struct scatter_walk {
+ /* Must be the first member, see struct skcipher_walk. */
+ union {
+ void *const addr;
+
+ /* Private API field, do not touch. */
+ union crypto_no_such_thing *__addr;
+ };
+ struct scatterlist *sg;
+ unsigned int offset;
+};
+
+struct skcipher_walk {
+ union {
+ /* Virtual address of the source. */
+ struct {
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
+ struct {
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
+
+ unsigned int nbytes;
+ unsigned int total;
+
+ u8 *page;
+ u8 *buffer;
+ u8 *oiv;
+ void *iv;
+
+ unsigned int ivsize;
+
+ int flags;
+ unsigned int blocksize;
+ unsigned int stride;
+ unsigned int alignmask;
+};
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
struct scatterlist *sg, int num)
@@ -243,4 +296,12 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
unsigned int len);
+int skcipher_walk_first(struct skcipher_walk *walk, bool atomic);
+int skcipher_walk_done(struct skcipher_walk *walk, int res);
+
+static inline void skcipher_walk_abort(struct skcipher_walk *walk)
+{
+ skcipher_walk_done(walk, -ECANCELED);
+}
+
#endif /* _CRYPTO_SCATTERWALK_H */
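
Note: skcipher_walk_done() is now declared here alongside the walk state; the
canonical consumption loop is unchanged. A sketch in which process_segment is a
hypothetical per-chunk helper, and passing 0 tells the walker that everything
mapped this iteration was consumed:

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

void process_segment(void *dst, const void *src, unsigned int len);

static int example_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes) {
		process_segment(walk.dst.virt.addr, walk.src.virt.addr,
				walk.nbytes);
		err = skcipher_walk_done(&walk, 0);
	}
	return err;
}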
diff --git a/include/crypto/sha1.h b/include/crypto/sha1.h
index 044ecea60ac8..f48230b1413c 100644
--- a/include/crypto/sha1.h
+++ b/include/crypto/sha1.h
@@ -10,6 +10,7 @@
#define SHA1_DIGEST_SIZE 20
#define SHA1_BLOCK_SIZE 64
+#define SHA1_STATE_SIZE offsetof(struct sha1_state, buffer)
#define SHA1_H0 0x67452301UL
#define SHA1_H1 0xefcdab89UL
@@ -25,14 +26,6 @@ struct sha1_state {
u8 buffer[SHA1_BLOCK_SIZE];
};
-struct shash_desc;
-
-extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
/*
* An implementation of SHA-1's compression function. Don't use in new code!
* You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
index 0c342ed0d038..62701d136c79 100644
--- a/include/crypto/sha1_base.h
+++ b/include/crypto/sha1_base.h
@@ -10,10 +10,9 @@
#include <crypto/internal/hash.h>
#include <crypto/sha1.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
+#include <linux/math.h>
#include <linux/string.h>
-
+#include <linux/types.h>
#include <linux/unaligned.h>
typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);
@@ -32,63 +31,38 @@ static inline int sha1_base_init(struct shash_desc *desc)
return 0;
}
-static inline int sha1_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha1_block_fn *block_fn)
+static inline int sha1_base_do_update_blocks(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha1_block_fn *block_fn)
{
+ unsigned int remain = len - round_down(len, SHA1_BLOCK_SIZE);
struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA1_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buffer, 1);
- }
- blocks = len / SHA1_BLOCK_SIZE;
- len %= SHA1_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA1_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-
- return 0;
+ sctx->count += len - remain;
+ block_fn(sctx, data, len / SHA1_BLOCK_SIZE);
+ return remain;
}
-static inline int sha1_base_do_finalize(struct shash_desc *desc,
- sha1_block_fn *block_fn)
+static inline int sha1_base_do_finup(struct shash_desc *desc,
+ const u8 *src, unsigned int len,
+ sha1_block_fn *block_fn)
{
- const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
+ unsigned int bit_offset = SHA1_BLOCK_SIZE / 8 - 1;
struct sha1_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial);
- partial = 0;
-
- block_fn(sctx, sctx->buffer, 1);
- }
-
- memset(sctx->buffer + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buffer, 1);
+ union {
+ __be64 b64[SHA1_BLOCK_SIZE / 4];
+ u8 u8[SHA1_BLOCK_SIZE * 2];
+ } block = {};
+
+ if (len >= bit_offset * 8)
+ bit_offset += SHA1_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ sctx->count += len;
+ block.b64[bit_offset] = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, block.u8, (bit_offset + 1) * 8 / SHA1_BLOCK_SIZE);
+ memzero_explicit(&block, sizeof(block));
return 0;
}
@@ -102,7 +76,6 @@ static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
put_unaligned_be32(sctx->state[i], digest++);
- memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
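
Note: with partial-block buffering moved into the core, drivers reduce to thin
wrappers around these helpers. A sketch for a hypothetical block function
example_sha1_block:

#include <crypto/internal/hash.h>
#include <crypto/sha1_base.h>

static void example_sha1_block(struct sha1_state *sst, const u8 *src,
			       int blocks);

static int example_sha1_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len)
{
	/* Returns the byte count left over for the core to buffer. */
	return sha1_base_do_update_blocks(desc, data, len,
					  example_sha1_block);
}

static int example_sha1_finup(struct shash_desc *desc, const u8 *data,
			      unsigned int len, u8 *out)
{
	sha1_base_do_finup(desc, data, len, example_sha1_block);
	return sha1_base_finish(desc, out);
}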
diff --git a/include/crypto/sha2.h b/include/crypto/sha2.h
index b9e9281d76c9..4912572578dc 100644
--- a/include/crypto/sha2.h
+++ b/include/crypto/sha2.h
@@ -13,12 +13,14 @@
#define SHA256_DIGEST_SIZE 32
#define SHA256_BLOCK_SIZE 64
+#define SHA256_STATE_WORDS 8
#define SHA384_DIGEST_SIZE 48
#define SHA384_BLOCK_SIZE 128
#define SHA512_DIGEST_SIZE 64
#define SHA512_BLOCK_SIZE 128
+#define SHA512_STATE_SIZE 80
#define SHA224_H0 0xc1059ed8UL
#define SHA224_H1 0x367cd507UL
@@ -64,9 +66,19 @@ extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
-struct sha256_state {
- u32 state[SHA256_DIGEST_SIZE / 4];
+struct crypto_sha256_state {
+ u32 state[SHA256_STATE_WORDS];
u64 count;
+};
+
+struct sha256_state {
+ union {
+ struct crypto_sha256_state ctx;
+ struct {
+ u32 state[SHA256_STATE_WORDS];
+ u64 count;
+ };
+ };
u8 buf[SHA256_BLOCK_SIZE];
};
@@ -76,31 +88,7 @@ struct sha512_state {
u8 buf[SHA512_BLOCK_SIZE];
};
-struct shash_desc;
-
-extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
-extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
-/*
- * Stand-alone implementation of the SHA256 algorithm. It is designed to
- * have as little dependencies as possible so it can be used in the
- * kexec_file purgatory. In other cases you should generally use the
- * hash APIs from include/crypto/hash.h. Especially when hashing large
- * amounts of data as those APIs may be hw-accelerated.
- *
- * For details see lib/crypto/sha256.c
- */
-
-static inline void sha256_init(struct sha256_state *sctx)
+static inline void sha256_block_init(struct crypto_sha256_state *sctx)
{
sctx->state[0] = SHA256_H0;
sctx->state[1] = SHA256_H1;
@@ -112,11 +100,16 @@ static inline void sha256_init(struct sha256_state *sctx)
sctx->state[7] = SHA256_H7;
sctx->count = 0;
}
-void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
-void sha256_final(struct sha256_state *sctx, u8 *out);
-void sha256(const u8 *data, unsigned int len, u8 *out);
-static inline void sha224_init(struct sha256_state *sctx)
+static inline void sha256_init(struct sha256_state *sctx)
+{
+ sha256_block_init(&sctx->ctx);
+}
+void sha256_update(struct sha256_state *sctx, const u8 *data, size_t len);
+void sha256_final(struct sha256_state *sctx, u8 out[SHA256_DIGEST_SIZE]);
+void sha256(const u8 *data, size_t len, u8 out[SHA256_DIGEST_SIZE]);
+
+static inline void sha224_block_init(struct crypto_sha256_state *sctx)
{
sctx->state[0] = SHA224_H0;
sctx->state[1] = SHA224_H1;
@@ -128,7 +121,12 @@ static inline void sha224_init(struct sha256_state *sctx)
sctx->state[7] = SHA224_H7;
sctx->count = 0;
}
+
+static inline void sha224_init(struct sha256_state *sctx)
+{
+ sha224_block_init(&sctx->ctx);
+}
/* Simply use sha256_update as it is equivalent to sha224_update. */
-void sha224_final(struct sha256_state *sctx, u8 *out);
+void sha224_final(struct sha256_state *sctx, u8 out[SHA224_DIGEST_SIZE]);
#endif /* _CRYPTO_SHA2_H */
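
Note: at the library level the entry points only widen the length to size_t;
usage is unchanged. For instance:

#include <crypto/sha2.h>

static void example_sha256(const u8 *data, size_t len,
			   u8 out[SHA256_DIGEST_SIZE])
{
	struct sha256_state sctx;

	sha256_init(&sctx);
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, out);
}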
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
deleted file mode 100644
index e0418818d63c..000000000000
--- a/include/crypto/sha256_base.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * sha256_base.h - core logic for SHA-256 implementations
- *
- * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#ifndef _CRYPTO_SHA256_BASE_H
-#define _CRYPTO_SHA256_BASE_H
-
-#include <asm/byteorder.h>
-#include <linux/unaligned.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha2.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
- int blocks);
-
-static inline int sha224_base_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- sha224_init(sctx);
- return 0;
-}
-
-static inline int sha256_base_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- sha256_init(sctx);
- return 0;
-}
-
-static inline int lib_sha256_base_do_update(struct sha256_state *sctx,
- const u8 *data,
- unsigned int len,
- sha256_block_fn *block_fn)
-{
- unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA256_BLOCK_SIZE - partial;
-
- memcpy(sctx->buf + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- blocks = len / SHA256_BLOCK_SIZE;
- len %= SHA256_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA256_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buf + partial, data, len);
-
- return 0;
-}
-
-static inline int sha256_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha256_block_fn *block_fn)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- return lib_sha256_base_do_update(sctx, data, len, block_fn);
-}
-
-static inline int lib_sha256_base_do_finalize(struct sha256_state *sctx,
- sha256_block_fn *block_fn)
-{
- const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
- __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
- unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-
- sctx->buf[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
- partial = 0;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- memset(sctx->buf + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buf, 1);
-
- return 0;
-}
-
-static inline int sha256_base_do_finalize(struct shash_desc *desc,
- sha256_block_fn *block_fn)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- return lib_sha256_base_do_finalize(sctx, block_fn);
-}
-
-static inline int lib_sha256_base_finish(struct sha256_state *sctx, u8 *out,
- unsigned int digest_size)
-{
- __be32 *digest = (__be32 *)out;
- int i;
-
- for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
- put_unaligned_be32(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
- return 0;
-}
-
-static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
-{
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- return lib_sha256_base_finish(sctx, out, digest_size);
-}
-
-#endif /* _CRYPTO_SHA256_BASE_H */
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h
index 080f60c2e6b1..41e1b83a6d91 100644
--- a/include/crypto/sha3.h
+++ b/include/crypto/sha3.h
@@ -5,30 +5,32 @@
#ifndef __CRYPTO_SHA3_H__
#define __CRYPTO_SHA3_H__
+#include <linux/types.h>
+
#define SHA3_224_DIGEST_SIZE (224 / 8)
#define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE)
+#define SHA3_224_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_224_BLOCK_SIZE + 1
#define SHA3_256_DIGEST_SIZE (256 / 8)
#define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE)
+#define SHA3_256_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_256_BLOCK_SIZE + 1
#define SHA3_384_DIGEST_SIZE (384 / 8)
#define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE)
+#define SHA3_384_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_384_BLOCK_SIZE + 1
#define SHA3_512_DIGEST_SIZE (512 / 8)
#define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE)
+#define SHA3_512_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_512_BLOCK_SIZE + 1
-struct sha3_state {
- u64 st[25];
- unsigned int rsiz;
- unsigned int rsizw;
+#define SHA3_STATE_SIZE 200
- unsigned int partial;
- u8 buf[SHA3_224_BLOCK_SIZE];
+struct shash_desc;
+
+struct sha3_state {
+ u64 st[SHA3_STATE_SIZE / 8];
};
int crypto_sha3_init(struct shash_desc *desc);
-int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-int crypto_sha3_final(struct shash_desc *desc, u8 *out);
#endif
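
Note: each export size now covers the 200-byte Keccak state, the rate-sized
partial buffer and one byte of bookkeeping; for SHA3-256 that is
200 + 136 + 1 = 337 bytes. An illustrative check, not part of the patch:

#include <crypto/sha3.h>
#include <linux/build_bug.h>

static_assert(SHA3_256_EXPORT_SIZE == 337);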
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
index 679916a84cb2..aa814bab442d 100644
--- a/include/crypto/sha512_base.h
+++ b/include/crypto/sha512_base.h
@@ -10,10 +10,10 @@
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/math.h>
#include <linux/string.h>
-
+#include <linux/types.h>
#include <linux/unaligned.h>
typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src,
@@ -53,66 +53,51 @@ static inline int sha512_base_init(struct shash_desc *desc)
return 0;
}
-static inline int sha512_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha512_block_fn *block_fn)
+static inline int sha512_base_do_update_blocks(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha512_block_fn *block_fn)
{
+ unsigned int remain = len - round_down(len, SHA512_BLOCK_SIZE);
struct sha512_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+ len -= remain;
sctx->count[0] += len;
if (sctx->count[0] < len)
sctx->count[1]++;
-
- if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA512_BLOCK_SIZE - partial;
-
- memcpy(sctx->buf + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- blocks = len / SHA512_BLOCK_SIZE;
- len %= SHA512_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA512_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buf + partial, data, len);
-
- return 0;
+ block_fn(sctx, data, len / SHA512_BLOCK_SIZE);
+ return remain;
}
-static inline int sha512_base_do_finalize(struct shash_desc *desc,
- sha512_block_fn *block_fn)
+static inline int sha512_base_do_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len,
+ sha512_block_fn *block_fn)
{
- const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]);
+ unsigned int bit_offset = SHA512_BLOCK_SIZE / 8 - 2;
struct sha512_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
- unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+ union {
+ __be64 b64[SHA512_BLOCK_SIZE / 4];
+ u8 u8[SHA512_BLOCK_SIZE * 2];
+ } block = {};
- sctx->buf[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial);
- partial = 0;
+ if (len >= SHA512_BLOCK_SIZE) {
+ int remain;
- block_fn(sctx, sctx->buf, 1);
+ remain = sha512_base_do_update_blocks(desc, src, len, block_fn);
+ src += len - remain;
+ len = remain;
}
- memset(sctx->buf + partial, 0x0, bit_offset - partial);
- bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
- bits[1] = cpu_to_be64(sctx->count[0] << 3);
- block_fn(sctx, sctx->buf, 1);
+ if (len >= bit_offset * 8)
+ bit_offset += SHA512_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ sctx->count[0] += len;
+ block.b64[bit_offset] = cpu_to_be64(sctx->count[1] << 3 |
+ sctx->count[0] >> 61);
+ block.b64[bit_offset + 1] = cpu_to_be64(sctx->count[0] << 3);
+ block_fn(sctx, block.u8, (bit_offset + 2) * 8 / SHA512_BLOCK_SIZE);
+ memzero_explicit(&block, sizeof(block));
return 0;
}
@@ -126,9 +111,10 @@ static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
put_unaligned_be64(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
+void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
+ int blocks);
+
#endif /* _CRYPTO_SHA512_BASE_H */
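
Note: the SHA-512 path mirrors SHA-1 but encodes a 128-bit length from
count[1]:count[0]; with sha512_generic_block_fn now exported, a finup wrapper
is a one-liner (illustrative):

#include <crypto/sha512_base.h>

static int example_sha512_finup(struct shash_desc *desc, const u8 *data,
				unsigned int len, u8 *out)
{
	sha512_base_do_finup(desc, data, len, sha512_generic_block_fn);
	return sha512_base_finish(desc, out);
}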
diff --git a/include/crypto/sig.h b/include/crypto/sig.h
index 11024708c069..fa6dafafab3f 100644
--- a/include/crypto/sig.h
+++ b/include/crypto/sig.h
@@ -128,7 +128,7 @@ static inline void crypto_free_sig(struct crypto_sig *tfm)
/**
* crypto_sig_keysize() - Get key size
*
- * Function returns the key size in bytes.
+ * Function returns the key size in bits.
* Function assumes that the key is already set in the transformation. If this
* function is called without a setkey or with a failed setkey, you may end up
* in a NULL dereference.
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h
index 1f021ad0533f..c8d02c86c298 100644
--- a/include/crypto/sm3.h
+++ b/include/crypto/sm3.h
@@ -14,6 +14,7 @@
#define SM3_DIGEST_SIZE 32
#define SM3_BLOCK_SIZE 64
+#define SM3_STATE_SIZE 40
#define SM3_T1 0x79CC4519
#define SM3_T2 0x7A879D8A
@@ -58,7 +59,6 @@ static inline void sm3_init(struct sm3_state *sctx)
sctx->count = 0;
}
-void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len);
-void sm3_final(struct sm3_state *sctx, u8 *out);
+void sm3_block_generic(struct sm3_state *sctx, u8 const *data, int blocks);
#endif
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
index b33ed39c2bce..7c53570bc05e 100644
--- a/include/crypto/sm3_base.h
+++ b/include/crypto/sm3_base.h
@@ -11,87 +11,59 @@
#include <crypto/internal/hash.h>
#include <crypto/sm3.h>
-#include <linux/crypto.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/string.h>
+#include <linux/types.h>
#include <linux/unaligned.h>
typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks);
static inline int sm3_base_init(struct shash_desc *desc)
{
- struct sm3_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SM3_IVA;
- sctx->state[1] = SM3_IVB;
- sctx->state[2] = SM3_IVC;
- sctx->state[3] = SM3_IVD;
- sctx->state[4] = SM3_IVE;
- sctx->state[5] = SM3_IVF;
- sctx->state[6] = SM3_IVG;
- sctx->state[7] = SM3_IVH;
- sctx->count = 0;
-
+ sm3_init(shash_desc_ctx(desc));
return 0;
}
-static inline int sm3_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sm3_block_fn *block_fn)
+static inline int sm3_base_do_update_blocks(struct shash_desc *desc,
+ const u8 *data, unsigned int len,
+ sm3_block_fn *block_fn)
{
+ unsigned int remain = len - round_down(len, SM3_BLOCK_SIZE);
struct sm3_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SM3_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
- block_fn(sctx, sctx->buffer, 1);
- }
-
- blocks = len / SM3_BLOCK_SIZE;
- len %= SM3_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SM3_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-
- return 0;
+ sctx->count += len - remain;
+ block_fn(sctx, data, len / SM3_BLOCK_SIZE);
+ return remain;
}
-static inline int sm3_base_do_finalize(struct shash_desc *desc,
- sm3_block_fn *block_fn)
+static inline int sm3_base_do_finup(struct shash_desc *desc,
+ const u8 *src, unsigned int len,
+ sm3_block_fn *block_fn)
{
- const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64);
+ unsigned int bit_offset = SM3_BLOCK_SIZE / 8 - 1;
struct sm3_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+ union {
+ __be64 b64[SM3_BLOCK_SIZE / 4];
+ u8 u8[SM3_BLOCK_SIZE * 2];
+ } block = {};
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial);
- partial = 0;
+ if (len >= SM3_BLOCK_SIZE) {
+ int remain;
- block_fn(sctx, sctx->buffer, 1);
+ remain = sm3_base_do_update_blocks(desc, src, len, block_fn);
+ src += len - remain;
+ len = remain;
}
- memset(sctx->buffer + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buffer, 1);
+ if (len >= bit_offset * 8)
+ bit_offset += SM3_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ sctx->count += len;
+ block.b64[bit_offset] = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, block.u8, (bit_offset + 1) * 8 / SM3_BLOCK_SIZE);
+ memzero_explicit(&block, sizeof(block));
return 0;
}
@@ -104,8 +76,6 @@ static inline int sm3_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++)
put_unaligned_be32(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h
index cae1b4a01971..570f720a113b 100644
--- a/include/crypto/streebog.h
+++ b/include/crypto/streebog.h
@@ -23,15 +23,10 @@ struct streebog_uint512 {
};
struct streebog_state {
- union {
- u8 buffer[STREEBOG_BLOCK_SIZE];
- struct streebog_uint512 m;
- };
struct streebog_uint512 hash;
struct streebog_uint512 h;
struct streebog_uint512 N;
struct streebog_uint512 Sigma;
- size_t fillsize;
};
#endif /* !_CRYPTO_STREEBOG_H_ */