author     Linus Torvalds <torvalds@linux-foundation.org>  2020-01-29 02:38:56 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-01-29 02:38:56 +0300
commit     a78208e2436963d0b2c7d186277d6e1a9755029a (patch)
tree       090caa51386d811a2750aef3dc70cd247f6aa622  /arch/arm64/crypto/sha256-glue.c
parent     68353984d63d8d7ea728819dbdb7aecc5f32d360 (diff)
parent     0bc81767c5bd9d005fae1099fb39eb3688370cb1 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Removed CRYPTO_TFM_RES flags
   - Extended spawn grabbing to all algorithm types
   - Moved hash descsize verification into API code

  Algorithms:
   - Fixed recursive pcrypt dead-lock
   - Added new 32 and 64-bit generic versions of poly1305
   - Added cryptogams implementation of x86/poly1305

  Drivers:
   - Added support for i.MX8M Mini in caam
   - Added support for i.MX8M Nano in caam
   - Added support for i.MX8M Plus in caam
   - Added support for A33 variant of SS in sun4i-ss
   - Added TEE support for Raven Ridge in ccp
   - Added in-kernel API to submit TEE commands in ccp
   - Added AMD-TEE driver
   - Added support for BCM2711 in iproc-rng200
   - Added support for AES256-GCM based ciphers for chtls
   - Added aead support on SEC2 in hisilicon"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (244 commits)
  crypto: arm/chacha - fix build failured when kernel mode NEON is disabled
  crypto: caam - add support for i.MX8M Plus
  crypto: x86/poly1305 - emit does base conversion itself
  crypto: hisilicon - fix spelling mistake "disgest" -> "digest"
  crypto: chacha20poly1305 - add back missing test vectors and test chunking
  crypto: x86/poly1305 - fix .gitignore typo
  tee: fix memory allocation failure checks on drv_data and amdtee
  crypto: ccree - erase unneeded inline funcs
  crypto: ccree - make cc_pm_put_suspend() void
  crypto: ccree - split overloaded usage of irq field
  crypto: ccree - fix PM race condition
  crypto: ccree - fix FDE descriptor sequence
  crypto: ccree - cc_do_send_request() is void func
  crypto: ccree - fix pm wrongful error reporting
  crypto: ccree - turn errors to debug msgs
  crypto: ccree - fix AEAD decrypt auth fail
  crypto: ccree - fix typo in comment
  crypto: ccree - fix typos in error msgs
  crypto: atmel-{aes,sha,tdes} - Retire crypto_platform_data
  crypto: x86/sha - Eliminate casts on asm implementations
  ...
Diffstat (limited to 'arch/arm64/crypto/sha256-glue.c')
-rw-r--r--  arch/arm64/crypto/sha256-glue.c  32
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index 999da59f03a9..ddf4a0d85c1c 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -27,14 +27,26 @@ asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
unsigned int num_blks);
EXPORT_SYMBOL(sha256_block_data_order);
+static void __sha256_block_data_order(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ sha256_block_data_order(sst->state, src, blocks);
+}
+
asmlinkage void sha256_block_neon(u32 *digest, const void *data,
unsigned int num_blks);
+static void __sha256_block_neon(struct sha256_state *sst, u8 const *src,
+ int blocks)
+{
+ sha256_block_neon(sst->state, src, blocks);
+}
+
static int crypto_sha256_arm64_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
}
static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
@@ -42,9 +54,8 @@ static int crypto_sha256_arm64_finup(struct shash_desc *desc, const u8 *data,
{
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
return sha256_base_finish(desc, out);
}
@@ -87,7 +98,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
while (len > 0) {
unsigned int chunk = len;
@@ -103,8 +114,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
sctx->count % SHA256_BLOCK_SIZE;
kernel_neon_begin();
- sha256_base_do_update(desc, data, chunk,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_update(desc, data, chunk, __sha256_block_neon);
kernel_neon_end();
data += chunk;
len -= chunk;
@@ -118,15 +128,13 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ __sha256_block_data_order);
+ sha256_base_do_finalize(desc, __sha256_block_data_order);
} else {
if (len)
sha256_update_neon(desc, data, len);
kernel_neon_begin();
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_neon);
+ sha256_base_do_finalize(desc, __sha256_block_neon);
kernel_neon_end();
}
return sha256_base_finish(desc, out);
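
The hunks above replace function-pointer casts such as (sha256_block_fn *)sha256_block_data_order with small static wrappers (__sha256_block_data_order, __sha256_block_neon) whose prototypes exactly match sha256_block_fn. Calling the assembly routine through a pointer of a mismatched type is undefined behavior in C and defeats type-based control-flow-integrity checking of indirect calls, so the callback is now invoked through an adapter with the correct signature. Below is a minimal standalone sketch of the same pattern; the struct layout and helper names are illustrative stand-ins, not the kernel's actual definitions.

/*
 * Sketch of the cast-to-wrapper conversion shown in the diff.
 * Not kernel code: types and helpers here are simplified stand-ins.
 */
#include <stdint.h>

struct sha256_state {
	uint32_t state[8];
	uint64_t count;
	uint8_t  buf[64];
};

/* Callback type expected by the generic "base" update/finalize helpers. */
typedef void (sha256_block_fn)(struct sha256_state *sst, const uint8_t *src,
			       int blocks);

/* Stand-in for the assembly routine, which has a different prototype. */
static void sha256_block_data_order(uint32_t *digest, const void *data,
				    unsigned int num_blks)
{
	/* ... process num_blks 64-byte blocks into digest ... */
	(void)digest; (void)data; (void)num_blks;
}

/*
 * Old style (removed by the patch): pass the asm routine through a cast
 * such as (sha256_block_fn *)sha256_block_data_order, so the indirect
 * call goes through a mismatched prototype.
 *
 * New style: a thin static adapter with the exact callback signature.
 */
static void __sha256_block_data_order(struct sha256_state *sst,
				      const uint8_t *src, int blocks)
{
	sha256_block_data_order(sst->state, src, blocks);
}

/* Stand-in for sha256_base_do_update(): calls back through the typed pointer. */
static void do_update(struct sha256_state *sst, const uint8_t *src,
		      int blocks, sha256_block_fn *block_fn)
{
	block_fn(sst, src, blocks);	/* indirect call, types now match */
}

int main(void)
{
	struct sha256_state sst = { { 0 } };
	uint8_t block[64] = { 0 };

	do_update(&sst, block, 1, __sha256_block_data_order);
	return 0;
}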