author	Arvind Sankar <nivedita@alum.mit.edu>	2020-10-25 17:31:19 +0300
committer	Herbert Xu <herbert@gondor.apana.org.au>	2020-10-30 09:35:04 +0300
commit	18d05ca4486fe38991c3166b1f4df26b8a029665 (patch)
tree	5177e8c734e1f2b4379c23f4f5860dbde4be3f7f /lib
parent	63642d5c141f1bcbe97f7895467a724ad2e3f346 (diff)
download	linux-18d05ca4486fe38991c3166b1f4df26b8a029665.tar.xz
crypto: lib/sha256 - Unroll LOAD and BLEND loops
Unrolling the LOAD and BLEND loops improves performance by ~8% on x86_64
(tested on Broadwell Xeon) while not increasing code size too much.

Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
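For reference, LOAD_OP fills one word of the 64-entry message schedule W[] from the big-endian input block, and BLEND_OP extends the schedule from earlier words; each unrolled loop body in the diff below is simply eight of these in a row. A user-space sketch of the two operations follows (rotr32 and load_be32 are stand-in helpers invented for this sketch; the kernel version relies on its own primitives such as ror32() and unaligned big-endian loads):

#include <stdint.h>

/* Stand-in helpers for this sketch only. */
static inline uint32_t rotr32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

static inline uint32_t load_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* SHA-256 small sigma functions used by the message schedule. */
#define s0(x) (rotr32((x), 7) ^ rotr32((x), 18) ^ ((x) >> 3))
#define s1(x) (rotr32((x), 17) ^ rotr32((x), 19) ^ ((x) >> 10))

/*
 * LOAD_OP: read 32-bit word I of the 64-byte block, big-endian, into W[I].
 * BLEND_OP: W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16].
 */
#define LOAD_OP(I, W, input) ((W)[I] = load_be32((input) + 4 * (I)))
#define BLEND_OP(I, W) \
	((W)[I] = s1((W)[(I) - 2]) + (W)[(I) - 7] + \
		  s0((W)[(I) - 15]) + (W)[(I) - 16])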
Diffstat (limited to 'lib')
-rw-r--r--	lib/crypto/sha256.c	24
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index e2e29d9b0ccd..cdef37c05972 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -76,12 +76,28 @@ static void sha256_transform(u32 *state, const u8 *input, u32 *W)
int i;
/* load the input */
- for (i = 0; i < 16; i++)
- LOAD_OP(i, W, input);
+ for (i = 0; i < 16; i += 8) {
+ LOAD_OP(i + 0, W, input);
+ LOAD_OP(i + 1, W, input);
+ LOAD_OP(i + 2, W, input);
+ LOAD_OP(i + 3, W, input);
+ LOAD_OP(i + 4, W, input);
+ LOAD_OP(i + 5, W, input);
+ LOAD_OP(i + 6, W, input);
+ LOAD_OP(i + 7, W, input);
+ }
/* now blend */
- for (i = 16; i < 64; i++)
- BLEND_OP(i, W);
+ for (i = 16; i < 64; i += 8) {
+ BLEND_OP(i + 0, W);
+ BLEND_OP(i + 1, W);
+ BLEND_OP(i + 2, W);
+ BLEND_OP(i + 3, W);
+ BLEND_OP(i + 4, W);
+ BLEND_OP(i + 5, W);
+ BLEND_OP(i + 6, W);
+ BLEND_OP(i + 7, W);
+ }
/* load the state into our registers */
a = state[0]; b = state[1]; c = state[2]; d = state[3];
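For context, sha256_transform() is the block function behind the library's streaming interface. A minimal sketch of a hypothetical caller (hash_one_buf is made up for illustration; the declarations live in <crypto/sha.h> in kernels of this vintage, later moved to <crypto/sha2.h>):

#include <linux/types.h>
#include <crypto/sha.h>		/* struct sha256_state, SHA256_DIGEST_SIZE */

/* Hypothetical helper: hash a buffer with the generic lib/crypto/sha256 code,
 * which calls sha256_transform() once for every 64-byte block fed to it. */
static void hash_one_buf(const u8 *data, unsigned int len,
			 u8 digest[SHA256_DIGEST_SIZE])
{
	struct sha256_state sctx;

	sha256_init(&sctx);
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, digest);
}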