author		Dave Watson <davejwatson@fb.com>	2018-12-10 22:57:36 +0300
committer	Herbert Xu <herbert@gondor.apana.org.au>	2018-12-23 06:52:42 +0300
commit		5350b0f563433dacc134214a452fd316b36251d6 (patch)
tree		ded02233d76bb75a3ca8274c69e80f2dd2bfad44 /arch/x86/crypto/aesni-intel_glue.c
parent		2426f64bc51fc86951b735d2247d6eb89259d580 (diff)
download	linux-5350b0f563433dacc134214a452fd316b36251d6.tar.xz
crypto: aesni - support 256 bit keys in avx asm
Add support for 192- and 256-bit keys using the AVX GCM/AES routines.
The SSE routines were previously updated in commit e31ac32d3b ("Add
support for 192 & 256 bit keys to AESNI RFC4106").
Instead of adding an additional loop in the hot path, as e31ac32d3b
did, this patch generates separate versions of the code using macros,
and the entry routines choose a version once. This results in a 5%
performance improvement over adding a loop to the hot path. It is the
same strategy used by the Intel isa-l_crypto library.
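A minimal C sketch of that idea, with hypothetical names (the real
patch does this with x86 assembler macros in the AVX asm, not C
macros):

#include <stdint.h>

struct aes_gcm_ctx {
	unsigned int key_length;	/* in bytes: 16, 24 or 32 */
	/* expanded round keys, hash keys, ... */
};

/* One body, stamped out once per key size, so the round count is a
 * compile-time constant and the hot path carries no key-size branch. */
#define DEFINE_GCM_ENC(bits)						\
static void gcm_enc_##bits(struct aes_gcm_ctx *ctx, uint8_t *out,	\
			   const uint8_t *in, unsigned long len)	\
{									\
	(void)ctx; (void)out; (void)in; (void)len;			\
	/* AES rounds for this key size, fully unrolled, go here */	\
}

DEFINE_GCM_ENC(128)
DEFINE_GCM_ENC(192)
DEFINE_GCM_ENC(256)

/* Entry routine: the key-size decision is made exactly once. */
static void gcm_enc(struct aes_gcm_ctx *ctx, uint8_t *out,
		    const uint8_t *in, unsigned long len)
{
	switch (ctx->key_length) {
	case 16: gcm_enc_128(ctx, out, in, len); break;
	case 24: gcm_enc_192(ctx, out, in, len); break;
	default: gcm_enc_256(ctx, out, in, len); break;
	}
}

int main(void)
{
	struct aes_gcm_ctx ctx = { .key_length = 32 };
	uint8_t buf[16] = { 0 };

	gcm_enc(&ctx, buf, buf, sizeof(buf)); /* dispatches to gcm_enc_256 */
	return 0;
}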
The key-size checks are removed from the C code where appropriate.
Note that this patch depends on using struct gcm_context_data: 256-bit
keys require 16 hash keys plus 15 expanded round keys, which is more
than struct crypto_aes_ctx holds, so they are stored in struct
gcm_context_data.
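A back-of-envelope check of that sizing (a standalone sketch; the
constants come from the AES specification and the 16 hash keys are as
stated above, not a copy of the kernel's struct definitions):

#include <stdio.h>

#define AES_BLOCK_SIZE	16
#define AES256_ROUNDS	14	/* AES-256 runs 14 rounds */

int main(void)
{
	/* 14 rounds need 15 round keys (one extra for the initial
	 * AddRoundKey), 16 bytes each. */
	unsigned long expanded  = (AES256_ROUNDS + 1) * AES_BLOCK_SIZE;

	/* the AVX GHASH code precomputes 16 powers of the hash subkey */
	unsigned long hash_keys = 16UL * AES_BLOCK_SIZE;

	/* prints 240 + 256 = 496 bytes, which per the commit message is
	 * more than struct crypto_aes_ctx can hold */
	printf("expanded: %lu B, hash keys: %lu B, total: %lu B\n",
	       expanded, hash_keys, expanded + hash_keys);
	return 0;
}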
Signed-off-by: Dave Watson <davejwatson@fb.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'arch/x86/crypto/aesni-intel_glue.c')
-rw-r--r--	arch/x86/crypto/aesni-intel_glue.c	18
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index d8b8cb33f608..7d1259feb0f9 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -209,8 +209,7 @@ static void aesni_gcm_enc_avx(void *ctx,
 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 			u8 *auth_tag, unsigned long auth_tag_len)
 {
-	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
-	if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){
+	if (plaintext_len < AVX_GEN2_OPTSIZE) {
 		aesni_gcm_enc(ctx, data, out, in,
 			plaintext_len, iv, hash_subkey, aad,
 			aad_len, auth_tag, auth_tag_len);
@@ -227,8 +226,7 @@ static void aesni_gcm_dec_avx(void *ctx,
 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 			u8 *auth_tag, unsigned long auth_tag_len)
 {
-	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
-	if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
+	if (ciphertext_len < AVX_GEN2_OPTSIZE) {
 		aesni_gcm_dec(ctx, data, out, in,
 			ciphertext_len, iv, hash_subkey, aad,
 			aad_len, auth_tag, auth_tag_len);
@@ -268,8 +266,7 @@ static void aesni_gcm_enc_avx2(void *ctx,
 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 			u8 *auth_tag, unsigned long auth_tag_len)
 {
-	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
-	if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
+	if (plaintext_len < AVX_GEN2_OPTSIZE) {
 		aesni_gcm_enc(ctx, data, out, in,
 			plaintext_len, iv, hash_subkey, aad,
 			aad_len, auth_tag, auth_tag_len);
@@ -290,8 +287,7 @@ static void aesni_gcm_dec_avx2(void *ctx,
 			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
 			u8 *auth_tag, unsigned long auth_tag_len)
 {
-	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
-	if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
+	if (ciphertext_len < AVX_GEN2_OPTSIZE) {
 		aesni_gcm_dec(ctx, data, out, in,
 			ciphertext_len, iv, hash_subkey, aad,
 			aad_len, auth_tag, auth_tag_len);
@@ -928,8 +924,7 @@ static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
 	struct scatter_walk dst_sg_walk = {};
 	struct gcm_context_data data AESNI_ALIGN_ATTR;
 
-	if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
-	    aesni_gcm_enc_tfm == aesni_gcm_enc ||
+	if (aesni_gcm_enc_tfm == aesni_gcm_enc ||
 	    req->cryptlen < AVX_GEN2_OPTSIZE) {
 		return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
 					  aes_ctx);
@@ -1000,8 +995,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
 	struct gcm_context_data data AESNI_ALIGN_ATTR;
 	int retval = 0;
 
-	if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
-	    aesni_gcm_enc_tfm == aesni_gcm_enc ||
+	if (aesni_gcm_enc_tfm == aesni_gcm_enc ||
 	    req->cryptlen < AVX_GEN2_OPTSIZE) {
 		return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
 					  aes_ctx);
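For context, a hypothetical in-kernel smoke test that would now reach
the AVX path with a 256-bit key might look like the following (module
boilerplate, names, and sizes are invented for illustration; on AES-NI
hardware "gcm(aes)" should resolve to this driver):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/aead.h>

static int __init gcm256_smoke_init(void)
{
	u8 key[32] = { 0 };	/* AES-256 key: exercises the new path */
	u8 iv[12] = { 0 };	/* 96-bit GCM nonce */
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* 4096-byte payload plus 16-byte tag; comfortably above
	 * AVX_GEN2_OPTSIZE, so the SSE fallback is not taken. */
	buf = kzalloc(4096 + 16, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out_tfm;
	}

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_buf;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_buf;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_buf;
	}

	sg_init_one(&sg, buf, 4096 + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 0);
	aead_request_set_crypt(req, &sg, &sg, 4096, iv);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
	pr_info("gcm256 smoke test: %d\n", err);

	aead_request_free(req);
out_buf:
	kfree(buf);
out_tfm:
	crypto_free_aead(tfm);
	return err;
}

static void __exit gcm256_smoke_exit(void) { }

module_init(gcm256_smoke_init);
module_exit(gcm256_smoke_exit);
MODULE_LICENSE("GPL");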