author		Ard Biesheuvel <ard.biesheuvel@linaro.org>	2017-01-29 02:25:39 +0300
committer	Herbert Xu <herbert@gondor.apana.org.au>	2017-02-03 13:16:20 +0300
commit		12fcd92305880504c9827c99ea128fecf1c99f0d (patch)
tree		49a2ccb419da7504484adba42996282c373d950d /arch/arm64/crypto/aes-neonbs-glue.c
parent		4edd7d015b95abcedde591a0c45965305d7cd524 (diff)
download	linux-12fcd92305880504c9827c99ea128fecf1c99f0d.tar.xz
crypto: arm64/aes - replace scalar fallback with plain NEON fallback
The new bitsliced NEON implementation of AES uses a fallback in two places: CBC encryption (which is strictly sequential, whereas this driver can only operate efficiently on 8 blocks at a time), and the XTS tweak generation, which involves encrypting a single AES block with a different key schedule.

The plain (i.e., non-bitsliced) NEON code is more suitable as a fallback, given that it is faster than scalar code on low-end cores (which is what the NEON implementations target, since high-end cores have dedicated instructions for AES), and shows similar behavior in terms of D-cache footprint and sensitivity to cache timing attacks. So switch the fallback handling to the plain NEON driver.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
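Why the CBC encrypt path cannot use the bitsliced code at all: each ciphertext block feeds the XOR of the next block, so the blocks form a serial dependency chain. A minimal generic-C sketch of that chaining (illustrative only, not code from this patch; aes_encrypt_one() is a hypothetical stand-in whose body is stubbed out):

	#include <stdint.h>
	#include <string.h>

	#define AES_BLOCK	16

	/* Hypothetical single-block primitive; a real one runs the AES rounds. */
	static void aes_encrypt_one(const uint8_t key[AES_BLOCK],
				    uint8_t dst[AES_BLOCK],
				    const uint8_t src[AES_BLOCK])
	{
		for (int i = 0; i < AES_BLOCK; i++)
			dst[i] = src[i] ^ key[i];	/* stub, NOT real AES */
	}

	static void cbc_encrypt_sketch(const uint8_t key[AES_BLOCK], uint8_t *dst,
				       const uint8_t *src, unsigned int blocks,
				       uint8_t iv[AES_BLOCK])
	{
		uint8_t buf[AES_BLOCK];

		while (blocks--) {
			for (int i = 0; i < AES_BLOCK; i++)
				buf[i] = src[i] ^ iv[i];	/* chain in prior ciphertext */
			aes_encrypt_one(key, dst, buf);
			memcpy(iv, dst, AES_BLOCK);	/* C[n] is block n+1's IV */
			src += AES_BLOCK;
			dst += AES_BLOCK;
		}
	}

This is why the patch hands whole CBC encrypt walks to neon_aes_cbc_encrypt() instead of processing 8 blocks in parallel; CBC decryption, by contrast, stays on the bitsliced path, since all ciphertext blocks are available up front.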
Diffstat (limited to 'arch/arm64/crypto/aes-neonbs-glue.c')
-rw-r--r--	arch/arm64/crypto/aes-neonbs-glue.c | 38
1 file changed, 28 insertions(+), 10 deletions(-)
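For context on the second fallback site: in XTS, the initial tweak is the IV encrypted once with the separate tweak key, which is the single-block encryption this patch routes to neon_aes_ecb_encrypt(). Each subsequent block's tweak is derived from it by a cheap doubling in GF(2^128), handled within the bitsliced code as it processes each batch of blocks. A sketch of that standard doubling step (generic XTS per IEEE P1619, not code from this patch):

	#include <stdint.h>

	static void xts_gf128_mul_x(uint8_t t[16])
	{
		uint8_t carry = 0;

		for (int i = 0; i < 16; i++) {	/* little-endian bit order */
			uint8_t msb = t[i] >> 7;

			t[i] = (uint8_t)((t[i] << 1) | carry);
			carry = msb;
		}
		if (carry)
			t[0] ^= 0x87;	/* reduce by x^128 + x^7 + x^2 + x + 1 */
	}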
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index 323dd76ae5f0..863e436ecf89 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -10,7 +10,6 @@
#include <asm/neon.h>
#include <crypto/aes.h>
-#include <crypto/cbc.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
@@ -42,7 +41,12 @@ asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[]);
-asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+/* borrowed from aes-neon-blk.ko */
+asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int blocks, int first);
+asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int blocks, u8 iv[],
+ int first);
struct aesbs_ctx {
u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32];
@@ -140,16 +144,28 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
return 0;
}
-static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
+static int cbc_encrypt(struct skcipher_request *req)
{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ int err, first = 1;
- __aes_arm64_encrypt(ctx->enc, dst, src, ctx->key.rounds);
-}
+ err = skcipher_walk_virt(&walk, req, true);
-static int cbc_encrypt(struct skcipher_request *req)
-{
- return crypto_cbc_encrypt_walk(req, cbc_encrypt_one);
+ kernel_neon_begin();
+ while (walk.nbytes >= AES_BLOCK_SIZE) {
+ unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
+
+ /* fall back to the non-bitsliced NEON implementation */
+ neon_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->enc, ctx->key.rounds, blocks, walk.iv,
+ first);
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
+ first = 0;
+ }
+ kernel_neon_end();
+ return err;
}
static int cbc_decrypt(struct skcipher_request *req)
@@ -254,9 +270,11 @@ static int __xts_crypt(struct skcipher_request *req,
err = skcipher_walk_virt(&walk, req, true);
- __aes_arm64_encrypt(ctx->twkey, walk.iv, walk.iv, ctx->key.rounds);
-
kernel_neon_begin();
+
+ neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey,
+ ctx->key.rounds, 1, 1);
+
while (walk.nbytes >= AES_BLOCK_SIZE) {
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;