Diffstat (limited to 'arch/arm64/crypto/aes-glue.c')
-rw-r--r-- | arch/arm64/crypto/aes-glue.c | 470
1 file changed, 353 insertions, 117 deletions
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 8d6c8932c841..aa57dc639f77 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -9,6 +9,8 @@
 #include <asm/hwcap.h>
 #include <asm/simd.h>
 #include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/sha.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
@@ -18,12 +20,10 @@
 #include <crypto/xts.h>
 
 #include "aes-ce-setkey.h"
-#include "aes-ctr-fallback.h"
 
 #ifdef USE_V8_CRYPTO_EXTENSIONS
 #define MODE			"ce"
 #define PRIO			300
-#define aes_setkey		ce_aes_setkey
 #define aes_expandkey		ce_aes_expandkey
 #define aes_ecb_encrypt		ce_aes_ecb_encrypt
 #define aes_ecb_decrypt		ce_aes_ecb_decrypt
@@ -31,6 +31,8 @@
 #define aes_cbc_decrypt		ce_aes_cbc_decrypt
 #define aes_cbc_cts_encrypt	ce_aes_cbc_cts_encrypt
 #define aes_cbc_cts_decrypt	ce_aes_cbc_cts_decrypt
+#define aes_essiv_cbc_encrypt	ce_aes_essiv_cbc_encrypt
+#define aes_essiv_cbc_decrypt	ce_aes_essiv_cbc_decrypt
 #define aes_ctr_encrypt		ce_aes_ctr_encrypt
 #define aes_xts_encrypt		ce_aes_xts_encrypt
 #define aes_xts_decrypt		ce_aes_xts_decrypt
@@ -39,27 +41,31 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
 #else
 #define MODE			"neon"
 #define PRIO			200
-#define aes_setkey		crypto_aes_set_key
-#define aes_expandkey		crypto_aes_expand_key
 #define aes_ecb_encrypt		neon_aes_ecb_encrypt
 #define aes_ecb_decrypt		neon_aes_ecb_decrypt
 #define aes_cbc_encrypt		neon_aes_cbc_encrypt
 #define aes_cbc_decrypt		neon_aes_cbc_decrypt
 #define aes_cbc_cts_encrypt	neon_aes_cbc_cts_encrypt
 #define aes_cbc_cts_decrypt	neon_aes_cbc_cts_decrypt
+#define aes_essiv_cbc_encrypt	neon_aes_essiv_cbc_encrypt
+#define aes_essiv_cbc_decrypt	neon_aes_essiv_cbc_decrypt
 #define aes_ctr_encrypt		neon_aes_ctr_encrypt
 #define aes_xts_encrypt		neon_aes_xts_encrypt
 #define aes_xts_decrypt		neon_aes_xts_decrypt
 #define aes_mac_update		neon_aes_mac_update
 MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
+#endif
+#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
 MODULE_ALIAS_CRYPTO("ecb(aes)");
 MODULE_ALIAS_CRYPTO("cbc(aes)");
 MODULE_ALIAS_CRYPTO("ctr(aes)");
 MODULE_ALIAS_CRYPTO("xts(aes)");
+#endif
+MODULE_ALIAS_CRYPTO("cts(cbc(aes))");
+MODULE_ALIAS_CRYPTO("essiv(cbc(aes),sha256)");
 MODULE_ALIAS_CRYPTO("cmac(aes)");
 MODULE_ALIAS_CRYPTO("xcbc(aes)");
 MODULE_ALIAS_CRYPTO("cbcmac(aes)");
-#endif
 
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
 MODULE_LICENSE("GPL v2");
@@ -84,25 +90,32 @@ asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
 				int rounds, int blocks, u8 ctr[]);
 
 asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
-				int rounds, int blocks, u32 const rk2[], u8 iv[],
+				int rounds, int bytes, u32 const rk2[], u8 iv[],
 				int first);
 asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
-				int rounds, int blocks, u32 const rk2[], u8 iv[],
+				int rounds, int bytes, u32 const rk2[], u8 iv[],
 				int first);
 
+asmlinkage void aes_essiv_cbc_encrypt(u8 out[], u8 const in[], u32 const rk1[],
+				      int rounds, int blocks, u8 iv[],
+				      u32 const rk2[]);
+asmlinkage void aes_essiv_cbc_decrypt(u8 out[], u8 const in[], u32 const rk1[],
+				      int rounds, int blocks, u8 iv[],
+				      u32 const rk2[]);
+
 asmlinkage void aes_mac_update(u8 const in[], u32 const rk[], int rounds,
 			       int blocks, u8 dg[], int enc_before,
 			       int enc_after);
 
-struct cts_cbc_req_ctx {
-	struct scatterlist sg_src[2];
-	struct scatterlist sg_dst[2];
-	struct skcipher_request subreq;
+struct crypto_aes_xts_ctx {
+	struct crypto_aes_ctx key1;
+	struct crypto_aes_ctx __aligned(8) key2;
 };
 
-struct crypto_aes_xts_ctx {
+struct crypto_aes_essiv_cbc_ctx {
 	struct crypto_aes_ctx key1;
 	struct crypto_aes_ctx __aligned(8) key2;
+	struct crypto_shash *hash;
 };
 
 struct mac_tfm_ctx {
@@ -118,11 +131,18 @@ struct mac_desc_ctx {
 static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 			       unsigned int key_len)
 {
-	return aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int ret;
+
+	ret = aes_expandkey(ctx, in_key, key_len);
+	if (ret)
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+	return ret;
 }
 
-static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
-		       unsigned int key_len)
+static int __maybe_unused xts_set_key(struct crypto_skcipher *tfm,
+				      const u8 *in_key, unsigned int key_len)
 {
 	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int ret;
@@ -142,7 +162,33 @@ static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 	return -EINVAL;
 }
 
-static int ecb_encrypt(struct skcipher_request *req)
+static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
+					    const u8 *in_key,
+					    unsigned int key_len)
+{
+	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+	SHASH_DESC_ON_STACK(desc, ctx->hash);
+	u8 digest[SHA256_DIGEST_SIZE];
+	int ret;
+
+	ret = aes_expandkey(&ctx->key1, in_key, key_len);
+	if (ret)
+		goto out;
+
+	desc->tfm = ctx->hash;
+	crypto_shash_digest(desc, in_key, key_len, digest);
+
+	ret = aes_expandkey(&ctx->key2, digest, sizeof(digest));
+	if (ret)
+		goto out;
+
+	return 0;
+out:
+	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static int __maybe_unused ecb_encrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -162,7 +208,7 @@ static int ecb_encrypt(struct skcipher_request *req)
 	return err;
 }
 
-static int ecb_decrypt(struct skcipher_request *req)
+static int __maybe_unused ecb_decrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -182,63 +228,78 @@ static int ecb_decrypt(struct skcipher_request *req)
 	return err;
 }
 
-static int cbc_encrypt(struct skcipher_request *req)
+static int cbc_encrypt_walk(struct skcipher_request *req,
+			    struct skcipher_walk *walk)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-	int err, rounds = 6 + ctx->key_length / 4;
-	struct skcipher_walk walk;
+	int err = 0, rounds = 6 + ctx->key_length / 4;
 	unsigned int blocks;
 
-	err = skcipher_walk_virt(&walk, req, false);
-
-	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
 		kernel_neon_begin();
-		aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, rounds, blocks, walk.iv);
+		aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
+				ctx->key_enc, rounds, blocks, walk->iv);
 		kernel_neon_end();
-		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
 
-static int cbc_decrypt(struct skcipher_request *req)
+static int __maybe_unused cbc_encrypt(struct skcipher_request *req)
 {
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-	int err, rounds = 6 + ctx->key_length / 4;
 	struct skcipher_walk walk;
-	unsigned int blocks;
+	int err;
 
 	err = skcipher_walk_virt(&walk, req, false);
+	if (err)
+		return err;
+	return cbc_encrypt_walk(req, &walk);
+}
 
-	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+static int cbc_decrypt_walk(struct skcipher_request *req,
+			    struct skcipher_walk *walk)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int err = 0, rounds = 6 + ctx->key_length / 4;
+	unsigned int blocks;
+
+	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
 		kernel_neon_begin();
-		aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, rounds, blocks, walk.iv);
+		aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
				ctx->key_dec, rounds, blocks, walk->iv);
 		kernel_neon_end();
-		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
 
-static int cts_cbc_init_tfm(struct crypto_skcipher *tfm)
+static int __maybe_unused cbc_decrypt(struct skcipher_request *req)
 {
-	crypto_skcipher_set_reqsize(tfm, sizeof(struct cts_cbc_req_ctx));
-	return 0;
+	struct skcipher_walk walk;
+	int err;
+
+	err = skcipher_walk_virt(&walk, req, false);
+	if (err)
+		return err;
+	return cbc_decrypt_walk(req, &walk);
 }
 
 static int cts_cbc_encrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct cts_cbc_req_ctx *rctx = skcipher_request_ctx(req);
 	int err, rounds = 6 + ctx->key_length / 4;
 	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
 	struct scatterlist *src = req->src, *dst = req->dst;
+	struct scatterlist sg_src[2], sg_dst[2];
+	struct skcipher_request subreq;
 	struct skcipher_walk walk;
 
-	skcipher_request_set_tfm(&rctx->subreq, tfm);
+	skcipher_request_set_tfm(&subreq, tfm);
+	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
+				      NULL, NULL);
 
 	if (req->cryptlen <= AES_BLOCK_SIZE) {
 		if (req->cryptlen < AES_BLOCK_SIZE)
@@ -247,41 +308,30 @@ static int cts_cbc_encrypt(struct skcipher_request *req)
 	}
 
 	if (cbc_blocks > 0) {
-		unsigned int blocks;
-
-		skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst,
+		skcipher_request_set_crypt(&subreq, req->src, req->dst,
 					   cbc_blocks * AES_BLOCK_SIZE,
 					   req->iv);
 
-		err = skcipher_walk_virt(&walk, &rctx->subreq, false);
-
-		while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
-			kernel_neon_begin();
-			aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-					ctx->key_enc, rounds, blocks, walk.iv);
-			kernel_neon_end();
-			err = skcipher_walk_done(&walk,
-						 walk.nbytes % AES_BLOCK_SIZE);
-		}
+		err = skcipher_walk_virt(&walk, &subreq, false) ?:
+		      cbc_encrypt_walk(&subreq, &walk);
 		if (err)
 			return err;
 
 		if (req->cryptlen == AES_BLOCK_SIZE)
 			return 0;
 
-		dst = src = scatterwalk_ffwd(rctx->sg_src, req->src,
-					     rctx->subreq.cryptlen);
+		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
 		if (req->dst != req->src)
-			dst = scatterwalk_ffwd(rctx->sg_dst, req->dst,
-					       rctx->subreq.cryptlen);
+			dst = scatterwalk_ffwd(sg_dst, req->dst,
+					       subreq.cryptlen);
 	}
 
 	/* handle ciphertext stealing */
-	skcipher_request_set_crypt(&rctx->subreq, src, dst,
+	skcipher_request_set_crypt(&subreq, src, dst,
 				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
 				   req->iv);
 
-	err = skcipher_walk_virt(&walk, &rctx->subreq, false);
+	err = skcipher_walk_virt(&walk, &subreq, false);
 	if (err)
 		return err;
 
@@ -297,13 +347,16 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct cts_cbc_req_ctx *rctx = skcipher_request_ctx(req);
 	int err, rounds = 6 + ctx->key_length / 4;
 	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
 	struct scatterlist *src = req->src, *dst = req->dst;
+	struct scatterlist sg_src[2], sg_dst[2];
+	struct skcipher_request subreq;
 	struct skcipher_walk walk;
 
-	skcipher_request_set_tfm(&rctx->subreq, tfm);
+	skcipher_request_set_tfm(&subreq, tfm);
+	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
+				      NULL, NULL);
 
 	if (req->cryptlen <= AES_BLOCK_SIZE) {
 		if (req->cryptlen < AES_BLOCK_SIZE)
@@ -312,41 +365,30 @@
 	}
 
 	if (cbc_blocks > 0) {
-		unsigned int blocks;
-
-		skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst,
+		skcipher_request_set_crypt(&subreq, req->src, req->dst,
 					   cbc_blocks * AES_BLOCK_SIZE,
 					   req->iv);
 
-		err = skcipher_walk_virt(&walk, &rctx->subreq, false);
-
-		while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
-			kernel_neon_begin();
-			aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-					ctx->key_dec, rounds, blocks, walk.iv);
-			kernel_neon_end();
-			err = skcipher_walk_done(&walk,
-						 walk.nbytes % AES_BLOCK_SIZE);
-		}
+		err = skcipher_walk_virt(&walk, &subreq, false) ?:
+		      cbc_decrypt_walk(&subreq, &walk);
 		if (err)
 			return err;
 
 		if (req->cryptlen == AES_BLOCK_SIZE)
 			return 0;
 
-		dst = src = scatterwalk_ffwd(rctx->sg_src, req->src,
-					     rctx->subreq.cryptlen);
+		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
 		if (req->dst != req->src)
-			dst = scatterwalk_ffwd(rctx->sg_dst, req->dst,
-					       rctx->subreq.cryptlen);
+			dst = scatterwalk_ffwd(sg_dst, req->dst,
+					       subreq.cryptlen);
 	}
 
 	/* handle ciphertext stealing */
-	skcipher_request_set_crypt(&rctx->subreq, src, dst,
+	skcipher_request_set_crypt(&subreq, src, dst,
 				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
 				   req->iv);
 
-	err = skcipher_walk_virt(&walk, &rctx->subreq, false);
+	err = skcipher_walk_virt(&walk, &subreq, false);
 	if (err)
 		return err;
 
@@ -358,6 +400,66 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
 	return skcipher_walk_done(&walk, 0);
 }
 
+static int __maybe_unused essiv_cbc_init_tfm(struct crypto_skcipher *tfm)
+{
+	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	ctx->hash = crypto_alloc_shash("sha256", 0, 0);
+
+	return PTR_ERR_OR_ZERO(ctx->hash);
+}
+
+static void __maybe_unused essiv_cbc_exit_tfm(struct crypto_skcipher *tfm)
+{
+	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	crypto_free_shash(ctx->hash);
+}
+
+static int __maybe_unused essiv_cbc_encrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int err, rounds = 6 + ctx->key1.key_length / 4;
+	struct skcipher_walk walk;
+	unsigned int blocks;
+
+	err = skcipher_walk_virt(&walk, req, false);
+
+	blocks = walk.nbytes / AES_BLOCK_SIZE;
+	if (blocks) {
+		kernel_neon_begin();
+		aes_essiv_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				      ctx->key1.key_enc, rounds, blocks,
+				      req->iv, ctx->key2.key_enc);
+		kernel_neon_end();
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
+	}
+	return err ?: cbc_encrypt_walk(req, &walk);
+}
+
+static int __maybe_unused essiv_cbc_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int err, rounds = 6 + ctx->key1.key_length / 4;
+	struct skcipher_walk walk;
+	unsigned int blocks;
+
+	err = skcipher_walk_virt(&walk, req, false);
+
+	blocks = walk.nbytes / AES_BLOCK_SIZE;
+	if (blocks) {
+		kernel_neon_begin();
+		aes_essiv_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+				      ctx->key1.key_dec, rounds, blocks,
+				      req->iv, ctx->key2.key_enc);
+		kernel_neon_end();
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
+	}
+	return err ?: cbc_decrypt_walk(req, &walk);
+}
+
 static int ctr_encrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -397,62 +499,176 @@ static int ctr_encrypt(struct skcipher_request *req)
 	return err;
 }
 
-static int ctr_encrypt_sync(struct skcipher_request *req)
+static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
 {
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	unsigned long flags;
+
+	/*
+	 * Temporarily disable interrupts to avoid races where
+	 * cachelines are evicted when the CPU is interrupted
+	 * to do something else.
+	 */
+	local_irq_save(flags);
+	aes_encrypt(ctx, dst, src);
+	local_irq_restore(flags);
+}
 
+static int __maybe_unused ctr_encrypt_sync(struct skcipher_request *req)
+{
 	if (!crypto_simd_usable())
-		return aes_ctr_encrypt_fallback(ctx, req);
+		return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);
 
 	return ctr_encrypt(req);
 }
 
-static int xts_encrypt(struct skcipher_request *req)
+static int __maybe_unused xts_encrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key1.key_length / 4;
+	int tail = req->cryptlen % AES_BLOCK_SIZE;
+	struct scatterlist sg_src[2], sg_dst[2];
+	struct skcipher_request subreq;
+	struct scatterlist *src, *dst;
 	struct skcipher_walk walk;
-	unsigned int blocks;
+
+	if (req->cryptlen < AES_BLOCK_SIZE)
+		return -EINVAL;
 
 	err = skcipher_walk_virt(&walk, req, false);
 
-	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
+		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
+					      AES_BLOCK_SIZE) - 2;
+
+		skcipher_walk_abort(&walk);
+
+		skcipher_request_set_tfm(&subreq, tfm);
+		skcipher_request_set_callback(&subreq,
+					      skcipher_request_flags(req),
+					      NULL, NULL);
+		skcipher_request_set_crypt(&subreq, req->src, req->dst,
+					   xts_blocks * AES_BLOCK_SIZE,
+					   req->iv);
+		req = &subreq;
+		err = skcipher_walk_virt(&walk, req, false);
+	} else {
+		tail = 0;
+	}
+
+	for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
+		int nbytes = walk.nbytes;
+
+		if (walk.nbytes < walk.total)
+			nbytes &= ~(AES_BLOCK_SIZE - 1);
+
 		kernel_neon_begin();
 		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key1.key_enc, rounds, blocks,
+				ctx->key1.key_enc, rounds, nbytes,
 				ctx->key2.key_enc, walk.iv, first);
 		kernel_neon_end();
-		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
-	return err;
+	if (err || likely(!tail))
+		return err;
+
+	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+	if (req->dst != req->src)
+		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+
+	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+				   req->iv);
+
+	err = skcipher_walk_virt(&walk, &subreq, false);
+	if (err)
+		return err;
+
+	kernel_neon_begin();
+	aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+			ctx->key1.key_enc, rounds, walk.nbytes,
+			ctx->key2.key_enc, walk.iv, first);
+	kernel_neon_end();
+
+	return skcipher_walk_done(&walk, 0);
 }
 
-static int xts_decrypt(struct skcipher_request *req)
+static int __maybe_unused xts_decrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key1.key_length / 4;
+	int tail = req->cryptlen % AES_BLOCK_SIZE;
+	struct scatterlist sg_src[2], sg_dst[2];
+	struct skcipher_request subreq;
+	struct scatterlist *src, *dst;
 	struct skcipher_walk walk;
-	unsigned int blocks;
+
+	if (req->cryptlen < AES_BLOCK_SIZE)
+		return -EINVAL;
 
 	err = skcipher_walk_virt(&walk, req, false);
 
-	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
+		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
+					      AES_BLOCK_SIZE) - 2;
+
+		skcipher_walk_abort(&walk);
+
+		skcipher_request_set_tfm(&subreq, tfm);
+		skcipher_request_set_callback(&subreq,
+					      skcipher_request_flags(req),
+					      NULL, NULL);
+		skcipher_request_set_crypt(&subreq, req->src, req->dst,
+					   xts_blocks * AES_BLOCK_SIZE,
+					   req->iv);
+		req = &subreq;
+		err = skcipher_walk_virt(&walk, req, false);
+	} else {
+		tail = 0;
+	}
+
+	for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
+		int nbytes = walk.nbytes;
+
+		if (walk.nbytes < walk.total)
+			nbytes &= ~(AES_BLOCK_SIZE - 1);
+
 		kernel_neon_begin();
 		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key1.key_dec, rounds, blocks,
+				ctx->key1.key_dec, rounds, nbytes,
 				ctx->key2.key_enc, walk.iv, first);
 		kernel_neon_end();
-		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
-	return err;
+	if (err || likely(!tail))
+		return err;
+
+	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+	if (req->dst != req->src)
+		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+
+	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+				   req->iv);
+
+	err = skcipher_walk_virt(&walk, &subreq, false);
+	if (err)
+		return err;
+
+
+	kernel_neon_begin();
+	aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+			ctx->key1.key_dec, rounds, walk.nbytes,
+			ctx->key2.key_enc, walk.iv, first);
+	kernel_neon_end();
+
+	return skcipher_walk_done(&walk, 0);
 }
 
 static struct skcipher_alg aes_algs[] = { {
+#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
 	.base = {
 		.cra_name		= "__ecb(aes)",
 		.cra_driver_name	= "__ecb-aes-" MODE,
@@ -485,24 +701,6 @@ static struct skcipher_alg aes_algs[] = { {
 	.decrypt	= cbc_decrypt,
 }, {
 	.base = {
-		.cra_name		= "__cts(cbc(aes))",
-		.cra_driver_name	= "__cts-cbc-aes-" MODE,
-		.cra_priority		= PRIO,
-		.cra_flags		= CRYPTO_ALG_INTERNAL,
-		.cra_blocksize		= AES_BLOCK_SIZE,
-		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-		.cra_module		= THIS_MODULE,
-	},
-	.min_keysize	= AES_MIN_KEY_SIZE,
-	.max_keysize	= AES_MAX_KEY_SIZE,
-	.ivsize		= AES_BLOCK_SIZE,
-	.walksize	= 2 * AES_BLOCK_SIZE,
-	.setkey		= skcipher_aes_setkey,
-	.encrypt	= cts_cbc_encrypt,
-	.decrypt	= cts_cbc_decrypt,
-	.init		= cts_cbc_init_tfm,
-}, {
-	.base = {
 		.cra_name		= "__ctr(aes)",
 		.cra_driver_name	= "__ctr-aes-" MODE,
 		.cra_priority		= PRIO,
@@ -547,9 +745,46 @@
 	.min_keysize	= 2 * AES_MIN_KEY_SIZE,
 	.max_keysize	= 2 * AES_MAX_KEY_SIZE,
 	.ivsize		= AES_BLOCK_SIZE,
+	.walksize	= 2 * AES_BLOCK_SIZE,
 	.setkey		= xts_set_key,
 	.encrypt	= xts_encrypt,
 	.decrypt	= xts_decrypt,
+}, {
+#endif
+	.base = {
+		.cra_name		= "__cts(cbc(aes))",
+		.cra_driver_name	= "__cts-cbc-aes-" MODE,
+		.cra_priority		= PRIO,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_module		= THIS_MODULE,
+	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.walksize	= 2 * AES_BLOCK_SIZE,
+	.setkey		= skcipher_aes_setkey,
+	.encrypt	= cts_cbc_encrypt,
+	.decrypt	= cts_cbc_decrypt,
+}, {
+	.base = {
+		.cra_name		= "__essiv(cbc(aes),sha256)",
+		.cra_driver_name	= "__essiv-cbc-aes-sha256-" MODE,
+		.cra_priority		= PRIO + 1,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct crypto_aes_essiv_cbc_ctx),
+		.cra_module		= THIS_MODULE,
+	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.setkey		= essiv_cbc_set_key,
+	.encrypt	= essiv_cbc_encrypt,
+	.decrypt	= essiv_cbc_decrypt,
+	.init		= essiv_cbc_init_tfm,
+	.exit		= essiv_cbc_exit_tfm,
 } };
 
 static int cbcmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
@@ -646,15 +881,14 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
 		kernel_neon_end();
 	} else {
 		if (enc_before)
-			__aes_arm64_encrypt(ctx->key_enc, dg, dg, rounds);
+			aes_encrypt(ctx, dg, dg);
 
 		while (blocks--) {
 			crypto_xor(dg, in, AES_BLOCK_SIZE);
 			in += AES_BLOCK_SIZE;
 
 			if (blocks || enc_after)
-				__aes_arm64_encrypt(ctx->key_enc, dg, dg,
-						    rounds);
+				aes_encrypt(ctx, dg, dg);
 		}
 	}
 }
@@ -837,5 +1071,7 @@ module_cpu_feature_match(AES, aes_init);
 module_init(aes_init);
 EXPORT_SYMBOL(neon_aes_ecb_encrypt);
 EXPORT_SYMBOL(neon_aes_cbc_encrypt);
+EXPORT_SYMBOL(neon_aes_xts_encrypt);
+EXPORT_SYMBOL(neon_aes_xts_decrypt);
 #endif
 module_exit(aes_exit);
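Editor's note (not part of the patch): the diff advertises a new "essiv(cbc(aes),sha256)" algorithm; the "__"-prefixed entries in aes_algs[] are internal and are exposed under the plain names through the SIMD wrapper this driver registers. The sketch below shows how a kernel caller might drive that algorithm through the generic skcipher API. The helper name, the caller-supplied sector buffer, and the little-endian sector-number IV mirror dm-crypt's usual ESSIV convention and are assumptions made for the example, not something defined by this patch.

/* Hypothetical usage sketch -- illustrative only, not from the patch. */
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <asm/unaligned.h>

static int example_essiv_encrypt_sector(const u8 *key, unsigned int keylen,
					 u8 *data, unsigned int len, u64 sector)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 iv[16] = {};			/* AES block size; SHA-256-derived key encrypts it */
	int err;

	/* Resolves to __essiv-cbc-aes-sha256-ce or -neon behind the SIMD wrapper. */
	tfm = crypto_alloc_skcipher("essiv(cbc(aes),sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);	/* AES key only; K2 is derived */
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* ESSIV callers pass the sector number, little endian, as the IV. */
	put_unaligned_le64(sector, iv);

	/* len must be a multiple of AES_BLOCK_SIZE; data must not be on the stack. */
	sg_init_one(&sg, data, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					   CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}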
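A second note on the CTR fallback change in the diff: aes_ctr_encrypt_fallback() is dropped, and when the NEON unit is not usable ctr_encrypt_sync() now hands the request to the generic crypto_ctr_encrypt_walk() helper together with the single-block ctr_encrypt_one() callback, which uses the scalar AES library with interrupts disabled. The sketch below is a simplified conceptual model of what that helper does, written against plain buffers instead of the real scatterlist walk; the function name is hypothetical.

/* Conceptual model of the CTR fallback path -- an assumption-laden sketch. */
#include <crypto/aes.h>		/* struct crypto_aes_ctx, aes_encrypt() */
#include <crypto/algapi.h>	/* crypto_inc(), crypto_xor_cpy() */
#include <linux/kernel.h>	/* min_t() */

static void example_ctr_fallback_model(const struct crypto_aes_ctx *ctx,
					u8 *dst, const u8 *src,
					unsigned int nbytes,
					u8 ctr[AES_BLOCK_SIZE])
{
	u8 ks[AES_BLOCK_SIZE];

	while (nbytes) {
		unsigned int n = min_t(unsigned int, nbytes, AES_BLOCK_SIZE);

		aes_encrypt(ctx, ks, ctr);		/* keystream = E_K(counter) */
		crypto_xor_cpy(dst, src, ks, n);	/* dst = src XOR keystream */
		crypto_inc(ctr, AES_BLOCK_SIZE);	/* big-endian counter increment */

		src += n;
		dst += n;
		nbytes -= n;
	}
}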