Diffstat (limited to 'arch/sparc/crypto')
-rw-r--r--  arch/sparc/crypto/aes_glue.c      | 310
-rw-r--r--  arch/sparc/crypto/camellia_glue.c | 217
-rw-r--r--  arch/sparc/crypto/des_glue.c      | 499
3 files changed, 465 insertions(+), 561 deletions(-)
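The patch converts the sparc64 AES, Camellia, and DES/3DES glue code from the legacy blkcipher interface to the skcipher interface: each mode's scatterlist-walking loop moves from blkcipher_walk to skcipher_walk, the open-coded partial-block masking is replaced by round_down()/modulo on the block size, and the ecb/cbc/ctr modes are re-registered as skcipher_alg entries while the bare ciphers remain crypto_alg entries. For orientation, the converted ecb_encrypt() from aes_glue.c is reassembled below from the hunks that follow, with editorial comments added; the comments are not part of the patch.

/*
 * Converted ecb_encrypt() from aes_glue.c, reassembled from the hunks
 * below with line breaks restored and comments added for orientation.
 */
static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	/* "true" marks the walk as atomic (non-sleeping), replacing the old
	 * clearing of CRYPTO_TFM_REQ_MAY_SLEEP on the blkcipher desc. */
	err = skcipher_walk_virt(&walk, req, true);
	if (err)
		return err;

	ctx->ops->load_encrypt_keys(&ctx->key[0]);
	while ((nbytes = walk.nbytes) != 0) {
		/* Process only whole blocks; report the remainder back to
		 * the walk so it is carried into the next iteration. */
		ctx->ops->ecb_encrypt(&ctx->key[0], walk.src.virt.addr,
				      walk.dst.virt.addr,
				      round_down(nbytes, AES_BLOCK_SIZE));
		err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
	}
	fprs_write(0);	/* drop the FPU state used by the AES opcodes */
	return err;
}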
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c index 7b946b3dee9d..0f5a501c95a9 100644 --- a/arch/sparc/crypto/aes_glue.c +++ b/arch/sparc/crypto/aes_glue.c @@ -24,6 +24,7 @@ #include <linux/types.h> #include <crypto/algapi.h> #include <crypto/aes.h> +#include <crypto/internal/skcipher.h> #include <asm/fpumacro.h> #include <asm/pstate.h> @@ -197,6 +198,12 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, return 0; } +static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len); +} + static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm); @@ -211,131 +218,108 @@ static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) ctx->ops->decrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst); } -#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1)) - -static int ecb_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ecb_encrypt(struct skcipher_request *req) { - struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; ctx->ops->load_encrypt_keys(&ctx->key[0]); - while ((nbytes = walk.nbytes)) { - unsigned int block_len = nbytes & AES_BLOCK_MASK; - - if (likely(block_len)) { - ctx->ops->ecb_encrypt(&ctx->key[0], - (const u64 *)walk.src.virt.addr, - (u64 *) walk.dst.virt.addr, - block_len); - } - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); + while ((nbytes = walk.nbytes) != 0) { + ctx->ops->ecb_encrypt(&ctx->key[0], walk.src.virt.addr, + walk.dst.virt.addr, + round_down(nbytes, AES_BLOCK_SIZE)); + err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); } fprs_write(0); return err; } -static int ecb_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ecb_decrypt(struct skcipher_request *req) { - struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - u64 *key_end; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + const u64 *key_end; + struct skcipher_walk walk; + unsigned int nbytes; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; ctx->ops->load_decrypt_keys(&ctx->key[0]); key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)]; - while ((nbytes = walk.nbytes)) { - unsigned int block_len = nbytes & AES_BLOCK_MASK; - - if (likely(block_len)) { - ctx->ops->ecb_decrypt(key_end, - (const u64 *) walk.src.virt.addr, - (u64 *) walk.dst.virt.addr, block_len); - } - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); + while ((nbytes = walk.nbytes) != 0) { + ctx->ops->ecb_decrypt(key_end, walk.src.virt.addr, + walk.dst.virt.addr, + 
round_down(nbytes, AES_BLOCK_SIZE)); + err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); } fprs_write(0); return err; } -static int cbc_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int cbc_encrypt(struct skcipher_request *req) { - struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; ctx->ops->load_encrypt_keys(&ctx->key[0]); - while ((nbytes = walk.nbytes)) { - unsigned int block_len = nbytes & AES_BLOCK_MASK; - - if (likely(block_len)) { - ctx->ops->cbc_encrypt(&ctx->key[0], - (const u64 *)walk.src.virt.addr, - (u64 *) walk.dst.virt.addr, - block_len, (u64 *) walk.iv); - } - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); + while ((nbytes = walk.nbytes) != 0) { + ctx->ops->cbc_encrypt(&ctx->key[0], walk.src.virt.addr, + walk.dst.virt.addr, + round_down(nbytes, AES_BLOCK_SIZE), + walk.iv); + err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); } fprs_write(0); return err; } -static int cbc_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int cbc_decrypt(struct skcipher_request *req) { - struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - u64 *key_end; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + const u64 *key_end; + struct skcipher_walk walk; + unsigned int nbytes; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; ctx->ops->load_decrypt_keys(&ctx->key[0]); key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)]; - while ((nbytes = walk.nbytes)) { - unsigned int block_len = nbytes & AES_BLOCK_MASK; - - if (likely(block_len)) { - ctx->ops->cbc_decrypt(key_end, - (const u64 *) walk.src.virt.addr, - (u64 *) walk.dst.virt.addr, - block_len, (u64 *) walk.iv); - } - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); + while ((nbytes = walk.nbytes) != 0) { + ctx->ops->cbc_decrypt(key_end, walk.src.virt.addr, + walk.dst.virt.addr, + round_down(nbytes, AES_BLOCK_SIZE), + walk.iv); + err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); } fprs_write(0); return err; } -static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx, - struct blkcipher_walk *walk) +static void ctr_crypt_final(const struct crypto_sparc64_aes_ctx *ctx, + struct skcipher_walk *walk) { u8 *ctrblk = walk->iv; u64 keystream[AES_BLOCK_SIZE / sizeof(u64)]; @@ -349,40 +333,35 @@ static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx, crypto_inc(ctrblk, AES_BLOCK_SIZE); } -static int ctr_crypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ctr_crypt(struct skcipher_request *req) { - struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; + struct 
crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; ctx->ops->load_encrypt_keys(&ctx->key[0]); while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { - unsigned int block_len = nbytes & AES_BLOCK_MASK; - - if (likely(block_len)) { - ctx->ops->ctr_crypt(&ctx->key[0], - (const u64 *)walk.src.virt.addr, - (u64 *) walk.dst.virt.addr, - block_len, (u64 *) walk.iv); - } - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); + ctx->ops->ctr_crypt(&ctx->key[0], walk.src.virt.addr, + walk.dst.virt.addr, + round_down(nbytes, AES_BLOCK_SIZE), + walk.iv); + err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); } if (walk.nbytes) { ctr_crypt_final(ctx, &walk); - err = blkcipher_walk_done(desc, &walk, 0); + err = skcipher_walk_done(&walk, 0); } fprs_write(0); return err; } -static struct crypto_alg algs[] = { { +static struct crypto_alg cipher_alg = { .cra_name = "aes", .cra_driver_name = "aes-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, @@ -400,66 +379,53 @@ static struct crypto_alg algs[] = { { .cia_decrypt = crypto_aes_decrypt } } -}, { - .cra_name = "ecb(aes)", - .cra_driver_name = "ecb-aes-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx), - .cra_alignmask = 7, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = aes_set_key, - .encrypt = ecb_encrypt, - .decrypt = ecb_decrypt, - }, - }, -}, { - .cra_name = "cbc(aes)", - .cra_driver_name = "cbc-aes-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx), - .cra_alignmask = 7, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = aes_set_key, - .encrypt = cbc_encrypt, - .decrypt = cbc_decrypt, - }, - }, -}, { - .cra_name = "ctr(aes)", - .cra_driver_name = "ctr-aes-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx), - .cra_alignmask = 7, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = aes_set_key, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, - }, - }, -} }; +}; + +static struct skcipher_alg skcipher_algs[] = { + { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-sparc64", + .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_set_key_skcipher, + .encrypt = ecb_encrypt, + .decrypt = ecb_decrypt, + }, 
{ + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-sparc64", + .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = aes_set_key_skcipher, + .encrypt = cbc_encrypt, + .decrypt = cbc_decrypt, + }, { + .base.cra_name = "ctr(aes)", + .base.cra_driver_name = "ctr-aes-sparc64", + .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = aes_set_key_skcipher, + .encrypt = ctr_crypt, + .decrypt = ctr_crypt, + .chunksize = AES_BLOCK_SIZE, + } +}; static bool __init sparc64_has_aes_opcode(void) { @@ -477,17 +443,27 @@ static bool __init sparc64_has_aes_opcode(void) static int __init aes_sparc64_mod_init(void) { - if (sparc64_has_aes_opcode()) { - pr_info("Using sparc64 aes opcodes optimized AES implementation\n"); - return crypto_register_algs(algs, ARRAY_SIZE(algs)); + int err; + + if (!sparc64_has_aes_opcode()) { + pr_info("sparc64 aes opcodes not available.\n"); + return -ENODEV; } - pr_info("sparc64 aes opcodes not available.\n"); - return -ENODEV; + pr_info("Using sparc64 aes opcodes optimized AES implementation\n"); + err = crypto_register_alg(&cipher_alg); + if (err) + return err; + err = crypto_register_skciphers(skcipher_algs, + ARRAY_SIZE(skcipher_algs)); + if (err) + crypto_unregister_alg(&cipher_alg); + return err; } static void __exit aes_sparc64_mod_fini(void) { - crypto_unregister_algs(algs, ARRAY_SIZE(algs)); + crypto_unregister_alg(&cipher_alg); + crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); } module_init(aes_sparc64_mod_init); diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c index 3823f9491a72..1700f863748c 100644 --- a/arch/sparc/crypto/camellia_glue.c +++ b/arch/sparc/crypto/camellia_glue.c @@ -12,6 +12,7 @@ #include <linux/mm.h> #include <linux/types.h> #include <crypto/algapi.h> +#include <crypto/internal/skcipher.h> #include <asm/fpumacro.h> #include <asm/pstate.h> @@ -52,6 +53,12 @@ static int camellia_set_key(struct crypto_tfm *tfm, const u8 *_in_key, return 0; } +static int camellia_set_key_skcipher(struct crypto_skcipher *tfm, + const u8 *in_key, unsigned int key_len) +{ + return camellia_set_key(crypto_skcipher_tfm(tfm), in_key, key_len); +} + extern void camellia_sparc64_crypt(const u64 *key, const u32 *input, u32 *output, unsigned int key_len); @@ -81,61 +88,46 @@ typedef void ecb_crypt_op(const u64 *input, u64 *output, unsigned int len, extern ecb_crypt_op camellia_sparc64_ecb_crypt_3_grand_rounds; extern ecb_crypt_op camellia_sparc64_ecb_crypt_4_grand_rounds; -#define CAMELLIA_BLOCK_MASK (~(CAMELLIA_BLOCK_SIZE - 1)) - -static int __ecb_crypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes, bool encrypt) +static int __ecb_crypt(struct skcipher_request *req, bool encrypt) { - struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; 
ecb_crypt_op *op; const u64 *key; + unsigned int nbytes; int err; op = camellia_sparc64_ecb_crypt_3_grand_rounds; if (ctx->key_len != 16) op = camellia_sparc64_ecb_crypt_4_grand_rounds; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; if (encrypt) key = &ctx->encrypt_key[0]; else key = &ctx->decrypt_key[0]; camellia_sparc64_load_keys(key, ctx->key_len); - while ((nbytes = walk.nbytes)) { - unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK; - - if (likely(block_len)) { - const u64 *src64; - u64 *dst64; - - src64 = (const u64 *)walk.src.virt.addr; - dst64 = (u64 *) walk.dst.virt.addr; - op(src64, dst64, block_len, key); - } - nbytes &= CAMELLIA_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); + while ((nbytes = walk.nbytes) != 0) { + op(walk.src.virt.addr, walk.dst.virt.addr, + round_down(nbytes, CAMELLIA_BLOCK_SIZE), key); + err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE); } fprs_write(0); return err; } -static int ecb_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ecb_encrypt(struct skcipher_request *req) { - return __ecb_crypt(desc, dst, src, nbytes, true); + return __ecb_crypt(req, true); } -static int ecb_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ecb_decrypt(struct skcipher_request *req) { - return __ecb_crypt(desc, dst, src, nbytes, false); + return __ecb_crypt(req, false); } typedef void cbc_crypt_op(const u64 *input, u64 *output, unsigned int len, @@ -146,85 +138,65 @@ extern cbc_crypt_op camellia_sparc64_cbc_encrypt_4_grand_rounds; extern cbc_crypt_op camellia_sparc64_cbc_decrypt_3_grand_rounds; extern cbc_crypt_op camellia_sparc64_cbc_decrypt_4_grand_rounds; -static int cbc_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int cbc_encrypt(struct skcipher_request *req) { - struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; cbc_crypt_op *op; const u64 *key; + unsigned int nbytes; int err; op = camellia_sparc64_cbc_encrypt_3_grand_rounds; if (ctx->key_len != 16) op = camellia_sparc64_cbc_encrypt_4_grand_rounds; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; key = &ctx->encrypt_key[0]; camellia_sparc64_load_keys(key, ctx->key_len); - while ((nbytes = walk.nbytes)) { - unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK; - - if (likely(block_len)) { - const u64 *src64; - u64 *dst64; - - src64 = (const u64 *)walk.src.virt.addr; - dst64 = (u64 *) walk.dst.virt.addr; - op(src64, dst64, block_len, key, - (u64 *) walk.iv); - } - nbytes &= CAMELLIA_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); + while ((nbytes = walk.nbytes) != 0) { + op(walk.src.virt.addr, walk.dst.virt.addr, + round_down(nbytes, CAMELLIA_BLOCK_SIZE), key, walk.iv); + err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE); } fprs_write(0); return err; } -static int cbc_decrypt(struct blkcipher_desc *desc, - struct scatterlist 
*dst, struct scatterlist *src, - unsigned int nbytes) +static int cbc_decrypt(struct skcipher_request *req) { - struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; cbc_crypt_op *op; const u64 *key; + unsigned int nbytes; int err; op = camellia_sparc64_cbc_decrypt_3_grand_rounds; if (ctx->key_len != 16) op = camellia_sparc64_cbc_decrypt_4_grand_rounds; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; key = &ctx->decrypt_key[0]; camellia_sparc64_load_keys(key, ctx->key_len); - while ((nbytes = walk.nbytes)) { - unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK; - - if (likely(block_len)) { - const u64 *src64; - u64 *dst64; - - src64 = (const u64 *)walk.src.virt.addr; - dst64 = (u64 *) walk.dst.virt.addr; - op(src64, dst64, block_len, key, - (u64 *) walk.iv); - } - nbytes &= CAMELLIA_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); + while ((nbytes = walk.nbytes) != 0) { + op(walk.src.virt.addr, walk.dst.virt.addr, + round_down(nbytes, CAMELLIA_BLOCK_SIZE), key, walk.iv); + err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE); } fprs_write(0); return err; } -static struct crypto_alg algs[] = { { +static struct crypto_alg cipher_alg = { .cra_name = "camellia", .cra_driver_name = "camellia-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, @@ -242,46 +214,37 @@ static struct crypto_alg algs[] = { { .cia_decrypt = camellia_decrypt } } -}, { - .cra_name = "ecb(camellia)", - .cra_driver_name = "ecb-camellia-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = CAMELLIA_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct camellia_sparc64_ctx), - .cra_alignmask = 7, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = CAMELLIA_MIN_KEY_SIZE, - .max_keysize = CAMELLIA_MAX_KEY_SIZE, - .setkey = camellia_set_key, - .encrypt = ecb_encrypt, - .decrypt = ecb_decrypt, - }, - }, -}, { - .cra_name = "cbc(camellia)", - .cra_driver_name = "cbc-camellia-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = CAMELLIA_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct camellia_sparc64_ctx), - .cra_alignmask = 7, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = CAMELLIA_MIN_KEY_SIZE, - .max_keysize = CAMELLIA_MAX_KEY_SIZE, - .ivsize = CAMELLIA_BLOCK_SIZE, - .setkey = camellia_set_key, - .encrypt = cbc_encrypt, - .decrypt = cbc_decrypt, - }, - }, -} +}; + +static struct skcipher_alg skcipher_algs[] = { + { + .base.cra_name = "ecb(camellia)", + .base.cra_driver_name = "ecb-camellia-sparc64", + .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, + .base.cra_blocksize = CAMELLIA_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct camellia_sparc64_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = CAMELLIA_MIN_KEY_SIZE, + .max_keysize = CAMELLIA_MAX_KEY_SIZE, + .setkey = camellia_set_key_skcipher, + .encrypt = ecb_encrypt, + .decrypt = ecb_decrypt, + }, { + .base.cra_name = "cbc(camellia)", + .base.cra_driver_name = "cbc-camellia-sparc64", + .base.cra_priority = 
SPARC_CR_OPCODE_PRIORITY, + .base.cra_blocksize = CAMELLIA_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct camellia_sparc64_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = CAMELLIA_MIN_KEY_SIZE, + .max_keysize = CAMELLIA_MAX_KEY_SIZE, + .ivsize = CAMELLIA_BLOCK_SIZE, + .setkey = camellia_set_key_skcipher, + .encrypt = cbc_encrypt, + .decrypt = cbc_decrypt, + } }; static bool __init sparc64_has_camellia_opcode(void) @@ -300,17 +263,27 @@ static bool __init sparc64_has_camellia_opcode(void) static int __init camellia_sparc64_mod_init(void) { - if (sparc64_has_camellia_opcode()) { - pr_info("Using sparc64 camellia opcodes optimized CAMELLIA implementation\n"); - return crypto_register_algs(algs, ARRAY_SIZE(algs)); + int err; + + if (!sparc64_has_camellia_opcode()) { + pr_info("sparc64 camellia opcodes not available.\n"); + return -ENODEV; } - pr_info("sparc64 camellia opcodes not available.\n"); - return -ENODEV; + pr_info("Using sparc64 camellia opcodes optimized CAMELLIA implementation\n"); + err = crypto_register_alg(&cipher_alg); + if (err) + return err; + err = crypto_register_skciphers(skcipher_algs, + ARRAY_SIZE(skcipher_algs)); + if (err) + crypto_unregister_alg(&cipher_alg); + return err; } static void __exit camellia_sparc64_mod_fini(void) { - crypto_unregister_algs(algs, ARRAY_SIZE(algs)); + crypto_unregister_alg(&cipher_alg); + crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); } module_init(camellia_sparc64_mod_init); diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c index db6010b4e52e..a499102bf706 100644 --- a/arch/sparc/crypto/des_glue.c +++ b/arch/sparc/crypto/des_glue.c @@ -13,6 +13,7 @@ #include <linux/types.h> #include <crypto/algapi.h> #include <crypto/internal/des.h> +#include <crypto/internal/skcipher.h> #include <asm/fpumacro.h> #include <asm/pstate.h> @@ -61,6 +62,12 @@ static int des_set_key(struct crypto_tfm *tfm, const u8 *key, return 0; } +static int des_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + return des_set_key(crypto_skcipher_tfm(tfm), key, keylen); +} + extern void des_sparc64_crypt(const u64 *key, const u64 *input, u64 *output); @@ -85,113 +92,90 @@ extern void des_sparc64_load_keys(const u64 *key); extern void des_sparc64_ecb_crypt(const u64 *input, u64 *output, unsigned int len); -#define DES_BLOCK_MASK (~(DES_BLOCK_SIZE - 1)) - -static int __ecb_crypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes, bool encrypt) +static int __ecb_crypt(struct skcipher_request *req, bool encrypt) { - struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct des_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; if (encrypt) des_sparc64_load_keys(&ctx->encrypt_expkey[0]); else des_sparc64_load_keys(&ctx->decrypt_expkey[0]); - while ((nbytes = walk.nbytes)) { - unsigned int block_len = nbytes & DES_BLOCK_MASK; - - if (likely(block_len)) { - des_sparc64_ecb_crypt((const u64 *)walk.src.virt.addr, - (u64 *) walk.dst.virt.addr, - block_len); - } - nbytes &= DES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); + while 
((nbytes = walk.nbytes) != 0) { + des_sparc64_ecb_crypt(walk.src.virt.addr, walk.dst.virt.addr, + round_down(nbytes, DES_BLOCK_SIZE)); + err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE); } fprs_write(0); return err; } -static int ecb_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ecb_encrypt(struct skcipher_request *req) { - return __ecb_crypt(desc, dst, src, nbytes, true); + return __ecb_crypt(req, true); } -static int ecb_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ecb_decrypt(struct skcipher_request *req) { - return __ecb_crypt(desc, dst, src, nbytes, false); + return __ecb_crypt(req, false); } extern void des_sparc64_cbc_encrypt(const u64 *input, u64 *output, unsigned int len, u64 *iv); -static int cbc_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +extern void des_sparc64_cbc_decrypt(const u64 *input, u64 *output, + unsigned int len, u64 *iv); + +static int __cbc_crypt(struct skcipher_request *req, bool encrypt) { - struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct des_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - des_sparc64_load_keys(&ctx->encrypt_expkey[0]); - while ((nbytes = walk.nbytes)) { - unsigned int block_len = nbytes & DES_BLOCK_MASK; + err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; - if (likely(block_len)) { - des_sparc64_cbc_encrypt((const u64 *)walk.src.virt.addr, - (u64 *) walk.dst.virt.addr, - block_len, (u64 *) walk.iv); - } - nbytes &= DES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); + if (encrypt) + des_sparc64_load_keys(&ctx->encrypt_expkey[0]); + else + des_sparc64_load_keys(&ctx->decrypt_expkey[0]); + while ((nbytes = walk.nbytes) != 0) { + if (encrypt) + des_sparc64_cbc_encrypt(walk.src.virt.addr, + walk.dst.virt.addr, + round_down(nbytes, + DES_BLOCK_SIZE), + walk.iv); + else + des_sparc64_cbc_decrypt(walk.src.virt.addr, + walk.dst.virt.addr, + round_down(nbytes, + DES_BLOCK_SIZE), + walk.iv); + err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE); } fprs_write(0); return err; } -extern void des_sparc64_cbc_decrypt(const u64 *input, u64 *output, - unsigned int len, u64 *iv); - -static int cbc_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int cbc_encrypt(struct skcipher_request *req) { - struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - int err; - - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - des_sparc64_load_keys(&ctx->decrypt_expkey[0]); - while ((nbytes = walk.nbytes)) { - unsigned int block_len = nbytes & DES_BLOCK_MASK; + return __cbc_crypt(req, true); +} - if (likely(block_len)) { - des_sparc64_cbc_decrypt((const u64 *)walk.src.virt.addr, - (u64 *) walk.dst.virt.addr, - block_len, (u64 *) walk.iv); - } - nbytes &= DES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } - fprs_write(0); - return err; +static int cbc_decrypt(struct skcipher_request 
*req) +{ + return __cbc_crypt(req, false); } static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key, @@ -227,6 +211,12 @@ static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key, return 0; } +static int des3_ede_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + return des3_ede_set_key(crypto_skcipher_tfm(tfm), key, keylen); +} + extern void des3_ede_sparc64_crypt(const u64 *key, const u64 *input, u64 *output); @@ -251,241 +241,196 @@ extern void des3_ede_sparc64_load_keys(const u64 *key); extern void des3_ede_sparc64_ecb_crypt(const u64 *expkey, const u64 *input, u64 *output, unsigned int len); -static int __ecb3_crypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes, bool encrypt) +static int __ecb3_crypt(struct skcipher_request *req, bool encrypt) { - struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct des3_ede_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; const u64 *K; + unsigned int nbytes; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; if (encrypt) K = &ctx->encrypt_expkey[0]; else K = &ctx->decrypt_expkey[0]; des3_ede_sparc64_load_keys(K); - while ((nbytes = walk.nbytes)) { - unsigned int block_len = nbytes & DES_BLOCK_MASK; - - if (likely(block_len)) { - const u64 *src64 = (const u64 *)walk.src.virt.addr; - des3_ede_sparc64_ecb_crypt(K, src64, - (u64 *) walk.dst.virt.addr, - block_len); - } - nbytes &= DES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); + while ((nbytes = walk.nbytes) != 0) { + des3_ede_sparc64_ecb_crypt(K, walk.src.virt.addr, + walk.dst.virt.addr, + round_down(nbytes, DES_BLOCK_SIZE)); + err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE); } fprs_write(0); return err; } -static int ecb3_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ecb3_encrypt(struct skcipher_request *req) { - return __ecb3_crypt(desc, dst, src, nbytes, true); + return __ecb3_crypt(req, true); } -static int ecb3_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int ecb3_decrypt(struct skcipher_request *req) { - return __ecb3_crypt(desc, dst, src, nbytes, false); + return __ecb3_crypt(req, false); } extern void des3_ede_sparc64_cbc_encrypt(const u64 *expkey, const u64 *input, u64 *output, unsigned int len, u64 *iv); -static int cbc3_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; - const u64 *K; - int err; - - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - K = &ctx->encrypt_expkey[0]; - des3_ede_sparc64_load_keys(K); - while ((nbytes = walk.nbytes)) { - unsigned int block_len = nbytes & DES_BLOCK_MASK; - - if (likely(block_len)) { - const u64 *src64 = (const u64 *)walk.src.virt.addr; - des3_ede_sparc64_cbc_encrypt(K, src64, - (u64 *) walk.dst.virt.addr, - block_len, - (u64 *) walk.iv); - } - nbytes &= DES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, 
&walk, nbytes); - } - fprs_write(0); - return err; -} - extern void des3_ede_sparc64_cbc_decrypt(const u64 *expkey, const u64 *input, u64 *output, unsigned int len, u64 *iv); -static int cbc3_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int __cbc3_crypt(struct skcipher_request *req, bool encrypt) { - struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct des3_ede_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; const u64 *K; + unsigned int nbytes; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + err = skcipher_walk_virt(&walk, req, true); + if (err) + return err; - K = &ctx->decrypt_expkey[0]; + if (encrypt) + K = &ctx->encrypt_expkey[0]; + else + K = &ctx->decrypt_expkey[0]; des3_ede_sparc64_load_keys(K); - while ((nbytes = walk.nbytes)) { - unsigned int block_len = nbytes & DES_BLOCK_MASK; - - if (likely(block_len)) { - const u64 *src64 = (const u64 *)walk.src.virt.addr; - des3_ede_sparc64_cbc_decrypt(K, src64, - (u64 *) walk.dst.virt.addr, - block_len, - (u64 *) walk.iv); - } - nbytes &= DES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); + while ((nbytes = walk.nbytes) != 0) { + if (encrypt) + des3_ede_sparc64_cbc_encrypt(K, walk.src.virt.addr, + walk.dst.virt.addr, + round_down(nbytes, + DES_BLOCK_SIZE), + walk.iv); + else + des3_ede_sparc64_cbc_decrypt(K, walk.src.virt.addr, + walk.dst.virt.addr, + round_down(nbytes, + DES_BLOCK_SIZE), + walk.iv); + err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE); } fprs_write(0); return err; } -static struct crypto_alg algs[] = { { - .cra_name = "des", - .cra_driver_name = "des-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct des_sparc64_ctx), - .cra_alignmask = 7, - .cra_module = THIS_MODULE, - .cra_u = { - .cipher = { - .cia_min_keysize = DES_KEY_SIZE, - .cia_max_keysize = DES_KEY_SIZE, - .cia_setkey = des_set_key, - .cia_encrypt = sparc_des_encrypt, - .cia_decrypt = sparc_des_decrypt +static int cbc3_encrypt(struct skcipher_request *req) +{ + return __cbc3_crypt(req, true); +} + +static int cbc3_decrypt(struct skcipher_request *req) +{ + return __cbc3_crypt(req, false); +} + +static struct crypto_alg cipher_algs[] = { + { + .cra_name = "des", + .cra_driver_name = "des-sparc64", + .cra_priority = SPARC_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct des_sparc64_ctx), + .cra_alignmask = 7, + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = DES_KEY_SIZE, + .cia_max_keysize = DES_KEY_SIZE, + .cia_setkey = des_set_key, + .cia_encrypt = sparc_des_encrypt, + .cia_decrypt = sparc_des_decrypt + } } - } -}, { - .cra_name = "ecb(des)", - .cra_driver_name = "ecb-des-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct des_sparc64_ctx), - .cra_alignmask = 7, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .setkey = des_set_key, - .encrypt = ecb_encrypt, - .decrypt = ecb_decrypt, - }, - }, -}, { - 
.cra_name = "cbc(des)", - .cra_driver_name = "cbc-des-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct des_sparc64_ctx), - .cra_alignmask = 7, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = des_set_key, - .encrypt = cbc_encrypt, - .decrypt = cbc_decrypt, - }, - }, -}, { - .cra_name = "des3_ede", - .cra_driver_name = "des3_ede-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx), - .cra_alignmask = 7, - .cra_module = THIS_MODULE, - .cra_u = { - .cipher = { - .cia_min_keysize = DES3_EDE_KEY_SIZE, - .cia_max_keysize = DES3_EDE_KEY_SIZE, - .cia_setkey = des3_ede_set_key, - .cia_encrypt = sparc_des3_ede_encrypt, - .cia_decrypt = sparc_des3_ede_decrypt + }, { + .cra_name = "des3_ede", + .cra_driver_name = "des3_ede-sparc64", + .cra_priority = SPARC_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx), + .cra_alignmask = 7, + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = DES3_EDE_KEY_SIZE, + .cia_max_keysize = DES3_EDE_KEY_SIZE, + .cia_setkey = des3_ede_set_key, + .cia_encrypt = sparc_des3_ede_encrypt, + .cia_decrypt = sparc_des3_ede_decrypt + } } } -}, { - .cra_name = "ecb(des3_ede)", - .cra_driver_name = "ecb-des3_ede-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx), - .cra_alignmask = 7, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .setkey = des3_ede_set_key, - .encrypt = ecb3_encrypt, - .decrypt = ecb3_decrypt, - }, - }, -}, { - .cra_name = "cbc(des3_ede)", - .cra_driver_name = "cbc-des3_ede-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx), - .cra_alignmask = 7, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, - .setkey = des3_ede_set_key, - .encrypt = cbc3_encrypt, - .decrypt = cbc3_decrypt, - }, - }, -} }; +}; + +static struct skcipher_alg skcipher_algs[] = { + { + .base.cra_name = "ecb(des)", + .base.cra_driver_name = "ecb-des-sparc64", + .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct des_sparc64_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = des_set_key_skcipher, + .encrypt = ecb_encrypt, + .decrypt = ecb_decrypt, + }, { + .base.cra_name = "cbc(des)", + .base.cra_driver_name = "cbc-des-sparc64", + .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct des_sparc64_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + 
.ivsize = DES_BLOCK_SIZE, + .setkey = des_set_key_skcipher, + .encrypt = cbc_encrypt, + .decrypt = cbc_decrypt, + }, { + .base.cra_name = "ecb(des3_ede)", + .base.cra_driver_name = "ecb-des3_ede-sparc64", + .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, + .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = des3_ede_set_key_skcipher, + .encrypt = ecb3_encrypt, + .decrypt = ecb3_decrypt, + }, { + .base.cra_name = "cbc(des3_ede)", + .base.cra_driver_name = "cbc-des3_ede-sparc64", + .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, + .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = des3_ede_set_key_skcipher, + .encrypt = cbc3_encrypt, + .decrypt = cbc3_decrypt, + } +}; static bool __init sparc64_has_des_opcode(void) { @@ -503,17 +448,27 @@ static bool __init sparc64_has_des_opcode(void) static int __init des_sparc64_mod_init(void) { - if (sparc64_has_des_opcode()) { - pr_info("Using sparc64 des opcodes optimized DES implementation\n"); - return crypto_register_algs(algs, ARRAY_SIZE(algs)); + int err; + + if (!sparc64_has_des_opcode()) { + pr_info("sparc64 des opcodes not available.\n"); + return -ENODEV; } - pr_info("sparc64 des opcodes not available.\n"); - return -ENODEV; + pr_info("Using sparc64 des opcodes optimized DES implementation\n"); + err = crypto_register_algs(cipher_algs, ARRAY_SIZE(cipher_algs)); + if (err) + return err; + err = crypto_register_skciphers(skcipher_algs, + ARRAY_SIZE(skcipher_algs)); + if (err) + crypto_unregister_algs(cipher_algs, ARRAY_SIZE(cipher_algs)); + return err; } static void __exit des_sparc64_mod_fini(void) { - crypto_unregister_algs(algs, ARRAY_SIZE(algs)); + crypto_unregister_algs(cipher_algs, ARRAY_SIZE(cipher_algs)); + crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); } module_init(des_sparc64_mod_init); |
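For context, a minimal usage sketch of how a kernel caller would exercise one of the skcipher algorithms registered above through the generic crypto API. The function name example_cbc_aes() and the simplified error handling are illustrative assumptions, not part of the patch; the crypto API calls themselves are the standard skcipher interface.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <crypto/skcipher.h>

/* Encrypt len bytes of buf in place with cbc(aes); illustrative only. */
static int example_cbc_aes(u8 *buf, unsigned int len,
			   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* On sparc64 with the AES opcodes available, the high priority of
	 * "cbc-aes-sparc64" should make it the implementation selected. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Wait for completion; synchronous implementations return directly. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}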