author		Horia Geantă <horia.geanta@nxp.com>	2018-08-06 15:43:59 +0300
committer	Herbert Xu <herbert@gondor.apana.org.au>	2018-09-04 06:35:03 +0300
commit		5ca7badb1f6266d3d519be110dc84ffb224d80c9 (patch)
tree		34e9c6df71feac8a6dea5933c700e28f405d3cd5
parent		aec48adce85d4ca96b0f75cd951c06b6bee613a4 (diff)
crypto: caam/jr - ablkcipher -> skcipher conversion
Convert driver from deprecated ablkcipher API to skcipher.
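
For reference, a minimal caller-side sketch of the skcipher API that this patch adopts. The crypto API calls (crypto_alloc_skcipher, skcipher_request_set_crypt, crypto_wait_req, etc.) are the real interface; the example_skcipher_encrypt() helper and its buffer/key handling are hypothetical, shown only to illustrate how req->cryptlen and req->iv replace ablkcipher's nbytes/info:

  #include <crypto/skcipher.h>
  #include <linux/crypto.h>
  #include <linux/err.h>
  #include <linux/scatterlist.h>

  /* Hypothetical helper: synchronously CBC-encrypt 'len' bytes in place. */
  static int example_skcipher_encrypt(u8 *buf, unsigned int len,
  				    const u8 *key, unsigned int keylen,
  				    u8 *iv)
  {
  	struct crypto_skcipher *tfm;
  	struct skcipher_request *req;
  	struct scatterlist sg;
  	DECLARE_CRYPTO_WAIT(wait);
  	int err;

  	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
  	if (IS_ERR(tfm))
  		return PTR_ERR(tfm);

  	err = crypto_skcipher_setkey(tfm, key, keylen);
  	if (err)
  		goto out_free_tfm;

  	req = skcipher_request_alloc(tfm, GFP_KERNEL);
  	if (!req) {
  		err = -ENOMEM;
  		goto out_free_tfm;
  	}

  	sg_init_one(&sg, buf, len);
  	/* cryptlen and iv are what ablkcipher called nbytes and info */
  	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
  	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
  				      CRYPTO_TFM_REQ_MAY_SLEEP,
  				      crypto_req_done, &wait);

  	/* wait for the async (possibly hardware-offloaded) operation */
  	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

  	skcipher_request_free(req);
  out_free_tfm:
  	crypto_free_skcipher(tfm);
  	return err;
  }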
Link: https://www.mail-archive.com/search?l=mid&q=20170728085622.GC19664@gondor.apana.org.au
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--	drivers/crypto/caam/caamalg.c	| 448
-rw-r--r--	drivers/crypto/caam/compat.h	|   1
2 files changed, 208 insertions(+), 241 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b23730c07fda..c6e3c8ad6d2d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2,7 +2,7 @@
  * caam - Freescale FSL CAAM support for crypto API
  *
  * Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
  *
  * Based on talitos crypto API driver.
  *
@@ -81,8 +81,6 @@
 #define debug(format, arg...)
 #endif
 
-static struct list_head alg_list;
-
 struct caam_alg_entry {
 	int class1_alg_type;
 	int class2_alg_type;
@@ -96,6 +94,12 @@ struct caam_aead_alg {
 	bool registered;
 };
 
+struct caam_skcipher_alg {
+	struct skcipher_alg skcipher;
+	struct caam_alg_entry caam;
+	bool registered;
+};
+
 /*
  * per-session context
  */
@@ -646,20 +650,20 @@ static int rfc4543_setkey(struct crypto_aead *aead,
 	return rfc4543_set_sh_desc(aead);
 }
 
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-			     const u8 *key, unsigned int keylen)
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+			   unsigned int keylen)
 {
-	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
-	const char *alg_name = crypto_tfm_alg_name(tfm);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct caam_skcipher_alg *alg =
+		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
+			     skcipher);
 	struct device *jrdev = ctx->jrdev;
-	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 	u32 *desc;
 	u32 ctx1_iv_off = 0;
 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
 			       OP_ALG_AAI_CTR_MOD128);
-	const bool is_rfc3686 = (ctr_mode &&
-				 (strstr(alg_name, "rfc3686") != NULL));
+	const bool is_rfc3686 = alg->caam.rfc3686;
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
@@ -687,14 +691,14 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	ctx->cdata.key_virt = key;
 	ctx->cdata.key_inline = true;
 
-	/* ablkcipher_encrypt shared descriptor */
+	/* skcipher_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
 	cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
 				     ctx1_iv_off);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
 				   desc_bytes(desc), ctx->dir);
 
-	/* ablkcipher_decrypt shared descriptor */
+	/* skcipher_decrypt shared descriptor */
 	desc = ctx->sh_desc_dec;
 	cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
 				     ctx1_iv_off);
@@ -704,16 +708,15 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	return 0;
 }
 
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-				 const u8 *key, unsigned int keylen)
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+			       unsigned int keylen)
 {
-	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	u32 *desc;
 
 	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-		crypto_ablkcipher_set_flags(ablkcipher,
-					    CRYPTO_TFM_RES_BAD_KEY_LEN);
+		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		dev_err(jrdev, "key size mismatch\n");
 		return -EINVAL;
 	}
@@ -722,13 +725,13 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	ctx->cdata.key_virt = key;
 	ctx->cdata.key_inline = true;
 
-	/* xts_ablkcipher_encrypt shared descriptor */
+	/* xts_skcipher_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
 	cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
 				   desc_bytes(desc), ctx->dir);
 
-	/* xts_ablkcipher_decrypt shared descriptor */
+	/* xts_skcipher_decrypt shared descriptor */
 	desc = ctx->sh_desc_dec;
 	cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
 	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
@@ -756,7 +759,7 @@ struct aead_edesc {
 };
 
 /*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * skcipher_edesc - s/w-extended skcipher descriptor
  * @src_nents: number of segments in input s/w scatterlist
  * @dst_nents: number of segments in output s/w scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
@@ -766,7 +769,7 @@ struct aead_edesc {
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
  *	     and IV
  */
-struct ablkcipher_edesc {
+struct skcipher_edesc {
 	int src_nents;
 	int dst_nents;
 	dma_addr_t iv_dma;
@@ -806,12 +809,11 @@ static void aead_unmap(struct device *dev,
 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
-static void ablkcipher_unmap(struct device *dev,
-			     struct ablkcipher_edesc *edesc,
-			     struct ablkcipher_request *req)
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+			   struct skcipher_request *req)
 {
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	int ivsize = crypto_skcipher_ivsize(skcipher);
 
 	caam_unmap(dev, req->src, req->dst,
 		   edesc->src_nents, edesc->dst_nents,
@@ -869,75 +871,74 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	aead_request_complete(req, err);
 }
 
-static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
-				    void *context)
+static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				  void *context)
 {
-	struct ablkcipher_request *req = context;
-	struct ablkcipher_edesc *edesc;
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	struct skcipher_request *req = context;
+	struct skcipher_edesc *edesc;
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	int ivsize = crypto_skcipher_ivsize(skcipher);
 
 #ifdef DEBUG
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
-	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+	edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
 
 	if (err)
 		caam_jr_strstatus(jrdev, err);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
 		       edesc->src_nents > 1 ? 100 : ivsize, 1);
 #endif
 	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
-		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
 
-	ablkcipher_unmap(jrdev, edesc, req);
+	skcipher_unmap(jrdev, edesc, req);
 
 	/*
-	 * The crypto API expects us to set the IV (req->info) to the last
+	 * The crypto API expects us to set the IV (req->iv) to the last
 	 * ciphertext block. This is used e.g. by the CTS mode.
 	 */
-	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
				 ivsize, 0);
 
 	kfree(edesc);
 
-	ablkcipher_request_complete(req, err);
+	skcipher_request_complete(req, err);
 }
 
-static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
-				    void *context)
+static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+				  void *context)
 {
-	struct ablkcipher_request *req = context;
-	struct ablkcipher_edesc *edesc;
+	struct skcipher_request *req = context;
+	struct skcipher_edesc *edesc;
 #ifdef DEBUG
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	int ivsize = crypto_skcipher_ivsize(skcipher);
 
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
-	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+	edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
 	if (err)
 		caam_jr_strstatus(jrdev, err);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
-		       ivsize, 1);
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
 #endif
 	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
-		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
 
-	ablkcipher_unmap(jrdev, edesc, req);
+	skcipher_unmap(jrdev, edesc, req);
 	kfree(edesc);
 
-	ablkcipher_request_complete(req, err);
+	skcipher_request_complete(req, err);
 }
 
 /*
@@ -1079,34 +1080,38 @@ static void init_authenc_job(struct aead_request *req,
 }
 
 /*
- * Fill in ablkcipher job descriptor
+ * Fill in skcipher job descriptor
 */
-static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
-				struct ablkcipher_edesc *edesc,
-				struct ablkcipher_request *req)
+static void init_skcipher_job(struct skcipher_request *req,
+			      struct skcipher_edesc *edesc,
+			      const bool encrypt)
 {
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	int ivsize = crypto_skcipher_ivsize(skcipher);
 	u32 *desc = edesc->hw_desc;
+	u32 *sh_desc;
 	u32 out_options = 0;
-	dma_addr_t dst_dma;
+	dma_addr_t dst_dma, ptr;
 	int len;
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
-		       ivsize, 1);
-	pr_err("asked=%d, nbytes%d\n",
-	       (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+	pr_err("asked=%d, cryptlen%d\n",
+	       (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
 #endif
 	caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__)": ",
 		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
-		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+		     edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+
+	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
 
 	len = desc_len(sh_desc);
 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
-	append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
+	append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize,
 			  LDST_SGF);
 
 	if (likely(req->src == req->dst)) {
@@ -1121,7 +1126,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
 			out_options = LDST_SGF;
 		}
 	}
-	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
+	append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
 }
 
 /*
@@ -1411,35 +1416,35 @@ static int aead_decrypt(struct aead_request *req)
 }
 
 /*
- * allocate and map the ablkcipher extended descriptor for ablkcipher
+ * allocate and map the skcipher extended descriptor for skcipher
 */
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
-						       *req, int desc_bytes)
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+						   int desc_bytes)
 {
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-	struct ablkcipher_edesc *edesc;
+	struct skcipher_edesc *edesc;
 	dma_addr_t iv_dma;
 	u8 *iv;
-	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	int ivsize = crypto_skcipher_ivsize(skcipher);
 	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
 
-	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	src_nents = sg_nents_for_len(req->src, req->cryptlen);
 	if (unlikely(src_nents < 0)) {
 		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
-			req->nbytes);
+			req->cryptlen);
 		return ERR_PTR(src_nents);
 	}
 
 	if (req->dst != req->src) {
-		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
 		if (unlikely(dst_nents < 0)) {
 			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
-				req->nbytes);
+				req->cryptlen);
 			return ERR_PTR(dst_nents);
 		}
 	}
@@ -1488,12 +1493,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+	edesc->sec4_sg = (void *)edesc + sizeof(struct skcipher_edesc) +
 			 desc_bytes;
 
 	/* Make sure IV is located in a DMAable area */
 	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
-	memcpy(iv, req->info, ivsize);
+	memcpy(iv, req->iv, ivsize);
 
 	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, iv_dma)) {
@@ -1525,7 +1530,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	edesc->iv_dma = iv_dma;
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
 		       sec4_sg_bytes, 1);
 #endif
@@ -1533,185 +1538,187 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	return edesc;
 }
 
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
 {
-	struct ablkcipher_edesc *edesc;
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct skcipher_edesc *edesc;
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	u32 *desc;
 	int ret = 0;
 
 	/* allocate extended descriptor */
-	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
 	/* Create and submit job descriptor*/
-	init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
+	init_skcipher_job(req, edesc, true);
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 		       desc_bytes(edesc->hw_desc), 1);
 #endif
 	desc = edesc->hw_desc;
-	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+	ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
 
 	if (!ret) {
 		ret = -EINPROGRESS;
 	} else {
-		ablkcipher_unmap(jrdev, edesc, req);
+		skcipher_unmap(jrdev, edesc, req);
 		kfree(edesc);
 	}
 
 	return ret;
 }
 
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
 {
-	struct ablkcipher_edesc *edesc;
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	struct skcipher_edesc *edesc;
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	int ivsize = crypto_skcipher_ivsize(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	u32 *desc;
 	int ret = 0;
 
 	/* allocate extended descriptor */
-	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
 	/*
-	 * The crypto API expects us to set the IV (req->info) to the last
+	 * The crypto API expects us to set the IV (req->iv) to the last
 	 * ciphertext block.
 	 */
-	scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
 				 ivsize, 0);
 
 	/* Create and submit job descriptor*/
-	init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
+	init_skcipher_job(req, edesc, false);
 	desc = edesc->hw_desc;
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 		       desc_bytes(edesc->hw_desc), 1);
 #endif
 
-	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+	ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
 	if (!ret) {
 		ret = -EINPROGRESS;
 	} else {
-		ablkcipher_unmap(jrdev, edesc, req);
+		skcipher_unmap(jrdev, edesc, req);
 		kfree(edesc);
 	}
 
 	return ret;
 }
 
-#define template_aead		template_u.aead
-#define template_ablkcipher	template_u.ablkcipher
-struct caam_alg_template {
-	char name[CRYPTO_MAX_ALG_NAME];
-	char driver_name[CRYPTO_MAX_ALG_NAME];
-	unsigned int blocksize;
-	u32 type;
-	union {
-		struct ablkcipher_alg ablkcipher;
-	} template_u;
-	u32 class1_alg_type;
-	u32 class2_alg_type;
-};
-
-static struct caam_alg_template driver_algs[] = {
-	/* ablkcipher descriptor */
+static struct caam_skcipher_alg driver_algs[] = {
 	{
-		.name = "cbc(aes)",
-		.driver_name = "cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.template_ablkcipher = {
-			.setkey = ablkcipher_setkey,
-			.encrypt = ablkcipher_encrypt,
-			.decrypt = ablkcipher_decrypt,
+		.skcipher = {
+			.base = {
+				.cra_name = "cbc(aes)",
+				.cra_driver_name = "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 	},
 	{
-		.name = "cbc(des3_ede)",
-		.driver_name = "cbc-3des-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.template_ablkcipher = {
-			.setkey = ablkcipher_setkey,
-			.encrypt = ablkcipher_encrypt,
-			.decrypt = ablkcipher_decrypt,
+		.skcipher = {
+			.base = {
+				.cra_name = "cbc(des3_ede)",
+				.cra_driver_name = "cbc-3des-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
 			.min_keysize = DES3_EDE_KEY_SIZE,
 			.max_keysize = DES3_EDE_KEY_SIZE,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
 	},
 	{
-		.name = "cbc(des)",
-		.driver_name = "cbc-des-caam",
-		.blocksize = DES_BLOCK_SIZE,
-		.template_ablkcipher = {
-			.setkey = ablkcipher_setkey,
-			.encrypt = ablkcipher_encrypt,
-			.decrypt = ablkcipher_decrypt,
+		.skcipher = {
+			.base = {
+				.cra_name = "cbc(des)",
+				.cra_driver_name = "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
 			.min_keysize = DES_KEY_SIZE,
 			.max_keysize = DES_KEY_SIZE,
 			.ivsize = DES_BLOCK_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
 	},
 	{
-		.name = "ctr(aes)",
-		.driver_name = "ctr-aes-caam",
-		.blocksize = 1,
-		.template_ablkcipher = {
-			.setkey = ablkcipher_setkey,
-			.encrypt = ablkcipher_encrypt,
-			.decrypt = ablkcipher_decrypt,
-			.geniv = "chainiv",
+		.skcipher = {
+			.base = {
+				.cra_name = "ctr(aes)",
+				.cra_driver_name = "ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+			.chunksize = AES_BLOCK_SIZE,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+					OP_ALG_AAI_CTR_MOD128,
 	},
 	{
-		.name = "rfc3686(ctr(aes))",
-		.driver_name = "rfc3686-ctr-aes-caam",
-		.blocksize = 1,
-		.template_ablkcipher = {
-			.setkey = ablkcipher_setkey,
-			.encrypt = ablkcipher_encrypt,
-			.decrypt = ablkcipher_decrypt,
+		.skcipher = {
+			.base = {
+				.cra_name = "rfc3686(ctr(aes))",
+				.cra_driver_name = "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
 			.min_keysize = AES_MIN_KEY_SIZE +
 				       CTR_RFC3686_NONCE_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE +
 				       CTR_RFC3686_NONCE_SIZE,
 			.ivsize = CTR_RFC3686_IV_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+			.chunksize = AES_BLOCK_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.rfc3686 = true,
+		},
 	},
 	{
-		.name = "xts(aes)",
-		.driver_name = "xts-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.template_ablkcipher = {
-			.setkey = xts_ablkcipher_setkey,
-			.encrypt = ablkcipher_encrypt,
-			.decrypt = ablkcipher_decrypt,
-			.geniv = "eseqiv",
+		.skcipher = {
+			.base = {
+				.cra_name = "xts(aes)",
+				.cra_driver_name = "xts-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = xts_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
 	},
 };
 
@@ -2996,12 +3003,6 @@ static struct caam_aead_alg driver_aeads[] = {
 	},
 };
 
-struct caam_crypto_alg {
-	struct crypto_alg crypto_alg;
-	struct list_head entry;
-	struct caam_alg_entry caam;
-};
-
 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
 			    bool uses_dkp)
 {
@@ -3042,14 +3043,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
 	return 0;
 }
 
-static int caam_cra_init(struct crypto_tfm *tfm)
+static int caam_cra_init(struct crypto_skcipher *tfm)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct caam_crypto_alg *caam_alg =
-		container_of(alg, struct caam_crypto_alg, crypto_alg);
-	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct caam_skcipher_alg *caam_alg =
+		container_of(alg, typeof(*caam_alg), skcipher);
 
-	return caam_init_common(ctx, &caam_alg->caam, false);
+	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+				false);
 }
 
 static int caam_aead_init(struct crypto_aead *tfm)
@@ -3071,9 +3072,9 @@ static void caam_exit_common(struct caam_ctx *ctx)
 	caam_jr_free(ctx->jrdev);
 }
 
-static void caam_cra_exit(struct crypto_tfm *tfm)
+static void caam_cra_exit(struct crypto_skcipher *tfm)
 {
-	caam_exit_common(crypto_tfm_ctx(tfm));
+	caam_exit_common(crypto_skcipher_ctx(tfm));
 }
 
 static void caam_aead_exit(struct crypto_aead *tfm)
@@ -3083,8 +3084,6 @@ static void caam_aead_exit(struct crypto_aead *tfm)
 
 static void __exit caam_algapi_exit(void)
 {
-
-	struct caam_crypto_alg *t_alg, *n;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
@@ -3094,49 +3093,25 @@ static void __exit caam_algapi_exit(void)
 			crypto_unregister_aead(&t_alg->aead);
 	}
 
-	if (!alg_list.next)
-		return;
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		struct caam_skcipher_alg *t_alg = driver_algs + i;
 
-	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
-		crypto_unregister_alg(&t_alg->crypto_alg);
-		list_del(&t_alg->entry);
-		kfree(t_alg);
+		if (t_alg->registered)
+			crypto_unregister_skcipher(&t_alg->skcipher);
 	}
 }
 
-static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
-					      *template)
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
 {
-	struct caam_crypto_alg *t_alg;
-	struct crypto_alg *alg;
+	struct skcipher_alg *alg = &t_alg->skcipher;
 
-	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
-	if (!t_alg) {
-		pr_err("failed to allocate t_alg\n");
-		return ERR_PTR(-ENOMEM);
-	}
+	alg->base.cra_module = THIS_MODULE;
+	alg->base.cra_priority = CAAM_CRA_PRIORITY;
+	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
 
-	alg = &t_alg->crypto_alg;
-
-	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
-	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
-		 template->driver_name);
-	alg->cra_module = THIS_MODULE;
-	alg->cra_init = caam_cra_init;
-	alg->cra_exit = caam_cra_exit;
-	alg->cra_priority = CAAM_CRA_PRIORITY;
-	alg->cra_blocksize = template->blocksize;
-	alg->cra_alignmask = 0;
-	alg->cra_ctxsize = sizeof(struct caam_ctx);
-	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
-			 CRYPTO_ALG_TYPE_ABLKCIPHER;
-	alg->cra_type = &crypto_ablkcipher_type;
-	alg->cra_ablkcipher = template->template_ablkcipher;
-
-	t_alg->caam.class1_alg_type = template->class1_alg_type;
-	t_alg->caam.class2_alg_type = template->class2_alg_type;
-
-	return t_alg;
+	alg->init = caam_cra_init;
+	alg->exit = caam_cra_exit;
 }
 
 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
@@ -3188,8 +3163,6 @@ static int __init caam_algapi_init(void)
 		return -ENODEV;
 
-	INIT_LIST_HEAD(&alg_list);
-
 	/*
 	 * Register crypto algorithms the device supports.
 	 * First, detect presence and attributes of DES, AES, and MD blocks.
@@ -3205,9 +3178,8 @@ static int __init caam_algapi_init(void)
 		md_limit = SHA256_DIGEST_SIZE;
 
 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
-		struct caam_crypto_alg *t_alg;
-		struct caam_alg_template *alg = driver_algs + i;
-		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+		struct caam_skcipher_alg *t_alg = driver_algs + i;
+		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
 
 		/* Skip DES algorithms if not supported by device */
 		if (!des_inst &&
@@ -3224,26 +3196,20 @@ static int __init caam_algapi_init(void)
 		 * on LP devices.
 		 */
 		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
-			if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+			if ((t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
 			    OP_ALG_AAI_XTS)
 				continue;
 
-		t_alg = caam_alg_alloc(alg);
-		if (IS_ERR(t_alg)) {
-			err = PTR_ERR(t_alg);
-			pr_warn("%s alg allocation failed\n", alg->driver_name);
-			continue;
-		}
+		caam_skcipher_alg_init(t_alg);
 
-		err = crypto_register_alg(&t_alg->crypto_alg);
+		err = crypto_register_skcipher(&t_alg->skcipher);
 		if (err) {
 			pr_warn("%s alg registration failed\n",
-				t_alg->crypto_alg.cra_driver_name);
-			kfree(t_alg);
+				t_alg->skcipher.base.cra_driver_name);
 			continue;
 		}
 
-		list_add_tail(&t_alg->entry, &alg_list);
+		t_alg->registered = true;
 		registered = true;
 	}
 
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 1c71e0cd5098..f1cfb23975a0 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -39,6 +39,7 @@
 #include <crypto/authenc.h>
 #include <crypto/akcipher.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/rsa.h>
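
A note on the registration rework above: the dynamically allocated caam_crypto_alg wrappers kept on alg_list are replaced by a static driver_algs[] array of caam_skcipher_alg, and skcipher_setkey() recovers the driver's wrapper from the generic skcipher_alg handle with container_of(). A minimal standalone sketch of that embedded-struct pattern follows; the trimmed struct definitions are illustrative stand-ins for the real ones, and the userspace container_of() mirrors the kernel macro:

  #include <stddef.h>
  #include <stdio.h>

  /* Userspace equivalent of the kernel's container_of() macro. */
  #define container_of(ptr, type, member) \
  	((type *)((char *)(ptr) - offsetof(type, member)))

  /* Illustrative stand-ins mirroring the shapes used in the patch. */
  struct skcipher_alg { const char *cra_name; };
  struct caam_alg_entry { int class1_alg_type; int rfc3686; };

  struct caam_skcipher_alg {
  	struct skcipher_alg skcipher;	/* embedded generic part */
  	struct caam_alg_entry caam;	/* driver-private part */
  };

  int main(void)
  {
  	struct caam_skcipher_alg driver_alg = {
  		.skcipher.cra_name = "rfc3686(ctr(aes))",
  		.caam.rfc3686 = 1,
  	};

  	/* The crypto core only ever hands back the embedded pointer... */
  	struct skcipher_alg *generic = &driver_alg.skcipher;

  	/* ...and the driver recovers its wrapper, as skcipher_setkey() does. */
  	struct caam_skcipher_alg *alg =
  		container_of(generic, struct caam_skcipher_alg, skcipher);

  	printf("%s: rfc3686=%d\n", alg->skcipher.cra_name, alg->caam.rfc3686);
  	return 0;
  }

This is why the old strstr(alg_name, "rfc3686") check could be dropped: the rfc3686 property now lives in the per-algorithm caam_alg_entry and is reached directly through container_of().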