author     Tero Kristo <t-kristo@ti.com>             2019-11-05 17:01:08 +0300
committer  Herbert Xu <herbert@gondor.apana.org.au>  2019-12-11 11:37:00 +0300
commit     04a4616e6a21eb5ed2c8399160be370af4f67037 (patch)
tree       4ea62a0a43b3d5a44deec90c3abbf6b2f8929ac2 /drivers/crypto/omap-aes-gcm.c
parent     1cfd9f3f308fde70d14927f376cd61c2365819d0 (diff)
download   linux-04a4616e6a21eb5ed2c8399160be370af4f67037.tar.xz
crypto: omap-aes-gcm - convert to use crypto engine
Currently the omap-aes-gcm algorithms use a local implementation of the
crypto request queuing logic. Implement this via the crypto engine
instead, which is already used for the rest of the omap-aes algorithms.
This also avoids sporadic conflicts / crashes that can occur when aes
and aes-gcm are used simultaneously.
Signed-off-by: Tero Kristo <t-kristo@ti.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
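
[Editor's note] For readers new to the crypto engine framework, the queuing
pattern this commit adopts looks roughly like the sketch below. This is a
minimal illustration, not code from the patch: the example_* names are
hypothetical, while the engine calls themselves
(crypto_transfer_aead_request_to_engine(), crypto_finalize_aead_request())
are exactly the ones the diff introduces.

#include <crypto/engine.h>
#include <crypto/aead.h>

/* Queue side: hand the request to the engine instead of a local
 * queue; the engine serializes and dispatches requests itself. */
static int example_handle_queue(struct crypto_engine *engine,
				struct aead_request *req)
{
	return crypto_transfer_aead_request_to_engine(engine, req);
}

/* Completion side: tell the engine the hardware is done; it runs the
 * request's completion callback and schedules the next queued request,
 * replacing the driver's old FLAGS_BUSY bookkeeping. */
static void example_finish_req(struct crypto_engine *engine,
			       struct aead_request *req, int err)
{
	crypto_finalize_aead_request(engine, req, err);
}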
Diffstat (limited to 'drivers/crypto/omap-aes-gcm.c')
-rw-r--r--  drivers/crypto/omap-aes-gcm.c  98
1 file changed, 54 insertions(+), 44 deletions(-)
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index e92000846f16..32dc00dc570b 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -13,6 +13,7 @@
 #include <linux/dmaengine.h>
 #include <linux/omap-dma.h>
 #include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
 #include <crypto/aes.h>
 #include <crypto/gcm.h>
 #include <crypto/scatterwalk.h>
@@ -29,11 +30,13 @@ static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
 {
 	struct aead_request *req = dd->aead_req;
 
-	dd->flags &= ~FLAGS_BUSY;
-
 	dd->in_sg = NULL;
 	dd->out_sg = NULL;
 
-	req->base.complete(&req->base, ret);
+	crypto_finalize_aead_request(dd->engine, req, ret);
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
 }
 
 static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
@@ -81,7 +84,6 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
 	}
 
 	omap_aes_gcm_finish_req(dd, ret);
-	omap_aes_gcm_handle_queue(dd, NULL);
 }
 
 static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
@@ -127,6 +129,9 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
 	if (cryptlen) {
 		tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);
 
+		if (nsg)
+			sg_unmark_end(dd->in_sgl);
+
 		ret = omap_crypto_align_sg(&tmp, cryptlen,
 					   AES_BLOCK_SIZE, &dd->in_sgl[nsg],
 					   OMAP_CRYPTO_COPY_DATA |
@@ -146,7 +151,7 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
 	dd->out_sg = req->dst;
 	dd->orig_out = req->dst;
 
-	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);
+	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, req->assoclen);
 
 	flags = 0;
 	if (req->src == req->dst || dd->out_sg == sg_arr)
@@ -202,37 +207,21 @@ void omap_aes_gcm_dma_out_callback(void *data)
 static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
 				     struct aead_request *req)
 {
-	struct omap_aes_gcm_ctx *ctx;
-	struct aead_request *backlog;
-	struct omap_aes_reqctx *rctx;
-	unsigned long flags;
-	int err, ret = 0;
-
-	spin_lock_irqsave(&dd->lock, flags);
 	if (req)
-		ret = aead_enqueue_request(&dd->aead_queue, req);
-	if (dd->flags & FLAGS_BUSY) {
-		spin_unlock_irqrestore(&dd->lock, flags);
-		return ret;
-	}
-
-	backlog = aead_get_backlog(&dd->aead_queue);
-	req = aead_dequeue_request(&dd->aead_queue);
-	if (req)
-		dd->flags |= FLAGS_BUSY;
-	spin_unlock_irqrestore(&dd->lock, flags);
-
-	if (!req)
-		return ret;
+		return crypto_transfer_aead_request_to_engine(dd->engine, req);
 
-	if (backlog)
-		backlog->base.complete(&backlog->base, -EINPROGRESS);
+	return 0;
+}
 
-	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
-	rctx = aead_request_ctx(req);
+static int omap_aes_gcm_prepare_req(struct crypto_engine *engine, void *areq)
+{
+	struct aead_request *req = container_of(areq, struct aead_request,
+						base);
+	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+	struct omap_aes_dev *dd = rctx->dd;
+	struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	int err;
 
-	dd->ctx = &ctx->octx;
-	rctx->dd = dd;
 	dd->aead_req = req;
 
 	rctx->mode &= FLAGS_MODE_MASK;
@@ -242,20 +231,9 @@ static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
 	if (err)
 		return err;
 
-	err = omap_aes_write_ctrl(dd);
-	if (!err) {
-		if (dd->in_sg_len)
-			err = omap_aes_crypt_dma_start(dd);
-		else
-			omap_aes_gcm_dma_out_callback(dd);
-	}
-
-	if (err) {
-		omap_aes_gcm_finish_req(dd, err);
-		omap_aes_gcm_handle_queue(dd, NULL);
-	}
+	dd->ctx = &ctx->octx;
 
-	return ret;
+	return omap_aes_write_ctrl(dd);
 }
 
 static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
@@ -378,3 +356,35 @@ int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
 {
 	return crypto_rfc4106_check_authsize(authsize);
 }
+
+static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
+{
+	struct aead_request *req = container_of(areq, struct aead_request,
+						base);
+	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+	struct omap_aes_dev *dd = rctx->dd;
+	int ret = 0;
+
+	if (!dd)
+		return -ENODEV;
+
+	if (dd->in_sg_len)
+		ret = omap_aes_crypt_dma_start(dd);
+	else
+		omap_aes_gcm_dma_out_callback(dd);
+
+	return ret;
+}
+
+int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
+{
+	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+	ctx->enginectx.op.prepare_request = omap_aes_gcm_prepare_req;
+	ctx->enginectx.op.unprepare_request = NULL;
+	ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req;
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
+
+	return 0;
+}
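
[Editor's note] The enginectx ops registered in omap_aes_gcm_cra_init() only
take effect once the device has an engine to run them on. That bring-up is
outside this file; the sketch below is an assumed illustration of the
standard crypto engine setup (in mainline the allocation lives in the
driver's probe path in omap-aes.c), with example_engine_setup() as a
hypothetical name.

#include <crypto/engine.h>

/* Assumed sketch of probe-side engine bring-up; 'dd' is the driver's
 * per-device structure, whose dd->engine field is used by the code above. */
static int example_engine_setup(struct device *dev, struct omap_aes_dev *dd)
{
	/* Allocate the engine and its worker kthread; the second argument
	 * selects realtime priority for the worker. */
	dd->engine = crypto_engine_alloc_init(dev, true);
	if (!dd->engine)
		return -ENOMEM;

	/* Start processing: queued requests now flow through
	 * omap_aes_gcm_prepare_req() and omap_aes_gcm_crypt_req(). */
	return crypto_engine_start(dd->engine);
}

Because one engine serves all algorithms on the device, plain aes and
aes-gcm requests now share a single serialized queue, which is what removes
the conflicts the commit message describes.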