Diffstat (limited to 'crypto/crypto_engine.c')
-rw-r--r-- | crypto/crypto_engine.c | 86
1 file changed, 5 insertions(+), 81 deletions(-)
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index c7c16da5e649..18e1689efe12 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -23,9 +23,6 @@
 
 #define CRYPTO_ENGINE_MAX_QLEN 10
 
-/* Temporary algorithm flag used to indicate an updated driver. */
-#define CRYPTO_ALG_ENGINE 0x200
-
 struct crypto_engine_alg {
 	struct crypto_alg base;
 	struct crypto_engine_op op;
@@ -77,7 +74,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 	struct crypto_engine_alg *alg;
 	struct crypto_engine_op *op;
 	unsigned long flags;
-	bool was_busy = false;
 	int ret;
 
 	spin_lock_irqsave(&engine->queue_lock, flags);
@@ -86,12 +82,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 	if (!engine->retry_support && engine->cur_req)
 		goto out;
 
-	/* If another context is idling then defer */
-	if (engine->idling) {
-		kthread_queue_work(engine->kworker, &engine->pump_requests);
-		goto out;
-	}
-
 	/* Check if the engine queue is idle */
 	if (!crypto_queue_len(&engine->queue) || !engine->running) {
 		if (!engine->busy)
@@ -105,15 +95,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 		}
 
 		engine->busy = false;
-		engine->idling = true;
-		spin_unlock_irqrestore(&engine->queue_lock, flags);
-
-		if (engine->unprepare_crypt_hardware &&
-		    engine->unprepare_crypt_hardware(engine))
-			dev_err(engine->dev, "failed to unprepare crypt hardware\n");
-
-		spin_lock_irqsave(&engine->queue_lock, flags);
-		engine->idling = false;
 		goto out;
 	}
 
@@ -132,32 +113,14 @@ start_request:
 	if (!engine->retry_support)
 		engine->cur_req = async_req;
 
-	if (engine->busy)
-		was_busy = true;
-	else
+	if (!engine->busy)
 		engine->busy = true;
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
-	/* Until here we get the request need to be encrypted successfully */
-	if (!was_busy && engine->prepare_crypt_hardware) {
-		ret = engine->prepare_crypt_hardware(engine);
-		if (ret) {
-			dev_err(engine->dev, "failed to prepare crypt hardware\n");
-			goto req_err_1;
-		}
-	}
-
-	if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
-		alg = container_of(async_req->tfm->__crt_alg,
-				   struct crypto_engine_alg, base);
-		op = &alg->op;
-	} else {
-		dev_err(engine->dev, "failed to do request\n");
-		ret = -EINVAL;
-		goto req_err_1;
-	}
-
+	alg = container_of(async_req->tfm->__crt_alg,
+			   struct crypto_engine_alg, base);
+	op = &alg->op;
 	ret = op->do_one_request(engine, async_req);
 
 	/* Request unsuccessfully executed by hardware */
@@ -205,17 +168,6 @@ retry:
 out:
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
-	/*
-	 * Batch requests is possible only if
-	 * hardware can enqueue multiple requests
-	 */
-	if (engine->do_batch_requests) {
-		ret = engine->do_batch_requests(engine);
-		if (ret)
-			dev_err(engine->dev, "failed to do batch requests: %d\n",
-				ret);
-	}
-
 	return;
 }
 
@@ -472,12 +424,6 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
  * crypto-engine queue.
  * @dev: the device attached with one hardware engine
  * @retry_support: whether hardware has support for retry mechanism
- * @cbk_do_batch: pointer to a callback function to be invoked when executing
- *                a batch of requests.
- *                This has the form:
- *                callback(struct crypto_engine *engine)
- *                where:
- *                engine: the crypto engine structure.
  * @rt: whether this queue is set to run as a realtime task
  * @qlen: maximum size of the crypto-engine queue
  *
@@ -486,7 +432,6 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
  */
 struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
 						       bool retry_support,
-						       int (*cbk_do_batch)(struct crypto_engine *engine),
 						       bool rt, int qlen)
 {
 	struct crypto_engine *engine;
@@ -502,14 +447,8 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
 	engine->rt = rt;
 	engine->running = false;
 	engine->busy = false;
-	engine->idling = false;
 	engine->retry_support = retry_support;
 	engine->priv_data = dev;
-	/*
-	 * Batch requests is possible only if
-	 * hardware has support for retry mechanism.
-	 */
-	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;
 
 	snprintf(engine->name, sizeof(engine->name),
 		 "%s-engine", dev_name(dev));
@@ -544,7 +483,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
  */
 struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 {
-	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
+	return crypto_engine_alloc_init_and_set(dev, false, rt,
 						CRYPTO_ENGINE_MAX_QLEN);
 }
 EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
@@ -569,9 +508,6 @@ int crypto_engine_register_aead(struct aead_engine_alg *alg)
 {
 	if (!alg->op.do_one_request)
 		return -EINVAL;
-
-	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
 	return crypto_register_aead(&alg->base);
 }
 EXPORT_SYMBOL_GPL(crypto_engine_register_aead);
@@ -614,9 +550,6 @@ int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
 {
 	if (!alg->op.do_one_request)
 		return -EINVAL;
-
-	alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
 	return crypto_register_ahash(&alg->base);
 }
 EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);
@@ -660,9 +593,6 @@ int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
 {
 	if (!alg->op.do_one_request)
 		return -EINVAL;
-
-	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
 	return crypto_register_akcipher(&alg->base);
 }
 EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);
@@ -677,9 +607,6 @@ int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
 {
 	if (!alg->op.do_one_request)
 		return -EINVAL;
-
-	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
 	return crypto_register_kpp(&alg->base);
 }
 EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);
@@ -694,9 +621,6 @@ int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
 {
 	if (!alg->op.do_one_request)
 		return -EINVAL;
-
-	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
-
 	return crypto_register_skcipher(&alg->base);
 }
 EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);
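
For driver authors, the visible fallout of this change is twofold: crypto_engine_alloc_init_and_set() loses its cbk_do_batch parameter (batching, like the prepare/unprepare_crypt_hardware hooks, is no longer invoked by the core), and registering an engine algorithm no longer sets or requires the temporary CRYPTO_ALG_ENGINE flag, since the core now recovers the crypto_engine_op via container_of() on the registered algorithm and only insists that op.do_one_request be non-NULL. A minimal sketch of the updated call pattern for a hypothetical skcipher driver follows; the my_* names, the "cbc(aes)" template and the queue length are illustrative placeholders, not part of this patch.

#include <crypto/engine.h>
#include <linux/device.h>
#include <linux/errno.h>

/*
 * Hypothetical request handler: the engine core now reaches this through
 * container_of() on the registered algorithm rather than CRYPTO_ALG_ENGINE.
 */
static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	/* Program the hardware with the request held in areq. */
	return 0;
}

static struct skcipher_engine_alg my_alg = {
	.base.base.cra_name		= "cbc(aes)",
	.base.base.cra_driver_name	= "cbc-aes-mydev",
	/* ...remaining skcipher fields (setkey, encrypt, decrypt, ...)... */
	.op.do_one_request		= my_do_one_request,
};

/* Hypothetical probe helper showing the new four-argument allocator. */
static int my_engine_setup(struct device *dev)
{
	struct crypto_engine *engine;

	/* cbk_do_batch is gone: (dev, retry_support, rt, qlen) */
	engine = crypto_engine_alloc_init_and_set(dev, true, false, 16);
	if (!engine)
		return -ENOMEM;

	/* Returns -EINVAL if op.do_one_request were left NULL. */
	return crypto_engine_register_skcipher(&my_alg);
}

Drivers that previously passed a batching callback here, or that did hardware setup/teardown in prepare_crypt_hardware/unprepare_crypt_hardware, can no longer count on the core calling any of those hooks; any such work presumably has to move elsewhere, for example into the do_one_request handler itself.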