author     Ofer Heifetz <oferh@marvell.com>          2018-06-28 18:21:57 +0300
committer  Herbert Xu <herbert@gondor.apana.org.au>  2018-07-08 19:30:19 +0300
commit     9744fec95f0674fbf67b12c42c3784dc299dc904 (patch)
tree       2dcff99ca9570b5b587b434e3b9b0f1d4221dea1 /drivers/crypto/inside-secure/safexcel.c
parent     6246987932a52c2676a3bab7d624a607aa228e59 (diff)
download   linux-9744fec95f0674fbf67b12c42c3784dc299dc904.tar.xz
crypto: inside-secure - remove request list to improve performance
The main goal of this patch is to improve driver performance by moving the crypto request from a list to an RDR ring shadow. This is possible since there is one producer and one consumer for this RDR request shadow, and one ring descriptor is left unused. This change eliminates the use of a spinlock when accessing the descriptor ring and the need to dynamically allocate memory per crypto request.

The crypto request is placed in the first RDR shadow descriptor only if there are enough descriptors; when the result handler is invoked, it fetches the first result descriptor from the RDR shadow.

Signed-off-by: Ofer Heifetz <oferh@marvell.com>
Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
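The list-to-shadow-ring change boils down to a classic single-producer/single-consumer ring buffer: the send path stores the crypto request pointer in the slot matching the result descriptor it just queued, and the result handler consumes slots in order, so no lock or per-request allocation is needed as long as one slot is kept unused. Below is a minimal, self-contained sketch of that idea in plain C; the demo_* names, the fixed-size array, and the explicit read/write indices are illustrative assumptions only and are not the driver's actual API (the driver derives the slot index from the descriptor ring itself via safexcel_ring_rdr_rdesc_index()).

/*
 * Sketch of the single-producer/single-consumer "shadow ring" idea:
 * the producer stores a request pointer at the next free slot, the
 * consumer reads slots back in order, and one slot is always kept
 * unused so full and empty states can be told apart without a lock.
 * All demo_* names are illustrative, not part of the driver.
 */
#include <stdio.h>
#include <stdbool.h>

#define DEMO_RING_SIZE 8            /* stand-in for EIP197_DEFAULT_RING_SIZE */

struct demo_req { int id; };

struct demo_ring {
	struct demo_req *shadow[DEMO_RING_SIZE];
	unsigned int write;         /* producer index (send path)       */
	unsigned int read;          /* consumer index (result handling) */
};

/* Producer side: succeeds only while at least one slot stays unused. */
static bool demo_req_set(struct demo_ring *r, struct demo_req *req)
{
	unsigned int next = (r->write + 1) % DEMO_RING_SIZE;

	if (next == r->read)        /* ring full: one slot is kept free */
		return false;

	r->shadow[r->write] = req;
	r->write = next;
	return true;
}

/* Consumer side: returns requests in the order they were queued. */
static struct demo_req *demo_req_get(struct demo_ring *r)
{
	struct demo_req *req;

	if (r->read == r->write)    /* ring empty */
		return NULL;

	req = r->shadow[r->read];
	r->read = (r->read + 1) % DEMO_RING_SIZE;
	return req;
}

int main(void)
{
	struct demo_ring ring = { .write = 0, .read = 0 };
	struct demo_req reqs[3] = { { 1 }, { 2 }, { 3 } };
	struct demo_req *req;

	for (int i = 0; i < 3; i++)
		demo_req_set(&ring, &reqs[i]);

	while ((req = demo_req_get(&ring)))
		printf("completed request %d\n", req->id);

	return 0;
}

Keeping one slot free is what lets "full" (next write index equals read index) and "empty" (write index equals read index) be distinguished without extra state, which mirrors the commit message's note that one ring descriptor is left unused.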
Diffstat (limited to 'drivers/crypto/inside-secure/safexcel.c')
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 91
1 file changed, 45 insertions(+), 46 deletions(-)
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 7bfdba1ada26..bd3635cb1e50 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -509,7 +509,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
struct crypto_async_request *req, *backlog;
struct safexcel_context *ctx;
- struct safexcel_request *request;
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
/* If a request wasn't properly dequeued because of a lack of resources,
@@ -533,16 +532,10 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
}
handle_req:
- request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
- if (!request)
- goto request_failed;
-
ctx = crypto_tfm_ctx(req->tfm);
- ret = ctx->send(req, ring, request, &commands, &results);
- if (ret) {
- kfree(request);
+ ret = ctx->send(req, ring, &commands, &results);
+ if (ret)
goto request_failed;
- }
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -551,14 +544,8 @@ handle_req:
* to the engine because the input data was cached, continue to
* dequeue other requests as this is valid and not an error.
*/
- if (!commands && !results) {
- kfree(request);
+ if (!commands && !results)
continue;
- }
-
- spin_lock_bh(&priv->ring[ring].egress_lock);
- list_add_tail(&request->list, &priv->ring[ring].list);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
cdesc += commands;
rdesc += results;
@@ -576,7 +563,7 @@ finalize:
if (!nreq)
return;
- spin_lock_bh(&priv->ring[ring].egress_lock);
+ spin_lock_bh(&priv->ring[ring].lock);
priv->ring[ring].requests += nreq;
@@ -585,7 +572,7 @@ finalize:
priv->ring[ring].busy = true;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ spin_unlock_bh(&priv->ring[ring].lock);
/* let the RDR know we have pending descriptors */
writel((rdesc * priv->config.rd_offset) << 2,
@@ -617,6 +604,24 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
return -EINVAL;
}
+inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc,
+ struct crypto_async_request *req)
+{
+ int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
+
+ priv->ring[ring].rdr_req[i] = req;
+}
+
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
+{
+ int i = safexcel_ring_first_rdr_index(priv, ring);
+
+ return priv->ring[ring].rdr_req[i];
+}
+
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
struct safexcel_command_desc *cdesc;
@@ -645,21 +650,16 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error)
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
- dma_addr_t ctxr_dma, int ring,
- struct safexcel_request *request)
+ dma_addr_t ctxr_dma, int ring)
{
struct safexcel_command_desc *cdesc;
struct safexcel_result_desc *rdesc;
int ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
-
/* Prepare command descriptor */
cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
- if (IS_ERR(cdesc)) {
- ret = PTR_ERR(cdesc);
- goto unlock;
- }
+ if (IS_ERR(cdesc))
+ return PTR_ERR(cdesc);
cdesc->control_data.type = EIP197_TYPE_EXTENDED;
cdesc->control_data.options = 0;
@@ -674,21 +674,20 @@ int safexcel_invalidate_cache(struct crypto_async_request *async,
goto cdesc_rollback;
}
- request->req = async;
- goto unlock;
+ safexcel_rdr_req_set(priv, ring, rdesc, async);
+
+ return ret;
cdesc_rollback:
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
-unlock:
- spin_unlock_bh(&priv->ring[ring].egress_lock);
return ret;
}
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
int ring)
{
- struct safexcel_request *sreq;
+ struct crypto_async_request *req;
struct safexcel_context *ctx;
int ret, i, nreq, ndesc, tot_descs, handled = 0;
bool should_complete;
@@ -703,28 +702,22 @@ handle_results:
goto requests_left;
for (i = 0; i < nreq; i++) {
- spin_lock_bh(&priv->ring[ring].egress_lock);
- sreq = list_first_entry(&priv->ring[ring].list,
- struct safexcel_request, list);
- list_del(&sreq->list);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
- ctx = crypto_tfm_ctx(sreq->req->tfm);
- ndesc = ctx->handle_result(priv, ring, sreq->req,
+ req = safexcel_rdr_req_get(priv, ring);
+
+ ctx = crypto_tfm_ctx(req->tfm);
+ ndesc = ctx->handle_result(priv, ring, req,
&should_complete, &ret);
if (ndesc < 0) {
- kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
goto acknowledge;
}
if (should_complete) {
local_bh_disable();
- sreq->req->complete(sreq->req, ret);
+ req->complete(req, ret);
local_bh_enable();
}
- kfree(sreq);
tot_descs += ndesc;
handled++;
}
@@ -743,7 +736,7 @@ acknowledge:
goto handle_results;
requests_left:
- spin_lock_bh(&priv->ring[ring].egress_lock);
+ spin_lock_bh(&priv->ring[ring].lock);
priv->ring[ring].requests -= handled;
safexcel_try_push_requests(priv, ring);
@@ -751,7 +744,7 @@ requests_left:
if (!priv->ring[ring].requests)
priv->ring[ring].busy = false;
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ spin_unlock_bh(&priv->ring[ring].lock);
}
static void safexcel_dequeue_work(struct work_struct *work)
@@ -1073,6 +1066,14 @@ static int safexcel_probe(struct platform_device *pdev)
if (ret)
goto err_reg_clk;
+ priv->ring[i].rdr_req = devm_kzalloc(dev,
+ sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
+ GFP_KERNEL);
+ if (!priv->ring[i].rdr_req) {
+ ret = -ENOMEM;
+ goto err_reg_clk;
+ }
+
ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
if (!ring_irq) {
ret = -ENOMEM;
@@ -1108,9 +1109,7 @@ static int safexcel_probe(struct platform_device *pdev)
crypto_init_queue(&priv->ring[i].queue,
EIP197_DEFAULT_RING_SIZE);
- INIT_LIST_HEAD(&priv->ring[i].list);
spin_lock_init(&priv->ring[i].lock);
- spin_lock_init(&priv->ring[i].egress_lock);
spin_lock_init(&priv->ring[i].queue_lock);
}