Diffstat (limited to 'drivers/crypto/inside-secure')
 drivers/crypto/inside-secure/safexcel.c        | 371
 drivers/crypto/inside-secure/safexcel.h        | 173
 drivers/crypto/inside-secure/safexcel_cipher.c | 136
 drivers/crypto/inside-secure/safexcel_hash.c   | 214
 4 files changed, 613 insertions(+), 281 deletions(-)
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 89ba9e85c0f3..225e74a7f724 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -108,10 +108,10 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
 	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
 	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
 	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
-	       priv->base + ctrl);
+	       EIP197_PE(priv) + ctrl);
 
 	/* Enable access to the program memory */
-	writel(prog_en, priv->base + EIP197_PE_ICE_RAM_CTRL);
+	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
 
 	/* Write the firmware */
 	for (i = 0; i < fw->size / sizeof(u32); i++)
@@ -119,12 +119,12 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
 		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
 
 	/* Disable access to the program memory */
-	writel(0, priv->base + EIP197_PE_ICE_RAM_CTRL);
+	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
 
 	/* Release engine from reset */
-	val = readl(priv->base + ctrl);
+	val = readl(EIP197_PE(priv) + ctrl);
 	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
-	writel(val, priv->base + ctrl);
+	writel(val, EIP197_PE(priv) + ctrl);
 }
 
 static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
@@ -145,14 +145,14 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
 	}
 
 	/* Clear the scratchpad memory */
-	val = readl(priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+	val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
 	val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
 	       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
 	       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
 	       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
-	writel(val, priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+	writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
 
-	memset(priv->base + EIP197_PE_ICE_SCRATCH_RAM, 0,
+	memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
 	       EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
 
 	eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
@@ -173,7 +173,7 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
 	u32 hdw, cd_size_rnd, val;
 	int i;
 
-	hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
 	hdw &= GENMASK(27, 25);
 	hdw >>= 25;
 
@@ -182,26 +182,25 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
 	for (i = 0; i < priv->config.rings; i++) {
 		/* ring base address */
 		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
-		       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
 		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
-		       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
 
 		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
 		       priv->config.cd_size,
-		       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
 		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
 		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
-		       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
 
 		/* Configure DMA tx control */
 		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
 		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
-		writel(val,
-		       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DMA_CFG);
+		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
 
 		/* clear any pending interrupt */
 		writel(GENMASK(5, 0),
-		       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_STAT);
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
 	}
 
 	return 0;
@@ -212,7 +211,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
 	u32 hdw, rd_size_rnd, val;
 	int i;
 
-	hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
 	hdw &= GENMASK(27, 25);
 	hdw >>= 25;
 
@@ -221,33 +220,33 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
 	for (i = 0; i < priv->config.rings; i++) {
 		/* ring base address */
 		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
-		       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
 		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
-		       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
 
 		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
 		       priv->config.rd_size,
-		       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
 		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
 		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
-		       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
 
 		/* Configure DMA tx control */
 		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
 		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
 		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG;
 		writel(val,
-		       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DMA_CFG);
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
 
 		/* clear any pending interrupt */
 		writel(GENMASK(7, 0),
-		       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_STAT);
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
 
 		/* enable ring interrupt */
-		val = readl(priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
 		val |= EIP197_RDR_IRQ(i);
-		writel(val, priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
 	}
 
 	return 0;
@@ -259,39 +258,40 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
 	int i, ret;
 
 	/* Determine endianess and configure byte swap */
-	version = readl(priv->base + EIP197_HIA_VERSION);
-	val = readl(priv->base + EIP197_HIA_MST_CTRL);
+	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
+	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
 
 	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
 		val |= EIP197_MST_CTRL_BYTE_SWAP;
 	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
 		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
 
-	writel(val, priv->base + EIP197_HIA_MST_CTRL);
-
+	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
 
 	/* Configure wr/rd cache values */
 	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
 	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
-	       priv->base + EIP197_MST_CTRL);
+	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
 
 	/* Interrupts reset */
 
 	/* Disable all global interrupts */
-	writel(0, priv->base + EIP197_HIA_AIC_G_ENABLE_CTRL);
+	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
 
 	/* Clear any pending interrupt */
-	writel(GENMASK(31, 0), priv->base + EIP197_HIA_AIC_G_ACK);
+	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
 
 	/* Data Fetch Engine configuration */
 
 	/* Reset all DFE threads */
 	writel(EIP197_DxE_THR_CTRL_RESET_PE,
-	       priv->base + EIP197_HIA_DFE_THR_CTRL);
+	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
 
-	/* Reset HIA input interface arbiter */
-	writel(EIP197_HIA_RA_PE_CTRL_RESET,
-	       priv->base + EIP197_HIA_RA_PE_CTRL);
+	if (priv->version == EIP197) {
+		/* Reset HIA input interface arbiter */
+		writel(EIP197_HIA_RA_PE_CTRL_RESET,
+		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
+	}
 
 	/* DMA transfer size to use */
 	val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
@@ -299,29 +299,32 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
 	val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
 	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
 	val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
-	writel(val, priv->base + EIP197_HIA_DFE_CFG);
+	writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);
 
 	/* Leave the DFE threads reset state */
-	writel(0, priv->base + EIP197_HIA_DFE_THR_CTRL);
+	writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
 
 	/* Configure the procesing engine thresholds */
 	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
-	       priv->base + EIP197_PE_IN_DBUF_THRES);
+	       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
 	writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
-	       priv->base + EIP197_PE_IN_TBUF_THRES);
+	       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);
 
-	/* enable HIA input interface arbiter and rings */
-	writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
-	       priv->base + EIP197_HIA_RA_PE_CTRL);
+	if (priv->version == EIP197) {
+		/* enable HIA input interface arbiter and rings */
+		writel(EIP197_HIA_RA_PE_CTRL_EN |
+		       GENMASK(priv->config.rings - 1, 0),
+		       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
+	}
 
 	/* Data Store Engine configuration */
 
 	/* Reset all DSE threads */
 	writel(EIP197_DxE_THR_CTRL_RESET_PE,
-	       priv->base + EIP197_HIA_DSE_THR_CTRL);
+	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
 
 	/* Wait for all DSE threads to complete */
-	while ((readl(priv->base + EIP197_HIA_DSE_THR_STAT) &
+	while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
 		GENMASK(15, 12)) != GENMASK(15, 12))
 		;
 
@@ -330,15 +333,19 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
 	val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
 	val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
 	val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE;
-	val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
-	writel(val, priv->base + EIP197_HIA_DSE_CFG);
+	/* FIXME: instability issues can occur for EIP97 but disabling it impacts
+	 * performance.
+	 */
+	if (priv->version == EIP197)
+		val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+	writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);
 
 	/* Leave the DSE threads reset state */
-	writel(0, priv->base + EIP197_HIA_DSE_THR_CTRL);
+	writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
 
 	/* Configure the procesing engine thresholds */
 	writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
-	       priv->base + EIP197_PE_OUT_DBUF_THRES);
+	       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);
 
 	/* Processing Engine configuration */
 
@@ -348,73 +355,75 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
 	val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
 	val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
 	val |= EIP197_ALG_SHA2;
-	writel(val, priv->base + EIP197_PE_EIP96_FUNCTION_EN);
+	writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
 
 	/* Command Descriptor Rings prepare */
 	for (i = 0; i < priv->config.rings; i++) {
 		/* Clear interrupts for this ring */
 		writel(GENMASK(31, 0),
-		       priv->base + EIP197_HIA_AIC_R_ENABLE_CLR(i));
+		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
 
 		/* Disable external triggering */
-		writel(0, priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
 
 		/* Clear the pending prepared counter */
 		writel(EIP197_xDR_PREP_CLR_COUNT,
-		       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
 
 		/* Clear the pending processed counter */
 		writel(EIP197_xDR_PROC_CLR_COUNT,
-		       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
 
 		writel(0,
-		       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
 		writel(0,
-		       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
 
 		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
-		       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_SIZE);
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
 	}
 
 	/* Result Descriptor Ring prepare */
 	for (i = 0; i < priv->config.rings; i++) {
 		/* Disable external triggering*/
-		writel(0, priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
 
 		/* Clear the pending prepared counter */
 		writel(EIP197_xDR_PREP_CLR_COUNT,
-		       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
 
 		/* Clear the pending processed counter */
 		writel(EIP197_xDR_PROC_CLR_COUNT,
-		       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
 
 		writel(0,
-		       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
 		writel(0,
-		       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
 
 		/* Ring size */
 		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
-		       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_SIZE);
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
 	}
 
 	/* Enable command descriptor rings */
 	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
-	       priv->base + EIP197_HIA_DFE_THR_CTRL);
+	       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
 
 	/* Enable result descriptor rings */
 	writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
-	       priv->base + EIP197_HIA_DSE_THR_CTRL);
+	       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
 
 	/* Clear any HIA interrupt */
-	writel(GENMASK(30, 20), priv->base + EIP197_HIA_AIC_G_ACK);
+	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
 
-	eip197_trc_cache_init(priv);
+	if (priv->version == EIP197) {
+		eip197_trc_cache_init(priv);
 
-	ret = eip197_load_firmwares(priv);
-	if (ret)
-		return ret;
+		ret = eip197_load_firmwares(priv);
+		if (ret)
+			return ret;
+	}
 
 	safexcel_hw_setup_cdesc_rings(priv);
 	safexcel_hw_setup_rdesc_rings(priv);
@@ -422,6 +431,23 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
 	return 0;
 }
 
+/* Called with ring's lock taken */
+static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
+				      int ring, int reqs)
+{
+	int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ);
+
+	if (!coal)
+		return 0;
+
+	/* Configure when we want an interrupt */
+	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
+	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
+
+	return coal;
+}
+
 void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 {
 	struct crypto_async_request *req, *backlog;
@@ -429,34 +455,36 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 	struct safexcel_request *request;
 	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
 
-	priv->ring[ring].need_dequeue = false;
+	/* If a request wasn't properly dequeued because of a lack of resources,
+	 * process it first.
+	 */
+	req = priv->ring[ring].req;
+	backlog = priv->ring[ring].backlog;
+	if (req)
+		goto handle_req;
 
-	do {
+	while (true) {
 		spin_lock_bh(&priv->ring[ring].queue_lock);
 		backlog = crypto_get_backlog(&priv->ring[ring].queue);
 		req = crypto_dequeue_request(&priv->ring[ring].queue);
 		spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-		if (!req)
+		if (!req) {
+			priv->ring[ring].req = NULL;
+			priv->ring[ring].backlog = NULL;
 			goto finalize;
+		}
 
+handle_req:
 		request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
-		if (!request) {
-			spin_lock_bh(&priv->ring[ring].queue_lock);
-			crypto_enqueue_request(&priv->ring[ring].queue, req);
-			spin_unlock_bh(&priv->ring[ring].queue_lock);
-
-			priv->ring[ring].need_dequeue = true;
-			goto finalize;
-		}
+		if (!request)
+			goto request_failed;
 
 		ctx = crypto_tfm_ctx(req->tfm);
 		ret = ctx->send(req, ring, request, &commands, &results);
 		if (ret) {
 			kfree(request);
-			req->complete(req, ret);
-			priv->ring[ring].need_dequeue = true;
-			goto finalize;
+			goto request_failed;
 		}
 
 		if (backlog)
@@ -468,30 +496,39 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 
 		cdesc += commands;
 		rdesc += results;
-	} while (nreq++ < EIP197_MAX_BATCH_SZ);
+		nreq++;
+	}
+
+request_failed:
+	/* Not enough resources to handle all the requests. Bail out and save
+	 * the request and the backlog for the next dequeue call (per-ring).
+	 */
+	priv->ring[ring].req = req;
+	priv->ring[ring].backlog = backlog;
 
 finalize:
-	if (nreq == EIP197_MAX_BATCH_SZ)
-		priv->ring[ring].need_dequeue = true;
-	else if (!nreq)
+	if (!nreq)
 		return;
 
-	spin_lock_bh(&priv->ring[ring].lock);
+	spin_lock_bh(&priv->ring[ring].egress_lock);
 
-	/* Configure when we want an interrupt */
-	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
-	       EIP197_HIA_RDR_THRESH_PROC_PKT(nreq),
-	       priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);
+	if (!priv->ring[ring].busy) {
+		nreq -= safexcel_try_push_requests(priv, ring, nreq);
+		if (nreq)
+			priv->ring[ring].busy = true;
+	}
+
+	priv->ring[ring].requests_left += nreq;
+
+	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	/* let the RDR know we have pending descriptors */
 	writel((rdesc * priv->config.rd_offset) << 2,
-	       priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
+	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
 
 	/* let the CDR know we have pending descriptors */
 	writel((cdesc * priv->config.cd_offset) << 2,
-	       priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
-
-	spin_unlock_bh(&priv->ring[ring].lock);
+	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
 }
 
 void safexcel_free_context(struct safexcel_crypto_priv *priv,
@@ -540,7 +577,6 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error)
 }
 
 int safexcel_invalidate_cache(struct crypto_async_request *async,
-			      struct safexcel_context *ctx,
 			      struct safexcel_crypto_priv *priv,
 			      dma_addr_t ctxr_dma, int ring,
 			      struct safexcel_request *request)
@@ -587,14 +623,17 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
 {
 	struct safexcel_request *sreq;
 	struct safexcel_context *ctx;
-	int ret, i, nreq, ndesc = 0;
+	int ret, i, nreq, ndesc, tot_descs, done;
 	bool should_complete;
 
-	nreq = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
-	nreq >>= 24;
-	nreq &= GENMASK(6, 0);
+handle_results:
+	tot_descs = 0;
+
+	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
+	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
 	if (!nreq)
-		return;
+		goto requests_left;
 
 	for (i = 0; i < nreq; i++) {
 		spin_lock_bh(&priv->ring[ring].egress_lock);
@@ -607,14 +646,11 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
 		ndesc = ctx->handle_result(priv, ring, sreq->req,
 					   &should_complete, &ret);
 		if (ndesc < 0) {
+			kfree(sreq);
 			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
-			return;
+			goto acknowledge;
 		}
 
-		writel(EIP197_xDR_PROC_xD_PKT(1) |
-		       EIP197_xDR_PROC_xD_COUNT(ndesc * priv->config.rd_offset),
-		       priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
-
 		if (should_complete) {
 			local_bh_disable();
 			sreq->req->complete(sreq->req, ret);
@@ -622,19 +658,41 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
 		}
 
 		kfree(sreq);
+		tot_descs += ndesc;
 	}
+
+acknowledge:
+	if (i) {
+		writel(EIP197_xDR_PROC_xD_PKT(i) |
+		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
+		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+	}
+
+	/* If the number of requests overflowed the counter, try to process more
+	 * requests.
+	 */
+	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
+		goto handle_results;
+
+requests_left:
+	spin_lock_bh(&priv->ring[ring].egress_lock);
+
+	done = safexcel_try_push_requests(priv, ring,
+					  priv->ring[ring].requests_left);
+
+	priv->ring[ring].requests_left -= done;
+	if (!done && !priv->ring[ring].requests_left)
+		priv->ring[ring].busy = false;
+
+	spin_unlock_bh(&priv->ring[ring].egress_lock);
 }
 
-static void safexcel_handle_result_work(struct work_struct *work)
+static void safexcel_dequeue_work(struct work_struct *work)
 {
 	struct safexcel_work_data *data =
 			container_of(work, struct safexcel_work_data, work);
-	struct safexcel_crypto_priv *priv = data->priv;
-
-	safexcel_handle_result_descriptor(priv, data->ring);
 
-	if (priv->ring[data->ring].need_dequeue)
-		safexcel_dequeue(data->priv, data->ring);
+	safexcel_dequeue(data->priv, data->ring);
 }
 
 struct safexcel_ring_irq_data {
@@ -646,16 +704,16 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
 {
 	struct safexcel_ring_irq_data *irq_data = data;
 	struct safexcel_crypto_priv *priv = irq_data->priv;
-	int ring = irq_data->ring;
+	int ring = irq_data->ring, rc = IRQ_NONE;
 	u32 status, stat;
 
-	status = readl(priv->base + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
+	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
 	if (!status)
-		return IRQ_NONE;
+		return rc;
 
 	/* RDR interrupts */
 	if (status & EIP197_RDR_IRQ(ring)) {
-		stat = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
 
 		if (unlikely(stat & EIP197_xDR_ERR)) {
 			/*
@@ -665,22 +723,37 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
 			 */
 			dev_err(priv->dev, "RDR: fatal error.");
 		} else if (likely(stat & EIP197_xDR_THRESH)) {
-			queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work);
+			rc = IRQ_WAKE_THREAD;
 		}
 
 		/* ACK the interrupts */
 		writel(stat & 0xff,
-		       priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
 	}
 
 	/* ACK the interrupts */
-	writel(status, priv->base + EIP197_HIA_AIC_R_ACK(ring));
+	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
+
+	return rc;
+}
+
+static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
+{
+	struct safexcel_ring_irq_data *irq_data = data;
+	struct safexcel_crypto_priv *priv = irq_data->priv;
+	int ring = irq_data->ring;
+
+	safexcel_handle_result_descriptor(priv, ring);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
 
 	return IRQ_HANDLED;
 }
 
 static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
 				     irq_handler_t handler,
+				     irq_handler_t threaded_handler,
 				     struct safexcel_ring_irq_data *ring_irq_priv)
 {
 	int ret, irq = platform_get_irq_byname(pdev, name);
@@ -690,8 +763,9 @@ static int safexcel_request_ring_irq(struct platform_device *pdev, const char *n
 		return irq;
 	}
 
-	ret = devm_request_irq(&pdev->dev, irq, handler, 0,
-			       dev_name(&pdev->dev), ring_irq_priv);
+	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
+					threaded_handler, IRQF_ONESHOT,
+					dev_name(&pdev->dev), ring_irq_priv);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
 		return ret;
@@ -754,11 +828,11 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
 {
 	u32 val, mask;
 
-	val = readl(priv->base + EIP197_HIA_OPTIONS);
+	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
 	val = (val & GENMASK(27, 25)) >> 25;
 	mask = BIT(val) - 1;
 
-	val = readl(priv->base + EIP197_HIA_OPTIONS);
+	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
 	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
 
 	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
@@ -768,6 +842,35 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
 	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
 }
 
+static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
+{
+	struct safexcel_register_offsets *offsets = &priv->offsets;
+
+	if (priv->version == EIP197) {
+		offsets->hia_aic = EIP197_HIA_AIC_BASE;
+		offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
+		offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
+		offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
+		offsets->hia_dfe = EIP197_HIA_DFE_BASE;
+		offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
+		offsets->hia_dse = EIP197_HIA_DSE_BASE;
+		offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
+		offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
+		offsets->pe = EIP197_PE_BASE;
+	} else {
+		offsets->hia_aic = EIP97_HIA_AIC_BASE;
+		offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
+		offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
+		offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
+		offsets->hia_dfe = EIP97_HIA_DFE_BASE;
+		offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
+		offsets->hia_dse = EIP97_HIA_DSE_BASE;
+		offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
+		offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
+		offsets->pe = EIP97_PE_BASE;
+	}
+}
+
 static int safexcel_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -780,6 +883,9 @@ static int safexcel_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	priv->dev = dev;
+	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+
+	safexcel_init_register_offsets(priv);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	priv->base = devm_ioremap_resource(dev, res);
@@ -838,6 +944,7 @@ static int safexcel_probe(struct platform_device *pdev)
 		snprintf(irq_name, 6, "ring%d", i);
 		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
+						safexcel_irq_ring_thread,
 						ring_irq);
 		if (irq < 0) {
 			ret = irq;
@@ -846,7 +953,7 @@ static int safexcel_probe(struct platform_device *pdev)
 
 		priv->ring[i].work_data.priv = priv;
 		priv->ring[i].work_data.ring = i;
-		INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work);
+		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
 
 		snprintf(wq_name, 9, "wq_ring%d", i);
 		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
@@ -855,6 +962,9 @@ static int safexcel_probe(struct platform_device *pdev)
 			goto err_clk;
 		}
 
+		priv->ring[i].requests_left = 0;
+		priv->ring[i].busy = false;
+
 		crypto_init_queue(&priv->ring[i].queue,
 				  EIP197_DEFAULT_RING_SIZE);
 
@@ -902,7 +1012,14 @@ static int safexcel_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id safexcel_of_match_table[] = {
-	{ .compatible = "inside-secure,safexcel-eip197" },
+	{
+		.compatible = "inside-secure,safexcel-eip97",
+		.data = (void *)EIP97,
+	},
+	{
+		.compatible = "inside-secure,safexcel-eip197",
+		.data = (void *)EIP197,
+	},
 	{},
 };
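The most subtle part of the safexcel.c changes is the new result-interrupt coalescing. The stand-alone C sketch below models how safexcel_try_push_requests(), the busy flag and the requests_left counter interact across the dequeue and result paths. It is an illustration only, not driver code: ring_state is a simplification of the per-ring fields, and the printf() stands in for the EIP197_HIA_xDR_THRESH register write.

#include <stdbool.h>
#include <stdio.h>

#define EIP197_MAX_BATCH_SZ 64

struct ring_state {
	int requests_left;  /* queued in hardware, IRQ not yet armed */
	bool busy;          /* a coalesced batch is currently armed */
};

/* Mirrors safexcel_try_push_requests(): arm the result-ring threshold
 * interrupt for up to EIP197_MAX_BATCH_SZ pending requests and return
 * how many were covered. */
static int try_push_requests(int reqs)
{
	int coal = reqs < EIP197_MAX_BATCH_SZ ? reqs : EIP197_MAX_BATCH_SZ;

	if (coal)
		printf("arm threshold IRQ after %d packets\n", coal);
	return coal;
}

/* Dequeue path: nreq new requests were just pushed to the engine. */
static void on_dequeue(struct ring_state *r, int nreq)
{
	if (!r->busy) {
		nreq -= try_push_requests(nreq);
		if (nreq)
			r->busy = true;
	}
	r->requests_left += nreq;
}

/* Result path: the armed batch completed; arm the next slice if any. */
static void on_results_handled(struct ring_state *r)
{
	int done = try_push_requests(r->requests_left);

	r->requests_left -= done;
	if (!done && !r->requests_left)
		r->busy = false;
}

int main(void)
{
	struct ring_state r = { 0, false };

	on_dequeue(&r, 100);    /* arms 64, leaves 36 for later */
	on_results_handled(&r); /* arms the remaining 36 */
	on_results_handled(&r); /* nothing left, ring goes idle */
	printf("busy=%d requests_left=%d\n", r.busy, r.requests_left);
	return 0;
}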
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 304c5838c11a..4e219c21608b 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -19,64 +19,103 @@
 #define EIP197_HIA_VERSION_BE			0x35ca
 
 /* Static configuration */
-#define EIP197_DEFAULT_RING_SIZE		64
+#define EIP197_DEFAULT_RING_SIZE		400
 #define EIP197_MAX_TOKENS			5
 #define EIP197_MAX_RINGS			4
 #define EIP197_FETCH_COUNT			1
-#define EIP197_MAX_BATCH_SZ			EIP197_DEFAULT_RING_SIZE
+#define EIP197_MAX_BATCH_SZ			64
 
 #define EIP197_GFP_FLAGS(base)	((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
 				 GFP_KERNEL : GFP_ATOMIC)
 
+/* Register base offsets */
+#define EIP197_HIA_AIC(priv)		((priv)->base + (priv)->offsets.hia_aic)
+#define EIP197_HIA_AIC_G(priv)		((priv)->base + (priv)->offsets.hia_aic_g)
+#define EIP197_HIA_AIC_R(priv)		((priv)->base + (priv)->offsets.hia_aic_r)
+#define EIP197_HIA_AIC_xDR(priv)	((priv)->base + (priv)->offsets.hia_aic_xdr)
+#define EIP197_HIA_DFE(priv)		((priv)->base + (priv)->offsets.hia_dfe)
+#define EIP197_HIA_DFE_THR(priv)	((priv)->base + (priv)->offsets.hia_dfe_thr)
+#define EIP197_HIA_DSE(priv)		((priv)->base + (priv)->offsets.hia_dse)
+#define EIP197_HIA_DSE_THR(priv)	((priv)->base + (priv)->offsets.hia_dse_thr)
+#define EIP197_HIA_GEN_CFG(priv)	((priv)->base + (priv)->offsets.hia_gen_cfg)
+#define EIP197_PE(priv)			((priv)->base + (priv)->offsets.pe)
+
+/* EIP197 base offsets */
+#define EIP197_HIA_AIC_BASE		0x90000
+#define EIP197_HIA_AIC_G_BASE		0x90000
+#define EIP197_HIA_AIC_R_BASE		0x90800
+#define EIP197_HIA_AIC_xDR_BASE		0x80000
+#define EIP197_HIA_DFE_BASE		0x8c000
+#define EIP197_HIA_DFE_THR_BASE		0x8c040
+#define EIP197_HIA_DSE_BASE		0x8d000
+#define EIP197_HIA_DSE_THR_BASE		0x8d040
+#define EIP197_HIA_GEN_CFG_BASE		0xf0000
+#define EIP197_PE_BASE			0xa0000
+
+/* EIP97 base offsets */
+#define EIP97_HIA_AIC_BASE		0x0
+#define EIP97_HIA_AIC_G_BASE		0x0
+#define EIP97_HIA_AIC_R_BASE		0x0
+#define EIP97_HIA_AIC_xDR_BASE		0x0
+#define EIP97_HIA_DFE_BASE		0xf000
+#define EIP97_HIA_DFE_THR_BASE		0xf200
+#define EIP97_HIA_DSE_BASE		0xf400
+#define EIP97_HIA_DSE_THR_BASE		0xf600
+#define EIP97_HIA_GEN_CFG_BASE		0x10000
+#define EIP97_PE_BASE			0x10000
+
 /* CDR/RDR register offsets */
-#define EIP197_HIA_xDR_OFF(r)			(0x80000 + (r) * 0x1000)
-#define EIP197_HIA_CDR(r)			(EIP197_HIA_xDR_OFF(r))
-#define EIP197_HIA_RDR(r)			(EIP197_HIA_xDR_OFF(r) + 0x800)
-#define EIP197_HIA_xDR_RING_BASE_ADDR_LO	0x0
-#define EIP197_HIA_xDR_RING_BASE_ADDR_HI	0x4
-#define EIP197_HIA_xDR_RING_SIZE		0x18
-#define EIP197_HIA_xDR_DESC_SIZE		0x1c
-#define EIP197_HIA_xDR_CFG			0x20
-#define EIP197_HIA_xDR_DMA_CFG			0x24
-#define EIP197_HIA_xDR_THRESH			0x28
-#define EIP197_HIA_xDR_PREP_COUNT		0x2c
-#define EIP197_HIA_xDR_PROC_COUNT		0x30
-#define EIP197_HIA_xDR_PREP_PNTR		0x34
-#define EIP197_HIA_xDR_PROC_PNTR		0x38
-#define EIP197_HIA_xDR_STAT			0x3c
+#define EIP197_HIA_xDR_OFF(priv, r)		(EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000)
+#define EIP197_HIA_CDR(priv, r)			(EIP197_HIA_xDR_OFF(priv, r))
+#define EIP197_HIA_RDR(priv, r)			(EIP197_HIA_xDR_OFF(priv, r) + 0x800)
+#define EIP197_HIA_xDR_RING_BASE_ADDR_LO	0x0000
+#define EIP197_HIA_xDR_RING_BASE_ADDR_HI	0x0004
+#define EIP197_HIA_xDR_RING_SIZE		0x0018
+#define EIP197_HIA_xDR_DESC_SIZE		0x001c
+#define EIP197_HIA_xDR_CFG			0x0020
+#define EIP197_HIA_xDR_DMA_CFG			0x0024
+#define EIP197_HIA_xDR_THRESH			0x0028
+#define EIP197_HIA_xDR_PREP_COUNT		0x002c
+#define EIP197_HIA_xDR_PROC_COUNT		0x0030
+#define EIP197_HIA_xDR_PREP_PNTR		0x0034
+#define EIP197_HIA_xDR_PROC_PNTR		0x0038
+#define EIP197_HIA_xDR_STAT			0x003c
 
 /* register offsets */
-#define EIP197_HIA_DFE_CFG			0x8c000
-#define EIP197_HIA_DFE_THR_CTRL			0x8c040
-#define EIP197_HIA_DFE_THR_STAT			0x8c044
-#define EIP197_HIA_DSE_CFG			0x8d000
-#define EIP197_HIA_DSE_THR_CTRL			0x8d040
-#define EIP197_HIA_DSE_THR_STAT			0x8d044
-#define EIP197_HIA_RA_PE_CTRL			0x90010
-#define EIP197_HIA_RA_PE_STAT			0x90014
+#define EIP197_HIA_DFE_CFG			0x0000
+#define EIP197_HIA_DFE_THR_CTRL			0x0000
+#define EIP197_HIA_DFE_THR_STAT			0x0004
+#define EIP197_HIA_DSE_CFG			0x0000
+#define EIP197_HIA_DSE_THR_CTRL			0x0000
+#define EIP197_HIA_DSE_THR_STAT			0x0004
+#define EIP197_HIA_RA_PE_CTRL			0x0010
+#define EIP197_HIA_RA_PE_STAT			0x0014
 #define EIP197_HIA_AIC_R_OFF(r)			((r) * 0x1000)
-#define EIP197_HIA_AIC_R_ENABLE_CTRL(r)		(0x9e808 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ENABLED_STAT(r)	(0x9e810 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ACK(r)			(0x9e810 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ENABLE_CLR(r)		(0x9e814 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_G_ENABLE_CTRL		0x9f808
-#define EIP197_HIA_AIC_G_ENABLED_STAT		0x9f810
-#define EIP197_HIA_AIC_G_ACK			0x9f810
-#define EIP197_HIA_MST_CTRL			0x9fff4
-#define EIP197_HIA_OPTIONS			0x9fff8
-#define EIP197_HIA_VERSION			0x9fffc
-#define EIP197_PE_IN_DBUF_THRES			0xa0000
-#define EIP197_PE_IN_TBUF_THRES			0xa0100
-#define EIP197_PE_ICE_SCRATCH_RAM		0xa0800
-#define EIP197_PE_ICE_PUE_CTRL			0xa0c80
-#define EIP197_PE_ICE_SCRATCH_CTRL		0xa0d04
-#define EIP197_PE_ICE_FPP_CTRL			0xa0d80
-#define EIP197_PE_ICE_RAM_CTRL			0xa0ff0
-#define EIP197_PE_EIP96_FUNCTION_EN		0xa1004
-#define EIP197_PE_EIP96_CONTEXT_CTRL		0xa1008
-#define EIP197_PE_EIP96_CONTEXT_STAT		0xa100c
-#define EIP197_PE_OUT_DBUF_THRES		0xa1c00
-#define EIP197_PE_OUT_TBUF_THRES		0xa1d00
+#define EIP197_HIA_AIC_R_ENABLE_CTRL(r)		(0xe008 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLED_STAT(r)	(0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ACK(r)			(0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLE_CLR(r)		(0xe014 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_G_ENABLE_CTRL		0xf808
+#define EIP197_HIA_AIC_G_ENABLED_STAT		0xf810
+#define EIP197_HIA_AIC_G_ACK			0xf810
+#define EIP197_HIA_MST_CTRL			0xfff4
+#define EIP197_HIA_OPTIONS			0xfff8
+#define EIP197_HIA_VERSION			0xfffc
+#define EIP197_PE_IN_DBUF_THRES			0x0000
+#define EIP197_PE_IN_TBUF_THRES			0x0100
+#define EIP197_PE_ICE_SCRATCH_RAM		0x0800
+#define EIP197_PE_ICE_PUE_CTRL			0x0c80
+#define EIP197_PE_ICE_SCRATCH_CTRL		0x0d04
+#define EIP197_PE_ICE_FPP_CTRL			0x0d80
+#define EIP197_PE_ICE_RAM_CTRL			0x0ff0
+#define EIP197_PE_EIP96_FUNCTION_EN		0x1004
+#define EIP197_PE_EIP96_CONTEXT_CTRL		0x1008
+#define EIP197_PE_EIP96_CONTEXT_STAT		0x100c
+#define EIP197_PE_OUT_DBUF_THRES		0x1c00
+#define EIP197_PE_OUT_TBUF_THRES		0x1d00
+#define EIP197_MST_CTRL				0xfff4
+
+/* EIP197-specific registers, no indirection */
 #define EIP197_CLASSIFICATION_RAMS		0xe0000
 #define EIP197_TRC_CTRL				0xf0800
 #define EIP197_TRC_LASTRES			0xf0804
@@ -90,7 +129,6 @@
 #define EIP197_TRC_ECCDATASTAT			0xf083c
 #define EIP197_TRC_ECCDATA			0xf0840
 #define EIP197_CS_RAM_CTRL			0xf7ff0
-#define EIP197_MST_CTRL				0xffff4
 
 /* EIP197_HIA_xDR_DESC_SIZE */
 #define EIP197_xDR_DESC_MODE_64BIT		BIT(31)
@@ -117,6 +155,8 @@
 #define EIP197_xDR_PREP_CLR_COUNT		BIT(31)
 
 /* EIP197_HIA_xDR_PROC_COUNT */
+#define EIP197_xDR_PROC_xD_PKT_OFFSET		24
+#define EIP197_xDR_PROC_xD_PKT_MASK		GENMASK(6, 0)
 #define EIP197_xDR_PROC_xD_COUNT(n)		((n) << 2)
 #define EIP197_xDR_PROC_xD_PKT(n)		((n) << 24)
 #define EIP197_xDR_PROC_CLR_COUNT		BIT(31)
@@ -463,12 +503,33 @@ struct safexcel_work_data {
 	int ring;
 };
 
+enum safexcel_eip_version {
+	EIP97,
+	EIP197,
+};
+
+struct safexcel_register_offsets {
+	u32 hia_aic;
+	u32 hia_aic_g;
+	u32 hia_aic_r;
+	u32 hia_aic_xdr;
+	u32 hia_dfe;
+	u32 hia_dfe_thr;
+	u32 hia_dse;
+	u32 hia_dse_thr;
+	u32 hia_gen_cfg;
+	u32 pe;
+};
+
 struct safexcel_crypto_priv {
 	void __iomem *base;
 	struct device *dev;
 	struct clk *clk;
 	struct safexcel_config config;
 
+	enum safexcel_eip_version version;
+	struct safexcel_register_offsets offsets;
+
 	/* context DMA pool */
 	struct dma_pool *context_pool;
 
@@ -489,7 +550,20 @@ struct safexcel_crypto_priv {
 		/* queue */
 		struct crypto_queue queue;
 		spinlock_t queue_lock;
-		bool need_dequeue;
+
+		/* Number of requests in the engine that need the threshold
+		 * interrupt to be set up.
+		 */
+		int requests_left;
+
+		/* The ring is currently handling at least one request */
+		bool busy;
+
+		/* Store for current requests when bailing out of the dequeueing
+		 * function when not enough resources are available.
+		 */
+		struct crypto_async_request *req;
+		struct crypto_async_request *backlog;
 	} ring[EIP197_MAX_RINGS];
 };
 
@@ -539,7 +613,6 @@ void safexcel_free_context(struct safexcel_crypto_priv *priv,
 			   struct crypto_async_request *req,
 			   int result_sz);
 int safexcel_invalidate_cache(struct crypto_async_request *async,
-			      struct safexcel_context *ctx,
 			      struct safexcel_crypto_priv *priv,
 			      dma_addr_t ctxr_dma, int ring,
 			      struct safexcel_request *request);
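The register-layout indirection that safexcel.h introduces can be summarized outside the driver. The sketch below is a minimal user-space model, with most block offsets elided, of how one set of accessor macros serves both engine generations once the per-variant bases are filled in at probe time; only the two offsets shown are taken from the header above.

#include <stdint.h>
#include <stdio.h>

enum safexcel_eip_version { EIP97, EIP197 };

struct safexcel_register_offsets {
	uint32_t hia_aic_g;
	uint32_t pe;
	/* remaining blocks elided */
};

struct safexcel_crypto_priv {
	uintptr_t base;	/* stands in for void __iomem *base */
	enum safexcel_eip_version version;
	struct safexcel_register_offsets offsets;
};

/* Same shape as the EIP197_HIA_AIC_G()/EIP197_PE() macros above */
#define HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g)
#define PE(priv)        ((priv)->base + (priv)->offsets.pe)

static void init_register_offsets(struct safexcel_crypto_priv *priv)
{
	if (priv->version == EIP197) {
		priv->offsets.hia_aic_g = 0x90000; /* EIP197_HIA_AIC_G_BASE */
		priv->offsets.pe        = 0xa0000; /* EIP197_PE_BASE */
	} else {
		priv->offsets.hia_aic_g = 0x0;     /* EIP97_HIA_AIC_G_BASE */
		priv->offsets.pe        = 0x10000; /* EIP97_PE_BASE */
	}
}

int main(void)
{
	struct safexcel_crypto_priv priv = { .base = 0, .version = EIP97 };

	init_register_offsets(&priv);
	/* EIP197_HIA_OPTIONS (0xfff8) now resolves per variant: */
	printf("HIA_OPTIONS at %#lx\n",
	       (unsigned long)(HIA_AIC_G(&priv) + 0xfff8));
	return 0;
}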
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 5438552bc6d7..63a8768ed2ae 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -14,6 +14,7 @@
 
 #include <crypto/aes.h>
 #include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
 
 #include "safexcel.h"
 
@@ -26,13 +27,17 @@ struct safexcel_cipher_ctx {
 	struct safexcel_context base;
 	struct safexcel_crypto_priv *priv;
 
-	enum safexcel_cipher_direction direction;
 	u32 mode;
 
 	__le32 key[8];
 	unsigned int key_len;
 };
 
+struct safexcel_cipher_req {
+	enum safexcel_cipher_direction direction;
+	bool needs_inv;
+};
+
 static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
 				  struct crypto_async_request *async,
 				  struct safexcel_command_desc *cdesc,
@@ -64,6 +69,7 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
 {
 	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
 	struct crypto_aes_ctx aes;
 	int ret, i;
 
@@ -73,10 +79,12 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
 		return ret;
 	}
 
-	for (i = 0; i < len / sizeof(u32); i++) {
-		if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
-			ctx->base.needs_inv = true;
-			break;
+	if (priv->version == EIP197 && ctx->base.ctxr_dma) {
+		for (i = 0; i < len / sizeof(u32); i++) {
+			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+				ctx->base.needs_inv = true;
+				break;
+			}
 		}
 	}
 
@@ -90,12 +98,15 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
 }
 
 static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
+				    struct crypto_async_request *async,
 				    struct safexcel_command_desc *cdesc)
 {
 	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
 	int ctrl_size;
 
-	if (ctx->direction == SAFEXCEL_ENCRYPT)
+	if (sreq->direction == SAFEXCEL_ENCRYPT)
 		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
 	else
 		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;
@@ -126,9 +137,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
 	return 0;
 }
 
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
-				  struct crypto_async_request *async,
-				  bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+				      struct crypto_async_request *async,
+				      bool *should_complete, int *ret)
 {
 	struct skcipher_request *req = skcipher_request_cast(async);
 	struct safexcel_result_desc *rdesc;
@@ -238,7 +249,7 @@ static int safexcel_aes_send(struct crypto_async_request *async,
 		n_cdesc++;
 
 		if (n_cdesc == 1) {
-			safexcel_context_control(ctx, cdesc);
+			safexcel_context_control(ctx, async, cdesc);
 			safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
 		}
 
@@ -265,7 +276,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
 	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	request->req = &req->base;
-	ctx->base.handle_result = safexcel_handle_result;
 
 	*commands = n_cdesc;
 	*results = n_rdesc;
@@ -341,8 +351,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 
 	ring = safexcel_select_ring(priv);
 	ctx->base.ring = ring;
-	ctx->base.needs_inv = false;
-	ctx->base.send = safexcel_aes_send;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
 	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -351,14 +359,34 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	if (enq_ret != -EINPROGRESS)
 		*ret = enq_ret;
 
-	if (!priv->ring[ring].need_dequeue)
-		safexcel_dequeue(priv, ring);
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
 
 	*should_complete = false;
 
 	return ndesc;
 }
 
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+				  struct crypto_async_request *async,
+				  bool *should_complete, int *ret)
+{
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	int err;
+
+	if (sreq->needs_inv) {
+		sreq->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async,
+						 should_complete, ret);
+	}
+
+	return err;
+}
+
 static int safexcel_cipher_send_inv(struct crypto_async_request *async,
 				    int ring, struct safexcel_request *request,
 				    int *commands, int *results)
@@ -368,9 +396,7 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret;
 
-	ctx->base.handle_result = safexcel_handle_inv_result;
-
-	ret = safexcel_invalidate_cache(async, &ctx->base, priv,
+	ret = safexcel_invalidate_cache(async, priv,
 					ctx->base.ctxr_dma, ring, request);
 	if (unlikely(ret))
 		return ret;
@@ -381,32 +407,54 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
 	return 0;
 }
 
+static int safexcel_send(struct crypto_async_request *async,
+			 int ring, struct safexcel_request *request,
+			 int *commands, int *results)
+{
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+
+	if (sreq->needs_inv)
+		ret = safexcel_cipher_send_inv(async, ring, request,
+					       commands, results);
+	else
+		ret = safexcel_aes_send(async, ring, request,
+					commands, results);
+	return ret;
+}
+
 static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	struct skcipher_request req;
+	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(&req, 0, sizeof(struct skcipher_request));
+	memset(req, 0, sizeof(struct skcipher_request));
 
 	/* create invalidation request */
 	init_completion(&result.completion);
-	skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-					safexcel_inv_complete, &result);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      safexcel_inv_complete, &result);
 
-	skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
-	ctx = crypto_tfm_ctx(req.base.tfm);
+	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
 	ctx->base.exit_inv = true;
-	ctx->base.send = safexcel_cipher_send_inv;
+	sreq->needs_inv = true;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
-	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-	if (!priv->ring[ring].need_dequeue)
-		safexcel_dequeue(priv, ring);
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
 
 	wait_for_completion_interruptible(&result.completion);
 
@@ -424,19 +472,21 @@ static int safexcel_aes(struct skcipher_request *req,
 			enum safexcel_cipher_direction dir, u32 mode)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret, ring;
 
-	ctx->direction = dir;
+	sreq->needs_inv = false;
+	sreq->direction = dir;
 	ctx->mode = mode;
 
 	if (ctx->base.ctxr) {
-		if (ctx->base.needs_inv)
-			ctx->base.send = safexcel_cipher_send_inv;
+		if (priv->version == EIP197 && ctx->base.needs_inv) {
+			sreq->needs_inv = true;
+			ctx->base.needs_inv = false;
+		}
 	} else {
 		ctx->base.ring = safexcel_select_ring(priv);
-		ctx->base.send = safexcel_aes_send;
-
 		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
 						 EIP197_GFP_FLAGS(req->base),
 						 &ctx->base.ctxr_dma);
@@ -450,8 +500,8 @@ static int safexcel_aes(struct skcipher_request *req,
 	ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-	if (!priv->ring[ring].need_dequeue)
-		safexcel_dequeue(priv, ring);
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
 
 	return ret;
 }
@@ -476,6 +526,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
 			     alg.skcipher.base);
 
 	ctx->priv = tmpl->priv;
+	ctx->base.send = safexcel_send;
+	ctx->base.handle_result = safexcel_handle_result;
+
+	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+				    sizeof(struct safexcel_cipher_req));
 
 	return 0;
 }
@@ -494,9 +549,14 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
 
 	memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));
 
-	ret = safexcel_cipher_exit_inv(tfm);
-	if (ret)
-		dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+	if (priv->version == EIP197) {
+		ret = safexcel_cipher_exit_inv(tfm);
+		if (ret)
+			dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+	} else {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+	}
 }
 
 struct safexcel_alg_template safexcel_alg_ecb_aes = {
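The cipher rework replaces the shared per-context send/handle_result function pointers with a per-request needs_inv flag that is demultiplexed when the request is actually processed. The user-space model below shows only that dispatch pattern; the two leaf functions are stubs, and the rationale in the comment is our reading of the change rather than something the diff states.

#include <stdbool.h>
#include <stdio.h>

struct safexcel_cipher_req {
	bool needs_inv; /* decided at enqueue time, owned by this request */
};

static int send_inv(void) { puts("send: context invalidation"); return 0; }
static int send_aes(void) { puts("send: AES crypto operation"); return 0; }

/* Mirrors safexcel_send(): one entry point routed per request. With the
 * old shared ctx->base.send pointer, a later setkey() on the same tfm
 * could redirect requests that were already queued; a per-request flag
 * cannot be flipped under a queued request. */
static int safexcel_send_model(struct safexcel_cipher_req *sreq)
{
	return sreq->needs_inv ? send_inv() : send_aes();
}

int main(void)
{
	struct safexcel_cipher_req inv = { .needs_inv = true };
	struct safexcel_cipher_req aes = { .needs_inv = false };

	safexcel_send_model(&inv);
	safexcel_send_model(&aes);
	return 0;
}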
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 74feb6227101..122a2a58e98f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -14,7 +14,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/dmapool.h>
 
-
 #include "safexcel.h"
 
 struct safexcel_ahash_ctx {
@@ -32,9 +31,12 @@ struct safexcel_ahash_req {
 	bool last_req;
 	bool finish;
 	bool hmac;
+	bool needs_inv;
+
+	int nents;
 
 	u8 state_sz;    /* expected sate size, only set once */
-	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
 
 	u64 len;
 	u64 processed;
@@ -119,15 +121,15 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
 	}
 }
 
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
-				  struct crypto_async_request *async,
-				  bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+				      struct crypto_async_request *async,
+				      bool *should_complete, int *ret)
 {
 	struct safexcel_result_desc *rdesc;
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
 	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
-	int cache_len, result_sz = sreq->state_sz;
+	int cache_len;
 
 	*ret = 0;
 
@@ -148,11 +150,13 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
 	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	if (sreq->finish)
-		result_sz = crypto_ahash_digestsize(ahash);
-	memcpy(sreq->state, areq->result, result_sz);
+		memcpy(areq->result, sreq->state,
+		       crypto_ahash_digestsize(ahash));
 
-	dma_unmap_sg(priv->dev, areq->src,
-		     sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
+	if (sreq->nents) {
+		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+		sreq->nents = 0;
+	}
 
 	safexcel_free_context(priv, async, sreq->state_sz);
 
@@ -165,9 +169,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
 	return 1;
 }
 
-static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
-			       struct safexcel_request *request, int *commands,
-			       int *results)
+static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+				   struct safexcel_request *request,
				   int *commands, int *results)
 {
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
@@ -177,7 +181,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
 	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
 	struct safexcel_result_desc *rdesc;
 	struct scatterlist *sg;
-	int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+	int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
 
 	queued = len = req->len - req->processed;
 	if (queued < crypto_ahash_blocksize(ahash))
@@ -185,17 +189,31 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
 	else
 		cache_len = queued - areq->nbytes;
 
-	/*
-	 * If this is not the last request and the queued data does not fit
-	 * into full blocks, cache it for the next send() call.
-	 */
-	extra = queued & (crypto_ahash_blocksize(ahash) - 1);
-	if (!req->last_req && extra) {
-		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
-				   req->cache_next, extra, areq->nbytes - extra);
-
-		queued -= extra;
-		len -= extra;
+	if (!req->last_req) {
+		/* If this is not the last request and the queued data does not
+		 * fit into full blocks, cache it for the next send() call.
+		 */
+		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+		if (!extra)
+			/* If this is not the last request and the queued data
+			 * is a multiple of a block, cache the last one for now.
+			 */
+			extra = queued - crypto_ahash_blocksize(ahash);
+
+		if (extra) {
+			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+					   req->cache_next, extra,
+					   areq->nbytes - extra);
+
+			queued -= extra;
+			len -= extra;
+
+			if (!queued) {
+				*commands = 0;
+				*results = 0;
+				return 0;
+			}
+		}
 	}
 
 	spin_lock_bh(&priv->ring[ring].egress_lock);
@@ -233,15 +251,15 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
 	}
 
 	/* Now handle the current ahash request buffer(s) */
-	nents = dma_map_sg(priv->dev, areq->src,
-			   sg_nents_for_len(areq->src, areq->nbytes),
-			   DMA_TO_DEVICE);
-	if (!nents) {
+	req->nents = dma_map_sg(priv->dev, areq->src,
+				sg_nents_for_len(areq->src, areq->nbytes),
+				DMA_TO_DEVICE);
+	if (!req->nents) {
 		ret = -ENOMEM;
 		goto cdesc_rollback;
 	}
 
-	for_each_sg(areq->src, sg, nents, i) {
+	for_each_sg(areq->src, sg, req->nents, i) {
 		int sglen = sg_dma_len(sg);
 
 		/* Do not overflow the request */
@@ -273,7 +291,7 @@ send_command:
 	/* Add the token */
 	safexcel_hash_token(first_cdesc, len, req->state_sz);
 
-	ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
+	ctx->base.result_dma = dma_map_single(priv->dev, req->state,
 					      req->state_sz, DMA_FROM_DEVICE);
 	if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
 		ret = -EINVAL;
@@ -292,7 +310,6 @@ send_command:
 	req->processed += len;
 
 	request->req = &areq->base;
-	ctx->base.handle_result = safexcel_handle_result;
 
 	*commands = n_cdesc;
 	*results = 1;
@@ -374,8 +391,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 
 	ring = safexcel_select_ring(priv);
 	ctx->base.ring = ring;
-	ctx->base.needs_inv = false;
-	ctx->base.send = safexcel_ahash_send;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
 	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -384,14 +399,36 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	if (enq_ret != -EINPROGRESS)
 		*ret = enq_ret;
 
-	if (!priv->ring[ring].need_dequeue)
-		safexcel_dequeue(priv, ring);
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
 
 	*should_complete = false;
 
 	return 1;
 }
 
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+				  struct crypto_async_request *async,
+				  bool *should_complete, int *ret)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int err;
+
+	BUG_ON(priv->version == EIP97 && req->needs_inv);
+
+	if (req->needs_inv) {
+		req->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async,
+						 should_complete, ret);
+	}
+
+	return err;
+}
+
 static int safexcel_ahash_send_inv(struct crypto_async_request *async,
 				   int ring, struct safexcel_request *request,
 				   int *commands, int *results)
@@ -400,8 +437,7 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	int ret;
 
-	ctx->base.handle_result = safexcel_handle_inv_result;
-	ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
+	ret = safexcel_invalidate_cache(async, ctx->priv,
 					ctx->base.ctxr_dma, ring, request);
 	if (unlikely(ret))
 		return ret;
@@ -412,32 +448,50 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
 	return 0;
 }
 
+static int safexcel_ahash_send(struct crypto_async_request *async,
+			       int ring, struct safexcel_request *request,
+			       int *commands, int *results)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int ret;
+
+	if (req->needs_inv)
+		ret = safexcel_ahash_send_inv(async, ring, request,
+					      commands, results);
+	else
+		ret = safexcel_ahash_send_req(async, ring, request,
+					      commands, results);
+	return ret;
+}
+
 static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 {
 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	struct ahash_request req;
+	AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(&req, 0, sizeof(struct ahash_request));
+	memset(req, 0, sizeof(struct ahash_request));
 
 	/* create invalidation request */
 	init_completion(&result.completion);
-	ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				   safexcel_inv_complete, &result);
 
-	ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
-	ctx = crypto_tfm_ctx(req.base.tfm);
+	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
 	ctx->base.exit_inv = true;
-	ctx->base.send = safexcel_ahash_send_inv;
+	rctx->needs_inv = true;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
-	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-	if (!priv->ring[ring].need_dequeue)
-		safexcel_dequeue(priv, ring);
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
 
 	wait_for_completion_interruptible(&result.completion);
 
@@ -450,13 +504,23 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 	return 0;
 }
 
+/* safexcel_ahash_cache: cache data until at least one request can be sent to
+ * the engine, i.e. when there is at least one block size of data in the pipe.
+ */
 static int safexcel_ahash_cache(struct ahash_request *areq)
 {
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
 	int queued, cache_len;
 
+	/* cache_len: everything accepted by the driver but not sent yet,
+	 * tot sz handled by update() - last req sz - tot sz handled by send()
+	 */
 	cache_len = req->len - areq->nbytes - req->processed;
+	/* queued: everything accepted by the driver which will be handled by
+	 * the next send() calls.
+	 * tot sz handled by update() - tot sz handled by send()
+	 */
 	queued = req->len - req->processed;
 
 	/*
@@ -470,7 +534,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
 		return areq->nbytes;
 	}
 
-	/* We could'nt cache all the data */
+	/* We couldn't cache all the data */
 	return -E2BIG;
 }
 
@@ -481,14 +545,23 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret, ring;
 
-	ctx->base.send = safexcel_ahash_send;
-
-	if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
-		ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+	req->needs_inv = false;
 
 	if (ctx->base.ctxr) {
-		if (ctx->base.needs_inv)
-			ctx->base.send = safexcel_ahash_send_inv;
+		if (priv->version == EIP197 &&
+		    !ctx->base.needs_inv && req->processed &&
+		    ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
+			/* We're still setting needs_inv here, even though it is
+			 * cleared right away, because the needs_inv flag can be
+			 * set in other functions and we want to keep the same
+			 * logic.
+			 */
+			ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+
+		if (ctx->base.needs_inv) {
+			ctx->base.needs_inv = false;
+			req->needs_inv = true;
+		}
 	} else {
 		ctx->base.ring = safexcel_select_ring(priv);
 		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
@@ -504,8 +577,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
 	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-	if (!priv->ring[ring].need_dequeue)
-		safexcel_dequeue(priv, ring);
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
 
 	return ret;
 }
@@ -588,7 +661,6 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
 	export->processed = req->processed;
 
 	memcpy(export->state, req->state, req->state_sz);
-	memset(export->cache, 0, crypto_ahash_blocksize(ahash));
 	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
 
 	return 0;
@@ -622,6 +694,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
 			     struct safexcel_alg_template, alg.ahash);
 
 	ctx->priv = tmpl->priv;
+	ctx->base.send = safexcel_ahash_send;
+	ctx->base.handle_result = safexcel_handle_result;
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct safexcel_ahash_req));
@@ -668,9 +742,14 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
 	if (!ctx->base.ctxr)
 		return;
 
-	ret = safexcel_ahash_exit_inv(tfm);
-	if (ret)
-		dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+	if (priv->version == EIP197) {
+		ret = safexcel_ahash_exit_inv(tfm);
+		if (ret)
+			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+	} else {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+	}
 }
 
 struct safexcel_alg_template safexcel_alg_sha1 = {
@@ -809,7 +888,7 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq,
 	req->last_req = true;
 
 	ret = crypto_ahash_update(areq);
-	if (ret && ret != -EINPROGRESS)
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
 		return ret;
 
 	wait_for_completion_interruptible(&result.completion);
@@ -874,6 +953,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
 				     unsigned int keylen)
 {
 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+	struct safexcel_crypto_priv *priv = ctx->priv;
 	struct safexcel_ahash_export_state istate, ostate;
 	int ret, i;
 
@@ -881,11 +961,13 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
 	if (ret)
 		return ret;
 
-	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
-		if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
-		    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
-			ctx->base.needs_inv = true;
-			break;
+	if (priv->version == EIP197 && ctx->base.ctxr) {
+		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+			if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
+			    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
+				ctx->base.needs_inv = true;
+				break;
+			}
 		}
 	}
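Finally, the bookkeeping documented in the new safexcel_ahash_cache() comments reduces to two subtractions over the request counters. The stand-alone sketch below restates them with a worked example; struct ahash_req_model is a stand-in for the len and processed fields of struct safexcel_ahash_req.

#include <stdio.h>

struct ahash_req_model {
	unsigned long len;       /* total bytes accepted by update() */
	unsigned long processed; /* total bytes consumed by send() */
};

/* queued: accepted by the driver, to be handled by the next send() calls */
static unsigned long queued(const struct ahash_req_model *r)
{
	return r->len - r->processed;
}

/* cache_len: accepted but not sent yet, excluding the current update()'s
 * nbytes (tot sz handled by update() - last req sz - tot sz handled by
 * send()) */
static unsigned long cache_len(const struct ahash_req_model *r,
			       unsigned long nbytes)
{
	return r->len - nbytes - r->processed;
}

int main(void)
{
	/* e.g. 192 bytes accepted so far, 64 already sent, and the current
	 * update() carried 32 of those 192 */
	struct ahash_req_model r = { .len = 192, .processed = 64 };

	printf("queued=%lu cache_len=%lu\n", queued(&r), cache_len(&r, 32));
	return 0;
}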