Diffstat (limited to 'drivers/crypto/inside-secure')
 drivers/crypto/inside-secure/safexcel.c        | 474
 drivers/crypto/inside-secure/safexcel.h        | 201
 drivers/crypto/inside-secure/safexcel_cipher.c | 492
 drivers/crypto/inside-secure/safexcel_hash.c   | 560
 drivers/crypto/inside-secure/safexcel_ring.c   |  63
 5 files changed, 1355 insertions(+), 435 deletions(-)
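
Before the per-file diffs, one structural change is worth a short illustration: registers that used to be single instances are now indexed by processing-engine number, with each PE occupying a 0x2000-byte window (and the HIA ring-arbiter control using an 8-byte stride), and the hardware-init code loops over priv->config.pes read from EIP197_HIA_OPTIONS. The standalone C sketch below is not part of the patch; it only mirrors three of the new macros from safexcel.h and prints the offsets they produce for an assumed two-PE engine.

#include <stdio.h>

/* Illustrative sketch only -- not part of the patch. The macros mirror the
 * per-PE register forms added to safexcel.h, where each processing engine
 * occupies a 0x2000-byte window and the HIA ring-arbiter control uses an
 * 8-byte stride. In the driver the PE count comes from EIP197_HIA_OPTIONS
 * at probe time; here it is a constant chosen for the example. */
#define EIP197_HIA_RA_PE_CTRL(n)       (0x0010 + (8 * (n)))
#define EIP197_PE_ICE_RAM_CTRL(n)      (0x0ff0 + (0x2000 * (n)))
#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))

int main(void)
{
	int pes = 2; /* assumed PE count, for illustration */

	for (int pe = 0; pe < pes; pe++)
		printf("PE%d: RA_PE_CTRL=0x%04x  ICE_RAM_CTRL=0x%04x  EIP96_FUNCTION_EN=0x%04x\n",
		       pe,
		       EIP197_HIA_RA_PE_CTRL(pe),
		       EIP197_PE_ICE_RAM_CTRL(pe),
		       EIP197_PE_EIP96_FUNCTION_EN(pe));

	return 0;
}

For PE 1 this prints 0x2ff0 and 0x3004 for the ICE RAM control and EIP96 function-enable registers, which corresponds to the EIP197_PE(priv) + ...(pe) accesses used throughout the rewritten safexcel_hw_init() below.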
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 4e86f864a952..7e71043457a6 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017 Marvell * * Antoine Tenart <antoine.tenart@free-electrons.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/clk.h> @@ -33,7 +30,19 @@ MODULE_PARM_DESC(max_rings, "Maximum number of rings to use."); static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) { u32 val, htable_offset; - int i; + int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc; + + if (priv->version == EIP197B) { + cs_rc_max = EIP197B_CS_RC_MAX; + cs_ht_wc = EIP197B_CS_HT_WC; + cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC; + cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC; + } else { + cs_rc_max = EIP197D_CS_RC_MAX; + cs_ht_wc = EIP197D_CS_HT_WC; + cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC; + cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC; + } /* Enable the record cache memory access */ val = readl(priv->base + EIP197_CS_RAM_CTRL); @@ -54,7 +63,7 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) writel(val, priv->base + EIP197_TRC_PARAMS); /* Clear all records */ - for (i = 0; i < EIP197_CS_RC_MAX; i++) { + for (i = 0; i < cs_rc_max; i++) { u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE; writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) | @@ -64,14 +73,14 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1); if (i == 0) val |= EIP197_CS_RC_PREV(EIP197_RC_NULL); - else if (i == EIP197_CS_RC_MAX - 1) + else if (i == cs_rc_max - 1) val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL); writel(val, priv->base + offset + sizeof(u32)); } /* Clear the hash table entries */ - htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE; - for (i = 0; i < 64; i++) + htable_offset = cs_rc_max * EIP197_CS_RC_SIZE; + for (i = 0; i < cs_ht_wc; i++) writel(GENMASK(29, 0), priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32)); @@ -82,23 +91,23 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) /* Write head and tail pointers of the record free chain */ val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) | - EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1); + EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1); writel(val, priv->base + EIP197_TRC_FREECHAIN); /* Configure the record cache #1 */ - val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) | - EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX); + val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) | + EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max); writel(val, priv->base + EIP197_TRC_PARAMS2); /* Configure the record cache #2 */ - val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) | + val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) | EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) | EIP197_TRC_PARAMS_HTABLE_SZ(2); writel(val, priv->base + EIP197_TRC_PARAMS); } static void eip197_write_firmware(struct safexcel_crypto_priv *priv, - const struct firmware *fw, u32 ctrl, + const struct firmware *fw, int pe, u32 ctrl, u32 prog_en) { const u32 *data = (const u32 *)fw->data; @@ -112,7 +121,7 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv, EIP197_PE(priv) + ctrl); /* Enable access to the 
program memory */ - writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL); + writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); /* Write the firmware */ for (i = 0; i < fw->size / sizeof(u32); i++) @@ -120,7 +129,7 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv, priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32)); /* Disable access to the program memory */ - writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL); + writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe)); /* Release engine from reset */ val = readl(EIP197_PE(priv) + ctrl); @@ -132,35 +141,62 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) { const char *fw_name[] = {"ifpp.bin", "ipue.bin"}; const struct firmware *fw[FW_NB]; - int i, j, ret = 0; + char fw_path[31], *dir = NULL; + int i, j, ret = 0, pe; u32 val; + switch (priv->version) { + case EIP197B: + dir = "eip197b"; + break; + case EIP197D: + dir = "eip197d"; + break; + default: + /* No firmware is required */ + return 0; + } + for (i = 0; i < FW_NB; i++) { - ret = request_firmware(&fw[i], fw_name[i], priv->dev); + snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]); + ret = request_firmware(&fw[i], fw_path, priv->dev); if (ret) { - dev_err(priv->dev, - "Failed to request firmware %s (%d)\n", - fw_name[i], ret); - goto release_fw; - } - } - - /* Clear the scratchpad memory */ - val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL); - val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER | - EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN | - EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS | - EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; - writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL); + if (priv->version != EIP197B) + goto release_fw; - memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0, - EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); - - eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL, - EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN); + /* Fallback to the old firmware location for the + * EIP197b. 
+ */ + ret = request_firmware(&fw[i], fw_name[i], priv->dev); + if (ret) { + dev_err(priv->dev, + "Failed to request firmware %s (%d)\n", + fw_name[i], ret); + goto release_fw; + } + } + } - eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL, - EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN); + for (pe = 0; pe < priv->config.pes; pe++) { + /* Clear the scratchpad memory */ + val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe)); + val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER | + EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN | + EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS | + EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; + writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe)); + + memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0, + EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); + + eip197_write_firmware(priv, fw[FW_IFPP], pe, + EIP197_PE_ICE_FPP_CTRL(pe), + EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN); + + eip197_write_firmware(priv, fw[FW_IPUE], pe, + EIP197_PE_ICE_PUE_CTRL(pe), + EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN); + } release_fw: for (j = 0; j < i; j++) @@ -256,7 +292,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) static int safexcel_hw_init(struct safexcel_crypto_priv *priv) { u32 version, val; - int i, ret; + int i, ret, pe; /* Determine endianess and configure byte swap */ version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION); @@ -267,6 +303,10 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE) val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24); + /* For EIP197 set maximum number of TX commands to 2^5 = 32 */ + if (priv->version == EIP197B || priv->version == EIP197D) + val |= EIP197_MST_CTRL_TX_MAX_CMD(5); + writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); /* Configure wr/rd cache values */ @@ -282,82 +322,94 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) /* Clear any pending interrupt */ writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK); - /* Data Fetch Engine configuration */ - - /* Reset all DFE threads */ - writel(EIP197_DxE_THR_CTRL_RESET_PE, - EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL); - - if (priv->version == EIP197) { - /* Reset HIA input interface arbiter */ - writel(EIP197_HIA_RA_PE_CTRL_RESET, - EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL); - } - - /* DMA transfer size to use */ - val = EIP197_HIA_DFE_CFG_DIS_DEBUG; - val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9); - val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7); - val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS); - val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS); - writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG); - - /* Leave the DFE threads reset state */ - writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL); - - /* Configure the procesing engine thresholds */ - writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9), - EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES); - writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7), - EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES); - - if (priv->version == EIP197) { - /* enable HIA input interface arbiter and rings */ - writel(EIP197_HIA_RA_PE_CTRL_EN | - GENMASK(priv->config.rings - 1, 0), - EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL); - } - - /* Data Store Engine configuration */ - - /* Reset all DSE threads */ - writel(EIP197_DxE_THR_CTRL_RESET_PE, - EIP197_HIA_DSE_THR(priv) + 
EIP197_HIA_DSE_THR_CTRL); - - /* Wait for all DSE threads to complete */ - while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) & - GENMASK(15, 12)) != GENMASK(15, 12)) - ; - - /* DMA transfer size to use */ - val = EIP197_HIA_DSE_CFG_DIS_DEBUG; - val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8); - val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS); - val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE; - /* FIXME: instability issues can occur for EIP97 but disabling it impact - * performances. - */ - if (priv->version == EIP197) - val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR; - writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG); + /* Processing Engine configuration */ + for (pe = 0; pe < priv->config.pes; pe++) { + /* Data Fetch Engine configuration */ - /* Leave the DSE threads reset state */ - writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL); + /* Reset all DFE threads */ + writel(EIP197_DxE_THR_CTRL_RESET_PE, + EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe)); - /* Configure the procesing engine thresholds */ - writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8), - EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES); + if (priv->version == EIP197B || priv->version == EIP197D) { + /* Reset HIA input interface arbiter */ + writel(EIP197_HIA_RA_PE_CTRL_RESET, + EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe)); + } - /* Processing Engine configuration */ + /* DMA transfer size to use */ + val = EIP197_HIA_DFE_CFG_DIS_DEBUG; + val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) | + EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9); + val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) | + EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7); + val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS); + val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS); + writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe)); + + /* Leave the DFE threads reset state */ + writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe)); + + /* Configure the processing engine thresholds */ + writel(EIP197_PE_IN_xBUF_THRES_MIN(6) | + EIP197_PE_IN_xBUF_THRES_MAX(9), + EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe)); + writel(EIP197_PE_IN_xBUF_THRES_MIN(6) | + EIP197_PE_IN_xBUF_THRES_MAX(7), + EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe)); + + if (priv->version == EIP197B || priv->version == EIP197D) { + /* enable HIA input interface arbiter and rings */ + writel(EIP197_HIA_RA_PE_CTRL_EN | + GENMASK(priv->config.rings - 1, 0), + EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe)); + } - /* H/W capabilities selection */ - val = EIP197_FUNCTION_RSVD; - val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY; - val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT; - val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC; - val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1; - val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2; - writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN); + /* Data Store Engine configuration */ + + /* Reset all DSE threads */ + writel(EIP197_DxE_THR_CTRL_RESET_PE, + EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe)); + + /* Wait for all DSE threads to complete */ + while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) & + GENMASK(15, 12)) != GENMASK(15, 12)) + ; + + /* DMA transfer size to use */ + val = EIP197_HIA_DSE_CFG_DIS_DEBUG; + val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | + EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8); + val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS); + val |= 
EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE; + /* FIXME: instability issues can occur for EIP97 but disabling it impact + * performances. + */ + if (priv->version == EIP197B || priv->version == EIP197D) + val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR; + writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe)); + + /* Leave the DSE threads reset state */ + writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe)); + + /* Configure the procesing engine thresholds */ + writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | + EIP197_PE_OUT_DBUF_THRES_MAX(8), + EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe)); + + /* Processing Engine configuration */ + + /* H/W capabilities selection */ + val = EIP197_FUNCTION_RSVD; + val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY; + val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT; + val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC; + val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC; + val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC; + val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5; + val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1; + val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2; + writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe)); + } /* Command Descriptor Rings prepare */ for (i = 0; i < priv->config.rings; i++) { @@ -408,18 +460,20 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE); } - /* Enable command descriptor rings */ - writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), - EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL); + for (pe = 0; pe < priv->config.pes; pe++) { + /* Enable command descriptor rings */ + writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), + EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe)); - /* Enable result descriptor rings */ - writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), - EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL); + /* Enable result descriptor rings */ + writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), + EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe)); + } /* Clear any HIA interrupt */ writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK); - if (priv->version == EIP197) { + if (priv->version == EIP197B || priv->version == EIP197D) { eip197_trc_cache_init(priv); ret = eip197_load_firmwares(priv); @@ -452,7 +506,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring) { struct crypto_async_request *req, *backlog; struct safexcel_context *ctx; - struct safexcel_request *request; int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results; /* If a request wasn't properly dequeued because of a lack of resources, @@ -476,16 +529,10 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring) } handle_req: - request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req)); - if (!request) - goto request_failed; - ctx = crypto_tfm_ctx(req->tfm); - ret = ctx->send(req, ring, request, &commands, &results); - if (ret) { - kfree(request); + ret = ctx->send(req, ring, &commands, &results); + if (ret) goto request_failed; - } if (backlog) backlog->complete(backlog, -EINPROGRESS); @@ -494,14 +541,8 @@ handle_req: * to the engine because the input data was cached, continue to * dequeue other requests as this is valid and not an error. 
*/ - if (!commands && !results) { - kfree(request); + if (!commands && !results) continue; - } - - spin_lock_bh(&priv->ring[ring].egress_lock); - list_add_tail(&request->list, &priv->ring[ring].list); - spin_unlock_bh(&priv->ring[ring].egress_lock); cdesc += commands; rdesc += results; @@ -519,7 +560,7 @@ finalize: if (!nreq) return; - spin_lock_bh(&priv->ring[ring].egress_lock); + spin_lock_bh(&priv->ring[ring].lock); priv->ring[ring].requests += nreq; @@ -528,7 +569,7 @@ finalize: priv->ring[ring].busy = true; } - spin_unlock_bh(&priv->ring[ring].egress_lock); + spin_unlock_bh(&priv->ring[ring].lock); /* let the RDR know we have pending descriptors */ writel((rdesc * priv->config.rd_offset) << 2, @@ -560,6 +601,24 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, return -EINVAL; } +inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv, + int ring, + struct safexcel_result_desc *rdesc, + struct crypto_async_request *req) +{ + int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc); + + priv->ring[ring].rdr_req[i] = req; +} + +inline struct crypto_async_request * +safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring) +{ + int i = safexcel_ring_first_rdr_index(priv, ring); + + return priv->ring[ring].rdr_req[i]; +} + void safexcel_complete(struct safexcel_crypto_priv *priv, int ring) { struct safexcel_command_desc *cdesc; @@ -588,21 +647,16 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error) int safexcel_invalidate_cache(struct crypto_async_request *async, struct safexcel_crypto_priv *priv, - dma_addr_t ctxr_dma, int ring, - struct safexcel_request *request) + dma_addr_t ctxr_dma, int ring) { struct safexcel_command_desc *cdesc; struct safexcel_result_desc *rdesc; int ret = 0; - spin_lock_bh(&priv->ring[ring].egress_lock); - /* Prepare command descriptor */ cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma); - if (IS_ERR(cdesc)) { - ret = PTR_ERR(cdesc); - goto unlock; - } + if (IS_ERR(cdesc)) + return PTR_ERR(cdesc); cdesc->control_data.type = EIP197_TYPE_EXTENDED; cdesc->control_data.options = 0; @@ -617,21 +671,20 @@ int safexcel_invalidate_cache(struct crypto_async_request *async, goto cdesc_rollback; } - request->req = async; - goto unlock; + safexcel_rdr_req_set(priv, ring, rdesc, async); + + return ret; cdesc_rollback: safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); -unlock: - spin_unlock_bh(&priv->ring[ring].egress_lock); return ret; } static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv, int ring) { - struct safexcel_request *sreq; + struct crypto_async_request *req; struct safexcel_context *ctx; int ret, i, nreq, ndesc, tot_descs, handled = 0; bool should_complete; @@ -646,28 +699,22 @@ handle_results: goto requests_left; for (i = 0; i < nreq; i++) { - spin_lock_bh(&priv->ring[ring].egress_lock); - sreq = list_first_entry(&priv->ring[ring].list, - struct safexcel_request, list); - list_del(&sreq->list); - spin_unlock_bh(&priv->ring[ring].egress_lock); - - ctx = crypto_tfm_ctx(sreq->req->tfm); - ndesc = ctx->handle_result(priv, ring, sreq->req, + req = safexcel_rdr_req_get(priv, ring); + + ctx = crypto_tfm_ctx(req->tfm); + ndesc = ctx->handle_result(priv, ring, req, &should_complete, &ret); if (ndesc < 0) { - kfree(sreq); dev_err(priv->dev, "failed to handle result (%d)", ndesc); goto acknowledge; } if (should_complete) { local_bh_disable(); - sreq->req->complete(sreq->req, ret); + req->complete(req, ret); local_bh_enable(); } - kfree(sreq); 
tot_descs += ndesc; handled++; } @@ -686,7 +733,7 @@ acknowledge: goto handle_results; requests_left: - spin_lock_bh(&priv->ring[ring].egress_lock); + spin_lock_bh(&priv->ring[ring].lock); priv->ring[ring].requests -= handled; safexcel_try_push_requests(priv, ring); @@ -694,7 +741,7 @@ requests_left: if (!priv->ring[ring].requests) priv->ring[ring].busy = false; - spin_unlock_bh(&priv->ring[ring].egress_lock); + spin_unlock_bh(&priv->ring[ring].lock); } static void safexcel_dequeue_work(struct work_struct *work) @@ -785,17 +832,29 @@ static int safexcel_request_ring_irq(struct platform_device *pdev, const char *n } static struct safexcel_alg_template *safexcel_algs[] = { + &safexcel_alg_ecb_des, + &safexcel_alg_cbc_des, + &safexcel_alg_ecb_des3_ede, + &safexcel_alg_cbc_des3_ede, &safexcel_alg_ecb_aes, &safexcel_alg_cbc_aes, + &safexcel_alg_md5, &safexcel_alg_sha1, &safexcel_alg_sha224, &safexcel_alg_sha256, + &safexcel_alg_sha384, + &safexcel_alg_sha512, + &safexcel_alg_hmac_md5, &safexcel_alg_hmac_sha1, &safexcel_alg_hmac_sha224, &safexcel_alg_hmac_sha256, + &safexcel_alg_hmac_sha384, + &safexcel_alg_hmac_sha512, &safexcel_alg_authenc_hmac_sha1_cbc_aes, &safexcel_alg_authenc_hmac_sha224_cbc_aes, &safexcel_alg_authenc_hmac_sha256_cbc_aes, + &safexcel_alg_authenc_hmac_sha384_cbc_aes, + &safexcel_alg_authenc_hmac_sha512_cbc_aes, }; static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) @@ -805,6 +864,9 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { safexcel_algs[i]->priv = priv; + if (!(safexcel_algs[i]->engines & priv->version)) + continue; + if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher); else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD) @@ -820,6 +882,9 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) fail: for (j = 0; j < i; j++) { + if (!(safexcel_algs[j]->engines & priv->version)) + continue; + if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher); else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD) @@ -836,6 +901,9 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv) int i; for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { + if (!(safexcel_algs[i]->engines & priv->version)) + continue; + if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher); else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD) @@ -847,9 +915,21 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv) static void safexcel_configure(struct safexcel_crypto_priv *priv) { - u32 val, mask; + u32 val, mask = 0; val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); + + /* Read number of PEs from the engine */ + switch (priv->version) { + case EIP197B: + case EIP197D: + mask = EIP197_N_PES_MASK; + break; + default: + mask = EIP97_N_PES_MASK; + } + priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask; + val = (val & GENMASK(27, 25)) >> 25; mask = BIT(val) - 1; @@ -867,7 +947,9 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) { struct safexcel_register_offsets *offsets = &priv->offsets; - if (priv->version == EIP197) { + switch (priv->version) { + case EIP197B: + case EIP197D: offsets->hia_aic = EIP197_HIA_AIC_BASE; offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE; offsets->hia_aic_r = 
EIP197_HIA_AIC_R_BASE; @@ -878,7 +960,8 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE; offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE; offsets->pe = EIP197_PE_BASE; - } else { + break; + case EIP97IES: offsets->hia_aic = EIP97_HIA_AIC_BASE; offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE; offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE; @@ -889,6 +972,7 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE; offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE; offsets->pe = EIP97_PE_BASE; + break; } } @@ -906,6 +990,9 @@ static int safexcel_probe(struct platform_device *pdev) priv->dev = dev; priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev); + if (priv->version == EIP197B || priv->version == EIP197D) + priv->flags |= EIP197_TRC_CACHE; + safexcel_init_register_offsets(priv); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -957,6 +1044,13 @@ static int safexcel_probe(struct platform_device *pdev) safexcel_configure(priv); + priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring), + GFP_KERNEL); + if (!priv->ring) { + ret = -ENOMEM; + goto err_reg_clk; + } + for (i = 0; i < priv->config.rings; i++) { char irq_name[6] = {0}; /* "ringX\0" */ char wq_name[9] = {0}; /* "wq_ringX\0" */ @@ -969,6 +1063,14 @@ static int safexcel_probe(struct platform_device *pdev) if (ret) goto err_reg_clk; + priv->ring[i].rdr_req = devm_kzalloc(dev, + sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE, + GFP_KERNEL); + if (!priv->ring[i].rdr_req) { + ret = -ENOMEM; + goto err_reg_clk; + } + ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL); if (!ring_irq) { ret = -ENOMEM; @@ -1004,9 +1106,7 @@ static int safexcel_probe(struct platform_device *pdev) crypto_init_queue(&priv->ring[i].queue, EIP197_DEFAULT_RING_SIZE); - INIT_LIST_HEAD(&priv->ring[i].list); spin_lock_init(&priv->ring[i].lock); - spin_lock_init(&priv->ring[i].egress_lock); spin_lock_init(&priv->ring[i].queue_lock); } @@ -1034,6 +1134,24 @@ err_core_clk: return ret; } +static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv) +{ + int i; + + for (i = 0; i < priv->config.rings; i++) { + /* clear any pending interrupt */ + writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT); + writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT); + + /* Reset the CDR base address */ + writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO); + writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); + + /* Reset the RDR base address */ + writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO); + writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); + } +} static int safexcel_remove(struct platform_device *pdev) { @@ -1041,6 +1159,8 @@ static int safexcel_remove(struct platform_device *pdev) int i; safexcel_unregister_algorithms(priv); + safexcel_hw_reset_rings(priv); + clk_disable_unprepare(priv->clk); for (i = 0; i < priv->config.rings; i++) @@ -1051,12 +1171,26 @@ static int safexcel_remove(struct platform_device *pdev) static const struct of_device_id safexcel_of_match_table[] = { { + .compatible = "inside-secure,safexcel-eip97ies", + .data = (void *)EIP97IES, + }, + { + .compatible = "inside-secure,safexcel-eip197b", + .data = (void *)EIP197B, + }, + { + .compatible = "inside-secure,safexcel-eip197d", + .data = (void *)EIP197D, + }, + { + /* Deprecated. 
Kept for backward compatibility. */ .compatible = "inside-secure,safexcel-eip97", - .data = (void *)EIP97, + .data = (void *)EIP97IES, }, { + /* Deprecated. Kept for backward compatibility. */ .compatible = "inside-secure,safexcel-eip197", - .data = (void *)EIP197, + .data = (void *)EIP197B, }, {}, }; diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 8b3ee9b59f53..65624a81f0fd 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -1,11 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2017 Marvell * * Antoine Tenart <antoine.tenart@free-electrons.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #ifndef __SAFEXCEL_H__ @@ -95,13 +92,13 @@ #define EIP197_HIA_xDR_STAT 0x003c /* register offsets */ -#define EIP197_HIA_DFE_CFG 0x0000 -#define EIP197_HIA_DFE_THR_CTRL 0x0000 -#define EIP197_HIA_DFE_THR_STAT 0x0004 -#define EIP197_HIA_DSE_CFG 0x0000 -#define EIP197_HIA_DSE_THR_CTRL 0x0000 -#define EIP197_HIA_DSE_THR_STAT 0x0004 -#define EIP197_HIA_RA_PE_CTRL 0x0010 +#define EIP197_HIA_DFE_CFG(n) (0x0000 + (128 * (n))) +#define EIP197_HIA_DFE_THR_CTRL(n) (0x0000 + (128 * (n))) +#define EIP197_HIA_DFE_THR_STAT(n) (0x0004 + (128 * (n))) +#define EIP197_HIA_DSE_CFG(n) (0x0000 + (128 * (n))) +#define EIP197_HIA_DSE_THR_CTRL(n) (0x0000 + (128 * (n))) +#define EIP197_HIA_DSE_THR_STAT(n) (0x0004 + (128 * (n))) +#define EIP197_HIA_RA_PE_CTRL(n) (0x0010 + (8 * (n))) #define EIP197_HIA_RA_PE_STAT 0x0014 #define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000) #define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0xe008 - EIP197_HIA_AIC_R_OFF(r)) @@ -114,18 +111,18 @@ #define EIP197_HIA_MST_CTRL 0xfff4 #define EIP197_HIA_OPTIONS 0xfff8 #define EIP197_HIA_VERSION 0xfffc -#define EIP197_PE_IN_DBUF_THRES 0x0000 -#define EIP197_PE_IN_TBUF_THRES 0x0100 -#define EIP197_PE_ICE_SCRATCH_RAM 0x0800 -#define EIP197_PE_ICE_PUE_CTRL 0x0c80 -#define EIP197_PE_ICE_SCRATCH_CTRL 0x0d04 -#define EIP197_PE_ICE_FPP_CTRL 0x0d80 -#define EIP197_PE_ICE_RAM_CTRL 0x0ff0 -#define EIP197_PE_EIP96_FUNCTION_EN 0x1004 -#define EIP197_PE_EIP96_CONTEXT_CTRL 0x1008 -#define EIP197_PE_EIP96_CONTEXT_STAT 0x100c -#define EIP197_PE_OUT_DBUF_THRES 0x1c00 -#define EIP197_PE_OUT_TBUF_THRES 0x1d00 +#define EIP197_PE_IN_DBUF_THRES(n) (0x0000 + (0x2000 * (n))) +#define EIP197_PE_IN_TBUF_THRES(n) (0x0100 + (0x2000 * (n))) +#define EIP197_PE_ICE_SCRATCH_RAM(n) (0x0800 + (0x2000 * (n))) +#define EIP197_PE_ICE_PUE_CTRL(n) (0x0c80 + (0x2000 * (n))) +#define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n))) +#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n))) +#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n))) +#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n))) +#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n))) +#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n))) +#define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n))) +#define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n))) #define EIP197_MST_CTRL 0xfff4 /* EIP197-specific registers, no indirection */ @@ -184,6 +181,11 @@ #define EIP197_HIA_RA_PE_CTRL_RESET BIT(31) #define EIP197_HIA_RA_PE_CTRL_EN BIT(30) +/* EIP197_HIA_OPTIONS */ +#define EIP197_N_PES_OFFSET 4 +#define EIP197_N_PES_MASK GENMASK(4, 0) +#define EIP97_N_PES_MASK GENMASK(2, 0) + /* EIP197_HIA_AIC_R_ENABLE_CTRL */ #define 
EIP197_CDR_IRQ(n) BIT((n) * 2) #define EIP197_RDR_IRQ(n) BIT((n) * 2 + 1) @@ -217,6 +219,7 @@ #define WR_CACHE_4BITS (WR_CACHE_3BITS << 1 | BIT(0)) #define EIP197_MST_CTRL_RD_CACHE(n) (((n) & 0xf) << 0) #define EIP197_MST_CTRL_WD_CACHE(n) (((n) & 0xf) << 4) +#define EIP197_MST_CTRL_TX_MAX_CMD(n) (((n) & 0xf) << 20) #define EIP197_MST_CTRL_BYTE_SWAP BIT(24) #define EIP197_MST_CTRL_NO_BYTE_SWAP BIT(25) @@ -287,7 +290,7 @@ struct safexcel_context_record { u32 control0; u32 control1; - __le32 data[24]; + __le32 data[40]; } __packed; /* control0 */ @@ -305,14 +308,19 @@ struct safexcel_context_record { #define CONTEXT_CONTROL_NO_FINISH_HASH BIT(5) #define CONTEXT_CONTROL_SIZE(n) ((n) << 8) #define CONTEXT_CONTROL_KEY_EN BIT(16) +#define CONTEXT_CONTROL_CRYPTO_ALG_DES (0x0 << 17) +#define CONTEXT_CONTROL_CRYPTO_ALG_3DES (0x2 << 17) #define CONTEXT_CONTROL_CRYPTO_ALG_AES128 (0x5 << 17) #define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17) #define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17) #define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21) #define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21) +#define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23) #define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23) #define CONTEXT_CONTROL_CRYPTO_ALG_SHA224 (0x4 << 23) #define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_SHA384 (0x6 << 23) +#define CONTEXT_CONTROL_CRYPTO_ALG_SHA512 (0x5 << 23) #define CONTEXT_CONTROL_INV_FR (0x5 << 24) #define CONTEXT_CONTROL_INV_TR (0x6 << 24) @@ -327,6 +335,11 @@ struct safexcel_context_record { #define CONTEXT_CONTROL_COUNTER_MODE BIT(10) #define CONTEXT_CONTROL_HASH_STORE BIT(19) +/* The hash counter given to the engine in the context has a granularity of + * 64 bits. + */ +#define EIP197_COUNTER_BLOCK_SIZE 64 + /* EIP197_CS_RAM_CTRL */ #define EIP197_TRC_ENABLE_0 BIT(4) #define EIP197_TRC_ENABLE_1 BIT(5) @@ -349,13 +362,19 @@ struct safexcel_context_record { #define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18) /* Cache helpers */ -#define EIP197_CS_RC_MAX 52 +#define EIP197B_CS_RC_MAX 52 +#define EIP197D_CS_RC_MAX 96 #define EIP197_CS_RC_SIZE (4 * sizeof(u32)) #define EIP197_CS_RC_NEXT(x) (x) #define EIP197_CS_RC_PREV(x) ((x) << 10) #define EIP197_RC_NULL 0x3ff -#define EIP197_CS_TRC_REC_WC 59 -#define EIP197_CS_TRC_LG_REC_WC 73 +#define EIP197B_CS_TRC_REC_WC 59 +#define EIP197D_CS_TRC_REC_WC 64 +#define EIP197B_CS_TRC_LG_REC_WC 73 +#define EIP197D_CS_TRC_LG_REC_WC 80 +#define EIP197B_CS_HT_WC 64 +#define EIP197D_CS_HT_WC 256 + /* Result data */ struct result_data_desc { @@ -450,6 +469,7 @@ struct safexcel_control_data_desc { #define EIP197_OPTION_MAGIC_VALUE BIT(0) #define EIP197_OPTION_64BIT_CTX BIT(1) #define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8) +#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10) #define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9) #define EIP197_TYPE_EXTENDED 0x3 @@ -480,7 +500,7 @@ enum eip197_fw { FW_NB }; -struct safexcel_ring { +struct safexcel_desc_ring { void *base; void *base_end; dma_addr_t base_dma; @@ -489,8 +509,7 @@ struct safexcel_ring { void *write; void *read; - /* number of elements used in the ring */ - unsigned nr; + /* descriptor element offset */ unsigned offset; }; @@ -500,12 +519,8 @@ enum safexcel_alg_type { SAFEXCEL_ALG_TYPE_AHASH, }; -struct safexcel_request { - struct list_head list; - struct crypto_async_request *req; -}; - struct safexcel_config { + u32 pes; u32 rings; u32 cd_size; @@ -521,9 +536,40 @@ struct safexcel_work_data { int ring; }; +struct safexcel_ring { + spinlock_t lock; + + 
struct workqueue_struct *workqueue; + struct safexcel_work_data work_data; + + /* command/result rings */ + struct safexcel_desc_ring cdr; + struct safexcel_desc_ring rdr; + + /* result ring crypto API request */ + struct crypto_async_request **rdr_req; + + /* queue */ + struct crypto_queue queue; + spinlock_t queue_lock; + + /* Number of requests in the engine. */ + int requests; + + /* The ring is currently handling at least one request */ + bool busy; + + /* Store for current requests when bailing out of the dequeueing + * function when no enough resources are available. + */ + struct crypto_async_request *req; + struct crypto_async_request *backlog; +}; + enum safexcel_eip_version { - EIP97, - EIP197, + EIP97IES = BIT(0), + EIP197B = BIT(1), + EIP197D = BIT(2), }; struct safexcel_register_offsets { @@ -539,6 +585,10 @@ struct safexcel_register_offsets { u32 pe; }; +enum safexcel_flags { + EIP197_TRC_CACHE = BIT(0), +}; + struct safexcel_crypto_priv { void __iomem *base; struct device *dev; @@ -548,46 +598,19 @@ struct safexcel_crypto_priv { enum safexcel_eip_version version; struct safexcel_register_offsets offsets; + u32 flags; /* context DMA pool */ struct dma_pool *context_pool; atomic_t ring_used; - struct { - spinlock_t lock; - spinlock_t egress_lock; - - struct list_head list; - struct workqueue_struct *workqueue; - struct safexcel_work_data work_data; - - /* command/result rings */ - struct safexcel_ring cdr; - struct safexcel_ring rdr; - - /* queue */ - struct crypto_queue queue; - spinlock_t queue_lock; - - /* Number of requests in the engine. */ - int requests; - - /* The ring is currently handling at least one request */ - bool busy; - - /* Store for current requests when bailing out of the dequeueing - * function when no enough resources are available. 
- */ - struct crypto_async_request *req; - struct crypto_async_request *backlog; - } ring[EIP197_MAX_RINGS]; + struct safexcel_ring *ring; }; struct safexcel_context { int (*send)(struct crypto_async_request *req, int ring, - struct safexcel_request *request, int *commands, - int *results); + int *commands, int *results); int (*handle_result)(struct safexcel_crypto_priv *priv, int ring, struct crypto_async_request *req, bool *complete, int *ret); @@ -600,13 +623,13 @@ struct safexcel_context { }; struct safexcel_ahash_export_state { - u64 len; - u64 processed; + u64 len[2]; + u64 processed[2]; u32 digest; - u32 state[SHA256_DIGEST_SIZE / sizeof(u32)]; - u8 cache[SHA256_BLOCK_SIZE]; + u32 state[SHA512_DIGEST_SIZE / sizeof(u32)]; + u8 cache[SHA512_BLOCK_SIZE]; }; /* @@ -617,6 +640,7 @@ struct safexcel_ahash_export_state { struct safexcel_alg_template { struct safexcel_crypto_priv *priv; enum safexcel_alg_type type; + u32 engines; union { struct skcipher_alg skcipher; struct aead_alg aead; @@ -635,16 +659,16 @@ int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, void safexcel_complete(struct safexcel_crypto_priv *priv, int ring); int safexcel_invalidate_cache(struct crypto_async_request *async, struct safexcel_crypto_priv *priv, - dma_addr_t ctxr_dma, int ring, - struct safexcel_request *request); + dma_addr_t ctxr_dma, int ring); int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, - struct safexcel_ring *cdr, - struct safexcel_ring *rdr); + struct safexcel_desc_ring *cdr, + struct safexcel_desc_ring *rdr); int safexcel_select_ring(struct safexcel_crypto_priv *priv); void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv, - struct safexcel_ring *ring); + struct safexcel_desc_ring *ring); +void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int ring); void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv, - struct safexcel_ring *ring); + struct safexcel_desc_ring *ring); struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv, int ring_id, bool first, bool last, @@ -655,21 +679,44 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri int ring_id, bool first, bool last, dma_addr_t data, u32 len); +int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv, + int ring); +int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv, + int ring, + struct safexcel_result_desc *rdesc); +void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv, + int ring, + struct safexcel_result_desc *rdesc, + struct crypto_async_request *req); +inline struct crypto_async_request * +safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring); void safexcel_inv_complete(struct crypto_async_request *req, int error); int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen, void *istate, void *ostate); /* available algorithms */ +extern struct safexcel_alg_template safexcel_alg_ecb_des; +extern struct safexcel_alg_template safexcel_alg_cbc_des; +extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede; +extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede; extern struct safexcel_alg_template safexcel_alg_ecb_aes; extern struct safexcel_alg_template safexcel_alg_cbc_aes; +extern struct safexcel_alg_template safexcel_alg_md5; extern struct safexcel_alg_template safexcel_alg_sha1; extern struct safexcel_alg_template safexcel_alg_sha224; extern struct safexcel_alg_template safexcel_alg_sha256; +extern struct safexcel_alg_template 
safexcel_alg_sha384; +extern struct safexcel_alg_template safexcel_alg_sha512; +extern struct safexcel_alg_template safexcel_alg_hmac_md5; extern struct safexcel_alg_template safexcel_alg_hmac_sha1; extern struct safexcel_alg_template safexcel_alg_hmac_sha224; extern struct safexcel_alg_template safexcel_alg_hmac_sha256; +extern struct safexcel_alg_template safexcel_alg_hmac_sha384; +extern struct safexcel_alg_template safexcel_alg_hmac_sha512; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes; extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes; +extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes; #endif diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 6bb60fda2043..3aef1d43e435 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017 Marvell * * Antoine Tenart <antoine.tenart@free-electrons.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/device.h> @@ -15,6 +12,7 @@ #include <crypto/aead.h> #include <crypto/aes.h> #include <crypto/authenc.h> +#include <crypto/des.h> #include <crypto/sha.h> #include <crypto/skcipher.h> #include <crypto/internal/aead.h> @@ -27,21 +25,28 @@ enum safexcel_cipher_direction { SAFEXCEL_DECRYPT, }; +enum safexcel_cipher_alg { + SAFEXCEL_DES, + SAFEXCEL_3DES, + SAFEXCEL_AES, +}; + struct safexcel_cipher_ctx { struct safexcel_context base; struct safexcel_crypto_priv *priv; u32 mode; + enum safexcel_cipher_alg alg; bool aead; __le32 key[8]; unsigned int key_len; /* All the below is AEAD specific */ - u32 alg; + u32 hash_alg; u32 state_sz; - u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)]; - u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)]; + u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)]; + u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)]; }; struct safexcel_cipher_req { @@ -57,10 +62,24 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, unsigned offset = 0; if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { - offset = AES_BLOCK_SIZE / sizeof(u32); - memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE); + switch (ctx->alg) { + case SAFEXCEL_DES: + offset = DES_BLOCK_SIZE / sizeof(u32); + memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE); + cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; + break; + case SAFEXCEL_3DES: + offset = DES3_EDE_BLOCK_SIZE / sizeof(u32); + memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE); + cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; + break; - cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; + case SAFEXCEL_AES: + offset = AES_BLOCK_SIZE / sizeof(u32); + memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE); + cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; + break; + } } token = (struct safexcel_token *)(cdesc->control_data.token + offset); @@ -145,7 +164,7 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm, return ret; } - if (priv->version == EIP197 && ctx->base.ctxr_dma) { + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { for (i = 0; 
i < len / sizeof(u32); i++) { if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { ctx->base.needs_inv = true; @@ -179,12 +198,12 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key, goto badkey; /* Encryption key */ - if (priv->version == EIP197 && ctx->base.ctxr_dma && + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma && memcmp(ctx->key, keys.enckey, keys.enckeylen)) ctx->base.needs_inv = true; /* Auth key */ - switch (ctx->alg) { + switch (ctx->hash_alg) { case CONTEXT_CONTROL_CRYPTO_ALG_SHA1: if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey, keys.authkeylen, &istate, &ostate)) @@ -200,6 +219,16 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key, keys.authkeylen, &istate, &ostate)) goto badkey; break; + case CONTEXT_CONTROL_CRYPTO_ALG_SHA384: + if (safexcel_hmac_setkey("safexcel-sha384", keys.authkey, + keys.authkeylen, &istate, &ostate)) + goto badkey; + break; + case CONTEXT_CONTROL_CRYPTO_ALG_SHA512: + if (safexcel_hmac_setkey("safexcel-sha512", keys.authkey, + keys.authkeylen, &istate, &ostate)) + goto badkey; + break; default: dev_err(priv->dev, "aead: unsupported hash algorithm\n"); goto badkey; @@ -208,7 +237,7 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key, crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) & CRYPTO_TFM_RES_MASK); - if (priv->version == EIP197 && ctx->base.ctxr_dma && + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma && (memcmp(ctx->ipad, istate.state, ctx->state_sz) || memcmp(ctx->opad, ostate.state, ctx->state_sz))) ctx->base.needs_inv = true; @@ -258,22 +287,28 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, if (ctx->aead) cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC | - ctx->alg; - - switch (ctx->key_len) { - case AES_KEYSIZE_128: - cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128; - break; - case AES_KEYSIZE_192: - cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192; - break; - case AES_KEYSIZE_256: - cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256; - break; - default: - dev_err(priv->dev, "aes keysize not supported: %u\n", - ctx->key_len); - return -EINVAL; + ctx->hash_alg; + + if (ctx->alg == SAFEXCEL_DES) { + cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_DES; + } else if (ctx->alg == SAFEXCEL_3DES) { + cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_3DES; + } else if (ctx->alg == SAFEXCEL_AES) { + switch (ctx->key_len) { + case AES_KEYSIZE_128: + cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128; + break; + case AES_KEYSIZE_192: + cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192; + break; + case AES_KEYSIZE_256: + cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256; + break; + default: + dev_err(priv->dev, "aes keysize not supported: %u\n", + ctx->key_len); + return -EINVAL; + } } ctrl_size = ctx->key_len / sizeof(u32); @@ -298,7 +333,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin *ret = 0; - spin_lock_bh(&priv->ring[ring].egress_lock); do { rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); if (IS_ERR(rdesc)) { @@ -315,7 +349,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin } while (!rdesc->last_seg); safexcel_complete(priv, ring); - spin_unlock_bh(&priv->ring[ring].egress_lock); if (src == dst) { dma_unmap_sg(priv->dev, src, @@ -335,8 +368,7 @@ static int safexcel_handle_req_result(struct 
safexcel_crypto_priv *priv, int rin return ndesc; } -static int safexcel_aes_send(struct crypto_async_request *base, int ring, - struct safexcel_request *request, +static int safexcel_send_req(struct crypto_async_request *base, int ring, struct safexcel_cipher_req *sreq, struct scatterlist *src, struct scatterlist *dst, unsigned int cryptlen, unsigned int assoclen, @@ -346,7 +378,7 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring, struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); struct safexcel_crypto_priv *priv = ctx->priv; struct safexcel_command_desc *cdesc; - struct safexcel_result_desc *rdesc; + struct safexcel_result_desc *rdesc, *first_rdesc = NULL; struct scatterlist *sg; unsigned int totlen = cryptlen + assoclen; int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen; @@ -386,8 +418,6 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring, ctx->opad, ctx->state_sz); } - spin_lock_bh(&priv->ring[ring].egress_lock); - /* command descriptors */ for_each_sg(src, sg, nr_src, i) { int len = sg_dma_len(sg); @@ -434,12 +464,12 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring, ret = PTR_ERR(rdesc); goto rdesc_rollback; } + if (first) + first_rdesc = rdesc; n_rdesc++; } - spin_unlock_bh(&priv->ring[ring].egress_lock); - - request->req = base; + safexcel_rdr_req_set(priv, ring, first_rdesc, base); *commands = n_cdesc; *results = n_rdesc; @@ -452,8 +482,6 @@ cdesc_rollback: for (i = 0; i < n_cdesc; i++) safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); - spin_unlock_bh(&priv->ring[ring].egress_lock); - if (src == dst) { dma_unmap_sg(priv->dev, src, sg_nents_for_len(src, totlen), @@ -481,7 +509,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, *ret = 0; - spin_lock_bh(&priv->ring[ring].egress_lock); do { rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); if (IS_ERR(rdesc)) { @@ -491,17 +518,13 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, break; } - if (rdesc->result_data.error_code) { - dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n", - rdesc->result_data.error_code); - *ret = -EIO; - } + if (likely(!*ret)) + *ret = safexcel_rdesc_check_errors(priv, rdesc); ndesc++; } while (!rdesc->last_seg); safexcel_complete(priv, ring); - spin_unlock_bh(&priv->ring[ring].egress_lock); if (ctx->base.exit_inv) { dma_pool_free(priv->context_pool, ctx->base.ctxr, @@ -577,15 +600,13 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv, } static int safexcel_cipher_send_inv(struct crypto_async_request *base, - int ring, struct safexcel_request *request, - int *commands, int *results) + int ring, int *commands, int *results) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); struct safexcel_crypto_priv *priv = ctx->priv; int ret; - ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring, - request); + ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring); if (unlikely(ret)) return ret; @@ -596,7 +617,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *base, } static int safexcel_skcipher_send(struct crypto_async_request *async, int ring, - struct safexcel_request *request, int *commands, int *results) { struct skcipher_request *req = skcipher_request_cast(async); @@ -605,21 +625,19 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring, struct safexcel_crypto_priv *priv = ctx->priv; int ret; - BUG_ON(priv->version 
== EIP97 && sreq->needs_inv); + BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv); if (sreq->needs_inv) - ret = safexcel_cipher_send_inv(async, ring, request, commands, - results); + ret = safexcel_cipher_send_inv(async, ring, commands, results); else - ret = safexcel_aes_send(async, ring, request, sreq, req->src, + ret = safexcel_send_req(async, ring, sreq, req->src, req->dst, req->cryptlen, 0, 0, req->iv, commands, results); return ret; } static int safexcel_aead_send(struct crypto_async_request *async, int ring, - struct safexcel_request *request, int *commands, - int *results) + int *commands, int *results) { struct aead_request *req = aead_request_cast(async); struct crypto_aead *tfm = crypto_aead_reqtfm(req); @@ -628,14 +646,13 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring, struct safexcel_crypto_priv *priv = ctx->priv; int ret; - BUG_ON(priv->version == EIP97 && sreq->needs_inv); + BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv); if (sreq->needs_inv) - ret = safexcel_cipher_send_inv(async, ring, request, commands, - results); + ret = safexcel_cipher_send_inv(async, ring, commands, results); else - ret = safexcel_aes_send(async, ring, request, sreq, req->src, - req->dst, req->cryptlen, req->assoclen, + ret = safexcel_send_req(async, ring, sreq, req->src, req->dst, + req->cryptlen, req->assoclen, crypto_aead_authsize(tfm), req->iv, commands, results); return ret; @@ -705,9 +722,10 @@ static int safexcel_aead_exit_inv(struct crypto_tfm *tfm) return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result); } -static int safexcel_aes(struct crypto_async_request *base, +static int safexcel_queue_req(struct crypto_async_request *base, struct safexcel_cipher_req *sreq, - enum safexcel_cipher_direction dir, u32 mode) + enum safexcel_cipher_direction dir, u32 mode, + enum safexcel_cipher_alg alg) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); struct safexcel_crypto_priv *priv = ctx->priv; @@ -715,10 +733,11 @@ static int safexcel_aes(struct crypto_async_request *base, sreq->needs_inv = false; sreq->direction = dir; + ctx->alg = alg; ctx->mode = mode; if (ctx->base.ctxr) { - if (priv->version == EIP197 && ctx->base.needs_inv) { + if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) { sreq->needs_inv = true; ctx->base.needs_inv = false; } @@ -745,14 +764,16 @@ static int safexcel_aes(struct crypto_async_request *base, static int safexcel_ecb_aes_encrypt(struct skcipher_request *req) { - return safexcel_aes(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB); + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, + SAFEXCEL_AES); } static int safexcel_ecb_aes_decrypt(struct skcipher_request *req) { - return safexcel_aes(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB); + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, + SAFEXCEL_AES); } static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) @@ -795,7 +816,7 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm) if (safexcel_cipher_cra_exit(tfm)) return; - if (priv->version == EIP197) { + if (priv->flags & EIP197_TRC_CACHE) { ret = safexcel_skcipher_exit_inv(tfm); if (ret) dev_warn(priv->dev, "skcipher: invalidation error %d\n", @@ -815,7 +836,7 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm) if 
(safexcel_cipher_cra_exit(tfm)) return; - if (priv->version == EIP197) { + if (priv->flags & EIP197_TRC_CACHE) { ret = safexcel_aead_exit_inv(tfm); if (ret) dev_warn(priv->dev, "aead: invalidation error %d\n", @@ -828,6 +849,7 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_ecb_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .engines = EIP97IES | EIP197B | EIP197D, .alg.skcipher = { .setkey = safexcel_skcipher_aes_setkey, .encrypt = safexcel_ecb_aes_encrypt, @@ -838,7 +860,7 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = { .cra_name = "ecb(aes)", .cra_driver_name = "safexcel-ecb-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -852,18 +874,21 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = { static int safexcel_cbc_aes_encrypt(struct skcipher_request *req) { - return safexcel_aes(&req->base, skcipher_request_ctx(req), - SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC); + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, + SAFEXCEL_AES); } static int safexcel_cbc_aes_decrypt(struct skcipher_request *req) { - return safexcel_aes(&req->base, skcipher_request_ctx(req), - SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC); + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, + SAFEXCEL_AES); } struct safexcel_alg_template safexcel_alg_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .engines = EIP97IES | EIP197B | EIP197D, .alg.skcipher = { .setkey = safexcel_skcipher_aes_setkey, .encrypt = safexcel_cbc_aes_encrypt, @@ -875,7 +900,7 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = { .cra_name = "cbc(aes)", .cra_driver_name = "safexcel-cbc-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -887,20 +912,234 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = { }, }; +static int safexcel_cbc_des_encrypt(struct skcipher_request *req) +{ + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, + SAFEXCEL_DES); +} + +static int safexcel_cbc_des_decrypt(struct skcipher_request *req) +{ + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, + SAFEXCEL_DES); +} + +static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + u32 tmp[DES_EXPKEY_WORDS]; + int ret; + + if (len != DES_KEY_SIZE) { + crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + ret = des_ekey(tmp, key); + if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + + /* if context exits and key changed, need to invalidate it */ + if (ctx->base.ctxr_dma) + if (memcmp(ctx->key, key, len)) + ctx->base.needs_inv = true; + + memcpy(ctx->key, key, len); + ctx->key_len = len; + + return 0; +} + +struct safexcel_alg_template safexcel_alg_cbc_des = { + .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + 
.engines = EIP97IES | EIP197B | EIP197D, + .alg.skcipher = { + .setkey = safexcel_des_setkey, + .encrypt = safexcel_cbc_des_encrypt, + .decrypt = safexcel_cbc_des_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(des)", + .cra_driver_name = "safexcel-cbc-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_skcipher_cra_init, + .cra_exit = safexcel_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_ecb_des_encrypt(struct skcipher_request *req) +{ + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, + SAFEXCEL_DES); +} + +static int safexcel_ecb_des_decrypt(struct skcipher_request *req) +{ + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, + SAFEXCEL_DES); +} + +struct safexcel_alg_template safexcel_alg_ecb_des = { + .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .engines = EIP97IES | EIP197B | EIP197D, + .alg.skcipher = { + .setkey = safexcel_des_setkey, + .encrypt = safexcel_ecb_des_encrypt, + .decrypt = safexcel_ecb_des_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .base = { + .cra_name = "ecb(des)", + .cra_driver_name = "safexcel-ecb-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_skcipher_cra_init, + .cra_exit = safexcel_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_cbc_des3_ede_encrypt(struct skcipher_request *req) +{ + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, + SAFEXCEL_3DES); +} + +static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req) +{ + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC, + SAFEXCEL_3DES); +} + +static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm, + const u8 *key, unsigned int len) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + if (len != DES3_EDE_KEY_SIZE) { + crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + /* if context exists and key changed, need to invalidate it */ + if (ctx->base.ctxr_dma) { + if (memcmp(ctx->key, key, len)) + ctx->base.needs_inv = true; + } + + memcpy(ctx->key, key, len); + + ctx->key_len = len; + + return 0; +} + +struct safexcel_alg_template safexcel_alg_cbc_des3_ede = { + .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .engines = EIP97IES | EIP197B | EIP197D, + .alg.skcipher = { + .setkey = safexcel_des3_ede_setkey, + .encrypt = safexcel_cbc_des3_ede_encrypt, + .decrypt = safexcel_cbc_des3_ede_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "safexcel-cbc-des3_ede", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, 
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_skcipher_cra_init, + .cra_exit = safexcel_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_ecb_des3_ede_encrypt(struct skcipher_request *req) +{ + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, + SAFEXCEL_3DES); +} + +static int safexcel_ecb_des3_ede_decrypt(struct skcipher_request *req) +{ + return safexcel_queue_req(&req->base, skcipher_request_ctx(req), + SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB, + SAFEXCEL_3DES); +} + +struct safexcel_alg_template safexcel_alg_ecb_des3_ede = { + .type = SAFEXCEL_ALG_TYPE_SKCIPHER, + .engines = EIP97IES | EIP197B | EIP197D, + .alg.skcipher = { + .setkey = safexcel_des3_ede_setkey, + .encrypt = safexcel_ecb_des3_ede_encrypt, + .decrypt = safexcel_ecb_des3_ede_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "safexcel-ecb-des3_ede", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_skcipher_cra_init, + .cra_exit = safexcel_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + static int safexcel_aead_encrypt(struct aead_request *req) { struct safexcel_cipher_req *creq = aead_request_ctx(req); - return safexcel_aes(&req->base, creq, SAFEXCEL_ENCRYPT, - CONTEXT_CONTROL_CRYPTO_MODE_CBC); + return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT, + CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES); } static int safexcel_aead_decrypt(struct aead_request *req) { struct safexcel_cipher_req *creq = aead_request_ctx(req); - return safexcel_aes(&req->base, creq, SAFEXCEL_DECRYPT, - CONTEXT_CONTROL_CRYPTO_MODE_CBC); + return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT, + CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES); } static int safexcel_aead_cra_init(struct crypto_tfm *tfm) @@ -926,13 +1165,14 @@ static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm) struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); safexcel_aead_cra_init(tfm); - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; ctx->state_sz = SHA1_DIGEST_SIZE; return 0; } struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, + .engines = EIP97IES | EIP197B | EIP197D, .alg.aead = { .setkey = safexcel_aead_aes_setkey, .encrypt = safexcel_aead_encrypt, @@ -943,7 +1183,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -960,13 +1200,14 @@ static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm) struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); safexcel_aead_cra_init(tfm); - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; ctx->state_sz = SHA256_DIGEST_SIZE; return 0; } struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { 
.type = SAFEXCEL_ALG_TYPE_AEAD, + .engines = EIP97IES | EIP197B | EIP197D, .alg.aead = { .setkey = safexcel_aead_aes_setkey, .encrypt = safexcel_aead_encrypt, @@ -977,7 +1218,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -994,13 +1235,14 @@ static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm) struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); safexcel_aead_cra_init(tfm); - ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; ctx->state_sz = SHA256_DIGEST_SIZE; return 0; } struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { .type = SAFEXCEL_ALG_TYPE_AEAD, + .engines = EIP97IES | EIP197B | EIP197D, .alg.aead = { .setkey = safexcel_aead_aes_setkey, .encrypt = safexcel_aead_encrypt, @@ -1011,7 +1253,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { .cra_name = "authenc(hmac(sha224),cbc(aes))", .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes", .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), @@ -1022,3 +1264,73 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = { }, }, }; + +static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_cra_init(tfm); + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512; + ctx->state_sz = SHA512_DIGEST_SIZE; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .engines = EIP97IES | EIP197B | EIP197D, + .alg.aead = { + .setkey = safexcel_aead_aes_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha512),cbc(aes))", + .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha512_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm) +{ + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + safexcel_aead_cra_init(tfm); + ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384; + ctx->state_sz = SHA512_DIGEST_SIZE; + return 0; +} + +struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = { + .type = SAFEXCEL_ALG_TYPE_AEAD, + .engines = EIP97IES | EIP197B | EIP197D, + .alg.aead = { + .setkey = safexcel_aead_aes_setkey, + .encrypt = safexcel_aead_encrypt, + .decrypt = safexcel_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha384),cbc(aes))", + .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + 
.cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), + .cra_alignmask = 0, + .cra_init = safexcel_aead_sha384_cra_init, + .cra_exit = safexcel_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index c77b0e1655a8..ac9282c1a5ec 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -1,14 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017 Marvell * * Antoine Tenart <antoine.tenart@free-electrons.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <crypto/hmac.h> +#include <crypto/md5.h> #include <crypto/sha.h> #include <linux/device.h> #include <linux/dma-mapping.h> @@ -22,8 +20,8 @@ struct safexcel_ahash_ctx { u32 alg; - u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)]; - u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)]; + u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)]; + u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)]; }; struct safexcel_ahash_req { @@ -38,18 +36,26 @@ struct safexcel_ahash_req { u32 digest; u8 state_sz; /* expected sate size, only set once */ - u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32)); + u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32)); - u64 len; - u64 processed; + u64 len[2]; + u64 processed[2]; - u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); + u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); dma_addr_t cache_dma; unsigned int cache_sz; - u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); + u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); }; +static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req) +{ + if (req->len[1] > req->processed[1]) + return 0xffffffff - (req->len[0] - req->processed[0]); + + return req->len[0] - req->processed[0]; +} + static void safexcel_hash_token(struct safexcel_command_desc *cdesc, u32 input_length, u32 result_length) { @@ -72,9 +78,9 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc, static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, struct safexcel_ahash_req *req, struct safexcel_command_desc *cdesc, - unsigned int digestsize, - unsigned int blocksize) + unsigned int digestsize) { + struct safexcel_crypto_priv *priv = ctx->priv; int i; cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT; @@ -82,12 +88,17 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, cdesc->control_data.control0 |= req->digest; if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) { - if (req->processed) { - if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1) + if (req->processed[0] || req->processed[1]) { + if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) + cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5); + else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1) cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6); else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 || ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256) cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9); + else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 || + ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512) + cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17); cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT; } else { @@ -102,12 +113,28 @@ static void 
safexcel_context_control(struct safexcel_ahash_ctx *ctx, * fields. Do this now as we need it to setup the first command * descriptor. */ - if (req->processed) { + if (req->processed[0] || req->processed[1]) { for (i = 0; i < digestsize / sizeof(u32); i++) ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]); - if (req->finish) - ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize); + if (req->finish) { + u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE; + count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * + req->processed[1]); + + /* This is a hardware limitation, as the + * counter must fit into a u32. This represents + * a fairly big amount of input data, so we + * shouldn't see this. + */ + if (unlikely(count & 0xffff0000)) { + dev_warn(priv->dev, + "Input data is too big\n"); + return; + } + + ctx->base.ctxr->data[i] = cpu_to_le32(count); + } } } else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) { cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32)); @@ -126,11 +153,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin struct ahash_request *areq = ahash_request_cast(async); struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); struct safexcel_ahash_req *sreq = ahash_request_ctx(areq); - int cache_len; + u64 cache_len; *ret = 0; - spin_lock_bh(&priv->ring[ring].egress_lock); rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); if (IS_ERR(rdesc)) { dev_err(priv->dev, @@ -141,7 +167,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin } safexcel_complete(priv, ring); - spin_unlock_bh(&priv->ring[ring].egress_lock); if (sreq->nents) { dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE); @@ -164,7 +189,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin memcpy(areq->result, sreq->state, crypto_ahash_digestsize(ahash)); - cache_len = sreq->len - sreq->processed; + cache_len = safexcel_queued_len(sreq); if (cache_len) memcpy(sreq->cache, sreq->cache_next, cache_len); @@ -174,7 +199,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin } static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, - struct safexcel_request *request, int *commands, int *results) { struct ahash_request *areq = ahash_request_cast(async); @@ -185,9 +209,10 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, struct safexcel_command_desc *cdesc, *first_cdesc = NULL; struct safexcel_result_desc *rdesc; struct scatterlist *sg; - int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; + int i, extra, n_cdesc = 0, ret = 0; + u64 queued, len, cache_len; - queued = len = req->len - req->processed; + queued = len = safexcel_queued_len(req); if (queued <= crypto_ahash_blocksize(ahash)) cache_len = queued; else @@ -220,16 +245,12 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, } } - spin_lock_bh(&priv->ring[ring].egress_lock); - /* Add a command descriptor for the cached data, if any */ if (cache_len) { req->cache_dma = dma_map_single(priv->dev, req->cache, cache_len, DMA_TO_DEVICE); - if (dma_mapping_error(priv->dev, req->cache_dma)) { - spin_unlock_bh(&priv->ring[ring].egress_lock); + if (dma_mapping_error(priv->dev, req->cache_dma)) return -EINVAL; - } req->cache_sz = cache_len; first_cdesc = safexcel_add_cdesc(priv, ring, 1, @@ -260,7 +281,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int
ring, int sglen = sg_dma_len(sg); /* Do not overflow the request */ - if (queued - sglen < 0) + if (queued < sglen) sglen = queued; cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, @@ -282,8 +303,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, send_command: /* Setup the context options */ - safexcel_context_control(ctx, req, first_cdesc, req->state_sz, - crypto_ahash_blocksize(ahash)); + safexcel_context_control(ctx, req, first_cdesc, req->state_sz); /* Add the token */ safexcel_hash_token(first_cdesc, len, req->state_sz); @@ -303,10 +323,11 @@ send_command: goto unmap_result; } - spin_unlock_bh(&priv->ring[ring].egress_lock); + safexcel_rdr_req_set(priv, ring, rdesc, &areq->base); - req->processed += len; - request->req = &areq->base; + req->processed[0] += len; + if (req->processed[0] < len) + req->processed[1]++; *commands = n_cdesc; *results = 1; @@ -327,7 +348,6 @@ unmap_cache: req->cache_sz = 0; } - spin_unlock_bh(&priv->ring[ring].egress_lock); return ret; } @@ -335,16 +355,18 @@ static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq) { struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); struct safexcel_ahash_req *req = ahash_request_ctx(areq); - struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); unsigned int state_w_sz = req->state_sz / sizeof(u32); + u64 processed; int i; + processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE; + processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1]; + for (i = 0; i < state_w_sz; i++) if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i])) return true; - if (ctx->base.ctxr->data[state_w_sz] != - cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash))) + if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed)) return true; return false; @@ -363,21 +385,16 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, *ret = 0; - spin_lock_bh(&priv->ring[ring].egress_lock); rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); if (IS_ERR(rdesc)) { dev_err(priv->dev, "hash: invalidate: could not retrieve the result descriptor\n"); *ret = PTR_ERR(rdesc); - } else if (rdesc->result_data.error_code) { - dev_err(priv->dev, - "hash: invalidate: result descriptor error (%d)\n", - rdesc->result_data.error_code); - *ret = -EINVAL; + } else { + *ret = safexcel_rdesc_check_errors(priv, rdesc); } safexcel_complete(priv, ring); - spin_unlock_bh(&priv->ring[ring].egress_lock); if (ctx->base.exit_inv) { dma_pool_free(priv->context_pool, ctx->base.ctxr, @@ -413,7 +430,7 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, struct safexcel_ahash_req *req = ahash_request_ctx(areq); int err; - BUG_ON(priv->version == EIP97 && req->needs_inv); + BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv); if (req->needs_inv) { req->needs_inv = false; @@ -428,15 +445,14 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, } static int safexcel_ahash_send_inv(struct crypto_async_request *async, - int ring, struct safexcel_request *request, - int *commands, int *results) + int ring, int *commands, int *results) { struct ahash_request *areq = ahash_request_cast(async); struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); int ret; ret = safexcel_invalidate_cache(async, ctx->priv, - ctx->base.ctxr_dma, ring, request); + ctx->base.ctxr_dma, ring); if (unlikely(ret)) return ret; @@ -447,19 +463,17 @@ static int safexcel_ahash_send_inv(struct 
crypto_async_request *async, } static int safexcel_ahash_send(struct crypto_async_request *async, - int ring, struct safexcel_request *request, - int *commands, int *results) + int ring, int *commands, int *results) { struct ahash_request *areq = ahash_request_cast(async); struct safexcel_ahash_req *req = ahash_request_ctx(areq); int ret; if (req->needs_inv) - ret = safexcel_ahash_send_inv(async, ring, request, - commands, results); + ret = safexcel_ahash_send_inv(async, ring, commands, results); else - ret = safexcel_ahash_send_req(async, ring, request, - commands, results); + ret = safexcel_ahash_send_req(async, ring, commands, results); + return ret; } @@ -509,17 +523,17 @@ static int safexcel_ahash_cache(struct ahash_request *areq) { struct safexcel_ahash_req *req = ahash_request_ctx(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); - int queued, cache_len; + u64 queued, cache_len; - /* cache_len: everyting accepted by the driver but not sent yet, - * tot sz handled by update() - last req sz - tot sz handled by send() - */ - cache_len = req->len - areq->nbytes - req->processed; /* queued: everything accepted by the driver which will be handled by * the next send() calls. * tot sz handled by update() - tot sz handled by send() */ - queued = req->len - req->processed; + queued = safexcel_queued_len(req); + /* cache_len: everything accepted by the driver but not sent yet, + * tot sz handled by update() - last req sz - tot sz handled by send() + */ + cache_len = queued - areq->nbytes; /* * In case there isn't enough bytes to proceed (less than a @@ -546,8 +560,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) req->needs_inv = false; if (ctx->base.ctxr) { - if (priv->version == EIP197 && - !ctx->base.needs_inv && req->processed && + if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv && + (req->processed[0] || req->processed[1]) && req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) /* We're still setting needs_inv here, even though it is * cleared right away, because the needs_inv flag can be @@ -590,7 +604,9 @@ static int safexcel_ahash_update(struct ahash_request *areq) if (!areq->nbytes) return 0; - req->len += areq->nbytes; + req->len[0] += areq->nbytes; + if (req->len[0] < areq->nbytes) + req->len[1]++; safexcel_ahash_cache(areq); @@ -605,7 +621,7 @@ static int safexcel_ahash_update(struct ahash_request *areq) return safexcel_ahash_enqueue(areq); if (!req->last_req && - req->len - req->processed > crypto_ahash_blocksize(ahash)) + safexcel_queued_len(req) > crypto_ahash_blocksize(ahash)) return safexcel_ahash_enqueue(areq); return 0; @@ -620,8 +636,11 @@ static int safexcel_ahash_final(struct ahash_request *areq) req->finish = true; /* If we have an overall 0 length request */ - if (!(req->len + areq->nbytes)) { - if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1) + if (!req->len[0] && !req->len[1] && !areq->nbytes) { + if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) + memcpy(areq->result, md5_zero_message_hash, + MD5_DIGEST_SIZE); + else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1) memcpy(areq->result, sha1_zero_message_hash, SHA1_DIGEST_SIZE); else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224) @@ -630,6 +649,12 @@ static int safexcel_ahash_final(struct ahash_request *areq) else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256) memcpy(areq->result, sha256_zero_message_hash, SHA256_DIGEST_SIZE); + else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384) + memcpy(areq->result, sha384_zero_message_hash, + SHA384_DIGEST_SIZE); + else if (ctx->alg 
== CONTEXT_CONTROL_CRYPTO_ALG_SHA512) + memcpy(areq->result, sha512_zero_message_hash, + SHA512_DIGEST_SIZE); return 0; } @@ -654,8 +679,10 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out) struct safexcel_ahash_req *req = ahash_request_ctx(areq); struct safexcel_ahash_export_state *export = out; - export->len = req->len; - export->processed = req->processed; + export->len[0] = req->len[0]; + export->len[1] = req->len[1]; + export->processed[0] = req->processed[0]; + export->processed[1] = req->processed[1]; export->digest = req->digest; @@ -676,8 +703,10 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in) if (ret) return ret; - req->len = export->len; - req->processed = export->processed; + req->len[0] = export->len[0]; + req->len[1] = export->len[1]; + req->processed[0] = export->processed[0]; + req->processed[1] = export->processed[1]; req->digest = export->digest; @@ -743,7 +772,7 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm) if (!ctx->base.ctxr) return; - if (priv->version == EIP197) { + if (priv->flags & EIP197_TRC_CACHE) { ret = safexcel_ahash_exit_inv(tfm); if (ret) dev_warn(priv->dev, "hash: invalidation error %d\n", ret); @@ -755,6 +784,7 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm) struct safexcel_alg_template safexcel_alg_sha1 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_sha1_init, .update = safexcel_ahash_update, @@ -908,8 +938,7 @@ int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen, u8 *ipad, *opad; int ret; - tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH, - CRYPTO_ALG_TYPE_AHASH_MASK); + tfm = crypto_alloc_ahash(alg, 0, 0); if (IS_ERR(tfm)) return PTR_ERR(tfm); @@ -963,7 +992,7 @@ static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key, if (ret) return ret; - if (priv->version == EIP197 && ctx->base.ctxr) { + if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) { for (i = 0; i < state_sz / sizeof(u32); i++) { if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || ctx->opad[i] != le32_to_cpu(ostate.state[i])) { @@ -988,6 +1017,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, struct safexcel_alg_template safexcel_alg_hmac_sha1 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_hmac_sha1_init, .update = safexcel_ahash_update, @@ -1051,6 +1081,7 @@ static int safexcel_sha256_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_sha256 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_sha256_init, .update = safexcel_ahash_update, @@ -1113,6 +1144,7 @@ static int safexcel_sha224_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_sha224 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_sha224_init, .update = safexcel_ahash_update, @@ -1168,6 +1200,7 @@ static int safexcel_hmac_sha224_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_sha224 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .engines = EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_hmac_sha224_init, .update = safexcel_ahash_update, @@ -1224,6 +1257,7 @@ static int safexcel_hmac_sha256_digest(struct ahash_request *areq) struct safexcel_alg_template safexcel_alg_hmac_sha256 = { .type = SAFEXCEL_ALG_TYPE_AHASH, + .engines 
= EIP97IES | EIP197B | EIP197D, .alg.ahash = { .init = safexcel_hmac_sha256_init, .update = safexcel_ahash_update, @@ -1251,3 +1285,375 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = { }, }, }; + +static int safexcel_sha512_init(struct ahash_request *areq) +{ + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); + struct safexcel_ahash_req *req = ahash_request_ctx(areq); + + memset(req, 0, sizeof(*req)); + + req->state[0] = lower_32_bits(SHA512_H0); + req->state[1] = upper_32_bits(SHA512_H0); + req->state[2] = lower_32_bits(SHA512_H1); + req->state[3] = upper_32_bits(SHA512_H1); + req->state[4] = lower_32_bits(SHA512_H2); + req->state[5] = upper_32_bits(SHA512_H2); + req->state[6] = lower_32_bits(SHA512_H3); + req->state[7] = upper_32_bits(SHA512_H3); + req->state[8] = lower_32_bits(SHA512_H4); + req->state[9] = upper_32_bits(SHA512_H4); + req->state[10] = lower_32_bits(SHA512_H5); + req->state[11] = upper_32_bits(SHA512_H5); + req->state[12] = lower_32_bits(SHA512_H6); + req->state[13] = upper_32_bits(SHA512_H6); + req->state[14] = lower_32_bits(SHA512_H7); + req->state[15] = upper_32_bits(SHA512_H7); + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = SHA512_DIGEST_SIZE; + + return 0; +} + +static int safexcel_sha512_digest(struct ahash_request *areq) +{ + int ret = safexcel_sha512_init(areq); + + if (ret) + return ret; + + return safexcel_ahash_finup(areq); +} + +struct safexcel_alg_template safexcel_alg_sha512 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .engines = EIP97IES | EIP197B | EIP197D, + .alg.ahash = { + .init = safexcel_sha512_init, + .update = safexcel_ahash_update, + .final = safexcel_ahash_final, + .finup = safexcel_ahash_finup, + .digest = safexcel_sha512_digest, + .export = safexcel_ahash_export, + .import = safexcel_ahash_import, + .halg = { + .digestsize = SHA512_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "sha512", + .cra_driver_name = "safexcel-sha512", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_ahash_cra_init, + .cra_exit = safexcel_ahash_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_sha384_init(struct ahash_request *areq) +{ + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); + struct safexcel_ahash_req *req = ahash_request_ctx(areq); + + memset(req, 0, sizeof(*req)); + + req->state[0] = lower_32_bits(SHA384_H0); + req->state[1] = upper_32_bits(SHA384_H0); + req->state[2] = lower_32_bits(SHA384_H1); + req->state[3] = upper_32_bits(SHA384_H1); + req->state[4] = lower_32_bits(SHA384_H2); + req->state[5] = upper_32_bits(SHA384_H2); + req->state[6] = lower_32_bits(SHA384_H3); + req->state[7] = upper_32_bits(SHA384_H3); + req->state[8] = lower_32_bits(SHA384_H4); + req->state[9] = upper_32_bits(SHA384_H4); + req->state[10] = lower_32_bits(SHA384_H5); + req->state[11] = upper_32_bits(SHA384_H5); + req->state[12] = lower_32_bits(SHA384_H6); + req->state[13] = upper_32_bits(SHA384_H6); + req->state[14] = lower_32_bits(SHA384_H7); + req->state[15] = upper_32_bits(SHA384_H7); + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = SHA512_DIGEST_SIZE; + + return 0; +} + +static int safexcel_sha384_digest(struct ahash_request *areq) +{ + int 
ret = safexcel_sha384_init(areq); + + if (ret) + return ret; + + return safexcel_ahash_finup(areq); +} + +struct safexcel_alg_template safexcel_alg_sha384 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .engines = EIP97IES | EIP197B | EIP197D, + .alg.ahash = { + .init = safexcel_sha384_init, + .update = safexcel_ahash_update, + .final = safexcel_ahash_final, + .finup = safexcel_ahash_finup, + .digest = safexcel_sha384_digest, + .export = safexcel_ahash_export, + .import = safexcel_ahash_import, + .halg = { + .digestsize = SHA384_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "sha384", + .cra_driver_name = "safexcel-sha384", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_ahash_cra_init, + .cra_exit = safexcel_ahash_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512", + SHA512_DIGEST_SIZE); +} + +static int safexcel_hmac_sha512_init(struct ahash_request *areq) +{ + struct safexcel_ahash_req *req = ahash_request_ctx(areq); + + safexcel_sha512_init(areq); + req->digest = CONTEXT_CONTROL_DIGEST_HMAC; + return 0; +} + +static int safexcel_hmac_sha512_digest(struct ahash_request *areq) +{ + int ret = safexcel_hmac_sha512_init(areq); + + if (ret) + return ret; + + return safexcel_ahash_finup(areq); +} + +struct safexcel_alg_template safexcel_alg_hmac_sha512 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .engines = EIP97IES | EIP197B | EIP197D, + .alg.ahash = { + .init = safexcel_hmac_sha512_init, + .update = safexcel_ahash_update, + .final = safexcel_ahash_final, + .finup = safexcel_ahash_finup, + .digest = safexcel_hmac_sha512_digest, + .setkey = safexcel_hmac_sha512_setkey, + .export = safexcel_ahash_export, + .import = safexcel_ahash_import, + .halg = { + .digestsize = SHA512_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "hmac(sha512)", + .cra_driver_name = "safexcel-hmac-sha512", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_ahash_cra_init, + .cra_exit = safexcel_ahash_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384", + SHA512_DIGEST_SIZE); +} + +static int safexcel_hmac_sha384_init(struct ahash_request *areq) +{ + struct safexcel_ahash_req *req = ahash_request_ctx(areq); + + safexcel_sha384_init(areq); + req->digest = CONTEXT_CONTROL_DIGEST_HMAC; + return 0; +} + +static int safexcel_hmac_sha384_digest(struct ahash_request *areq) +{ + int ret = safexcel_hmac_sha384_init(areq); + + if (ret) + return ret; + + return safexcel_ahash_finup(areq); +} + +struct safexcel_alg_template safexcel_alg_hmac_sha384 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .engines = EIP97IES | EIP197B | EIP197D, + .alg.ahash = { + .init = safexcel_hmac_sha384_init, + .update = safexcel_ahash_update, + .final = safexcel_ahash_final, + .finup = safexcel_ahash_finup, + .digest = safexcel_hmac_sha384_digest, + .setkey = safexcel_hmac_sha384_setkey, + .export = 
safexcel_ahash_export, + .import = safexcel_ahash_import, + .halg = { + .digestsize = SHA384_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "hmac(sha384)", + .cra_driver_name = "safexcel-hmac-sha384", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_ahash_cra_init, + .cra_exit = safexcel_ahash_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_md5_init(struct ahash_request *areq) +{ + struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); + struct safexcel_ahash_req *req = ahash_request_ctx(areq); + + memset(req, 0, sizeof(*req)); + + req->state[0] = MD5_H0; + req->state[1] = MD5_H1; + req->state[2] = MD5_H2; + req->state[3] = MD5_H3; + + ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5; + req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; + req->state_sz = MD5_DIGEST_SIZE; + + return 0; +} + +static int safexcel_md5_digest(struct ahash_request *areq) +{ + int ret = safexcel_md5_init(areq); + + if (ret) + return ret; + + return safexcel_ahash_finup(areq); +} + +struct safexcel_alg_template safexcel_alg_md5 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .engines = EIP97IES | EIP197B | EIP197D, + .alg.ahash = { + .init = safexcel_md5_init, + .update = safexcel_ahash_update, + .final = safexcel_ahash_final, + .finup = safexcel_ahash_finup, + .digest = safexcel_md5_digest, + .export = safexcel_ahash_export, + .import = safexcel_ahash_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "md5", + .cra_driver_name = "safexcel-md5", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_ahash_cra_init, + .cra_exit = safexcel_ahash_cra_exit, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +static int safexcel_hmac_md5_init(struct ahash_request *areq) +{ + struct safexcel_ahash_req *req = ahash_request_ctx(areq); + + safexcel_md5_init(areq); + req->digest = CONTEXT_CONTROL_DIGEST_HMAC; + return 0; +} + +static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5", + MD5_DIGEST_SIZE); +} + +static int safexcel_hmac_md5_digest(struct ahash_request *areq) +{ + int ret = safexcel_hmac_md5_init(areq); + + if (ret) + return ret; + + return safexcel_ahash_finup(areq); +} + +struct safexcel_alg_template safexcel_alg_hmac_md5 = { + .type = SAFEXCEL_ALG_TYPE_AHASH, + .engines = EIP97IES | EIP197B | EIP197D, + .alg.ahash = { + .init = safexcel_hmac_md5_init, + .update = safexcel_ahash_update, + .final = safexcel_ahash_final, + .finup = safexcel_ahash_finup, + .digest = safexcel_hmac_md5_digest, + .setkey = safexcel_hmac_md5_setkey, + .export = safexcel_ahash_export, + .import = safexcel_ahash_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct safexcel_ahash_export_state), + .base = { + .cra_name = "hmac(md5)", + .cra_driver_name = "safexcel-hmac-md5", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), + .cra_init = safexcel_ahash_cra_init, + .cra_exit = safexcel_ahash_cra_exit, 
+ .cra_module = THIS_MODULE, + }, + }, + }, +}; diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c index c9d2a8716b5b..eb75fa684876 100644 --- a/drivers/crypto/inside-secure/safexcel_ring.c +++ b/drivers/crypto/inside-secure/safexcel_ring.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017 Marvell * * Antoine Tenart <antoine.tenart@free-electrons.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/dma-mapping.h> @@ -14,8 +11,8 @@ #include "safexcel.h" int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, - struct safexcel_ring *cdr, - struct safexcel_ring *rdr) + struct safexcel_desc_ring *cdr, + struct safexcel_desc_ring *rdr) { cdr->offset = sizeof(u32) * priv->config.cd_offset; cdr->base = dmam_alloc_coherent(priv->dev, @@ -24,7 +21,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, if (!cdr->base) return -ENOMEM; cdr->write = cdr->base; - cdr->base_end = cdr->base + cdr->offset * EIP197_DEFAULT_RING_SIZE; + cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1); cdr->read = cdr->base; rdr->offset = sizeof(u32) * priv->config.rd_offset; @@ -34,7 +31,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, if (!rdr->base) return -ENOMEM; rdr->write = rdr->base; - rdr->base_end = rdr->base + rdr->offset * EIP197_DEFAULT_RING_SIZE; + rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1); rdr->read = rdr->base; return 0; @@ -46,49 +43,73 @@ inline int safexcel_select_ring(struct safexcel_crypto_priv *priv) } static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv, - struct safexcel_ring *ring) + struct safexcel_desc_ring *ring) { void *ptr = ring->write; - if (ring->nr == EIP197_DEFAULT_RING_SIZE - 1) + if ((ring->write == ring->read - ring->offset) || + (ring->read == ring->base && ring->write == ring->base_end)) return ERR_PTR(-ENOMEM); - ring->write += ring->offset; if (ring->write == ring->base_end) ring->write = ring->base; + else + ring->write += ring->offset; - ring->nr++; return ptr; } void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv, - struct safexcel_ring *ring) + struct safexcel_desc_ring *ring) { void *ptr = ring->read; - if (!ring->nr) + if (ring->write == ring->read) return ERR_PTR(-ENOENT); - ring->read += ring->offset; if (ring->read == ring->base_end) ring->read = ring->base; + else + ring->read += ring->offset; - ring->nr--; return ptr; } +inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv, + int ring) +{ + struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr; + + return rdr->read; +} + +inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv, + int ring) +{ + struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr; + + return (rdr->read - rdr->base) / rdr->offset; +} + +inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv, + int ring, + struct safexcel_result_desc *rdesc) +{ + struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr; + + return ((void *)rdesc - rdr->base) / rdr->offset; +} + void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv, - struct safexcel_ring *ring) + struct safexcel_desc_ring *ring) { - if (!ring->nr) + if (ring->write == ring->read) return; if (ring->write == ring->base) - ring->write = ring->base_end - 
ring->offset; + ring->write = ring->base_end; else ring->write -= ring->offset; - - ring->nr--; } struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv, |
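The safexcel_ring.c hunks above drop the descriptor ring's explicit element counter and derive the full/empty state from the read and write pointers alone, giving up one descriptor slot so the two states stay distinguishable (which is why base_end now points at the last usable slot, base + offset * (EIP197_DEFAULT_RING_SIZE - 1)). Below is a minimal, self-contained userspace sketch of that occupancy scheme; it is illustration only, not driver code: the demo_ring type, the DEMO_* constants and the main() harness are invented names, and the full-ring test is reordered slightly so it never computes read - offset while read still sits at base.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define DEMO_RING_SIZE  8  /* slots; stands in for EIP197_DEFAULT_RING_SIZE */
#define DEMO_DESC_WORDS 4  /* words per descriptor; stands in for cd_offset/rd_offset */

struct demo_ring {
	unsigned int base[DEMO_RING_SIZE * DEMO_DESC_WORDS];
	unsigned int *base_end;	/* last usable slot, not one-past-the-end */
	unsigned int *write;	/* next slot handed out to a producer */
	unsigned int *read;	/* next slot handed back to a consumer */
	size_t offset;		/* slot stride, in array elements */
};

static void demo_ring_init(struct demo_ring *r)
{
	memset(r->base, 0, sizeof(r->base));
	r->offset = DEMO_DESC_WORDS;
	r->base_end = r->base + r->offset * (DEMO_RING_SIZE - 1);
	r->write = r->base;
	r->read = r->base;
}

/* Reserve the next write slot; the ring is full when the write pointer
 * sits one slot behind the read pointer (with wrap-around). */
static unsigned int *demo_ring_next_wptr(struct demo_ring *r)
{
	unsigned int *ptr = r->write;

	if ((r->read != r->base && r->write == r->read - r->offset) ||
	    (r->read == r->base && r->write == r->base_end))
		return NULL;	/* full: one slot stays unused on purpose */

	if (r->write == r->base_end)
		r->write = r->base;
	else
		r->write += r->offset;

	return ptr;
}

/* Consume the oldest slot; the ring is empty when both pointers meet. */
static unsigned int *demo_ring_next_rptr(struct demo_ring *r)
{
	unsigned int *ptr = r->read;

	if (r->write == r->read)
		return NULL;	/* empty */

	if (r->read == r->base_end)
		r->read = r->base;
	else
		r->read += r->offset;

	return ptr;
}

int main(void)
{
	struct demo_ring ring;
	int produced = 0, consumed = 0;

	demo_ring_init(&ring);

	/* Only DEMO_RING_SIZE - 1 slots can be outstanding at any time. */
	while (demo_ring_next_wptr(&ring))
		produced++;
	while (demo_ring_next_rptr(&ring))
		consumed++;

	printf("produced %d, consumed %d out of %d slots\n",
	       produced, consumed, DEMO_RING_SIZE);
	return 0;
}

Sacrificing one slot is the usual trade-off for pointer-only rings: the alternative, keeping an element count as the removed ->nr field did, needs extra bookkeeping on both the enqueue and dequeue paths.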