Diffstat (limited to 'drivers/crypto/ccree')
-rw-r--r-- | drivers/crypto/ccree/cc_aead.c          | 176
-rw-r--r-- | drivers/crypto/ccree/cc_aead.h          |   3
-rw-r--r-- | drivers/crypto/ccree/cc_buffer_mgr.c    | 229
-rw-r--r-- | drivers/crypto/ccree/cc_buffer_mgr.h    |   5
-rw-r--r-- | drivers/crypto/ccree/cc_cipher.c        |  78
-rw-r--r-- | drivers/crypto/ccree/cc_debugfs.c       |  29
-rw-r--r-- | drivers/crypto/ccree/cc_driver.c        | 127
-rw-r--r-- | drivers/crypto/ccree/cc_driver.h        |  18
-rw-r--r-- | drivers/crypto/ccree/cc_hash.c          | 228
-rw-r--r-- | drivers/crypto/ccree/cc_hash.h          |  31
-rw-r--r-- | drivers/crypto/ccree/cc_hw_queue_defs.h | 332
-rw-r--r-- | drivers/crypto/ccree/cc_pm.c            |  60
-rw-r--r-- | drivers/crypto/ccree/cc_pm.h            |  21
-rw-r--r-- | drivers/crypto/ccree/cc_request_mgr.c   |  48
-rw-r--r-- | drivers/crypto/ccree/cc_request_mgr.h   |  19
-rw-r--r-- | drivers/crypto/ccree/cc_sram_mgr.c      |  78
-rw-r--r-- | drivers/crypto/ccree/cc_sram_mgr.h      |  45
17 files changed, 570 insertions(+), 957 deletions(-)
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c index 2fc0e0da790b..1cf51edbc4b9 100644 --- a/drivers/crypto/ccree/cc_aead.c +++ b/drivers/crypto/ccree/cc_aead.c @@ -6,8 +6,9 @@ #include <crypto/algapi.h> #include <crypto/internal/aead.h> #include <crypto/authenc.h> -#include <crypto/internal/des.h> +#include <crypto/gcm.h> #include <linux/rtnetlink.h> +#include <crypto/internal/des.h> #include "cc_driver.h" #include "cc_buffer_mgr.h" #include "cc_aead.h" @@ -26,7 +27,7 @@ #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE struct cc_aead_handle { - cc_sram_addr_t sram_workspace_addr; + u32 sram_workspace_addr; struct list_head aead_list; }; @@ -60,11 +61,6 @@ struct cc_aead_ctx { enum drv_hash_mode auth_mode; }; -static inline bool valid_assoclen(struct aead_request *req) -{ - return ((req->assoclen == 16) || (req->assoclen == 20)); -} - static void cc_aead_exit(struct crypto_aead *tfm) { struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); @@ -417,7 +413,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey, dma_addr_t key_dma_addr = 0; struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); - u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode); + u32 larval_addr; struct cc_crypto_req cc_req = {}; unsigned int blocksize; unsigned int digestsize; @@ -448,8 +444,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey, if (!key) return -ENOMEM; - key_dma_addr = dma_map_single(dev, (void *)key, keylen, - DMA_TO_DEVICE); + key_dma_addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE); if (dma_mapping_error(dev, key_dma_addr)) { dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", key, keylen); @@ -460,6 +455,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey, /* Load hash initial state */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hashmode); + larval_addr = cc_larval_digest_addr(ctx->drvdata, + ctx->auth_mode); set_din_sram(&desc[idx], larval_addr, digestsize); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); @@ -796,7 +793,7 @@ static void cc_proc_authen_desc(struct aead_request *areq, * assoc. + iv + data -compact in one table * if assoclen is ZERO only IV perform */ - cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr; + u32 mlli_addr = areq_ctx->assoc.sram_addr; u32 mlli_nents = areq_ctx->assoc.mlli_nents; if (areq_ctx->is_single_pass) { @@ -1170,7 +1167,7 @@ static void cc_mlli_to_sram(struct aead_request *req, req_ctx->data_buff_type == CC_DMA_BUF_MLLI || !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) { dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n", - (unsigned int)ctx->drvdata->mlli_sram_addr, + ctx->drvdata->mlli_sram_addr, req_ctx->mlli_params.mlli_len); /* Copy MLLI table host-to-sram */ hw_desc_init(&desc[*seq_size]); @@ -1222,7 +1219,7 @@ static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[], req_ctx->is_single_pass); if (req_ctx->is_single_pass) { - /** + /* * Single-pass flow */ cc_set_hmac_desc(req, desc, seq_size); @@ -1234,7 +1231,7 @@ static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[], return; } - /** + /* * Double-pass flow * Fallback for unsupported single-pass modes, * i.e. using assoc. 
data of non-word-multiple @@ -1275,7 +1272,7 @@ cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[], req_ctx->is_single_pass); if (req_ctx->is_single_pass) { - /** + /* * Single-pass flow */ cc_set_xcbc_desc(req, desc, seq_size); @@ -1286,7 +1283,7 @@ cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[], return; } - /** + /* * Double-pass flow * Fallback for unsupported single-pass modes, * i.e. using assoc. data of non-word-multiple @@ -1611,7 +1608,6 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req) memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv, CCM_BLOCK_IV_SIZE); req->iv = areq_ctx->ctr_iv; - areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE; } static void cc_set_ghash_desc(struct aead_request *req, @@ -1799,12 +1795,6 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[], struct aead_req_ctx *req_ctx = aead_request_ctx(req); unsigned int cipher_flow_mode; - if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { - cipher_flow_mode = AES_and_HASH; - } else { /* Encrypt */ - cipher_flow_mode = AES_to_HASH_and_DOUT; - } - //in RFC4543 no data to encrypt. just copy data from src to dest. if (req_ctx->plaintext_authenticate_only) { cc_proc_cipher_desc(req, BYPASS, desc, seq_size); @@ -1816,6 +1806,12 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[], return 0; } + if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { + cipher_flow_mode = AES_and_HASH; + } else { /* Encrypt */ + cipher_flow_mode = AES_to_HASH_and_DOUT; + } + // for gcm and rfc4106. cc_set_ghash_desc(req, desc, seq_size); /* process(ghash) assoc data */ @@ -1870,8 +1866,7 @@ static int config_gcm_context(struct aead_request *req) */ __be64 temp64; - temp64 = cpu_to_be64((req_ctx->assoclen + - GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8); + temp64 = cpu_to_be64((req_ctx->assoclen + cryptlen) * 8); memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64)); temp64 = 0; memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8); @@ -1891,7 +1886,6 @@ static void cc_proc_rfc4_gcm(struct aead_request *req) memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv, GCM_BLOCK_RFC4_IV_SIZE); req->iv = areq_ctx->ctr_iv; - areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE; } static int cc_proc_aead(struct aead_request *req, @@ -1921,8 +1915,8 @@ static int cc_proc_aead(struct aead_request *req, } /* Setup request structure */ - cc_req.user_cb = (void *)cc_aead_complete; - cc_req.user_arg = (void *)req; + cc_req.user_cb = cc_aead_complete; + cc_req.user_arg = req; /* Setup request context */ areq_ctx->gen_ctx.op_type = direct; @@ -1989,7 +1983,6 @@ static int cc_proc_aead(struct aead_request *req, /* Load MLLI tables to SRAM if necessary */ cc_mlli_to_sram(req, desc, &seq_len); - /*TODO: move seq len by reference */ switch (ctx->auth_mode) { case DRV_HASH_SHA1: case DRV_HASH_SHA256: @@ -2034,9 +2027,6 @@ static int cc_aead_encrypt(struct aead_request *req) /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen; - areq_ctx->is_gcm4543 = false; - - areq_ctx->plaintext_authenticate_only = false; rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) @@ -2050,22 +2040,17 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req) /* Very similar to cc_aead_encrypt() above. 
*/ struct aead_req_ctx *areq_ctx = aead_request_ctx(req); - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct device *dev = drvdata_to_dev(ctx->drvdata); - int rc = -EINVAL; + int rc; - if (!valid_assoclen(req)) { - dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen); + rc = crypto_ipsec_check_assoclen(req->assoclen); + if (rc) goto out; - } memset(areq_ctx, 0, sizeof(*areq_ctx)); /* No generated IV required */ areq_ctx->backup_iv = req->iv; - areq_ctx->assoclen = req->assoclen; - areq_ctx->is_gcm4543 = true; + areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE; cc_proc_rfc4309_ccm(req); @@ -2086,9 +2071,6 @@ static int cc_aead_decrypt(struct aead_request *req) /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen; - areq_ctx->is_gcm4543 = false; - - areq_ctx->plaintext_authenticate_only = false; rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) @@ -2099,24 +2081,19 @@ static int cc_aead_decrypt(struct aead_request *req) static int cc_rfc4309_ccm_decrypt(struct aead_request *req) { - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct device *dev = drvdata_to_dev(ctx->drvdata); struct aead_req_ctx *areq_ctx = aead_request_ctx(req); - int rc = -EINVAL; + int rc; - if (!valid_assoclen(req)) { - dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen); + rc = crypto_ipsec_check_assoclen(req->assoclen); + if (rc) goto out; - } memset(areq_ctx, 0, sizeof(*areq_ctx)); /* No generated IV required */ areq_ctx->backup_iv = req->iv; - areq_ctx->assoclen = req->assoclen; + areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE; - areq_ctx->is_gcm4543 = true; cc_proc_rfc4309_ccm(req); rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); @@ -2216,28 +2193,20 @@ static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc, static int cc_rfc4106_gcm_encrypt(struct aead_request *req) { - /* Very similar to cc_aead_encrypt() above. */ - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct device *dev = drvdata_to_dev(ctx->drvdata); struct aead_req_ctx *areq_ctx = aead_request_ctx(req); - int rc = -EINVAL; + int rc; - if (!valid_assoclen(req)) { - dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen); + rc = crypto_ipsec_check_assoclen(req->assoclen); + if (rc) goto out; - } memset(areq_ctx, 0, sizeof(*areq_ctx)); /* No generated IV required */ areq_ctx->backup_iv = req->iv; - areq_ctx->assoclen = req->assoclen; - areq_ctx->plaintext_authenticate_only = false; + areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE; cc_proc_rfc4_gcm(req); - areq_ctx->is_gcm4543 = true; rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) @@ -2248,17 +2217,12 @@ out: static int cc_rfc4543_gcm_encrypt(struct aead_request *req) { - /* Very similar to cc_aead_encrypt() above. 
*/ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct device *dev = drvdata_to_dev(ctx->drvdata); struct aead_req_ctx *areq_ctx = aead_request_ctx(req); - int rc = -EINVAL; + int rc; - if (!valid_assoclen(req)) { - dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen); + rc = crypto_ipsec_check_assoclen(req->assoclen); + if (rc) goto out; - } memset(areq_ctx, 0, sizeof(*areq_ctx)); @@ -2270,7 +2234,6 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req) areq_ctx->assoclen = req->assoclen; cc_proc_rfc4_gcm(req); - areq_ctx->is_gcm4543 = true; rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) @@ -2281,28 +2244,20 @@ out: static int cc_rfc4106_gcm_decrypt(struct aead_request *req) { - /* Very similar to cc_aead_decrypt() above. */ - - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct device *dev = drvdata_to_dev(ctx->drvdata); struct aead_req_ctx *areq_ctx = aead_request_ctx(req); - int rc = -EINVAL; + int rc; - if (!valid_assoclen(req)) { - dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen); + rc = crypto_ipsec_check_assoclen(req->assoclen); + if (rc) goto out; - } memset(areq_ctx, 0, sizeof(*areq_ctx)); /* No generated IV required */ areq_ctx->backup_iv = req->iv; - areq_ctx->assoclen = req->assoclen; - areq_ctx->plaintext_authenticate_only = false; + areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE; cc_proc_rfc4_gcm(req); - areq_ctx->is_gcm4543 = true; rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) @@ -2313,17 +2268,12 @@ out: static int cc_rfc4543_gcm_decrypt(struct aead_request *req) { - /* Very similar to cc_aead_decrypt() above. 
*/ - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); - struct device *dev = drvdata_to_dev(ctx->drvdata); struct aead_req_ctx *areq_ctx = aead_request_ctx(req); - int rc = -EINVAL; + int rc; - if (!valid_assoclen(req)) { - dev_dbg(dev, "invalid Assoclen:%u\n", req->assoclen); + rc = crypto_ipsec_check_assoclen(req->assoclen); + if (rc) goto out; - } memset(areq_ctx, 0, sizeof(*areq_ctx)); @@ -2335,7 +2285,6 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req) areq_ctx->assoclen = req->assoclen; cc_proc_rfc4_gcm(req); - areq_ctx->is_gcm4543 = true; rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) @@ -2614,7 +2563,7 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl, struct cc_crypto_alg *t_alg; struct aead_alg *alg; - t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); + t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL); if (!t_alg) return ERR_PTR(-ENOMEM); @@ -2628,6 +2577,7 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl, alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx); alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + alg->base.cra_blocksize = tmpl->blocksize; alg->init = cc_aead_init; alg->exit = cc_aead_exit; @@ -2643,19 +2593,12 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl, int cc_aead_free(struct cc_drvdata *drvdata) { struct cc_crypto_alg *t_alg, *n; - struct cc_aead_handle *aead_handle = - (struct cc_aead_handle *)drvdata->aead_handle; - - if (aead_handle) { - /* Remove registered algs */ - list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, - entry) { - crypto_unregister_aead(&t_alg->aead_alg); - list_del(&t_alg->entry); - kfree(t_alg); - } - kfree(aead_handle); - drvdata->aead_handle = NULL; + struct cc_aead_handle *aead_handle = drvdata->aead_handle; + + /* Remove registered algs */ + list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) { + crypto_unregister_aead(&t_alg->aead_alg); + list_del(&t_alg->entry); } return 0; @@ -2669,7 +2612,7 @@ int cc_aead_alloc(struct cc_drvdata *drvdata) int alg; struct device *dev = drvdata_to_dev(drvdata); - aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL); + aead_handle = devm_kmalloc(dev, sizeof(*aead_handle), GFP_KERNEL); if (!aead_handle) { rc = -ENOMEM; goto fail0; @@ -2682,7 +2625,6 @@ int cc_aead_alloc(struct cc_drvdata *drvdata) MAX_HMAC_DIGEST_SIZE); if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) { - dev_err(dev, "SRAM pool exhausted\n"); rc = -ENOMEM; goto fail1; } @@ -2705,18 +2647,16 @@ int cc_aead_alloc(struct cc_drvdata *drvdata) if (rc) { dev_err(dev, "%s alg registration failed\n", t_alg->aead_alg.base.cra_driver_name); - goto fail2; - } else { - list_add_tail(&t_alg->entry, &aead_handle->aead_list); - dev_dbg(dev, "Registered %s\n", - t_alg->aead_alg.base.cra_driver_name); + goto fail1; } + + list_add_tail(&t_alg->entry, &aead_handle->aead_list); + dev_dbg(dev, "Registered %s\n", + t_alg->aead_alg.base.cra_driver_name); } return 0; -fail2: - kfree(t_alg); fail1: cc_aead_free(drvdata); fail0: diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h index f12169b57f9d..b69591550730 100644 --- a/drivers/crypto/ccree/cc_aead.h +++ b/drivers/crypto/ccree/cc_aead.h @@ -66,7 +66,7 @@ struct aead_req_ctx { /* used to prevent cache coherence problem */ u8 backup_mac[MAX_MAC_SIZE]; u8 *backup_iv; /* store orig iv */ - u32 assoclen; /* internal assoclen */ + 
u32 assoclen; /* size of AAD buffer to authenticate */ dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */ /* buffer for internal ccm configurations */ dma_addr_t ccm_iv0_dma_addr; @@ -79,7 +79,6 @@ struct aead_req_ctx { dma_addr_t gcm_iv_inc2_dma_addr; dma_addr_t hkey_dma_addr; /* Phys. address of hkey */ dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */ - bool is_gcm4543; u8 *icv_virt_addr; /* Virt. address of ICV */ struct async_gen_req_ctx gen_ctx; diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index a72586eccd81..b2bd093e7013 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -13,16 +13,6 @@ #include "cc_hash.h" #include "cc_aead.h" -enum dma_buffer_type { - DMA_NULL_TYPE = -1, - DMA_SGL_TYPE = 1, - DMA_BUFF_TYPE = 2, -}; - -struct buff_mgr_handle { - struct dma_pool *mlli_buffs_pool; -}; - union buffer_array_entry { struct scatterlist *sgl; dma_addr_t buffer_dma; @@ -34,7 +24,6 @@ struct buffer_array { unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI]; int nents[MAX_NUM_OF_BUFFERS_IN_MLLI]; int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI]; - enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI]; bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI]; u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI]; }; @@ -64,11 +53,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req, enum cc_sg_cpy_direct dir) { struct aead_req_ctx *areq_ctx = aead_request_ctx(req); - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - u32 skip = areq_ctx->assoclen + req->cryptlen; - - if (areq_ctx->is_gcm4543) - skip += crypto_aead_ivsize(tfm); + u32 skip = req->assoclen + req->cryptlen; cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src, (skip - areq_ctx->req_authsize), skip, dir); @@ -77,9 +62,13 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req, /** * cc_get_sgl_nents() - Get scatterlist number of entries. * + * @dev: Device object * @sg_list: SG list * @nbytes: [IN] Total SGL data bytes. 
* @lbytes: [OUT] Returns the amount of bytes at the last entry + * + * Return: + * Number of entries in the scatterlist */ static unsigned int cc_get_sgl_nents(struct device *dev, struct scatterlist *sg_list, @@ -87,6 +76,8 @@ static unsigned int cc_get_sgl_nents(struct device *dev, { unsigned int nents = 0; + *lbytes = 0; + while (nbytes && sg_list) { nents++; /* get the number of bytes in the last entry */ @@ -95,6 +86,7 @@ static unsigned int cc_get_sgl_nents(struct device *dev, nbytes : sg_list->length; sg_list = sg_next(sg_list); } + dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes); return nents; } @@ -103,11 +95,13 @@ static unsigned int cc_get_sgl_nents(struct device *dev, * cc_copy_sg_portion() - Copy scatter list data, * from to_skip to end, to dest and vice versa * - * @dest: - * @sg: - * @to_skip: - * @end: - * @direct: + * @dev: Device object + * @dest: Buffer to copy to/from + * @sg: SG list + * @to_skip: Number of bytes to skip before copying + * @end: Offset of last byte to copy + * @direct: Transfer direction (true == from SG list to buffer, false == from + * buffer to SG list) */ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg, u32 to_skip, u32 end, enum cc_sg_cpy_direct direct) @@ -115,7 +109,7 @@ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg, u32 nents; nents = sg_nents_for_len(sg, end); - sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip, + sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip, (direct == CC_SG_TO_BUF)); } @@ -204,21 +198,15 @@ static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data, goto build_mlli_exit; } /* Point to start of MLLI */ - mlli_p = (u32 *)mlli_params->mlli_virt_addr; + mlli_p = mlli_params->mlli_virt_addr; /* go over all SG's and link it to one MLLI table */ for (i = 0; i < sg_data->num_of_buffers; i++) { union buffer_array_entry *entry = &sg_data->entry[i]; u32 tot_len = sg_data->total_data_len[i]; u32 offset = sg_data->offset[i]; - if (sg_data->type[i] == DMA_SGL_TYPE) - rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, - offset, &total_nents, - &mlli_p); - else /*DMA_BUFF_TYPE*/ - rc = cc_render_buff_to_mlli(dev, entry->buffer_dma, - tot_len, &total_nents, - &mlli_p); + rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset, + &total_nents, &mlli_p); if (rc) return rc; @@ -244,27 +232,6 @@ build_mlli_exit: return rc; } -static void cc_add_buffer_entry(struct device *dev, - struct buffer_array *sgl_data, - dma_addr_t buffer_dma, unsigned int buffer_len, - bool is_last_entry, u32 *mlli_nents) -{ - unsigned int index = sgl_data->num_of_buffers; - - dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n", - index, &buffer_dma, buffer_len, is_last_entry); - sgl_data->nents[index] = 1; - sgl_data->entry[index].buffer_dma = buffer_dma; - sgl_data->offset[index] = 0; - sgl_data->total_data_len[index] = buffer_len; - sgl_data->type[index] = DMA_BUFF_TYPE; - sgl_data->is_last[index] = is_last_entry; - sgl_data->mlli_nents[index] = mlli_nents; - if (sgl_data->mlli_nents[index]) - *sgl_data->mlli_nents[index] = 0; - sgl_data->num_of_buffers++; -} - static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data, unsigned int nents, struct scatterlist *sgl, unsigned int data_len, unsigned int data_offset, @@ -278,7 +245,6 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data, sgl_data->entry[index].sgl = sgl; sgl_data->offset[index] = data_offset; 
sgl_data->total_data_len[index] = data_len; - sgl_data->type[index] = DMA_SGL_TYPE; sgl_data->is_last[index] = is_last_table; sgl_data->mlli_nents[index] = mlli_nents; if (sgl_data->mlli_nents[index]) @@ -290,37 +256,25 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg, unsigned int nbytes, int direction, u32 *nents, u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents) { - if (sg_is_last(sg)) { - /* One entry only case -set to DLLI */ - if (dma_map_sg(dev, sg, 1, direction) != 1) { - dev_err(dev, "dma_map_sg() single buffer failed\n"); - return -ENOMEM; - } - dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n", - &sg_dma_address(sg), sg_page(sg), sg_virt(sg), - sg->offset, sg->length); - *lbytes = nbytes; - *nents = 1; - *mapped_nents = 1; - } else { /*sg_is_last*/ - *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); - if (*nents > max_sg_nents) { - *nents = 0; - dev_err(dev, "Too many fragments. current %d max %d\n", - *nents, max_sg_nents); - return -ENOMEM; - } - /* In case of mmu the number of mapped nents might - * be changed from the original sgl nents - */ - *mapped_nents = dma_map_sg(dev, sg, *nents, direction); - if (*mapped_nents == 0) { - *nents = 0; - dev_err(dev, "dma_map_sg() sg buffer failed\n"); - return -ENOMEM; - } + int ret = 0; + + *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); + if (*nents > max_sg_nents) { + *nents = 0; + dev_err(dev, "Too many fragments. current %d max %d\n", + *nents, max_sg_nents); + return -ENOMEM; + } + + ret = dma_map_sg(dev, sg, *nents, direction); + if (dma_mapping_error(dev, ret)) { + *nents = 0; + dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret); + return -ENOMEM; } + *mapped_nents = ret; + return 0; } @@ -411,7 +365,6 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, { struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx; struct mlli_params *mlli_params = &req_ctx->mlli_params; - struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle; struct device *dev = drvdata_to_dev(drvdata); struct buffer_array sg_data; u32 dummy = 0; @@ -424,10 +377,9 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, /* Map IV buffer */ if (ivsize) { - dump_byte_array("iv", (u8 *)info, ivsize); + dump_byte_array("iv", info, ivsize); req_ctx->gen_ctx.iv_dma_addr = - dma_map_single(dev, (void *)info, - ivsize, DMA_BIDIRECTIONAL); + dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) { dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n", ivsize, info); @@ -476,7 +428,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, } if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) { - mlli_params->curr_pool = buff_mgr->mlli_buffs_pool; + mlli_params->curr_pool = drvdata->mlli_buffs_pool; rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags); if (rc) goto cipher_exit; @@ -555,11 +507,12 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, areq_ctx->assoclen, req->cryptlen); - dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, + DMA_BIDIRECTIONAL); if (req->src != req->dst) { dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", sg_virt(req->dst)); - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), + dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_BIDIRECTIONAL); } if (drvdata->coherent && @@ -614,18 +567,6 @@ static int 
cc_aead_chain_iv(struct cc_drvdata *drvdata, dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n", hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr); - // TODO: what about CTR?? ask Ron - if (do_chain && areq_ctx->plaintext_authenticate_only) { - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm); - unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET; - /* Chain to given list */ - cc_add_buffer_entry(dev, sg_data, - (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs), - iv_size_to_authenc, is_last, - &areq_ctx->assoc.mlli_nents); - areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; - } chain_iv_exit: return rc; @@ -639,13 +580,8 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata, struct aead_req_ctx *areq_ctx = aead_request_ctx(req); int rc = 0; int mapped_nents = 0; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - unsigned int size_of_assoc = areq_ctx->assoclen; struct device *dev = drvdata_to_dev(drvdata); - if (areq_ctx->is_gcm4543) - size_of_assoc += crypto_aead_ivsize(tfm); - if (!sg_data) { rc = -EINVAL; goto chain_assoc_exit; @@ -661,7 +597,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata, goto chain_assoc_exit; } - mapped_nents = sg_nents_for_len(req->src, size_of_assoc); + mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen); if (mapped_nents < 0) return mapped_nents; @@ -854,16 +790,11 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, u32 src_mapped_nents = 0, dst_mapped_nents = 0; u32 offset = 0; /* non-inplace mode */ - unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); + unsigned int size_for_map = req->assoclen + req->cryptlen; u32 sg_index = 0; - bool is_gcm4543 = areq_ctx->is_gcm4543; - u32 size_to_skip = areq_ctx->assoclen; + u32 size_to_skip = req->assoclen; struct scatterlist *sgl; - if (is_gcm4543) - size_to_skip += crypto_aead_ivsize(tfm); - offset = size_to_skip; if (!sg_data) @@ -872,16 +803,13 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, areq_ctx->src_sgl = req->src; areq_ctx->dst_sgl = req->dst; - if (is_gcm4543) - size_for_map += crypto_aead_ivsize(tfm); - size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0; src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map, &src_last_bytes); sg_index = areq_ctx->src_sgl->length; //check where the data starts - while (sg_index <= size_to_skip) { + while (src_mapped_nents && (sg_index <= size_to_skip)) { src_mapped_nents--; offset -= areq_ctx->src_sgl->length; sgl = sg_next(areq_ctx->src_sgl); @@ -901,14 +829,15 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, areq_ctx->src_offset = offset; if (req->src != req->dst) { - size_for_map = areq_ctx->assoclen + req->cryptlen; - size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? 
- authsize : 0; - if (is_gcm4543) - size_for_map += crypto_aead_ivsize(tfm); + size_for_map = req->assoclen + req->cryptlen; + + if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) + size_for_map += authsize; + else + size_for_map -= authsize; rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL, - &areq_ctx->dst.nents, + &areq_ctx->dst.mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, &dst_mapped_nents); if (rc) @@ -921,7 +850,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, offset = size_to_skip; //check where the data starts - while (sg_index <= size_to_skip) { + while (dst_mapped_nents && sg_index <= size_to_skip) { dst_mapped_nents--; offset -= areq_ctx->dst_sgl->length; sgl = sg_next(areq_ctx->dst_sgl); @@ -1012,14 +941,11 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) struct device *dev = drvdata_to_dev(drvdata); struct buffer_array sg_data; unsigned int authsize = areq_ctx->req_authsize; - struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle; int rc = 0; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - bool is_gcm4543 = areq_ctx->is_gcm4543; dma_addr_t dma_addr; u32 mapped_nents = 0; u32 dummy = 0; /*used for the assoc data fragments */ - u32 size_to_map = 0; + u32 size_to_map; gfp_t flags = cc_gfp_flags(&req->base); mlli_params->curr_pool = NULL; @@ -1116,14 +1042,15 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) areq_ctx->gcm_iv_inc2_dma_addr = dma_addr; } - size_to_map = req->cryptlen + areq_ctx->assoclen; - if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) + size_to_map = req->cryptlen + req->assoclen; + /* If we do in-place encryption, we also need the auth tag */ + if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) && + (req->src == req->dst)) { size_to_map += authsize; + } - if (is_gcm4543) - size_to_map += crypto_aead_ivsize(tfm); rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL, - &areq_ctx->src.nents, + &areq_ctx->src.mapped_nents, (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES), &dummy, &mapped_nents); @@ -1183,7 +1110,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) */ if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) { - mlli_params->curr_pool = buff_mgr->mlli_buffs_pool; + mlli_params->curr_pool = drvdata->mlli_buffs_pool; rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags); if (rc) goto aead_map_failure; @@ -1211,7 +1138,6 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx); struct mlli_params *mlli_params = &areq_ctx->mlli_params; struct buffer_array sg_data; - struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle; int rc = 0; u32 dummy = 0; u32 mapped_nents = 0; @@ -1229,7 +1155,6 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, return 0; } - /*TODO: copy data in case that buffer is enough for operation */ /* map the previous buffer */ if (*curr_buff_cnt) { rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, @@ -1258,7 +1183,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, /*build mlli */ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) { - mlli_params->curr_pool = buff_mgr->mlli_buffs_pool; + mlli_params->curr_pool = drvdata->mlli_buffs_pool; /* add the src data to the sg_data */ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes, 0, true, &areq_ctx->mlli_nents); @@ -1296,7 
+1221,6 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, unsigned int update_data_len; u32 total_in_len = nbytes + *curr_buff_cnt; struct buffer_array sg_data; - struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle; unsigned int swap_index = 0; int rc = 0; u32 dummy = 0; @@ -1371,7 +1295,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, } if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) { - mlli_params->curr_pool = buff_mgr->mlli_buffs_pool; + mlli_params->curr_pool = drvdata->mlli_buffs_pool; /* add the src data to the sg_data */ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, (update_data_len - *curr_buff_cnt), 0, true, @@ -1438,39 +1362,22 @@ void cc_unmap_hash_request(struct device *dev, void *ctx, int cc_buffer_mgr_init(struct cc_drvdata *drvdata) { - struct buff_mgr_handle *buff_mgr_handle; struct device *dev = drvdata_to_dev(drvdata); - buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL); - if (!buff_mgr_handle) - return -ENOMEM; - - drvdata->buff_mgr_handle = buff_mgr_handle; - - buff_mgr_handle->mlli_buffs_pool = + drvdata->mlli_buffs_pool = dma_pool_create("dx_single_mlli_tables", dev, MAX_NUM_OF_TOTAL_MLLI_ENTRIES * LLI_ENTRY_BYTE_SIZE, MLLI_TABLE_MIN_ALIGNMENT, 0); - if (!buff_mgr_handle->mlli_buffs_pool) - goto error; + if (!drvdata->mlli_buffs_pool) + return -ENOMEM; return 0; - -error: - cc_buffer_mgr_fini(drvdata); - return -ENOMEM; } int cc_buffer_mgr_fini(struct cc_drvdata *drvdata) { - struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle; - - if (buff_mgr_handle) { - dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool); - kfree(drvdata->buff_mgr_handle); - drvdata->buff_mgr_handle = NULL; - } + dma_pool_destroy(drvdata->mlli_buffs_pool); return 0; } diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h index af434872c6ff..653441b6542e 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.h +++ b/drivers/crypto/ccree/cc_buffer_mgr.h @@ -24,14 +24,15 @@ enum cc_sg_cpy_direct { }; struct cc_mlli { - cc_sram_addr_t sram_addr; + u32 sram_addr; + unsigned int mapped_nents; unsigned int nents; //sg nents unsigned int mlli_nents; //mlli nents might be different than the above }; struct mlli_params { struct dma_pool *curr_pool; - u8 *mlli_virt_addr; + void *mlli_virt_addr; dma_addr_t mlli_dma_addr; u32 mlli_len; }; diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 7d6252d892d7..a84335328f37 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -20,10 +20,6 @@ #define template_skcipher template_u.skcipher -struct cc_cipher_handle { - struct list_head alg_list; -}; - struct cc_user_key_info { u8 *key; dma_addr_t key_dma_addr; @@ -184,7 +180,7 @@ static int cc_cipher_init(struct crypto_tfm *tfm) ctx_p->user.key); /* Map key buffer */ - ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key, + ctx_p->user.key_dma_addr = dma_map_single(dev, ctx_p->user.key, max_key_buf_size, DMA_TO_DEVICE); if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) { @@ -284,7 +280,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key, dev_dbg(dev, "Setting HW key in context @%p for %s. 
keylen=%u\n", ctx_p, crypto_tfm_alg_name(tfm), keylen); - dump_byte_array("key", (u8 *)key, keylen); + dump_byte_array("key", key, keylen); /* STAT_PHASE_0: Init and sanity checks */ @@ -387,7 +383,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n", ctx_p, crypto_tfm_alg_name(tfm), keylen); - dump_byte_array("key", (u8 *)key, keylen); + dump_byte_array("key", key, keylen); /* STAT_PHASE_0: Init and sanity checks */ @@ -533,14 +529,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm, int flow_mode = ctx_p->flow_mode; int direction = req_ctx->gen_ctx.op_type; dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr; - unsigned int du_size = nbytes; - - struct cc_crypto_alg *cc_alg = - container_of(tfm->__crt_alg, struct cc_crypto_alg, - skcipher_alg.base); - - if (cc_alg->data_unit) - du_size = cc_alg->data_unit; switch (cipher_mode) { case DRV_CIPHER_ECB: @@ -753,7 +741,7 @@ static void cc_setup_mlli_desc(struct crypto_tfm *tfm, dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n", &req_ctx->mlli_params.mlli_dma_addr, req_ctx->mlli_params.mlli_len, - (unsigned int)ctx_p->drvdata->mlli_sram_addr); + ctx_p->drvdata->mlli_sram_addr); hw_desc_init(&desc[*seq_size]); set_din_type(&desc[*seq_size], DMA_DLLI, req_ctx->mlli_params.mlli_dma_addr, @@ -801,16 +789,16 @@ static void cc_setup_flow_desc(struct crypto_tfm *tfm, req_ctx->in_mlli_nents, NS_BIT); if (req_ctx->out_nents == 0) { dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n", - (unsigned int)ctx_p->drvdata->mlli_sram_addr, - (unsigned int)ctx_p->drvdata->mlli_sram_addr); + ctx_p->drvdata->mlli_sram_addr, + ctx_p->drvdata->mlli_sram_addr); set_dout_mlli(&desc[*seq_size], ctx_p->drvdata->mlli_sram_addr, req_ctx->in_mlli_nents, NS_BIT, (!last_desc ? 0 : 1)); } else { dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n", - (unsigned int)ctx_p->drvdata->mlli_sram_addr, - (unsigned int)ctx_p->drvdata->mlli_sram_addr + + ctx_p->drvdata->mlli_sram_addr, + ctx_p->drvdata->mlli_sram_addr + (u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents); set_dout_mlli(&desc[*seq_size], (ctx_p->drvdata->mlli_sram_addr + @@ -871,7 +859,6 @@ static int cc_cipher_process(struct skcipher_request *req, /* STAT_PHASE_0: Init and sanity checks */ - /* TODO: check data length according to mode */ if (validate_data_size(ctx_p, nbytes)) { dev_dbg(dev, "Unsupported data size %d.\n", nbytes); rc = -EINVAL; @@ -893,8 +880,8 @@ static int cc_cipher_process(struct skcipher_request *req, } /* Setup request structure */ - cc_req.user_cb = (void *)cc_cipher_complete; - cc_req.user_arg = (void *)req; + cc_req.user_cb = cc_cipher_complete; + cc_req.user_arg = req; /* Setup CPP operation details */ if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) { @@ -1228,6 +1215,10 @@ static const struct cc_alg_template skcipher_algs[] = { .sec_func = true, }, { + /* See https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg40576.html + * for the reason why this differs from the generic + * implementation. 
+ */ .name = "xts(aes)", .driver_name = "xts-aes-ccree", .blocksize = 1, @@ -1423,7 +1414,7 @@ static const struct cc_alg_template skcipher_algs[] = { { .name = "ofb(aes)", .driver_name = "ofb-aes-ccree", - .blocksize = AES_BLOCK_SIZE, + .blocksize = 1, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, @@ -1576,7 +1567,7 @@ static const struct cc_alg_template skcipher_algs[] = { { .name = "ctr(sm4)", .driver_name = "ctr-sm4-ccree", - .blocksize = SM4_BLOCK_SIZE, + .blocksize = 1, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, @@ -1634,7 +1625,7 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl, struct cc_crypto_alg *t_alg; struct skcipher_alg *alg; - t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); + t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL); if (!t_alg) return ERR_PTR(-ENOMEM); @@ -1665,36 +1656,23 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl, int cc_cipher_free(struct cc_drvdata *drvdata) { struct cc_crypto_alg *t_alg, *n; - struct cc_cipher_handle *cipher_handle = drvdata->cipher_handle; - - if (cipher_handle) { - /* Remove registered algs */ - list_for_each_entry_safe(t_alg, n, &cipher_handle->alg_list, - entry) { - crypto_unregister_skcipher(&t_alg->skcipher_alg); - list_del(&t_alg->entry); - kfree(t_alg); - } - kfree(cipher_handle); - drvdata->cipher_handle = NULL; + + /* Remove registered algs */ + list_for_each_entry_safe(t_alg, n, &drvdata->alg_list, entry) { + crypto_unregister_skcipher(&t_alg->skcipher_alg); + list_del(&t_alg->entry); } return 0; } int cc_cipher_alloc(struct cc_drvdata *drvdata) { - struct cc_cipher_handle *cipher_handle; struct cc_crypto_alg *t_alg; struct device *dev = drvdata_to_dev(drvdata); int rc = -ENOMEM; int alg; - cipher_handle = kmalloc(sizeof(*cipher_handle), GFP_KERNEL); - if (!cipher_handle) - return -ENOMEM; - - INIT_LIST_HEAD(&cipher_handle->alg_list); - drvdata->cipher_handle = cipher_handle; + INIT_LIST_HEAD(&drvdata->alg_list); /* Linux crypto */ dev_dbg(dev, "Number of algorithms = %zu\n", @@ -1723,14 +1701,12 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata) if (rc) { dev_err(dev, "%s alg registration failed\n", t_alg->skcipher_alg.base.cra_driver_name); - kfree(t_alg); goto fail0; - } else { - list_add_tail(&t_alg->entry, - &cipher_handle->alg_list); - dev_dbg(dev, "Registered %s\n", - t_alg->skcipher_alg.base.cra_driver_name); } + + list_add_tail(&t_alg->entry, &drvdata->alg_list); + dev_dbg(dev, "Registered %s\n", + t_alg->skcipher_alg.base.cra_driver_name); } return 0; diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c index 566999738698..c454afce7781 100644 --- a/drivers/crypto/ccree/cc_debugfs.c +++ b/drivers/crypto/ccree/cc_debugfs.c @@ -8,10 +8,6 @@ #include "cc_crypto_ctx.h" #include "cc_debugfs.h" -struct cc_debugfs_ctx { - struct dentry *dir; -}; - #define CC_DEBUG_REG(_X) { \ .name = __stringify(_X),\ .offset = CC_REG(_X) \ @@ -67,13 +63,8 @@ void __exit cc_debugfs_global_fini(void) int cc_debugfs_init(struct cc_drvdata *drvdata) { struct device *dev = drvdata_to_dev(drvdata); - struct cc_debugfs_ctx *ctx; struct debugfs_regset32 *regset, *verset; - ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return -ENOMEM; - regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) return -ENOMEM; @@ -81,16 +72,18 @@ int cc_debugfs_init(struct cc_drvdata *drvdata) regset->regs = debug_regs; regset->nregs = ARRAY_SIZE(debug_regs); regset->base 
= drvdata->cc_base; + regset->dev = dev; - ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir); + drvdata->dir = debugfs_create_dir(drvdata->plat_dev->name, + cc_debugfs_dir); - debugfs_create_regset32("regs", 0400, ctx->dir, regset); - debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent); + debugfs_create_regset32("regs", 0400, drvdata->dir, regset); + debugfs_create_bool("coherent", 0400, drvdata->dir, &drvdata->coherent); verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL); /* Failing here is not important enough to fail the module load */ if (!verset) - goto out; + return 0; if (drvdata->hw_rev <= CC_HW_REV_712) { ver_sig_regs[0].offset = drvdata->sig_offset; @@ -102,17 +95,13 @@ int cc_debugfs_init(struct cc_drvdata *drvdata) verset->nregs = ARRAY_SIZE(pid_cid_regs); } verset->base = drvdata->cc_base; + verset->dev = dev; - debugfs_create_regset32("version", 0400, ctx->dir, verset); - -out: - drvdata->debugfs = ctx; + debugfs_create_regset32("version", 0400, drvdata->dir, verset); return 0; } void cc_debugfs_fini(struct cc_drvdata *drvdata) { - struct cc_debugfs_ctx *ctx = (struct cc_debugfs_ctx *)drvdata->debugfs; - - debugfs_remove_recursive(ctx->dir); + debugfs_remove_recursive(drvdata->dir); } diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 532bc95a8373..2d50991b9a17 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -14,6 +14,8 @@ #include <linux/of.h> #include <linux/clk.h> #include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/pm_runtime.h> #include "cc_driver.h" #include "cc_request_mgr.h" @@ -134,7 +136,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id) /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */ /* if driver suspended return, probably shared interrupt */ - if (cc_pm_is_dev_suspended(dev)) + if (pm_runtime_suspended(dev)) return IRQ_NONE; /* read the interrupt status */ @@ -269,7 +271,6 @@ static int init_cc_resources(struct platform_device *plat_dev) u32 val, hw_rev_pidr, sig_cidr; u64 dma_mask; const struct cc_hw_data *hw_rev; - const struct of_device_id *dev_id; struct clk *clk; int irq; int rc = 0; @@ -278,11 +279,7 @@ static int init_cc_resources(struct platform_device *plat_dev) if (!new_drvdata) return -ENOMEM; - dev_id = of_match_node(arm_ccree_dev_of_match, np); - if (!dev_id) - return -ENODEV; - - hw_rev = (struct cc_hw_data *)dev_id->data; + hw_rev = of_device_get_match_data(dev); new_drvdata->hw_rev_name = hw_rev->name; new_drvdata->hw_rev = hw_rev->rev; new_drvdata->std_bodies = hw_rev->std_bodies; @@ -302,22 +299,12 @@ static int init_cc_resources(struct platform_device *plat_dev) platform_set_drvdata(plat_dev, new_drvdata); new_drvdata->plat_dev = plat_dev; - clk = devm_clk_get(dev, NULL); - if (IS_ERR(clk)) - switch (PTR_ERR(clk)) { - /* Clock is optional so this might be fine */ - case -ENOENT: - break; - - /* Clock not available, let's try again soon */ - case -EPROBE_DEFER: - return -EPROBE_DEFER; - - default: - dev_err(dev, "Error getting clock: %ld\n", - PTR_ERR(clk)); - return PTR_ERR(clk); - } + clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) != -EPROBE_DEFER) + dev_err(dev, "Error getting clock: %pe\n", clk); + return PTR_ERR(clk); + } new_drvdata->clk = clk; new_drvdata->coherent = of_dma_is_coherent(np); @@ -344,13 +331,13 @@ static int init_cc_resources(struct platform_device *plat_dev) init_completion(&new_drvdata->hw_queue_avail); - if (!plat_dev->dev.dma_mask) - 
plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask; + if (!dev->dma_mask) + dev->dma_mask = &dev->coherent_dma_mask; dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN); while (dma_mask > 0x7fffffffUL) { - if (dma_supported(&plat_dev->dev, dma_mask)) { - rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask); + if (dma_supported(dev, dma_mask)) { + rc = dma_set_coherent_mask(dev, dma_mask); if (!rc) break; } @@ -362,7 +349,7 @@ static int init_cc_resources(struct platform_device *plat_dev) return rc; } - rc = cc_clk_on(new_drvdata); + rc = clk_prepare_enable(new_drvdata->clk); if (rc) { dev_err(dev, "Failed to enable clock"); return rc; @@ -370,7 +357,17 @@ static int init_cc_resources(struct platform_device *plat_dev) new_drvdata->sec_disabled = cc_sec_disable; - /* wait for Crytpcell reset completion */ + pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + rc = pm_runtime_get_sync(dev); + if (rc < 0) { + dev_err(dev, "pm_runtime_get_sync() failed: %d\n", rc); + goto post_pm_err; + } + + /* Wait for Cryptocell reset completion */ if (!cc_wait_for_reset_completion(new_drvdata)) { dev_err(dev, "Cryptocell reset not completed"); } @@ -382,7 +379,7 @@ static int init_cc_resources(struct platform_device *plat_dev) dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n", val, hw_rev->sig); rc = -EINVAL; - goto post_clk_err; + goto post_pm_err; } sig_cidr = val; hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset); @@ -393,7 +390,7 @@ static int init_cc_resources(struct platform_device *plat_dev) dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n", val, hw_rev->pidr_0124); rc = -EINVAL; - goto post_clk_err; + goto post_pm_err; } hw_rev_pidr = val; @@ -402,7 +399,7 @@ static int init_cc_resources(struct platform_device *plat_dev) dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n", val, hw_rev->cidr_0123); rc = -EINVAL; - goto post_clk_err; + goto post_pm_err; } sig_cidr = val; @@ -421,7 +418,7 @@ static int init_cc_resources(struct platform_device *plat_dev) default: dev_err(dev, "Unsupported engines configuration.\n"); rc = -EINVAL; - goto post_clk_err; + goto post_pm_err; } /* Check security disable state */ @@ -447,14 +444,14 @@ static int init_cc_resources(struct platform_device *plat_dev) new_drvdata); if (rc) { dev_err(dev, "Could not register to interrupt %d\n", irq); - goto post_clk_err; + goto post_pm_err; } dev_dbg(dev, "Registered to IRQ: %d\n", irq); rc = init_cc_regs(new_drvdata, true); if (rc) { dev_err(dev, "init_cc_regs failed\n"); - goto post_clk_err; + goto post_pm_err; } rc = cc_debugfs_init(new_drvdata); @@ -477,15 +474,14 @@ static int init_cc_resources(struct platform_device *plat_dev) new_drvdata->mlli_sram_addr = cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE); if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) { - dev_err(dev, "Failed to alloc MLLI Sram buffer\n"); rc = -ENOMEM; - goto post_sram_mgr_err; + goto post_fips_init_err; } rc = cc_req_mgr_init(new_drvdata); if (rc) { dev_err(dev, "cc_req_mgr_init failed\n"); - goto post_sram_mgr_err; + goto post_fips_init_err; } rc = cc_buffer_mgr_init(new_drvdata); @@ -494,12 +490,6 @@ static int init_cc_resources(struct platform_device *plat_dev) goto post_req_mgr_err; } - rc = cc_pm_init(new_drvdata); - if (rc) { - dev_err(dev, "cc_pm_init failed\n"); - goto post_buf_mgr_err; - } - /* Allocate crypto algs */ rc = cc_cipher_alloc(new_drvdata); if (rc) { @@ -520,15 +510,13 
@@ static int init_cc_resources(struct platform_device *plat_dev) goto post_hash_err; } - /* All set, we can allow autosuspend */ - cc_pm_go(new_drvdata); - /* If we got here and FIPS mode is enabled * it means all FIPS test passed, so let TEE * know we're good. */ cc_set_ree_fips_status(new_drvdata, true); + pm_runtime_put(dev); return 0; post_hash_err: @@ -539,16 +527,17 @@ post_buf_mgr_err: cc_buffer_mgr_fini(new_drvdata); post_req_mgr_err: cc_req_mgr_fini(new_drvdata); -post_sram_mgr_err: - cc_sram_mgr_fini(new_drvdata); post_fips_init_err: cc_fips_fini(new_drvdata); post_debugfs_err: cc_debugfs_fini(new_drvdata); post_regs_err: fini_cc_regs(new_drvdata); -post_clk_err: - cc_clk_off(new_drvdata); +post_pm_err: + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + clk_disable_unprepare(new_drvdata->clk); return rc; } @@ -560,36 +549,22 @@ void fini_cc_regs(struct cc_drvdata *drvdata) static void cleanup_cc_resources(struct platform_device *plat_dev) { + struct device *dev = &plat_dev->dev; struct cc_drvdata *drvdata = (struct cc_drvdata *)platform_get_drvdata(plat_dev); cc_aead_free(drvdata); cc_hash_free(drvdata); cc_cipher_free(drvdata); - cc_pm_fini(drvdata); cc_buffer_mgr_fini(drvdata); cc_req_mgr_fini(drvdata); - cc_sram_mgr_fini(drvdata); cc_fips_fini(drvdata); cc_debugfs_fini(drvdata); fini_cc_regs(drvdata); - cc_clk_off(drvdata); -} - -int cc_clk_on(struct cc_drvdata *drvdata) -{ - struct clk *clk = drvdata->clk; - int rc; - - if (IS_ERR(clk)) - /* Not all devices have a clock associated with CCREE */ - return 0; - - rc = clk_prepare_enable(clk); - if (rc) - return rc; - - return 0; + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + clk_disable_unprepare(drvdata->clk); } unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata) @@ -600,17 +575,6 @@ unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata) return HASH_LEN_SIZE_630; } -void cc_clk_off(struct cc_drvdata *drvdata) -{ - struct clk *clk = drvdata->clk; - - if (IS_ERR(clk)) - /* Not all devices have a clock associated with CCREE */ - return; - - clk_disable_unprepare(clk); -} - static int ccree_probe(struct platform_device *plat_dev) { int rc; @@ -653,7 +617,6 @@ static struct platform_driver ccree_driver = { static int __init ccree_init(void) { - cc_hash_global_init(); cc_debugfs_global_init(); return platform_driver_register(&ccree_driver); diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h index c227718ba992..d938886390d2 100644 --- a/drivers/crypto/ccree/cc_driver.h +++ b/drivers/crypto/ccree/cc_driver.h @@ -26,7 +26,6 @@ #include <linux/clk.h> #include <linux/platform_device.h> -/* Registers definitions from shared/hw/ree_include */ #include "cc_host_regs.h" #include "cc_crypto_ctx.h" #include "cc_hw_queue_defs.h" @@ -71,9 +70,7 @@ enum cc_std_body { #define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT) -#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \ - CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \ - CC_AXIM_MON_COMP_VALUE_BIT_SHIFT) +#define AXIM_MON_COMP_VALUE CC_GENMASK(CC_AXIM_MON_COMP_VALUE) #define CC_CPP_AES_ABORT_MASK ( \ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \ @@ -139,15 +136,15 @@ struct cc_drvdata { int irq; struct completion hw_queue_avail; /* wait for HW queue availability */ struct platform_device *plat_dev; - cc_sram_addr_t mlli_sram_addr; - void *buff_mgr_handle; - void *cipher_handle; + u32 mlli_sram_addr; + struct dma_pool 
*mlli_buffs_pool; + struct list_head alg_list; void *hash_handle; void *aead_handle; void *request_mgr_handle; void *fips_handle; - void *sram_mgr_handle; - void *debugfs; + u32 sram_free_offset; /* offset to non-allocated area in SRAM */ + struct dentry *dir; /* for debugfs */ struct clk *clk; bool coherent; char *hw_rev_name; @@ -158,7 +155,6 @@ struct cc_drvdata { int std_bodies; bool sec_disabled; u32 comp_mask; - bool pm_on; }; struct cc_crypto_alg { @@ -212,8 +208,6 @@ static inline void dump_byte_array(const char *name, const u8 *the_array, bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata); int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe); void fini_cc_regs(struct cc_drvdata *drvdata); -int cc_clk_on(struct cc_drvdata *drvdata); -void cc_clk_off(struct cc_drvdata *drvdata); unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata); static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val) diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c index 912e5ce5079d..d5310783af15 100644 --- a/drivers/crypto/ccree/cc_hash.c +++ b/drivers/crypto/ccree/cc_hash.c @@ -20,8 +20,8 @@ #define CC_SM3_HASH_LEN_SIZE 8 struct cc_hash_handle { - cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/ - cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */ + u32 digest_len_sram_addr; /* const value in SRAM*/ + u32 larval_digest_sram_addr; /* const value in SRAM */ struct list_head hash_list; }; @@ -39,12 +39,19 @@ static const u32 cc_sha256_init[] = { SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 }; static const u32 cc_digest_len_sha512_init[] = { 0x00000080, 0x00000000, 0x00000000, 0x00000000 }; -static u64 cc_sha384_init[] = { - SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4, - SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 }; -static u64 cc_sha512_init[] = { - SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4, - SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 }; + +/* + * Due to the way the HW works, every double word in the SHA384 and SHA512 + * larval hashes must be stored in hi/lo order + */ +#define hilo(x) upper_32_bits(x), lower_32_bits(x) +static const u32 cc_sha384_init[] = { + hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4), + hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) }; +static const u32 cc_sha512_init[] = { + hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4), + hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) }; + static const u32 cc_sm3_init[] = { SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE, SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA }; @@ -342,7 +349,6 @@ static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req, /* Get final MAC result */ hw_desc_init(&desc[idx]); set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); - /* TODO */ set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); @@ -422,8 +428,7 @@ static int cc_hash_digest(struct ahash_request *req) bool is_hmac = ctx->is_hmac; struct cc_crypto_req cc_req = {}; struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN]; - cc_sram_addr_t larval_digest_addr = - cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode); + u32 larval_digest_addr; int idx = 0; int rc = 0; gfp_t flags = cc_gfp_flags(&req->base); @@ -465,6 +470,8 @@ static int cc_hash_digest(struct ahash_request *req) set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT); } else { + larval_digest_addr = 
cc_larval_digest_addr(ctx->drvdata, + ctx->hash_mode); set_din_sram(&desc[idx], larval_digest_addr, ctx->inter_digestsize); } @@ -726,7 +733,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, int digestsize = 0; int i, idx = 0, rc = 0; struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN]; - cc_sram_addr_t larval_addr; + u32 larval_addr; struct device *dev; ctx = crypto_ahash_ctx(ahash); @@ -752,7 +759,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, return -ENOMEM; ctx->key_params.key_dma_addr = - dma_map_single(dev, (void *)ctx->key_params.key, keylen, + dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE); if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) { dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", @@ -1067,8 +1074,8 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx) ctx->key_params.keylen = 0; ctx->digest_buff_dma_addr = - dma_map_single(dev, (void *)ctx->digest_buff, - sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL); + dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff), + DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) { dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n", sizeof(ctx->digest_buff), ctx->digest_buff); @@ -1079,7 +1086,7 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx) &ctx->digest_buff_dma_addr); ctx->opad_tmp_keys_dma_addr = - dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, + dma_map_single(dev, ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) { @@ -1196,8 +1203,8 @@ static int cc_mac_update(struct ahash_request *req) idx++; /* Setup request structure */ - cc_req.user_cb = (void *)cc_update_complete; - cc_req.user_arg = (void *)req; + cc_req.user_cb = cc_update_complete; + cc_req.user_arg = req; rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base); if (rc != -EINPROGRESS && rc != -EBUSY) { @@ -1254,8 +1261,8 @@ static int cc_mac_final(struct ahash_request *req) } /* Setup request structure */ - cc_req.user_cb = (void *)cc_hash_complete; - cc_req.user_arg = (void *)req; + cc_req.user_cb = cc_hash_complete; + cc_req.user_arg = req; if (state->xcbc_count && rem_cnt == 0) { /* Load key for ECB decryption */ @@ -1311,7 +1318,6 @@ static int cc_mac_final(struct ahash_request *req) /* Get final MAC result */ hw_desc_init(&desc[idx]); - /* TODO */ set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); @@ -1369,8 +1375,8 @@ static int cc_mac_finup(struct ahash_request *req) } /* Setup request structure */ - cc_req.user_cb = (void *)cc_hash_complete; - cc_req.user_arg = (void *)req; + cc_req.user_cb = cc_hash_complete; + cc_req.user_arg = req; if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) { key_len = CC_AES_128_BIT_KEY_SIZE; @@ -1393,7 +1399,6 @@ static int cc_mac_finup(struct ahash_request *req) /* Get final MAC result */ hw_desc_init(&desc[idx]); - /* TODO */ set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); @@ -1448,8 +1453,8 @@ static int cc_mac_digest(struct ahash_request *req) } /* Setup request structure */ - cc_req.user_cb = (void *)cc_digest_complete; - cc_req.user_arg = (void *)req; + cc_req.user_cb = cc_digest_complete; + cc_req.user_arg = req; if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) { key_len = CC_AES_128_BIT_KEY_SIZE; @@ -1820,7 +1825,7 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct 
cc_hash_template *template, struct crypto_alg *alg; struct ahash_alg *halg; - t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL); + t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL); if (!t_crypto_alg) return ERR_PTR(-ENOMEM); @@ -1857,104 +1862,85 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template, return t_crypto_alg; } +static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data, + unsigned int size, u32 *sram_buff_ofs) +{ + struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)]; + unsigned int larval_seq_len = 0; + int rc; + + cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data), + larval_seq, &larval_seq_len); + rc = send_request_init(drvdata, larval_seq, larval_seq_len); + if (rc) + return rc; + + *sram_buff_ofs += size; + return 0; +} + int cc_init_hash_sram(struct cc_drvdata *drvdata) { struct cc_hash_handle *hash_handle = drvdata->hash_handle; - cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr; - unsigned int larval_seq_len = 0; - struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)]; + u32 sram_buff_ofs = hash_handle->digest_len_sram_addr; bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712); bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713); int rc = 0; /* Copy-to-sram digest-len */ - cc_set_sram_desc(cc_digest_len_init, sram_buff_ofs, - ARRAY_SIZE(cc_digest_len_init), larval_seq, - &larval_seq_len); - rc = send_request_init(drvdata, larval_seq, larval_seq_len); + rc = cc_init_copy_sram(drvdata, cc_digest_len_init, + sizeof(cc_digest_len_init), &sram_buff_ofs); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(cc_digest_len_init); - larval_seq_len = 0; - if (large_sha_supported) { /* Copy-to-sram digest-len for sha384/512 */ - cc_set_sram_desc(cc_digest_len_sha512_init, sram_buff_ofs, - ARRAY_SIZE(cc_digest_len_sha512_init), - larval_seq, &larval_seq_len); - rc = send_request_init(drvdata, larval_seq, larval_seq_len); + rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init, + sizeof(cc_digest_len_sha512_init), + &sram_buff_ofs); if (rc) goto init_digest_const_err; - - sram_buff_ofs += sizeof(cc_digest_len_sha512_init); - larval_seq_len = 0; } /* The initial digests offset */ hash_handle->larval_digest_sram_addr = sram_buff_ofs; /* Copy-to-sram initial SHA* digests */ - cc_set_sram_desc(cc_md5_init, sram_buff_ofs, ARRAY_SIZE(cc_md5_init), - larval_seq, &larval_seq_len); - rc = send_request_init(drvdata, larval_seq, larval_seq_len); + rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init), + &sram_buff_ofs); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(cc_md5_init); - larval_seq_len = 0; - cc_set_sram_desc(cc_sha1_init, sram_buff_ofs, - ARRAY_SIZE(cc_sha1_init), larval_seq, - &larval_seq_len); - rc = send_request_init(drvdata, larval_seq, larval_seq_len); + rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init), + &sram_buff_ofs); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(cc_sha1_init); - larval_seq_len = 0; - cc_set_sram_desc(cc_sha224_init, sram_buff_ofs, - ARRAY_SIZE(cc_sha224_init), larval_seq, - &larval_seq_len); - rc = send_request_init(drvdata, larval_seq, larval_seq_len); + rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init), + &sram_buff_ofs); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(cc_sha224_init); - larval_seq_len = 0; - cc_set_sram_desc(cc_sha256_init, sram_buff_ofs, - ARRAY_SIZE(cc_sha256_init), larval_seq, - &larval_seq_len); - rc 
= send_request_init(drvdata, larval_seq, larval_seq_len); + rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init), + &sram_buff_ofs); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(cc_sha256_init); - larval_seq_len = 0; if (sm3_supported) { - cc_set_sram_desc(cc_sm3_init, sram_buff_ofs, - ARRAY_SIZE(cc_sm3_init), larval_seq, - &larval_seq_len); - rc = send_request_init(drvdata, larval_seq, larval_seq_len); + rc = cc_init_copy_sram(drvdata, cc_sm3_init, + sizeof(cc_sm3_init), &sram_buff_ofs); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(cc_sm3_init); - larval_seq_len = 0; } if (large_sha_supported) { - cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs, - (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq, - &larval_seq_len); - rc = send_request_init(drvdata, larval_seq, larval_seq_len); + rc = cc_init_copy_sram(drvdata, cc_sha384_init, + sizeof(cc_sha384_init), &sram_buff_ofs); if (rc) goto init_digest_const_err; - sram_buff_ofs += sizeof(cc_sha384_init); - larval_seq_len = 0; - cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs, - (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq, - &larval_seq_len); - rc = send_request_init(drvdata, larval_seq, larval_seq_len); + rc = cc_init_copy_sram(drvdata, cc_sha512_init, + sizeof(cc_sha512_init), &sram_buff_ofs); if (rc) goto init_digest_const_err; } @@ -1963,38 +1949,16 @@ init_digest_const_err: return rc; } -static void __init cc_swap_dwords(u32 *buf, unsigned long size) -{ - int i; - u32 tmp; - - for (i = 0; i < size; i += 2) { - tmp = buf[i]; - buf[i] = buf[i + 1]; - buf[i + 1] = tmp; - } -} - -/* - * Due to the way the HW works we need to swap every - * double word in the SHA384 and SHA512 larval hashes - */ -void __init cc_hash_global_init(void) -{ - cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2)); - cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2)); -} - int cc_hash_alloc(struct cc_drvdata *drvdata) { struct cc_hash_handle *hash_handle; - cc_sram_addr_t sram_buff; + u32 sram_buff; u32 sram_size_to_alloc; struct device *dev = drvdata_to_dev(drvdata); int rc = 0; int alg; - hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL); + hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL); if (!hash_handle) return -ENOMEM; @@ -2016,7 +1980,6 @@ int cc_hash_alloc(struct cc_drvdata *drvdata) sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc); if (sram_buff == NULL_SRAM_ADDR) { - dev_err(dev, "SRAM pool exhausted\n"); rc = -ENOMEM; goto fail; } @@ -2056,12 +2019,10 @@ int cc_hash_alloc(struct cc_drvdata *drvdata) if (rc) { dev_err(dev, "%s alg registration failed\n", driver_hash[alg].driver_name); - kfree(t_alg); goto fail; - } else { - list_add_tail(&t_alg->entry, - &hash_handle->hash_list); } + + list_add_tail(&t_alg->entry, &hash_handle->hash_list); } if (hw_mode == DRV_CIPHER_XCBC_MAC || hw_mode == DRV_CIPHER_CMAC) @@ -2081,18 +2042,16 @@ int cc_hash_alloc(struct cc_drvdata *drvdata) if (rc) { dev_err(dev, "%s alg registration failed\n", driver_hash[alg].driver_name); - kfree(t_alg); goto fail; - } else { - list_add_tail(&t_alg->entry, &hash_handle->hash_list); } + + list_add_tail(&t_alg->entry, &hash_handle->hash_list); } return 0; fail: - kfree(drvdata->hash_handle); - drvdata->hash_handle = NULL; + cc_hash_free(drvdata); return rc; } @@ -2101,17 +2060,12 @@ int cc_hash_free(struct cc_drvdata *drvdata) struct cc_hash_alg *t_hash_alg, *hash_n; struct cc_hash_handle *hash_handle = drvdata->hash_handle; - if (hash_handle) { - 
list_for_each_entry_safe(t_hash_alg, hash_n, - &hash_handle->hash_list, entry) { - crypto_unregister_ahash(&t_hash_alg->ahash_alg); - list_del(&t_hash_alg->entry); - kfree(t_hash_alg); - } - - kfree(hash_handle); - drvdata->hash_handle = NULL; + list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, + entry) { + crypto_unregister_ahash(&t_hash_alg->ahash_alg); + list_del(&t_hash_alg->entry); } + return 0; } @@ -2272,22 +2226,23 @@ static const void *cc_larval_digest(struct device *dev, u32 mode) } } -/*! - * Gets the address of the initial digest in SRAM +/** + * cc_larval_digest_addr() - Get the address of the initial digest in SRAM * according to the given hash mode * - * \param drvdata - * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256 + * @drvdata: Associated device driver context + * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256 * - * \return u32 The address of the initial digest in SRAM + * Return: + * The address of the initial digest in SRAM */ -cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode) +u32 cc_larval_digest_addr(void *drvdata, u32 mode) { struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata; struct cc_hash_handle *hash_handle = _drvdata->hash_handle; struct device *dev = drvdata_to_dev(_drvdata); bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713); - cc_sram_addr_t addr; + u32 addr; switch (mode) { case DRV_HASH_NULL: @@ -2339,12 +2294,11 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode) return hash_handle->larval_digest_sram_addr; } -cc_sram_addr_t -cc_digest_len_addr(void *drvdata, u32 mode) +u32 cc_digest_len_addr(void *drvdata, u32 mode) { struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata; struct cc_hash_handle *hash_handle = _drvdata->hash_handle; - cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr; + u32 digest_len_addr = hash_handle->digest_len_sram_addr; switch (mode) { case DRV_HASH_SHA1: diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h index 0d6dc61484d7..3d0f2179e07e 100644 --- a/drivers/crypto/ccree/cc_hash.h +++ b/drivers/crypto/ccree/cc_hash.h @@ -80,30 +80,27 @@ int cc_hash_alloc(struct cc_drvdata *drvdata); int cc_init_hash_sram(struct cc_drvdata *drvdata); int cc_hash_free(struct cc_drvdata *drvdata); -/*! - * Gets the initial digest length +/** + * cc_digest_len_addr() - Gets the initial digest length * - * \param drvdata - * \param mode The Hash mode. Supported modes: - * MD5/SHA1/SHA224/SHA256/SHA384/SHA512 + * @drvdata: Associated device driver context + * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512 * - * \return u32 returns the address of the initial digest length in SRAM + * Return: + * Returns the address of the initial digest length in SRAM */ -cc_sram_addr_t -cc_digest_len_addr(void *drvdata, u32 mode); +u32 cc_digest_len_addr(void *drvdata, u32 mode); -/*! - * Gets the address of the initial digest in SRAM +/** + * cc_larval_digest_addr() - Gets the address of the initial digest in SRAM * according to the given hash mode * - * \param drvdata - * \param mode The Hash mode. Supported modes: - * MD5/SHA1/SHA224/SHA256/SHA384/SHA512 + * @drvdata: Associated device driver context + * @mode: The Hash mode. 
Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512 * - * \return u32 The address of the initial digest in SRAM + * Return: + * The address of the initial digest in SRAM */ -cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode); - -void cc_hash_global_init(void); +u32 cc_larval_digest_addr(void *drvdata, u32 mode); #endif /*__CC_HASH_H__*/ diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h index 9f4db9956e91..15df58c66911 100644 --- a/drivers/crypto/ccree/cc_hw_queue_defs.h +++ b/drivers/crypto/ccree/cc_hw_queue_defs.h @@ -17,46 +17,43 @@ /* Define max. available slots in HW queue */ #define HW_QUEUE_SLOTS_MAX 15 -#define CC_REG_LOW(word, name) \ - (CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SHIFT) - -#define CC_REG_HIGH(word, name) \ - (CC_REG_LOW(word, name) + \ - CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SIZE - 1) - -#define CC_GENMASK(word, name) \ - GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name)) - -#define WORD0_VALUE CC_GENMASK(0, VALUE) -#define WORD0_CPP_CIPHER_MODE CC_GENMASK(0, CPP_CIPHER_MODE) -#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE) -#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE) -#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE) -#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST) -#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT) -#define WORD1_LOCK_QUEUE CC_GENMASK(1, LOCK_QUEUE) -#define WORD2_VALUE CC_GENMASK(2, VALUE) -#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE) -#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND) -#define WORD3_DOUT_SIZE CC_GENMASK(3, DOUT_SIZE) -#define WORD3_HASH_XOR_BIT CC_GENMASK(3, HASH_XOR_BIT) -#define WORD3_NS_BIT CC_GENMASK(3, NS_BIT) -#define WORD3_QUEUE_LAST_IND CC_GENMASK(3, QUEUE_LAST_IND) -#define WORD4_ACK_NEEDED CC_GENMASK(4, ACK_NEEDED) -#define WORD4_AES_SEL_N_HASH CC_GENMASK(4, AES_SEL_N_HASH) -#define WORD4_AES_XOR_CRYPTO_KEY CC_GENMASK(4, AES_XOR_CRYPTO_KEY) -#define WORD4_BYTES_SWAP CC_GENMASK(4, BYTES_SWAP) -#define WORD4_CIPHER_CONF0 CC_GENMASK(4, CIPHER_CONF0) -#define WORD4_CIPHER_CONF1 CC_GENMASK(4, CIPHER_CONF1) -#define WORD4_CIPHER_CONF2 CC_GENMASK(4, CIPHER_CONF2) -#define WORD4_CIPHER_DO CC_GENMASK(4, CIPHER_DO) -#define WORD4_CIPHER_MODE CC_GENMASK(4, CIPHER_MODE) -#define WORD4_CMAC_SIZE0 CC_GENMASK(4, CMAC_SIZE0) -#define WORD4_DATA_FLOW_MODE CC_GENMASK(4, DATA_FLOW_MODE) -#define WORD4_KEY_SIZE CC_GENMASK(4, KEY_SIZE) -#define WORD4_SETUP_OPERATION CC_GENMASK(4, SETUP_OPERATION) -#define WORD5_DIN_ADDR_HIGH CC_GENMASK(5, DIN_ADDR_HIGH) -#define WORD5_DOUT_ADDR_HIGH CC_GENMASK(5, DOUT_ADDR_HIGH) +#define CC_REG_LOW(name) (name ## _BIT_SHIFT) +#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1) +#define CC_GENMASK(name) GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name)) + +#define CC_HWQ_GENMASK(word, field) \ + CC_GENMASK(CC_DSCRPTR_QUEUE_WORD ## word ## _ ## field) + +#define WORD0_VALUE CC_HWQ_GENMASK(0, VALUE) +#define WORD0_CPP_CIPHER_MODE CC_HWQ_GENMASK(0, CPP_CIPHER_MODE) +#define WORD1_DIN_CONST_VALUE CC_HWQ_GENMASK(1, DIN_CONST_VALUE) +#define WORD1_DIN_DMA_MODE CC_HWQ_GENMASK(1, DIN_DMA_MODE) +#define WORD1_DIN_SIZE CC_HWQ_GENMASK(1, DIN_SIZE) +#define WORD1_NOT_LAST CC_HWQ_GENMASK(1, NOT_LAST) +#define WORD1_NS_BIT CC_HWQ_GENMASK(1, NS_BIT) +#define WORD1_LOCK_QUEUE CC_HWQ_GENMASK(1, LOCK_QUEUE) +#define WORD2_VALUE CC_HWQ_GENMASK(2, VALUE) +#define WORD3_DOUT_DMA_MODE CC_HWQ_GENMASK(3, DOUT_DMA_MODE) +#define WORD3_DOUT_LAST_IND CC_HWQ_GENMASK(3, DOUT_LAST_IND) +#define WORD3_DOUT_SIZE 
CC_HWQ_GENMASK(3, DOUT_SIZE) +#define WORD3_HASH_XOR_BIT CC_HWQ_GENMASK(3, HASH_XOR_BIT) +#define WORD3_NS_BIT CC_HWQ_GENMASK(3, NS_BIT) +#define WORD3_QUEUE_LAST_IND CC_HWQ_GENMASK(3, QUEUE_LAST_IND) +#define WORD4_ACK_NEEDED CC_HWQ_GENMASK(4, ACK_NEEDED) +#define WORD4_AES_SEL_N_HASH CC_HWQ_GENMASK(4, AES_SEL_N_HASH) +#define WORD4_AES_XOR_CRYPTO_KEY CC_HWQ_GENMASK(4, AES_XOR_CRYPTO_KEY) +#define WORD4_BYTES_SWAP CC_HWQ_GENMASK(4, BYTES_SWAP) +#define WORD4_CIPHER_CONF0 CC_HWQ_GENMASK(4, CIPHER_CONF0) +#define WORD4_CIPHER_CONF1 CC_HWQ_GENMASK(4, CIPHER_CONF1) +#define WORD4_CIPHER_CONF2 CC_HWQ_GENMASK(4, CIPHER_CONF2) +#define WORD4_CIPHER_DO CC_HWQ_GENMASK(4, CIPHER_DO) +#define WORD4_CIPHER_MODE CC_HWQ_GENMASK(4, CIPHER_MODE) +#define WORD4_CMAC_SIZE0 CC_HWQ_GENMASK(4, CMAC_SIZE0) +#define WORD4_DATA_FLOW_MODE CC_HWQ_GENMASK(4, DATA_FLOW_MODE) +#define WORD4_KEY_SIZE CC_HWQ_GENMASK(4, KEY_SIZE) +#define WORD4_SETUP_OPERATION CC_HWQ_GENMASK(4, SETUP_OPERATION) +#define WORD5_DIN_ADDR_HIGH CC_HWQ_GENMASK(5, DIN_ADDR_HIGH) +#define WORD5_DOUT_ADDR_HIGH CC_HWQ_GENMASK(5, DOUT_ADDR_HIGH) /****************************************************************************** * TYPE DEFINITIONS @@ -207,31 +204,32 @@ enum cc_hash_cipher_pad { /* Descriptor packing macros */ /*****************************/ -/* - * Init a HW descriptor struct - * @pdesc: pointer HW descriptor struct +/** + * hw_desc_init() - Init a HW descriptor struct + * @pdesc: pointer to HW descriptor struct */ static inline void hw_desc_init(struct cc_hw_desc *pdesc) { memset(pdesc, 0, sizeof(struct cc_hw_desc)); } -/* - * Indicates the end of current HW descriptors flow and release the HW engines. +/** + * set_queue_last_ind_bit() - Indicate the end of current HW descriptors flow + * and release the HW engines. * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct */ static inline void set_queue_last_ind_bit(struct cc_hw_desc *pdesc) { pdesc->word[3] |= FIELD_PREP(WORD3_QUEUE_LAST_IND, 1); } -/* - * Set the DIN field of a HW descriptors +/** + * set_din_type() - Set the DIN field of a HW descriptor * - * @pdesc: pointer HW descriptor struct - * @dma_mode: dmaMode The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT - * @addr: dinAdr DIN address + * @pdesc: Pointer to HW descriptor struct + * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT + * @addr: DIN address * @size: Data size in bytes * @axi_sec: AXI secure bit */ @@ -239,20 +237,20 @@ static inline void set_din_type(struct cc_hw_desc *pdesc, enum cc_dma_mode dma_mode, dma_addr_t addr, u32 size, enum cc_axi_sec axi_sec) { - pdesc->word[0] = (u32)addr; + pdesc->word[0] = lower_32_bits(addr); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, ((u16)(addr >> 32))); + pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, upper_32_bits(addr)); #endif pdesc->word[1] |= FIELD_PREP(WORD1_DIN_DMA_MODE, dma_mode) | FIELD_PREP(WORD1_DIN_SIZE, size) | FIELD_PREP(WORD1_NS_BIT, axi_sec); } -/* - * Set the DIN field of a HW descriptors to NO DMA mode. +/** + * set_din_no_dma() - Set the DIN field of a HW descriptor to NO DMA mode. * Used for NOP descriptor, register patches and other special modes. 
* - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct * @addr: DIN address * @size: Data size in bytes */ @@ -262,14 +260,11 @@ static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size) pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size); } -/* - * Setup the special CPP descriptor +/** + * set_cpp_crypto_key() - Setup the special CPP descriptor * - * @pdesc: pointer HW descriptor struct - * @alg: cipher used (AES / SM4) - * @mode: mode used (CTR or CBC) - * @slot: slot number - * @ksize: key size + * @pdesc: Pointer to HW descriptor struct + * @slot: Slot number */ static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot) { @@ -281,27 +276,26 @@ static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot) pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, slot); } -/* - * Set the DIN field of a HW descriptors to SRAM mode. +/** + * set_din_sram() - Set the DIN field of a HW descriptor to SRAM mode. * Note: No need to check SRAM alignment since host requests do not use SRAM and - * adaptor will enforce alignment check. + * the adaptor will enforce alignment checks. * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct * @addr: DIN address - * @size Data size in bytes + * @size: Data size in bytes */ -static inline void set_din_sram(struct cc_hw_desc *pdesc, dma_addr_t addr, - u32 size) +static inline void set_din_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size) { - pdesc->word[0] = (u32)addr; + pdesc->word[0] = addr; pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size) | FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM); } -/* - * Set the DIN field of a HW descriptors to CONST mode +/** + * set_din_const() - Set the DIN field of a HW descriptor to CONST mode * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct * @val: DIN const value * @size: Data size in bytes */ @@ -313,20 +307,20 @@ static inline void set_din_const(struct cc_hw_desc *pdesc, u32 val, u32 size) FIELD_PREP(WORD1_DIN_SIZE, size); } -/* - * Set the DIN not last input data indicator +/** + * set_din_not_last_indication() - Set the DIN not last input data indicator * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct */ static inline void set_din_not_last_indication(struct cc_hw_desc *pdesc) { pdesc->word[1] |= FIELD_PREP(WORD1_NOT_LAST, 1); } -/* - * Set the DOUT field of a HW descriptors +/** + * set_dout_type() - Set the DOUT field of a HW descriptor * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT * @addr: DOUT address * @size: Data size in bytes @@ -336,24 +330,24 @@ static inline void set_dout_type(struct cc_hw_desc *pdesc, enum cc_dma_mode dma_mode, dma_addr_t addr, u32 size, enum cc_axi_sec axi_sec) { - pdesc->word[2] = (u32)addr; + pdesc->word[2] = lower_32_bits(addr); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, ((u16)(addr >> 32))); + pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, upper_32_bits(addr)); #endif pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, dma_mode) | FIELD_PREP(WORD3_DOUT_SIZE, size) | FIELD_PREP(WORD3_NS_BIT, axi_sec); } -/* - * Set the DOUT field of a HW descriptors to DLLI type +/** + * set_dout_dlli() - Set the DOUT field of a HW descriptor to DLLI type * The LAST INDICATION is provided by the user * - * @pdesc pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor 
struct * @addr: DOUT address * @size: Data size in bytes - * @last_ind: The last indication bit * @axi_sec: AXI secure bit + * @last_ind: The last indication bit */ static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr, u32 size, enum cc_axi_sec axi_sec, @@ -363,29 +357,28 @@ static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr, pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind); } -/* - * Set the DOUT field of a HW descriptors to DLLI type +/** + * set_dout_mlli() - Set the DOUT field of a HW descriptor to MLLI type * The LAST INDICATION is provided by the user * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct * @addr: DOUT address * @size: Data size in bytes - * @last_ind: The last indication bit * @axi_sec: AXI secure bit + * @last_ind: The last indication bit */ -static inline void set_dout_mlli(struct cc_hw_desc *pdesc, dma_addr_t addr, - u32 size, enum cc_axi_sec axi_sec, - bool last_ind) +static inline void set_dout_mlli(struct cc_hw_desc *pdesc, u32 addr, u32 size, + enum cc_axi_sec axi_sec, bool last_ind) { set_dout_type(pdesc, DMA_MLLI, addr, size, axi_sec); pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind); } -/* - * Set the DOUT field of a HW descriptors to NO DMA mode. +/** + * set_dout_no_dma() - Set the DOUT field of a HW descriptor to NO DMA mode. * Used for NOP descriptor, register patches and other special modes. * - * @pdesc: pointer HW descriptor struct + * @pdesc: pointer to HW descriptor struct * @addr: DOUT address * @size: Data size in bytes * @write_enable: Enables a write operation to a register @@ -398,54 +391,55 @@ static inline void set_dout_no_dma(struct cc_hw_desc *pdesc, u32 addr, FIELD_PREP(WORD3_DOUT_LAST_IND, write_enable); } -/* - * Set the word for the XOR operation. +/** + * set_xor_val() - Set the word for the XOR operation. 
* - * @pdesc: pointer HW descriptor struct - * @val: xor data value + * @pdesc: Pointer to HW descriptor struct + * @val: XOR data value */ static inline void set_xor_val(struct cc_hw_desc *pdesc, u32 val) { pdesc->word[2] = val; } -/* - * Sets the XOR indicator bit in the descriptor +/** + * set_xor_active() - Set the XOR indicator bit in the descriptor * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct */ static inline void set_xor_active(struct cc_hw_desc *pdesc) { pdesc->word[3] |= FIELD_PREP(WORD3_HASH_XOR_BIT, 1); } -/* - * Select the AES engine instead of HASH engine when setting up combined mode - * with AES XCBC MAC +/** + * set_aes_not_hash_mode() - Select the AES engine instead of HASH engine when + * setting up combined mode with AES XCBC MAC * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct */ static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc) { pdesc->word[4] |= FIELD_PREP(WORD4_AES_SEL_N_HASH, 1); } -/* - * Set aes xor crypto key, this in some secenrios select SM3 engine +/** + * set_aes_xor_crypto_key() - Set aes xor crypto key, which in some scenarios + * selects the SM3 engine * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct */ static inline void set_aes_xor_crypto_key(struct cc_hw_desc *pdesc) { pdesc->word[4] |= FIELD_PREP(WORD4_AES_XOR_CRYPTO_KEY, 1); } -/* - * Set the DOUT field of a HW descriptors to SRAM mode +/** + * set_dout_sram() - Set the DOUT field of a HW descriptor to SRAM mode * Note: No need to check SRAM alignment since host requests do not use SRAM and - * adaptor will enforce alignment check. + * the adaptor will enforce alignment checks. * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct * @addr: DOUT address * @size: Data size in bytes */ @@ -456,32 +450,34 @@ static inline void set_dout_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size) FIELD_PREP(WORD3_DOUT_SIZE, size); } -/* - * Sets the data unit size for XEX mode in data_out_addr[15:0] +/** + * set_xex_data_unit_size() - Set the data unit size for XEX mode in + * data_out_addr[15:0] * - * @pdesc: pDesc pointer HW descriptor struct - * @size: data unit size for XEX mode + * @pdesc: Pointer to HW descriptor struct + * @size: Data unit size for XEX mode */ static inline void set_xex_data_unit_size(struct cc_hw_desc *pdesc, u32 size) { pdesc->word[2] = size; } -/* - * Set the number of rounds for Multi2 in data_out_addr[15:0] +/** + * set_multi2_num_rounds() - Set the number of rounds for Multi2 in + * data_out_addr[15:0] * - * @pdesc: pointer HW descriptor struct - * @num: number of rounds for Multi2 + * @pdesc: Pointer to HW descriptor struct + * @num: Number of rounds for Multi2 */ static inline void set_multi2_num_rounds(struct cc_hw_desc *pdesc, u32 num) { pdesc->word[2] = num; } -/* - * Set the flow mode. +/** + * set_flow_mode() - Set the flow mode. * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct * @mode: Any one of the modes defined in [CC7x-DESC] */ static inline void set_flow_mode(struct cc_hw_desc *pdesc, @@ -490,22 +486,22 @@ static inline void set_flow_mode(struct cc_hw_desc *pdesc, pdesc->word[4] |= FIELD_PREP(WORD4_DATA_FLOW_MODE, mode); } -/* - * Set the cipher mode. +/** + * set_cipher_mode() - Set the cipher mode. 
* - * @pdesc: pointer HW descriptor struct - * @mode: Any one of the modes defined in [CC7x-DESC] + * @pdesc: Pointer to HW descriptor struct + * @mode: Any one of the modes defined in [CC7x-DESC] */ static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode) { pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode); } -/* - * Set the cipher mode for hash algorithms. +/** + * set_hash_cipher_mode() - Set the cipher mode for hash algorithms. * - * @pdesc: pointer HW descriptor struct - * @cipher_mode: Any one of the modes defined in [CC7x-DESC] + * @pdesc: Pointer to HW descriptor struct + * @cipher_mode: Any one of the modes defined in [CC7x-DESC] * @hash_mode: specifies which hash is being handled */ static inline void set_hash_cipher_mode(struct cc_hw_desc *pdesc, @@ -517,10 +513,10 @@ static inline void set_hash_cipher_mode(struct cc_hw_desc *pdesc, set_aes_xor_crypto_key(pdesc); } -/* - * Set the cipher configuration fields. +/** + * set_cipher_config0() - Set the cipher configuration fields. * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct * @mode: Any one of the modes defined in [CC7x-DESC] */ static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode) @@ -528,11 +524,11 @@ static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode) { pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode); } -/* - * Set the cipher configuration fields. +/** + * set_cipher_config1() - Set the cipher configuration fields. * - * @pdesc: pointer HW descriptor struct - * @config: Any one of the modes defined in [CC7x-DESC] + * @pdesc: Pointer to HW descriptor struct + * @config: Padding mode */ static inline void set_cipher_config1(struct cc_hw_desc *pdesc, enum cc_hash_conf_pad config) @@ -540,10 +536,10 @@ static inline void set_cipher_config1(struct cc_hw_desc *pdesc, pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF1, config); } -/* - * Set HW key configuration fields. +/** + * set_hw_crypto_key() - Set HW key configuration fields. * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct * @hw_key: The HW key slot as defined in enum cc_hw_crypto_key */ static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc, @@ -555,64 +551,64 @@ static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc, (hw_key >> HW_KEY_SHIFT_CIPHER_CFG2)); } -/* - * Set byte order of all setup-finalize descriptors. +/** + * set_bytes_swap() - Set byte order of all setup-finalize descriptors. * - * @pdesc: pointer HW descriptor struct - * @config: Any one of the modes defined in [CC7x-DESC] + * @pdesc: Pointer to HW descriptor struct + * @config: True to enable byte swapping */ static inline void set_bytes_swap(struct cc_hw_desc *pdesc, bool config) { pdesc->word[4] |= FIELD_PREP(WORD4_BYTES_SWAP, config); } -/* - * Set CMAC_SIZE0 mode. +/** + * set_cmac_size0_mode() - Set CMAC_SIZE0 mode. * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct */ static inline void set_cmac_size0_mode(struct cc_hw_desc *pdesc) { pdesc->word[4] |= FIELD_PREP(WORD4_CMAC_SIZE0, 1); } -/* - * Set key size descriptor field. +/** + * set_key_size() - Set key size descriptor field. * - * @pdesc: pointer HW descriptor struct - * @size: key size in bytes (NOT size code) + * @pdesc: Pointer to HW descriptor struct + * @size: Key size in bytes (NOT size code) */ static inline void set_key_size(struct cc_hw_desc *pdesc, u32 size) { pdesc->word[4] |= FIELD_PREP(WORD4_KEY_SIZE, size); } -/* - * Set AES key size. 
+/** + * set_key_size_aes() - Set AES key size. * - * @pdesc: pointer HW descriptor struct - * @size: key size in bytes (NOT size code) + * @pdesc: Pointer to HW descriptor struct + * @size: Key size in bytes (NOT size code) */ static inline void set_key_size_aes(struct cc_hw_desc *pdesc, u32 size) { set_key_size(pdesc, ((size >> 3) - 2)); } -/* - * Set DES key size. +/** + * set_key_size_des() - Set DES key size. * - * @pdesc: pointer HW descriptor struct - * @size: key size in bytes (NOT size code) + * @pdesc: Pointer to HW descriptor struct + * @size: Key size in bytes (NOT size code) */ static inline void set_key_size_des(struct cc_hw_desc *pdesc, u32 size) { set_key_size(pdesc, ((size >> 3) - 1)); } -/* - * Set the descriptor setup mode +/** + * set_setup_mode() - Set the descriptor setup mode * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct * @mode: Any one of the setup modes defined in [CC7x-DESC] */ static inline void set_setup_mode(struct cc_hw_desc *pdesc, @@ -621,10 +617,10 @@ static inline void set_setup_mode(struct cc_hw_desc *pdesc, pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, mode); } -/* - * Set the descriptor cipher DO +/** + * set_cipher_do() - Set the descriptor cipher DO * - * @pdesc: pointer HW descriptor struct + * @pdesc: Pointer to HW descriptor struct * @config: Any one of the cipher do defined in [CC7x-DESC] */ static inline void set_cipher_do(struct cc_hw_desc *pdesc, diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index 24c368b866f6..d39e1664fc7e 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c @@ -15,29 +15,25 @@ #define POWER_DOWN_ENABLE 0x01 #define POWER_DOWN_DISABLE 0x00 -const struct dev_pm_ops ccree_pm = { - SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL) -}; - -int cc_pm_suspend(struct device *dev) +static int cc_pm_suspend(struct device *dev) { struct cc_drvdata *drvdata = dev_get_drvdata(dev); dev_dbg(dev, "set HOST_POWER_DOWN_EN\n"); fini_cc_regs(drvdata); cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE); - cc_clk_off(drvdata); + clk_disable_unprepare(drvdata->clk); return 0; } -int cc_pm_resume(struct device *dev) +static int cc_pm_resume(struct device *dev) { int rc; struct cc_drvdata *drvdata = dev_get_drvdata(dev); dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n"); /* Enables the device source clk */ - rc = cc_clk_on(drvdata); + rc = clk_prepare_enable(drvdata->clk); if (rc) { dev_err(dev, "failed getting clock back on. We're toast.\n"); return rc; @@ -62,53 +58,19 @@ int cc_pm_resume(struct device *dev) return 0; } +const struct dev_pm_ops ccree_pm = { + SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL) +}; + int cc_pm_get(struct device *dev) { - int rc = 0; - struct cc_drvdata *drvdata = dev_get_drvdata(dev); - - if (drvdata->pm_on) - rc = pm_runtime_get_sync(dev); + int rc = pm_runtime_get_sync(dev); return (rc == 1 ? 
0 : rc); } void cc_pm_put_suspend(struct device *dev) { - struct cc_drvdata *drvdata = dev_get_drvdata(dev); - - if (drvdata->pm_on) { - pm_runtime_mark_last_busy(dev); - pm_runtime_put_autosuspend(dev); - } -} - -bool cc_pm_is_dev_suspended(struct device *dev) -{ - /* check device state using runtime api */ - return pm_runtime_suspended(dev); -} - -int cc_pm_init(struct cc_drvdata *drvdata) -{ - struct device *dev = drvdata_to_dev(drvdata); - - /* must be before the enabling to avoid redundant suspending */ - pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); - pm_runtime_use_autosuspend(dev); - /* set us as active - note we won't do PM ops until cc_pm_go()! */ - return pm_runtime_set_active(dev); -} - -/* enable the PM module*/ -void cc_pm_go(struct cc_drvdata *drvdata) -{ - pm_runtime_enable(drvdata_to_dev(drvdata)); - drvdata->pm_on = true; -} - -void cc_pm_fini(struct cc_drvdata *drvdata) -{ - pm_runtime_disable(drvdata_to_dev(drvdata)); - drvdata->pm_on = false; + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); } diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h index 80a18e11cae4..50cac33de118 100644 --- a/drivers/crypto/ccree/cc_pm.h +++ b/drivers/crypto/ccree/cc_pm.h @@ -15,26 +15,11 @@ extern const struct dev_pm_ops ccree_pm; -int cc_pm_init(struct cc_drvdata *drvdata); -void cc_pm_go(struct cc_drvdata *drvdata); -void cc_pm_fini(struct cc_drvdata *drvdata); -int cc_pm_suspend(struct device *dev); -int cc_pm_resume(struct device *dev); int cc_pm_get(struct device *dev); void cc_pm_put_suspend(struct device *dev); -bool cc_pm_is_dev_suspended(struct device *dev); #else -static inline int cc_pm_init(struct cc_drvdata *drvdata) -{ - return 0; -} - -static inline void cc_pm_go(struct cc_drvdata *drvdata) {} - -static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} - static inline int cc_pm_get(struct device *dev) { return 0; @@ -42,12 +27,6 @@ static inline int cc_pm_get(struct device *dev) static inline void cc_pm_put_suspend(struct device *dev) {} -static inline bool cc_pm_is_dev_suspended(struct device *dev) -{ - /* if PM not supported device is never suspend */ - return false; -} - #endif #endif /*__POWER_MGR_H__*/ diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c index 9d61e6f12478..1d7649ecf44e 100644 --- a/drivers/crypto/ccree/cc_request_mgr.c +++ b/drivers/crypto/ccree/cc_request_mgr.c @@ -206,12 +206,13 @@ static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[], } } -/*! - * Completion will take place if and only if user requested completion - * by cc_send_sync_request(). +/** + * request_mgr_complete() - Completion will take place if and only if user + * requested completion by cc_send_sync_request(). * - * \param dev - * \param dx_compl_h The completion event to signal + * @dev: Device pointer + * @dx_compl_h: The completion event to signal + * @dummy: unused error code */ static void request_mgr_complete(struct device *dev, void *dx_compl_h, int dummy) @@ -264,15 +265,15 @@ static int cc_queues_status(struct cc_drvdata *drvdata, return -ENOSPC; } -/*! - * Enqueue caller request to crypto hardware. +/** + * cc_do_send_request() - Enqueue caller request to crypto hardware. 
* Need to be called with HW lock held and PM running * - * \param drvdata - * \param cc_req The request to enqueue - * \param desc The crypto sequence - * \param len The crypto sequence length - * \param add_comp If "true": add an artificial dout DMA to mark completion + * @drvdata: Associated device driver context + * @cc_req: The request to enqueue + * @desc: The crypto sequence + * @len: The crypto sequence length + * @add_comp: If "true": add an artificial dout DMA to mark completion * */ static void cc_do_send_request(struct cc_drvdata *drvdata, @@ -295,7 +296,6 @@ static void cc_do_send_request(struct cc_drvdata *drvdata, req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req; req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1); - /* TODO: Use circ_buf.h ? */ dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head); @@ -377,7 +377,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata) rc = cc_queues_status(drvdata, mgr, bli->len); if (rc) { /* - * There is still not room in the FIFO for + * There is still no room in the FIFO for * this request. Bail out. We'll return here * on the next completion irq. */ @@ -476,10 +476,6 @@ int cc_send_sync_request(struct cc_drvdata *drvdata, break; spin_unlock_bh(&mgr->hw_lock); - if (rc != -EAGAIN) { - cc_pm_put_suspend(dev); - return rc; - } wait_for_completion_interruptible(&drvdata->hw_queue_avail); reinit_completion(&drvdata->hw_queue_avail); } @@ -490,16 +486,18 @@ int cc_send_sync_request(struct cc_drvdata *drvdata, return 0; } -/*! - * Enqueue caller request to crypto hardware during init process. - * assume this function is not called in middle of a flow, +/** + * send_request_init() - Enqueue caller request to crypto hardware during init + * process. + * Assume this function is not called in the middle of a flow, * since we set QUEUE_LAST_IND flag in the last descriptor. * - * \param drvdata - * \param desc The crypto sequence - * \param len The crypto sequence length + * @drvdata: Associated device driver context + * @desc: The crypto sequence + * @len: The crypto sequence length * - * \return int Returns "0" upon success + * Return: + * Returns "0" upon success */ int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc, unsigned int len) diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h index ff7746aaaf35..ae25ca843dce 100644 --- a/drivers/crypto/ccree/cc_request_mgr.h +++ b/drivers/crypto/ccree/cc_request_mgr.h @@ -12,18 +12,17 @@ int cc_req_mgr_init(struct cc_drvdata *drvdata); -/*! - * Enqueue caller request to crypto hardware. +/** + * cc_send_request() - Enqueue caller request to crypto hardware. * - * \param drvdata - * \param cc_req The request to enqueue - * \param desc The crypto sequence - * \param len The crypto sequence length - * \param is_dout If "true": completion is handled by the caller - * If "false": this function adds a dummy descriptor completion - * and waits upon completion signal. 
+ * @drvdata: Associated device driver context + * @cc_req: The request to enqueue + * @desc: The crypto sequence + * @len: The crypto sequence length + * @req: Asynchronous crypto request * - * \return int Returns -EINPROGRESS or error + * Return: + * Returns -EINPROGRESS or error */ int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, struct cc_hw_desc *desc, unsigned int len, diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c index 62c885e6e791..37a95856361f 100644 --- a/drivers/crypto/ccree/cc_sram_mgr.c +++ b/drivers/crypto/ccree/cc_sram_mgr.c @@ -5,88 +5,61 @@ #include "cc_sram_mgr.h" /** - * struct cc_sram_ctx -Internal RAM context manager - * @sram_free_offset: the offset to the non-allocated area - */ -struct cc_sram_ctx { - cc_sram_addr_t sram_free_offset; -}; - -/** - * cc_sram_mgr_fini() - Cleanup SRAM pool. - * - * @drvdata: Associated device driver context - */ -void cc_sram_mgr_fini(struct cc_drvdata *drvdata) -{ - /* Nothing needed */ -} - -/** * cc_sram_mgr_init() - Initializes SRAM pool. * The pool starts right at the beginning of SRAM. * Returns zero for success, negative value otherwise. * * @drvdata: Associated device driver context + * + * Return: + * 0 for success, negative error code for failure. */ int cc_sram_mgr_init(struct cc_drvdata *drvdata) { - struct cc_sram_ctx *ctx; - dma_addr_t start = 0; + u32 start = 0; struct device *dev = drvdata_to_dev(drvdata); if (drvdata->hw_rev < CC_HW_REV_712) { /* Pool starts after ROM bytes */ - start = (dma_addr_t)cc_ioread(drvdata, - CC_REG(HOST_SEP_SRAM_THRESHOLD)); - + start = cc_ioread(drvdata, CC_REG(HOST_SEP_SRAM_THRESHOLD)); if ((start & 0x3) != 0) { - dev_err(dev, "Invalid SRAM offset %pad\n", &start); + dev_err(dev, "Invalid SRAM offset 0x%x\n", start); return -EINVAL; } } - /* Allocate "this" context */ - ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); - - if (!ctx) - return -ENOMEM; - - ctx->sram_free_offset = start; - drvdata->sram_mgr_handle = ctx; - + drvdata->sram_free_offset = start; return 0; } -/*! - * Allocated buffer from SRAM pool. - * Note: Caller is responsible to free the LAST allocated buffer. - * This function does not taking care of any fragmentation may occur - * by the order of calls to alloc/free. +/** + * cc_sram_alloc() - Allocate buffer from SRAM pool. + * + * @drvdata: Associated device driver context + * @size: The requested number of bytes to allocate * - * \param drvdata - * \param size The requested bytes to allocate + * Return: + * Address offset in SRAM or NULL_SRAM_ADDR for failure. 
*/ -cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size) +u32 cc_sram_alloc(struct cc_drvdata *drvdata, u32 size) { - struct cc_sram_ctx *smgr_ctx = drvdata->sram_mgr_handle; struct device *dev = drvdata_to_dev(drvdata); - cc_sram_addr_t p; + u32 p; if ((size & 0x3)) { dev_err(dev, "Requested buffer size (%u) is not multiple of 4", size); return NULL_SRAM_ADDR; } - if (size > (CC_CC_SRAM_SIZE - smgr_ctx->sram_free_offset)) { - dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n", - size, smgr_ctx->sram_free_offset); + if (size > (CC_CC_SRAM_SIZE - drvdata->sram_free_offset)) { + dev_err(dev, "Not enough space to allocate %u B (at offset %u)\n", + size, drvdata->sram_free_offset); return NULL_SRAM_ADDR; } - p = smgr_ctx->sram_free_offset; - smgr_ctx->sram_free_offset += size; - dev_dbg(dev, "Allocated %u B @ %u\n", size, (unsigned int)p); + p = drvdata->sram_free_offset; + drvdata->sram_free_offset += size; + dev_dbg(dev, "Allocated %u B @ %u\n", size, p); return p; } @@ -97,13 +70,12 @@ cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size) * * @src: A pointer to array of words to set as consts. * @dst: The target SRAM buffer to set into - * @nelements: The number of words in "src" array + * @nelement: The number of words in "src" array * @seq: A pointer to the given IN/OUT descriptor sequence * @seq_len: A pointer to the given IN/OUT sequence length */ -void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst, - unsigned int nelement, struct cc_hw_desc *seq, - unsigned int *seq_len) +void cc_set_sram_desc(const u32 *src, u32 dst, unsigned int nelement, + struct cc_hw_desc *seq, unsigned int *seq_len) { u32 i; unsigned int idx = *seq_len; diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h index 1d14de9ee8c3..1c965ef83002 100644 --- a/drivers/crypto/ccree/cc_sram_mgr.h +++ b/drivers/crypto/ccree/cc_sram_mgr.h @@ -10,42 +10,30 @@ struct cc_drvdata; -/** - * Address (offset) within CC internal SRAM - */ - -typedef u64 cc_sram_addr_t; - -#define NULL_SRAM_ADDR ((cc_sram_addr_t)-1) +#define NULL_SRAM_ADDR ((u32)-1) -/*! - * Initializes SRAM pool. +/** + * cc_sram_mgr_init() - Initializes SRAM pool. * The first X bytes of SRAM are reserved for ROM usage, hence, pool * starts right after X bytes. * - * \param drvdata + * @drvdata: Associated device driver context * - * \return int Zero for success, negative value otherwise. + * Return: + * Zero for success, negative value otherwise. */ int cc_sram_mgr_init(struct cc_drvdata *drvdata); -/*! - * Uninits SRAM pool. +/** + * cc_sram_alloc() - Allocate buffer from SRAM pool. * - * \param drvdata - */ -void cc_sram_mgr_fini(struct cc_drvdata *drvdata); - -/*! - * Allocated buffer from SRAM pool. - * Note: Caller is responsible to free the LAST allocated buffer. - * This function does not taking care of any fragmentation may occur - * by the order of calls to alloc/free. + * @drvdata: Associated device driver context + * @size: The requested bytes to allocate * - * \param drvdata - * \param size The requested bytes to allocate + * Return: + * Address offset in SRAM or NULL_SRAM_ADDR for failure. */ -cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size); +u32 cc_sram_alloc(struct cc_drvdata *drvdata, u32 size); /** * cc_set_sram_desc() - Create const descriptors sequence to @@ -54,12 +42,11 @@ cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size); * * @src: A pointer to array of words to set as consts. 
* @dst: The target SRAM buffer to set into - * @nelements: The number of words in "src" array + * @nelement: The number of words in "src" array * @seq: A pointer to the given IN/OUT descriptor sequence * @seq_len: A pointer to the given IN/OUT sequence length */ -void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst, - unsigned int nelement, struct cc_hw_desc *seq, - unsigned int *seq_len); +void cc_set_sram_desc(const u32 *src, u32 dst, unsigned int nelement, + struct cc_hw_desc *seq, unsigned int *seq_len); #endif /*__CC_SRAM_MGR_H__*/
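
Two of the changes above reward a closer look. First, the hilo() macro in cc_hash.c: rather than byte-swapping cc_sha384_init/cc_sha512_init at module load (the removed cc_swap_dwords()/cc_hash_global_init() path), the larval digests are now emitted as const u32 pairs in hi/lo order at compile time. A minimal standalone sketch of the same expansion, with plain shifts standing in for the kernel's upper_32_bits()/lower_32_bits() helpers (the array here shows only SHA512_H7..H4):

#include <stdint.h>
#include <stdio.h>

/* Expand one u64 constant into two u32 initializers, high word first,
 * mirroring the patch's hilo() macro. */
#define HILO(x) (uint32_t)((uint64_t)(x) >> 32), (uint32_t)(x)

static const uint32_t sha512_larval[] = {
	HILO(0x5be0cd19137e2179ULL),	/* SHA512_H7 */
	HILO(0x1f83d9abfb41bd6bULL),	/* SHA512_H6 */
	HILO(0x9b05688c2b3e6c1fULL),	/* SHA512_H5 */
	HILO(0x510e527fade682d1ULL),	/* SHA512_H4 */
};

int main(void)
{
	/* Each u64 IV word lands as hi then lo, matching the HW layout. */
	for (unsigned int i = 0; i < 8; i += 2)
		printf("%08x %08x\n", (unsigned)sha512_larval[i],
		       (unsigned)sha512_larval[i + 1]);
	return 0;
}

Because the tables no longer need to be patched in place at init, they can also be const and live in .rodata, which is what the patch does.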
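
Second, with cc_sram_ctx folded into cc_drvdata, the SRAM manager that remains is a plain word-aligned bump allocator with no free path: callers get the current offset and the offset only grows. A self-contained sketch of that allocation rule (the pool size and type names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define SRAM_SIZE	4096		/* stand-in for CC_CC_SRAM_SIZE */
#define NULL_SRAM_ADDR	((uint32_t)-1)

struct drvdata {
	uint32_t sram_free_offset;	/* offset to non-allocated area */
};

/* Same rule as cc_sram_alloc(): reject unaligned or oversized requests,
 * otherwise hand out the current offset and bump it. */
static uint32_t sram_alloc(struct drvdata *d, uint32_t size)
{
	uint32_t p;

	if (size & 0x3)
		return NULL_SRAM_ADDR;	/* not a multiple of 4 */
	if (size > SRAM_SIZE - d->sram_free_offset)
		return NULL_SRAM_ADDR;	/* pool exhausted */

	p = d->sram_free_offset;
	d->sram_free_offset += size;
	return p;
}

int main(void)
{
	struct drvdata d = { .sram_free_offset = 0 };

	printf("%u\n", (unsigned)sram_alloc(&d, 64));	/* 0 */
	printf("%u\n", (unsigned)sram_alloc(&d, 32));	/* 64 */
	printf("%u\n", (unsigned)sram_alloc(&d, 3));	/* 4294967295: failure */
	return 0;
}

Since nothing is ever freed, allocation order fixes the SRAM layout, which is why cc_init_hash_sram() copies the digest-length constants and larval digests in one well-defined sequence of cc_init_copy_sram() calls.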