Diffstat (limited to 'drivers/crypto')
25 files changed, 221 insertions, 133 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index a8c4ce07fc9d..caa98a7fe392 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -73,6 +73,17 @@ config ZCRYPT
 	  + Crypto Express 2,3,4 or 5 Accelerator (CEXxA)
 	  + Crypto Express 4 or 5 EP11 Coprocessor (CEXxP)
 
+config ZCRYPT_MULTIDEVNODES
+	bool "Support for multiple zcrypt device nodes"
+	default y
+	depends on S390
+	depends on ZCRYPT
+	help
+	  With this option enabled the zcrypt device driver can
+	  provide multiple devices nodes in /dev. Each device
+	  node can get customized to limit access and narrow
+	  down the use of the available crypto hardware.
+
 config PKEY
 	tristate "Kernel API for protected key handling"
 	depends on S390
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index d67667970f7e..ec40f991e6c6 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1553,8 +1553,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-			 desc_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
 	edesc->iv_dir = DMA_TO_DEVICE;
 
 	/* Make sure IV is located in a DMAable area */
@@ -1757,8 +1757,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-			 desc_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
 	edesc->iv_dir = DMA_FROM_DEVICE;
 
 	/* Make sure IV is located in a DMAable area */
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 6e61cc93c2b0..d7aa7d7ff102 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	int ret = 0;
 
 	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-		crypto_ablkcipher_set_flags(ablkcipher,
-					    CRYPTO_TFM_RES_BAD_KEY_LEN);
 		dev_err(jrdev, "key size mismatch\n");
-		return -EINVAL;
+		goto badkey;
 	}
 
 	ctx->cdata.keylen = keylen;
@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	return ret;
 badkey:
 	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-	return 0;
+	return -EINVAL;
 }
 
 /*
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 578ea63a3109..f26d62e5533a 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
 	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 
 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
 	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
 	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
 	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 
 /* RSA Job Completion handler */
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
 		goto unmap_p;
 	}
 
-	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
 		dev_err(dev, "Unable to map RSA tmp1 memory\n");
 		goto unmap_q;
 	}
 
-	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
 		dev_err(dev, "Unable to map RSA tmp2 memory\n");
 		goto unmap_tmp1;
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
 	return 0;
 
 unmap_tmp1:
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_q:
 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
 unmap_p:
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
 		goto unmap_dq;
 	}
 
-	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
 		dev_err(dev, "Unable to map RSA tmp1 memory\n");
 		goto unmap_qinv;
 	}
 
-	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
 		dev_err(dev, "Unable to map RSA tmp2 memory\n");
 		goto unmap_tmp1;
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
 	return 0;
 
 unmap_tmp1:
-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_qinv:
 	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
 unmap_dq:
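The caampkc.c hunks above all make the same fix: the RSA tmp1/tmp2 scratch buffers are written by the CAAM engine as well as read from it, so mapping them DMA_TO_DEVICE was wrong. A minimal sketch of the rule being enforced; the names (my_dev, buf, len) are illustrative, not from the patch:

#include <linux/dma-mapping.h>

/* Map a scratch buffer the device both reads and writes. */
static int map_scratch(struct device *my_dev, void *buf, size_t len,
		       dma_addr_t *handle)
{
	*handle = dma_map_single(my_dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(my_dev, *handle))
		return -ENOMEM;
	return 0;
}

static void unmap_scratch(struct device *my_dev, dma_addr_t handle, size_t len)
{
	/* direction must match the dma_map_single() call exactly */
	dma_unmap_single(my_dev, handle, len, DMA_BIDIRECTIONAL);
}

On architectures with non-coherent DMA, a mismatched direction skips the cache maintenance for one side of the transfer, which is exactly the class of bug these hunks address.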
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index f4f258075b89..acdd72016ffe 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
 		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
 
 		/* Unmap just-run descriptor so we can post-process */
-		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+		dma_unmap_single(dev,
+				 caam_dma_to_cpu(jrp->outring[hw_idx].desc),
 				 jrp->entinfo[sw_idx].desc_size,
 				 DMA_TO_DEVICE);
 
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index 9a476bb6d4c7..af596455b420 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -35,6 +35,7 @@ struct nitrox_cmdq {
 	/* requests in backlog queues */
 	atomic_t backlog_count;
 
+	int write_idx;
 	/* command size 32B/64B */
 	u8 instr_size;
 	u8 qno;
@@ -87,7 +88,7 @@ struct nitrox_bh {
 	struct bh_data *slc;
 };
 
-/* NITROX-5 driver state */
+/* NITROX-V driver state */
 #define NITROX_UCODE_LOADED	0
 #define NITROX_READY		1
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index ebe267379ac9..4d31df07777f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
 	cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
 	cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
 	cmdq->qsize = (qsize + PKT_IN_ALIGN);
+	cmdq->write_idx = 0;
 
 	spin_lock_init(&cmdq->response_lock);
 	spin_lock_init(&cmdq->cmdq_lock);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index deaefd532aaa..4a362fc22f62 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -42,6 +42,16 @@
  *   Invalid flag options in AES-CCM IV.
  */
 
+static inline int incr_index(int index, int count, int max)
+{
+	if ((index + count) >= max)
+		index = index + count - max;
+	else
+		index += count;
+
+	return index;
+}
+
 /**
  * dma_free_sglist - unmap and free the sg lists.
  * @ndev: N5 device
@@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr,
 			  struct nitrox_cmdq *cmdq)
 {
 	struct nitrox_device *ndev = sr->ndev;
-	union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
-	u64 offset;
+	int idx;
 	u8 *ent;
 
 	spin_lock_bh(&cmdq->cmdq_lock);
 
-	/* get the next write offset */
-	offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
-	pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
+	idx = cmdq->write_idx;
 	/* copy the instruction */
-	ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
+	ent = cmdq->head + (idx * cmdq->instr_size);
 	memcpy(ent, &sr->instr, cmdq->instr_size);
-	/* flush the command queue updates */
-	dma_wmb();
 
-	sr->tstamp = jiffies;
 	atomic_set(&sr->status, REQ_POSTED);
 	response_list_add(sr, cmdq);
+	sr->tstamp = jiffies;
+
+	/* flush the command queue updates */
+	dma_wmb();
 
 	/* Ring doorbell with count 1 */
 	writeq(1, cmdq->dbell_csr_addr);
 	/* orders the doorbell rings */
 	mmiowb();
 
+	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
+
 	spin_unlock_bh(&cmdq->cmdq_lock);
 }
 
@@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
 	struct nitrox_softreq *sr, *tmp;
 	int ret = 0;
 
+	if (!atomic_read(&cmdq->backlog_count))
+		return 0;
+
 	spin_lock_bh(&cmdq->backlog_lock);
 
 	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
@@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
 
 		/* submit until space available */
 		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
-			ret = -EBUSY;
+			ret = -ENOSPC;
 			break;
 		}
 		/* delete from backlog list */
@@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
 {
 	struct nitrox_cmdq *cmdq = sr->cmdq;
 	struct nitrox_device *ndev = sr->ndev;
-	int ret = -EBUSY;
+
+	/* try to post backlog requests */
+	post_backlog_cmds(cmdq);
 
 	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
 		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-			return -EAGAIN;
-
+			return -ENOSPC;
+		/* add to backlog list */
 		backlog_list_add(sr, cmdq);
-	} else {
-		ret = post_backlog_cmds(cmdq);
-		if (ret) {
-			backlog_list_add(sr, cmdq);
-			return ret;
-		}
-		post_se_instr(sr, cmdq);
-		ret = -EINPROGRESS;
+		return -EBUSY;
 	}
-	return ret;
+	post_se_instr(sr, cmdq);
+
+	return -EINPROGRESS;
 }
 
 /**
@@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
 	 */
 	sr->instr.fdata[0] = *((u64 *)&req->gph);
 	sr->instr.fdata[1] = 0;
-	/* flush the soft_req changes before posting the cmd */
-	wmb();
 
 	ret = nitrox_enqueue_request(sr);
-	if (ret == -EAGAIN)
+	if (ret == -ENOSPC)
 		goto send_fail;
 
 	return ret;
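The nitrox changes above stop reading the queue write position back from a hardware CSR on every submission and instead track it in software (cmdq->write_idx), advancing it with a wrapping increment under the command-queue lock. The helper is small enough to restate with a worked check:

/* Wrapping ring index, as added in nitrox_reqmgr.c above;
 * assumes count <= max, which holds for the driver's count == 1 use.
 */
static inline int incr_index(int index, int count, int max)
{
	if ((index + count) >= max)
		index = index + count - max;
	else
		index += count;

	return index;
}

/* With a queue length of 8: incr_index(6, 1, 8) == 7 and
 * incr_index(7, 1, 8) == 0, so the index always stays in [0, 8).
 */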
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 218739b961fe..72790d88236d 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -38,6 +38,17 @@ static DEFINE_MUTEX(sev_cmd_mutex);
 static struct sev_misc_dev *misc_dev;
 static struct psp_device *psp_master;
 
+static int psp_cmd_timeout = 100;
+module_param(psp_cmd_timeout, int, 0644);
+MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
+
+static int psp_probe_timeout = 5;
+module_param(psp_probe_timeout, int, 0644);
+MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+
+static bool psp_dead;
+static int psp_timeout;
+
 static struct psp_device *psp_alloc_struct(struct sp_device *sp)
 {
 	struct device *dev = sp->dev;
@@ -82,10 +93,19 @@ done:
 	return IRQ_HANDLED;
 }
 
-static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
+static int sev_wait_cmd_ioc(struct psp_device *psp,
+			    unsigned int *reg, unsigned int timeout)
 {
-	wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
+	int ret;
+
+	ret = wait_event_timeout(psp->sev_int_queue,
+				 psp->sev_int_rcvd, timeout * HZ);
+	if (!ret)
+		return -ETIMEDOUT;
+
 	*reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
+
+	return 0;
 }
 
 static int sev_cmd_buffer_len(int cmd)
@@ -133,12 +153,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
 	if (!psp)
 		return -ENODEV;
 
+	if (psp_dead)
+		return -EBUSY;
+
 	/* Get the physical address of the command buffer */
 	phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
 	phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
 
-	dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n",
-		cmd, phys_msb, phys_lsb);
+	dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
+		cmd, phys_msb, phys_lsb, psp_timeout);
 
 	print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
 			     sev_cmd_buffer_len(cmd), false);
@@ -154,7 +177,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
 	iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
 
 	/* wait for command completion */
-	sev_wait_cmd_ioc(psp, &reg);
+	ret = sev_wait_cmd_ioc(psp, &reg, psp_timeout);
+	if (ret) {
+		if (psp_ret)
+			*psp_ret = 0;
+
+		dev_err(psp->dev, "sev command %#x timed out, disabling PSP \n", cmd);
+		psp_dead = true;
+
+		return ret;
+	}
+
+	psp_timeout = psp_cmd_timeout;
 
 	if (psp_ret)
 		*psp_ret = reg & PSP_CMDRESP_ERR_MASK;
@@ -888,6 +922,8 @@ void psp_pci_init(void)
 
 	psp_master = sp->psp_data;
 
+	psp_timeout = psp_probe_timeout;
+
 	if (sev_get_api_version())
 		goto err;
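The psp-dev.c change swaps an unbounded wait_event() for wait_event_timeout(), which returns 0 on timeout and the remaining jiffies otherwise, and marks the device dead instead of hanging the caller forever. A minimal sketch of that pattern; my_wq and done are illustrative names, not the driver's fields:

#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_bounded(wait_queue_head_t *my_wq, bool *done,
			unsigned int timeout_s)
{
	long remaining = wait_event_timeout(*my_wq, *done, timeout_s * HZ);

	if (!remaining)
		return -ETIMEDOUT;	/* caller can mark the device unusable */

	return 0;
}

Note how the probe path arms the short psp_probe_timeout (5 s) first and only switches to the long psp_cmd_timeout (100 s) after a command has completed successfully.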
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 5c539af8ed60..010bbf607797 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 }
 
-static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+				 int pci_chan_id)
 {
 	struct cpl_rx_phys_dsgl *phys_cpl;
 
@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 	phys_cpl->rss_hdr_int.qid = htons(qid);
 	phys_cpl->rss_hdr_int.hash_val = 0;
+	phys_cpl->rss_hdr_int.channel = pci_chan_id;
 }
 
 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
 		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
 				!!lcb, ctx->tx_qidx);
 
-	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
						       qid);
 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
 				     ((sizeof(chcr_req->wreq)) >> 4)));
@@ -1339,16 +1341,23 @@ static int chcr_device_init(struct chcr_context *ctx)
 				    adap->vres.ncrypto_fc);
 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
 		txq_perchan = ntxq / u_ctx->lldi.nchan;
-		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
-		rxq_idx += id % rxq_perchan;
-		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
-		txq_idx += id % txq_perchan;
 		spin_lock(&ctx->dev->lock_chcr_dev);
-		ctx->rx_qidx = rxq_idx;
-		ctx->tx_qidx = txq_idx;
+		ctx->tx_chan_id = ctx->dev->tx_channel_id;
 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
 		ctx->dev->rx_channel_id = 0;
 		spin_unlock(&ctx->dev->lock_chcr_dev);
+		rxq_idx = ctx->tx_chan_id * rxq_perchan;
+		rxq_idx += id % rxq_perchan;
+		txq_idx = ctx->tx_chan_id * txq_perchan;
+		txq_idx += id % txq_perchan;
+		ctx->rx_qidx = rxq_idx;
+		ctx->tx_qidx = txq_idx;
+		/* Channel Id used by SGE to forward packet to Host.
+		 * Same value should be used in cpl_fw6_pld RSS_CH field
+		 * by FW. Driver programs PCI channel ID to be used in fw
+		 * at the time of queue allocation with value "pi->tx_chan"
+		 */
+		ctx->pci_chan_id = txq_idx / txq_perchan;
 	}
 out:
 	return err;
@@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct dsgl_walk dsgl_walk;
 	unsigned int authsize = crypto_aead_authsize(tfm);
+	struct chcr_context *ctx = a_ctx(tfm);
 	u32 temp;
 
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
 	temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
-	dsgl_walk_end(&dsgl_walk, qid);
+	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
 			     unsigned short qid)
 {
 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+	struct chcr_context *ctx = c_ctx(tfm);
 	struct dsgl_walk dsgl_walk;
 
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
 	reqctx->dstsg = dsgl_walk.last_sg;
 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
 
-	dsgl_walk_end(&dsgl_walk, qid);
+	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_hash_src_ent(struct ahash_request *req,
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 04f277cade7c..62249d4ed373 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -237,9 +237,7 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 
 static int __init chcr_crypto_init(void)
 {
-	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
-		pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
-
+	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
 	return 0;
 }
 
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 54835cb109e5..0d2c70c344f3 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -255,6 +255,8 @@ struct chcr_context {
 	struct chcr_dev *dev;
 	unsigned char tx_qidx;
 	unsigned char rx_qidx;
+	unsigned char tx_chan_id;
+	unsigned char pci_chan_id;
 	struct __crypto_ctx crypto_ctx[0];
 };
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index a53a0e6ba024..7725b6ee14ef 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -96,6 +96,10 @@ enum csk_flags {
 	CSK_CONN_INLINE,	/* Connection on HW */
 };
 
+enum chtls_cdev_state {
+	CHTLS_CDEV_STATE_UP = 1
+};
+
 struct listen_ctx {
 	struct sock *lsk;
 	struct chtls_dev *cdev;
@@ -146,6 +150,7 @@ struct chtls_dev {
 	unsigned int send_page_order;
 	int max_host_sndbuf;
 	struct key_map kmap;
+	unsigned int cdev_state;
 };
 
 struct chtls_hws {
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 9b07f9165658..f59b044ebd25 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
 	tlsdev->hash = chtls_create_hash;
 	tlsdev->unhash = chtls_destroy_hash;
 	tls_register_device(&cdev->tlsdev);
+	cdev->cdev_state = CHTLS_CDEV_STATE_UP;
 }
 
 static void chtls_unregister_dev(struct chtls_dev *cdev)
@@ -281,8 +282,10 @@ static void chtls_free_all_uld(void)
 	struct chtls_dev *cdev, *tmp;
 
 	mutex_lock(&cdev_mutex);
-	list_for_each_entry_safe(cdev, tmp, &cdev_list, list)
-		chtls_free_uld(cdev);
+	list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
+		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
+			chtls_free_uld(cdev);
+	}
 	mutex_unlock(&cdev_mutex);
 }
 
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 7e71043457a6..86c699c14f84 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1044,7 +1044,8 @@ static int safexcel_probe(struct platform_device *pdev)
 
 	safexcel_configure(priv);
 
-	priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
+	priv->ring = devm_kcalloc(dev, priv->config.rings,
+				  sizeof(*priv->ring),
 				  GFP_KERNEL);
 	if (!priv->ring) {
 		ret = -ENOMEM;
@@ -1063,8 +1064,9 @@ static int safexcel_probe(struct platform_device *pdev)
 		if (ret)
 			goto err_reg_clk;
 
-		priv->ring[i].rdr_req = devm_kzalloc(dev,
-			sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
+		priv->ring[i].rdr_req = devm_kcalloc(dev,
+			EIP197_DEFAULT_RING_SIZE,
+			sizeof(priv->ring[i].rdr_req),
 			GFP_KERNEL);
 		if (!priv->ring[i].rdr_req) {
 			ret = -ENOMEM;
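The safexcel.c hunks replace devm_kzalloc(dev, n * size, ...) with devm_kcalloc(dev, n, size, ...). Both zero the memory; the difference is that kcalloc-style allocators check the n * size multiplication for overflow and return NULL rather than silently allocating a truncated buffer. A sketch with an illustrative element type (struct my_item is an assumption, not from the driver):

#include <linux/device.h>
#include <linux/slab.h>

struct my_item {
	u64 a;
	u64 b;
};

/* Returns NULL if n_items * sizeof(struct my_item) would overflow. */
static struct my_item *alloc_items(struct device *dev, size_t n_items)
{
	return devm_kcalloc(dev, n_items, sizeof(struct my_item), GFP_KERNEL);
}

Being devm-managed, the allocation is also freed automatically when the probe fails or the device is unbound.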
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index a10c418d4e5c..56bd28174f52 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -63,7 +63,7 @@ struct dcp {
 	struct dcp_coherent_block	*coh;
 
 	struct completion		completion[DCP_MAX_CHANS];
-	struct mutex			mutex[DCP_MAX_CHANS];
+	spinlock_t			lock[DCP_MAX_CHANS];
 	struct task_struct		*thread[DCP_MAX_CHANS];
 	struct crypto_queue		queue[DCP_MAX_CHANS];
 };
@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
 
 	int ret;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
 		if (arq) {
 			ret = mxs_dcp_aes_block_crypt(arq);
 			arq->complete(arq, ret);
-			continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
 	rctx->ecb = ecb;
 	actx->chan = DCP_CHAN_CRYPTO;
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 
@@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
 	struct ahash_request *req;
 	int ret, fini;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
 			ret = dcp_sha_req_to_buf(arq);
 			fini = rctx->fini;
 			arq->complete(arq, ret);
-			if (!fini)
-				continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 		rctx->init = 1;
 	}
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 	mutex_unlock(&actx->mutex);
@@ -997,7 +1004,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, sdcp);
 
 	for (i = 0; i < DCP_MAX_CHANS; i++) {
-		mutex_init(&sdcp->mutex[i]);
+		spin_lock_init(&sdcp->lock[i]);
 		init_completion(&sdcp->completion[i]);
 		crypto_init_queue(&sdcp->queue[i], 50);
 	}
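The mxs-dcp rework above converts both channel threads to the canonical kthread sleep/wake idiom: the thread marks itself TASK_INTERRUPTIBLE before checking for work, so a producer that enqueues and calls wake_up_process() between the check and schedule() merely turns the schedule() into a no-op instead of causing a lost wakeup. It also replaces the per-channel mutexes with spinlocks around the short queue-manipulation critical sections. A condensed sketch of the loop shape; my_queue_empty() and my_process_one() are illustrative stand-ins for the driver's queue handling:

#include <linux/kthread.h>
#include <linux/sched.h>

bool my_queue_empty(void *data);
void my_process_one(void *data);

static int my_worker(void *data)
{
	while (!kthread_should_stop()) {
		/* mark sleeping *before* the check to avoid a lost wakeup */
		set_current_state(TASK_INTERRUPTIBLE);

		if (my_queue_empty(data)) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);
		my_process_one(data);
	}

	return 0;
}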
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index ba197f34c252..763c2166ee0e 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 24ec908eb26c..613c7d5644ce 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 59a5a0df50b6..9cb832963357 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index b9f3e0e4fde9..278452b8ef81 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index 9225d060e18f..f5e960d23a7a 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -198,7 +198,6 @@ static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
 		pr_err("QAT: Can't find acceleration device\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
-	pci_cleanup_aer_uncorrect_error_status(pdev);
 	if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
 		return PCI_ERS_RESULT_DISCONNECT;
 
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index be5c5a988ca5..3a9708ef4ce2 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 26ab17bfc6da..3da0f951cb59 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
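All six QAT probe routines above get the same fix: for_each_set_bit() walks an unsigned long bitmap, so passing the address of an int through a cast makes it read past the end of the variable on 64-bit kernels, picking up whatever stack bytes follow. Storing the pci_select_bars() result in a real unsigned long is the fix. A sketch; MY_MAX_BARS and the dev_info() are illustrative, not from the driver:

#include <linux/bitops.h>
#include <linux/pci.h>

#define MY_MAX_BARS 6	/* standard PCI BAR count, for illustration */

static void walk_mem_bars(struct pci_dev *pdev)
{
	/* must be unsigned long: for_each_set_bit() reads whole longs */
	unsigned long bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	unsigned int bar_nr;

	for_each_set_bit(bar_nr, &bar_mask, MY_MAX_BARS)
		dev_info(&pdev->dev, "memory BAR %u present\n", bar_nr);
}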
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 5285ece4f33a..b71895871be3 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 		ret = crypto_skcipher_encrypt(req);
 		skcipher_request_zero(req);
 	} else {
-		preempt_disable();
-		pagefault_disable();
-		enable_kernel_vsx();
-
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt(desc, &walk);
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			aes_p8_cbc_encrypt(walk.src.virt.addr,
 					   walk.dst.virt.addr,
 					   nbytes & AES_BLOCK_MASK,
 					   &ctx->enc_key, walk.iv, 1);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 
 	return ret;
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 		ret = crypto_skcipher_decrypt(req);
 		skcipher_request_zero(req);
 	} else {
-		preempt_disable();
-		pagefault_disable();
-		enable_kernel_vsx();
-
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt(desc, &walk);
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			aes_p8_cbc_encrypt(walk.src.virt.addr,
 					   walk.dst.virt.addr,
 					   nbytes & AES_BLOCK_MASK,
 					   &ctx->dec_key, walk.iv, 0);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
+
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 
 	return ret;
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 8bd9aff0f55f..e9954a7d4694 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
 		ret = enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
 		skcipher_request_zero(req);
 	} else {
+		blkcipher_walk_init(&walk, dst, src, nbytes);
+
+		ret = blkcipher_walk_virt(desc, &walk);
+
 		preempt_disable();
 		pagefault_disable();
 		enable_kernel_vsx();
 
-		blkcipher_walk_init(&walk, dst, src, nbytes);
-
-		ret = blkcipher_walk_virt(desc, &walk);
 		iv = walk.iv;
 		memset(tweak, 0, AES_BLOCK_SIZE);
 		aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
 
+		disable_kernel_vsx();
+		pagefault_enable();
+		preempt_enable();
+
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
 			if (enc)
 				aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 						nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
 			else
 				aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
 						nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+			disable_kernel_vsx();
+			pagefault_enable();
+			preempt_enable();
 
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		disable_kernel_vsx();
-		pagefault_enable();
-		preempt_enable();
 	}
 	return ret;
 }
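The vmx hunks above (aes_cbc.c and aes_xts.c) all shrink the enable_kernel_vsx() window to the actual vector computation: blkcipher_walk_done() may sleep while mapping and copying between pages, which is not allowed with preemption and page faults disabled, so the begin/end pair has to be dropped and re-taken around each walk step. Condensed, the per-iteration shape the patch converges on looks like the fragment below, with do_vsx_crypt() standing in for the aes_p8_* assembler routines:

	while ((nbytes = walk.nbytes)) {
		preempt_disable();
		pagefault_disable();
		enable_kernel_vsx();

		do_vsx_crypt(walk.src.virt.addr, walk.dst.virt.addr,
			     nbytes & AES_BLOCK_MASK);

		disable_kernel_vsx();
		pagefault_enable();
		preempt_enable();

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, &walk, nbytes);
	}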