Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/caam/ctrl.c	5
-rw-r--r--	drivers/crypto/chelsio/chcr_algo.c	16
-rw-r--r--	drivers/crypto/chelsio/chcr_core.c	3
-rw-r--r--	drivers/crypto/marvell/hash.c	11
-rw-r--r--	drivers/crypto/padlock-aes.c	23
-rw-r--r--	drivers/crypto/padlock-sha.c	18
6 files changed, 21 insertions, 55 deletions
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index d5ca5b824641..755109841cfd 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -556,8 +556,9 @@ static int caam_probe(struct platform_device *pdev)
 	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
 	 * long pointers in master configuration register
 	 */
-	clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
-		      MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+	clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+		      MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
+		      MCFGR_WDENABLE | MCFGR_LARGE_BURST |
 		      (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
 
 	/*
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 57b49ecdcad4..2ed1e24b44a8 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -696,16 +696,18 @@ badkey_err:
 
 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
 {
-	int ret = 0;
-	struct sge_ofld_txq *q;
 	struct adapter *adap = netdev2adap(dev);
+	struct sge_uld_txq_info *txq_info =
+		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
+	struct sge_uld_txq *txq;
+	int ret = 0;
 
 	local_bh_disable();
-	q = &adap->sge.ofldtxq[idx];
-	spin_lock(&q->sendq.lock);
-	if (q->full)
+	txq = &txq_info->uldtxq[idx];
+	spin_lock(&txq->sendq.lock);
+	if (txq->full)
 		ret = -1;
-	spin_unlock(&q->sendq.lock);
+	spin_unlock(&txq->sendq.lock);
 	local_bh_enable();
 	return ret;
 }
@@ -775,11 +777,11 @@ static int chcr_device_init(struct chcr_context *ctx)
 		}
 		u_ctx = ULD_CTX(ctx);
 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
-		ctx->dev->tx_channel_id = 0;
 		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
 		rxq_idx += id % rxq_perchan;
 		spin_lock(&ctx->dev->lock_chcr_dev);
 		ctx->tx_channel_id = rxq_idx;
+		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
 		spin_unlock(&ctx->dev->lock_chcr_dev);
 	}
 out:
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index acde2de2c1e1..918da8e6e2d8 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -42,6 +42,7 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
 static struct cxgb4_uld_info chcr_uld_info = {
 	.name = DRV_MODULE_NAME,
 	.nrxq = MAX_ULD_QSETS,
+	.ntxq = MAX_ULD_QSETS,
 	.rxq_size = 1024,
 	.add = chcr_uld_add,
 	.state_change = chcr_uld_state_change,
@@ -124,7 +125,7 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
 
 int chcr_send_wr(struct sk_buff *skb)
 {
-	return cxgb4_ofld_send(skb->dev, skb);
+	return cxgb4_crypto_send(skb->dev, skb);
 }
 
 static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 2a9260559654..317cf029c0cf 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -168,12 +168,11 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
 	mv_cesa_adjust_op(engine, &creq->op_tmpl);
 	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
 
-	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
-	for (i = 0; i < digsize / 4; i++)
-		writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
-
-	mv_cesa_adjust_op(engine, &creq->op_tmpl);
-	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
+	if (!sreq->offset) {
+		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+		for (i = 0; i < digsize / 4; i++)
+			writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
+	}
 
 	if (creq->cache_ptr)
 		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
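A note on the caam/ctrl.c hunk: clrsetbits_32() is a read-modify-write helper that can only clear bits named in its clear mask. With MCFGR_LONG_PTR absent from that mask, a kernel built with 32-bit dma_addr_t could never clear a LONG_PTR bit that an earlier boot stage had left set; adding the bit to the mask and conditionally re-setting it fixes that. A minimal userspace model of the semantics (clrsetbits_32_model() and the MCFGR_LONG_PTR value below are stand-ins for this sketch, not the kernel's MMIO accessor or the real register layout):

#include <stdint.h>
#include <stdio.h>

#define MCFGR_LONG_PTR 0x00010000	/* hypothetical bit position */

/* Model of the kernel helper: clear the 'clear' bits, then set the 'set' bits. */
static void clrsetbits_32_model(uint32_t *reg, uint32_t clear, uint32_t set)
{
	*reg = (*reg & ~clear) | set;
}

int main(void)
{
	uint32_t mcr = MCFGR_LONG_PTR;	/* left set by an earlier boot stage */

	/* Old call, clear mask without LONG_PTR (other bits elided here):
	 * the stale bit survives on a 32-bit DMA kernel. */
	clrsetbits_32_model(&mcr, 0, 0);
	printf("old semantics: 0x%08x\n", (unsigned)mcr);

	/* New call: LONG_PTR is always cleared, then set again only when the
	 * DMA address width warrants it (sizeof(void *) stands in for the
	 * kernel's sizeof(dma_addr_t) test). */
	clrsetbits_32_model(&mcr, MCFGR_LONG_PTR,
			    sizeof(void *) == 8 ? MCFGR_LONG_PTR : 0);
	printf("new semantics: 0x%08x\n", (unsigned)mcr);
	return 0;
}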
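The chcr_device_init() hunk above replaces a Tx channel that was forced to 0 with a two-channel round robin: each new context samples dev->tx_channel_id under lock_chcr_dev and flips it for the next caller, so contexts alternate between the two channels. A standalone model of the toggle (struct dev_model and assign_channel() are names invented for this sketch; the real driver does the update under a spinlock):

#include <stdio.h>

struct dev_model {
	unsigned int tx_channel_id;	/* shared round-robin cursor */
};

static unsigned int assign_channel(struct dev_model *dev)
{
	unsigned int ch = dev->tx_channel_id;

	dev->tx_channel_id = !dev->tx_channel_id;	/* alternate 0, 1, 0, ... */
	return ch;
}

int main(void)
{
	struct dev_model dev = { 0 };

	for (int i = 0; i < 4; i++)
		printf("context %d -> channel %u\n", i, assign_channel(&dev));
	return 0;
}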
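In the marvell/hash.c hunk, the duplicated mv_cesa_adjust_op()/memcpy_toio() pair is dropped and the IVDIG writes are gated on !sreq->offset, so the engine's digest registers are seeded from creq->state only on the first step of a request; a re-stepped request must not overwrite the state the engine has already advanced. A small model of that guard (the types and step() below are invented for illustration):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define STATE_WORDS 8

struct req_model {
	uint32_t state[STATE_WORDS];	/* software copy of the digest state */
	unsigned int offset;		/* bytes already fed to the engine */
};

static uint32_t ivdig[STATE_WORDS];	/* stand-in for the IVDIG registers */

static void step(struct req_model *req, unsigned int chunk)
{
	/* Seed the engine only on the first step, as the hunk above does. */
	if (!req->offset)
		memcpy(ivdig, req->state, sizeof(ivdig));
	req->offset += chunk;
}

int main(void)
{
	struct req_model req = { .state = { 1, 2, 3 } };

	step(&req, 64);
	ivdig[0] = 42;		/* the engine advances its internal state */
	step(&req, 64);		/* second step: no reseed */
	assert(ivdig[0] == 42);	/* advanced state survives */
	return 0;
}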
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 441e86b23571..b3869748cc6b 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -183,8 +183,8 @@ static inline void padlock_store_cword(struct cword *cword)
 
 /*
  * While the padlock instructions don't use FP/SSE registers, they
- * generate a spurious DNA fault when cr0.ts is '1'. These instructions
- * should be used only inside the irq_ts_save/restore() context
+ * generate a spurious DNA fault when CR0.TS is '1'. Fortunately,
+ * the kernel doesn't use CR0.TS.
  */
 
 static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
@@ -298,24 +298,18 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
-	ts_state = irq_ts_save();
 	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
-	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
-	ts_state = irq_ts_save();
 	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
-	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -346,14 +340,12 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
-	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->E, &ctx->cword.encrypt,
@@ -361,7 +353,6 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	irq_ts_restore(ts_state);
 
 	padlock_store_cword(&ctx->cword.encrypt);
 
@@ -375,14 +366,12 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.decrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
-	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->D, &ctx->cword.decrypt,
@@ -390,7 +379,6 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	irq_ts_restore(ts_state);
 
 	padlock_store_cword(&ctx->cword.encrypt);
 
@@ -425,14 +413,12 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
-	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
 					    walk.dst.virt.addr, ctx->E,
@@ -442,7 +428,6 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	irq_ts_restore(ts_state);
 
 	padlock_store_cword(&ctx->cword.decrypt);
 
@@ -456,14 +441,12 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 	int err;
-	int ts_state;
 
 	padlock_reset_key(&ctx->cword.encrypt);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
-	ts_state = irq_ts_save();
 	while ((nbytes = walk.nbytes)) {
 		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
 				   ctx->D, walk.iv, &ctx->cword.decrypt,
@@ -472,8 +455,6 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 
-	irq_ts_restore(ts_state);
-
 	padlock_store_cword(&ctx->cword.encrypt);
 
 	return err;
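The padlock-aes.c hunks are pure deletions: once the kernel stopped using CR0.TS for lazy FPU switching, the xcrypt instructions could no longer hit the spurious device-not-available fault, so the irq_ts_save()/irq_ts_restore() bracketing became dead code and every call site loses three lines. For reference, the primitive those guards used to wrap looks roughly like the sketch below (modelled on the driver's rep_xcrypt_ecb(); it needs GCC-style inline asm on x86, executes only on VIA CPUs with ACE enabled, and omits the key-schedule and control-word setup):

#include <stddef.h>
#include <stdint.h>

/*
 * The opcode is emitted as raw bytes because older assemblers have no
 * mnemonic for it; ESI/EDI carry source and destination, EBX the key,
 * EDX the control word, and ECX the block count.
 */
static uint8_t *rep_xcrypt_ecb_sketch(const uint8_t *input, uint8_t *output,
				      void *key, void *control_word,
				      size_t count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcrypt-ecb */
		      : "+S" (input), "+D" (output)
		      : "d" (control_word), "b" (key), "c" (count));
	return output;
}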
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 8c5f90647b7a..bc72d20c32c3 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -89,7 +89,6 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
 	struct sha1_state state;
 	unsigned int space;
 	unsigned int leftover;
-	int ts_state;
 	int err;
 
 	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -120,14 +119,11 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
 
 	memcpy(result, &state.state, SHA1_DIGEST_SIZE);
 
-	/* prevent taking the spurious DNA fault with padlock. */
-	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
 		      : \
 		      : "c"((unsigned long)state.count + count), \
 			"a"((unsigned long)state.count), \
 			"S"(in), "D"(result));
-	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
 
@@ -155,7 +151,6 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
 	struct sha256_state state;
 	unsigned int space;
 	unsigned int leftover;
-	int ts_state;
 	int err;
 
 	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -186,14 +181,11 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
 
 	memcpy(result, &state.state, SHA256_DIGEST_SIZE);
 
-	/* prevent taking the spurious DNA fault with padlock. */
-	ts_state = irq_ts_save();
 	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
 		      : \
 		      : "c"((unsigned long)state.count + count), \
 			"a"((unsigned long)state.count), \
 			"S"(in), "D"(result));
-	irq_ts_restore(ts_state);
 
 	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
 
@@ -312,7 +304,6 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
 	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
 		((aligned(STACK_ALIGN)));
 	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-	int ts_state;
 
 	partial = sctx->count & 0x3f;
 	sctx->count += len;
@@ -328,23 +319,19 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
 			memcpy(sctx->buffer + partial, data,
 				done + SHA1_BLOCK_SIZE);
 			src = sctx->buffer;
-			ts_state = irq_ts_save();
 			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
 			: "+S"(src), "+D"(dst) \
 			: "a"((long)-1), "c"((unsigned long)1));
-			irq_ts_restore(ts_state);
 			done += SHA1_BLOCK_SIZE;
 			src = data + done;
 		}
 
 		/* Process the left bytes from the input data */
 		if (len - done >= SHA1_BLOCK_SIZE) {
-			ts_state = irq_ts_save();
 			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
 			: "+S"(src), "+D"(dst)
 			: "a"((long)-1),
 			"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
-			irq_ts_restore(ts_state);
 			done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
 			src = data + done;
 		}
@@ -401,7 +388,6 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
 	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
 		((aligned(STACK_ALIGN)));
 	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-	int ts_state;
 
 	partial = sctx->count & 0x3f;
 	sctx->count += len;
@@ -417,23 +403,19 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
 			memcpy(sctx->buf + partial, data,
 				done + SHA256_BLOCK_SIZE);
 			src = sctx->buf;
-			ts_state = irq_ts_save();
 			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
 			: "+S"(src), "+D"(dst)
 			: "a"((long)-1), "c"((unsigned long)1));
-			irq_ts_restore(ts_state);
 			done += SHA256_BLOCK_SIZE;
 			src = data + done;
 		}
 
 		/* Process the left bytes from input data*/
 		if (len - done >= SHA256_BLOCK_SIZE) {
-			ts_state = irq_ts_save();
 			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
 			: "+S"(src), "+D"(dst)
 			: "a"((long)-1),
 			"c"((unsigned long)((len - done) / 64)));
-			irq_ts_restore(ts_state);
 			done += ((len - done) - (len - done) % 64);
 			src = data + done;
 		}
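The padlock-sha.c hunks likewise delete only the TS bracketing; the block accounting around the asm is untouched. That arithmetic is easy to misread, especially where the SHA-256 path hardcodes the block size as a bare 64: after any buffered partial block is flushed, one rep xsha256 consumes (len - done) / 64 full blocks, done advances by the corresponding byte count, and the remainder stays buffered for the next update. A standalone model of that step (full_block_bytes() is a name invented for this sketch):

#include <assert.h>
#include <stddef.h>

#define SHA256_BLOCK_SIZE 64	/* the value the driver writes as a bare 64 */

/* Bytes consumed by the multi-block step in padlock_sha256_update_nano(). */
static size_t full_block_bytes(size_t len, size_t done)
{
	return (len - done) - (len - done) % SHA256_BLOCK_SIZE;
}

int main(void)
{
	assert(full_block_bytes(200, 0) == 192);	/* three full blocks */
	assert(full_block_bytes(200, 64) == 128);	/* two more after a flush */
	assert(full_block_bytes(63, 0) == 0);		/* short input stays buffered */
	return 0;
}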