Diffstat (limited to 'crypto')
-rw-r--r--  crypto/ablkcipher.c                  |  2 +-
-rw-r--r--  crypto/algif_aead.c                  |  4 ++--
-rw-r--r--  crypto/algif_skcipher.c              | 67 +++++++++++++++++++++++++++++++++----------------------------------
-rw-r--r--  crypto/async_tx/async_memcpy.c       |  2 +-
-rw-r--r--  crypto/async_tx/async_pq.c           |  4 ++--
-rw-r--r--  crypto/async_tx/async_raid6_recov.c  |  4 ++--
-rw-r--r--  crypto/async_tx/async_xor.c          |  4 ++--
-rw-r--r--  crypto/blkcipher.c                   |  2 +-
8 files changed, 44 insertions(+), 45 deletions(-)
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index b4ffc5be1a93..e5b5721809e2 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -277,12 +277,12 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req,
 	if (WARN_ON_ONCE(in_irq()))
 		return -EDEADLK;
 
+	walk->iv = req->info;
 	walk->nbytes = walk->total;
 	if (unlikely(!walk->total))
 		return 0;
 
 	walk->iv_buffer = NULL;
-	walk->iv = req->info;
 	if (unlikely(((unsigned long)walk->iv & alignmask))) {
 		int err = ablkcipher_copy_iv(walk, tfm, alignmask);
 
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index f70bcf844975..4c93b8a4e81e 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -125,7 +125,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
 	if (flags & MSG_DONTWAIT)
 		return -EAGAIN;
 
-	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
 	for (;;) {
 		if (signal_pending(current))
@@ -139,7 +139,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
 	}
 	finish_wait(sk_sleep(sk), &wait);
 
-	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
 	return err;
 }
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index bbb1b66e969c..5c756b30e79b 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -47,7 +47,7 @@ struct skcipher_ctx {
 	bool merge;
 	bool enc;
 
-	struct ablkcipher_request req;
+	struct skcipher_request req;
 };
 
 struct skcipher_async_rsgl {
@@ -64,13 +64,13 @@ struct skcipher_async_req {
 };
 
 #define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
-	crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))
+	crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
 
 #define GET_REQ_SIZE(ctx) \
-	crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))
+	crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
 
 #define GET_IV_SIZE(ctx) \
-	crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))
+	crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
 
 #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
 		      sizeof(struct scatterlist) - 1)
@@ -212,7 +212,7 @@ static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
 	if (flags & MSG_DONTWAIT)
 		return -EAGAIN;
 
-	set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
 	for (;;) {
 		if (signal_pending(current))
@@ -258,7 +258,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
 		return -EAGAIN;
 	}
 
-	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
 	for (;;) {
 		if (signal_pending(current))
@@ -272,7 +272,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
 	}
 	finish_wait(sk_sleep(sk), &wait);
 
-	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 
 	return err;
 }
@@ -302,8 +302,8 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
-	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
+	unsigned ivsize = crypto_skcipher_ivsize(tfm);
 	struct skcipher_sg_list *sgl;
 	struct af_alg_control con = {};
 	long copied = 0;
@@ -507,7 +507,7 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	struct skcipher_sg_list *sgl;
 	struct scatterlist *sg;
 	struct skcipher_async_req *sreq;
-	struct ablkcipher_request *req;
+	struct skcipher_request *req;
 	struct skcipher_async_rsgl *last_rsgl = NULL;
 	unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
 	unsigned int reqlen = sizeof(struct skcipher_async_req) +
@@ -531,9 +531,9 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	}
 	sg_init_table(sreq->tsg, tx_nents);
 	memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
-	ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req));
-	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-					skcipher_async_cb, sk);
+	skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      skcipher_async_cb, sk);
 
 	while (iov_iter_count(&msg->msg_iter)) {
 		struct skcipher_async_rsgl *rsgl;
@@ -608,10 +608,10 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	if (mark)
 		sg_mark_end(sreq->tsg + txbufs - 1);
 
-	ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
-				     len, sreq->iv);
-	err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
-			 crypto_ablkcipher_decrypt(req);
+	skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
+				   len, sreq->iv);
+	err = ctx->enc ? crypto_skcipher_encrypt(req) :
+			 crypto_skcipher_decrypt(req);
 	if (err == -EINPROGRESS) {
 		atomic_inc(&ctx->inflight);
 		err = -EIOCBQUEUED;
@@ -632,7 +632,7 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
-	unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
+	unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
 		&ctx->req));
 	struct skcipher_sg_list *sgl;
 	struct scatterlist *sg;
@@ -669,14 +669,13 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
 		if (!used)
 			goto free;
 
-		ablkcipher_request_set_crypt(&ctx->req, sg,
-					     ctx->rsgl.sg, used,
-					     ctx->iv);
+		skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
+					   ctx->iv);
 
 		err = af_alg_wait_for_completion(
 				ctx->enc ?
-					crypto_ablkcipher_encrypt(&ctx->req) :
-					crypto_ablkcipher_decrypt(&ctx->req),
+					crypto_skcipher_encrypt(&ctx->req) :
+					crypto_skcipher_decrypt(&ctx->req),
 				&ctx->completion);
 
 free:
@@ -751,17 +750,17 @@ static struct proto_ops algif_skcipher_ops = {
 
 static void *skcipher_bind(const char *name, u32 type, u32 mask)
 {
-	return crypto_alloc_ablkcipher(name, type, mask);
+	return crypto_alloc_skcipher(name, type, mask);
 }
 
 static void skcipher_release(void *private)
 {
-	crypto_free_ablkcipher(private);
+	crypto_free_skcipher(private);
 }
 
 static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
 {
-	return crypto_ablkcipher_setkey(private, key, keylen);
+	return crypto_skcipher_setkey(private, key, keylen);
 }
 
 static void skcipher_wait(struct sock *sk)
@@ -778,13 +777,13 @@ static void skcipher_sock_destruct(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
 
 	if (atomic_read(&ctx->inflight))
 		skcipher_wait(sk);
 
 	skcipher_free_sgl(sk);
-	sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
+	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
 	sock_kfree_s(sk, ctx, ctx->len);
 	af_alg_release_parent(sk);
 }
@@ -793,20 +792,20 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
 {
 	struct skcipher_ctx *ctx;
 	struct alg_sock *ask = alg_sk(sk);
-	unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
+	unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private);
 
 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
-	ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
+	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private),
 			       GFP_KERNEL);
 	if (!ctx->iv) {
 		sock_kfree_s(sk, ctx, len);
 		return -ENOMEM;
 	}
 
-	memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
+	memset(ctx->iv, 0, crypto_skcipher_ivsize(private));
 
 	INIT_LIST_HEAD(&ctx->tsgl);
 	ctx->len = len;
@@ -819,9 +818,9 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
 
 	ask->private = ctx;
 
-	ablkcipher_request_set_tfm(&ctx->req, private);
-	ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-					af_alg_complete, &ctx->completion);
+	skcipher_request_set_tfm(&ctx->req, private);
+	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      af_alg_complete, &ctx->completion);
 
 	sk->sk_destruct = skcipher_sock_destruct;
 
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index f8c0b8dbeb75..88bc8e6b2a54 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dmaengine_unmap_data *unmap = NULL;
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
 
 	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		unsigned long dma_prep_flags = 0;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 5d355e0c2633..c0748bbd4c08 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -188,7 +188,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	/* XORing P/Q is only implemented in software */
 	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
@@ -307,7 +307,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks < 4);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	if (unmap && disks <= dma_maxpq(device, 0) &&
 	    is_dma_pq_aligned(device, offset, 0, len)) {
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 934a84981495..8fab6275ea1f 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 	u8 *a, *b, *c;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		struct device *dev = dma->dev;
@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 	u8 *d, *s;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		dma_addr_t dma_dest[2];
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index e1bce26cd4f9..da75777f2b3f 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
 
 	if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
 		struct dma_async_tx_descriptor *tx;
@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
 
 	if (unmap && src_cnt <= device->max_xor &&
 	    is_dma_xor_aligned(device, offset, 0, len)) {
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 11b981492031..8cc1622b2ee0 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -326,12 +326,12 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
 	if (WARN_ON_ONCE(in_irq()))
 		return -EDEADLK;
 
+	walk->iv = desc->info;
 	walk->nbytes = walk->total;
 	if (unlikely(!walk->total))
 		return 0;
 
 	walk->buffer = NULL;
-	walk->iv = desc->info;
 	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
 		int err = blkcipher_copy_iv(walk);
 		if (err)