Diffstat (limited to 'net/tls')
-rw-r--r-- | net/tls/tls_device.c          | 46
-rw-r--r-- | net/tls/tls_device_fallback.c |  3
-rw-r--r-- | net/tls/tls_strp.c            |  2
-rw-r--r-- | net/tls/tls_sw.c              |  8
4 files changed, 43 insertions, 16 deletions
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index e3e6cf75aa03..0f983e5f7dde 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -71,7 +71,13 @@ static void tls_device_tx_del_task(struct work_struct *work)
 	struct tls_offload_context_tx *offload_ctx =
 		container_of(work, struct tls_offload_context_tx, destruct_work);
 	struct tls_context *ctx = offload_ctx->ctx;
-	struct net_device *netdev = ctx->netdev;
+	struct net_device *netdev;
+
+	/* Safe, because this is the destroy flow, refcount is 0, so
+	 * tls_device_down can't store this field in parallel.
+	 */
+	netdev = rcu_dereference_protected(ctx->netdev,
+					   !refcount_read(&ctx->refcount));

 	netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
 	dev_put(netdev);
@@ -81,6 +87,7 @@ static void tls_device_tx_del_task(struct work_struct *work)

 static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
 {
+	struct net_device *netdev;
 	unsigned long flags;
 	bool async_cleanup;

@@ -91,7 +98,14 @@ static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
 	}

 	list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */
-	async_cleanup = ctx->netdev && ctx->tx_conf == TLS_HW;
+
+	/* Safe, because this is the destroy flow, refcount is 0, so
+	 * tls_device_down can't store this field in parallel.
+	 */
+	netdev = rcu_dereference_protected(ctx->netdev,
+					   !refcount_read(&ctx->refcount));
+
+	async_cleanup = netdev && ctx->tx_conf == TLS_HW;
 	if (async_cleanup) {
 		struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);

@@ -229,7 +243,8 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,

 	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
 	down_read(&device_offload_lock);
-	netdev = tls_ctx->netdev;
+	netdev = rcu_dereference_protected(tls_ctx->netdev,
+					   lockdep_is_held(&device_offload_lock));
 	if (netdev)
 		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
@@ -710,7 +725,7 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,

 	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
 	rcu_read_lock();
-	netdev = READ_ONCE(tls_ctx->netdev);
+	netdev = rcu_dereference(tls_ctx->netdev);
 	if (netdev)
 		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
@@ -984,11 +999,17 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
 	int is_decrypted = skb->decrypted;
 	int is_encrypted = !is_decrypted;
 	struct sk_buff *skb_iter;
+	int left;

+	left = rxm->full_len - skb->len;
 	/* Check if all the data is decrypted already */
-	skb_walk_frags(skb, skb_iter) {
+	skb_iter = skb_shinfo(skb)->frag_list;
+	while (skb_iter && left > 0) {
 		is_decrypted &= skb_iter->decrypted;
 		is_encrypted &= !skb_iter->decrypted;
+
+		left -= skb_iter->len;
+		skb_iter = skb_iter->next;
 	}

 	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
@@ -1029,7 +1050,7 @@ static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
 	if (sk->sk_destruct != tls_device_sk_destruct) {
 		refcount_set(&ctx->refcount, 1);
 		dev_hold(netdev);
-		ctx->netdev = netdev;
+		RCU_INIT_POINTER(ctx->netdev, netdev);
 		spin_lock_irq(&tls_device_lock);
 		list_add_tail(&ctx->list, &tls_device_list);
 		spin_unlock_irq(&tls_device_lock);
@@ -1300,7 +1321,8 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
 	struct net_device *netdev;

 	down_read(&device_offload_lock);
-	netdev = tls_ctx->netdev;
+	netdev = rcu_dereference_protected(tls_ctx->netdev,
+					   lockdep_is_held(&device_offload_lock));
 	if (!netdev)
 		goto out;

@@ -1309,7 +1331,7 @@ void tls_device_offload_cleanup_rx(struct sock *sk)

 	if (tls_ctx->tx_conf != TLS_HW) {
 		dev_put(netdev);
-		tls_ctx->netdev = NULL;
+		rcu_assign_pointer(tls_ctx->netdev, NULL);
 	} else {
 		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
 	}
@@ -1329,7 +1351,11 @@ static int tls_device_down(struct net_device *netdev)

 	spin_lock_irqsave(&tls_device_lock, flags);
 	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
-		if (ctx->netdev != netdev ||
+		struct net_device *ctx_netdev =
+			rcu_dereference_protected(ctx->netdev,
+						  lockdep_is_held(&device_offload_lock));
+
+		if (ctx_netdev != netdev ||
 		    !refcount_inc_not_zero(&ctx->refcount))
 			continue;

@@ -1346,7 +1372,7 @@ static int tls_device_down(struct net_device *netdev)
 		/* Stop the RX and TX resync.
 		 * tls_dev_resync must not be called after tls_dev_del.
 		 */
-		WRITE_ONCE(ctx->netdev, NULL);
+		rcu_assign_pointer(ctx->netdev, NULL);

 		/* Start skipping the RX resync logic completely. */
 		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 618cee704217..7dfc8023e0f1 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -426,7 +426,8 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
				      struct net_device *dev,
				      struct sk_buff *skb)
 {
-	if (dev == tls_get_ctx(sk)->netdev || netif_is_bond_master(dev))
+	if (dev == rcu_dereference_bh(tls_get_ctx(sk)->netdev) ||
+	    netif_is_bond_master(dev))
 		return skb;

 	return tls_sw_fallback(sk, skb);
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
index f0b7c9122fba..9b79e334dbd9 100644
--- a/net/tls/tls_strp.c
+++ b/net/tls/tls_strp.c
@@ -41,7 +41,7 @@ static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
 	struct sk_buff *skb;
 	int i, err, offset;

-	skb = alloc_skb_with_frags(0, strp->anchor->len, TLS_PAGE_ORDER,
+	skb = alloc_skb_with_frags(0, strp->stm.full_len, TLS_PAGE_ORDER,
				   &err, strp->sk->sk_allocation);
 	if (!skb)
 		return NULL;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 17db8c8811fa..fe27241cd13f 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1352,7 +1352,7 @@ static int tls_setup_from_iter(struct iov_iter *from,
			rc = -EFAULT;
			goto out;
		}
-		copied = iov_iter_get_pages(from, pages,
+		copied = iov_iter_get_pages2(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
@@ -1360,8 +1360,6 @@ static int tls_setup_from_iter(struct iov_iter *from,
			goto out;
		}

-		iov_iter_advance(from, copied);
-
		length -= copied;
		size += copied;
		while (copied) {
@@ -2704,7 +2702,9 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
			crypto_info->version != TLS_1_3_VERSION &&
			!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);

-		tls_strp_init(&sw_ctx_rx->strp, sk);
+		rc = tls_strp_init(&sw_ctx_rx->strp, sk);
+		if (rc)
+			goto free_aead;
	}

	goto out;
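Note on the pattern most of the tls_device.c hunks apply: ctx->netdev becomes an RCU-managed pointer, published with RCU_INIT_POINTER()/rcu_assign_pointer(), read under rcu_read_lock() with rcu_dereference(), and read with rcu_dereference_protected() where the destroy path or device_offload_lock already excludes concurrent updates. The sketch below is a minimal userspace illustration of that publish/read/reclaim discipline, not part of the patch; it assumes liburcu is available and uses made-up fake_tls_ctx/fake_netdev types.

/* Userspace sketch of the RCU pointer pattern (illustration only).
 * Assumes liburcu is installed; build with: gcc sketch.c -lurcu -o sketch
 */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>		/* default (memb) userspace RCU flavour */

struct fake_netdev {
	const char *name;
};

struct fake_tls_ctx {
	struct fake_netdev *netdev;	/* RCU-protected pointer */
};

static struct fake_tls_ctx ctx;

/* Publisher: mirrors RCU_INIT_POINTER()/rcu_assign_pointer() in
 * tls_device_attach() and tls_device_down(). */
static void attach_netdev(struct fake_netdev *dev)
{
	rcu_assign_pointer(ctx.netdev, dev);
}

static void detach_netdev(void)
{
	/* The updater owns the pointer exclusively here, analogous to
	 * rcu_dereference_protected() in the kernel hunks. */
	struct fake_netdev *old = ctx.netdev;

	rcu_assign_pointer(ctx.netdev, NULL);
	synchronize_rcu();	/* wait for readers before freeing */
	free(old);
}

/* Reader: mirrors rcu_dereference() under rcu_read_lock() in
 * tls_device_resync_rx(). */
static void resync(void)
{
	struct fake_netdev *dev;

	rcu_read_lock();
	dev = rcu_dereference(ctx.netdev);
	if (dev)
		printf("resync via %s\n", dev->name);
	rcu_read_unlock();
}

int main(void)
{
	struct fake_netdev *dev = malloc(sizeof(*dev));

	dev->name = "eth0";
	rcu_register_thread();
	attach_netdev(dev);
	resync();
	detach_netdev();
	resync();	/* sees NULL and skips the device call */
	rcu_unregister_thread();
	return 0;
}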