author    | Feng Liu <feliu@nvidia.com>                     | 2023-05-12 18:18:12 +0300
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2023-05-24 19:32:45 +0300
commit    | 73f53bc295727a3cdbd9d6bcdfaa239258970cf4 (patch)
tree      | bf279bc9df3ce21b92de48eac6b12ec5f284ad00 /drivers
parent    | 978a55b0c096c374edfa73881b502052c5fe8e75 (diff)
download  | linux-73f53bc295727a3cdbd9d6bcdfaa239258970cf4.tar.xz
virtio_net: Fix error unwinding of XDP initialization
[ Upstream commit 5306623a9826aa7d63b32c6a3803c798a765474d ]
When initializing XDP in virtnet_open(), the XDP setup of one rq may
hit an error, causing the net device open to fail. However, the
previous rqs have already initialized XDP and enabled NAPI, which is
not the expected behavior. The already initialized rqs need to be
rolled back so that the error unwinding of the init code does not
leak them.
Also extract helper functions that disable and enable a queue pair.
Use the newly introduced disable helper in the error unwinding path
and in virtnet_close, and the enable helper in virtnet_open.
Fixes: 754b8a21a96d ("virtio_net: setup xdp_rxq_info")
Signed-off-by: Feng Liu <feliu@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: William Tu <witu@nvidia.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/virtio_net.c | 61
1 file changed, 44 insertions(+), 17 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 08a23ba3d68a..47788f093551 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1697,6 +1697,38 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	return received;
 }
 
+static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
+{
+	virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
+	napi_disable(&vi->rq[qp_index].napi);
+	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
+}
+
+static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
+{
+	struct net_device *dev = vi->dev;
+	int err;
+
+	err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
+			       vi->rq[qp_index].napi.napi_id);
+	if (err < 0)
+		return err;
+
+	err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
+					 MEM_TYPE_PAGE_SHARED, NULL);
+	if (err < 0)
+		goto err_xdp_reg_mem_model;
+
+	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
+	virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
+
+	return 0;
+
+err_xdp_reg_mem_model:
+	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
+	return err;
+}
+
 static int virtnet_open(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
@@ -1710,22 +1742,20 @@ static int virtnet_open(struct net_device *dev)
 		if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 			schedule_delayed_work(&vi->refill, 0);
 
-		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
+		err = virtnet_enable_queue_pair(vi, i);
 		if (err < 0)
-			return err;
-
-		err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
-						 MEM_TYPE_PAGE_SHARED, NULL);
-		if (err < 0) {
-			xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
-			return err;
-		}
-
-		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
-		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
+			goto err_enable_qp;
 	}
 
 	return 0;
+
+err_enable_qp:
+	disable_delayed_refill(vi);
+	cancel_delayed_work_sync(&vi->refill);
+
+	for (i--; i >= 0; i--)
+		virtnet_disable_queue_pair(vi, i);
+	return err;
 }
 
 static int virtnet_poll_tx(struct napi_struct *napi, int budget)
@@ -2157,11 +2187,8 @@ static int virtnet_close(struct net_device *dev)
 	/* Make sure refill_work doesn't re-enable napi! */
 	cancel_delayed_work_sync(&vi->refill);
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
-		virtnet_napi_tx_disable(&vi->sq[i].napi);
-		napi_disable(&vi->rq[i].napi);
-		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
-	}
+	for (i = 0; i < vi->max_queue_pairs; i++)
+		virtnet_disable_queue_pair(vi, i);
 
 	return 0;
 }
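
For readers outside the driver, here is a minimal, self-contained sketch of the error-unwinding pattern the patch applies in virtnet_open(): enable resources one at a time and, on failure, roll back only the ones already enabled, in reverse order. The names fake_enable_qp(), fake_disable_qp(), MAX_QP and FAIL_AT are hypothetical stand-ins for illustration; only the control flow (the goto label and the "for (i--; i >= 0; i--)" rollback loop) mirrors the patch.

/* Build as an ordinary userspace program, e.g. cc -Wall unwind.c */
#include <stdio.h>

#define MAX_QP	4
#define FAIL_AT	2	/* inject a failure on the third queue pair */

static int enabled[MAX_QP];

/* Stand-in for virtnet_enable_queue_pair(): may fail partway through open. */
static int fake_enable_qp(int i)
{
	if (i == FAIL_AT)
		return -1;	/* simulate e.g. xdp_rxq_info_reg() failing */
	enabled[i] = 1;
	printf("qp %d enabled\n", i);
	return 0;
}

/* Stand-in for virtnet_disable_queue_pair(): undoes one queue pair. */
static void fake_disable_qp(int i)
{
	enabled[i] = 0;
	printf("qp %d disabled (rollback)\n", i);
}

/* Mirrors the new control flow of virtnet_open(). */
static int fake_open(void)
{
	int i, err;

	for (i = 0; i < MAX_QP; i++) {
		err = fake_enable_qp(i);
		if (err < 0)
			goto err_enable_qp;
	}
	return 0;

err_enable_qp:
	/* Roll back only the queue pairs that were already enabled. */
	for (i--; i >= 0; i--)
		fake_disable_qp(i);
	return err;
}

int main(void)
{
	printf("fake_open() returned %d\n", fake_open());
	return 0;
}

Run as-is, the sketch enables pairs 0 and 1, hits the injected failure on pair 2, and then disables 1 and 0 in reverse order before returning the error, which is the ordering virtnet_open() now guarantees through the err_enable_qp label instead of returning early with earlier rqs still registered.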