Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c | 503
1 file changed, 322 insertions(+), 181 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 64c87bb48a41..5d674eb9a0f2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -360,24 +360,7 @@ struct receive_queue {
struct xdp_buff **xsk_buffs;
};
-/* This structure can contain rss message with maximum settings for indirection table and keysize
- * Note, that default structure that describes RSS configuration virtio_net_rss_config
- * contains same info but can't handle table values.
- * In any case, structure would be passed to virtio hw through sg_buf split by parts
- * because table sizes may be differ according to the device configuration.
- */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
-struct virtio_net_ctrl_rss {
- u32 hash_types;
- u16 indirection_table_mask;
- u16 unclassified_queue;
- u16 hash_cfg_reserved; /* for HASH_CONFIG (see virtio_net_hash_config for details) */
- u16 max_tx_vq;
- u8 hash_key_length;
- u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
-
- u16 *indirection_table;
-};
/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
@@ -421,7 +404,9 @@ struct virtnet_info {
u16 rss_indir_table_size;
u32 rss_hash_types_supported;
u32 rss_hash_types_saved;
- struct virtio_net_ctrl_rss rss;
+ struct virtio_net_rss_config_hdr *rss_hdr;
+ struct virtio_net_rss_config_trailer rss_trailer;
+ u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
/* Has control virtqueue */
bool has_cvq;
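
For reference, the replacement types come from the virtio UAPI header; a sketch of their layout, per the virtio spec (see include/uapi/linux/virtio_net.h for the authoritative definitions):

    struct virtio_net_rss_config_hdr {
            __le32 hash_types;
            __le16 indirection_table_mask;
            __le16 unclassified_queue;
            __le16 indirection_table[];     /* sized per device config */
    };

    struct virtio_net_rss_config_trailer {
            __le16 max_tx_vq;
            __u8 hash_key_length;
            __u8 hash_key_data[];           /* up to VIRTIO_NET_RSS_MAX_KEY_SIZE */
    };

Note that rss_hash_key_data is placed immediately after rss_trailer in struct virtnet_info, so it provides the backing storage for the trailer's flexible hash_key_data array.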
@@ -503,6 +488,7 @@ struct virtio_net_common_hdr {
static struct virtio_net_common_hdr xsk_hdr;
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq);
static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
struct net_device *dev,
unsigned int *xdp_xmit,
@@ -522,23 +508,16 @@ enum virtnet_xmit_type {
VIRTNET_XMIT_TYPE_XSK,
};
-static int rss_indirection_table_alloc(struct virtio_net_ctrl_rss *rss, u16 indir_table_size)
+static size_t virtnet_rss_hdr_size(const struct virtnet_info *vi)
{
- if (!indir_table_size) {
- rss->indirection_table = NULL;
- return 0;
- }
-
- rss->indirection_table = kmalloc_array(indir_table_size, sizeof(u16), GFP_KERNEL);
- if (!rss->indirection_table)
- return -ENOMEM;
+ u16 indir_table_size = vi->has_rss ? vi->rss_indir_table_size : 1;
- return 0;
+ return struct_size(vi->rss_hdr, indirection_table, indir_table_size);
}
-static void rss_indirection_table_free(struct virtio_net_ctrl_rss *rss)
+static size_t virtnet_rss_trailer_size(const struct virtnet_info *vi)
{
- kfree(rss->indirection_table);
+ return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size);
}
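
struct_size() from <linux/overflow.h> computes the size of a structure that ends in a flexible array, with overflow checking; what the two helpers above evaluate to, with illustrative numbers:

    /* Illustration only: assume a 128-entry indirection table and a
     * 40-byte hash key.
     */
    size_t hdr_len = struct_size(vi->rss_hdr, indirection_table, 128);
            /* offsetof(..., indirection_table) + 128 * sizeof(__le16)
             * = 8 + 256 = 264 bytes */
    size_t trailer_len = struct_size(&vi->rss_trailer, hash_key_data, 40);
            /* offsetof(..., hash_key_data) + 40 = 3 + 40 = 43 bytes */

With !vi->has_rss the header is sized for a single table entry; the resulting byte layout lines up with the reserved words in the virtio_net_hash_config layout used by the HASH_CONFIG command.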
/* We use the last two bits of the pointer to distinguish the xmit type. */
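
Buffers returned by the virtqueue are bare void pointers, so the enum virtnet_xmit_type value is packed into the two low bits of the (at least 4-byte-aligned) pointer. A minimal sketch of the idiom (helper names here are illustrative, not necessarily the driver's):

    #define VIRTNET_XMIT_TYPE_MASK  0x3UL   /* low two bits carry the type */

    static void *xmit_ptr_pack(void *ptr, enum virtnet_xmit_type type)
    {
            return (void *)((unsigned long)ptr | type);
    }

    static void *xmit_ptr_unpack(void *buf, enum virtnet_xmit_type *type)
    {
            unsigned long p = (unsigned long)buf;

            *type = p & VIRTNET_XMIT_TYPE_MASK;
            return (void *)(p & ~VIRTNET_XMIT_TYPE_MASK);
    }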
@@ -799,6 +778,26 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
+static int check_mergeable_len(struct net_device *dev, void *mrg_ctx,
+ unsigned int len)
+{
+ unsigned int headroom, tailroom, room, truesize;
+
+ truesize = mergeable_ctx_to_truesize(mrg_ctx);
+ headroom = mergeable_ctx_to_headroom(mrg_ctx);
+ tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+ room = SKB_DATA_ALIGN(headroom + tailroom);
+
+ if (len > truesize - room) {
+ pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+ dev->name, len, (unsigned long)(truesize - room));
+ DEV_STATS_INC(dev, rx_length_errors);
+ return -1;
+ }
+
+ return 0;
+}
+
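
The headroom encoded in the mergeable context is non-zero only for XDP receive buffers, which also reserve tailroom for a struct skb_shared_info. A worked example with illustrative x86-64 values (exact sizes depend on architecture and config):

    headroom = 256;                 /* XDP_PACKET_HEADROOM */
    tailroom = 320;                 /* sizeof(struct skb_shared_info), x86-64 */
    room = SKB_DATA_ALIGN(576);     /* stays 576 with 64-byte cache lines */
    /* With a 4096-byte truesize, any len above 3520 bytes is counted as
     * an rx_length_error and the buffer is dropped by the caller.
     */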
static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
unsigned int headroom,
unsigned int len)
@@ -1087,11 +1086,10 @@ static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
return false;
}
-static void check_sq_full_and_disable(struct virtnet_info *vi,
- struct net_device *dev,
- struct send_queue *sq)
+static bool tx_may_stop(struct virtnet_info *vi,
+ struct net_device *dev,
+ struct send_queue *sq)
{
- bool use_napi = sq->napi.weight;
int qnum;
qnum = sq - vi->sq;
@@ -1106,20 +1104,39 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
* Since most packets only take 1 or 2 ring slots, stopping the queue
* early means 16 slots are typically wasted.
*/
- if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
+ if (sq->vq->num_free < MAX_SKB_FRAGS + 2) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
netif_tx_stop_queue(txq);
u64_stats_update_begin(&sq->stats.syncp);
u64_stats_inc(&sq->stats.stop);
u64_stats_update_end(&sq->stats.syncp);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void check_sq_full_and_disable(struct virtnet_info *vi,
+ struct net_device *dev,
+ struct send_queue *sq)
+{
+ bool use_napi = sq->napi.weight;
+ int qnum;
+
+ qnum = sq - vi->sq;
+
+ if (tx_may_stop(vi, dev, sq)) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
+
if (use_napi) {
if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
virtqueue_napi_schedule(&sq->napi, sq->vq);
} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
free_old_xmit(sq, txq, false);
- if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+ if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
netif_start_subqueue(dev, qnum);
u64_stats_update_begin(&sq->stats.syncp);
u64_stats_inc(&sq->stats.wake);
@@ -1130,15 +1147,29 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
}
}
+/* Note that @len is the length of received data without virtio header */
static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
- struct receive_queue *rq, void *buf, u32 len)
+ struct receive_queue *rq, void *buf,
+ u32 len, bool first_buf)
{
struct xdp_buff *xdp;
u32 bufsize;
xdp = (struct xdp_buff *)buf;
- bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len;
+ /* In virtnet_add_recvbuf_xsk, we use part of XDP_PACKET_HEADROOM for
+ * virtio header and ask the vhost to fill data from
+ * hard_start + XDP_PACKET_HEADROOM - vi->hdr_len
+ * The first buffer has virtio header so the remaining region for frame
+ * data is
+ * xsk_pool_get_rx_frame_size()
+ * While other buffers than the first one do not have virtio header, so
+ * the maximum frame data's length can be
+ * xsk_pool_get_rx_frame_size() + vi->hdr_len
+ */
+ bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool);
+ if (!first_buf)
+ bufsize += vi->hdr_len;
if (unlikely(len > bufsize)) {
pr_debug("%s: rx error: len %u exceeds truesize %u\n",
@@ -1263,7 +1294,7 @@ static int xsk_append_merge_buffer(struct virtnet_info *vi,
u64_stats_add(&stats->bytes, len);
- xdp = buf_to_xdp(vi, rq, buf, len);
+ xdp = buf_to_xdp(vi, rq, buf, len, false);
if (!xdp)
goto err;
@@ -1361,7 +1392,7 @@ static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queu
u64_stats_add(&stats->bytes, len);
- xdp = buf_to_xdp(vi, rq, buf, len);
+ xdp = buf_to_xdp(vi, rq, buf, len, true);
if (!xdp)
return;
@@ -1800,7 +1831,8 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
* across multiple buffers (num_buf > 1), and we make sure buffers
* have enough headroom.
*/
-static struct page *xdp_linearize_page(struct receive_queue *rq,
+static struct page *xdp_linearize_page(struct net_device *dev,
+ struct receive_queue *rq,
int *num_buf,
struct page *p,
int offset,
@@ -1820,18 +1852,27 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
page_off += *len;
+ /* Only mergeable mode can go inside this while loop. In small mode,
+ * *num_buf == 1, so it cannot go inside.
+ */
while (--*num_buf) {
unsigned int buflen;
void *buf;
+ void *ctx;
int off;
- buf = virtnet_rq_get_buf(rq, &buflen, NULL);
+ buf = virtnet_rq_get_buf(rq, &buflen, &ctx);
if (unlikely(!buf))
goto err_buf;
p = virt_to_head_page(buf);
off = buf - page_address(p);
+ if (check_mergeable_len(dev, ctx, buflen)) {
+ put_page(p);
+ goto err_buf;
+ }
+
/* guard against a misconfigured or uncooperative backend that
* is sending packet larger than the MTU.
*/
@@ -1920,7 +1961,7 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
headroom = vi->hdr_len + header_offset;
buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- xdp_page = xdp_linearize_page(rq, &num_buf, page,
+ xdp_page = xdp_linearize_page(dev, rq, &num_buf, page,
offset, header_offset,
&tlen);
if (!xdp_page)
@@ -2129,10 +2170,9 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
- unsigned int headroom, tailroom, room;
- unsigned int truesize, cur_frag_size;
struct skb_shared_info *shinfo;
unsigned int xdp_frags_truesz = 0;
+ unsigned int truesize;
struct page *page;
skb_frag_t *frag;
int offset;
@@ -2175,21 +2215,14 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
page = virt_to_head_page(buf);
offset = buf - page_address(page);
- truesize = mergeable_ctx_to_truesize(ctx);
- headroom = mergeable_ctx_to_headroom(ctx);
- tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
- room = SKB_DATA_ALIGN(headroom + tailroom);
-
- cur_frag_size = truesize;
- xdp_frags_truesz += cur_frag_size;
- if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
+ if (check_mergeable_len(dev, ctx, len)) {
put_page(page);
- pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
- dev->name, len, (unsigned long)(truesize - room));
- DEV_STATS_INC(dev, rx_length_errors);
goto err;
}
+ truesize = mergeable_ctx_to_truesize(ctx);
+ xdp_frags_truesz += truesize;
+
frag = &shinfo->frags[shinfo->nr_frags++];
skb_frag_fill_page_desc(frag, page, offset, len);
if (page_is_pfmemalloc(page))
@@ -2255,7 +2288,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
*/
if (!xdp_prog->aux->xdp_has_frags) {
/* linearize data for XDP */
- xdp_page = xdp_linearize_page(rq, num_buf,
+ xdp_page = xdp_linearize_page(vi->dev, rq, num_buf,
*page, offset,
XDP_PACKET_HEADROOM,
len);
@@ -2403,18 +2436,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
struct sk_buff *head_skb, *curr_skb;
unsigned int truesize = mergeable_ctx_to_truesize(ctx);
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
- unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
- unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
head_skb = NULL;
u64_stats_add(&stats->bytes, len - vi->hdr_len);
- if (unlikely(len > truesize - room)) {
- pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
- dev->name, len, (unsigned long)(truesize - room));
- DEV_STATS_INC(dev, rx_length_errors);
+ if (check_mergeable_len(dev, ctx, len))
goto err_skb;
- }
if (unlikely(vi->xdp_enabled)) {
struct bpf_prog *xdp_prog;
@@ -2449,17 +2476,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
u64_stats_add(&stats->bytes, len);
page = virt_to_head_page(buf);
- truesize = mergeable_ctx_to_truesize(ctx);
- headroom = mergeable_ctx_to_headroom(ctx);
- tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
- room = SKB_DATA_ALIGN(headroom + tailroom);
- if (unlikely(len > truesize - room)) {
- pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
- dev->name, len, (unsigned long)(truesize - room));
- DEV_STATS_INC(dev, rx_length_errors);
+ if (check_mergeable_len(dev, ctx, len))
goto err_skb;
- }
+ truesize = mergeable_ctx_to_truesize(ctx);
curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
buf, len, truesize);
if (!curr_skb)
@@ -2788,7 +2808,8 @@ static void skb_recv_done(struct virtqueue *rvq)
virtqueue_napi_schedule(&rq->napi, rvq);
}
-static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
+static void virtnet_napi_do_enable(struct virtqueue *vq,
+ struct napi_struct *napi)
{
napi_enable(napi);
@@ -2801,10 +2822,21 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
local_bh_enable();
}
-static void virtnet_napi_tx_enable(struct virtnet_info *vi,
- struct virtqueue *vq,
- struct napi_struct *napi)
+static void virtnet_napi_enable(struct receive_queue *rq)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ int qidx = vq2rxq(rq->vq);
+
+ virtnet_napi_do_enable(rq->vq, &rq->napi);
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi);
+}
+
+static void virtnet_napi_tx_enable(struct send_queue *sq)
+{
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct napi_struct *napi = &sq->napi;
+ int qidx = vq2txq(sq->vq);
+
if (!napi->weight)
return;
@@ -2816,13 +2848,30 @@ static void virtnet_napi_tx_enable(struct virtnet_info *vi,
return;
}
- return virtnet_napi_enable(vq, napi);
+ virtnet_napi_do_enable(sq->vq, napi);
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi);
}
-static void virtnet_napi_tx_disable(struct napi_struct *napi)
+static void virtnet_napi_tx_disable(struct send_queue *sq)
{
- if (napi->weight)
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct napi_struct *napi = &sq->napi;
+ int qidx = vq2txq(sq->vq);
+
+ if (napi->weight) {
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL);
napi_disable(napi);
+ }
+}
+
+static void virtnet_napi_disable(struct receive_queue *rq)
+{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ struct napi_struct *napi = &rq->napi;
+ int qidx = vq2rxq(rq->vq);
+
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL);
+ napi_disable(napi);
}
static void refill_work(struct work_struct *work)
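
The netif_queue_set_napi() calls publish the queue-to-NAPI mapping for the netdev netlink queue API; note the ordering convention in the helpers above, sketched here:

    /* Enable:  napi_enable(napi);
     *          netif_queue_set_napi(dev, qidx, type, napi);
     * Disable: netif_queue_set_napi(dev, qidx, type, NULL);
     *          napi_disable(napi);
     */

The mapping is set only after the NAPI instance is enabled and cleared before it is disabled, so userspace never observes a mapping to a NAPI that is being torn down.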
@@ -2835,9 +2884,23 @@ static void refill_work(struct work_struct *work)
for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
+ /*
+ * When queue API support is added in the future and the call
+ * below becomes napi_disable_locked, this driver will need to
+ * be refactored.
+ *
+ * One possible solution would be to:
+ * - cancel refill_work with cancel_delayed_work (note:
+ * non-sync)
+ * - cancel refill_work with cancel_delayed_work_sync in
+ * virtnet_remove after the netdev is unregistered
+ * - wrap all of the work in a lock (perhaps the netdev
+ * instance lock)
+ * - check netif_running() and return early to avoid a race
+ */
napi_disable(&rq->napi);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_enable(rq->vq, &rq->napi);
+ virtnet_napi_do_enable(rq->vq, &rq->napi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again.
@@ -2958,7 +3021,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
free_old_xmit(sq, txq, !!budget);
} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
- if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+ if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
if (netif_tx_queue_stopped(txq)) {
u64_stats_update_begin(&sq->stats.syncp);
u64_stats_inc(&sq->stats.wake);
@@ -3034,8 +3097,8 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
{
- virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
- napi_disable(&vi->rq[qp_index].napi);
+ virtnet_napi_tx_disable(&vi->sq[qp_index]);
+ virtnet_napi_disable(&vi->rq[qp_index]);
xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
}
@@ -3054,9 +3117,8 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
if (err < 0)
goto err_xdp_reg_mem_model;
- netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qp_index));
- virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
- virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
+ virtnet_napi_enable(&vi->rq[qp_index]);
+ virtnet_napi_tx_enable(&vi->sq[qp_index]);
return 0;
@@ -3156,7 +3218,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
else
free_old_xmit(sq, txq, !!budget);
- if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+ if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) {
if (netif_tx_queue_stopped(txq)) {
u64_stats_update_begin(&sq->stats.syncp);
u64_stats_inc(&sq->stats.wake);
@@ -3253,15 +3315,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
bool use_napi = sq->napi.weight;
bool kick;
- /* Free up any pending old buffers before queueing new ones. */
- do {
- if (use_napi)
- virtqueue_disable_cb(sq->vq);
-
+ if (!use_napi)
free_old_xmit(sq, txq, false);
-
- } while (use_napi && !xmit_more &&
- unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
+ else
+ virtqueue_disable_cb(sq->vq);
/* timestamp packet in software */
skb_tx_timestamp(skb);
@@ -3287,7 +3344,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset_ct(skb);
}
- check_sq_full_and_disable(vi, dev, sq);
+ if (use_napi)
+ tx_may_stop(vi, dev, sq);
+ else
+ check_sq_full_and_disable(vi, dev, sq);

kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) :
!xmit_more || netif_xmit_stopped(txq);
@@ -3299,28 +3359,81 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ if (use_napi && kick && unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
+ virtqueue_napi_schedule(&sq->napi, sq->vq);
+
return NETDEV_TX_OK;
}
-static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
+static void __virtnet_rx_pause(struct virtnet_info *vi,
+ struct receive_queue *rq)
{
bool running = netif_running(vi->dev);
if (running) {
- napi_disable(&rq->napi);
+ virtnet_napi_disable(rq);
virtnet_cancel_dim(vi, &rq->dim);
}
}
-static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
+static void virtnet_rx_pause_all(struct virtnet_info *vi)
+{
+ int i;
+
+ /*
+ * Make sure refill_work does not run concurrently to
+ * avoid napi_disable race which leads to deadlock.
+ */
+ disable_delayed_refill(vi);
+ cancel_delayed_work_sync(&vi->refill);
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ __virtnet_rx_pause(vi, &vi->rq[i]);
+}
+
+static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
+{
+ /*
+ * Make sure refill_work does not run concurrently to
+ * avoid napi_disable race which leads to deadlock.
+ */
+ disable_delayed_refill(vi);
+ cancel_delayed_work_sync(&vi->refill);
+ __virtnet_rx_pause(vi, rq);
+}
+
+static void __virtnet_rx_resume(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ bool refill)
{
bool running = netif_running(vi->dev);
+ bool schedule_refill = false;
- if (!try_fill_recv(vi, rq, GFP_KERNEL))
+ if (refill && !try_fill_recv(vi, rq, GFP_KERNEL))
+ schedule_refill = true;
+ if (running)
+ virtnet_napi_enable(rq);
+
+ if (schedule_refill)
schedule_delayed_work(&vi->refill, 0);
+}
- if (running)
- virtnet_napi_enable(rq->vq, &rq->napi);
+static void virtnet_rx_resume_all(struct virtnet_info *vi)
+{
+ int i;
+
+ enable_delayed_refill(vi);
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ if (i < vi->curr_queue_pairs)
+ __virtnet_rx_resume(vi, &vi->rq[i], true);
+ else
+ __virtnet_rx_resume(vi, &vi->rq[i], false);
+ }
+}
+
+static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
+{
+ enable_delayed_refill(vi);
+ __virtnet_rx_resume(vi, rq, true);
}
static int virtnet_rx_resize(struct virtnet_info *vi,
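
The pause paths must quiesce refill_work before touching NAPI state because refill_work itself disables and re-enables the same NAPI instances; racing a second napi_disable() against that pair can deadlock. The required ordering, sketched:

    disable_delayed_refill(vi);            /* no new refill_work runs  */
    cancel_delayed_work_sync(&vi->refill); /* wait out a running one   */
    virtnet_napi_disable(rq);              /* now the only NAPI toggler */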
@@ -3332,7 +3445,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
virtnet_rx_pause(vi, rq);
- err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
+ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL);
if (err)
netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
@@ -3349,7 +3462,7 @@ static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
qindex = sq - vi->sq;
if (running)
- virtnet_napi_tx_disable(&sq->napi);
+ virtnet_napi_tx_disable(sq);
txq = netdev_get_tx_queue(vi->dev, qindex);
@@ -3383,7 +3496,7 @@ static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
__netif_tx_unlock_bh(txq);
if (running)
- virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+ virtnet_napi_tx_enable(sq);
}
static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
@@ -3391,11 +3504,18 @@ static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
{
int qindex, err;
+ if (ring_num <= MAX_SKB_FRAGS + 2) {
+ netdev_err(vi->dev, "tx size (%d) cannot be smaller than %d\n",
+ ring_num, MAX_SKB_FRAGS + 2);
+ return -EINVAL;
+ }
+
qindex = sq - vi->sq;
virtnet_tx_pause(vi, sq);
- err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+ err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf,
+ virtnet_sq_free_unused_buf_done);
if (err)
netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
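
The new lower bound reflects the worst-case descriptor usage of a single skb; a sketch of the accounting (assuming no indirect descriptors):

    /* 1 (virtio-net header) + 1 (linear data) + MAX_SKB_FRAGS page
     * fragments = MAX_SKB_FRAGS + 2 descriptors per skb. With the
     * default MAX_SKB_FRAGS of 17, rings of 19 entries or fewer are
     * rejected: such a ring could never fit a fully fragmented skb,
     * so the queue would stall permanently.
     */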
@@ -3575,15 +3695,16 @@ static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pair
for (; i < vi->rss_indir_table_size; ++i) {
indir_val = ethtool_rxfh_indir_default(i, queue_pairs);
- vi->rss.indirection_table[i] = indir_val;
+ vi->rss_hdr->indirection_table[i] = cpu_to_le16(indir_val);
}
- vi->rss.max_tx_vq = queue_pairs;
+ vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs);
}
static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
- struct virtio_net_ctrl_rss old_rss;
+ struct virtio_net_rss_config_hdr *old_rss_hdr;
+ struct virtio_net_rss_config_trailer old_rss_trailer;
struct net_device *dev = vi->dev;
struct scatterlist sg;
@@ -3598,24 +3719,28 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
* update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly.
*/
if (vi->has_rss && !netif_is_rxfh_configured(dev)) {
- memcpy(&old_rss, &vi->rss, sizeof(old_rss));
- if (rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size)) {
- vi->rss.indirection_table = old_rss.indirection_table;
+ old_rss_hdr = vi->rss_hdr;
+ old_rss_trailer = vi->rss_trailer;
+ vi->rss_hdr = devm_kzalloc(&dev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
+ if (!vi->rss_hdr) {
+ vi->rss_hdr = old_rss_hdr;
return -ENOMEM;
}
+ *vi->rss_hdr = *old_rss_hdr;
virtnet_rss_update_by_qpairs(vi, queue_pairs);
if (!virtnet_commit_rss_command(vi)) {
/* restore ctrl_rss if commit_rss_command failed */
- rss_indirection_table_free(&vi->rss);
- memcpy(&vi->rss, &old_rss, sizeof(old_rss));
+ devm_kfree(&dev->dev, vi->rss_hdr);
+ vi->rss_hdr = old_rss_hdr;
+ vi->rss_trailer = old_rss_trailer;
dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n",
queue_pairs);
return -EINVAL;
}
- rss_indirection_table_free(&old_rss);
+ devm_kfree(&dev->dev, old_rss_hdr);
goto succ;
}
@@ -3635,8 +3760,10 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
succ:
vi->curr_queue_pairs = queue_pairs;
/* virtnet_open() will refill when device is going to up. */
- if (dev->flags & IFF_UP)
+ spin_lock_bh(&vi->refill_lock);
+ if (dev->flags & IFF_UP && vi->refill_enabled)
schedule_delayed_work(&vi->refill, 0);
+ spin_unlock_bh(&vi->refill_lock);
return 0;
}
@@ -3825,7 +3952,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
cpumask_var_t mask;
int stragglers;
int group_size;
- int i, j, cpu;
+ int i, start = 0, cpu;
int num_cpu;
int stride;
@@ -3839,16 +3966,18 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
stragglers = num_cpu >= vi->curr_queue_pairs ?
num_cpu % vi->curr_queue_pairs :
0;
- cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < vi->curr_queue_pairs; i++) {
group_size = stride + (i < stragglers ? 1 : 0);
- for (j = 0; j < group_size; j++) {
+ for_each_online_cpu_wrap(cpu, start) {
+ if (!group_size--) {
+ start = cpu;
+ break;
+ }
cpumask_set_cpu(cpu, mask);
- cpu = cpumask_next_wrap(cpu, cpu_online_mask,
- nr_cpu_ids, false);
}
+
virtqueue_set_affinity(vi->rq[i].vq, mask);
virtqueue_set_affinity(vi->sq[i].vq, mask);
__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
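
for_each_online_cpu_wrap() iterates the online mask starting at `start` and wraps around, letting each queue's CPU group resume where the previous group stopped. Worked example of the resulting spread:

    /* 8 online CPUs, 3 queue pairs:
     *   stride = 8 / 3 = 2, stragglers = 8 % 3 = 2
     *   queue pair 0 -> CPUs {0, 1, 2}   (stride + one straggler)
     *   queue pair 1 -> CPUs {3, 4, 5}   (stride + one straggler)
     *   queue pair 2 -> CPUs {6, 7}      (stride)
     */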
@@ -4058,28 +4187,12 @@ static int virtnet_set_ringparam(struct net_device *dev,
static bool virtnet_commit_rss_command(struct virtnet_info *vi)
{
struct net_device *dev = vi->dev;
- struct scatterlist sgs[4];
- unsigned int sg_buf_size;
+ struct scatterlist sgs[2];
/* prepare sgs */
- sg_init_table(sgs, 4);
-
- sg_buf_size = offsetof(struct virtio_net_ctrl_rss, hash_cfg_reserved);
- sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
-
- if (vi->has_rss) {
- sg_buf_size = sizeof(uint16_t) * vi->rss_indir_table_size;
- sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
- } else {
- sg_set_buf(&sgs[1], &vi->rss.hash_cfg_reserved, sizeof(uint16_t));
- }
-
- sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
- - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
- sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size);
-
- sg_buf_size = vi->rss_key_size;
- sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size);
+ sg_init_table(sgs, 2);
+ sg_set_buf(&sgs[0], vi->rss_hdr, virtnet_rss_hdr_size(vi));
+ sg_set_buf(&sgs[1], &vi->rss_trailer, virtnet_rss_trailer_size(vi));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
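
Because the spec-defined header and trailer each end in the flexible array they describe, the whole command now needs only two scatterlist entries instead of four. The wire layout, sketched with the sizes from the earlier example:

    /* sgs[0] (264 bytes for a 128-entry table):
     *   hash_types | indirection_table_mask | unclassified_queue | table[128]
     * sgs[1] (43 bytes for a 40-byte key):
     *   max_tx_vq | hash_key_length | key[40]
     */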
@@ -4096,17 +4209,17 @@ err:
static void virtnet_init_default_rss(struct virtnet_info *vi)
{
- vi->rss.hash_types = vi->rss_hash_types_supported;
+ vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_supported);
vi->rss_hash_types_saved = vi->rss_hash_types_supported;
- vi->rss.indirection_table_mask = vi->rss_indir_table_size
- ? vi->rss_indir_table_size - 1 : 0;
- vi->rss.unclassified_queue = 0;
+ vi->rss_hdr->indirection_table_mask = vi->rss_indir_table_size
+ ? cpu_to_le16(vi->rss_indir_table_size - 1) : 0;
+ vi->rss_hdr->unclassified_queue = 0;
virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs);
- vi->rss.hash_key_length = vi->rss_key_size;
+ vi->rss_trailer.hash_key_length = vi->rss_key_size;
- netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
+ netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size);
}
static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
@@ -4217,7 +4330,7 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *
if (new_hashtypes != vi->rss_hash_types_saved) {
vi->rss_hash_types_saved = new_hashtypes;
- vi->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
if (vi->dev->features & NETIF_F_RXHASH)
return virtnet_commit_rss_command(vi);
}
@@ -5397,11 +5510,11 @@ static int virtnet_get_rxfh(struct net_device *dev,
if (rxfh->indir) {
for (i = 0; i < vi->rss_indir_table_size; ++i)
- rxfh->indir[i] = vi->rss.indirection_table[i];
+ rxfh->indir[i] = le16_to_cpu(vi->rss_hdr->indirection_table[i]);
}
if (rxfh->key)
- memcpy(rxfh->key, vi->rss.key, vi->rss_key_size);
+ memcpy(rxfh->key, vi->rss_hash_key_data, vi->rss_key_size);
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -5425,7 +5538,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
return -EOPNOTSUPP;
for (i = 0; i < vi->rss_indir_table_size; ++i)
- vi->rss.indirection_table[i] = rxfh->indir[i];
+ vi->rss_hdr->indirection_table[i] = cpu_to_le16(rxfh->indir[i]);
update = true;
}
@@ -5437,7 +5550,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
if (!vi->has_rss && !vi->has_rss_hash_report)
return -EOPNOTSUPP;
- memcpy(vi->rss.key, rxfh->key, vi->rss_key_size);
+ memcpy(vi->rss_hash_key_data, rxfh->key, vi->rss_key_size);
update = true;
}
@@ -5594,6 +5707,10 @@ static void virtnet_get_base_stats(struct net_device *dev,
if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED)
tx->hw_drop_ratelimits = 0;
+
+ netdev_stat_queue_sum(dev,
+ dev->real_num_rx_queues, vi->max_queue_pairs, rx,
+ dev->real_num_tx_queues, vi->max_queue_pairs, tx);
}
static const struct netdev_stat_ops virtnet_stat_ops = {
@@ -5614,8 +5731,11 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
netif_tx_unlock_bh(vi->dev);
- if (netif_running(vi->dev))
+ if (netif_running(vi->dev)) {
+ rtnl_lock();
virtnet_close(vi->dev);
+ rtnl_unlock();
+ }
}
static int init_vqs(struct virtnet_info *vi);
@@ -5635,7 +5755,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
enable_rx_mode_work(vi);
if (netif_running(vi->dev)) {
+ rtnl_lock();
err = virtnet_open(vi->dev);
+ rtnl_unlock();
if (err)
return err;
}
@@ -5710,7 +5832,7 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
virtnet_rx_pause(vi, rq);
- err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf);
+ err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL);
if (err) {
netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err);
@@ -5739,7 +5861,8 @@ static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
virtnet_tx_pause(vi, sq);
- err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf);
+ err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf,
+ virtnet_sq_free_unused_buf_done);
if (err) {
netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
pool = NULL;
@@ -5800,8 +5923,10 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
DMA_TO_DEVICE, 0);
- if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
- return -ENOMEM;
+ if (virtqueue_dma_mapping_error(sq->vq, hdr_dma)) {
+ err = -ENOMEM;
+ goto err_free_buffs;
+ }
err = xsk_pool_dma_map(pool, dma_dev, 0);
if (err)
@@ -5829,6 +5954,8 @@ err_rq:
err_xsk_map:
virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
DMA_TO_DEVICE, 0);
+err_free_buffs:
+ kvfree(rq->xsk_buffs);
return err;
}
@@ -5921,12 +6048,12 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (prog)
bpf_prog_add(prog, vi->max_queue_pairs - 1);
+ virtnet_rx_pause_all(vi);
+
/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_disable(&vi->rq[i].napi);
- virtnet_napi_tx_disable(&vi->sq[i].napi);
- }
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtnet_napi_tx_disable(&vi->sq[i]);
}
if (!prog) {
@@ -5958,14 +6085,12 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
vi->xdp_enabled = false;
}
+ virtnet_rx_resume_all(vi);
for (i = 0; i < vi->max_queue_pairs; i++) {
if (old_prog)
bpf_prog_put(old_prog);
- if (netif_running(dev)) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
- }
+ if (netif_running(dev))
+ virtnet_napi_tx_enable(&vi->sq[i]);
}
return 0;
@@ -5977,12 +6102,10 @@ err:
rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
}
+ virtnet_rx_resume_all(vi);
if (netif_running(dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
- }
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtnet_napi_tx_enable(&vi->sq[i]);
}
if (prog)
bpf_prog_sub(prog, vi->max_queue_pairs - 1);
@@ -6042,9 +6165,9 @@ static int virtnet_set_features(struct net_device *dev,
if ((dev->features ^ features) & NETIF_F_RXHASH) {
if (features & NETIF_F_RXHASH)
- vi->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
else
- vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+ vi->rss_hdr->hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE);
if (!virtnet_commit_rss_command(vi))
return -EINVAL;
@@ -6214,7 +6337,7 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
{
struct virtnet_info *vi = vq->vdev->priv;
struct send_queue *sq;
- int i = vq2rxq(vq);
+ int i = vq2txq(vq);
sq = &vi->sq[i];
@@ -6234,6 +6357,14 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
}
}
+static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq)
+{
+ struct virtnet_info *vi = vq->vdev->priv;
+ int i = vq2txq(vq);
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
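
virtqueue_resize()/virtqueue_reset() gained a second callback that runs once after all unused buffers have been recycled; virtnet_sq_free_unused_buf_done() uses it to reset BQL state, since buffers freed this way never pass through netdev_tx_completed_queue(). The pairing rule, sketched:

    /* netdev_tx_sent_queue(txq, bytes)      on every transmit
     * netdev_tx_completed_queue(txq, p, b)  on normal completion
     * netdev_tx_reset_queue(txq)            after freeing pending skbs
     *                                       without completing them
     * Skipping the reset leaves BQL believing bytes are still in
     * flight, which can stop the queue indefinitely.
     */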
@@ -6380,8 +6511,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
INIT_DELAYED_WORK(&vi->refill, refill_work);
for (i = 0; i < vi->max_queue_pairs; i++) {
vi->rq[i].pages = NULL;
- netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
- napi_weight);
+ netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
+ i);
+ vi->rq[i].napi.weight = napi_weight;
netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
virtnet_poll_tx,
napi_tx ? napi_weight : 0);
@@ -6725,9 +6857,11 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_cread16(vdev, offsetof(struct virtio_net_config,
rss_max_indirection_table_length));
}
- err = rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size);
- if (err)
+ vi->rss_hdr = devm_kzalloc(&vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
+ if (!vi->rss_hdr) {
+ err = -ENOMEM;
goto free;
+ }
if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_key_size =
@@ -6966,11 +7100,20 @@ free:
static void remove_vq_common(struct virtnet_info *vi)
{
+ int i;
+
virtio_reset_device(vi->vdev);
/* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi);
+ /*
+ * Rule of thumb is netdev_tx_reset_queue() should follow any
+ * skb freeing not followed by netdev_tx_completed_queue()
+ */
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
+
free_receive_bufs(vi);
free_receive_page_frags(vi);
@@ -6997,8 +7140,6 @@ static void virtnet_remove(struct virtio_device *vdev)
remove_vq_common(vi);
- rss_indirection_table_free(&vi->rss);
-
free_netdev(vi->dev);
}