author    Paolo Abeni <pabeni@redhat.com>    2024-03-05 13:38:16 +0300
committer Paolo Abeni <pabeni@redhat.com>    2024-03-05 13:38:17 +0300
commit    6702d60d3c2eae086fbf85e09598ccf0c46b1d00
tree      0a9c5c5872b910d0152fd869daacd57b4aaf2e87 /drivers
parent    0e71862a20d58b6e8d4c39de1d72c8919c4dccd1
parent    c5d3705cfd938f6ddd8fa2ff74689cc9b52c00ca
Merge branch 'remove-page-frag-implementation-in-vhost_net'
Yunsheng Lin says:

====================
remove page frag implementation in vhost_net

Currently there are three implementations for page frag:

1. mm/page_alloc.c: net stack seems to be using it in the rx part with
   'struct page_frag_cache' and the main API being page_frag_alloc_align().
2. net/core/sock.c: net stack seems to be using it in the tx part with
   'struct page_frag' and the main API being skb_page_frag_refill().
3. drivers/vhost/net.c: vhost seems to be using it to build xdp frame,
   and its implementation seems to be a mix of the above two.

This patchset tries to unify the page frag implementation a little bit
by unifying the gfp bit for order-3 page allocation and replacing the
page frag implementation in vhost.c with the one in page_alloc.c.

After this patchset, we are not only able to unify the page frag
implementation a little, but also to get about a 0.5% performance boost
when testing with the vhost_net_test introduced in the last patch.

Before this patchset:
Performance counter stats for './vhost_net_test' (10 runs):

      305325.78 msec task-clock                #    1.738 CPUs utilized           ( +-  0.12% )
        1048668      context-switches          #    3.435 K/sec                   ( +-  0.00% )
             11      cpu-migrations            #    0.036 /sec                    ( +- 17.64% )
             33      page-faults               #    0.108 /sec                    ( +-  0.49% )
   244651819491      cycles                    #    0.801 GHz                     ( +-  0.43% )  (64)
    64714638024      stalled-cycles-frontend   #   26.45% frontend cycles idle    ( +-  2.19% )  (67)
    30774313491      stalled-cycles-backend    #   12.58% backend cycles idle     ( +-  7.68% )  (70)
   201749748680      instructions              #    0.82  insn per cycle
                                                #    0.32  stalled cycles per insn ( +-  0.41% )  (66.76%)
    65494787909      branches                  #  214.508 M/sec                   ( +-  0.35% )  (64)
     4284111313      branch-misses             #    6.54% of all branches         ( +-  0.45% )  (66)

        175.699 +- 0.189 seconds time elapsed  ( +-  0.11% )

After this patchset:
Performance counter stats for './vhost_net_test' (10 runs):

      303974.38 msec task-clock                #    1.739 CPUs utilized           ( +-  0.14% )
        1048807      context-switches          #    3.450 K/sec                   ( +-  0.00% )
             14      cpu-migrations            #    0.046 /sec                    ( +- 12.86% )
             33      page-faults               #    0.109 /sec                    ( +-  0.46% )
   251289376347      cycles                    #    0.827 GHz                     ( +-  0.32% )  (60)
    67885175415      stalled-cycles-frontend   #   27.01% frontend cycles idle    ( +-  0.48% )  (63)
    27809282600      stalled-cycles-backend    #   11.07% backend cycles idle     ( +-  0.36% )  (71)
   195543234672      instructions              #    0.78  insn per cycle
                                                #    0.35  stalled cycles per insn ( +-  0.29% )  (69.04%)
    62423183552      branches                  #  205.357 M/sec                   ( +-  0.48% )  (67)
     4135666632      branch-misses             #    6.63% of all branches         ( +-  0.63% )  (67)

        174.764 +- 0.214 seconds time elapsed  ( +-  0.12% )

Changelog:
V6: Add timeout for poll() and simplify some logic as suggested by Jason.

V5: Address the comment from Jason in vhost_net_test.c and the comment
    about leaving out the gfp change for page frag in sock.c as suggested
    by Paolo.

V4: Resend based on latest net-next branch.

V3:
1. Add __page_frag_alloc_align(), which is passed the align mask the
   original function expected, as suggested by Alexander.
2. Drop patch 3 in v2 as suggested by Alexander.
3. Reorder patches 4 & 5 in v2 as suggested by Alexander.

Note that placing the gfp flags handling for order-3 pages in an inline
function is not considered, as we may be able to unify the page_frag and
page_frag_cache handling.

V2: Change 'xor'd' to 'masked off', add vhost tx testing for
    vhost_net_test.

V1: Fix some typos, drop RFC tag and rebase on latest net-next.
====================

Link: https://lore.kernel.org/r/20240228093013.8263-1-linyunsheng@huawei.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
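For orientation, below is a minimal sketch of the consolidated API that all of the conversions in this series follow. The 'my_dev' structure, my_dev_queue_buf() and my_dev_submit() are hypothetical stand-ins for a real driver; page_frag_alloc_align(), page_frag_free() and the page_frag_cache_drain() helper added by this series are the calls visible in the diff itself:

/* Hypothetical consumer of the page frag cache API used in the diff below. */
struct my_dev {
	struct page_frag_cache pf_cache;	/* ->va must be NULL before first use */
};

static int my_dev_queue_buf(struct my_dev *dev, struct iov_iter *from,
			    unsigned int len)
{
	void *buf;

	/* Carve a cache-line-aligned fragment out of the per-device cache;
	 * the cache transparently refills itself with a fresh page when it
	 * runs out of space.
	 */
	buf = page_frag_alloc_align(&dev->pf_cache, len, GFP_KERNEL,
				    SMP_CACHE_BYTES);
	if (unlikely(!buf))
		return -ENOMEM;

	if (copy_from_iter(buf, len, from) != len) {
		page_frag_free(buf);	/* drop only this fragment on error */
		return -EFAULT;
	}

	return my_dev_submit(dev, buf, len);	/* hypothetical submit hook */
}

static void my_dev_teardown(struct my_dev *dev)
{
	/* One helper replaces the open-coded virt_to_page() +
	 * __page_frag_cache_drain() teardown each driver used to carry.
	 */
	page_frag_cache_drain(&dev->pf_cache);
}

This mirrors what vhost_net_build_xdp() and vhost_net_release() do after the conversion, minus the vhost-specific framing.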
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c  | 11
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_wo.c  | 17
-rw-r--r--  drivers/nvme/host/tcp.c                     |  7
-rw-r--r--  drivers/nvme/target/tcp.c                   |  4
-rw-r--r--  drivers/vhost/net.c                         | 91
5 files changed, 33 insertions, 97 deletions
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 7b89b66adb53..166bd827a6d7 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1276,17 +1276,10 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
 
 static void gve_drain_page_cache(struct gve_priv *priv)
 {
-	struct page_frag_cache *nc;
 	int i;
 
-	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
-		nc = &priv->rx[i].page_cache;
-		if (nc->va) {
-			__page_frag_cache_drain(virt_to_page(nc->va),
-						nc->pagecnt_bias);
-			nc->va = NULL;
-		}
-	}
+	for (i = 0; i < priv->rx_cfg.num_queues; i++)
+		page_frag_cache_drain(&priv->rx[i].page_cache);
 }
 
 static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index d58b07e7e123..7063c78bd35f 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -286,7 +286,6 @@ mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 static void
 mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 {
-	struct page *page;
 	int i;
 
 	for (i = 0; i < q->n_desc; i++) {
@@ -301,19 +300,12 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 		entry->buf = NULL;
 	}
 
-	if (!q->cache.va)
-		return;
-
-	page = virt_to_page(q->cache.va);
-	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
-	memset(&q->cache, 0, sizeof(q->cache));
+	page_frag_cache_drain(&q->cache);
 }
 
 static void
 mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 {
-	struct page *page;
-
 	for (;;) {
 		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
 
@@ -323,12 +315,7 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 		skb_free_frag(buf);
 	}
 
-	if (!q->cache.va)
-		return;
-
-	page = virt_to_page(q->cache.va);
-	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
-	memset(&q->cache, 0, sizeof(q->cache));
+	page_frag_cache_drain(&q->cache);
 }
 
 static void
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index a6d596e05602..3692b56cb58d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1344,7 +1344,6 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
 
 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 {
-	struct page *page;
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
 	unsigned int noreclaim_flag;
@@ -1355,11 +1354,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 	if (queue->hdr_digest || queue->data_digest)
 		nvme_tcp_free_crypto(queue);
 
-	if (queue->pf_cache.va) {
-		page = virt_to_head_page(queue->pf_cache.va);
-		__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
-		queue->pf_cache.va = NULL;
-	}
+	page_frag_cache_drain(&queue->pf_cache);
 
 	noreclaim_flag = memalloc_noreclaim_save();
 	/* ->sock will be released by fput() */
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index c8655fc5aa5b..2aa5762e9f50 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1591,7 +1591,6 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
 
 static void nvmet_tcp_release_queue_work(struct work_struct *w)
 {
-	struct page *page;
 	struct nvmet_tcp_queue *queue =
 		container_of(w, struct nvmet_tcp_queue, release_work);
 
@@ -1615,8 +1614,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
 	if (queue->hdr_digest || queue->data_digest)
 		nvmet_tcp_free_crypto(queue);
 	ida_free(&nvmet_tcp_queue_ida, queue->idx);
-	page = virt_to_head_page(queue->pf_cache.va);
-	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+	page_frag_cache_drain(&queue->pf_cache);
 	kfree(queue);
 }
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f2ed7167c848..4b2fcb228a0a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -141,10 +141,8 @@ struct vhost_net {
 	unsigned tx_zcopy_err;
 	/* Flush in progress. Protected by tx vq lock. */
 	bool tx_flush;
-	/* Private page frag */
-	struct page_frag page_frag;
-	/* Refcount bias of page frag */
-	int refcnt_bias;
+	/* Private page frag cache */
+	struct page_frag_cache pf_cache;
 };
 
 static unsigned vhost_net_zcopy_mask __read_mostly;
@@ -655,41 +653,6 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
 	       !vhost_vq_avail_empty(vq->dev, vq);
 }
 
-static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz,
-				       struct page_frag *pfrag, gfp_t gfp)
-{
-	if (pfrag->page) {
-		if (pfrag->offset + sz <= pfrag->size)
-			return true;
-		__page_frag_cache_drain(pfrag->page, net->refcnt_bias);
-	}
-
-	pfrag->offset = 0;
-	net->refcnt_bias = 0;
-	if (SKB_FRAG_PAGE_ORDER) {
-		/* Avoid direct reclaim but allow kswapd to wake */
-		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
-					  __GFP_COMP | __GFP_NOWARN |
-					  __GFP_NORETRY,
-					  SKB_FRAG_PAGE_ORDER);
-		if (likely(pfrag->page)) {
-			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
-			goto done;
-		}
-	}
-	pfrag->page = alloc_page(gfp);
-	if (likely(pfrag->page)) {
-		pfrag->size = PAGE_SIZE;
-		goto done;
-	}
-	return false;
-
-done:
-	net->refcnt_bias = USHRT_MAX;
-	page_ref_add(pfrag->page, USHRT_MAX - 1);
-	return true;
-}
-
 #define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
 
 static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
@@ -699,7 +662,6 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 	struct vhost_net *net = container_of(vq->dev, struct vhost_net,
 					     dev);
 	struct socket *sock = vhost_vq_get_backend(vq);
-	struct page_frag *alloc_frag = &net->page_frag;
 	struct virtio_net_hdr *gso;
 	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
 	struct tun_xdp_hdr *hdr;
@@ -710,6 +672,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 	int sock_hlen = nvq->sock_hlen;
 	void *buf;
 	int copied;
+	int ret;
 
 	if (unlikely(len < nvq->sock_hlen))
 		return -EFAULT;
@@ -719,18 +682,17 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 		return -ENOSPC;
 
 	buflen += SKB_DATA_ALIGN(len + pad);
-	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
-	if (unlikely(!vhost_net_page_frag_refill(net, buflen,
-						 alloc_frag, GFP_KERNEL)))
+	buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
+				    SMP_CACHE_BYTES);
+	if (unlikely(!buf))
 		return -ENOMEM;
 
-	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
-	copied = copy_page_from_iter(alloc_frag->page,
-				     alloc_frag->offset +
-				     offsetof(struct tun_xdp_hdr, gso),
-				     sock_hlen, from);
-	if (copied != sock_hlen)
-		return -EFAULT;
+	copied = copy_from_iter(buf + offsetof(struct tun_xdp_hdr, gso),
+				sock_hlen, from);
+	if (copied != sock_hlen) {
+		ret = -EFAULT;
+		goto err;
+	}
 
 	hdr = buf;
 	gso = &hdr->gso;
@@ -743,27 +705,30 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 			       vhost16_to_cpu(vq, gso->csum_start) +
 			       vhost16_to_cpu(vq, gso->csum_offset) + 2);
 
-		if (vhost16_to_cpu(vq, gso->hdr_len) > len)
-			return -EINVAL;
+		if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
+			ret = -EINVAL;
+			goto err;
+		}
 	}
 
 	len -= sock_hlen;
-	copied = copy_page_from_iter(alloc_frag->page,
-				     alloc_frag->offset + pad,
-				     len, from);
-	if (copied != len)
-		return -EFAULT;
+	copied = copy_from_iter(buf + pad, len, from);
+	if (copied != len) {
+		ret = -EFAULT;
+		goto err;
+	}
 
 	xdp_init_buff(xdp, buflen, NULL);
 	xdp_prepare_buff(xdp, buf, pad, len, true);
 	hdr->buflen = buflen;
 
-	--net->refcnt_bias;
-	alloc_frag->offset += buflen;
-
 	++nvq->batched_xdp;
 
 	return 0;
+
+err:
+	page_frag_free(buf);
+	return ret;
 }
 
 static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
@@ -1353,8 +1318,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 		       vqs[VHOST_NET_VQ_RX]);
 
 	f->private_data = n;
-	n->page_frag.page = NULL;
-	n->refcnt_bias = 0;
+	n->pf_cache.va = NULL;
 
 	return 0;
 }
@@ -1422,8 +1386,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
 	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
 	kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
 	kfree(n->dev.vqs);
-	if (n->page_frag.page)
-		__page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
+	page_frag_cache_drain(&n->pf_cache);
 	kvfree(n);
 	return 0;
 }