commit cc75d43783f74fe0a1c288aba9e6ac55f1444977
Merge: 3b194343c250 a2cb2e23b2bc
Author:    Paolo Abeni <pabeni@redhat.com>  2026-01-15 12:07:49 +0300
Committer: Paolo Abeni <pabeni@redhat.com>  2026-01-15 12:07:50 +0300
Merge branch 'xsk-move-cq_cached_prod_lock'
Jason Xing says:
====================
xsk: move cq_cached_prod_lock
From: Jason Xing <kernelxing@tencent.com>
Move cq_cached_prod_lock from struct xsk_buff_pool into struct xsk_queue
to avoid touching an extra cacheline.
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
====================
Link: https://patch.msgid.link/20260104012125.44003-1-kerneljasonxing@gmail.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
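
The one-line rationale in the cover letter is a data-locality argument: in SKB
mode every completion-ring reserve/cancel takes this lock immediately before
updating the ring's cached producer index, so a lock living in the separate
struct xsk_buff_pool pulled in an additional cacheline on every operation.
Below is a minimal user-space sketch of that layout argument. It is not kernel
code: the leading fields of the stand-in struct, the pthread lock type, and
the 64-byte line size are illustrative assumptions; only the field order of
the tail mirrors the xsk_queue.h hunk further down.

#include <stdio.h>
#include <stddef.h>
#include <pthread.h>

/* Stand-in for struct xsk_queue after the patch. Field order at the tail
 * follows the xsk_queue.h hunk below; everything else is assumed. */
struct fake_xsk_queue {
        unsigned int ring_mask;                 /* assumed leading fields */
        unsigned int nentries;
        unsigned int cached_prod;               /* index the lock protects */
        unsigned int cached_cons;
        void *ring;
        unsigned long long invalid_descs;
        unsigned long long queue_empty_descs;
        size_t ring_vmalloc_size;
        pthread_spinlock_t cq_cached_prod_lock; /* moved here by the patch */
};

int main(void)
{
        const size_t line = 64; /* assumed cacheline size */
        size_t prod = offsetof(struct fake_xsk_queue, cached_prod);
        size_t lock = offsetof(struct fake_xsk_queue, cq_cached_prod_lock);

        /* With the lock in the same struct, the lock word and the cached
         * producer index can share a cacheline; a lock in a separate pool
         * object necessarily sits on a different one. */
        printf("cached_prod @ %zu, lock @ %zu, same line: %s\n",
               prod, lock, (prod / line == lock / line) ? "yes" : "no");
        return 0;
}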
 include/net/xsk_buff_pool.h |  5 -----
 net/xdp/xsk.c               | 15 +++++++++++----
 net/xdp/xsk_buff_pool.c     |  6 +-----
 net/xdp/xsk_queue.h         |  5 +++++
 4 files changed, 17 insertions(+), 14 deletions(-)
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 92a2358c6ce3..0b1abdb99c9e 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -90,11 +90,6 @@ struct xsk_buff_pool {
         * destructor callback.
         */
        spinlock_t cq_prod_lock;
-       /* Mutual exclusion of the completion ring in the SKB mode.
-        * Protect: when sockets share a single cq when the same netdev
-        * and queue id is shared.
-        */
-       spinlock_t cq_cached_prod_lock;
        struct xdp_buff_xsk *free_heads[];
 };
 
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index f093c3453f64..3b46bc635c43 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -543,9 +543,9 @@ static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
 {
        int ret;
 
-       spin_lock(&pool->cq_cached_prod_lock);
+       spin_lock(&pool->cq->cq_cached_prod_lock);
        ret = xskq_prod_reserve(pool->cq);
-       spin_unlock(&pool->cq_cached_prod_lock);
+       spin_unlock(&pool->cq->cq_cached_prod_lock);
 
        return ret;
 }
@@ -619,9 +619,9 @@ static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
 
 static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
 {
-       spin_lock(&pool->cq_cached_prod_lock);
+       spin_lock(&pool->cq->cq_cached_prod_lock);
        xskq_prod_cancel_n(pool->cq, n);
-       spin_unlock(&pool->cq_cached_prod_lock);
+       spin_unlock(&pool->cq->cq_cached_prod_lock);
 }
 
 INDIRECT_CALLABLE_SCOPE
@@ -1349,6 +1349,13 @@ static int xsk_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr
        }
 
        if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
+               /* One fill and completion ring required for each queue id. */
+               if (!xsk_validate_queues(xs)) {
+                       err = -EINVAL;
+                       sockfd_put(sock);
+                       goto out_unlock;
+               }
+
                /* Share the umem with another socket on another qid
                 * and/or device.
                 */
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 51526034c42a..cd5125b6af53 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -91,7 +91,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
        INIT_LIST_HEAD(&pool->xsk_tx_list);
        spin_lock_init(&pool->xsk_tx_list_lock);
        spin_lock_init(&pool->cq_prod_lock);
-       spin_lock_init(&pool->cq_cached_prod_lock);
+       spin_lock_init(&xs->cq_tmp->cq_cached_prod_lock);
        refcount_set(&pool->users, 1);
 
        pool->fq = xs->fq_tmp;
@@ -247,10 +247,6 @@ int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
        u16 flags;
        struct xdp_umem *umem = umem_xs->umem;
 
-       /* One fill and completion ring required for each queue id. */
-       if (!pool->fq || !pool->cq)
-               return -EINVAL;
-
        flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
        if (umem_xs->pool->uses_need_wakeup)
                flags |= XDP_USE_NEED_WAKEUP;
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 1eb8d9f8b104..ec08d9c102b1 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -46,6 +46,11 @@ struct xsk_queue {
        u64 invalid_descs;
        u64 queue_empty_descs;
        size_t ring_vmalloc_size;
+       /* Mutual exclusion of the completion ring in the SKB mode.
+        * Protect: when sockets share a single cq when the same netdev
+        * and queue id is shared.
+        */
+       spinlock_t cq_cached_prod_lock;
 };
 
 struct parsed_desc {
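
One behavioral note on the xsk.c and xsk_buff_pool.c hunks: the "one fill and
completion ring required for each queue id" check moves from
xp_assign_dev_shared() up into xsk_bind(), so it now runs before
xp_create_and_assign_umem() dereferences xs->cq_tmp to initialize the
relocated lock. The body of xsk_validate_queues() is not part of this merge;
the following is a hypothetical reconstruction, inferred only from the
!pool->fq || !pool->cq check it replaces and from the fq_tmp/cq_tmp usage
visible in the hunks above.

/* Hypothetical sketch -- the real helper's body is not shown in this
 * merge. It would need to confirm that the socket configured both
 * temporary rings before they are handed to the new pool. */
static bool xsk_validate_queues(struct xdp_sock *xs)
{
        return xs->fq_tmp && xs->cq_tmp; /* field names from the hunks above */
}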
