summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMaciej Fijalkowski <maciej.fijalkowski@intel.com>2026-02-18 18:00:00 +0300
committerAlexei Starovoitov <ast@kernel.org>2026-02-25 04:11:58 +0300
commitf620af11c27b8ec9994a39fe968aa778112d1566 (patch)
tree70c515d18afdb27745989296243707470d7e9b7c
parent8ebfe65e22d5016c0ef2f7b5831117202493f794 (diff)
downloadlinux-f620af11c27b8ec9994a39fe968aa778112d1566.tar.xz
xsk: avoid double checking against rx queue being full
Currently the non-zc xsk rx path for the multi-buffer case checks twice whether the xsk rx queue has enough space for producing descriptors:

1. if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
       xs->rx_queue_full++;
       return -ENOBUFS;
   }

2. __xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
   -> err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
      -> if (xskq_prod_is_full(q))

The second part is redundant, as in 1. we already peeked into the rx queue and checked that there is enough space to produce the given amount of descriptors. Provide helper functions that skip the check and therefore optimize the code.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Reviewed-by: Jason Xing <kerneljasonxing@gmail.com>
Link: https://lore.kernel.org/r/20260218150000.301176-1-maciej.fijalkowski@intel.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r--net/xdp/xsk.c14
-rw-r--r--net/xdp/xsk_queue.h16
2 files changed, 24 insertions, 6 deletions
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 3b46bc635c43..e6530996053b 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -160,6 +160,17 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
return 0;
}
+static void __xsk_rcv_zc_safe(struct xdp_sock *xs, struct xdp_buff_xsk *xskb,
+ u32 len, u32 flags)
+{
+ u64 addr;
+
+ addr = xp_get_handle(xskb, xskb->pool);
+ __xskq_prod_reserve_desc(xs->rx, addr, len, flags);
+
+ xp_release(xskb);
+}
+
static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
@@ -292,7 +303,8 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
rem -= copied;
xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
- __xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
+ __xsk_rcv_zc_safe(xs, xskb, copied - meta_len,
+ rem ? XDP_PKT_CONTD : 0);
meta_len = 0;
} while (rem);
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index ec08d9c102b1..3e3fbb73d23e 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -445,20 +445,26 @@ static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_de
q->cached_prod = cached_prod;
}
-static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
- u64 addr, u32 len, u32 flags)
+static inline void __xskq_prod_reserve_desc(struct xsk_queue *q,
+ u64 addr, u32 len, u32 flags)
{
struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
u32 idx;
- if (xskq_prod_is_full(q))
- return -ENOBUFS;
-
/* A, matches D */
idx = q->cached_prod++ & q->ring_mask;
ring->desc[idx].addr = addr;
ring->desc[idx].len = len;
ring->desc[idx].options = flags;
+}
+
+static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
+ u64 addr, u32 len, u32 flags)
+{
+ if (xskq_prod_is_full(q))
+ return -ENOBUFS;
+
+ __xskq_prod_reserve_desc(q, addr, len, flags);
return 0;
}