author    Eric Dumazet <edumazet@google.com>        2025-09-22 13:42:40 +0300
committer Jakub Kicinski <kuba@kernel.org>          2025-09-24 02:38:39 +0300
commit    b650bf0977d34c52befb31a9fa711534e11b220f
tree      17c0b2af97437acc9db86dc9ad5ab42df72d3a1f   /include/linux/udp.h
parent    df1526752e0cd8db11b1fd4c1be3bd47409fd3ac
udp: remove busylock and add per NUMA queues
busylock was protecting UDP sockets against packet floods, but
unfortunately was not protecting the host itself.

Under stress, many cpus could spin while acquiring the busylock, and
the NIC had to drop packets. Or packets would be dropped in the cpu
backlog if RPS/RFS were in place.

This patch replaces the busylock with intermediate lockless queues
(one queue per NUMA node).

This means that fewer cpus have to acquire the UDP receive queue lock.

Most of the cpus can either:

- immediately drop the packet.
- or queue it in their NUMA aware lockless queue.

Then one of the cpus is chosen to process this lockless queue in a batch.

The batch only contains packets that were cooked on the same NUMA node,
thus with very limited latency impact.

Tested:

DDOS targeting a victim UDP socket, on a platform with 6 NUMA nodes
(Intel(R) Xeon(R) 6985P-C).

Before:

nstat -n ; sleep 1 ; nstat | grep Udp
Udp6InDatagrams                 1004179            0.0
Udp6InErrors                    3117               0.0
Udp6RcvbufErrors                3117               0.0

After:

nstat -n ; sleep 1 ; nstat | grep Udp
Udp6InDatagrams                 1116633            0.0
Udp6InErrors                    14197275           0.0
Udp6RcvbufErrors                14197275           0.0

We can see this host can now process 14.2 M more packets per second
while under attack, and the victim socket can receive 11% more packets.

I used a small bpftrace program measuring the time (in us) spent in
__udp_enqueue_schedule_skb().

Before:

@udp_enqueue_us[398]:
[0]                24901 |@@@                                                 |
[1]                63512 |@@@@@@@@@                                           |
[2, 4)            344827 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@|
[4, 8)            244673 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@                |
[8, 16)            54022 |@@@@@@@@                                            |
[16, 32)          222134 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@                   |
[32, 64)          232042 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@                  |
[64, 128)           4219 |                                                    |
[128, 256)           188 |                                                    |

After:

@udp_enqueue_us[398]:
[0]              5608855 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@|
[1]              1111277 |@@@@@@@@@@                                          |
[2, 4)            501439 |@@@@                                                |
[4, 8)            102921 |                                                    |
[8, 16)            29895 |                                                    |
[16, 32)           43500 |                                                    |
[32, 64)           31552 |                                                    |
[64, 128)            979 |                                                    |
[128, 256)            13 |                                                    |

Note that the remaining bottleneck for this platform is in
udp_drops_inc(), because we limited struct numa_drop_counters to only
two nodes so far.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250922104240.2182559-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
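A minimal sketch of the enqueue idea described above, using the kernel's
llist API. This is not the patch's code: udp_numa_enqueue_sketch() and
udp_drain_node_queue_sketch() are made-up names, and linking skbs through
the existing sk_buff ll_node union member is an assumption; rcvbuf checks,
drop accounting and FIFO reordering of the batch are omitted.

#include <linux/llist.h>
#include <linux/skbuff.h>
#include <linux/topology.h>
#include <linux/udp.h>
#include <net/sock.h>

/* Drain one NUMA-local batch into the socket receive queue under a
 * single lock acquisition. Note that llist_del_all() yields the batch
 * newest-first; a real implementation would restore FIFO order.
 */
static void udp_drain_node_queue_sketch(struct sock *sk,
					struct udp_prod_queue *q)
{
	struct llist_node *batch = llist_del_all(&q->ll_root);
	struct sk_buff *skb, *next;

	spin_lock(&sk->sk_receive_queue.lock);
	llist_for_each_entry_safe(skb, next, batch, ll_node) {
		atomic_sub(skb->truesize, &q->rmem_alloc);
		__skb_queue_tail(&sk->sk_receive_queue, skb);
	}
	spin_unlock(&sk->sk_receive_queue.lock);
}

static int udp_numa_enqueue_sketch(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	struct udp_prod_queue *q = &up->udp_prod_queue[numa_node_id()];

	/* Account the skb against the NUMA-local queue before publishing
	 * it, so producers never need the receive queue lock just to
	 * decide whether to drop.
	 */
	atomic_add(skb->truesize, &q->rmem_alloc);

	/* Lockless push; llist_add() returns true when the list was
	 * previously empty, i.e. this CPU becomes the one chosen to
	 * process the whole batch.
	 */
	if (llist_add(&skb->ll_node, &q->ll_root))
		udp_drain_node_queue_sketch(sk, q);

	return 0;
}

The key property, as the message notes, is that contention on the UDP
receive queue lock drops from roughly one acquisition per packet per cpu
to roughly one per batch per NUMA node.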
Diffstat (limited to 'include/linux/udp.h')
-rw-r--r--   include/linux/udp.h | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/include/linux/udp.h b/include/linux/udp.h
index e554890c4415..58795688a186 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -44,6 +44,12 @@ enum {
UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */
};
+/* per NUMA structure for lockless producer usage. */
+struct udp_prod_queue {
+ struct llist_head ll_root ____cacheline_aligned_in_smp;
+ atomic_t rmem_alloc;
+};
+
struct udp_sock {
/* inet_sock has to be the first member */
struct inet_sock inet;
@@ -90,6 +96,8 @@ struct udp_sock {
struct sk_buff *skb,
int nhoff);
+ struct udp_prod_queue *udp_prod_queue;
+
/* udp_recvmsg try to use this before splicing sk_receive_queue */
struct sk_buff_head reader_queue ____cacheline_aligned_in_smp;
@@ -109,7 +117,6 @@ struct udp_sock {
*/
struct hlist_node tunnel_list;
struct numa_drop_counters drop_counters;
- spinlock_t busylock ____cacheline_aligned_in_smp;
};
#define udp_test_bit(nr, sk) \
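The hunks above only add the structure and the udp_prod_queue pointer in
struct udp_sock; the code that allocates, fills and drains the per NUMA
array lives in net/ipv4/udp.c and is not part of this file's diff. As a
hedged sketch of the setup side (udp_prod_queue_alloc_sketch is an
illustrative name, not a function from the patch), the array could be
sized by nr_node_ids and initialised per node:

#include <linux/llist.h>
#include <linux/nodemask.h>
#include <linux/slab.h>
#include <linux/udp.h>

static int udp_prod_queue_alloc_sketch(struct udp_sock *up)
{
	unsigned int node;

	/* One cacheline-aligned entry per possible NUMA node (see
	 * ____cacheline_aligned_in_smp above), so producers on different
	 * nodes do not false-share.
	 */
	up->udp_prod_queue = kcalloc(nr_node_ids,
				     sizeof(*up->udp_prod_queue), GFP_KERNEL);
	if (!up->udp_prod_queue)
		return -ENOMEM;

	for (node = 0; node < nr_node_ids; node++) {
		init_llist_head(&up->udp_prod_queue[node].ll_root);
		atomic_set(&up->udp_prod_queue[node].rmem_alloc, 0);
	}
	return 0;
}

Teardown would free this array and purge any skbs still sitting on the
lockless lists; how the patch actually handles allocation and cleanup is
not visible from this header-only diff.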