diff options
author | Eric Dumazet <edumazet@google.com> | 2022-05-16 07:24:55 +0300 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2022-05-16 13:33:59 +0300 |
commit | 39564c3fdc6684c6726b63e131d2a9f3809811cb (patch) | |
tree | 523592ac216cfcc1bbbb560b953d2d99b8e13186 /net/core/skbuff.c | |
parent | 2db60eed1a957423cf06ee1060fc45ed3971990d (diff) | |
download | linux-39564c3fdc6684c6726b63e131d2a9f3809811cb.tar.xz |
net: add skb_defer_max sysctl
commit 68822bdf76f1 ("net: generalize skb freeing
deferral to per-cpu lists") added another per-cpu
cache of skbs. It was expected to be small,
and an IPI was forced whenever the list reached 128
skbs.
We might need to control the queue capacity and the added
latency more precisely.
An IPI is generated whenever the queue reaches half of its capacity.
The default value of the new limit is 64.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r-- | net/core/skbuff.c | 15 |
1 files changed, 9 insertions, 6 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b40c8cdf4785..1d10bb4adec1 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -80,6 +80,7 @@ #include <linux/user_namespace.h> #include <linux/indirect_call_wrapper.h> +#include "dev.h" #include "sock_destructor.h" struct kmem_cache *skbuff_head_cache __ro_after_init; @@ -6496,16 +6497,21 @@ void skb_attempt_defer_free(struct sk_buff *skb) int cpu = skb->alloc_cpu; struct softnet_data *sd; unsigned long flags; + unsigned int defer_max; bool kick; if (WARN_ON_ONCE(cpu >= nr_cpu_ids) || !cpu_online(cpu) || cpu == raw_smp_processor_id()) { - __kfree_skb(skb); +nodefer: __kfree_skb(skb); return; } sd = &per_cpu(softnet_data, cpu); + defer_max = READ_ONCE(sysctl_skb_defer_max); + if (READ_ONCE(sd->defer_count) >= defer_max) + goto nodefer; + /* We do not send an IPI or any signal. * Remote cpu will eventually call skb_defer_free_flush() */ @@ -6515,11 +6521,8 @@ void skb_attempt_defer_free(struct sk_buff *skb) WRITE_ONCE(sd->defer_list, skb); sd->defer_count++; - /* kick every time queue length reaches 128. - * This condition should hardly be hit under normal conditions, - * unless cpu suddenly stopped to receive NIC interrupts. - */ - kick = sd->defer_count == 128; + /* Send an IPI every time queue reaches half capacity. */ + kick = sd->defer_count == (defer_max >> 1); spin_unlock_irqrestore(&sd->defer_lock, flags); |