author    | Mateusz Guzik <mjguzik@gmail.com> | 2023-08-23 08:06:08 +0300
committer | Dennis Zhou <dennis@kernel.org> | 2023-08-25 18:06:53 +0300
commit    | c439d5e8a0deb7310b5bb4e5f2fe47c40ff5297f (patch)
tree      | ec3fa53c5487f15036a02f6eb29d70896302baa5 /lib
parent    | f7d77dfc91f747f64cb00884fd6d7940c3b49fca (diff)
download  | linux-c439d5e8a0deb7310b5bb4e5f2fe47c40ff5297f.tar.xz
pcpcntr: add group allocation/free
Allocations and frees are globally serialized on the pcpu lock (and the
CPU hotplug lock if enabled, which is the case on Debian).
At least one frequent consumer allocates 4 back-to-back counters (and
frees them in the same manner), exacerbating the problem.
While this does not fully remedy scalability issues, it is a step
towards that goal and provides immediate relief.
Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Reviewed-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: Vegard Nossum <vegard.nossum@oracle.com>
Link: https://lore.kernel.org/r/20230823050609.2228718-2-mjguzik@gmail.com
[Dennis: reflowed a few lines]
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Diffstat (limited to 'lib')
-rw-r--r-- | lib/percpu_counter.c | 62
1 files changed, 43 insertions, 19 deletions
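
To illustrate the new interface (a hypothetical caller, not part of this patch): the diff below adds __percpu_counter_init_many() and percpu_counter_destroy_many(), so a consumer like the one described above, which sets up four counters back to back, could replace the per-counter init/destroy calls with a single grouped call each way and take the global locks once. A minimal sketch, assuming a made-up struct foo_stats with four counters and calling the low-level init directly with its lockdep key:

#include <linux/percpu_counter.h>
#include <linux/gfp.h>

/* Hypothetical consumer with four related counters (illustration only). */
struct foo_stats {
	struct percpu_counter cnt[4];
};

static int foo_stats_init(struct foo_stats *st)
{
	static struct lock_class_key key;

	/*
	 * One grouped allocation backs all four counters, so the pcpu
	 * allocator lock (and the hotplug lock, if enabled) is taken
	 * once instead of four times.
	 */
	return __percpu_counter_init_many(st->cnt, 0, GFP_KERNEL, 4, &key);
}

static void foo_stats_destroy(struct foo_stats *st)
{
	/* Grouped teardown: frees the shared percpu chunk once. */
	percpu_counter_destroy_many(st->cnt, 4);
}

In-tree callers would normally go through a percpu_counter_init_many() wrapper that supplies the lock_class_key (the header side of this change, filtered out of this lib/ view); the direct call above sticks to what the diff below shows.
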
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 5004463c4f9f..9073430dc865 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -151,48 +151,72 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
 
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
-			  struct lock_class_key *key)
+int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
+			       gfp_t gfp, u32 nr_counters,
+			       struct lock_class_key *key)
 {
 	unsigned long flags __maybe_unused;
-
-	raw_spin_lock_init(&fbc->lock);
-	lockdep_set_class(&fbc->lock, key);
-	fbc->count = amount;
-	fbc->counters = alloc_percpu_gfp(s32, gfp);
-	if (!fbc->counters)
+	size_t counter_size;
+	s32 __percpu *counters;
+	u32 i;
+
+	counter_size = ALIGN(sizeof(*counters), __alignof__(*counters));
+	counters = __alloc_percpu_gfp(nr_counters * counter_size,
+				      __alignof__(*counters), gfp);
+	if (!counters) {
+		fbc[0].counters = NULL;
 		return -ENOMEM;
+	}
 
-	debug_percpu_counter_activate(fbc);
+	for (i = 0; i < nr_counters; i++) {
+		raw_spin_lock_init(&fbc[i].lock);
+		lockdep_set_class(&fbc[i].lock, key);
+#ifdef CONFIG_HOTPLUG_CPU
+		INIT_LIST_HEAD(&fbc[i].list);
+#endif
+		fbc[i].count = amount;
+		fbc[i].counters = (void *)counters + (i * counter_size);
+
+		debug_percpu_counter_activate(&fbc[i]);
+	}
 
 #ifdef CONFIG_HOTPLUG_CPU
-	INIT_LIST_HEAD(&fbc->list);
 	spin_lock_irqsave(&percpu_counters_lock, flags);
-	list_add(&fbc->list, &percpu_counters);
+	for (i = 0; i < nr_counters; i++)
+		list_add(&fbc[i].list, &percpu_counters);
 	spin_unlock_irqrestore(&percpu_counters_lock, flags);
 #endif
 	return 0;
 }
-EXPORT_SYMBOL(__percpu_counter_init);
+EXPORT_SYMBOL(__percpu_counter_init_many);
 
-void percpu_counter_destroy(struct percpu_counter *fbc)
+void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters)
 {
 	unsigned long flags __maybe_unused;
+	u32 i;
+
+	if (WARN_ON_ONCE(!fbc))
+		return;
 
-	if (!fbc->counters)
+	if (!fbc[0].counters)
 		return;
 
-	debug_percpu_counter_deactivate(fbc);
+	for (i = 0; i < nr_counters; i++)
+		debug_percpu_counter_deactivate(&fbc[i]);
 
 #ifdef CONFIG_HOTPLUG_CPU
 	spin_lock_irqsave(&percpu_counters_lock, flags);
-	list_del(&fbc->list);
+	for (i = 0; i < nr_counters; i++)
+		list_del(&fbc[i].list);
 	spin_unlock_irqrestore(&percpu_counters_lock, flags);
 #endif
-	free_percpu(fbc->counters);
-	fbc->counters = NULL;
+
+	free_percpu(fbc[0].counters);
+
+	for (i = 0; i < nr_counters; i++)
+		fbc[i].counters = NULL;
 }
-EXPORT_SYMBOL(percpu_counter_destroy);
+EXPORT_SYMBOL(percpu_counter_destroy_many);
 
 int percpu_counter_batch __read_mostly = 32;
 EXPORT_SYMBOL(percpu_counter_batch);