author | Vlastimil Babka <vbabka@suse.cz> | 2023-08-07 21:50:44 +0300
committer | Vlastimil Babka <vbabka@suse.cz> | 2023-12-07 14:41:48 +0300
commit | 6f3dd2c31d7d703a814c59f60daf95c57fa6a4c2 (patch)
tree | 38f953c4cdee4ab3059709ec3ca72754d8838d54 /mm/slub.c
parent | ecf9a253ce120082ce0a8aff806c4de4865cfcc5 (diff)
download | linux-6f3dd2c31d7d703a814c59f60daf95c57fa6a4c2.tar.xz
mm/slub: fix bulk alloc and free stats
The SLUB sysfs stats enabled by CONFIG_SLUB_STATS have two deficiencies
identified with respect to bulk alloc/free operations:
- Bulk allocations from the cpu freelist are not counted. Add the
  ALLOC_FASTPATH counter there.
- Bulk fastpath freeing will count a list of multiple objects with a
  single FREE_FASTPATH increment. Add a stat_add() variant to count them
  all (see the sketch after this list).
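
To make the second point concrete, here is a minimal user-space sketch of
the counting semantics. It is an illustration only: the enum values mirror
the kernel's stat items, but the real counters live in per-CPU data
(raw_cpu_inc()/raw_cpu_add() on s->cpu_slab->stat[]), which is simplified
here to a thread-local array.

    /* User-space sketch, not kernel code: when a whole freelist of
     * `cnt` objects is returned in one fastpath call, the stat must
     * grow by cnt, not by 1. */
    #include <stdio.h>

    enum stat_item { ALLOC_FASTPATH, FREE_FASTPATH, NR_STATS };

    static _Thread_local unsigned long stats[NR_STATS];

    /* kernel: raw_cpu_inc(s->cpu_slab->stat[si]) */
    static inline void stat(enum stat_item si)
    {
            stats[si]++;
    }

    /* kernel: raw_cpu_add(s->cpu_slab->stat[si], v) -- the new helper */
    static inline void stat_add(enum stat_item si, int v)
    {
            stats[si] += v;
    }

    int main(void)
    {
            int cnt = 8; /* freeing a list of 8 objects in one call */

            stat(FREE_FASTPATH);          /* old: undercounts by cnt - 1 */
            stat_add(FREE_FASTPATH, cnt); /* fixed: counts every object  */

            printf("FREE_FASTPATH = %lu\n", stats[FREE_FASTPATH]);
            return 0;
    }

The old code's single stat() call undercounts by cnt - 1 whenever a
detached list of cnt objects is freed in one fastpath operation.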
Reviewed-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/mm/slub.c b/mm/slub.c
index 3f8b95757106..d7b0ca6012e0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -396,6 +396,14 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
 #endif
 }
 
+static inline
+void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
+{
+#ifdef CONFIG_SLUB_STATS
+	raw_cpu_add(s->cpu_slab->stat[si], v);
+#endif
+}
+
 /*
  * The slab lists for all objects.
  */
@@ -4268,7 +4276,7 @@ redo:
 
 		local_unlock(&s->cpu_slab->lock);
 	}
-	stat(s, FREE_FASTPATH);
+	stat_add(s, FREE_FASTPATH, cnt);
 }
 #else /* CONFIG_SLUB_TINY */
 static void do_slab_free(struct kmem_cache *s,
@@ -4545,6 +4553,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
 		maybe_wipe_obj_freeptr(s, p[i]);
+		stat(s, ALLOC_FASTPATH);
 	}
 	c->tid = next_tid(c->tid);
 	local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
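
With both counters bumped correctly, the effect is visible from user space
in the per-cache stat files under /sys/kernel/slab/<cache>/, which exist
only on kernels built with CONFIG_SLUB_STATS. A small sketch of reading
them follows; the cache name kmalloc-64 is chosen purely as an example.

    /* Sketch: read a SLUB per-cache stat exposed by CONFIG_SLUB_STATS. */
    #include <stdio.h>

    static long read_stat(const char *cache, const char *stat)
    {
            char path[256];
            long val = -1;
            FILE *f;

            snprintf(path, sizeof(path), "/sys/kernel/slab/%s/%s",
                     cache, stat);
            f = fopen(path, "r");
            if (!f)
                    return -1; /* missing file: CONFIG_SLUB_STATS is off */
            /* The file holds the total followed by per-cpu breakdowns;
             * the leading number is the sum across CPUs. */
            if (fscanf(f, "%ld", &val) != 1)
                    val = -1;
            fclose(f);
            return val;
    }

    int main(void)
    {
            printf("alloc_fastpath = %ld\n",
                   read_stat("kmalloc-64", "alloc_fastpath"));
            printf("free_fastpath  = %ld\n",
                   read_stat("kmalloc-64", "free_fastpath"));
            return 0;
    }

Before this fix, free_fastpath would lag the true object count on bulk-heavy
workloads and alloc_fastpath would miss bulk allocations served from the cpu
freelist entirely.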