author      Hyeonggon Yoo <42.hyeyoo@gmail.com>    2022-10-15 07:34:29 +0300
committer   Vlastimil Babka <vbabka@suse.cz>       2022-10-15 22:42:05 +0300
commit      e36ce448a08d43de69e7449eb225805a7a8addf8 (patch)
tree        d1b0f706f04ff2bb76f86cba4597c7e3f41a86d7 /mm
parent      d5eff736902d5565a24f1b571b5987b3e5ee9a5b (diff)
mm/slab: use kmalloc_node() for off slab freelist_idx_t array allocation
After commit d6a71648dbc0 ("mm/slab: kmalloc: pass requests larger than
order-1 page to page allocator"), SLAB passes large (> PAGE_SIZE * 2)
requests to the buddy allocator, like SLUB does.
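For context, that dispatch looks roughly like the sketch below. This is
an illustrative wrapper, not the kernel's actual entry point; it assumes
the mm-internal helpers kmalloc_slab() and kmalloc_large_node() as they
exist after this series:

    #include <linux/slab.h>

    /*
     * Illustrative sketch only: requests above KMALLOC_MAX_CACHE_SIZE
     * (PAGE_SIZE * 2, i.e. an order-1 allocation) bypass the kmalloc
     * caches and go straight to the buddy allocator.
     */
    static void *kmalloc_node_sketch(size_t size, gfp_t flags, int node)
    {
    	if (size > KMALLOC_MAX_CACHE_SIZE)
    		return kmalloc_large_node(size, flags, node);	/* buddy */

    	/* small request: served by one of the fixed kmalloc caches */
    	return kmem_cache_alloc_node(kmalloc_slab(size, flags),
    				     flags, node);
    }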
SLAB has been using kmalloc caches to allocate the freelist_idx_t array
for off-slab caches. But after that commit, freelist_size can be bigger
than KMALLOC_MAX_CACHE_SIZE.
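A worked example of the overflow, assuming 4K pages and a two-byte
freelist_idx_t (both values are configuration-dependent):

    /*
     * KMALLOC_MAX_CACHE_SIZE = PAGE_SIZE * 2              = 8192
     * freelist_size          = num * sizeof(freelist_idx_t)
     *                        = 8192 objects * 2 bytes     = 16384
     *
     * 16384 > 8192: no kmalloc cache is large enough, so
     * kmalloc_slab() has nothing to return for this size.
     */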
Instead of keeping a pointer to a kmalloc cache, use kmalloc_node() and
only check whether the kmalloc cache is off-slab during
calculate_slab_order(). If freelist_size > KMALLOC_MAX_CACHE_SIZE, the
looping condition cannot occur, because the freelist_idx_t array is
allocated directly from the buddy allocator (see the call-chain sketch
below).
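The looping condition can be pictured as the call chain below
(simplified and illustrative; the function names are the real ones
touched by this patch):

    /*
     * cache_grow_begin(cachep)       cachep keeps its freelist off-slab
     *   alloc_slabmgmt()             needs a freelist_idx_t array
     *     kmalloc_node()             may have to grow a kmalloc cache
     *       cache_grow_begin(...)    must be on-slab, or this repeats
     *
     * When freelist_size > KMALLOC_MAX_CACHE_SIZE the array comes from
     * the buddy allocator instead, with an effective size of
     * PAGE_SIZE << get_order(freelist_size), so no cache is involved.
     */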
Link: https://lore.kernel.org/all/20221014205818.GA1428667@roeck-us.net/
Reported-and-tested-by: Guenter Roeck <linux@roeck-us.net>
Fixes: d6a71648dbc0 ("mm/slab: kmalloc: pass requests larger than order-1 page to page allocator")
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm')
-rw-r--r--   mm/slab.c   37
1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index a5486ff8362a..d1f6e2c64c2e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1619,7 +1619,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slab)
 	 * although actual page can be freed in rcu context
 	 */
 	if (OFF_SLAB(cachep))
-		kmem_cache_free(cachep->freelist_cache, freelist);
+		kfree(freelist);
 }
 
 /*
@@ -1671,21 +1671,27 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		if (flags & CFLGS_OFF_SLAB) {
 			struct kmem_cache *freelist_cache;
 			size_t freelist_size;
+			size_t freelist_cache_size;
 
 			freelist_size = num * sizeof(freelist_idx_t);
-			freelist_cache = kmalloc_slab(freelist_size, 0u);
-			if (!freelist_cache)
-				continue;
-
-			/*
-			 * Needed to avoid possible looping condition
-			 * in cache_grow_begin()
-			 */
-			if (OFF_SLAB(freelist_cache))
-				continue;
+			if (freelist_size > KMALLOC_MAX_CACHE_SIZE) {
+				freelist_cache_size = PAGE_SIZE << get_order(freelist_size);
+			} else {
+				freelist_cache = kmalloc_slab(freelist_size, 0u);
+				if (!freelist_cache)
+					continue;
+				freelist_cache_size = freelist_cache->size;
+
+				/*
+				 * Needed to avoid possible looping condition
+				 * in cache_grow_begin()
+				 */
+				if (OFF_SLAB(freelist_cache))
+					continue;
+			}
 
 			/* check if off slab has enough benefit */
-			if (freelist_cache->size > cachep->size / 2)
+			if (freelist_cache_size > cachep->size / 2)
 				continue;
 		}
 
@@ -2061,11 +2067,6 @@ done:
 	cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 #endif
 
-	if (OFF_SLAB(cachep)) {
-		cachep->freelist_cache =
-			kmalloc_slab(cachep->freelist_size, 0u);
-	}
-
 	err = setup_cpu_cache(cachep, gfp);
 	if (err) {
 		__kmem_cache_release(cachep);
@@ -2292,7 +2293,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
 		freelist = NULL;
 	else if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
-		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
+		freelist = kmalloc_node(cachep->freelist_size,
 						 local_flags, nodeid);
 	} else {
 		/* We will use last bytes at the slab for freelist */