author     Hyeonggon Yoo <42.hyeyoo@gmail.com>    2022-08-17 13:18:18 +0300
committer  Vlastimil Babka <vbabka@suse.cz>       2022-08-24 17:11:41 +0300
commit     c4cab557521a73bd803e5c6f613b4e00bd3c4662
tree       6952eec31b0c7ce64413b421d574b84032727097 /mm/slab_common.c
parent     bf37d791022ecfb1279ac88c5448a53f1ae40a59
mm/slab_common: cleanup kmalloc_large()
Now that kmalloc_large() and kmalloc_large_node() do mostly the same job,
make kmalloc_large() a wrapper of kmalloc_large_node_notrace().

While at it, add the missing GFP_SLAB_BUG_MASK fixup (via
kmalloc_fix_flags()) to kmalloc_large_node_notrace().
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
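[Editor's note] For readers unfamiliar with the fixup the message refers to: GFP_SLAB_BUG_MASK covers gfp bits that callers must not pass down to the page allocator, and kmalloc_fix_flags() masks them off with a warning (its definition appears as context at the top of the first hunk below). The following is a minimal, standalone userspace sketch of that pattern; the flag values are illustrative placeholders, not the kernel's definitions, and only the masking logic mirrors what the patch adds:

	/* Sketch of the GFP_SLAB_BUG_MASK fixup pattern. Placeholder flag
	 * values; compile with any C compiler and run as a normal program. */
	#include <stdio.h>

	typedef unsigned int gfp_t;

	#define __GFP_HIGHMEM      0x02u	/* placeholder value */
	#define __GFP_DMA32        0x04u	/* placeholder value */
	#define GFP_SLAB_BUG_MASK  (__GFP_DMA32 | __GFP_HIGHMEM)

	static gfp_t kmalloc_fix_flags(gfp_t flags)
	{
		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

		/* The kernel version also dumps a stack trace here. */
		fprintf(stderr, "Unexpected gfp: %#x. Fix your code!\n",
			invalid_mask);
		return flags & ~GFP_SLAB_BUG_MASK;
	}

	int main(void)
	{
		gfp_t flags = 0x10u | __GFP_HIGHMEM;	/* bogus caller bit */

		if (flags & GFP_SLAB_BUG_MASK)
			flags = kmalloc_fix_flags(flags);
		printf("flags after fixup: %#x\n", flags);	/* 0x10 */
		return 0;
	}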
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--    mm/slab_common.c    35
1 file changed, 13 insertions(+), 22 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7a0942d54424..51ccd0545816 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -905,28 +905,6 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
  * directly to the page allocator. We use __GFP_COMP, because we will need to
  * know the allocation order to free the pages properly in kfree.
  */
-void *kmalloc_large(size_t size, gfp_t flags)
-{
-	void *ret = NULL;
-	struct page *page;
-	unsigned int order = get_order(size);
-
-	if (unlikely(flags & GFP_SLAB_BUG_MASK))
-		flags = kmalloc_fix_flags(flags);
-
-	page = alloc_pages(flags | __GFP_COMP, order);
-	if (likely(page)) {
-		ret = page_address(page);
-		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
-				      PAGE_SIZE << order);
-	}
-	ret = kasan_kmalloc_large(ret, size, flags);
-	/* As ret might get tagged, call kmemleak hook after KASAN. */
-	kmemleak_alloc(ret, size, 1, flags);
-	trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
-	return ret;
-}
-EXPORT_SYMBOL(kmalloc_large);
 
 void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
 {
@@ -934,6 +912,9 @@ void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
 	void *ptr = NULL;
 	unsigned int order = get_order(size);
 
+	if (unlikely(flags & GFP_SLAB_BUG_MASK))
+		flags = kmalloc_fix_flags(flags);
+
 	flags |= __GFP_COMP;
 	page = alloc_pages_node(node, flags, order);
 	if (page) {
@@ -949,6 +930,16 @@ void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
 	return ptr;
 }
 
+void *kmalloc_large(size_t size, gfp_t flags)
+{
+	void *ret = kmalloc_large_node_notrace(size, flags, NUMA_NO_NODE);
+
+	trace_kmalloc(_RET_IP_, ret, NULL, size,
+		      PAGE_SIZE << get_order(size), flags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_large);
+
 void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	void *ret = kmalloc_large_node_notrace(size, flags, node);
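[Editor's note] On the resulting structure: the real allocation work now lives in kmalloc_large_node_notrace(), which deliberately contains no tracepoint, while kmalloc_large() and kmalloc_large_node() (the latter visible as context in the last hunk) are thin wrappers that add the trace_kmalloc() call. Presumably this split lets internal slab callers reach the large-allocation path without emitting a duplicate trace event, while the external entry points keep their tracing behavior unchanged.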