From c4ba69f00c18524eb89990e9afda9d2a9b54de2f Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Tue, 28 Feb 2023 10:59:54 +0100
Subject: mm, page_flags: remove PG_slob_free

With SLOB removed we no longer need the PG_slob_free alias for
PG_private. Also update tools/mm/page-types.

Signed-off-by: Vlastimil Babka
Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: Lorenzo Stoakes
Acked-by: Mike Rapoport (IBM)
---
 include/linux/page-flags.h | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index a7e3a3405520..2bdc41cb0594 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -174,9 +174,6 @@ enum pageflags {
         /* Remapped by swiotlb-xen. */
         PG_xen_remapped = PG_owner_priv_1,

-        /* SLOB */
-        PG_slob_free = PG_private,
-
 #ifdef CONFIG_MEMORY_FAILURE
         /*
          * Compound pages. Stored in first tail page's flags.
@@ -483,7 +480,6 @@ PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
 PAGEFLAG(Workingset, workingset, PF_HEAD)
         TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
 __PAGEFLAG(Slab, slab, PF_NO_TAIL)
-__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
 PAGEFLAG(Checked, checked, PF_NO_COMPOUND)   /* Used by some filesystems */

 /* Xen */
--
cgit v1.2.3

From de4d6089b9271ed172e92148a04f31578b375525 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Tue, 28 Feb 2023 15:34:17 +0100
Subject: mm/slab: remove CONFIG_SLOB code from slab common code

CONFIG_SLOB has been removed from Kconfig. Remove code and #ifdef's
specific to SLOB in the slab headers and common code.

Signed-off-by: Vlastimil Babka
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: Lorenzo Stoakes
Acked-by: Mike Rapoport (IBM)
---
 include/linux/slab.h |  39 ---------------------------------
 mm/slab.h            |  61 ----------------------------------------------------
 mm/slab_common.c     |   2 --
 3 files changed, 102 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 45af70315a94..7f645a4c1298 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -298,19 +298,6 @@ static inline unsigned int arch_slab_minalign(void)
 #endif
 #endif

-#ifdef CONFIG_SLOB
-/*
- * SLOB passes all requests larger than one page to the page allocator.
- * No kmalloc array is necessary since objects of different sizes can
- * be allocated from the same page.
- */
-#define KMALLOC_SHIFT_HIGH      PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX       (MAX_ORDER + PAGE_SHIFT - 1)
-#ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW       3
-#endif
-#endif
-
 /* Maximum allocatable size */
 #define KMALLOC_MAX_SIZE        (1UL << KMALLOC_SHIFT_MAX)
 /* Maximum size for which we actually use a slab cache */
@@ -366,7 +353,6 @@ enum kmalloc_cache_type {
         NR_KMALLOC_TYPES
 };

-#ifndef CONFIG_SLOB
 extern struct kmem_cache *
 kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

@@ -458,7 +444,6 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
 }
 static_assert(PAGE_SHIFT <= 20);
 #define kmalloc_index(s) __kmalloc_index(s, true)
-#endif /* !CONFIG_SLOB */

 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);

@@ -487,10 +472,6 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
-/*
- * Caller must not use kfree_bulk() on memory not originally allocated
- * by kmalloc(), because the SLOB allocator cannot handle this.
- */
 static __always_inline void kfree_bulk(size_t size, void **p)
 {
         kmem_cache_free_bulk(NULL, size, p);
 }
@@ -567,7 +548,6 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
  * Try really hard to succeed the allocation but fail
  * eventually.
  */
-#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 {
         if (__builtin_constant_p(size) && size) {
@@ -583,17 +563,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
         }
         return __kmalloc(size, flags);
 }
-#else
-static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
-{
-        if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
-                return kmalloc_large(size, flags);
-
-        return __kmalloc(size, flags);
-}
-#endif

-#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
         if (__builtin_constant_p(size) && size) {
@@ -609,15 +579,6 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
         }
         return __kmalloc_node(size, flags, node);
 }
-#else
-static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-        if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
-                return kmalloc_large_node(size, flags, node);
-
-        return __kmalloc_node(size, flags, node);
-}
-#endif

 /**
  * kmalloc_array - allocate memory for an array.
diff --git a/mm/slab.h b/mm/slab.h
index 43966aa5fadf..399966b3ce52 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -51,14 +51,6 @@ struct slab {
         };
         unsigned int __unused;

-#elif defined(CONFIG_SLOB)
-
-        struct list_head slab_list;
-        void *__unused_1;
-        void *freelist;         /* first free block */
-        long units;
-        unsigned int __unused_2;
-
 #else
 #error "Unexpected slab allocator configured"
 #endif
@@ -72,11 +64,7 @@ struct slab {
 #define SLAB_MATCH(pg, sl) \
         static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
 SLAB_MATCH(flags, __page_flags);
-#ifndef CONFIG_SLOB
 SLAB_MATCH(compound_head, slab_cache);  /* Ensure bit 0 is clear */
-#else
-SLAB_MATCH(compound_head, slab_list);   /* Ensure bit 0 is clear */
-#endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
 SLAB_MATCH(memcg_data, memcg_data);
@@ -200,31 +188,6 @@ static inline size_t slab_size(const struct slab *slab)
         return PAGE_SIZE << slab_order(slab);
 }

-#ifdef CONFIG_SLOB
-/*
- * Common fields provided in kmem_cache by all slab allocators
- * This struct is either used directly by the allocator (SLOB)
- * or the allocator must include definitions for all fields
- * provided in kmem_cache_common in their definition of kmem_cache.
- *
- * Once we can do anonymous structs (C11 standard) we could put a
- * anonymous struct definition in these allocators so that the
- * separate allocations in the kmem_cache structure of SLAB and
- * SLUB is no longer needed.
- */
-struct kmem_cache {
-        unsigned int object_size;/* The original size of the object */
-        unsigned int size;      /* The aligned/padded/added on size */
-        unsigned int align;     /* Alignment as calculated */
-        slab_flags_t flags;     /* Active flags on the slab */
-        const char *name;       /* Slab name for sysfs */
-        int refcount;           /* Use counter */
-        void (*ctor)(void *);   /* Called on object slot creation */
-        struct list_head list;  /* List of all slab caches on the system */
-};
-
-#endif /* CONFIG_SLOB */
-
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
 #endif
@@ -274,7 +237,6 @@ extern const struct kmalloc_info_struct {
         unsigned int size;
 } kmalloc_info[];

-#ifndef CONFIG_SLOB
 /* Kmalloc array related functions */
 void setup_kmalloc_cache_index_table(void);
 void create_kmalloc_caches(slab_flags_t);
@@ -286,7 +248,6 @@ void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
                               int node, size_t orig_size,
                               unsigned long caller);
 void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
-#endif

 gfp_t kmalloc_fix_flags(gfp_t flags);

@@ -303,33 +264,16 @@ extern void create_boot_cache(struct kmem_cache *, const char *name,
 int slab_unmergeable(struct kmem_cache *s);
 struct kmem_cache *find_mergeable(unsigned size, unsigned align,
                 slab_flags_t flags, const char *name, void (*ctor)(void *));
-#ifndef CONFIG_SLOB
 struct kmem_cache *
 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                    slab_flags_t flags, void (*ctor)(void *));

 slab_flags_t kmem_cache_flags(unsigned int object_size,
         slab_flags_t flags, const char *name);
-#else
-static inline struct kmem_cache *
-__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
-                   slab_flags_t flags, void (*ctor)(void *))
-{ return NULL; }
-
-static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
-        slab_flags_t flags, const char *name)
-{
-        return flags;
-}
-#endif

 static inline bool is_kmalloc_cache(struct kmem_cache *s)
 {
-#ifndef CONFIG_SLOB
         return (s->flags & SLAB_KMALLOC);
-#else
-        return false;
-#endif
 }

 /* Legal flag mask for kmem_cache_create(), for various configurations */
@@ -634,7 +578,6 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
 }
 #endif /* CONFIG_MEMCG_KMEM */

-#ifndef CONFIG_SLOB
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
         struct slab *slab;
@@ -684,8 +627,6 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)

 void free_large_kmalloc(struct folio *folio, void *object);

-#endif /* CONFIG_SLOB */
-
 size_t __ksize(const void *objp);

 static inline size_t slab_ksize(const struct kmem_cache *s)
@@ -777,7 +718,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
         memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
 }

-#ifndef CONFIG_SLOB
 /*
  * The slab lists for all objects.
  */
@@ -824,7 +764,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
         for (__node = 0; __node < nr_node_ids; __node++) \
                  if ((__n = get_node(__s, __node)))

-#endif

 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
 void dump_unreclaimable_slab(void);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index bf4e777cfe90..1522693295f5 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -625,7 +625,6 @@ void kmem_dump_obj(void *object)
 EXPORT_SYMBOL_GPL(kmem_dump_obj);
 #endif

-#ifndef CONFIG_SLOB
 /* Create a cache during boot when no slab services are available yet */
 void __init create_boot_cache(struct kmem_cache *s, const char *name,
                 unsigned int size, slab_flags_t flags,
@@ -1079,7 +1078,6 @@ void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
         return ret;
 }
 EXPORT_SYMBOL(kmalloc_node_trace);
-#endif /* !CONFIG_SLOB */

 gfp_t kmalloc_fix_flags(gfp_t flags)
 {
--
cgit v1.2.3

From ae65a5211d90e54ae604012ce9cf234c48780929 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Thu, 2 Mar 2023 16:01:00 +0100
Subject: mm/slab: document kfree() as allowed for kmem_cache_alloc() objects

This will make it easier to free objects in situations when they can
come from either kmalloc() or kmem_cache_alloc(), and also allow
kfree_rcu() for freeing objects from kmem_cache_alloc().

For the SLAB and SLUB allocators this was always possible so with SLOB
gone, we can document it as supported.

Signed-off-by: Vlastimil Babka
Reviewed-by: Mike Rapoport (IBM)
Cc: Jonathan Corbet
Cc: "Paul E. McKenney"
Cc: Frederic Weisbecker
Cc: Neeraj Upadhyay
Cc: Josh Triplett
Cc: Steven Rostedt
Cc: Mathieu Desnoyers
Cc: Lai Jiangshan
Cc: Joel Fernandes
---
 Documentation/core-api/memory-allocation.rst | 17 +++++++++++++----
 include/linux/rcupdate.h                     |  6 ++++--
 mm/slab_common.c                             |  5 +----
 3 files changed, 18 insertions(+), 10 deletions(-)

(limited to 'include/linux')

diff --git a/Documentation/core-api/memory-allocation.rst b/Documentation/core-api/memory-allocation.rst
index 5954ddf6ee13..1c58d883b273 100644
--- a/Documentation/core-api/memory-allocation.rst
+++ b/Documentation/core-api/memory-allocation.rst
@@ -170,7 +170,16 @@ should be used if a part of the cache might be copied to the userspace.
 After the cache is created kmem_cache_alloc() and its convenience
 wrappers can allocate memory from that cache.

-When the allocated memory is no longer needed it must be freed. You can
-use kvfree() for the memory allocated with `kmalloc`, `vmalloc` and
-`kvmalloc`. The slab caches should be freed with kmem_cache_free(). And
-don't forget to destroy the cache with kmem_cache_destroy().
+When the allocated memory is no longer needed it must be freed.
+
+Objects allocated by `kmalloc` can be freed by `kfree` or `kvfree`. Objects
+allocated by `kmem_cache_alloc` can be freed with `kmem_cache_free`, `kfree`
+or `kvfree`, where the latter two might be more convenient thanks to not
+needing the kmem_cache pointer.
+
+The same rules apply to _bulk and _rcu flavors of freeing functions.
+
+Memory allocated by `vmalloc` can be freed with `vfree` or `kvfree`.
+Memory allocated by `kvmalloc` can be freed with `kvfree`.
+Caches created by `kmem_cache_create` should be freed with
+`kmem_cache_destroy` only after freeing all the allocated objects first.
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 094321c17e48..dcd2cf1e8326 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -976,8 +976,10 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
  * either fall back to use of call_rcu() or rearrange the structure to
  * position the rcu_head structure into the first 4096 bytes.
  *
- * Note that the allowable offset might decrease in the future, for example,
- * to allow something like kmem_cache_free_rcu().
+ * The object to be freed can be allocated either by kmalloc() or
+ * kmem_cache_alloc().
+ *
+ * Note that the allowable offset might decrease in the future.
  *
  * The BUILD_BUG_ON check must not involve any function calls, hence the
  * checks are done in macros here.
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 1522693295f5..607249785c07 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -989,12 +989,9 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);

 /**
  * kfree - free previously allocated memory
- * @object: pointer returned by kmalloc.
+ * @object: pointer returned by kmalloc() or kmem_cache_alloc()
  *
  * If @object is NULL, no operation is performed.
- *
- * Don't free memory not originally allocated by kmalloc()
- * or you will run into trouble.
  */
 void kfree(const void *object)
 {
--
cgit v1.2.3
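
As a quick illustration of the freeing rules the last patch documents, the
following is a minimal sketch and not part of the patches above; struct foo,
the "foo" cache name and foo_example() are invented for illustration only.

#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>

struct foo {
        int val;
        struct rcu_head rcu;
};

/* hypothetical cache, created in foo_example() below */
static struct kmem_cache *foo_cache;

static int foo_example(void)
{
        struct foo *f;

        foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0, 0, NULL);
        if (!foo_cache)
                return -ENOMEM;

        f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
        if (!f) {
                kmem_cache_destroy(foo_cache);
                return -ENOMEM;
        }

        /*
         * kmem_cache_free(foo_cache, f) still works; kfree() is now
         * documented as equally valid and needs no cache pointer.
         */
        kfree(f);

        f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
        if (f)
                kfree_rcu(f, rcu);              /* deferred freeing is also allowed */

        rcu_barrier();                          /* wait for the kfree_rcu() callback */
        kmem_cache_destroy(foo_cache);          /* only after all objects are freed */
        return 0;
}

Here the cache-backed object is passed to kfree()/kfree_rcu() purely for
convenience when the kmem_cache pointer is not at hand; kmem_cache_free()
remains the explicit option.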