author		Vlastimil Babka <vbabka@suse.cz>	2023-02-28 17:34:17 +0300
committer	Vlastimil Babka <vbabka@suse.cz>	2023-03-29 11:32:22 +0300
commit		de4d6089b9271ed172e92148a04f31578b375525 (patch)
tree		1cefe5fe4bc5d3abbea14ecafe3aedb0a8fba176 /include/linux/slab.h
parent		d88e2a2bd20562048a263ca6a0f939c2695ffb2e (diff)
download	linux-de4d6089b9271ed172e92148a04f31578b375525.tar.xz
mm/slab: remove CONFIG_SLOB code from slab common code
CONFIG_SLOB has been removed from Kconfig. Remove code and #ifdefs
specific to SLOB in the slab headers and common code.
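
For context, with the SLOB variants gone the inline kmalloc() fast path is
unconditional. The following is a sketch of the resulting function, assembled
from the hunks below plus the lines the diff elides; kmalloc_trace() and
kmalloc_type() do not appear in this hunk and are assumed from the surrounding
header of this kernel version:

static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		/* Sizes above the largest kmalloc cache go to the page allocator. */
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);

		/* Constant-folded lookup of the matching kmalloc cache. */
		index = kmalloc_index(size);
		return kmalloc_trace(kmalloc_caches[kmalloc_type(flags)][index],
				     flags, size);
	}
	/* Runtime-sized requests take the out-of-line slow path. */
	return __kmalloc(size, flags);
}

Previously a second, simpler definition under #else served SLOB, which only
distinguished page-allocator-sized requests; that duplicate is what this
patch deletes.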
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r-- | include/linux/slab.h | 39
1 file changed, 0 insertions, 39 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 45af70315a94..7f645a4c1298 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -298,19 +298,6 @@ static inline unsigned int arch_slab_minalign(void)
 #endif
 #endif
 
-#ifdef CONFIG_SLOB
-/*
- * SLOB passes all requests larger than one page to the page allocator.
- * No kmalloc array is necessary since objects of different sizes can
- * be allocated from the same page.
- */
-#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
-#ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW	3
-#endif
-#endif
-
 /* Maximum allocatable size */
 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
 /* Maximum size for which we actually use a slab cache */
@@ -366,7 +353,6 @@ enum kmalloc_cache_type {
 	NR_KMALLOC_TYPES
 };
 
-#ifndef CONFIG_SLOB
 extern struct kmem_cache *
 kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
 
@@ -458,7 +444,6 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
 }
 static_assert(PAGE_SHIFT <= 20);
 #define kmalloc_index(s) __kmalloc_index(s, true)
-#endif /* !CONFIG_SLOB */
 
 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
@@ -487,10 +472,6 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
-/*
- * Caller must not use kfree_bulk() on memory not originally allocated
- * by kmalloc(), because the SLOB allocator cannot handle this.
- */
 static __always_inline void kfree_bulk(size_t size, void **p)
 {
 	kmem_cache_free_bulk(NULL, size, p);
 }
@@ -567,7 +548,6 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
  * Try really hard to succeed the allocation but fail
  * eventually.
  */
-#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && size) {
@@ -583,17 +563,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 	}
 	return __kmalloc(size, flags);
 }
-#else
-static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
-		return kmalloc_large(size, flags);
-
-	return __kmalloc(size, flags);
-}
-#endif
 
-#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) && size) {
@@ -609,15 +579,6 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
 	}
 	return __kmalloc_node(size, flags, node);
 }
-#else
-static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
-		return kmalloc_large_node(size, flags, node);
-
-	return __kmalloc_node(size, flags, node);
-}
-#endif
 
 /**
  * kmalloc_array - allocate memory for an array.
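
The comment deleted from kfree_bulk() recorded a SLOB-only restriction: SLOB
could not bulk-free objects that were not kmalloc() allocations. Under SLAB
and SLUB, kmem_cache_free_bulk(NULL, ...) resolves each object's cache from
the slab it lives in, so no caveat remains. A minimal, hypothetical caller
sketch (bulk_free_demo() and the sizes are illustrative, not part of this
patch):

#include <linux/errno.h>
#include <linux/slab.h>

static int bulk_free_demo(void)
{
	void *objs[8];
	int i;

	for (i = 0; i < 8; i++) {
		objs[i] = kmalloc(64, GFP_KERNEL);
		if (!objs[i])
			goto err;
	}

	/* kfree_bulk() expands to kmem_cache_free_bulk(NULL, size, p);
	 * the allocator derives each object's cache internally. */
	kfree_bulk(8, objs);
	return 0;

err:
	/* Free only the objects allocated before the failure. */
	while (i--)
		kfree(objs[i]);
	return -ENOMEM;
}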