author      Christian Brauner <brauner@kernel.org>    2024-09-05 10:56:52 +0300
committer   Vlastimil Babka <vbabka@suse.cz>          2024-09-10 12:42:58 +0300
commit      dacf472bcdfa4f3b08cd2c1beef034e3e5ab4bd0 (patch)
tree        0425e7649b81ccff07e68a625c940ada57e0ed2b /mm/slub.c
parent      3dbe2bad5785e4a9f62d99186e7d31410a8f1e2d (diff)
slab: remove rcu_freeptr_offset from struct kmem_cache
Pass down struct kmem_cache_args to calculate_sizes() so we can use
args->use_freeptr_offset and args->freeptr_offset directly. This allows
us to remove ->rcu_freeptr_offset from struct kmem_cache.

Reviewed-by: Kees Cook <kees@kernel.org>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
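For context, a minimal caller-side sketch of how a cache would request a dedicated
freelist-pointer slot, assuming the kmem_cache_args-based kmem_cache_create()
variant introduced alongside this series; the demo_* struct and cache names are
invented for illustration and are not part of this patch.

    #include <linux/init.h>
    #include <linux/slab.h>
    #include <linux/stddef.h>

    /* Illustrative object layout; names are made up for this sketch. */
    struct demo_rcu_obj {
    	struct rcu_head rcu;
    	unsigned long state;
    	void *freeptr;	/* dedicated slot for SLUB's freelist pointer */
    };

    static struct kmem_cache *demo_cachep;

    static int __init demo_cache_init(void)
    {
    	struct kmem_cache_args args = {
    		.freeptr_offset		= offsetof(struct demo_rcu_obj, freeptr),
    		.use_freeptr_offset	= true,
    	};

    	/*
    	 * With this patch the offset is consumed straight from args in
    	 * calculate_sizes(); it is no longer cached in struct kmem_cache.
    	 */
    	demo_cachep = kmem_cache_create("demo_rcu_cache",
    					sizeof(struct demo_rcu_obj),
    					&args, SLAB_TYPESAFE_BY_RCU);
    	return demo_cachep ? 0 : -ENOMEM;
    }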
Diffstat (limited to 'mm/slub.c')
-rw-r--r--    mm/slub.c    25
1 file changed, 7 insertions(+), 18 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 4719b60215b8..a23c7036cd61 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3916,8 +3916,7 @@ static void *__slab_alloc_node(struct kmem_cache *s,
* If the object has been wiped upon free, make sure it's fully initialized by
* zeroing out freelist pointer.
*
- * Note that we also wipe custom freelist pointers specified via
- * s->rcu_freeptr_offset.
+ * Note that we also wipe custom freelist pointers.
*/
static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
void *obj)
@@ -5141,17 +5140,11 @@ static void set_cpu_partial(struct kmem_cache *s)
#endif
}
-/* Was a valid freeptr offset requested? */
-static inline bool has_freeptr_offset(const struct kmem_cache *s)
-{
- return s->rcu_freeptr_offset != UINT_MAX;
-}
-
/*
* calculate_sizes() determines the order and the distribution of data within
* a slab object.
*/
-static int calculate_sizes(struct kmem_cache *s)
+static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
{
slab_flags_t flags = s->flags;
unsigned int size = s->object_size;
@@ -5192,7 +5185,7 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->inuse = size;
- if (((flags & SLAB_TYPESAFE_BY_RCU) && !has_freeptr_offset(s)) ||
+ if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
(flags & SLAB_POISON) || s->ctor ||
((flags & SLAB_RED_ZONE) &&
(s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
@@ -5214,8 +5207,8 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->offset = size;
size += sizeof(void *);
- } else if ((flags & SLAB_TYPESAFE_BY_RCU) && has_freeptr_offset(s)) {
- s->offset = s->rcu_freeptr_offset;
+ } else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) {
+ s->offset = args->freeptr_offset;
} else {
/*
* Store freelist pointer near middle of object to keep
@@ -5856,10 +5849,6 @@ int do_kmem_cache_create(struct kmem_cache *s, const char *name,
#ifdef CONFIG_SLAB_FREELIST_HARDENED
s->random = get_random_long();
#endif
- if (args->use_freeptr_offset)
- s->rcu_freeptr_offset = args->freeptr_offset;
- else
- s->rcu_freeptr_offset = UINT_MAX;
s->align = args->align;
s->ctor = args->ctor;
#ifdef CONFIG_HARDENED_USERCOPY
@@ -5867,7 +5856,7 @@ int do_kmem_cache_create(struct kmem_cache *s, const char *name,
s->usersize = args->usersize;
#endif
- if (!calculate_sizes(s))
+ if (!calculate_sizes(args, s))
goto out;
if (disable_higher_order_debug) {
/*
@@ -5877,7 +5866,7 @@ int do_kmem_cache_create(struct kmem_cache *s, const char *name,
if (get_order(s->size) > get_order(s->object_size)) {
s->flags &= ~DEBUG_METADATA_FLAGS;
s->offset = 0;
- if (!calculate_sizes(s))
+ if (!calculate_sizes(args, s))
goto out;
}
}
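Taken together, the hunks above reduce the freelist-pointer placement decision to
reading args directly. The sketch below restates that logic as one hypothetical
helper; it is simplified (the SLAB_RED_ZONE clause from the first hunk is omitted)
and the middle-of-object fallback is an assumption based on the comment retained
in the diff, not verbatim kernel code.

    /*
     * Simplified restatement of the placement logic in calculate_sizes()
     * after this patch. Helper name is hypothetical; the middle-of-object
     * fallback is assumed from the "near middle of object" comment above.
     */
    static unsigned int demo_pick_freeptr_offset(const struct kmem_cache *s,
    					     const struct kmem_cache_args *args,
    					     unsigned int size)
    {
    	slab_flags_t flags = s->flags;

    	if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
    	    (flags & SLAB_POISON) || s->ctor)
    		return size;			/* append pointer after the object */

    	if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset)
    		return args->freeptr_offset;	/* caller-chosen slot inside the object */

    	/* otherwise keep the pointer near the middle of the object */
    	return ALIGN_DOWN(s->object_size / 2, sizeof(void *));
    }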