From 1934b212615dc617ac84fc306333ab2b9fc3b04f Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Fri, 9 Aug 2024 18:00:01 +0200
Subject: file: reclaim 24 bytes from f_owner

We currently embed struct fown_struct in struct file, letting it take
up 32 bytes in total. We could tweak struct fown_struct to be more
compact but really it shouldn't even be embedded in struct file in the
first place. Instead, actual users of struct fown_struct should
allocate the struct on demand. This frees up 24 bytes in struct file.

That will have some potentially user-visible changes for the ownership
fcntl()s. Some of them can now fail due to allocation failures.
Practically, that will almost never happen as the allocations are
small and they only happen once per file.

The fown_struct is used during kill_fasync() which is used by e.g.
pipes to generate a SIGIO signal. Sending of such signals is
conditional on userspace having set an owner for the file using one of
the F_OWNER fcntl()s. Such users will be unaffected if struct
fown_struct is allocated during the fcntl() call.

There are a few subsystems that call __f_setown() expecting
file->f_owner to be allocated:

(1) tun devices

    file->f_op->fasync::tun_chr_fasync()
    -> __f_setown()

    There are no callers of tun_chr_fasync().

(2) tty devices

    file->f_op->fasync::tty_fasync()
    -> __tty_fasync()
       -> __f_setown()

    tty_fasync() has no additional callers but __tty_fasync() has.
    Note that __tty_fasync() only calls __f_setown() if the @on
    argument is true. It's called from:

    file->f_op->release::tty_release()
    -> tty_release()
       -> __tty_fasync()
          -> __f_setown()

    tty_release() calls __tty_fasync() with @on false
    => __f_setown() is never called from tty_release().
    => All callers of tty_release() are safe as well.

    file->f_op->release::tty_open()
    -> tty_release()
       -> __tty_fasync()
          -> __f_setown()

    __tty_hangup() calls __tty_fasync() with @on false
    => __f_setown() is never called from tty_release().
    => All callers of __tty_hangup() are safe as well.

From the callchains it's obvious that (1) and (2) end up getting
called via file->f_op->fasync(). That can happen either through the
F_SETFL fcntl() with the FASYNC flag raised or via the FIOASYNC
ioctl(). If FASYNC is requested and the file isn't already FASYNC then
file->f_op->fasync() is called with @on true, which ends up causing
both (1) and (2) to call __f_setown().

(1) and (2) are the only subsystems that call __f_setown() from the
file->f_op->fasync() handler. So both (1) and (2) have been updated to
allocate a struct fown_struct prior to calling fasync_helper() to
register with the fasync infrastructure. That's safe as they both call
fasync_helper() which also does allocations if @on is true.

The other interesting case are file leases:

(3) file leases

    lease_manager_ops->lm_setup::lease_setup()
    -> __f_setown()

    Which in turn is called from:

    generic_add_lease()
    -> lease_manager_ops->lm_setup::lease_setup()
       -> __f_setown()

So here again we can simply make generic_add_lease() allocate struct
fown_struct prior to the lease_manager_ops->lm_setup::lease_setup()
which happens under a spinlock.

With that the two remaining subsystems that call __f_setown() are:

(4) dnotify
(5) sockets

Both have their own custom ioctls to set struct fown_struct and both
have been converted to allocate a struct fown_struct on demand from
their respective ioctls.

Interactions with O_PATH are fine as well: e.g., when opening /dev/tty
as O_PATH no file->f_op->open() happens, thus no file->f_owner is
allocated. That's fine as no file operation will be set for those and
the device has never been opened. fcntl()s called on such things will
just allocate a ->f_owner on demand. Although I have zero idea why
you'd care about f_owner on an O_PATH fd.

Link: https://lore.kernel.org/r/20240813-work-f_owner-v2-1-4e9343a79f9f@kernel.org
Reviewed-by: Jeff Layton
Signed-off-by: Christian Brauner
---
 include/linux/fs.h | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/fs.h b/include/linux/fs.h
index fb0426f349fc..7af239ca87e2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -947,6 +947,7 @@ static inline unsigned imajor(const struct inode *inode)
 }
 
 struct fown_struct {
+	struct file *file;	/* backpointer for security modules */
 	rwlock_t lock;		/* protects pid, uid, euid fields */
 	struct pid *pid;	/* pid or -pgrp where SIGIO should be sent */
 	enum pid_type pid_type;	/* Kind of process group SIGIO should be sent to */
@@ -1011,7 +1012,7 @@ struct file {
 	struct mutex		f_pos_lock;
 	loff_t			f_pos;
 	unsigned int		f_flags;
-	struct fown_struct	f_owner;
+	struct fown_struct	*f_owner;
 	const struct cred	*f_cred;
 	struct file_ra_state	f_ra;
 	struct path		f_path;
@@ -1076,6 +1077,12 @@ struct file_lease;
 #define OFFT_OFFSET_MAX	type_max(off_t)
 #endif
 
+int file_f_owner_allocate(struct file *file);
+static inline struct fown_struct *file_f_owner(const struct file *file)
+{
+	return READ_ONCE(file->f_owner);
+}
+
 extern void send_sigio(struct fown_struct *fown, int fd, int band);
 
 static inline struct inode *file_inode(const struct file *f)
@@ -1124,7 +1131,7 @@ extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force
 extern int f_setown(struct file *filp, int who, int force);
 extern void f_delown(struct file *filp);
 extern pid_t f_getown(struct file *filp);
-extern int send_sigurg(struct fown_struct *fown);
+extern int send_sigurg(struct file *file);
 
 /*
  * sb->s_flags. Note that these mirror the equivalent MS_* flags where
--
cgit v1.2.3
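Only the declaration of file_f_owner_allocate() is visible in this header.
As a rough illustration of the on-demand pattern the commit message
describes, an implementation might look like the sketch below. The function,
accessor and field names come from the diff above; the body is illustrative,
not the actual fs/fcntl.c code:

    /* Illustrative sketch only; the real implementation lives in fs/fcntl.c. */
    int file_f_owner_allocate(struct file *file)
    {
    	struct fown_struct *f_owner;

    	/* A previous F_SETOWN/F_SETSIG/FIOASYNC already allocated it. */
    	if (file_f_owner(file))
    		return 0;

    	f_owner = kzalloc(sizeof(struct fown_struct), GFP_KERNEL);
    	if (!f_owner)
    		return -ENOMEM;

    	rwlock_init(&f_owner->lock);
    	f_owner->file = file;
    	/* Lost a race against a concurrent fcntl()? Keep the winner's copy. */
    	if (cmpxchg(&file->f_owner, NULL, f_owner))
    		kfree(f_owner);
    	return 0;
    }

The lockless install via cmpxchg() is what lets file_f_owner() read the
pointer with a plain READ_ONCE() and no locking.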
From a55d1cbd1720679cfe9837bce250e397ec513989 Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Thu, 22 Aug 2024 16:14:46 +0200
Subject: fs: switch f_iocb_flags and f_ra

Now that we shrank struct file by 24 bytes we still have a 4 byte
hole. If we move struct file_ra_state into the union and f_iocb_flags
out of the union we close that hole and bring struct file down to 192
bytes. That means struct file is 3 cachelines and we managed to shrink
it by 40 bytes this cycle.

I've tried to audit all codepaths that use f_ra and none of them seem
to rely on it in file->f_op->release() and never have since commit
1da177e4c3f4 ("Linux-2.6.12-rc2").

Link: https://lore.kernel.org/r/20240823-luftdicht-berappen-d69a2166a0db@brauner
Reviewed-by: Jeff Layton
Reviewed-by: Jens Axboe
Signed-off-by: Christian Brauner
---
 include/linux/fs.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7af239ca87e2..095a956aeb29 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -999,9 +999,9 @@ struct file {
 		struct callback_head	f_task_work;
 		/* fput() must use workqueue (most kernel threads). */
 		struct llist_node	f_llist;
-		unsigned int		f_iocb_flags;
+		/* Invalid after last fput(). */
+		struct file_ra_state	f_ra;
 	};
-
 	/*
 	 * Protects f_ep, f_flags.
 	 * Must not be taken from IRQ context.
@@ -1012,9 +1012,9 @@ struct file {
 	struct mutex		f_pos_lock;
 	loff_t			f_pos;
 	unsigned int		f_flags;
+	unsigned int		f_iocb_flags;
 	struct fown_struct	*f_owner;
 	const struct cred	*f_cred;
-	struct file_ra_state	f_ra;
 	struct path		f_path;
 	struct inode		*f_inode;	/* cached value */
 	const struct file_operations	*f_op;
--
cgit v1.2.3
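The trick being exploited here is that the union members have disjoint
lifetimes: f_ra only matters while the file can still do I/O, whereas
f_task_work/f_llist are only used after the last reference is dropped, so
they can share storage. A small standalone sketch of the same idea (the
struct below is hypothetical, not kernel code):

    #include <stdio.h>
    #include <stddef.h>

    /* Fields with disjoint lifetimes share one union slot: "live" state
     * used while the object is referenced, and teardown state used only
     * after the final reference is gone. */
    struct obj {
    	long refcount;
    	union {
    		struct { long start, size; } readahead; /* valid while live */
    		struct { struct obj *next; } free_work; /* valid during teardown */
    	};
    };

    int main(void)
    {
    	/* Both members start at the same offset: the union costs
    	 * max(), not the sum, of its members' sizes. */
    	printf("offsetof readahead = %zu\n", offsetof(struct obj, readahead));
    	printf("offsetof free_work = %zu\n", offsetof(struct obj, free_work));
    	printf("sizeof(struct obj)  = %zu\n", sizeof(struct obj));
    	return 0;
    }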
From c0390d541128e8820af8177a572d9d87ff68a3bb Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Fri, 23 Aug 2024 21:06:58 +0200
Subject: fs: pack struct file

Now that we've shrunk struct file to 192 bytes, aka 3 cachelines,
reorder it so that no holes are left and no members cross cachelines.

Add a short comment to each of the fields and mark the cachelines.

It's possible that we may have to tweak this based on profiling in the
future. So far I had Jens test this comparing io_uring with non-fixed
and fixed files and it improved performance.

The layout is a combination of Jens' and my changes.

Link: https://lore.kernel.org/r/20240824-peinigen-hocken-7384b977c643@brauner
Signed-off-by: Christian Brauner
---
 include/linux/fs.h | 91 ++++++++++++++++++++++++++++++------------------------
 1 file changed, 51 insertions(+), 40 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/fs.h b/include/linux/fs.h
index 095a956aeb29..af8bbd4eeb3a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -987,52 +987,63 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
 			index <  ra->start + ra->size);
 }
 
-/*
- * f_{lock,count,pos_lock} members can be highly contended and share
- * the same cacheline. f_{lock,mode} are very frequently used together
- * and so share the same cacheline as well. The read-mostly
- * f_{path,inode,op} are kept on a separate cacheline.
+/**
+ * struct file - Represents a file
+ * @f_count: reference count
+ * @f_lock: Protects f_ep, f_flags. Must not be taken from IRQ context.
+ * @f_mode: FMODE_* flags often used in hotpaths
+ * @f_op: file operations
+ * @f_mapping: Contents of a cacheable, mappable object.
+ * @private_data: filesystem or driver specific data
+ * @f_inode: cached inode
+ * @f_flags: file flags
+ * @f_iocb_flags: iocb flags
+ * @f_cred: stashed credentials of creator/opener
+ * @f_path: path of the file
+ * @f_pos_lock: lock protecting file position
+ * @f_pos: file position
+ * @f_version: file version
+ * @f_security: LSM security context of this file
+ * @f_owner: file owner
+ * @f_wb_err: writeback error
+ * @f_sb_err: per sb writeback errors
+ * @f_ep: link of all epoll hooks for this file
+ * @f_task_work: task work entry point
+ * @f_llist: work queue entrypoint
+ * @f_ra: file's readahead state
  */
 struct file {
-	union {
-		/* fput() uses task work when closing and freeing file (default). */
-		struct callback_head	f_task_work;
-		/* fput() must use workqueue (most kernel threads). */
-		struct llist_node	f_llist;
-		/* Invalid after last fput(). */
-		struct file_ra_state	f_ra;
-	};
-	/*
-	 * Protects f_ep, f_flags.
-	 * Must not be taken from IRQ context.
-	 */
-	spinlock_t		f_lock;
-	fmode_t			f_mode;
-	atomic_long_t		f_count;
-	struct mutex		f_pos_lock;
-	loff_t			f_pos;
-	unsigned int		f_flags;
-	unsigned int		f_iocb_flags;
-	struct fown_struct	*f_owner;
-	const struct cred	*f_cred;
-	struct path		f_path;
-	struct inode		*f_inode;	/* cached value */
+	atomic_long_t			f_count;
+	spinlock_t			f_lock;
+	fmode_t				f_mode;
 	const struct file_operations	*f_op;
-
-	u64			f_version;
+	struct address_space		*f_mapping;
+	void				*private_data;
+	struct inode			*f_inode;
+	unsigned int			f_flags;
+	unsigned int			f_iocb_flags;
+	const struct cred		*f_cred;
+	/* --- cacheline 1 boundary (64 bytes) --- */
+	struct path			f_path;
+	struct mutex			f_pos_lock;
+	loff_t				f_pos;
+	u64				f_version;
+	/* --- cacheline 2 boundary (128 bytes) --- */
 #ifdef CONFIG_SECURITY
-	void			*f_security;
+	void				*f_security;
 #endif
-	/* needed for tty driver, and maybe others */
-	void			*private_data;
-
+	struct fown_struct		*f_owner;
+	errseq_t			f_wb_err;
+	errseq_t			f_sb_err;
 #ifdef CONFIG_EPOLL
-	/* Used by fs/eventpoll.c to link all the hooks to this file */
-	struct hlist_head	*f_ep;
-#endif /* #ifdef CONFIG_EPOLL */
-	struct address_space	*f_mapping;
-	errseq_t		f_wb_err;
-	errseq_t		f_sb_err; /* for syncfs */
+	struct hlist_head		*f_ep;
+#endif
+	union {
+		struct callback_head	f_task_work;
+		struct llist_node	f_llist;
+		struct file_ra_state	f_ra;
+	};
+	/* --- cacheline 3 boundary (192 bytes) --- */
 } __randomize_layout
   __attribute__((aligned(4)));	/* lest something weird decides that 2 is OK */
--
cgit v1.2.3
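The cacheline markers are plain comments, so nothing stops a later patch
from silently pushing members across a boundary. One way to keep them
honest — not part of this patch, and only valid on a 64-bit build without
structure layout randomization, which changes offsets — would be
compile-time asserts next to the definition:

    /* Hypothetical checks mirroring the boundary comments above;
     * assumes a 64-bit config where the stated 192-byte layout holds. */
    static_assert(offsetof(struct file, f_path) == 64,
    	      "cacheline 1 should end right before f_path");
    static_assert(sizeof(struct file) <= 192,
    	      "struct file should stay within 3 cachelines");

In practice, running pahole -C file vmlinux prints the actual layout,
including holes and cacheline boundaries, which is how layouts like this
are usually verified.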
From d345bd2e9834e2da505977e154a1c179c793b7b2 Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Wed, 28 Aug 2024 12:56:24 +0200
Subject: mm: add kmem_cache_create_rcu()

When a kmem cache is created with SLAB_TYPESAFE_BY_RCU the free
pointer must be located outside of the object because we don't know
what part of the memory can safely be overwritten as it may be needed
to prevent object recycling.

That has the consequence that SLAB_TYPESAFE_BY_RCU may end up adding a
new cacheline. This is the case for e.g. struct file. After having it
shrunk down by 40 bytes and having it fit in three cachelines we still
have SLAB_TYPESAFE_BY_RCU adding a fourth cacheline because it needs
to accommodate the free pointer.

Add a new kmem_cache_create_rcu() function that allows the caller to
specify an offset where the free pointer is supposed to be placed.

Link: https://lore.kernel.org/r/20240828-work-kmem_cache-rcu-v3-2-5460bc1f09f6@kernel.org
Acked-by: Mike Rapoport (Microsoft)
Reviewed-by: Vlastimil Babka
Signed-off-by: Christian Brauner
---
 include/linux/slab.h |   9 ++++
 mm/slab.h            |   2 +
 mm/slab_common.c     | 136 ++++++++++++++++++++++++++++++++++++---------------
 mm/slub.c            |  20 +++++---
 4 files changed, 121 insertions(+), 46 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/slab.h b/include/linux/slab.h
index eb2bf4629157..5b2da2cf31a8 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -212,6 +212,12 @@ enum _slab_flag_bits {
 #define SLAB_NO_OBJ_EXT		__SLAB_FLAG_UNUSED
 #endif
 
+/*
+ * freeptr_t represents a SLUB freelist pointer, which might be encoded
+ * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
+ */
+typedef struct { unsigned long v; } freeptr_t;
+
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *
@@ -242,6 +248,9 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
 			slab_flags_t flags,
 			unsigned int useroffset, unsigned int usersize,
 			void (*ctor)(void *));
+struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
+					 unsigned int freeptr_offset,
+					 slab_flags_t flags);
 void kmem_cache_destroy(struct kmem_cache *s);
 int kmem_cache_shrink(struct kmem_cache *s);
 
diff --git a/mm/slab.h b/mm/slab.h
index dcdb56b8e7f5..a6051385186e 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -261,6 +261,8 @@ struct kmem_cache {
 	unsigned int object_size;	/* Object size without metadata */
 	struct reciprocal_value reciprocal_size;
 	unsigned int offset;		/* Free pointer offset */
+	/* Specific free pointer requested (if not UINT_MAX) */
+	unsigned int rcu_freeptr_offset;
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	/* Number of per cpu partial objects to keep around */
 	unsigned int cpu_partial;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index c8dd7e08c5f6..887f6b9855dd 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -202,9 +202,10 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
 }
 
 static struct kmem_cache *create_cache(const char *name,
-		unsigned int object_size, unsigned int align,
-		slab_flags_t flags, unsigned int useroffset,
-		unsigned int usersize, void (*ctor)(void *))
+		unsigned int object_size, unsigned int freeptr_offset,
+		unsigned int align, slab_flags_t flags,
+		unsigned int useroffset, unsigned int usersize,
+		void (*ctor)(void *))
 {
 	struct kmem_cache *s;
 	int err;
@@ -212,6 +213,13 @@ static struct kmem_cache *create_cache(const char *name,
 	if (WARN_ON(useroffset + usersize > object_size))
 		useroffset = usersize = 0;
 
+	/* If a custom freelist pointer is requested make sure it's sane. */
+	err = -EINVAL;
+	if (freeptr_offset != UINT_MAX &&
+	    (freeptr_offset >= object_size ||
+	     !(flags & SLAB_TYPESAFE_BY_RCU) ||
+	     !IS_ALIGNED(freeptr_offset, sizeof(freeptr_t))))
+		goto out;
+
 	err = -ENOMEM;
 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 	if (!s)
@@ -219,13 +227,13 @@ static struct kmem_cache *create_cache(const char *name,
 
 	s->name = name;
 	s->size = s->object_size = object_size;
+	s->rcu_freeptr_offset = freeptr_offset;
 	s->align = align;
 	s->ctor = ctor;
 #ifdef CONFIG_HARDENED_USERCOPY
 	s->useroffset = useroffset;
 	s->usersize = usersize;
 #endif
-
 	err = __kmem_cache_create(s, flags);
 	if (err)
 		goto out_free_cache;
@@ -240,38 +248,10 @@ out:
 	return ERR_PTR(err);
 }
 
-/**
- * kmem_cache_create_usercopy - Create a cache with a region suitable
- *			       for copying to userspace
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
- * @flags: SLAB flags
- * @useroffset: Usercopy region offset
- * @usersize: Usercopy region size
- * @ctor: A constructor for the objects.
- *
- * Cannot be called within a interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
- *
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
- *
- * Return: a pointer to the cache on success, NULL on failure.
- */
-struct kmem_cache *
-kmem_cache_create_usercopy(const char *name,
-		unsigned int size, unsigned int align,
-		slab_flags_t flags,
+static struct kmem_cache *
+do_kmem_cache_create_usercopy(const char *name,
+		unsigned int size, unsigned int freeptr_offset,
+		unsigned int align, slab_flags_t flags,
 		unsigned int useroffset, unsigned int usersize,
 		void (*ctor)(void *))
 {
@@ -331,7 +311,7 @@ kmem_cache_create_usercopy(const char *name,
 		goto out_unlock;
 	}
 
-	s = create_cache(cache_name, size,
+	s = create_cache(cache_name, size, freeptr_offset,
 			 calculate_alignment(flags, align, size),
 			 flags, useroffset, usersize, ctor);
 	if (IS_ERR(s)) {
@@ -355,6 +335,45 @@ out_unlock:
 	}
 	return s;
 }
+
+/**
+ * kmem_cache_create_usercopy - Create a cache with a region suitable
+ *			       for copying to userspace
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @freeptr_offset: Custom offset for the free pointer in RCU caches
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @useroffset: Usercopy region offset
+ * @usersize: Usercopy region size
+ * @ctor: A constructor for the objects.
+ *
+ * Cannot be called within a interrupt, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache.
+ *
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+struct kmem_cache *
+kmem_cache_create_usercopy(const char *name, unsigned int size,
+			   unsigned int align, slab_flags_t flags,
+			   unsigned int useroffset, unsigned int usersize,
+			   void (*ctor)(void *))
+{
+	return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
+					     useroffset, usersize, ctor);
+}
 EXPORT_SYMBOL(kmem_cache_create_usercopy);
 
 /**
@@ -386,11 +405,50 @@ struct kmem_cache *
 kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 		slab_flags_t flags, void (*ctor)(void *))
 {
-	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
-					  ctor);
+	return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
+					     0, 0, ctor);
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
+/**
+ * kmem_cache_create_rcu - Create a SLAB_TYPESAFE_BY_RCU cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @freeptr_offset: The offset into the memory to the free pointer
+ * @flags: SLAB flags
+ *
+ * Cannot be called within an interrupt, but can be interrupted.
+ *
+ * See kmem_cache_create() for an explanation of possible @flags.
+ *
+ * By default SLAB_TYPESAFE_BY_RCU caches place the free pointer outside
+ * of the object. This might cause the object to grow in size. Callers
+ * that have a reason to avoid this can specify a custom free pointer
+ * offset in their struct where the free pointer will be placed.
+ *
+ * Note that placing the free pointer inside the object requires the
+ * caller to ensure that no fields are invalidated that are required to
+ * guard against object recycling (See SLAB_TYPESAFE_BY_RCU for
+ * details.).
+ *
+ * Using zero as a value for @freeptr_offset is valid. To request no
+ * offset UINT_MAX must be specified.
+ *
+ * Note that @ctor isn't supported with custom free pointers as a @ctor
+ * requires an external free pointer.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
+					 unsigned int freeptr_offset,
+					 slab_flags_t flags)
+{
+	return do_kmem_cache_create_usercopy(name, size, freeptr_offset, 0,
+					     flags | SLAB_TYPESAFE_BY_RCU, 0, 0,
+					     NULL);
+}
+EXPORT_SYMBOL(kmem_cache_create_rcu);
+
 static struct kmem_cache *kmem_buckets_cache __ro_after_init;
 
 /**
diff --git a/mm/slub.c b/mm/slub.c
index c9d8a2497fd6..9aa5da1e8e27 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -465,12 +465,6 @@ static struct workqueue_struct *flushwq;
  *			Core slab cache functions
  *******************************************************************/
 
-/*
- * freeptr_t represents a SLUB freelist pointer, which might be encoded
- * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
- */
-typedef struct { unsigned long v; } freeptr_t;
-
 /*
  * Returns freelist pointer (ptr). With hardening, this is obfuscated
  * with an XOR of the address where the pointer is held and a per-cache
@@ -3921,6 +3915,9 @@ static void *__slab_alloc_node(struct kmem_cache *s,
 /*
  * If the object has been wiped upon free, make sure it's fully initialized by
  * zeroing out freelist pointer.
+ *
+ * Note that we also wipe custom freelist pointers specified via
+ * s->rcu_freeptr_offset.
  */
 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
 						   void *obj)
@@ -5144,6 +5141,12 @@ static void set_cpu_partial(struct kmem_cache *s)
 #endif
 }
 
+/* Was a valid freeptr offset requested? */
+static inline bool has_freeptr_offset(const struct kmem_cache *s)
+{
+	return s->rcu_freeptr_offset != UINT_MAX;
+}
+
 /*
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
@@ -5189,7 +5192,8 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	s->inuse = size;
 
-	if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor ||
+	if (((flags & SLAB_TYPESAFE_BY_RCU) && !has_freeptr_offset(s)) ||
+	    (flags & SLAB_POISON) || s->ctor ||
 	    ((flags & SLAB_RED_ZONE) && (s->object_size < sizeof(void *) ||
 					 slub_debug_orig_size(s)))) {
 		/*
@@ -5210,6 +5214,8 @@ static int calculate_sizes(struct kmem_cache *s)
 		 */
 		s->offset = size;
 		size += sizeof(void *);
+	} else if ((flags & SLAB_TYPESAFE_BY_RCU) && has_freeptr_offset(s)) {
+		s->offset = s->rcu_freeptr_offset;
 	} else {
 		/*
 		 * Store freelist pointer near middle of object to keep
--
cgit v1.2.3
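Taken together with the rules in the kerneldoc above (offset inside the
object, freeptr_t-aligned, SLAB_TYPESAFE_BY_RCU only, no ctor), a caller
would typically reserve the free pointer slot in a union with fields that
are dead once the object is freed. A hedged usage sketch — the struct and
names below are made up, only the API is from this patch:

    /* Hypothetical cache user illustrating kmem_cache_create_rcu(). */
    struct foo {
    	int id;				/* may still be read by RCU walkers */
    	union {
    		struct list_head list;	/* only used while the object is live */
    		freeptr_t free;		/* slab freelist pointer after free */
    	};
    };

    static struct kmem_cache *foo_cachep;

    static int __init foo_init(void)
    {
    	/* SLAB_TYPESAFE_BY_RCU is OR'ed in by the helper itself. */
    	foo_cachep = kmem_cache_create_rcu("foo", sizeof(struct foo),
    					   offsetof(struct foo, free),
    					   SLAB_HWCACHE_ALIGN);
    	return foo_cachep ? 0 : -ENOMEM;
    }

Because the free pointer overlays list, RCU readers must never rely on
list after dropping their reference, exactly as the kerneldoc warns.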
From ea566e18b4deea6e998088de4f1a76d1f39c8d3f Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Wed, 28 Aug 2024 12:56:25 +0200
Subject: fs: use kmem_cache_create_rcu()

Switch to the new kmem_cache_create_rcu() helper which allows us to use
a custom free pointer offset, avoiding the need for an external free
pointer which would grow struct file behind our backs.

Link: https://lore.kernel.org/r/20240828-work-kmem_cache-rcu-v3-3-5460bc1f09f6@kernel.org
Acked-by: Mike Rapoport (Microsoft)
Reviewed-by: Vlastimil Babka
Signed-off-by: Christian Brauner
---
 fs/file_table.c    | 6 +++---
 include/linux/fs.h | 2 ++
 2 files changed, 5 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/fs/file_table.c b/fs/file_table.c
index fdf98709dde2..3ef558f27a1c 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -511,9 +511,9 @@ EXPORT_SYMBOL(__fput_sync);
 
 void __init files_init(void)
 {
-	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
-				SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN |
-				SLAB_PANIC | SLAB_ACCOUNT, NULL);
+	filp_cachep = kmem_cache_create_rcu("filp", sizeof(struct file),
+				offsetof(struct file, f_freeptr),
+				SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
 	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
 }
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index af8bbd4eeb3a..58c91a52cad1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1011,6 +1011,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
  * @f_task_work: task work entry point
  * @f_llist: work queue entrypoint
  * @f_ra: file's readahead state
+ * @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.)
  */
 struct file {
 	atomic_long_t			f_count;
@@ -1042,6 +1043,7 @@ struct file {
 		struct callback_head	f_task_work;
 		struct llist_node	f_llist;
 		struct file_ra_state	f_ra;
+		freeptr_t		f_freeptr;
 	};
 	/* --- cacheline 3 boundary (192 bytes) --- */
 } __randomize_layout
--
cgit v1.2.3
From 879fb3c274c12cb0892718e1e54f85fc406e3c7b Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Thu, 5 Sep 2024 09:56:45 +0200
Subject: slab: add struct kmem_cache_args

Currently we have multiple kmem_cache_create*() variants that take up
to seven separate parameters, with one of the functions having to grow
an eighth parameter in the future to handle both usercopy and a custom
freelist pointer.

Add a struct kmem_cache_args structure and move the less common
parameters into it. Core parameters such as name, object size, and
flags continue to be passed separately.

Add a new function __kmem_cache_create_args() that takes a struct
kmem_cache_args pointer and port do_kmem_cache_create_usercopy() over
to it.

In follow-up patches we will port the other kmem_cache_create*()
variants over to it as well.

Reviewed-by: Kees Cook
Reviewed-by: Jens Axboe
Reviewed-by: Mike Rapoport (Microsoft)
Reviewed-by: Vlastimil Babka
Signed-off-by: Christian Brauner
Reviewed-by: Roman Gushchin
Signed-off-by: Vlastimil Babka
---
 include/linux/slab.h | 22 +++++++++++++++++
 mm/slab_common.c     | 67 +++++++++++++++++++++++++++++++++++++++-------------
 2 files changed, 73 insertions(+), 16 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 5b2da2cf31a8..2b8eeca7fd2c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -240,6 +240,28 @@ struct mem_cgroup;
  */
 bool slab_is_available(void);
 
+/**
+ * struct kmem_cache_args - Less common arguments for kmem_cache_create()
+ * @align: The required alignment for the objects.
+ * @useroffset: Usercopy region offset
+ * @usersize: Usercopy region size
+ * @freeptr_offset: Custom offset for the free pointer in RCU caches
+ * @use_freeptr_offset: Whether a @freeptr_offset is used
+ * @ctor: A constructor for the objects.
+ */
+struct kmem_cache_args {
+	unsigned int align;
+	unsigned int useroffset;
+	unsigned int usersize;
+	unsigned int freeptr_offset;
+	bool use_freeptr_offset;
+	void (*ctor)(void *);
+};
+
+struct kmem_cache *__kmem_cache_create_args(const char *name,
+					    unsigned int object_size,
+					    struct kmem_cache_args *args,
+					    slab_flags_t flags);
 struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
 			unsigned int align, slab_flags_t flags,
 			void (*ctor)(void *));
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 91e0e36e4379..0f13c045b8d1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -248,14 +248,24 @@ out:
 	return ERR_PTR(err);
 }
 
-static struct kmem_cache *
-do_kmem_cache_create_usercopy(const char *name,
-		unsigned int size, unsigned int freeptr_offset,
-		unsigned int align, slab_flags_t flags,
-		unsigned int useroffset, unsigned int usersize,
-		void (*ctor)(void *))
+/**
+ * __kmem_cache_create_args - Create a kmem cache
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @object_size: The size of objects to be created in this cache.
+ * @args: Arguments for the cache creation (see struct kmem_cache_args).
+ * @flags: See %SLAB_* flags for an explanation of individual @flags.
+ *
+ * Cannot be called within a interrupt, but can be interrupted.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+struct kmem_cache *__kmem_cache_create_args(const char *name,
+					    unsigned int object_size,
+					    struct kmem_cache_args *args,
+					    slab_flags_t flags)
 {
 	struct kmem_cache *s = NULL;
+	unsigned int freeptr_offset = UINT_MAX;
 	const char *cache_name;
 	int err;
 
@@ -275,7 +285,7 @@ do_kmem_cache_create_usercopy(const char *name,
 
 	mutex_lock(&slab_mutex);
 
-	err = kmem_cache_sanity_check(name, size);
+	err = kmem_cache_sanity_check(name, object_size);
 	if (err) {
 		goto out_unlock;
 	}
@@ -296,12 +306,14 @@ do_kmem_cache_create_usercopy(const char *name,
 
 	/* Fail closed on bad usersize of useroffset values. */
 	if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
-	    WARN_ON(!usersize && useroffset) ||
-	    WARN_ON(size < usersize || size - usersize < useroffset))
-		usersize = useroffset = 0;
-
-	if (!usersize)
-		s = __kmem_cache_alias(name, size, align, flags, ctor);
+	    WARN_ON(!args->usersize && args->useroffset) ||
+	    WARN_ON(object_size < args->usersize ||
+		    object_size - args->usersize < args->useroffset))
+		args->usersize = args->useroffset = 0;
+
+	if (!args->usersize)
+		s = __kmem_cache_alias(name, object_size, args->align, flags,
+				       args->ctor);
 	if (s)
 		goto out_unlock;
 
@@ -311,9 +323,11 @@ do_kmem_cache_create_usercopy(const char *name,
 		goto out_unlock;
 	}
 
-	s = create_cache(cache_name, size, freeptr_offset,
-			 calculate_alignment(flags, align, size),
-			 flags, useroffset, usersize, ctor);
+	if (args->use_freeptr_offset)
+		freeptr_offset = args->freeptr_offset;
+	s = create_cache(cache_name, object_size, freeptr_offset,
+			 calculate_alignment(flags, args->align, object_size),
+			 flags, args->useroffset, args->usersize, args->ctor);
 	if (IS_ERR(s)) {
 		err = PTR_ERR(s);
 		kfree_const(cache_name);
@@ -335,6 +349,27 @@ out_unlock:
 	}
 	return s;
 }
+EXPORT_SYMBOL(__kmem_cache_create_args);
+
+static struct kmem_cache *
+do_kmem_cache_create_usercopy(const char *name,
+		unsigned int size, unsigned int freeptr_offset,
+		unsigned int align, slab_flags_t flags,
+		unsigned int useroffset, unsigned int usersize,
+		void (*ctor)(void *))
+{
+	struct kmem_cache_args kmem_args = {
+		.align			= align,
+		.use_freeptr_offset	= freeptr_offset != UINT_MAX,
+		.freeptr_offset		= freeptr_offset,
+		.useroffset		= useroffset,
+		.usersize		= usersize,
+		.ctor			= ctor,
+	};
+
+	return __kmem_cache_create_args(name, size, &kmem_args, flags);
+}
 
 /**
  * kmem_cache_create_usercopy - Create a cache with a region suitable
--
cgit v1.2.3

From 052d67b46bcd91b6785e8e6e047241814f6142a3 Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Thu, 5 Sep 2024 09:56:53 +0200
Subject: slab: port KMEM_CACHE() to struct kmem_cache_args

Make KMEM_CACHE() use struct kmem_cache_args.

Reviewed-by: Kees Cook
Reviewed-by: Jens Axboe
Reviewed-by: Mike Rapoport (Microsoft)
Reviewed-by: Vlastimil Babka
Signed-off-by: Christian Brauner
Reviewed-by: Roman Gushchin
Signed-off-by: Vlastimil Babka
---
 include/linux/slab.h | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2b8eeca7fd2c..1f38d94387cc 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -284,9 +284,11 @@ int kmem_cache_shrink(struct kmem_cache *s);
  * f.e. add ____cacheline_aligned_in_smp to the struct declaration
  * then the objects will be properly aligned in SMP configurations.
  */
-#define KMEM_CACHE(__struct, __flags)					\
-		kmem_cache_create(#__struct, sizeof(struct __struct),	\
-			__alignof__(struct __struct), (__flags), NULL)
+#define KMEM_CACHE(__struct, __flags)					\
+	__kmem_cache_create_args(#__struct, sizeof(struct __struct),	\
+			&(struct kmem_cache_args) {			\
+				.align = __alignof__(struct __struct),	\
+			}, (__flags))
 
 /*
  * To whitelist a single field for copying to/from usercopy, use this
--
cgit v1.2.3
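With the args struct in place, a caller only names the options it cares
about; everything left out stays zero-initialized and is treated as unused.
For instance, KMEM_CACHE(bar, SLAB_ACCOUNT) now expands to roughly the
open-coded form below (struct bar is hypothetical, only the API is from
the patches above):

    /* Hypothetical cache user spelling out what KMEM_CACHE() expands to. */
    struct bar {
    	unsigned long state;
    };

    static struct kmem_cache *bar_cachep;

    static int __init bar_init(void)
    {
    	struct kmem_cache_args args = {
    		.align = __alignof__(struct bar),
    		/* .useroffset/.usersize/.ctor stay 0/NULL: unused */
    	};

    	bar_cachep = __kmem_cache_create_args("bar", sizeof(struct bar),
    					      &args, SLAB_ACCOUNT);
    	return bar_cachep ? 0 : -ENOMEM;
    }

The designated-initializer style is what makes the consolidation work:
adding a new rarely-used option later means adding a field, not an eighth
positional parameter.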
From 199cd13a745eb44fb4828bca373155293cdcfa5c Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Thu, 5 Sep 2024 09:56:54 +0200
Subject: slab: port KMEM_CACHE_USERCOPY() to struct kmem_cache_args

Make KMEM_CACHE_USERCOPY() use struct kmem_cache_args.

Reviewed-by: Kees Cook
Reviewed-by: Jens Axboe
Reviewed-by: Mike Rapoport (Microsoft)
Reviewed-by: Vlastimil Babka
Signed-off-by: Christian Brauner
Reviewed-by: Roman Gushchin
Signed-off-by: Vlastimil Babka
---
 include/linux/slab.h | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1f38d94387cc..7e15a3a3edb1 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -294,12 +294,13 @@ int kmem_cache_shrink(struct kmem_cache *s);
  * To whitelist a single field for copying to/from usercopy, use this
  * macro instead for KMEM_CACHE() above.
  */
-#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
-		kmem_cache_create_usercopy(#__struct,			\
-			sizeof(struct __struct),			\
-			__alignof__(struct __struct), (__flags),	\
-			offsetof(struct __struct, __field),		\
-			sizeof_field(struct __struct, __field), NULL)
+#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
+	__kmem_cache_create_args(#__struct, sizeof(struct __struct),	\
+			&(struct kmem_cache_args) {			\
+				.align = __alignof__(struct __struct),	\
+				.useroffset = offsetof(struct __struct, __field), \
+				.usersize = sizeof_field(struct __struct, __field), \
+			}, (__flags))
 
 /*
  * Common kmalloc functions provided by all allocators
--
cgit v1.2.3
From b2e7456b5c25c41eda7a8a15f7ccaa4e7579949f Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Thu, 5 Sep 2024 09:56:55 +0200
Subject: slab: create kmem_cache_create() compatibility layer

Use _Generic() to create a compatibility layer that type switches on
the third argument to call either __kmem_cache_create() or
__kmem_cache_create_args(). If NULL is passed for the struct
kmem_cache_args argument, use default args, making porting easy for
callers that don't care about additional arguments.

Reviewed-by: Kees Cook
Reviewed-by: Jens Axboe
Signed-off-by: Christian Brauner
Reviewed-by: Mike Rapoport (Microsoft)
Reviewed-by: Roman Gushchin
Signed-off-by: Vlastimil Babka
---
 include/linux/slab.h | 29 ++++++++++++++++++++++++++---
 mm/slab_common.c     | 10 +++++-----
 2 files changed, 31 insertions(+), 8 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7e15a3a3edb1..15eb0a0defd6 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -262,9 +262,10 @@ struct kmem_cache *__kmem_cache_create_args(const char *name,
 					    unsigned int object_size,
 					    struct kmem_cache_args *args,
 					    slab_flags_t flags);
-struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
-			unsigned int align, slab_flags_t flags,
-			void (*ctor)(void *));
+
+struct kmem_cache *__kmem_cache_create(const char *name, unsigned int size,
+				       unsigned int align, slab_flags_t flags,
+				       void (*ctor)(void *));
 struct kmem_cache *kmem_cache_create_usercopy(const char *name,
 			unsigned int size, unsigned int align,
 			slab_flags_t flags,
@@ -273,6 +274,28 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
 			unsigned int useroffset, unsigned int usersize,
 			void (*ctor)(void *));
 struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
 					 unsigned int freeptr_offset,
 					 slab_flags_t flags);
+
+/* If NULL is passed for @args, use this variant with default arguments. */
+static inline struct kmem_cache *
+__kmem_cache_default_args(const char *name, unsigned int size,
+			  struct kmem_cache_args *args,
+			  slab_flags_t flags)
+{
+	struct kmem_cache_args kmem_default_args = {};
+
+	/* Make sure we don't get passed garbage. */
+	if (WARN_ON_ONCE(args))
+		return ERR_PTR(-EINVAL);
+
+	return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
+}
+
+#define kmem_cache_create(__name, __object_size, __args, ...)		\
+	_Generic((__args),						\
+		struct kmem_cache_args *: __kmem_cache_create_args,	\
+		void *: __kmem_cache_default_args,			\
+		default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)
+
 void kmem_cache_destroy(struct kmem_cache *s);
 int kmem_cache_shrink(struct kmem_cache *s);
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 19ae3dd6e36f..418459927670 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -383,7 +383,7 @@ kmem_cache_create_usercopy(const char *name, unsigned int size,
 EXPORT_SYMBOL(kmem_cache_create_usercopy);
 
 /**
- * kmem_cache_create - Create a cache.
+ * __kmem_cache_create - Create a cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
  * @size: The size of objects to be created in this cache.
  * @align: The required alignment for the objects.
@@ -407,9 +407,9 @@ EXPORT_SYMBOL(kmem_cache_create_usercopy);
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
-struct kmem_cache *
-kmem_cache_create(const char *name, unsigned int size, unsigned int align,
-		slab_flags_t flags, void (*ctor)(void *))
+struct kmem_cache *__kmem_cache_create(const char *name, unsigned int size,
+				       unsigned int align, slab_flags_t flags,
+				       void (*ctor)(void *))
 {
 	struct kmem_cache_args kmem_args = {
 		.align = align,
@@ -418,7 +418,7 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 
 	return __kmem_cache_create_args(name, size, &kmem_args, flags);
 }
-EXPORT_SYMBOL(kmem_cache_create);
+EXPORT_SYMBOL(__kmem_cache_create);
 
 /**
  * kmem_cache_create_rcu - Create a SLAB_TYPESAFE_BY_RCU cache.
--
cgit v1.2.3
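The _Generic() selection is plain C11 and can be demonstrated outside the
kernel. Below is a minimal standalone model of the dispatch — names
shortened and bodies stubbed, so it only illustrates the mechanism, not
the kernel code:

    #include <stdio.h>

    struct cache_args { unsigned int align; };

    /* Stand-ins for the three targets of the kernel's _Generic() macro. */
    static void *create_args(const char *n, unsigned int sz,
    			 struct cache_args *a, unsigned long fl)
    { printf("args variant: %s\n", n); return 0; }

    static void *create_default(const char *n, unsigned int sz,
    			    struct cache_args *a, unsigned long fl)
    { printf("default (NULL args): %s\n", n); return 0; }

    static void *create_legacy(const char *n, unsigned int sz,
    			   unsigned int align, unsigned long fl,
    			   void (*ctor)(void *))
    { printf("legacy 5-arg variant: %s\n", n); return 0; }

    #define cache_create(name, size, args, ...)			\
    	_Generic((args),					\
    		struct cache_args *: create_args,		\
    		void *: create_default,				\
    		default: create_legacy)(name, size, args, __VA_ARGS__)

    int main(void)
    {
    	struct cache_args a = { .align = 8 };

    	cache_create("new", 64, &a, 0UL);	/* struct cache_args * branch */
    	cache_create("compat", 64, NULL, 0UL);	/* NULL is (void *)0 here */
    	cache_create("legacy", 64, 16u, 0UL, (void (*)(void *))0);
    	return 0;
    }

Only the selected function is called; the unsigned third argument in the
last call falls through to the default branch, which is exactly how
existing five-argument kmem_cache_create() callers keep compiling unchanged.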
From 3d453e60f1a9c377d670d5fe306d3b9eed477bc0 Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Thu, 5 Sep 2024 09:56:57 +0200
Subject: slab: remove kmem_cache_create_rcu()

Now that we have ported all users of kmem_cache_create_rcu() to struct
kmem_cache_args the function is unused and can be removed.

Reviewed-by: Kees Cook
Reviewed-by: Jens Axboe
Reviewed-by: Mike Rapoport (Microsoft)
Reviewed-by: Vlastimil Babka
Signed-off-by: Christian Brauner
Reviewed-by: Roman Gushchin
Signed-off-by: Vlastimil Babka
---
 include/linux/slab.h |  3 ---
 mm/slab_common.c     | 43 -------------------------------------------
 2 files changed, 46 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 15eb0a0defd6..cb82a6414a28 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -271,9 +271,6 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
 			slab_flags_t flags,
 			unsigned int useroffset, unsigned int usersize,
 			void (*ctor)(void *));
-struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
-					 unsigned int freeptr_offset,
-					 slab_flags_t flags);
 
 /* If NULL is passed for @args, use this variant with default arguments. */
 static inline struct kmem_cache *
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 418459927670..9133b9fafcb1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -420,49 +420,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, unsigned int size,
 }
 EXPORT_SYMBOL(__kmem_cache_create);
 
-/**
- * kmem_cache_create_rcu - Create a SLAB_TYPESAFE_BY_RCU cache.
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @freeptr_offset: The offset into the memory to the free pointer
- * @flags: SLAB flags
- *
- * Cannot be called within an interrupt, but can be interrupted.
- *
- * See kmem_cache_create() for an explanation of possible @flags.
- *
- * By default SLAB_TYPESAFE_BY_RCU caches place the free pointer outside
- * of the object. This might cause the object to grow in size. Callers
- * that have a reason to avoid this can specify a custom free pointer
- * offset in their struct where the free pointer will be placed.
- *
- * Note that placing the free pointer inside the object requires the
- * caller to ensure that no fields are invalidated that are required to
- * guard against object recycling (See SLAB_TYPESAFE_BY_RCU for
- * details.).
- *
- * Using zero as a value for @freeptr_offset is valid. To request no
- * offset UINT_MAX must be specified.
- *
- * Note that @ctor isn't supported with custom free pointers as a @ctor
- * requires an external free pointer.
- *
- * Return: a pointer to the cache on success, NULL on failure.
- */
-struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
-					 unsigned int freeptr_offset,
-					 slab_flags_t flags)
-{
-	struct kmem_cache_args kmem_args = {
-		.freeptr_offset		= freeptr_offset,
-		.use_freeptr_offset	= true,
-	};
-
-	return __kmem_cache_create_args(name, size, &kmem_args,
-					flags | SLAB_TYPESAFE_BY_RCU);
-}
-EXPORT_SYMBOL(kmem_cache_create_rcu);
-
 static struct kmem_cache *kmem_buckets_cache __ro_after_init;
 
 /**
--
cgit v1.2.3
From 0c9050b09cfb1ddb3fe4b2d401dca1fb674c825a Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Thu, 5 Sep 2024 09:56:58 +0200
Subject: slab: make kmem_cache_create_usercopy() static inline

Make kmem_cache_create_usercopy() a static inline function.

Signed-off-by: Christian Brauner
Reviewed-by: Mike Rapoport (Microsoft)
Reviewed-by: Roman Gushchin
Signed-off-by: Vlastimil Babka
---
 include/linux/slab.h | 49 ++++++++++++++++++++++++++++++++++++++++++-----
 mm/slab_common.c     | 45 ---------------------------------------------
 2 files changed, 44 insertions(+), 50 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/slab.h b/include/linux/slab.h
index cb82a6414a28..634717c9f73b 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -266,11 +266,50 @@ struct kmem_cache *__kmem_cache_create_args(const char *name,
 struct kmem_cache *__kmem_cache_create(const char *name, unsigned int size,
 				       unsigned int align, slab_flags_t flags,
 				       void (*ctor)(void *));
-struct kmem_cache *kmem_cache_create_usercopy(const char *name,
-			unsigned int size, unsigned int align,
-			slab_flags_t flags,
-			unsigned int useroffset, unsigned int usersize,
-			void (*ctor)(void *));
+
+/**
+ * kmem_cache_create_usercopy - Create a cache with a region suitable
+ *				for copying to userspace
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @useroffset: Usercopy region offset
+ * @usersize: Usercopy region size
+ * @ctor: A constructor for the objects.
+ *
+ * Cannot be called within a interrupt, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache.
+ *
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+static inline struct kmem_cache *
+kmem_cache_create_usercopy(const char *name, unsigned int size,
+			   unsigned int align, slab_flags_t flags,
+			   unsigned int useroffset, unsigned int usersize,
+			   void (*ctor)(void *))
+{
+	struct kmem_cache_args kmem_args = {
+		.align		= align,
+		.ctor		= ctor,
+		.useroffset	= useroffset,
+		.usersize	= usersize,
+	};
+
+	return __kmem_cache_create_args(name, size, &kmem_args, flags);
+}
 
 /* If NULL is passed for @args, use this variant with default arguments. */
 static inline struct kmem_cache *
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 9133b9fafcb1..3477a3918afd 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -337,51 +337,6 @@ out_unlock:
 }
 EXPORT_SYMBOL(__kmem_cache_create_args);
 
-/**
- * kmem_cache_create_usercopy - Create a cache with a region suitable
- *				for copying to userspace
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
- * @flags: SLAB flags
- * @useroffset: Usercopy region offset
- * @usersize: Usercopy region size
- * @ctor: A constructor for the objects.
- *
- * Cannot be called within a interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
- *
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
- *
- * Return: a pointer to the cache on success, NULL on failure.
- */
-struct kmem_cache *
-kmem_cache_create_usercopy(const char *name, unsigned int size,
-			   unsigned int align, slab_flags_t flags,
-			   unsigned int useroffset, unsigned int usersize,
-			   void (*ctor)(void *))
-{
-	struct kmem_cache_args kmem_args = {
-		.align		= align,
-		.ctor		= ctor,
-		.useroffset	= useroffset,
-		.usersize	= usersize,
-	};
-
-	return __kmem_cache_create_args(name, size, &kmem_args, flags);
-}
-EXPORT_SYMBOL(kmem_cache_create_usercopy);
-
 /**
  * __kmem_cache_create - Create a cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
--
cgit v1.2.3
From 781aee755638dbb6dbfe022bdd2f732621ec9534 Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Thu, 5 Sep 2024 09:56:59 +0200
Subject: slab: make __kmem_cache_create() static inline

Make __kmem_cache_create() a static inline function.

Signed-off-by: Christian Brauner
Reviewed-by: Mike Rapoport (Microsoft)
Reviewed-by: Roman Gushchin
Signed-off-by: Vlastimil Babka
---
 include/linux/slab.h | 13 ++++++++++---
 mm/slab_common.c     | 38 --------------------------------------
 2 files changed, 10 insertions(+), 41 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 634717c9f73b..331412a9f4f2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -262,10 +262,17 @@ struct kmem_cache *__kmem_cache_create_args(const char *name,
 					    unsigned int object_size,
 					    struct kmem_cache_args *args,
 					    slab_flags_t flags);
+static inline struct kmem_cache *
+__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
+		    slab_flags_t flags, void (*ctor)(void *))
+{
+	struct kmem_cache_args kmem_args = {
+		.align	= align,
+		.ctor	= ctor,
+	};
 
-struct kmem_cache *__kmem_cache_create(const char *name, unsigned int size,
-				       unsigned int align, slab_flags_t flags,
-				       void (*ctor)(void *));
+	return __kmem_cache_create_args(name, size, &kmem_args, flags);
+}
 
 /**
  * kmem_cache_create_usercopy - Create a cache with a region suitable
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3477a3918afd..30000dcf0736 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -337,44 +337,6 @@ out_unlock:
 }
 EXPORT_SYMBOL(__kmem_cache_create_args);
 
-/**
- * __kmem_cache_create - Create a cache.
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
- * @flags: SLAB flags
- * @ctor: A constructor for the objects.
- *
- * Cannot be called within a interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
- *
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
- *
- * Return: a pointer to the cache on success, NULL on failure.
- */
-struct kmem_cache *__kmem_cache_create(const char *name, unsigned int size,
-				       unsigned int align, slab_flags_t flags,
-				       void (*ctor)(void *))
-{
-	struct kmem_cache_args kmem_args = {
-		.align	= align,
-		.ctor	= ctor,
-	};
-
-	return __kmem_cache_create_args(name, size, &kmem_args, flags);
-}
-EXPORT_SYMBOL(__kmem_cache_create);
-
 static struct kmem_cache *kmem_buckets_cache __ro_after_init;
 
 /**
--
cgit v1.2.3

From 4b7ff9ab98af11a477d50f08382bcc4c2f899926 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Fri, 13 Sep 2024 10:15:56 +0200
Subject: mm, slab: restore kerneldoc for kmem_cache_create()

As kmem_cache_create() became a _Generic() wrapper macro, it currently
has no kerneldoc despite being the main API to use. Add it. Also adjust
kmem_cache_create_usercopy() kerneldoc to indicate it is now a legacy
wrapper.

Also expand the kerneldoc for struct kmem_cache_args, especially for
the freeptr_offset field, where important details were removed with
the removal of kmem_cache_create_rcu().

Signed-off-by: Vlastimil Babka
Reviewed-by: Christian Brauner
---
 include/linux/slab.h | 114 ++++++++++++++++++++++++++++++++++++++++-----------
 mm/slab_common.c     |  10 +++--
 2 files changed, 98 insertions(+), 26 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 331412a9f4f2..6a8ab7ef3af7 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -242,19 +242,72 @@ bool slab_is_available(void);
 
 /**
  * struct kmem_cache_args - Less common arguments for kmem_cache_create()
- * @align: The required alignment for the objects.
- * @useroffset: Usercopy region offset
- * @usersize: Usercopy region size
- * @freeptr_offset: Custom offset for the free pointer in RCU caches
- * @use_freeptr_offset: Whether a @freeptr_offset is used
- * @ctor: A constructor for the objects.
+ *
+ * Any uninitialized fields of the structure are interpreted as unused. The
+ * exception is @freeptr_offset where %0 is a valid value, so
+ * @use_freeptr_offset must be also set to %true in order to interpret the
+ * field as used. For @useroffset %0 is also valid, but only with non-%0
+ * @usersize.
+ *
+ * When %NULL args is passed to kmem_cache_create(), it is equivalent to all
+ * fields unused.
  */
 struct kmem_cache_args {
+	/**
+	 * @align: The required alignment for the objects.
+	 *
+	 * %0 means no specific alignment is requested.
+	 */
 	unsigned int align;
+	/**
+	 * @useroffset: Usercopy region offset.
+	 *
+	 * %0 is a valid offset, when @usersize is non-%0
+	 */
 	unsigned int useroffset;
+	/**
+	 * @usersize: Usercopy region size.
+	 *
+	 * %0 means no usercopy region is specified.
+	 */
 	unsigned int usersize;
+	/**
+	 * @freeptr_offset: Custom offset for the free pointer
+	 * in &SLAB_TYPESAFE_BY_RCU caches
+	 *
+	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
+	 * outside of the object. This might cause the object to grow in size.
+	 * Cache creators that have a reason to avoid this can specify a custom
+	 * free pointer offset in their struct where the free pointer will be
+	 * placed.
+	 *
+	 * Note that placing the free pointer inside the object requires the
+	 * caller to ensure that no fields are invalidated that are required to
+	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
+	 * details).
+	 *
+	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
+	 * is specified, %use_freeptr_offset must be set %true.
+	 *
+	 * Note that @ctor currently isn't supported with custom free pointers
+	 * as a @ctor requires an external free pointer.
+	 */
 	unsigned int freeptr_offset;
+	/**
+	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
+	 */
 	bool use_freeptr_offset;
+	/**
+	 * @ctor: A constructor for the objects.
+	 *
+	 * The constructor is invoked for each object in a newly allocated slab
+	 * page. It is the cache user's responsibility to free object in the
+	 * same state as after calling the constructor, or deal appropriately
+	 * with any differences between a freshly constructed and a reallocated
+	 * object.
+	 *
+	 * %NULL means no constructor.
+	 */
 	void (*ctor)(void *);
 };
 
@@ -275,30 +328,20 @@ __kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 }
 
 /**
- * kmem_cache_create_usercopy - Create a cache with a region suitable
- *				for copying to userspace
+ * kmem_cache_create_usercopy - Create a kmem cache with a region suitable
+ * for copying to userspace.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
  * @size: The size of objects to be created in this cache.
  * @align: The required alignment for the objects.
  * @flags: SLAB flags
  * @useroffset: Usercopy region offset
  * @usersize: Usercopy region size
- * @ctor: A constructor for the objects.
- *
- * Cannot be called within a interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
- *
- * The flags are
+ * @ctor: A constructor for the objects, or %NULL.
  *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
+ * This is a legacy wrapper, new code should use either KMEM_CACHE_USERCOPY()
+ * if whitelisting a single field is sufficient, or kmem_cache_create() with
+ * the necessary parameters passed via the args parameter (see
+ * &struct kmem_cache_args)
  *
  * Return: a pointer to the cache on success, NULL on failure.
  */
@@ -333,6 +376,31 @@ __kmem_cache_default_args(const char *name, unsigned int size,
 	return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
 }
 
+/**
+ * kmem_cache_create - Create a kmem cache.
+ * @__name: A string which is used in /proc/slabinfo to identify this cache.
+ * @__object_size: The size of objects to be created in this cache.
+ * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
+ *	    means defaults will be used for all the arguments.
+ *
+ * This is currently implemented as a macro using ``_Generic()`` to call
+ * either the new variant of the function, or a legacy one.
+ *
+ * The new variant has 4 parameters:
+ * ``kmem_cache_create(name, object_size, args, flags)``
+ *
+ * See __kmem_cache_create_args() which implements this.
+ *
+ * The legacy variant has 5 parameters:
+ * ``kmem_cache_create(name, object_size, align, flags, ctor)``
+ *
+ * The align and ctor parameters map to the respective fields of
+ * &struct kmem_cache_args
+ *
+ * Context: Cannot be called within a interrupt, but can be interrupted.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
 #define kmem_cache_create(__name, __object_size, __args, ...)		\
 	_Generic((__args),						\
 		struct kmem_cache_args *: __kmem_cache_create_args,	\
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 30000dcf0736..86c2e6f4a1ce 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -239,13 +239,17 @@ out:
 }
 
 /**
- * __kmem_cache_create_args - Create a kmem cache
+ * __kmem_cache_create_args - Create a kmem cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
  * @object_size: The size of objects to be created in this cache.
- * @args: Arguments for the cache creation (see struct kmem_cache_args).
+ * @args: Additional arguments for the cache creation (see
+ *        &struct kmem_cache_args).
  * @flags: See %SLAB_* flags for an explanation of individual @flags.
  *
- * Cannot be called within a interrupt, but can be interrupted.
+ * Not to be called directly, use the kmem_cache_create() wrapper with the
+ * same parameters.
+ *
+ * Context: Cannot be called within a interrupt, but can be interrupted.
  *
  * Return: a pointer to the cache on success, NULL on failure.
  */
--
cgit v1.2.3
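After the whole series, the three call styles documented by the restored
kerneldoc coexist behind one macro. A hedged usage sketch — struct baz and
its constructor are hypothetical; the call forms are the ones the kerneldoc
above describes:

    /* Hypothetical caller exercising all three accepted forms. */
    struct baz { int x; };

    static void baz_ctor(void *obj)
    {
    	((struct baz *)obj)->x = 0;
    }

    static struct kmem_cache *a, *b, *c;

    static int __init baz_init(void)
    {
    	/* New style: 4 arguments, options in struct kmem_cache_args. */
    	a = kmem_cache_create("baz-args", sizeof(struct baz),
    			      &(struct kmem_cache_args){ .ctor = baz_ctor },
    			      SLAB_ACCOUNT);
    	/* NULL args: all defaults, dispatched via __kmem_cache_default_args(). */
    	b = kmem_cache_create("baz-default", sizeof(struct baz), NULL, 0);
    	/* Legacy style: 5 arguments, dispatched to __kmem_cache_create(). */
    	c = kmem_cache_create("baz-legacy", sizeof(struct baz),
    			      __alignof__(struct baz), 0, baz_ctor);
    	return (a && b && c) ? 0 : -ENOMEM;
    }

The net effect of the series: struct file shrank from 232 to 192 bytes and
stays at three cachelines even with SLAB_TYPESAFE_BY_RCU, and the slab
creation API converged on a single documented entry point without breaking
any existing caller.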