From 1934b212615dc617ac84fc306333ab2b9fc3b04f Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Fri, 9 Aug 2024 18:00:01 +0200
Subject: file: reclaim 24 bytes from f_owner

We currently embed struct fown_struct in struct file, where it takes up
32 bytes in total. We could tweak struct fown_struct to be more compact
but really it shouldn't even be embedded in struct file in the first
place. Instead, actual users of struct fown_struct should allocate the
struct on demand. This frees up 24 bytes in struct file.

That will have some potentially user-visible changes for the ownership
fcntl()s. Some of them can now fail due to allocation failures.
Practically, that will almost never happen as the allocations are small
and they only happen once per file.

The fown_struct is used during kill_fasync() which is used by e.g.,
pipes to generate a SIGIO signal. Sending of such signals is
conditional on userspace having set an owner for the file using one of
the F_OWNER fcntl()s. Such users will be unaffected if struct
fown_struct is allocated during the fcntl() call.

There are a few subsystems that call __f_setown() expecting
file->f_owner to be allocated:

(1) tun devices

    file->f_op->fasync::tun_chr_fasync()
    -> __f_setown()

    There are no callers of tun_chr_fasync().

(2) tty devices

    file->f_op->fasync::tty_fasync()
    -> __tty_fasync()
       -> __f_setown()

    tty_fasync() has no additional callers but __tty_fasync() has.
    Note that __tty_fasync() only calls __f_setown() if the @on
    argument is true. It's called from:

    file->f_op->release::tty_release()
    -> tty_release()
       -> __tty_fasync()
          -> __f_setown()

    tty_release() calls __tty_fasync() with @on false
    => __f_setown() is never called from tty_release().
    => All callers of tty_release() are safe as well. In particular,
       tty_open() reaches __tty_fasync() only via tty_release(), which
       was just shown to never call __f_setown().

    __tty_hangup()
    -> __tty_fasync()
       -> __f_setown()

    __tty_hangup() calls __tty_fasync() with @on false
    => __f_setown() is never called from __tty_hangup().
    => All callers of __tty_hangup() are safe as well.

From the callchains it's obvious that (1) and (2) end up getting called
via file->f_op->fasync(). That can happen either through the F_SETFL
fcntl() with the FASYNC flag raised or via the FIOASYNC ioctl(). If
FASYNC is requested and the file isn't already FASYNC then
file->f_op->fasync() is called with @on true which ends up causing both
(1) and (2) to call __f_setown().

(1) and (2) are the only subsystems that call __f_setown() from the
file->f_op->fasync() handler. So both (1) and (2) have been updated to
allocate a struct fown_struct prior to calling fasync_helper() to
register with the fasync infrastructure. That's safe as they both call
fasync_helper() which also does allocations if @on is true.

The other interesting case is file leases:

(3) file leases

    lease_manager_ops->lm_setup::lease_setup()
    -> __f_setown()

    which in turn is called from:

    generic_add_lease()
    -> lease_manager_ops->lm_setup::lease_setup()
       -> __f_setown()

So here again we can simply make generic_add_lease() allocate struct
fown_struct prior to lease_manager_ops->lm_setup::lease_setup(), which
happens under a spinlock.

With that, the two remaining subsystems that call __f_setown() are:

(4) dnotify
(5) sockets

Both have their own custom ioctls to set struct fown_struct and both
have been converted to allocate a struct fown_struct on demand from
their respective ioctls.

Interactions with O_PATH are fine as well: when opening, e.g., /dev/tty
as O_PATH no file->f_op->open() happens, thus no file->f_owner is
allocated.
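As a concrete illustration of when the on-demand allocation now
happens, consider the following minimal userspace sketch (not part of
this patch; it only uses long-standing fcntl() APIs). F_SETOWN is one
of the ownership fcntl()s that can now fail with ENOMEM, and raising
FASYNC is what ends up in file->f_op->fasync():

/* Illustration only, not part of this patch: the two fcntl()s below
 * are the operations that now allocate file->f_owner on demand. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/tty", O_RDONLY);
	if (fd < 0)
		return 1;

	/* Sets the SIGIO/SIGURG recipient; allocates f_owner on first
	 * use and may therefore now fail with ENOMEM. */
	if (fcntl(fd, F_SETOWN, getpid()) < 0)
		perror("F_SETOWN");

	/* Raising FASYNC invokes file->f_op->fasync() with @on true. */
	if (fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC) < 0)
		perror("F_SETFL");

	close(fd);
	return 0;
}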
That's fine as no file operations will be set for those and the device
has never been opened. fcntl()s called on such fds will just allocate a
->f_owner on demand. Although I have zero idea why you'd care about
f_owner on an O_PATH fd.

Link: https://lore.kernel.org/r/20240813-work-f_owner-v2-1-4e9343a79f9f@kernel.org
Reviewed-by: Jeff Layton
Signed-off-by: Christian Brauner
---
 include/linux/fs.h | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/fs.h b/include/linux/fs.h
index fb0426f349fc..7af239ca87e2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -947,6 +947,7 @@ static inline unsigned imajor(const struct inode *inode)
 }
 
 struct fown_struct {
+	struct file *file;	/* backpointer for security modules */
 	rwlock_t lock;		/* protects pid, uid, euid fields */
 	struct pid *pid;	/* pid or -pgrp where SIGIO should be sent */
 	enum pid_type pid_type;	/* Kind of process group SIGIO should be sent to */
@@ -1011,7 +1012,7 @@ struct file {
 	struct mutex		f_pos_lock;
 	loff_t			f_pos;
 	unsigned int		f_flags;
-	struct fown_struct	f_owner;
+	struct fown_struct	*f_owner;
 	const struct cred	*f_cred;
 	struct file_ra_state	f_ra;
 	struct path		f_path;
@@ -1076,6 +1077,12 @@ struct file_lease;
 #define OFFT_OFFSET_MAX	type_max(off_t)
 #endif
 
+int file_f_owner_allocate(struct file *file);
+static inline struct fown_struct *file_f_owner(const struct file *file)
+{
+	return READ_ONCE(file->f_owner);
+}
+
 extern void send_sigio(struct fown_struct *fown, int fd, int band);
 
 static inline struct inode *file_inode(const struct file *f)
@@ -1124,7 +1131,7 @@ extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force
 extern int f_setown(struct file *filp, int who, int force);
 extern void f_delown(struct file *filp);
 extern pid_t f_getown(struct file *filp);
-extern int send_sigurg(struct fown_struct *fown);
+extern int send_sigurg(struct file *file);
 
 /*
  * sb->s_flags.  Note that these mirror the equivalent MS_* flags where
-- cgit v1.2.3
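For reference, an on-demand allocator along the lines the header above
declares might look like this (a sketch; the actual fs/fcntl.c
implementation may differ in detail):

/* Sketch of an on-demand f_owner allocator; the actual fs/fcntl.c
 * implementation may differ. A bare cmpxchg() resolves the race
 * between two tasks allocating concurrently. */
int file_f_owner_allocate(struct file *file)
{
	struct fown_struct *f_owner;

	if (file_f_owner(file))
		return 0;

	f_owner = kzalloc(sizeof(struct fown_struct), GFP_KERNEL);
	if (!f_owner)
		return -ENOMEM;

	rwlock_init(&f_owner->lock);
	f_owner->file = file;
	/* Lost the race? Drop our allocation and reuse the winner's. */
	if (cmpxchg(&file->f_owner, NULL, f_owner))
		kfree(f_owner);
	return 0;
}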
From a55d1cbd1720679cfe9837bce250e397ec513989 Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Thu, 22 Aug 2024 16:14:46 +0200
Subject: fs: switch f_iocb_flags and f_ra

Now that we shrank struct file by 24 bytes we still have a 4-byte hole.
If we move struct file_ra_state into the union and f_iocb_flags out of
the union we close that hole and bring struct file down to 192 bytes.
Which means struct file is 3 cachelines and we managed to shrink it by
40 bytes this cycle.

I've tried to audit all codepaths that use f_ra and none of them seem
to rely on it in file->f_op->release() and never have since commit
1da177e4c3f4 ("Linux-2.6.12-rc2").

Link: https://lore.kernel.org/r/20240823-luftdicht-berappen-d69a2166a0db@brauner
Reviewed-by: Jeff Layton
Reviewed-by: Jens Axboe
Signed-off-by: Christian Brauner
---
 include/linux/fs.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7af239ca87e2..095a956aeb29 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -999,9 +999,9 @@ struct file {
 		struct callback_head	f_task_work;
 		/* fput() must use workqueue (most kernel threads). */
 		struct llist_node	f_llist;
-		unsigned int		f_iocb_flags;
+		/* Invalid after last fput(). */
+		struct file_ra_state	f_ra;
 	};
-
 	/*
 	 * Protects f_ep, f_flags.
	 * Must not be taken from IRQ context.
@@ -1012,9 +1012,9 @@ struct file {
 	struct mutex		f_pos_lock;
 	loff_t			f_pos;
 	unsigned int		f_flags;
+	unsigned int		f_iocb_flags;
 	struct fown_struct	*f_owner;
 	const struct cred	*f_cred;
-	struct file_ra_state	f_ra;
 	struct path		f_path;
 	struct inode		*f_inode;	/* cached value */
 	const struct file_operations	*f_op;
-- cgit v1.2.3

From c0390d541128e8820af8177a572d9d87ff68a3bb Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Fri, 23 Aug 2024 21:06:58 +0200
Subject: fs: pack struct file

Now that we have shrunk struct file to 192 bytes, aka 3 cachelines,
reorder struct file to not leave any holes or have members cross
cachelines.

Add a short comment to each of the fields and mark the cachelines.

It's possible that we may have to tweak this based on profiling in the
future. So far I had Jens test this comparing io_uring with non-fixed
and fixed files and it improved performance.

The layout is a combination of Jens' and my changes.

Link: https://lore.kernel.org/r/20240824-peinigen-hocken-7384b977c643@brauner
Signed-off-by: Christian Brauner
---
 include/linux/fs.h | 91 ++++++++++++++++++++++++++++++------------------------
 1 file changed, 51 insertions(+), 40 deletions(-)

(limited to 'include/linux')
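(Editorial aside: the cacheline markers added below can be
cross-checked with pahole(1), e.g. "pahole -C file vmlinux". The same
invariants could hypothetically be expressed as build-time checks —
this sketch is not part of the patch, and the offsets only hold on a
non-debug, non-randomized layout with 64-byte cachelines:)

/* Hypothetical sketch, not part of this patch: offsets assume a
 * non-debug config (lockdep and CONFIG_RANDSTRUCT change them) and
 * 64-byte cachelines. */
#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/fs.h>

static void __maybe_unused check_struct_file_layout(void)
{
	/* The hot fields fill cacheline 0 exactly, so f_path starts
	 * cacheline 1, as the comments in the new layout claim. */
	BUILD_BUG_ON(offsetof(struct file, f_path) != L1_CACHE_BYTES);
	/* The whole struct must stay within three cachelines. */
	BUILD_BUG_ON(sizeof(struct file) > 3 * L1_CACHE_BYTES);
}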
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 095a956aeb29..af8bbd4eeb3a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -987,52 +987,63 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
 		index < ra->start + ra->size);
 }
 
-/*
- * f_{lock,count,pos_lock} members can be highly contended and share
- * the same cacheline. f_{lock,mode} are very frequently used together
- * and so share the same cacheline as well. The read-mostly
- * f_{path,inode,op} are kept on a separate cacheline.
+/**
+ * struct file - Represents a file
+ * @f_count: reference count
+ * @f_lock: Protects f_ep, f_flags. Must not be taken from IRQ context.
+ * @f_mode: FMODE_* flags often used in hotpaths
+ * @f_op: file operations
+ * @f_mapping: Contents of a cacheable, mappable object.
+ * @private_data: filesystem or driver specific data
+ * @f_inode: cached inode
+ * @f_flags: file flags
+ * @f_iocb_flags: iocb flags
+ * @f_cred: stashed credentials of creator/opener
+ * @f_path: path of the file
+ * @f_pos_lock: lock protecting file position
+ * @f_pos: file position
+ * @f_version: file version
+ * @f_security: LSM security context of this file
+ * @f_owner: file owner
+ * @f_wb_err: writeback error
+ * @f_sb_err: per sb writeback errors
+ * @f_ep: link of all epoll hooks for this file
+ * @f_task_work: task work entry point
+ * @f_llist: work queue entrypoint
+ * @f_ra: file's readahead state
  */
 struct file {
-	union {
-		/* fput() uses task work when closing and freeing file (default). */
-		struct callback_head	f_task_work;
-		/* fput() must use workqueue (most kernel threads). */
-		struct llist_node	f_llist;
-		/* Invalid after last fput(). */
-		struct file_ra_state	f_ra;
-	};
-	/*
-	 * Protects f_ep, f_flags.
-	 * Must not be taken from IRQ context.
-	 */
-	spinlock_t		f_lock;
-	fmode_t			f_mode;
-	atomic_long_t		f_count;
-	struct mutex		f_pos_lock;
-	loff_t			f_pos;
-	unsigned int		f_flags;
-	unsigned int		f_iocb_flags;
-	struct fown_struct	*f_owner;
-	const struct cred	*f_cred;
-	struct path		f_path;
-	struct inode		*f_inode;	/* cached value */
+	atomic_long_t			f_count;
+	spinlock_t			f_lock;
+	fmode_t				f_mode;
 	const struct file_operations	*f_op;
-
-	u64			f_version;
+	struct address_space		*f_mapping;
+	void				*private_data;
+	struct inode			*f_inode;
+	unsigned int			f_flags;
+	unsigned int			f_iocb_flags;
+	const struct cred		*f_cred;
+	/* --- cacheline 1 boundary (64 bytes) --- */
+	struct path			f_path;
+	struct mutex			f_pos_lock;
+	loff_t				f_pos;
+	u64				f_version;
+	/* --- cacheline 2 boundary (128 bytes) --- */
 #ifdef CONFIG_SECURITY
-	void			*f_security;
+	void				*f_security;
 #endif
-	/* needed for tty driver, and maybe others */
-	void			*private_data;
-
+	struct fown_struct		*f_owner;
+	errseq_t			f_wb_err;
+	errseq_t			f_sb_err;
 #ifdef CONFIG_EPOLL
-	/* Used by fs/eventpoll.c to link all the hooks to this file */
-	struct hlist_head	*f_ep;
-#endif /* #ifdef CONFIG_EPOLL */
-	struct address_space	*f_mapping;
-	errseq_t		f_wb_err;
-	errseq_t		f_sb_err; /* for syncfs */
+	struct hlist_head		*f_ep;
+#endif
+	union {
+		struct callback_head	f_task_work;
+		struct llist_node	f_llist;
+		struct file_ra_state	f_ra;
+	};
+	/* --- cacheline 3 boundary (192 bytes) --- */
 } __randomize_layout
   __attribute__((aligned(4)));	/* lest something weird decides that 2 is OK */
-- cgit v1.2.3

From d345bd2e9834e2da505977e154a1c179c793b7b2 Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Wed, 28 Aug 2024 12:56:24 +0200
Subject: mm: add kmem_cache_create_rcu()

When a kmem cache is created with SLAB_TYPESAFE_BY_RCU the free pointer
must be located outside of the object because we don't know what part
of the memory can safely be overwritten as it may be needed to prevent
object recycling.

That has the consequence that SLAB_TYPESAFE_BY_RCU may end up adding a
new cacheline. This is the case for, e.g., struct file. After having it
shrunk down by 40 bytes and having it fit in three cachelines we still
have SLAB_TYPESAFE_BY_RCU adding a fourth cacheline because it needs to
accommodate the free pointer.

Add a new kmem_cache_create_rcu() function that allows the caller to
specify an offset where the free pointer is supposed to be placed.

Link: https://lore.kernel.org/r/20240828-work-kmem_cache-rcu-v3-2-5460bc1f09f6@kernel.org
Acked-by: Mike Rapoport (Microsoft)
Reviewed-by: Vlastimil Babka
Signed-off-by: Christian Brauner
---
 include/linux/slab.h |   9 ++++
 mm/slab.h            |   2 +
 mm/slab_common.c     | 136 ++++++++++++++++++++++++++++++++++++---------------
 mm/slub.c            |  20 +++++---
 4 files changed, 121 insertions(+), 46 deletions(-)

(limited to 'include/linux')
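(Editorial aside: a quick usage sketch of the new interface before the
diff — the struct and names here are invented for illustration; the
call pattern mirrors the struct file conversion two patches later. Room
for the free pointer is reserved inside the object via a union so that
SLAB_TYPESAFE_BY_RCU does not append an extra cacheline:)

/* Hypothetical user of kmem_cache_create_rcu(); "foo" is invented.
 * The union member must only matter while the object is allocated,
 * since SLUB clobbers it with the free pointer once freed. */
struct foo {
	spinlock_t lock;		/* guards live objects */
	unsigned long generation;	/* lets readers detect recycling */
	union {
		struct list_head lru;	/* only meaningful while allocated */
		freeptr_t freeptr;	/* used by SLUB once freed */
	};
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create_rcu("foo", sizeof(struct foo),
					   offsetof(struct foo, freeptr),
					   SLAB_ACCOUNT);
	return foo_cachep ? 0 : -ENOMEM;
}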
diff --git a/include/linux/slab.h b/include/linux/slab.h
index eb2bf4629157..5b2da2cf31a8 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -212,6 +212,12 @@ enum _slab_flag_bits {
 #define SLAB_NO_OBJ_EXT	__SLAB_FLAG_UNUSED
 #endif
 
+/*
+ * freeptr_t represents a SLUB freelist pointer, which might be encoded
+ * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
+ */
+typedef struct { unsigned long v; } freeptr_t;
+
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
@@ -242,6 +248,9 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
 			slab_flags_t flags,
 			unsigned int useroffset, unsigned int usersize,
 			void (*ctor)(void *));
+struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
+					 unsigned int freeptr_offset,
+					 slab_flags_t flags);
 void kmem_cache_destroy(struct kmem_cache *s);
 int kmem_cache_shrink(struct kmem_cache *s);
diff --git a/mm/slab.h b/mm/slab.h
index dcdb56b8e7f5..a6051385186e 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -261,6 +261,8 @@ struct kmem_cache {
 	unsigned int object_size;	/* Object size without metadata */
 	struct reciprocal_value reciprocal_size;
 	unsigned int offset;		/* Free pointer offset */
+	/* Specific free pointer requested (if not UINT_MAX) */
+	unsigned int rcu_freeptr_offset;
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	/* Number of per cpu partial objects to keep around */
 	unsigned int cpu_partial;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index c8dd7e08c5f6..887f6b9855dd 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -202,9 +202,10 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
 }
 
 static struct kmem_cache *create_cache(const char *name,
-		unsigned int object_size, unsigned int align,
-		slab_flags_t flags, unsigned int useroffset,
-		unsigned int usersize, void (*ctor)(void *))
+		unsigned int object_size, unsigned int freeptr_offset,
+		unsigned int align, slab_flags_t flags,
+		unsigned int useroffset, unsigned int usersize,
+		void (*ctor)(void *))
 {
 	struct kmem_cache *s;
 	int err;
@@ -212,6 +213,13 @@ static struct kmem_cache *create_cache(const char *name,
 	if (WARN_ON(useroffset + usersize > object_size))
 		useroffset = usersize = 0;
 
+	/* If a custom freelist pointer is requested make sure it's sane. */
+	err = -EINVAL;
+	if (freeptr_offset != UINT_MAX &&
+	    (freeptr_offset >= object_size || !(flags & SLAB_TYPESAFE_BY_RCU) ||
+	     !IS_ALIGNED(freeptr_offset, sizeof(freeptr_t))))
+		goto out;
+
 	err = -ENOMEM;
 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 	if (!s)
@@ -219,13 +227,13 @@ static struct kmem_cache *create_cache(const char *name,
 	s->name = name;
 	s->size = s->object_size = object_size;
+	s->rcu_freeptr_offset = freeptr_offset;
 	s->align = align;
 	s->ctor = ctor;
 #ifdef CONFIG_HARDENED_USERCOPY
 	s->useroffset = useroffset;
 	s->usersize = usersize;
 #endif
-
 	err = __kmem_cache_create(s, flags);
 	if (err)
 		goto out_free_cache;
@@ -240,38 +248,10 @@ out:
 	return ERR_PTR(err);
 }
 
-/**
- * kmem_cache_create_usercopy - Create a cache with a region suitable
- * for copying to userspace
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
- * @flags: SLAB flags
- * @useroffset: Usercopy region offset
- * @usersize: Usercopy region size
- * @ctor: A constructor for the objects.
- *
- * Cannot be called within a interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
- *
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline.  This can be beneficial if you're counting cycles as closely
- * as davem.
- *
- * Return: a pointer to the cache on success, NULL on failure.
- */
-struct kmem_cache *
-kmem_cache_create_usercopy(const char *name,
-			   unsigned int size, unsigned int align,
-			   slab_flags_t flags,
+static struct kmem_cache *
+do_kmem_cache_create_usercopy(const char *name,
+			      unsigned int size, unsigned int freeptr_offset,
+			      unsigned int align, slab_flags_t flags,
 			   unsigned int useroffset, unsigned int usersize,
 			   void (*ctor)(void *))
 {
@@ -331,7 +311,7 @@ kmem_cache_create_usercopy(const char *name,
 		goto out_unlock;
 	}
 
-	s = create_cache(cache_name, size,
+	s = create_cache(cache_name, size, freeptr_offset,
 			 calculate_alignment(flags, align, size),
 			 flags, useroffset, usersize, ctor);
 	if (IS_ERR(s)) {
@@ -355,6 +335,45 @@ out_unlock:
 	}
 	return s;
 }
+
+/**
+ * kmem_cache_create_usercopy - Create a cache with a region suitable
+ * for copying to userspace
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @freeptr_offset: Custom offset for the free pointer in RCU caches
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @useroffset: Usercopy region offset
+ * @usersize: Usercopy region size
+ * @ctor: A constructor for the objects.
+ *
+ * Cannot be called within a interrupt, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache.
+ *
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline.  This can be beneficial if you're counting cycles as closely
+ * as davem.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+struct kmem_cache *
+kmem_cache_create_usercopy(const char *name, unsigned int size,
+			   unsigned int align, slab_flags_t flags,
+			   unsigned int useroffset, unsigned int usersize,
+			   void (*ctor)(void *))
+{
+	return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
+					     useroffset, usersize, ctor);
+}
 EXPORT_SYMBOL(kmem_cache_create_usercopy);
 
 /**
@@ -386,11 +405,50 @@ struct kmem_cache *
 kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 		slab_flags_t flags, void (*ctor)(void *))
 {
-	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
-					  ctor);
+	return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
+					     0, 0, ctor);
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
+/**
+ * kmem_cache_create_rcu - Create a SLAB_TYPESAFE_BY_RCU cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @freeptr_offset: The offset into the memory to the free pointer
+ * @flags: SLAB flags
+ *
+ * Cannot be called within an interrupt, but can be interrupted.
+ *
+ * See kmem_cache_create() for an explanation of possible @flags.
+ *
+ * By default SLAB_TYPESAFE_BY_RCU caches place the free pointer outside
+ * of the object. This might cause the object to grow in size. Callers
+ * that have a reason to avoid this can specify a custom free pointer
+ * offset in their struct where the free pointer will be placed.
+ *
+ * Note that placing the free pointer inside the object requires the
+ * caller to ensure that no fields are invalidated that are required to
+ * guard against object recycling (See SLAB_TYPESAFE_BY_RCU for
+ * details.).
+ *
+ * Using zero as a value for @freeptr_offset is valid. To request no
+ * offset UINT_MAX must be specified.
+ *
+ * Note that @ctor isn't supported with custom free pointers as a @ctor
+ * requires an external free pointer.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
+					 unsigned int freeptr_offset,
+					 slab_flags_t flags)
+{
+	return do_kmem_cache_create_usercopy(name, size, freeptr_offset, 0,
+					     flags | SLAB_TYPESAFE_BY_RCU, 0, 0,
+					     NULL);
+}
+EXPORT_SYMBOL(kmem_cache_create_rcu);
+
 static struct kmem_cache *kmem_buckets_cache __ro_after_init;
 
 /**
diff --git a/mm/slub.c b/mm/slub.c
index c9d8a2497fd6..9aa5da1e8e27 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -465,12 +465,6 @@ static struct workqueue_struct *flushwq;
  * Core slab cache functions
  *******************************************************************/
 
-/*
- * freeptr_t represents a SLUB freelist pointer, which might be encoded
- * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
- */
-typedef struct { unsigned long v; } freeptr_t;
-
 /*
  * Returns freelist pointer (ptr). With hardening, this is obfuscated
  * with an XOR of the address where the pointer is held and a per-cache
@@ -3921,6 +3915,9 @@ static void *__slab_alloc_node(struct kmem_cache *s,
 /*
  * If the object has been wiped upon free, make sure it's fully initialized by
  * zeroing out freelist pointer.
+ *
+ * Note that we also wipe custom freelist pointers specified via
+ * s->rcu_freeptr_offset.
  */
 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
 						   void *obj)
@@ -5144,6 +5141,12 @@ static void set_cpu_partial(struct kmem_cache *s)
 #endif
 }
 
+/* Was a valid freeptr offset requested? */
+static inline bool has_freeptr_offset(const struct kmem_cache *s)
+{
+	return s->rcu_freeptr_offset != UINT_MAX;
+}
+
 /*
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
@@ -5189,7 +5192,8 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	s->inuse = size;
 
-	if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor ||
+	if (((flags & SLAB_TYPESAFE_BY_RCU) && !has_freeptr_offset(s)) ||
+	    (flags & SLAB_POISON) || s->ctor ||
 	    ((flags & SLAB_RED_ZONE) && (s->object_size < sizeof(void *) ||
 	     slub_debug_orig_size(s)))) {
 		/*
@@ -5210,6 +5214,8 @@ static int calculate_sizes(struct kmem_cache *s)
 		 */
 		s->offset = size;
 		size += sizeof(void *);
+	} else if ((flags & SLAB_TYPESAFE_BY_RCU) && has_freeptr_offset(s)) {
+		s->offset = s->rcu_freeptr_offset;
 	} else {
 		/*
 		 * Store freelist pointer near middle of object to keep
-- cgit v1.2.3

From ea566e18b4deea6e998088de4f1a76d1f39c8d3f Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Wed, 28 Aug 2024 12:56:25 +0200
Subject: fs: use kmem_cache_create_rcu()

Switch to the new kmem_cache_create_rcu() helper which allows us to use
a custom free pointer offset, avoiding the need to have an external
free pointer which would grow struct file behind our backs.
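(Editorial note, restated as hypothetical build-time checks that the
kernel does not actually carry: the offset passed to
kmem_cache_create_rcu() in files_init() below satisfies the sanity
checks that create_cache() gained in the previous patch:)

/* Illustrative only: mirrors the -EINVAL checks in create_cache().
 * The free pointer must lie within the object and be aligned to
 * sizeof(freeptr_t). */
static_assert(offsetof(struct file, f_freeptr) < sizeof(struct file));
static_assert(offsetof(struct file, f_freeptr) % sizeof(freeptr_t) == 0);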
Link: https://lore.kernel.org/r/20240828-work-kmem_cache-rcu-v3-3-5460bc1f09f6@kernel.org
Acked-by: Mike Rapoport (Microsoft)
Reviewed-by: Vlastimil Babka
Signed-off-by: Christian Brauner
---
 fs/file_table.c    | 6 +++---
 include/linux/fs.h | 2 ++
 2 files changed, 5 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/fs/file_table.c b/fs/file_table.c
index fdf98709dde2..3ef558f27a1c 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -511,9 +511,9 @@ EXPORT_SYMBOL(__fput_sync);
 
 void __init files_init(void)
 {
-	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
-				SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN |
-				SLAB_PANIC | SLAB_ACCOUNT, NULL);
+	filp_cachep = kmem_cache_create_rcu("filp", sizeof(struct file),
+				offsetof(struct file, f_freeptr),
+				SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
 	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
 }
diff --git a/include/linux/fs.h b/include/linux/fs.h
index af8bbd4eeb3a..58c91a52cad1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1011,6 +1011,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
 * @f_task_work: task work entry point
 * @f_llist: work queue entrypoint
 * @f_ra: file's readahead state
+ * @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.)
 */
 struct file {
 	atomic_long_t			f_count;
@@ -1042,6 +1043,7 @@ struct file {
 		struct callback_head	f_task_work;
 		struct llist_node	f_llist;
 		struct file_ra_state	f_ra;
+		freeptr_t		f_freeptr;
 	};
 	/* --- cacheline 3 boundary (192 bytes) --- */
 } __randomize_layout
-- cgit v1.2.3

From d688d65a847ff910146eff51e70f4b7049895eb5 Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Fri, 30 Aug 2024 15:04:49 +0200
Subject: fs: add generic_llseek_cookie()

This is similar to generic_file_llseek() but allows the caller to
specify a cookie that will be updated to indicate that a seek happened.
Callers requiring that information in their readdir implementations can
use it.
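A hypothetical caller, to make the intended use concrete (the
filesystem, context structure, and field names are invented; note that
the caller must have FMODE_ATOMIC_POS set, which the WARN_ON_ONCE() in
the helper below enforces):

/* Hypothetical directory llseek ("examplefs" is invented for
 * illustration). The cookie is zeroed whenever f_pos changes, so the
 * filesystem's readdir can detect an intervening seek. */
static loff_t examplefs_dir_llseek(struct file *file, loff_t offset,
				   int whence)
{
	struct examplefs_dir_context *ctx = file->private_data;

	return generic_llseek_cookie(file, offset, whence, &ctx->seek_cookie);
}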
Link: https://lore.kernel.org/r/20240830-vfs-file-f_version-v1-8-6d3e4816aa7b@kernel.org
Reviewed-by: Jan Kara
Reviewed-by: Jeff Layton
Signed-off-by: Christian Brauner
---
 fs/read_write.c    | 45 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/fs.h |  2 ++
 2 files changed, 47 insertions(+)

(limited to 'include/linux')

diff --git a/fs/read_write.c b/fs/read_write.c
index b07e48cd81d1..fb519e55c8e8 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -180,6 +180,51 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
 }
 EXPORT_SYMBOL(generic_file_llseek_size);
 
+/**
+ * generic_llseek_cookie - versioned llseek implementation
+ * @file: file structure to seek on
+ * @offset: file offset to seek to
+ * @whence: type of seek
+ * @cookie: cookie to update
+ *
+ * See generic_file_llseek for a general description and locking assumptions.
+ *
+ * In contrast to generic_file_llseek, this function also resets a
+ * specified cookie to indicate a seek took place.
+ */
+loff_t generic_llseek_cookie(struct file *file, loff_t offset, int whence,
+			     u64 *cookie)
+{
+	struct inode *inode = file->f_mapping->host;
+	loff_t maxsize = inode->i_sb->s_maxbytes;
+	loff_t eof = i_size_read(inode);
+	int ret;
+
+	if (WARN_ON_ONCE(!cookie))
+		return -EINVAL;
+
+	/*
+	 * Require that this is only used for directories that guarantee
+	 * synchronization between readdir and seek so that an update to
+	 * @cookie is correctly synchronized with concurrent readdir.
+	 */
+	if (WARN_ON_ONCE(!(file->f_mode & FMODE_ATOMIC_POS)))
+		return -EINVAL;
+
+	ret = must_set_pos(file, &offset, whence, eof);
+	if (ret < 0)
+		return ret;
+	if (ret == 0)
+		return offset;
+
+	/* No need to hold f_lock because we know that f_pos_lock is held. */
+	if (whence == SEEK_CUR)
+		return vfs_setpos_cookie(file, file->f_pos + offset, maxsize, cookie);
+
+	return vfs_setpos_cookie(file, offset, maxsize, cookie);
+}
+EXPORT_SYMBOL(generic_llseek_cookie);
+
 /**
  * generic_file_llseek - generic llseek implementation for regular files
  * @file: file structure to seek on
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 58c91a52cad1..3e6b3c1afb31 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3202,6 +3202,8 @@ extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
 extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
 extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
 		int whence, loff_t maxsize, loff_t eof);
+loff_t generic_llseek_cookie(struct file *file, loff_t offset, int whence,
+			     u64 *cookie);
 extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
 		int whence, loff_t size);
 extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
-- cgit v1.2.3

From 5e9b50dea970ae6d3e1309d4254157099734a2af Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Fri, 30 Aug 2024 15:04:59 +0200
Subject: fs: add f_pipe

Only regular files with FMODE_ATOMIC_POS and directories need
f_pos_lock. Place a new f_pipe member in a union with f_pos_lock that
files which don't need f_pos_lock, such as pipes, can use, and make
them stop abusing f_version in follow-up patches.
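To make the union reuse concrete, a sketch of the kind of follow-up
conversion the message alludes to (illustrative only; the real pipe
conversion happens in a later patch in this series):

/* Illustrative only: a file type that never takes f_pos_lock may
 * reuse the union space, but must then initialize its own member in
 * its open routine, since init_file() only ran mutex_init() on
 * f_pos_lock. */
static int example_pipe_open(struct inode *inode, struct file *file)
{
	file->f_pipe = 0;	/* change counter replacing f_version uses */
	return 0;
}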
Link: https://lore.kernel.org/r/20240830-vfs-file-f_version-v1-18-6d3e4816aa7b@kernel.org
Reviewed-by: Jeff Layton
Signed-off-by: Christian Brauner
---
 fs/file_table.c    | 7 +++++++
 include/linux/fs.h | 8 +++++++-
 2 files changed, 14 insertions(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/fs/file_table.c b/fs/file_table.c
index 3ef558f27a1c..7ce4d5dac080 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -156,6 +156,13 @@ static int init_file(struct file *f, int flags, const struct cred *cred)
 	}
 
 	spin_lock_init(&f->f_lock);
+	/*
+	 * Note that f_pos_lock is only used for files raising
+	 * FMODE_ATOMIC_POS and directories. Other files such as pipes
+	 * don't need it and since f_pos_lock is in a union may reuse
+	 * the space for other purposes. They are expected to initialize
+	 * the respective member when opening the file.
+	 */
 	mutex_init(&f->f_pos_lock);
 	f->f_flags = flags;
 	f->f_mode = OPEN_FMODE(flags);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3e6b3c1afb31..ca4925008244 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1001,6 +1001,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
 * @f_cred: stashed credentials of creator/opener
 * @f_path: path of the file
 * @f_pos_lock: lock protecting file position
+ * @f_pipe: specific to pipes
 * @f_pos: file position
 * @f_version: file version
 * @f_security: LSM security context of this file
 * @f_owner: file owner
@@ -1026,7 +1027,12 @@ struct file {
 	const struct cred		*f_cred;
 	/* --- cacheline 1 boundary (64 bytes) --- */
 	struct path			f_path;
-	struct mutex			f_pos_lock;
+	union {
+		/* regular files (with FMODE_ATOMIC_POS) and directories */
+		struct mutex		f_pos_lock;
+		/* pipes */
+		u64			f_pipe;
+	};
 	loff_t				f_pos;
 	u64				f_version;
 	/* --- cacheline 2 boundary (128 bytes) --- */
-- cgit v1.2.3

From 11068e0b64cbb540b96e577fcca0926242ecaf58 Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Fri, 30 Aug 2024 15:05:01 +0200
Subject: fs: remove f_version

Now that detecting concurrent seeks is done by the filesystems that
require it, we can remove f_version and free up 8 bytes for future
extensions.

Link: https://lore.kernel.org/r/20240830-vfs-file-f_version-v1-20-6d3e4816aa7b@kernel.org
Reviewed-by: Jan Kara
Reviewed-by: Jeff Layton
Signed-off-by: Christian Brauner
---
 fs/read_write.c    | 9 ++++-----
 include/linux/fs.h | 4 +---
 2 files changed, 5 insertions(+), 8 deletions(-)

(limited to 'include/linux')

diff --git a/fs/read_write.c b/fs/read_write.c
index fb519e55c8e8..b19cce8e55b9 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -62,7 +62,8 @@ static loff_t vfs_setpos_cookie(struct file *file, loff_t offset,
 
 	if (offset != file->f_pos) {
 		file->f_pos = offset;
-		*cookie = 0;
+		if (cookie)
+			*cookie = 0;
 	}
 	return offset;
 }
@@ -81,7 +82,7 @@ static loff_t vfs_setpos_cookie(struct file *file, loff_t offset,
  */
 loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
 {
-	return vfs_setpos_cookie(file, offset, maxsize, &file->f_version);
+	return vfs_setpos_cookie(file, offset, maxsize, NULL);
 }
 EXPORT_SYMBOL(vfs_setpos);
 
@@ -364,10 +365,8 @@ loff_t default_llseek(struct file *file, loff_t offset, int whence)
 	}
 	retval = -EINVAL;
 	if (offset >= 0 || unsigned_offsets(file)) {
-		if (offset != file->f_pos) {
+		if (offset != file->f_pos)
 			file->f_pos = offset;
-			file->f_version = 0;
-		}
 		retval = offset;
 	}
 out:
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ca4925008244..7e11ce172140 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1003,7 +1003,6 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
 * @f_pos_lock: lock protecting file position
 * @f_pipe: specific to pipes
 * @f_pos: file position
- * @f_version: file version
 * @f_security: LSM security context of this file
 * @f_owner: file owner
 * @f_wb_err: writeback error
@@ -1034,11 +1033,10 @@ struct file {
 		u64			f_pipe;
 	};
 	loff_t				f_pos;
-	u64				f_version;
-	/* --- cacheline 2 boundary (128 bytes) --- */
 #ifdef CONFIG_SECURITY
 	void				*f_security;
 #endif
+	/* --- cacheline 2 boundary (128 bytes) --- */
 	struct fown_struct		*f_owner;
 	errseq_t			f_wb_err;
 	errseq_t			f_sb_err;
-- cgit v1.2.3