From 3efa66ce6ee1b55ab687b316e48e1e9ddc1f780a Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 16 Apr 2025 18:29:01 +0200 Subject: rcuref: Provide rcuref_is_dead() rcuref_read() returns the number of references that are currently held. If 0 is returned then it is not safe to assume that the object can be scheduled for deconstruction because it is marked DEAD. This happens if the return value of rcuref_put() is ignored and assumptions are made. If 0 is returned then the counter transitioned from 0 to RCUREF_NOREF. If rcuref_put() did not return to the caller then the counter did not yet transition from RCUREF_NOREF to RCUREF_DEAD. This means that there is still a chance that the counter will transition from RCUREF_NOREF to 0 meaning it is still valid and must not be deconstructed. In this brief window rcuref_read() will return 0. Provide rcuref_is_dead() to determine if the counter is marked as RCUREF_DEAD. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250416162921.513656-2-bigeasy@linutronix.de --- include/linux/rcuref.h | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h index 6322d8c1c6b4..2fb2af6d9824 100644 --- a/include/linux/rcuref.h +++ b/include/linux/rcuref.h @@ -30,7 +30,11 @@ static inline void rcuref_init(rcuref_t *ref, unsigned int cnt) * rcuref_read - Read the number of held reference counts of a rcuref * @ref: Pointer to the reference count * - * Return: The number of held references (0 ... N) + * Return: The number of held references (0 ... N). The value 0 does not + * indicate that it is safe to schedule the object, protected by this reference + * counter, for deconstruction. + * If you want to know if the reference counter has been marked DEAD (as + * signaled by rcuref_put()) please use rcuread_is_dead(). */ static inline unsigned int rcuref_read(rcuref_t *ref) { @@ -40,6 +44,22 @@ static inline unsigned int rcuref_read(rcuref_t *ref) return c >= RCUREF_RELEASED ? 0 : c + 1; } +/** + * rcuref_is_dead - Check if the rcuref has been already marked dead + * @ref: Pointer to the reference count + * + * Return: True if the object has been marked DEAD. This signals that a previous + * invocation of rcuref_put() returned true on this reference counter meaning + * the protected object can safely be scheduled for deconstruction. + * Otherwise, returns false. + */ +static inline bool rcuref_is_dead(rcuref_t *ref) +{ + unsigned int c = atomic_read(&ref->refcnt); + + return (c >= RCUREF_RELEASED) && (c < RCUREF_NOREF); +} + extern __must_check bool rcuref_get_slowpath(rcuref_t *ref); /** -- cgit v1.2.3 From 55284f70134f01fdc9cc4c4905551cc1f37abd34 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 16 Apr 2025 18:29:02 +0200 Subject: mm: Add vmalloc_huge_node() To enable node specific hash-tables using huge pages if possible.
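For illustration only (an editorial sketch, not part of this patch): the intended caller pattern is one table per NUMA node, which is how the futex rework later in this series uses it in futex_init(). The helper name and the table array below are made up.

	/* Needs <linux/vmalloc.h> and <linux/nodemask.h>. */
	static void **alloc_per_node_tables(void *tables[], size_t size)
	{
		int n;

		for_each_node(n) {
			/* Huge-page backed, node-local allocation when possible. */
			tables[n] = vmalloc_huge_node(size, GFP_KERNEL, n);
			if (!tables[n])
				return NULL;	/* caller unwinds the partial allocation */
		}
		return tables;
	}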
[bigeasy: use __vmalloc_node_range_noprof(), add nommu bits, inline vmalloc_huge] Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20250416162921.513656-3-bigeasy@linutronix.de --- include/linux/vmalloc.h | 9 +++++++-- mm/nommu.c | 18 +++++++++++++++++- mm/vmalloc.c | 11 ++++++----- 3 files changed, 30 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 31e9ffd936e3..de95794777ad 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -168,8 +168,13 @@ void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_m int node, const void *caller) __alloc_size(1); #define __vmalloc_node(...) alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__)) -void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1); -#define vmalloc_huge(...) alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__)) +void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) __alloc_size(1); +#define vmalloc_huge_node(...) alloc_hooks(vmalloc_huge_node_noprof(__VA_ARGS__)) + +static inline void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) +{ + return vmalloc_huge_node(size, gfp_mask, NUMA_NO_NODE); +} extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); #define __vmalloc_array(...) alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__)) diff --git a/mm/nommu.c b/mm/nommu.c index 617e7ba8022f..70f92f9a7fab 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -200,7 +200,23 @@ void *vmalloc_noprof(unsigned long size) } EXPORT_SYMBOL(vmalloc_noprof); -void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc_noprof); +/* + * vmalloc_huge_node - allocate virtually contiguous memory, on a node + * + * @size: allocation size + * @gfp_mask: flags for the page level allocator + * @node: node to use for allocation or NUMA_NO_NODE + * + * Allocate enough pages to cover @size from the page level + * allocator and map them into contiguous kernel virtual space. + * + * Due to NOMMU implications the node argument and HUGE page attribute is + * ignored. + */ +void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) +{ + return __vmalloc_noprof(size, gfp_mask); +} /* * vzalloc - allocate virtually contiguous memory with zero fill diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 3ed720a787ec..8b9f6d3c099d 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3943,9 +3943,10 @@ void *vmalloc_noprof(unsigned long size) EXPORT_SYMBOL(vmalloc_noprof); /** - * vmalloc_huge - allocate virtually contiguous memory, allow huge pages + * vmalloc_huge_node - allocate virtually contiguous memory, allow huge pages * @size: allocation size * @gfp_mask: flags for the page level allocator + * @node: node to use for allocation or NUMA_NO_NODE * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. 
@@ -3954,13 +3955,13 @@ EXPORT_SYMBOL(vmalloc_noprof); * * Return: pointer to the allocated memory or %NULL on error */ -void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) +void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) { return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END, - gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, - NUMA_NO_NODE, __builtin_return_address(0)); + gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, + node, __builtin_return_address(0)); } -EXPORT_SYMBOL_GPL(vmalloc_huge_noprof); +EXPORT_SYMBOL_GPL(vmalloc_huge_node_noprof); /** * vzalloc - allocate virtually contiguous memory with zero fill -- cgit v1.2.3 From 80367ad01d93ac781b0e1df246edaf006928002f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 16 Apr 2025 18:29:12 +0200 Subject: futex: Add basic infrastructure for local task local hash The futex hash is system wide and shared by all tasks. Each slot is hashed based on futex address and the VMA of the thread. Due to randomized VMAs (and memory allocations) the same logical lock (pointer) can end up in a different hash bucket on each invocation of the application. This in turn means that different applications may share a hash bucket on the first invocation but not on the second and it is not always clear which applications will be involved. This can result in high latencies to acquire the futex_hash_bucket::lock especially if the lock owner is limited to a CPU and can not be effectively PI boosted. Introduce basic infrastructure for a process local hash which is shared by all threads of the process. This hash will only be used for a PROCESS_PRIVATE FUTEX operation. The hashmap can be allocated via: prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, num); A `num' of 0 means that the global hash is used instead of a private hash. Other values for `num' specify the number of slots for the hash and the number must be a power of two, starting with two. The prctl() returns zero on success. This function can only be used before a thread is created. The current status for the private hash can be queried via: num = prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_SLOTS); which returns the current number of slots. The value 0 means that the global hash is used. Values greater than 0 indicate the number of slots that are used. A negative number indicates an error. For optimisation, for the private hash jhash2() uses only two arguments: the address and the offset. This omits the VMA which is always the same. [peterz: Use 0 for global hash. A bit shuffling and renaming.
] Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250416162921.513656-13-bigeasy@linutronix.de --- include/linux/futex.h | 26 +++++- include/linux/mm_types.h | 5 +- include/uapi/linux/prctl.h | 5 ++ init/Kconfig | 5 ++ kernel/fork.c | 2 + kernel/futex/core.c | 208 +++++++++++++++++++++++++++++++++++++++++---- kernel/futex/futex.h | 10 +++ kernel/sys.c | 4 + 8 files changed, 244 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/futex.h b/include/linux/futex.h index b70df27d7e85..8f1be08bef18 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -4,11 +4,11 @@ #include #include +#include #include struct inode; -struct mm_struct; struct task_struct; /* @@ -77,7 +77,22 @@ void futex_exec_release(struct task_struct *tsk); long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3); -#else +int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4); + +#ifdef CONFIG_FUTEX_PRIVATE_HASH +void futex_hash_free(struct mm_struct *mm); + +static inline void futex_mm_init(struct mm_struct *mm) +{ + mm->futex_phash = NULL; +} + +#else /* !CONFIG_FUTEX_PRIVATE_HASH */ +static inline void futex_hash_free(struct mm_struct *mm) { } +static inline void futex_mm_init(struct mm_struct *mm) { } +#endif /* CONFIG_FUTEX_PRIVATE_HASH */ + +#else /* !CONFIG_FUTEX */ static inline void futex_init_task(struct task_struct *tsk) { } static inline void futex_exit_recursive(struct task_struct *tsk) { } static inline void futex_exit_release(struct task_struct *tsk) { } @@ -88,6 +103,13 @@ static inline long do_futex(u32 __user *uaddr, int op, u32 val, { return -EINVAL; } +static inline int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4) +{ + return -EINVAL; +} +static inline void futex_hash_free(struct mm_struct *mm) { } +static inline void futex_mm_init(struct mm_struct *mm) { } + #endif #endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 56d07edd01f9..a4b5661e4177 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -31,6 +31,7 @@ #define INIT_PASID 0 struct address_space; +struct futex_private_hash; struct mem_cgroup; /* @@ -1031,7 +1032,9 @@ struct mm_struct { */ seqcount_t mm_lock_seq; #endif - +#ifdef CONFIG_FUTEX_PRIVATE_HASH + struct futex_private_hash *futex_phash; +#endif unsigned long hiwater_rss; /* High-watermark of RSS usage */ unsigned long hiwater_vm; /* High-water virtual memory usage */ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 15c18ef4eb11..3b93fb906e3c 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -364,4 +364,9 @@ struct prctl_mm_map { # define PR_TIMER_CREATE_RESTORE_IDS_ON 1 # define PR_TIMER_CREATE_RESTORE_IDS_GET 2 +/* FUTEX hash management */ +#define PR_FUTEX_HASH 78 +# define PR_FUTEX_HASH_SET_SLOTS 1 +# define PR_FUTEX_HASH_GET_SLOTS 2 + #endif /* _LINUX_PRCTL_H */ diff --git a/init/Kconfig b/init/Kconfig index 63f5974b9fa6..4b84da2b2ec4 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1699,6 +1699,11 @@ config FUTEX_PI depends on FUTEX && RT_MUTEXES default y +config FUTEX_PRIVATE_HASH + bool + depends on FUTEX && !BASE_SMALL && MMU + default y + config EPOLL bool "Enable eventpoll support" if EXPERT default y diff --git a/kernel/fork.c b/kernel/fork.c index c4b26cd8998b..831dfec45054 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1305,6 +1305,7 @@ static struct mm_struct 
*mm_init(struct mm_struct *mm, struct task_struct *p, RCU_INIT_POINTER(mm->exe_file, NULL); mmu_notifier_subscriptions_init(mm); init_tlb_flush_pending(mm); + futex_mm_init(mm); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS) mm->pmd_huge_pte = NULL; #endif @@ -1387,6 +1388,7 @@ static inline void __mmput(struct mm_struct *mm) if (mm->binfmt) module_put(mm->binfmt->module); lru_gen_del_mm(mm); + futex_hash_free(mm); mmdrop(mm); } diff --git a/kernel/futex/core.c b/kernel/futex/core.c index afc66780f84f..818df7420a1a 100644 --- a/kernel/futex/core.c +++ b/kernel/futex/core.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "futex.h" #include "../locking/rtmutex_common.h" @@ -55,6 +56,12 @@ static struct { #define futex_queues (__futex_data.queues) #define futex_hashmask (__futex_data.hashmask) +struct futex_private_hash { + unsigned int hash_mask; + void *mm; + bool custom; + struct futex_hash_bucket queues[]; +}; /* * Fault injections for futexes. @@ -107,9 +114,17 @@ late_initcall(fail_futex_debugfs); #endif /* CONFIG_FAIL_FUTEX */ -struct futex_private_hash *futex_private_hash(void) +static struct futex_hash_bucket * +__futex_hash(union futex_key *key, struct futex_private_hash *fph); + +#ifdef CONFIG_FUTEX_PRIVATE_HASH +static inline bool futex_key_is_private(union futex_key *key) { - return NULL; + /* + * Relies on get_futex_key() to set either bit for shared + * futexes -- see comment with union futex_key. + */ + return !(key->both.offset & (FUT_OFF_INODE | FUT_OFF_MMSHARED)); } bool futex_private_hash_get(struct futex_private_hash *fph) @@ -117,21 +132,8 @@ bool futex_private_hash_get(struct futex_private_hash *fph) return false; } -void futex_private_hash_put(struct futex_private_hash *fph) { } - -/** - * futex_hash - Return the hash bucket in the global hash - * @key: Pointer to the futex key for which the hash is calculated - * - * We hash on the keys returned from get_futex_key (see below) and return the - * corresponding hash bucket in the global hash. 
- */ -struct futex_hash_bucket *futex_hash(union futex_key *key) +void futex_private_hash_put(struct futex_private_hash *fph) { - u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4, - key->both.offset); - - return &futex_queues[hash & futex_hashmask]; } /** @@ -144,6 +146,84 @@ struct futex_hash_bucket *futex_hash(union futex_key *key) void futex_hash_get(struct futex_hash_bucket *hb) { } void futex_hash_put(struct futex_hash_bucket *hb) { } +static struct futex_hash_bucket * +__futex_hash_private(union futex_key *key, struct futex_private_hash *fph) +{ + u32 hash; + + if (!futex_key_is_private(key)) + return NULL; + + if (!fph) + fph = key->private.mm->futex_phash; + if (!fph || !fph->hash_mask) + return NULL; + + hash = jhash2((void *)&key->private.address, + sizeof(key->private.address) / 4, + key->both.offset); + return &fph->queues[hash & fph->hash_mask]; +} + +struct futex_private_hash *futex_private_hash(void) +{ + struct mm_struct *mm = current->mm; + struct futex_private_hash *fph; + + fph = mm->futex_phash; + return fph; +} + +struct futex_hash_bucket *futex_hash(union futex_key *key) +{ + struct futex_hash_bucket *hb; + + hb = __futex_hash(key, NULL); + return hb; +} + +#else /* !CONFIG_FUTEX_PRIVATE_HASH */ + +static struct futex_hash_bucket * +__futex_hash_private(union futex_key *key, struct futex_private_hash *fph) +{ + return NULL; +} + +struct futex_hash_bucket *futex_hash(union futex_key *key) +{ + return __futex_hash(key, NULL); +} + +#endif /* CONFIG_FUTEX_PRIVATE_HASH */ + +/** + * __futex_hash - Return the hash bucket + * @key: Pointer to the futex key for which the hash is calculated + * @fph: Pointer to private hash if known + * + * We hash on the keys returned from get_futex_key (see below) and return the + * corresponding hash bucket. + * If the FUTEX is PROCESS_PRIVATE then a per-process hash bucket (from the + * private hash) is returned if existing. Otherwise a hash bucket from the + * global hash is returned. + */ +static struct futex_hash_bucket * +__futex_hash(union futex_key *key, struct futex_private_hash *fph) +{ + struct futex_hash_bucket *hb; + u32 hash; + + hb = __futex_hash_private(key, fph); + if (hb) + return hb; + + hash = jhash2((u32 *)key, + offsetof(typeof(*key), both.offset) / 4, + key->both.offset); + return &futex_queues[hash & futex_hashmask]; +} + /** * futex_setup_timer - set up the sleeping hrtimer. * @time: ptr to the given timeout value @@ -985,6 +1065,13 @@ static void exit_pi_state_list(struct task_struct *curr) struct futex_pi_state *pi_state; union futex_key key = FUTEX_KEY_INIT; + /* + * Ensure the hash remains stable (no resize) during the while loop + * below. The hb pointer is acquired under the pi_lock so we can't block + * on the mutex. 
+ */ + WARN_ON(curr != current); + guard(private_hash)(); /* * We are a ZOMBIE and nobody can enqueue itself on * pi_state_list anymore, but we have to be careful @@ -1160,13 +1247,98 @@ void futex_exit_release(struct task_struct *tsk) futex_cleanup_end(tsk, FUTEX_STATE_DEAD); } -static void futex_hash_bucket_init(struct futex_hash_bucket *fhb) +static void futex_hash_bucket_init(struct futex_hash_bucket *fhb, + struct futex_private_hash *fph) { +#ifdef CONFIG_FUTEX_PRIVATE_HASH + fhb->priv = fph; +#endif atomic_set(&fhb->waiters, 0); plist_head_init(&fhb->chain); spin_lock_init(&fhb->lock); } +#ifdef CONFIG_FUTEX_PRIVATE_HASH +void futex_hash_free(struct mm_struct *mm) +{ + kvfree(mm->futex_phash); +} + +static int futex_hash_allocate(unsigned int hash_slots, bool custom) +{ + struct mm_struct *mm = current->mm; + struct futex_private_hash *fph; + int i; + + if (hash_slots && (hash_slots == 1 || !is_power_of_2(hash_slots))) + return -EINVAL; + + if (mm->futex_phash) + return -EALREADY; + + if (!thread_group_empty(current)) + return -EINVAL; + + fph = kvzalloc(struct_size(fph, queues, hash_slots), GFP_KERNEL_ACCOUNT | __GFP_NOWARN); + if (!fph) + return -ENOMEM; + + fph->hash_mask = hash_slots ? hash_slots - 1 : 0; + fph->custom = custom; + fph->mm = mm; + + for (i = 0; i < hash_slots; i++) + futex_hash_bucket_init(&fph->queues[i], fph); + + mm->futex_phash = fph; + return 0; +} + +static int futex_hash_get_slots(void) +{ + struct futex_private_hash *fph; + + fph = current->mm->futex_phash; + if (fph && fph->hash_mask) + return fph->hash_mask + 1; + return 0; +} + +#else + +static int futex_hash_allocate(unsigned int hash_slots, bool custom) +{ + return -EINVAL; +} + +static int futex_hash_get_slots(void) +{ + return 0; +} +#endif + +int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4) +{ + int ret; + + switch (arg2) { + case PR_FUTEX_HASH_SET_SLOTS: + if (arg4 != 0) + return -EINVAL; + ret = futex_hash_allocate(arg3, true); + break; + + case PR_FUTEX_HASH_GET_SLOTS: + ret = futex_hash_get_slots(); + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} + static int __init futex_init(void) { unsigned long hashsize, i; @@ -1185,7 +1357,7 @@ static int __init futex_init(void) hashsize = 1UL << futex_shift; for (i = 0; i < hashsize; i++) - futex_hash_bucket_init(&futex_queues[i]); + futex_hash_bucket_init(&futex_queues[i], NULL); futex_hashmask = hashsize - 1; return 0; diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h index 26e69333cb74..899aed5acde1 100644 --- a/kernel/futex/futex.h +++ b/kernel/futex/futex.h @@ -118,6 +118,7 @@ struct futex_hash_bucket { atomic_t waiters; spinlock_t lock; struct plist_head chain; + struct futex_private_hash *priv; } ____cacheline_aligned_in_smp; /* @@ -204,6 +205,7 @@ futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout, int flags, u64 range_ns); extern struct futex_hash_bucket *futex_hash(union futex_key *key); +#ifdef CONFIG_FUTEX_PRIVATE_HASH extern void futex_hash_get(struct futex_hash_bucket *hb); extern void futex_hash_put(struct futex_hash_bucket *hb); @@ -211,6 +213,14 @@ extern struct futex_private_hash *futex_private_hash(void); extern bool futex_private_hash_get(struct futex_private_hash *fph); extern void futex_private_hash_put(struct futex_private_hash *fph); +#else /* !CONFIG_FUTEX_PRIVATE_HASH */ +static inline void futex_hash_get(struct futex_hash_bucket *hb) { } +static inline void futex_hash_put(struct futex_hash_bucket *hb) { } +static inline struct futex_private_hash 
*futex_private_hash(void) { return NULL; } +static inline bool futex_private_hash_get(void) { return false; } +static inline void futex_private_hash_put(struct futex_private_hash *fph) { } +#endif + DEFINE_CLASS(hb, struct futex_hash_bucket *, if (_T) futex_hash_put(_T), futex_hash(key), union futex_key *key); diff --git a/kernel/sys.c b/kernel/sys.c index c434968e9f5d..adc0de0aa364 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include @@ -2820,6 +2821,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, return -EINVAL; error = posixtimer_create_prctl(arg2); break; + case PR_FUTEX_HASH: + error = futex_hash_prctl(arg2, arg3, arg4); + break; default: trace_task_prctl_unknown(option, arg2, arg3, arg4, arg5); error = -EINVAL; -- cgit v1.2.3 From 7c4f75a21f636486d2969d9b6680403ea8483539 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 16 Apr 2025 18:29:13 +0200 Subject: futex: Allow automatic allocation of process wide futex hash Allocate a private futex hash with 16 slots if a task forks its first thread. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250416162921.513656-14-bigeasy@linutronix.de --- include/linux/futex.h | 6 ++++++ kernel/fork.c | 22 ++++++++++++++++++++++ kernel/futex/core.c | 11 +++++++++++ 3 files changed, 39 insertions(+) (limited to 'include/linux') diff --git a/include/linux/futex.h b/include/linux/futex.h index 8f1be08bef18..1d3f7555825e 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -80,6 +80,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4); #ifdef CONFIG_FUTEX_PRIVATE_HASH +int futex_hash_allocate_default(void); void futex_hash_free(struct mm_struct *mm); static inline void futex_mm_init(struct mm_struct *mm) @@ -88,6 +89,7 @@ static inline void futex_mm_init(struct mm_struct *mm) } #else /* !CONFIG_FUTEX_PRIVATE_HASH */ +static inline int futex_hash_allocate_default(void) { return 0; } static inline void futex_hash_free(struct mm_struct *mm) { } static inline void futex_mm_init(struct mm_struct *mm) { } #endif /* CONFIG_FUTEX_PRIVATE_HASH */ @@ -107,6 +109,10 @@ static inline int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsig { return -EINVAL; } +static inline int futex_hash_allocate_default(void) +{ + return 0; +} static inline void futex_hash_free(struct mm_struct *mm) { } static inline void futex_mm_init(struct mm_struct *mm) { } diff --git a/kernel/fork.c b/kernel/fork.c index 831dfec45054..1f5d8083eeb2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2164,6 +2164,13 @@ static void rv_task_fork(struct task_struct *p) #define rv_task_fork(p) do {} while (0) #endif +static bool need_futex_hash_allocate_default(u64 clone_flags) +{ + if ((clone_flags & (CLONE_THREAD | CLONE_VM)) != (CLONE_THREAD | CLONE_VM)) + return false; + return true; +} + /* * This creates a new process as a copy of the old one, * but does not actually start it yet. @@ -2544,6 +2551,21 @@ __latent_entropy struct task_struct *copy_process( if (retval) goto bad_fork_cancel_cgroup; + /* + * Allocate a default futex hash for the user process once the first + * thread spawns. 
+ */ + if (need_futex_hash_allocate_default(clone_flags)) { + retval = futex_hash_allocate_default(); + if (retval) + goto bad_fork_core_free; + /* + * If we fail beyond this point we don't free the allocated + * futex hash map. We assume that another thread will be created + * and makes use of it. The hash map will be freed once the main + * thread terminates. + */ + } /* * From this point on we must avoid any synchronous user-space * communication until we take the tasklist-lock. In particular, we do diff --git a/kernel/futex/core.c b/kernel/futex/core.c index 818df7420a1a..53b3a00a9253 100644 --- a/kernel/futex/core.c +++ b/kernel/futex/core.c @@ -1294,6 +1294,17 @@ static int futex_hash_allocate(unsigned int hash_slots, bool custom) return 0; } +int futex_hash_allocate_default(void) +{ + if (!current->mm) + return 0; + + if (current->mm->futex_phash) + return 0; + + return futex_hash_allocate(16, false); +} + static int futex_hash_get_slots(void) { struct futex_private_hash *fph; -- cgit v1.2.3 From bd54df5ea7cadac520e346d5f0fe5d58e635b6ba Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 16 Apr 2025 18:29:14 +0200 Subject: futex: Allow to resize the private local hash The mm_struct::futex_hash_lock guards the futex_hash_bucket assignment/replacement. The futex_hash_allocate()/PR_FUTEX_HASH_SET_SLOTS operation can now be invoked at runtime and resize an already existing internal private futex_hash_bucket to another size. The reallocation is based on an idea by Thomas Gleixner: The initial allocation of struct futex_private_hash sets the reference count to one. Every user acquires a reference on the local hash before using it and drops it after it enqueued itself on the hash bucket. There is no reference held while the task is scheduled out while waiting for the wake up. The resize process allocates a new struct futex_private_hash and drops the initial reference. Synchronized with mm_struct::futex_hash_lock it is checked if the reference counter for the currently used mm_struct::futex_phash is marked as DEAD. If so, then all users enqueued on the current private hash are requeued on the new private hash and the new private hash is set to mm_struct::futex_phash. Otherwise the newly allocated private hash is saved as mm_struct::futex_phash_new and the rehashing and reassigning is delayed to the futex_hash() caller once the reference counter is marked DEAD. The replacement is not performed at rcuref_put() time because certain callers, such as futex_wait_queue(), drop their reference after changing the task state. This task state change would be destroyed once the futex_hash_lock is acquired. The user can change the number of slots with PR_FUTEX_HASH_SET_SLOTS multiple times. An increase and a decrease are allowed and the request blocks until the assignment is done. The private hash allocated at thread creation is changed from 16 to 16 <= 4 * number_of_threads <= global_hash_size where number_of_threads can not exceed the number of online CPUs. Should the user set the size via PR_FUTEX_HASH_SET_SLOTS then the auto scaling is disabled. [peterz: reorganize the code to avoid state tracking and simplify new object handling, block the user until changes are in effect, allow increase and decrease of the hash].
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250416162921.513656-15-bigeasy@linutronix.de --- include/linux/futex.h | 3 +- include/linux/mm_types.h | 4 +- kernel/futex/core.c | 290 +++++++++++++++++++++++++++++++++++++++++++---- kernel/futex/requeue.c | 5 + 4 files changed, 281 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/futex.h b/include/linux/futex.h index 1d3f7555825e..40bc778b2bb4 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -85,7 +85,8 @@ void futex_hash_free(struct mm_struct *mm); static inline void futex_mm_init(struct mm_struct *mm) { - mm->futex_phash = NULL; + rcu_assign_pointer(mm->futex_phash, NULL); + mutex_init(&mm->futex_hash_lock); } #else /* !CONFIG_FUTEX_PRIVATE_HASH */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index a4b5661e4177..32ba5126e221 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -1033,7 +1033,9 @@ struct mm_struct { seqcount_t mm_lock_seq; #endif #ifdef CONFIG_FUTEX_PRIVATE_HASH - struct futex_private_hash *futex_phash; + struct mutex futex_hash_lock; + struct futex_private_hash __rcu *futex_phash; + struct futex_private_hash *futex_phash_new; #endif unsigned long hiwater_rss; /* High-watermark of RSS usage */ diff --git a/kernel/futex/core.c b/kernel/futex/core.c index 53b3a00a9253..9e7dad52abea 100644 --- a/kernel/futex/core.c +++ b/kernel/futex/core.c @@ -40,6 +40,7 @@ #include #include #include +#include #include "futex.h" #include "../locking/rtmutex_common.h" @@ -57,7 +58,9 @@ static struct { #define futex_hashmask (__futex_data.hashmask) struct futex_private_hash { + rcuref_t users; unsigned int hash_mask; + struct rcu_head rcu; void *mm; bool custom; struct futex_hash_bucket queues[]; @@ -129,11 +132,14 @@ static inline bool futex_key_is_private(union futex_key *key) bool futex_private_hash_get(struct futex_private_hash *fph) { - return false; + return rcuref_get(&fph->users); } void futex_private_hash_put(struct futex_private_hash *fph) { + /* Ignore return value, last put is verified via rcuref_is_dead() */ + if (rcuref_put(&fph->users)) + wake_up_var(fph->mm); } /** @@ -143,8 +149,23 @@ void futex_private_hash_put(struct futex_private_hash *fph) * Obtain an additional reference for the already obtained hash bucket. The * caller must already own an reference. 
*/ -void futex_hash_get(struct futex_hash_bucket *hb) { } -void futex_hash_put(struct futex_hash_bucket *hb) { } +void futex_hash_get(struct futex_hash_bucket *hb) +{ + struct futex_private_hash *fph = hb->priv; + + if (!fph) + return; + WARN_ON_ONCE(!futex_private_hash_get(fph)); +} + +void futex_hash_put(struct futex_hash_bucket *hb) +{ + struct futex_private_hash *fph = hb->priv; + + if (!fph) + return; + futex_private_hash_put(fph); +} static struct futex_hash_bucket * __futex_hash_private(union futex_key *key, struct futex_private_hash *fph) @@ -155,7 +176,7 @@ __futex_hash_private(union futex_key *key, struct futex_private_hash *fph) return NULL; if (!fph) - fph = key->private.mm->futex_phash; + fph = rcu_dereference(key->private.mm->futex_phash); if (!fph || !fph->hash_mask) return NULL; @@ -165,21 +186,119 @@ __futex_hash_private(union futex_key *key, struct futex_private_hash *fph) return &fph->queues[hash & fph->hash_mask]; } +static void futex_rehash_private(struct futex_private_hash *old, + struct futex_private_hash *new) +{ + struct futex_hash_bucket *hb_old, *hb_new; + unsigned int slots = old->hash_mask + 1; + unsigned int i; + + for (i = 0; i < slots; i++) { + struct futex_q *this, *tmp; + + hb_old = &old->queues[i]; + + spin_lock(&hb_old->lock); + plist_for_each_entry_safe(this, tmp, &hb_old->chain, list) { + + plist_del(&this->list, &hb_old->chain); + futex_hb_waiters_dec(hb_old); + + WARN_ON_ONCE(this->lock_ptr != &hb_old->lock); + + hb_new = __futex_hash(&this->key, new); + futex_hb_waiters_inc(hb_new); + /* + * The new pointer isn't published yet but an already + * moved user can be unqueued due to timeout or signal. + */ + spin_lock_nested(&hb_new->lock, SINGLE_DEPTH_NESTING); + plist_add(&this->list, &hb_new->chain); + this->lock_ptr = &hb_new->lock; + spin_unlock(&hb_new->lock); + } + spin_unlock(&hb_old->lock); + } +} + +static bool __futex_pivot_hash(struct mm_struct *mm, + struct futex_private_hash *new) +{ + struct futex_private_hash *fph; + + WARN_ON_ONCE(mm->futex_phash_new); + + fph = rcu_dereference_protected(mm->futex_phash, + lockdep_is_held(&mm->futex_hash_lock)); + if (fph) { + if (!rcuref_is_dead(&fph->users)) { + mm->futex_phash_new = new; + return false; + } + + futex_rehash_private(fph, new); + } + rcu_assign_pointer(mm->futex_phash, new); + kvfree_rcu(fph, rcu); + return true; +} + +static void futex_pivot_hash(struct mm_struct *mm) +{ + scoped_guard(mutex, &mm->futex_hash_lock) { + struct futex_private_hash *fph; + + fph = mm->futex_phash_new; + if (fph) { + mm->futex_phash_new = NULL; + __futex_pivot_hash(mm, fph); + } + } +} + struct futex_private_hash *futex_private_hash(void) { struct mm_struct *mm = current->mm; - struct futex_private_hash *fph; + /* + * Ideally we don't loop. If there is a replacement in progress + * then a new private hash is already prepared and a reference can't be + * obtained once the last user dropped it's. + * In that case we block on mm_struct::futex_hash_lock and either have + * to perform the replacement or wait while someone else is doing the + * job. Eitherway, on the second iteration we acquire a reference on the + * new private hash or loop again because a new replacement has been + * requested. 
+ */ +again: + scoped_guard(rcu) { + struct futex_private_hash *fph; - fph = mm->futex_phash; - return fph; + fph = rcu_dereference(mm->futex_phash); + if (!fph) + return NULL; + + if (rcuref_get(&fph->users)) + return fph; + } + futex_pivot_hash(mm); + goto again; } struct futex_hash_bucket *futex_hash(union futex_key *key) { + struct futex_private_hash *fph; struct futex_hash_bucket *hb; - hb = __futex_hash(key, NULL); - return hb; +again: + scoped_guard(rcu) { + hb = __futex_hash(key, NULL); + fph = hb->priv; + + if (!fph || futex_private_hash_get(fph)) + return hb; + } + futex_pivot_hash(key->private.mm); + goto again; } #else /* !CONFIG_FUTEX_PRIVATE_HASH */ @@ -664,6 +783,8 @@ int futex_unqueue(struct futex_q *q) spinlock_t *lock_ptr; int ret = 0; + /* RCU so lock_ptr is not going away during locking. */ + guard(rcu)(); /* In the common case we don't take the spinlock, which is nice. */ retry: /* @@ -1065,6 +1186,10 @@ static void exit_pi_state_list(struct task_struct *curr) struct futex_pi_state *pi_state; union futex_key key = FUTEX_KEY_INIT; + /* + * The mutex mm_struct::futex_hash_lock might be acquired. + */ + might_sleep(); /* * Ensure the hash remains stable (no resize) during the while loop * below. The hb pointer is acquired under the pi_lock so we can't block @@ -1261,7 +1386,51 @@ static void futex_hash_bucket_init(struct futex_hash_bucket *fhb, #ifdef CONFIG_FUTEX_PRIVATE_HASH void futex_hash_free(struct mm_struct *mm) { - kvfree(mm->futex_phash); + struct futex_private_hash *fph; + + kvfree(mm->futex_phash_new); + fph = rcu_dereference_raw(mm->futex_phash); + if (fph) { + WARN_ON_ONCE(rcuref_read(&fph->users) > 1); + kvfree(fph); + } +} + +static bool futex_pivot_pending(struct mm_struct *mm) +{ + struct futex_private_hash *fph; + + guard(rcu)(); + + if (!mm->futex_phash_new) + return true; + + fph = rcu_dereference(mm->futex_phash); + return rcuref_is_dead(&fph->users); +} + +static bool futex_hash_less(struct futex_private_hash *a, + struct futex_private_hash *b) +{ + /* user provided always wins */ + if (!a->custom && b->custom) + return true; + if (a->custom && !b->custom) + return false; + + /* zero-sized hash wins */ + if (!b->hash_mask) + return true; + if (!a->hash_mask) + return false; + + /* keep the biggest */ + if (a->hash_mask < b->hash_mask) + return true; + if (a->hash_mask > b->hash_mask) + return false; + + return false; /* equal */ } static int futex_hash_allocate(unsigned int hash_slots, bool custom) @@ -1273,16 +1442,23 @@ static int futex_hash_allocate(unsigned int hash_slots, bool custom) if (hash_slots && (hash_slots == 1 || !is_power_of_2(hash_slots))) return -EINVAL; - if (mm->futex_phash) - return -EALREADY; - - if (!thread_group_empty(current)) - return -EINVAL; + /* + * Once we've disabled the global hash there is no way back. + */ + scoped_guard(rcu) { + fph = rcu_dereference(mm->futex_phash); + if (fph && !fph->hash_mask) { + if (custom) + return -EBUSY; + return 0; + } + } fph = kvzalloc(struct_size(fph, queues, hash_slots), GFP_KERNEL_ACCOUNT | __GFP_NOWARN); if (!fph) return -ENOMEM; + rcuref_init(&fph->users, 1); fph->hash_mask = hash_slots ? hash_slots - 1 : 0; fph->custom = custom; fph->mm = mm; @@ -1290,26 +1466,102 @@ static int futex_hash_allocate(unsigned int hash_slots, bool custom) for (i = 0; i < hash_slots; i++) futex_hash_bucket_init(&fph->queues[i], fph); - mm->futex_phash = fph; + if (custom) { + /* + * Only let prctl() wait / retry; don't unduly delay clone(). 
+ */ +again: + wait_var_event(mm, futex_pivot_pending(mm)); + } + + scoped_guard(mutex, &mm->futex_hash_lock) { + struct futex_private_hash *free __free(kvfree) = NULL; + struct futex_private_hash *cur, *new; + + cur = rcu_dereference_protected(mm->futex_phash, + lockdep_is_held(&mm->futex_hash_lock)); + new = mm->futex_phash_new; + mm->futex_phash_new = NULL; + + if (fph) { + if (cur && !new) { + /* + * If we have an existing hash, but do not yet have + * allocated a replacement hash, drop the initial + * reference on the existing hash. + */ + futex_private_hash_put(cur); + } + + if (new) { + /* + * Two updates raced; throw out the lesser one. + */ + if (futex_hash_less(new, fph)) { + free = new; + new = fph; + } else { + free = fph; + } + } else { + new = fph; + } + fph = NULL; + } + + if (new) { + /* + * Will set mm->futex_phash_new on failure; + * futex_private_hash_get() will try again. + */ + if (!__futex_pivot_hash(mm, new) && custom) + goto again; + } + } return 0; } int futex_hash_allocate_default(void) { + unsigned int threads, buckets, current_buckets = 0; + struct futex_private_hash *fph; + if (!current->mm) return 0; - if (current->mm->futex_phash) + scoped_guard(rcu) { + threads = min_t(unsigned int, + get_nr_threads(current), + num_online_cpus()); + + fph = rcu_dereference(current->mm->futex_phash); + if (fph) { + if (fph->custom) + return 0; + + current_buckets = fph->hash_mask + 1; + } + } + + /* + * The default allocation will remain within + * 16 <= threads * 4 <= global hash size + */ + buckets = roundup_pow_of_two(4 * threads); + buckets = clamp(buckets, 16, futex_hashmask + 1); + + if (current_buckets >= buckets) return 0; - return futex_hash_allocate(16, false); + return futex_hash_allocate(buckets, false); } static int futex_hash_get_slots(void) { struct futex_private_hash *fph; - fph = current->mm->futex_phash; + guard(rcu)(); + fph = rcu_dereference(current->mm->futex_phash); if (fph && fph->hash_mask) return fph->hash_mask + 1; return 0; diff --git a/kernel/futex/requeue.c b/kernel/futex/requeue.c index b0e64fd454d9..c716a66f8692 100644 --- a/kernel/futex/requeue.c +++ b/kernel/futex/requeue.c @@ -87,6 +87,11 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, futex_hb_waiters_inc(hb2); plist_add(&q->list, &hb2->chain); q->lock_ptr = &hb2->lock; + /* + * hb1 and hb2 belong to the same futex_hash_bucket_private + * because if we managed get a reference on hb1 then it can't be + * replaced. Therefore we avoid put(hb1)+get(hb2) here. + */ } q->key = *key2; } -- cgit v1.2.3 From cec199c5e39bde7191a08087cc3d002ccfab31ff Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 16 Apr 2025 18:29:16 +0200 Subject: futex: Implement FUTEX2_NUMA Extend the futex2 interface to be numa aware. When FUTEX2_NUMA is specified for a futex, the user value is extended to two words (of the same size). The first is the user value we all know, the second one will be the node to place this futex on. struct futex_numa_32 { u32 val; u32 node; }; When node is set to ~0, WAIT will set it to the current node_id such that WAKE knows where to find it. If userspace corrupts the node value between WAIT and WAKE, the futex will not be found and no wakeup will happen. When FUTEX2_NUMA is not set, the node is simply an extension of the hash, such that traditional futexes are still interleaved over the nodes. This is done to avoid having to have a separate !numa hash-table. 
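For illustration only (an editorial sketch, not part of this patch), the userspace view of such a NUMA futex; only the two-word layout and the ~0 "no node" value come from this change, the alignment follows from the doubled futex size:

	#include <stdint.h>

	struct futex_numa_32 {
		uint32_t val;	/* the futex value, as before */
		uint32_t node;	/* the node this futex is hashed on */
	};

	/* FUTEX2_NUMA doubles the futex word, so natural alignment doubles too. */
	static struct futex_numa_32 my_futex __attribute__((aligned(8))) = {
		.val  = 0,
		.node = UINT32_MAX,	/* FUTEX_NO_NODE: WAIT fills in the node id */
	};

	/*
	 * A waiter passes &my_futex with FUTEX2_SIZE_U32 | FUTEX2_NUMA; the
	 * kernel stores its node id in my_futex.node.  A later wake must see
	 * the same node value there, otherwise the waiter is not found.
	 */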
[bigeasy: ensure to have at least hashsize of 4 in futex_init(), add pr_info() for size and allocation information. Cast the naddr math to void*] Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250416162921.513656-17-bigeasy@linutronix.de --- include/linux/futex.h | 3 ++ include/uapi/linux/futex.h | 7 ++++ kernel/futex/core.c | 100 +++++++++++++++++++++++++++++++++++++-------- kernel/futex/futex.h | 33 +++++++++++++-- 4 files changed, 123 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/include/linux/futex.h b/include/linux/futex.h index 40bc778b2bb4..eccc99751bd9 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -34,6 +34,7 @@ union futex_key { u64 i_seq; unsigned long pgoff; unsigned int offset; + /* unsigned int node; */ } shared; struct { union { @@ -42,11 +43,13 @@ union futex_key { }; unsigned long address; unsigned int offset; + /* unsigned int node; */ } private; struct { u64 ptr; unsigned long word; unsigned int offset; + unsigned int node; /* NOT hashed! */ } both; }; diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h index d2ee625ea189..6b94da467e70 100644 --- a/include/uapi/linux/futex.h +++ b/include/uapi/linux/futex.h @@ -74,6 +74,13 @@ /* do not use */ #define FUTEX_32 FUTEX2_SIZE_U32 /* historical accident :-( */ +/* + * When FUTEX2_NUMA doubles the futex word, the second word is a node value. + * The special value -1 indicates no-node. This is the same value as + * NUMA_NO_NODE, except that value is not ABI, this is. + */ +#define FUTEX_NO_NODE (-1) + /* * Max numbers of elements in a futex_waitv array */ diff --git a/kernel/futex/core.c b/kernel/futex/core.c index 8054fda94719..1490e6492993 100644 --- a/kernel/futex/core.c +++ b/kernel/futex/core.c @@ -36,6 +36,8 @@ #include #include #include +#include +#include #include #include #include @@ -51,11 +53,14 @@ * reside in the same cacheline. */ static struct { - struct futex_hash_bucket *queues; unsigned long hashmask; + unsigned int hashshift; + struct futex_hash_bucket *queues[MAX_NUMNODES]; } __futex_data __read_mostly __aligned(2*sizeof(long)); -#define futex_queues (__futex_data.queues) -#define futex_hashmask (__futex_data.hashmask) + +#define futex_hashmask (__futex_data.hashmask) +#define futex_hashshift (__futex_data.hashshift) +#define futex_queues (__futex_data.queues) struct futex_private_hash { rcuref_t users; @@ -339,15 +344,35 @@ __futex_hash(union futex_key *key, struct futex_private_hash *fph) { struct futex_hash_bucket *hb; u32 hash; + int node; hb = __futex_hash_private(key, fph); if (hb) return hb; hash = jhash2((u32 *)key, - offsetof(typeof(*key), both.offset) / 4, + offsetof(typeof(*key), both.offset) / sizeof(u32), key->both.offset); - return &futex_queues[hash & futex_hashmask]; + node = key->both.node; + + if (node == FUTEX_NO_NODE) { + /* + * In case of !FLAGS_NUMA, use some unused hash bits to pick a + * node -- this ensures regular futexes are interleaved across + * the nodes and avoids having to allocate multiple + * hash-tables. + * + * NOTE: this isn't perfectly uniform, but it is fast and + * handles sparse node masks. 
+ */ + node = (hash >> futex_hashshift) % nr_node_ids; + if (!node_possible(node)) { + node = find_next_bit_wrap(node_possible_map.bits, + nr_node_ids, node); + } + } + + return &futex_queues[node][hash & futex_hashmask]; } /** @@ -454,25 +479,49 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key, struct page *page; struct folio *folio; struct address_space *mapping; - int err, ro = 0; + int node, err, size, ro = 0; bool fshared; fshared = flags & FLAGS_SHARED; + size = futex_size(flags); + if (flags & FLAGS_NUMA) + size *= 2; /* * The futex address must be "naturally" aligned. */ key->both.offset = address % PAGE_SIZE; - if (unlikely((address % sizeof(u32)) != 0)) + if (unlikely((address % size) != 0)) return -EINVAL; address -= key->both.offset; - if (unlikely(!access_ok(uaddr, sizeof(u32)))) + if (unlikely(!access_ok(uaddr, size))) return -EFAULT; if (unlikely(should_fail_futex(fshared))) return -EFAULT; + if (flags & FLAGS_NUMA) { + u32 __user *naddr = (void *)uaddr + size / 2; + + if (futex_get_value(&node, naddr)) + return -EFAULT; + + if (node == FUTEX_NO_NODE) { + node = numa_node_id(); + if (futex_put_value(node, naddr)) + return -EFAULT; + + } else if (node >= MAX_NUMNODES || !node_possible(node)) { + return -EINVAL; + } + + key->both.node = node; + + } else { + key->both.node = FUTEX_NO_NODE; + } + /* * PROCESS_PRIVATE futexes are fast. * As the mm cannot disappear under us and the 'key' only needs @@ -1642,24 +1691,41 @@ int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4) static int __init futex_init(void) { unsigned long hashsize, i; - unsigned int futex_shift; + unsigned int order, n; + unsigned long size; #ifdef CONFIG_BASE_SMALL hashsize = 16; #else - hashsize = roundup_pow_of_two(256 * num_possible_cpus()); + hashsize = 256 * num_possible_cpus(); + hashsize /= num_possible_nodes(); + hashsize = max(4, hashsize); + hashsize = roundup_pow_of_two(hashsize); #endif + futex_hashshift = ilog2(hashsize); + size = sizeof(struct futex_hash_bucket) * hashsize; + order = get_order(size); - futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues), - hashsize, 0, 0, - &futex_shift, NULL, - hashsize, hashsize); - hashsize = 1UL << futex_shift; + for_each_node(n) { + struct futex_hash_bucket *table; - for (i = 0; i < hashsize; i++) - futex_hash_bucket_init(&futex_queues[i], NULL); + if (order > MAX_PAGE_ORDER) + table = vmalloc_huge_node(size, GFP_KERNEL, n); + else + table = alloc_pages_exact_nid(n, size, GFP_KERNEL); + + BUG_ON(!table); + + for (i = 0; i < hashsize; i++) + futex_hash_bucket_init(&table[i], NULL); + + futex_queues[n] = table; + } futex_hashmask = hashsize - 1; + pr_info("futex hash table entries: %lu (%lu bytes on %d NUMA nodes, total %lu KiB, %s).\n", + hashsize, size, num_possible_nodes(), size * num_possible_nodes() / 1024, + order > MAX_PAGE_ORDER ? 
"vmalloc" : "linear"); return 0; } core_initcall(futex_init); diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h index 899aed5acde1..acc795367889 100644 --- a/kernel/futex/futex.h +++ b/kernel/futex/futex.h @@ -54,7 +54,7 @@ static inline unsigned int futex_to_flags(unsigned int op) return flags; } -#define FUTEX2_VALID_MASK (FUTEX2_SIZE_MASK | FUTEX2_PRIVATE) +#define FUTEX2_VALID_MASK (FUTEX2_SIZE_MASK | FUTEX2_NUMA | FUTEX2_PRIVATE) /* FUTEX2_ to FLAGS_ */ static inline unsigned int futex2_to_flags(unsigned int flags2) @@ -87,6 +87,19 @@ static inline bool futex_flags_valid(unsigned int flags) if ((flags & FLAGS_SIZE_MASK) != FLAGS_SIZE_32) return false; + /* + * Must be able to represent both FUTEX_NO_NODE and every valid nodeid + * in a futex word. + */ + if (flags & FLAGS_NUMA) { + int bits = 8 * futex_size(flags); + u64 max = ~0ULL; + + max >>= 64 - bits; + if (nr_node_ids >= max) + return false; + } + return true; } @@ -282,7 +295,7 @@ static inline int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 * This looks a bit overkill, but generally just results in a couple * of instructions. */ -static __always_inline int futex_read_inatomic(u32 *dest, u32 __user *from) +static __always_inline int futex_get_value(u32 *dest, u32 __user *from) { u32 val; @@ -299,12 +312,26 @@ Efault: return -EFAULT; } +static __always_inline int futex_put_value(u32 val, u32 __user *to) +{ + if (can_do_masked_user_access()) + to = masked_user_access_begin(to); + else if (!user_read_access_begin(to, sizeof(*to))) + return -EFAULT; + unsafe_put_user(val, to, Efault); + user_read_access_end(); + return 0; +Efault: + user_read_access_end(); + return -EFAULT; +} + static inline int futex_get_value_locked(u32 *dest, u32 __user *from) { int ret; pagefault_disable(); - ret = futex_read_inatomic(dest, from); + ret = futex_get_value(dest, from); pagefault_enable(); return ret; -- cgit v1.2.3 From c042c505210dc3453f378df432c10fff3d471bc5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 16 Apr 2025 18:29:17 +0200 Subject: futex: Implement FUTEX2_MPOL Extend the futex2 interface to be aware of mempolicy. When FUTEX2_MPOL is specified and there is a MPOL_PREFERRED or home_node specified covering the futex address, use that hash-map. Notably, in this case the futex will go to the global node hashtable, even if it is a PRIVATE futex. When FUTEX2_NUMA|FUTEX2_MPOL is specified and the user specified node value is FUTEX_NO_NODE, the MPOL lookup (as described above) will be tried first before reverting to setting node to the local node. 
[bigeasy: add CONFIG_FUTEX_MPOL, add MPOL to FUTEX2_VALID_MASK, write the node only to user if FUTEX_NO_NODE was supplied] Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250416162921.513656-18-bigeasy@linutronix.de --- include/linux/mmap_lock.h | 4 ++ include/uapi/linux/futex.h | 2 +- init/Kconfig | 5 ++ kernel/futex/core.c | 116 ++++++++++++++++++++++++++++++++++++++------- kernel/futex/futex.h | 6 ++- 5 files changed, 115 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 4706c6769902..e0eddfd306ef 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -7,6 +7,7 @@ #include #include #include +#include #define MMAP_LOCK_INITIALIZER(name) \ .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock), @@ -211,6 +212,9 @@ static inline void mmap_read_unlock(struct mm_struct *mm) up_read(&mm->mmap_lock); } +DEFINE_GUARD(mmap_read_lock, struct mm_struct *, + mmap_read_lock(_T), mmap_read_unlock(_T)) + static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) { __mmap_lock_trace_released(mm, false); diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h index 6b94da467e70..7e2744ec8933 100644 --- a/include/uapi/linux/futex.h +++ b/include/uapi/linux/futex.h @@ -63,7 +63,7 @@ #define FUTEX2_SIZE_U32 0x02 #define FUTEX2_SIZE_U64 0x03 #define FUTEX2_NUMA 0x04 - /* 0x08 */ +#define FUTEX2_MPOL 0x08 /* 0x10 */ /* 0x20 */ /* 0x40 */ diff --git a/init/Kconfig b/init/Kconfig index 4b84da2b2ec4..b373267ba2e2 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1704,6 +1704,11 @@ config FUTEX_PRIVATE_HASH depends on FUTEX && !BASE_SMALL && MMU default y +config FUTEX_MPOL + bool + depends on FUTEX && NUMA + default y + config EPOLL bool "Enable eventpoll support" if EXPERT default y diff --git a/kernel/futex/core.c b/kernel/futex/core.c index 1490e6492993..19a2c65f3d37 100644 --- a/kernel/futex/core.c +++ b/kernel/futex/core.c @@ -43,6 +43,8 @@ #include #include #include +#include +#include #include "futex.h" #include "../locking/rtmutex_common.h" @@ -328,6 +330,75 @@ struct futex_hash_bucket *futex_hash(union futex_key *key) #endif /* CONFIG_FUTEX_PRIVATE_HASH */ +#ifdef CONFIG_FUTEX_MPOL + +static int __futex_key_to_node(struct mm_struct *mm, unsigned long addr) +{ + struct vm_area_struct *vma = vma_lookup(mm, addr); + struct mempolicy *mpol; + int node = FUTEX_NO_NODE; + + if (!vma) + return FUTEX_NO_NODE; + + mpol = vma_policy(vma); + if (!mpol) + return FUTEX_NO_NODE; + + switch (mpol->mode) { + case MPOL_PREFERRED: + node = first_node(mpol->nodes); + break; + case MPOL_PREFERRED_MANY: + case MPOL_BIND: + if (mpol->home_node != NUMA_NO_NODE) + node = mpol->home_node; + break; + default: + break; + } + + return node; +} + +static int futex_key_to_node_opt(struct mm_struct *mm, unsigned long addr) +{ + int seq, node; + + guard(rcu)(); + + if (!mmap_lock_speculate_try_begin(mm, &seq)) + return -EBUSY; + + node = __futex_key_to_node(mm, addr); + + if (mmap_lock_speculate_retry(mm, seq)) + return -EAGAIN; + + return node; +} + +static int futex_mpol(struct mm_struct *mm, unsigned long addr) +{ + int node; + + node = futex_key_to_node_opt(mm, addr); + if (node >= FUTEX_NO_NODE) + return node; + + guard(mmap_read_lock)(mm); + return __futex_key_to_node(mm, addr); +} + +#else /* !CONFIG_FUTEX_MPOL */ + +static int futex_mpol(struct mm_struct *mm, unsigned long addr) +{ + return FUTEX_NO_NODE; +} 
+ +#endif /* CONFIG_FUTEX_MPOL */ + /** * __futex_hash - Return the hash bucket * @key: Pointer to the futex key for which the hash is calculated @@ -342,18 +413,20 @@ struct futex_hash_bucket *futex_hash(union futex_key *key) static struct futex_hash_bucket * __futex_hash(union futex_key *key, struct futex_private_hash *fph) { - struct futex_hash_bucket *hb; + int node = key->both.node; u32 hash; - int node; - hb = __futex_hash_private(key, fph); - if (hb) - return hb; + if (node == FUTEX_NO_NODE) { + struct futex_hash_bucket *hb; + + hb = __futex_hash_private(key, fph); + if (hb) + return hb; + } hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / sizeof(u32), key->both.offset); - node = key->both.node; if (node == FUTEX_NO_NODE) { /* @@ -480,6 +553,7 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key, struct folio *folio; struct address_space *mapping; int node, err, size, ro = 0; + bool node_updated = false; bool fshared; fshared = flags & FLAGS_SHARED; @@ -501,27 +575,37 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key, if (unlikely(should_fail_futex(fshared))) return -EFAULT; + node = FUTEX_NO_NODE; + if (flags & FLAGS_NUMA) { u32 __user *naddr = (void *)uaddr + size / 2; if (futex_get_value(&node, naddr)) return -EFAULT; - if (node == FUTEX_NO_NODE) { - node = numa_node_id(); - if (futex_put_value(node, naddr)) - return -EFAULT; - - } else if (node >= MAX_NUMNODES || !node_possible(node)) { + if (node != FUTEX_NO_NODE && + (node >= MAX_NUMNODES || !node_possible(node))) return -EINVAL; - } + } - key->both.node = node; + if (node == FUTEX_NO_NODE && (flags & FLAGS_MPOL)) { + node = futex_mpol(mm, address); + node_updated = true; + } - } else { - key->both.node = FUTEX_NO_NODE; + if (flags & FLAGS_NUMA) { + u32 __user *naddr = (void *)uaddr + size / 2; + + if (node == FUTEX_NO_NODE) { + node = numa_node_id(); + node_updated = true; + } + if (node_updated && futex_put_value(node, naddr)) + return -EFAULT; } + key->both.node = node; + /* * PROCESS_PRIVATE futexes are fast. * As the mm cannot disappear under us and the 'key' only needs diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h index acc795367889..069fc2a83080 100644 --- a/kernel/futex/futex.h +++ b/kernel/futex/futex.h @@ -39,6 +39,7 @@ #define FLAGS_HAS_TIMEOUT 0x0040 #define FLAGS_NUMA 0x0080 #define FLAGS_STRICT 0x0100 +#define FLAGS_MPOL 0x0200 /* FUTEX_ to FLAGS_ */ static inline unsigned int futex_to_flags(unsigned int op) @@ -54,7 +55,7 @@ static inline unsigned int futex_to_flags(unsigned int op) return flags; } -#define FUTEX2_VALID_MASK (FUTEX2_SIZE_MASK | FUTEX2_NUMA | FUTEX2_PRIVATE) +#define FUTEX2_VALID_MASK (FUTEX2_SIZE_MASK | FUTEX2_NUMA | FUTEX2_MPOL | FUTEX2_PRIVATE) /* FUTEX2_ to FLAGS_ */ static inline unsigned int futex2_to_flags(unsigned int flags2) @@ -67,6 +68,9 @@ static inline unsigned int futex2_to_flags(unsigned int flags2) if (flags2 & FUTEX2_NUMA) flags |= FLAGS_NUMA; + if (flags2 & FUTEX2_MPOL) + flags |= FLAGS_MPOL; + return flags; } -- cgit v1.2.3 From 01475aedfdfa33a5ee3219079426f5743367c624 Mon Sep 17 00:00:00 2001 From: Nam Cao Date: Mon, 28 Apr 2025 21:34:45 +0200 Subject: futex: Fix outdated comment in struct restart_block Since commit 2070887fdeac ("futex: fix restart in wait_requeue_pi"), futex_wait_requeue_pi() no longer uses restart_block. Update the comment accordingly. 
Signed-off-by: Nam Cao Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/all/20250428193445.4571-1-namcao@linutronix.de --- include/linux/restart_block.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h index 13f17676c5f4..7e50bbc94e47 100644 --- a/include/linux/restart_block.h +++ b/include/linux/restart_block.h @@ -26,7 +26,7 @@ struct restart_block { unsigned long arch_data; long (*fn)(struct restart_block *); union { - /* For futex_wait and futex_wait_requeue_pi */ + /* For futex_wait() */ struct { u32 __user *uaddr; u32 val; -- cgit v1.2.3 From 094ac8cff7858bee5fa4554f6ea66c964f8e160e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 10 May 2025 10:45:28 +0200 Subject: futex: Relax the rcu_assign_pointer() assignment of mm->futex_phash in futex_mm_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The following commit added an rcu_assign_pointer() assignment to futex_mm_init() in <linux/futex.h>: bd54df5ea7ca ("futex: Allow to resize the private local hash") Which breaks the build on older compilers (gcc-9, x86-64 defconfig): CC io_uring/futex.o In file included from ./arch/x86/include/generated/asm/rwonce.h:1, from ./include/linux/compiler.h:390, from ./include/linux/array_size.h:5, from ./include/linux/kernel.h:16, from io_uring/futex.c:2: ./include/linux/futex.h: In function 'futex_mm_init': ./include/linux/rcupdate.h:555:36: error: dereferencing pointer to incomplete type 'struct futex_private_hash' The problem is that this variant of rcu_assign_pointer() wants to know the full type of 'struct futex_private_hash', which type is local to futex.c: kernel/futex/core.c:struct futex_private_hash { There are a couple of mechanical solutions for this bug: - we can uninline futex_mm_init() and move it into futex/core.c - or we can share the structure definition with kernel/fork.c. But both of these solutions have disadvantages: the first one adds runtime overhead, while the second one dis-encapsulates private futex types. A third solution, implemented by this patch, is to just initialize mm->futex_phash with NULL like the patch below, it's not like this new MM's ->futex_phash can be observed externally until the task is inserted into the task list, which guarantees full store ordering. The relaxation of this initialization might also give a tiny speedup on certain platforms.
Fixes: bd54df5ea7ca ("futex: Allow to resize the private local hash") Signed-off-by: Ingo Molnar Cc: André Almeida Cc: Darren Hart Cc: Davidlohr Bueso Cc: Juri Lelli Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Valentin Schneider Cc: Waiman Long Link: https://lore.kernel.org/r/aB8SI00EHBri23lB@gmail.com --- include/linux/futex.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/futex.h b/include/linux/futex.h index eccc99751bd9..168ffd5996b4 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -88,7 +88,14 @@ void futex_hash_free(struct mm_struct *mm); static inline void futex_mm_init(struct mm_struct *mm) { - rcu_assign_pointer(mm->futex_phash, NULL); + /* + * No need for rcu_assign_pointer() here, as we can rely on + * tasklist_lock write-ordering in copy_process(), before + * the task's MM becomes visible and the ->futex_phash + * becomes externally observable: + */ + mm->futex_phash = NULL; + mutex_init(&mm->futex_hash_lock); } -- cgit v1.2.3 From 279f2c2c8e2169403d01190f042efa6e41731578 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Sat, 17 May 2025 17:14:53 +0200 Subject: futex: Use RCU_INIT_POINTER() in futex_mm_init(). There is no need for an explicit NULL pointer initialisation plus a comment why it is okay. RCU_INIT_POINTER() can be used for NULL initialisations and it is documented. This has been build tested with gcc version 9.3.0 (Debian 9.3.0-22) on a x86-64 defconfig. Fixes: 094ac8cff7858 ("futex: Relax the rcu_assign_pointer() assignment of mm->futex_phash in futex_mm_init()") Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250517151455.1065363-4-bigeasy@linutronix.de --- include/linux/futex.h | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/futex.h b/include/linux/futex.h index 168ffd5996b4..005b040c4791 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -88,14 +88,7 @@ void futex_hash_free(struct mm_struct *mm); static inline void futex_mm_init(struct mm_struct *mm) { - /* - * No need for rcu_assign_pointer() here, as we can rely on - * tasklist_lock write-ordering in copy_process(), before - * the task's MM becomes visible and the ->futex_phash - * becomes externally observable: - */ - mm->futex_phash = NULL; - + RCU_INIT_POINTER(mm->futex_phash, NULL); mutex_init(&mm->futex_hash_lock); } -- cgit v1.2.3
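Taken together, the PR_FUTEX_HASH interface added by this series can be exercised from userspace roughly as follows (an editorial sketch, not part of any patch above; the fallback defines mirror the uapi <linux/prctl.h> hunk, and the unused prctl() arguments are passed as zero since PR_FUTEX_HASH_SET_SLOTS rejects a non-zero arg4):

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_FUTEX_HASH
	# define PR_FUTEX_HASH			78
	# define PR_FUTEX_HASH_SET_SLOTS	1
	# define PR_FUTEX_HASH_GET_SLOTS	2
	#endif

	int main(void)
	{
		/* Request a private futex hash with 16 slots (must be a power of two). */
		if (prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, 16, 0, 0))
			perror("PR_FUTEX_HASH_SET_SLOTS");

		/* 0 means the global hash is used; > 0 is the private slot count. */
		printf("futex hash slots: %d\n",
		       prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_SLOTS, 0, 0, 0));
		return 0;
	}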