From 80ddce5f2dbd0e83eadc9f9d373439180d599fe5 Mon Sep 17 00:00:00 2001
From: Ahmad Fatoum
Date: Sat, 8 Jul 2023 13:27:19 +0200
Subject: thermal: core: constify params in thermal_zone_device_register

Since commit 3d439b1a2ad3 ("thermal/core: Alloc-copy-free the thermal
zone parameters structure"), thermal_zone_device_register() allocates a
copy of the tzp argument and callers need not explicitly manage its
lifetime. This means the function no longer cares about the parameter
being mutable, so constify it.

No functional change.

Signed-off-by: Ahmad Fatoum
Acked-by: Daniel Lezcano
Signed-off-by: Rafael J. Wysocki
---
 include/linux/thermal.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 87837094d549..dee66ade89a0 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -301,14 +301,14 @@ int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
 #ifdef CONFIG_THERMAL
 struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
 		void *, struct thermal_zone_device_ops *,
-		struct thermal_zone_params *, int, int);
+		const struct thermal_zone_params *, int, int);
 void thermal_zone_device_unregister(struct thermal_zone_device *);
 
 struct thermal_zone_device *
 thermal_zone_device_register_with_trips(const char *, struct thermal_trip *, int, int,
					 void *, struct thermal_zone_device_ops *,
-					 struct thermal_zone_params *, int, int);
+					 const struct thermal_zone_params *, int, int);
 
 void *thermal_zone_device_priv(struct thermal_zone_device *tzd);
 const char *thermal_zone_device_type(struct thermal_zone_device *tzd);
@@ -348,7 +348,7 @@ void thermal_zone_device_critical(struct thermal_zone_device *tz);
 static inline struct thermal_zone_device *thermal_zone_device_register(
	const char *type, int trips, int mask, void *devdata,
	struct thermal_zone_device_ops *ops,
-	struct thermal_zone_params *tzp,
+	const struct thermal_zone_params *tzp,
	int passive_delay, int polling_delay)
 { return ERR_PTR(-ENODEV); }
 static inline void thermal_zone_device_unregister(
--
cgit v1.2.3


From e7b915219baa4b2bf30afe1dbaca7a24ff627ad2 Mon Sep 17 00:00:00 2001
From: Johan Hovold
Date: Thu, 13 Jul 2023 16:57:40 +0200
Subject: PM: sleep: wakeirq: drop unused enable helpers

Drop the wake-irq enable and disable helpers which have not been used
since commit bed570307ed7 ("PM / wakeirq: Fix dedicated wakeirq for
drivers not using autosuspend").

Note that these functions are essentially just leftovers from the first
iteration of the wake-irq implementation where device drivers were
supposed to call these functions themselves instead of the PM core (as
is also indicated by the bogus kernel doc comments).

Signed-off-by: Johan Hovold
Reviewed-by: Tony Lindgren
Signed-off-by: Rafael J. Wysocki
---
 drivers/base/power/wakeirq.c | 49 --------------------------------------------
 include/linux/pm_wakeirq.h   | 10 ---------
 2 files changed, 59 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index afd094dec5ca..42171f766dcb 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -194,7 +194,6 @@ err_free:
	return err;
 }
 
-
 /**
  * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
  * @dev: Device entry
@@ -206,11 +205,6 @@ err_free:
  * Sets up a threaded interrupt handler for a device that has
  * a dedicated wake-up interrupt in addition to the device IO
  * interrupt.
- *
- * The interrupt starts disabled, and needs to be managed for
- * the device by the bus code or the device driver using
- * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
- * functions.
  */
 int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 {
@@ -232,11 +226,6 @@ EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
  * the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend()
  * to enable dedicated wake-up interrupt after running the runtime suspend
  * callback for @dev.
- *
- * The interrupt starts disabled, and needs to be managed for
- * the device by the bus code or the device driver using
- * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
- * functions.
  */
 int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
 {
@@ -244,44 +233,6 @@ int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
 }
 EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
 
-/**
- * dev_pm_enable_wake_irq - Enable device wake-up interrupt
- * @dev: Device
- *
- * Optionally called from the bus code or the device driver for
- * runtime_resume() to override the PM runtime core managed wake-up
- * interrupt handling to enable the wake-up interrupt.
- *
- * Note that for runtime_suspend()) the wake-up interrupts
- * should be unconditionally enabled unlike for suspend()
- * that is conditional.
- */
-void dev_pm_enable_wake_irq(struct device *dev)
-{
-	struct wake_irq *wirq = dev->power.wakeirq;
-
-	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
-		enable_irq(wirq->irq);
-}
-EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
-
-/**
- * dev_pm_disable_wake_irq - Disable device wake-up interrupt
- * @dev: Device
- *
- * Optionally called from the bus code or the device driver for
- * runtime_suspend() to override the PM runtime core managed wake-up
- * interrupt handling to disable the wake-up interrupt.
- */
-void dev_pm_disable_wake_irq(struct device *dev)
-{
-	struct wake_irq *wirq = dev->power.wakeirq;
-
-	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
-		disable_irq_nosync(wirq->irq);
-}
-EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
-
 /**
  * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
  * @dev: Device
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
index dd42d16945d0..d9642c6cf852 100644
--- a/include/linux/pm_wakeirq.h
+++ b/include/linux/pm_wakeirq.h
@@ -10,8 +10,6 @@ extern int dev_pm_set_wake_irq(struct device *dev, int irq);
 extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq);
 extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq);
 extern void dev_pm_clear_wake_irq(struct device *dev);
-extern void dev_pm_enable_wake_irq(struct device *dev);
-extern void dev_pm_disable_wake_irq(struct device *dev);
 
 #else /* !CONFIG_PM */
 
@@ -34,13 +32,5 @@ static inline void dev_pm_clear_wake_irq(struct device *dev)
 {
 }
 
-static inline void dev_pm_enable_wake_irq(struct device *dev)
-{
-}
-
-static inline void dev_pm_disable_wake_irq(struct device *dev)
-{
-}
-
 #endif /* CONFIG_PM */
 #endif /* _LINUX_PM_WAKEIRQ_H */
--
cgit v1.2.3


From c1ed39ec116272935528ca9b348b8ee79b0791da Mon Sep 17 00:00:00 2001
From: Winston Wen
Date: Mon, 24 Jul 2023 10:10:56 +0800
Subject: fs/nls: make load_nls() take a const parameter

load_nls() takes a char * parameter and uses it either to find an nls
module in the list of registered tables or to construct the module name
used to load it.
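
For illustration, a hedged sketch of the kind of call site this affects
(the context struct and helper below are hypothetical, loosely modeled
on the cifs call site quoted next):

	#include <linux/nls.h>

	/* Hypothetical mount context; real filesystems typically keep
	 * the charset name in a const-qualified field like this. */
	struct demo_fs_context {
		const char *charset;
	};

	static struct nls_table *demo_load_table(const struct demo_fs_context *ctx)
	{
		/*
		 * With the old char * prototype this call needed a cast
		 * that threw away the const qualifier; with a const
		 * parameter the field can be passed straight through.
		 */
		return load_nls(ctx->charset);
	}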
This change makes load_nls() take a const parameter, so we no longer
need casts like this:

	ses->local_nls = load_nls((char *)ctx->local_nls->charset);

Suggested-by: Stephen Rothwell
Signed-off-by: Winston Wen
Reviewed-by: Paulo Alcantara
Reviewed-by: Christian Brauner
Signed-off-by: Steve French
---
 fs/nls/nls_base.c   | 4 ++--
 include/linux/nls.h | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
index 52ccd34b1e79..a026dbd3593f 100644
--- a/fs/nls/nls_base.c
+++ b/fs/nls/nls_base.c
@@ -272,7 +272,7 @@ int unregister_nls(struct nls_table * nls)
	return -EINVAL;
 }
 
-static struct nls_table *find_nls(char *charset)
+static struct nls_table *find_nls(const char *charset)
 {
	struct nls_table *nls;
	spin_lock(&nls_lock);
@@ -288,7 +288,7 @@ static struct nls_table *find_nls(char *charset)
	return nls;
 }
 
-struct nls_table *load_nls(char *charset)
+struct nls_table *load_nls(const char *charset)
 {
	return try_then_request_module(find_nls(charset), "nls_%s", charset);
 }
diff --git a/include/linux/nls.h b/include/linux/nls.h
index 499e486b3722..e0bf8367b274 100644
--- a/include/linux/nls.h
+++ b/include/linux/nls.h
@@ -47,7 +47,7 @@ enum utf16_endian {
 /* nls_base.c */
 extern int __register_nls(struct nls_table *, struct module *);
 extern int unregister_nls(struct nls_table *);
-extern struct nls_table *load_nls(char *);
+extern struct nls_table *load_nls(const char *charset);
 extern void unload_nls(struct nls_table *);
 extern struct nls_table *load_nls_default(void);
 #define register_nls(nls)	__register_nls((nls), THIS_MODULE)
--
cgit v1.2.3


From b1f02b95758d05b799731d939e76a0bd6da312db Mon Sep 17 00:00:00 2001
From: Jann Horn
Date: Sat, 22 Jul 2023 00:51:07 +0200
Subject: mm: fix memory ordering for mm_lock_seq and vm_lock_seq

mm->mm_lock_seq effectively functions as a read/write lock; therefore it
must be used with acquire/release semantics.

A specific example is the interaction between userfaultfd_register() and
lock_vma_under_rcu().

userfaultfd_register() does the following from the point where it changes
a VMA's flags to the point where concurrent readers are permitted again
(in a simple scenario where only a single private VMA is accessed and no
merging/splitting is involved):

  userfaultfd_register
    userfaultfd_set_vm_flags
      vm_flags_reset
        vma_start_write
          down_write(&vma->vm_lock->lock)
          vma->vm_lock_seq = mm_lock_seq [marks VMA as busy]
          up_write(&vma->vm_lock->lock)
        vm_flags_init [sets VM_UFFD_* in __vm_flags]
    vma->vm_userfaultfd_ctx.ctx = ctx
  mmap_write_unlock
    vma_end_write_all
      WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1) [unlocks VMA]

There are no memory barriers in between the __vm_flags update and the
mm->mm_lock_seq update that unlocks the VMA, so the unlock can be
reordered to above the vm_flags_init() call, which means from the
perspective of a concurrent reader, a VMA can be marked as a userfaultfd
VMA while it is not VMA-locked. That's bad; we definitely need a
store-release for the unlock operation.

The non-atomic write to vma->vm_lock_seq in vma_start_write() is mostly
fine because all accesses to vma->vm_lock_seq that matter are always
protected by the VMA lock. There is a racy read in vma_start_read()
though that can tolerate false positives, so we should be using
WRITE_ONCE() to keep things tidy and data-race-free (including for
KCSAN).
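
The store/load pairing needed here is the classic message-passing
pattern. As a minimal sketch (an LKMM litmus test with illustrative
variable names standing in for the real fields, not code taken from the
patch itself), herd7 reports the bad outcome below as forbidden with the
release/acquire pair in place, but allowed if the release store or the
acquire load of mm_lock_seq is weakened to WRITE_ONCE()/READ_ONCE():

	C mm-lock-seq-mp

	{}

	P0(int *vm_flags, int *mm_lock_seq)
	{
		WRITE_ONCE(*vm_flags, 1);		/* the __vm_flags update */
		smp_store_release(mm_lock_seq, 1);	/* vma_end_write_all() */
	}

	P1(int *vm_flags, int *mm_lock_seq)
	{
		int r0;
		int r1;

		r0 = smp_load_acquire(mm_lock_seq);	/* vma_start_read() */
		r1 = READ_ONCE(*vm_flags);		/* e.g. userfaultfd_armed() */
	}

	exists (1:r0=1 /\ 1:r1=0)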
On the other side, lock_vma_under_rcu() works as follows in the relevant
region for locking and userfaultfd check:

  lock_vma_under_rcu
    vma_start_read
      vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq) [early bailout]
      down_read_trylock(&vma->vm_lock->lock)
      vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq) [main check]
    userfaultfd_armed
      checks vma->vm_flags & __VM_UFFD_FLAGS

Here, the interesting aspect is how far down the mm->mm_lock_seq read
can be reordered - if this read is reordered down below the
vma->vm_flags access, this could cause lock_vma_under_rcu() to partly
operate on information that was read while the VMA was supposed to be
locked. To prevent this kind of downwards bleeding of the
mm->mm_lock_seq read, we need to read it with a load-acquire.

Some of the comment wording is based on suggestions by Suren.

BACKPORT WARNING: One of the functions changed by this patch (which I've
written against Linus' tree) is vma_try_start_write(), but this function
no longer exists in mm/mm-everything. I don't know whether the merged
version of this patch will be ordered before or after the patch that
removes vma_try_start_write(). If you're backporting this patch to a
tree with vma_try_start_write(), make sure this patch changes that
function.

Link: https://lkml.kernel.org/r/20230721225107.942336-1-jannh@google.com
Fixes: 5e31275cc997 ("mm: add per-VMA lock and helper functions to control it")
Signed-off-by: Jann Horn
Reviewed-by: Suren Baghdasaryan
Cc:
Signed-off-by: Andrew Morton
---
 include/linux/mm.h        | 29 +++++++++++++++++++++++------
 include/linux/mm_types.h  | 28 ++++++++++++++++++++++++++++
 include/linux/mmap_lock.h | 10 ++++++++--
 3 files changed, 59 insertions(+), 8 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2dd73e4f3d8e..406ab9ea818f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -641,8 +641,14 @@ static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
  */
 static inline bool vma_start_read(struct vm_area_struct *vma)
 {
-	/* Check before locking. A race might cause false locked result. */
-	if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))
+	/*
+	 * Check before locking. A race might cause false locked result.
+	 * We can use READ_ONCE() for the mm_lock_seq here, and don't need
+	 * ACQUIRE semantics, because this is just a lockless check whose result
+	 * we don't rely on for anything - the mm_lock_seq read against which we
+	 * need ordering is below.
+	 */
+	if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq))
		return false;
 
	if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
@@ -653,8 +659,13 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
	 * False unlocked result is impossible because we modify and check
	 * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
	 * modification invalidates all existing locks.
+	 *
+	 * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
+	 * racing with vma_end_write_all(), we only start reading from the VMA
+	 * after it has been unlocked.
+	 * This pairs with RELEASE semantics in vma_end_write_all().
	 */
-	if (unlikely(vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))) {
+	if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) {
		up_read(&vma->vm_lock->lock);
		return false;
	}
@@ -676,7 +687,7 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
	 * current task is holding mmap_write_lock, both vma->vm_lock_seq and
	 * mm->mm_lock_seq can't be concurrently modified.
	 */
-	*mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
+	*mm_lock_seq = vma->vm_mm->mm_lock_seq;
	return (vma->vm_lock_seq == *mm_lock_seq);
 }
 
@@ -688,7 +699,13 @@ static inline void vma_start_write(struct vm_area_struct *vma)
		return;
 
	down_write(&vma->vm_lock->lock);
-	vma->vm_lock_seq = mm_lock_seq;
+	/*
+	 * We should use WRITE_ONCE() here because we can have concurrent reads
+	 * from the early lockless pessimistic check in vma_start_read().
+	 * We don't really care about the correctness of that early check, but
+	 * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
+	 */
+	WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
	up_write(&vma->vm_lock->lock);
 }
 
@@ -702,7 +719,7 @@ static inline bool vma_try_start_write(struct vm_area_struct *vma)
	if (!down_write_trylock(&vma->vm_lock->lock))
		return false;
 
-	vma->vm_lock_seq = mm_lock_seq;
+	WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
	up_write(&vma->vm_lock->lock);
	return true;
 }
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index de10fc797c8e..5e74ce4a28cd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -514,6 +514,20 @@ struct vm_area_struct {
	};
 
 #ifdef CONFIG_PER_VMA_LOCK
+	/*
+	 * Can only be written (using WRITE_ONCE()) while holding both:
+	 *  - mmap_lock (in write mode)
+	 *  - vm_lock->lock (in write mode)
+	 * Can be read reliably while holding one of:
+	 *  - mmap_lock (in read or write mode)
+	 *  - vm_lock->lock (in read or write mode)
+	 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
+	 * while holding nothing (except RCU to keep the VMA struct allocated).
+	 *
+	 * This sequence counter is explicitly allowed to overflow; sequence
+	 * counter reuse can only lead to occasional unnecessary use of the
+	 * slowpath.
+	 */
	int vm_lock_seq;
	struct vma_lock *vm_lock;
 
@@ -679,6 +693,20 @@ struct mm_struct {
					  * by mmlist_lock
					  */
 #ifdef CONFIG_PER_VMA_LOCK
+		/*
+		 * This field has lock-like semantics, meaning it is sometimes
+		 * accessed with ACQUIRE/RELEASE semantics.
+		 * Roughly speaking, incrementing the sequence number is
+		 * equivalent to releasing locks on VMAs; reading the sequence
+		 * number can be part of taking a read lock on a VMA.
+		 *
+		 * Can be modified under write mmap_lock using RELEASE
+		 * semantics.
+		 * Can be read with no other protection when holding write
+		 * mmap_lock.
+		 * Can be read with ACQUIRE semantics if not holding write
+		 * mmap_lock.
+		 */
		int mm_lock_seq;
 #endif
 
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index aab8f1b28d26..e05e167dbd16 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -76,8 +76,14 @@ static inline void mmap_assert_write_locked(struct mm_struct *mm)
 static inline void vma_end_write_all(struct mm_struct *mm)
 {
	mmap_assert_write_locked(mm);
-	/* No races during update due to exclusive mmap_lock being held */
-	WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1);
+	/*
+	 * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
+	 * mmap_lock being held.
+	 * We need RELEASE semantics here to ensure that preceding stores into
+	 * the VMA take effect before we unlock it with this store.
+	 * Pairs with ACQUIRE semantics in vma_start_read().
+	 */
+	smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
 }
 #else
 static inline void vma_end_write_all(struct mm_struct *mm) {}
--
cgit v1.2.3


From 800959e697de0c55613a2ce40bca52520a421c9f Mon Sep 17 00:00:00 2001
From: YueHaibing
Date: Tue, 25 Jul 2023 21:48:08 +0800
Subject: ftrace: Remove unused extern declarations

Commit 6a9c981b1e96 ("ftrace: Remove unused function
ftrace_arch_read_dyn_info()") left the ftrace_arch_read_dyn_info()
extern declaration behind, and commit 1d74f2a0f64b ("ftrace: remove
ftrace_ip_converted()") likewise left the ftrace_ip_converted()
declaration. Remove both stale declarations.

Link: https://lore.kernel.org/linux-trace-kernel/20230725134808.9716-1-yuehaibing@huawei.com
Cc:
Cc:
Signed-off-by: YueHaibing
Signed-off-by: Steven Rostedt (Google)
---
 include/linux/ftrace.h | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index ce156c7704ee..aad9cf8876b5 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -684,7 +684,6 @@ void __init ftrace_set_early_filter(struct ftrace_ops *ops, char *buf,
			    int enable);
 
 /* defined in arch */
-extern int ftrace_ip_converted(unsigned long ip);
 extern int ftrace_dyn_arch_init(void);
 extern void ftrace_replace_code(int enable);
 extern int ftrace_update_ftrace_func(ftrace_func_t func);
@@ -859,9 +858,6 @@ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_a
 }
 #endif
 
-/* May be defined in arch */
-extern int ftrace_arch_read_dyn_info(char *buf, int size);
-
 extern int skip_trace(unsigned long ip);
 extern void ftrace_module_init(struct module *mod);
 extern void ftrace_module_enable(struct module *mod);
--
cgit v1.2.3