From 4d312ac057da57b4a844ec8af14236e74b652efe Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 21:35:47 +0200 Subject: x86/mm: Add early_memremap_pgprot_adjust() prototype early_memremap_pgprot_adjust() is a __weak function with a local prototype, but x86 has a custom implementation that does not see the prototype, causing a W=1 warning: arch/x86/mm/ioremap.c:785:17: error: no previous prototype for 'early_memremap_pgprot_adjust' [-Werror=missing-prototypes] Move the declaration into the global linux/io.h header to avoid this. Signed-off-by: Arnd Bergmann Signed-off-by: Dave Hansen Reviewed-by: Alexander Lobakin Link: https://lore.kernel.org/all/20230516193549.544673-19-arnd%40kernel.org --- mm/internal.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'mm') diff --git a/mm/internal.h b/mm/internal.h index 68410c6d97ac..e6029d94bdb2 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -178,12 +178,6 @@ extern unsigned long highest_memmap_pfn; */ #define MAX_RECLAIM_RETRIES 16 -/* - * in mm/early_ioremap.c - */ -pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr, - unsigned long size, pgprot_t prot); - /* * in mm/vmscan.c: */ -- cgit v1.2.3
From 444f20c29e8b41a5aef5c34e3eab84e8d1cc4511 Mon Sep 17 00:00:00 2001 From: zhaoxinchao Date: Tue, 18 Apr 2023 10:05:23 +0800 Subject: mm/slab: correct return values in comment for _kmem_cache_create() __kmem_cache_create() returns 0 on success and non-zero on failure. The comment is wrong in two instances, so fix the first one and remove the second one. Also make the comment non-doc, because it doesn't describe an API function, but a SLAB-specific implementation. Signed-off-by: zhaoxinchao Signed-off-by: Vlastimil Babka --- mm/slab.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index bb57f7fdbae1..30d241908739 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1887,14 +1887,12 @@ static bool set_on_slab_cache(struct kmem_cache *cachep, return true; } -/** +/* * __kmem_cache_create - Create a cache. * @cachep: cache management descriptor * @flags: SLAB flags * - * Returns a ptr to the cache on success, NULL on failure. - * Cannot be called within an int, but can be interrupted. - * The @ctor is run when new pages are allocated by the cache. + * Returns zero on success, nonzero on failure. * * The flags are * * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware * cacheline. This can be beneficial if you're counting cycles as closely * as davem. - * - * Return: a pointer to the created cache or %NULL in case of error */ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags) { -- cgit v1.2.3
From 35973232b5cddd8b03dac2730cd1ca53137d1cd4 Mon Sep 17 00:00:00 2001 From: Xiongwei Song Date: Thu, 13 Apr 2023 22:34:48 +0800 Subject: slub: Correct the error code when slab_kset is NULL Returning -ENOSYS when the kset_create_and_add() call returns a NULL pointer is improper; the failure is more likely due to a lack of memory, so returning -ENOMEM is better.
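For illustration, the error-handling convention the fix follows, as a minimal sketch rather than a verbatim quote of mm/slub.c (the uevent_ops argument to kset_create_and_add() is elided with NULL here):

	slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
	if (!slab_kset) {
		/* kset allocation/registration failed: report -ENOMEM, not -ENOSYS */
		return -ENOMEM;
	}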
Signed-off-by: Xiongwei Song Signed-off-by: Vlastimil Babka --- mm/slub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index c87628cd8a9a..1202ed1200f9 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -6224,7 +6224,7 @@ static int __init slab_sysfs_init(void) if (!slab_kset) { mutex_unlock(&slab_mutex); pr_err("Cannot register slab subsystem.\n"); - return -ENOSYS; + return -ENOMEM; } slab_state = FULL; -- cgit v1.2.3
From 81bd31793fd3be4fec2818e2267997f56ac18fb2 Mon Sep 17 00:00:00 2001 From: Xiongwei Song Date: Thu, 13 Apr 2023 22:34:49 +0800 Subject: slub: Put objects_show() into CONFIG_SLUB_DEBUG enabled block The SO_ALL|SO_OBJECTS pair is only used when the CONFIG_SLUB_DEBUG option is enabled, so the objects_show() definition should be surrounded by CONFIG_SLUB_DEBUG too. Signed-off-by: Xiongwei Song Signed-off-by: Vlastimil Babka --- mm/slub.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index 1202ed1200f9..bd3b00bb865b 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5612,12 +5612,6 @@ static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) } SLAB_ATTR_RO(cpu_slabs); -static ssize_t objects_show(struct kmem_cache *s, char *buf) -{ - return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); -} -SLAB_ATTR_RO(objects); - static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) { return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); @@ -5712,6 +5706,12 @@ static ssize_t total_objects_show(struct kmem_cache *s, char *buf) } SLAB_ATTR_RO(total_objects); +static ssize_t objects_show(struct kmem_cache *s, char *buf) +{ + return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); +} +SLAB_ATTR_RO(objects); + static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) { return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); @@ -5943,7 +5943,6 @@ static struct attribute *slab_attrs[] = { &order_attr.attr, &min_partial_attr.attr, &cpu_partial_attr.attr, - &objects_attr.attr, &objects_partial_attr.attr, &partial_attr.attr, &cpu_slabs_attr.attr, @@ -5957,6 +5956,7 @@ static struct attribute *slab_attrs[] = { &slabs_cpu_partial_attr.attr, #ifdef CONFIG_SLUB_DEBUG &total_objects_attr.attr, + &objects_attr.attr, &slabs_attr.attr, &sanity_checks_attr.attr, &trace_attr.attr, -- cgit v1.2.3
From c6c17c4dc36684fe119a1d40eb41ea65677bdfae Mon Sep 17 00:00:00 2001 From: Xiongwei Song Date: Thu, 13 Apr 2023 22:34:50 +0800 Subject: slub: Remove CONFIG_SMP defined check CONFIG_SMP is one of the dependencies of CONFIG_SLUB_CPU_PARTIAL, so if CONFIG_SLUB_CPU_PARTIAL is defined then CONFIG_SMP must be defined as well; there is no need to check for CONFIG_SMP separately here.
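For reference, the dependency that makes the CONFIG_SMP half of the guard redundant comes from the mm/Kconfig entry, paraphrased below from memory (exact wording may differ):

	/*
	 * config SLUB_CPU_PARTIAL
	 *	default y
	 *	depends on SLUB && SMP
	 *
	 * so a plain #ifdef CONFIG_SLUB_CPU_PARTIAL already implies CONFIG_SMP.
	 */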
Signed-off-by: Xiongwei Song Signed-off-by: Vlastimil Babka --- mm/slub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index bd3b00bb865b..2ae6af40c1a4 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5640,7 +5640,7 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) objects = (slabs * oo_objects(s->oo)) / 2; len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs); -#if defined(CONFIG_SLUB_CPU_PARTIAL) && defined(CONFIG_SMP) +#ifdef CONFIG_SLUB_CPU_PARTIAL for_each_online_cpu(cpu) { struct slab *slab; -- cgit v1.2.3
From 4f174a8bacebf57ffa18be0439b9ed7ee6d00c70 Mon Sep 17 00:00:00 2001 From: Xiongwei Song Date: Thu, 13 Apr 2023 22:34:51 +0800 Subject: slub: Remove slabs_node() function When traversing nodes one by one, get_node() is already called by the for_each_kmem_cache_node() macro, so there is no need to call get_node() again in slabs_node(); just reading the nr_slabs field is enough, and node_nr_slabs() already does exactly that. Hence, the slabs_node() function is not needed anymore. Signed-off-by: Xiongwei Song Signed-off-by: Vlastimil Babka --- mm/slub.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index 2ae6af40c1a4..53be9a208bb6 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1344,14 +1344,6 @@ static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct list_del(&slab->slab_list); } -/* Tracking of the number of slabs for debugging purposes */ -static inline unsigned long slabs_node(struct kmem_cache *s, int node) -{ - struct kmem_cache_node *n = get_node(s, node); - - return atomic_long_read(&n->nr_slabs); -} - static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) { return atomic_long_read(&n->nr_slabs); @@ -1722,8 +1714,6 @@ slab_flags_t kmem_cache_flags(unsigned int object_size, #define disable_higher_order_debug 0 -static inline unsigned long slabs_node(struct kmem_cache *s, int node) - { return 0; } static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) { return 0; } static inline void inc_slabs_node(struct kmem_cache *s, int node, @@ -4598,7 +4588,7 @@ bool __kmem_cache_empty(struct kmem_cache *s) struct kmem_cache_node *n; for_each_kmem_cache_node(s, node, n) - if (n->nr_partial || slabs_node(s, node)) + if (n->nr_partial || node_nr_slabs(n)) return false; return true; } @@ -4615,7 +4605,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s) /* Attempt to free all objects */ for_each_kmem_cache_node(s, node, n) { free_partial(s, n); - if (n->nr_partial || slabs_node(s, node)) + if (n->nr_partial || node_nr_slabs(n)) return 1; } return 0; @@ -4828,7 +4818,7 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s) list_for_each_entry_safe(slab, t, &discard, slab_list) free_slab(s, slab); - if (slabs_node(s, node)) + if (node_nr_slabs(n)) ret = 1; } -- cgit v1.2.3
From 8040cbf5e1da2fe86558239b92927a947c79ecd6 Mon Sep 17 00:00:00 2001 From: Xiongwei Song Date: Thu, 13 Apr 2023 22:34:52 +0800 Subject: slub: Don't read nr_slabs and total_objects directly We have node_nr_slabs() to read nr_slabs and node_nr_objs() to read total_objects in a kmem_cache_node, so there is no need to access the two members directly.
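For reference, the two helpers are simple wrappers around the per-node counters, roughly as follows (quoted from memory as of this series, in the CONFIG_SLUB_DEBUG build, so the exact form may differ):

	static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
	{
		return atomic_long_read(&n->nr_slabs);
	}

	static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
	{
		return atomic_long_read(&n->total_objects);
	}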
Signed-off-by: Xiongwei Song Signed-off-by: Vlastimil Babka --- mm/slub.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index 53be9a208bb6..58cc832d0afd 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5156,9 +5156,9 @@ static int validate_slab_node(struct kmem_cache *s, validate_slab(s, slab, obj_map); count++; } - if (count != atomic_long_read(&n->nr_slabs)) { + if (count != node_nr_slabs(n)) { pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", - s->name, count, atomic_long_read(&n->nr_slabs)); + s->name, count, node_nr_slabs(n)); slab_add_kunit_errors(); } @@ -5442,12 +5442,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s, for_each_kmem_cache_node(s, node, n) { if (flags & SO_TOTAL) - x = atomic_long_read(&n->total_objects); + x = node_nr_objs(n); else if (flags & SO_OBJECTS) - x = atomic_long_read(&n->total_objects) - - count_partial(n, count_free); + x = node_nr_objs(n) - count_partial(n, count_free); else - x = atomic_long_read(&n->nr_slabs); + x = node_nr_slabs(n); total += x; nodes[node] += x; } @@ -6386,7 +6385,7 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep) unsigned long flags; struct slab *slab; - if (!atomic_long_read(&n->nr_slabs)) + if (!node_nr_slabs(n)) continue; spin_lock_irqsave(&n->list_lock, flags); -- cgit v1.2.3 From f7e466e951a15bc7cec496f22f6276b854d3c310 Mon Sep 17 00:00:00 2001 From: David Keisar Schmidt Date: Sun, 16 Apr 2023 20:22:42 +0300 Subject: mm/slab: Replace invocation of weak PRNG The Slab allocator randomization uses the prandom_u32 PRNG. That was added to prevent attackers to obtain information on the heap state, by randomizing the freelists state. However, this PRNG turned out to be weak, as noted in commit c51f8f88d705 To fix it, we have changed the invocation of prandom_u32_state to get_random_u32 to ensure the PRNG is strong. Since a modulo operation is applied right after that, we used get_random_u32_below, to achieve uniformity. In addition, we changed the freelist_init_state union to struct, since the rnd_state inside which is used to store the state of prandom_u32, is not needed anymore, since get_random_u32 maintains its own state. Signed-off-by: David Keisar Schmidt Signed-off-by: Vlastimil Babka --- mm/slab.c | 29 +++++++++-------------------- 1 file changed, 9 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index bb57f7fdbae1..e82a55270ad1 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2359,44 +2359,34 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab) #ifdef CONFIG_SLAB_FREELIST_RANDOM /* Hold information during a freelist initialization */ -union freelist_init_state { - struct { - unsigned int pos; - unsigned int *list; - unsigned int count; - }; - struct rnd_state rnd_state; +struct freelist_init_state { + unsigned int pos; + unsigned int *list; + unsigned int count; }; /* * Initialize the state based on the randomization method available. * return true if the pre-computed list is available, false otherwise. 
*/ -static bool freelist_state_initialize(union freelist_init_state *state, +static bool freelist_state_initialize(struct freelist_init_state *state, struct kmem_cache *cachep, unsigned int count) { bool ret; - unsigned int rand; - - /* Use best entropy available to define a random shift */ - rand = get_random_u32(); - - /* Use a random state if the pre-computed list is not available */ if (!cachep->random_seq) { - prandom_seed_state(&state->rnd_state, rand); ret = false; } else { state->list = cachep->random_seq; state->count = count; - state->pos = rand % count; + state->pos = get_random_u32_below(count); ret = true; } return ret; } /* Get the next entry on the list and randomize it using a random shift */ -static freelist_idx_t next_random_slot(union freelist_init_state *state) +static freelist_idx_t next_random_slot(struct freelist_init_state *state) { if (state->pos >= state->count) state->pos = 0; @@ -2417,7 +2407,7 @@ static void swap_free_obj(struct slab *slab, unsigned int a, unsigned int b) static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab) { unsigned int objfreelist = 0, i, rand, count = cachep->num; - union freelist_init_state state; + struct freelist_init_state state; bool precomputed; if (count < 2) @@ -2446,8 +2436,7 @@ static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab) /* Fisher-Yates shuffle */ for (i = count - 1; i > 0; i--) { - rand = prandom_u32_state(&state.rnd_state); - rand %= (i + 1); + rand = get_random_u32_below(i + 1); swap_free_obj(slab, i, rand); } } else { -- cgit v1.2.3 From ffe4dfe0baddaf61318bd275cacd64d4dbbbe724 Mon Sep 17 00:00:00 2001 From: David Keisar Schmidt Date: Sun, 16 Apr 2023 20:22:55 +0300 Subject: mm/slab_common: Replace invocation of weak PRNG The Slab allocator randomization inside slab_common.c uses the prandom_u32 PRNG. That was added to prevent attackers to obtain information on the heap state. However, this PRNG turned out to be weak, as noted in commit c51f8f88d705 To fix it, we have changed the invocation of prandom_u32_state to get_random_u32 to ensure the PRNG is strong. Since a modulo operation is applied right after that, in the Fisher-Yates shuffle, we used get_random_u32_below, to achieve uniformity. 
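To illustrate the uniformity point with a minimal sketch (not part of the patch): reducing a raw 32-bit random value with a modulo is slightly biased whenever the bound does not evenly divide 2^32, whereas get_random_u32_below() is defined to return a uniformly distributed value in [0, ceil):

	/* Illustrative fragment, i being the Fisher-Yates loop index: */
	rand = get_random_u32() % (i + 1);	/* biased unless (i + 1) divides 2^32 */
	rand = get_random_u32_below(i + 1);	/* uniform over [0, i] */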
Signed-off-by: David Keisar Schmidt Signed-off-by: Vlastimil Babka --- mm/slab_common.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/slab_common.c b/mm/slab_common.c index 607249785c07..2071cefbe651 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1141,7 +1141,7 @@ EXPORT_SYMBOL(kmalloc_large_node); #ifdef CONFIG_SLAB_FREELIST_RANDOM /* Randomize a generic freelist */ -static void freelist_randomize(struct rnd_state *state, unsigned int *list, +static void freelist_randomize(unsigned int *list, unsigned int count) { unsigned int rand; @@ -1152,8 +1152,7 @@ static void freelist_randomize(struct rnd_state *state, unsigned int *list, /* Fisher-Yates shuffle */ for (i = count - 1; i > 0; i--) { - rand = prandom_u32_state(state); - rand %= (i + 1); + rand = get_random_u32_below(i + 1); swap(list[i], list[rand]); } } @@ -1162,7 +1161,6 @@ static void freelist_randomize(struct rnd_state *state, unsigned int *list, int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count, gfp_t gfp) { - struct rnd_state state; if (count < 2 || cachep->random_seq) return 0; @@ -1171,10 +1169,7 @@ int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count, if (!cachep->random_seq) return -ENOMEM; - /* Get best entropy at this stage of boot */ - prandom_seed_state(&state, get_random_long()); - - freelist_randomize(&state, cachep->random_seq, count); + freelist_randomize(cachep->random_seq, count); return 0; } -- cgit v1.2.3 From fc493f83a25835c14cd96379c1a07459230881bc Mon Sep 17 00:00:00 2001 From: Claudio Migliorelli Date: Sun, 23 Apr 2023 15:29:35 +0200 Subject: Fix some coding style errors in memblock.c This patch removes the initialization of some static variables to 0 and `false` in the memblock source file, according to the coding style guidelines. Signed-off-by: Claudio Migliorelli Link: https://lore.kernel.org/r/87r0sa7mm8.fsf@mail.polimi.it Signed-off-by: Mike Rapoport (IBM) --- mm/memblock.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 3feafea06ab2..9e28c1286ec6 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -156,10 +156,10 @@ static __refdata struct memblock_type *memblock_memory = &memblock.memory; } while (0) static int memblock_debug __initdata_memblock; -static bool system_has_some_mirror __initdata_memblock = false; +static bool system_has_some_mirror __initdata_memblock; static int memblock_can_resize __initdata_memblock; -static int memblock_memory_in_slab __initdata_memblock = 0; -static int memblock_reserved_in_slab __initdata_memblock = 0; +static int memblock_memory_in_slab __initdata_memblock; +static int memblock_reserved_in_slab __initdata_memblock; static enum memblock_flags __init_memblock choose_memblock_flags(void) { -- cgit v1.2.3 From 493f349e38d022057b3b6e13f589f108269c42b0 Mon Sep 17 00:00:00 2001 From: Yuwei Guan Date: Fri, 19 May 2023 18:53:21 +0800 Subject: memblock: Add flags and nid info in memblock debugfs Currently, the memblock debugfs can display the count of memblock_type and the base and end of the reg. However, when memblock_mark_*() or memblock_set_node() is executed on some range, the information in the existing debugfs cannot make it clear why the address is not consecutive. 
For example, cat /sys/kernel/debug/memblock/memory 0: 0x0000000080000000..0x00000000901fffff 1: 0x0000000090200000..0x00000000905fffff 2: 0x0000000090600000..0x0000000092ffffff 3: 0x0000000093000000..0x00000000973fffff 4: 0x0000000097400000..0x00000000b71fffff 5: 0x00000000c0000000..0x00000000dfffffff 6: 0x00000000e2500000..0x00000000f87fffff 7: 0x00000000f8800000..0x00000000fa7fffff 8: 0x00000000fa800000..0x00000000fd3effff 9: 0x00000000fd3f0000..0x00000000fd3fefff 10: 0x00000000fd3ff000..0x00000000fd7fffff 11: 0x00000000fd800000..0x00000000fd901fff 12: 0x00000000fd902000..0x00000000fd909fff 13: 0x00000000fd90a000..0x00000000fd90bfff 14: 0x00000000fd90c000..0x00000000ffffffff 15: 0x0000000880000000..0x0000000affffffff So we can add flags and nid to this debugfs. For example, cat /sys/kernel/debug/memblock/memory 0: 0x0000000080000000..0x00000000901fffff 0 NONE 1: 0x0000000090200000..0x00000000905fffff 0 NOMAP 2: 0x0000000090600000..0x0000000092ffffff 0 NONE 3: 0x0000000093000000..0x00000000973fffff 0 NOMAP 4: 0x0000000097400000..0x00000000b71fffff 0 NONE 5: 0x00000000c0000000..0x00000000dfffffff 0 NONE 6: 0x00000000e2500000..0x00000000f87fffff 0 NONE 7: 0x00000000f8800000..0x00000000fa7fffff 0 NOMAP 8: 0x00000000fa800000..0x00000000fd3effff 0 NONE 9: 0x00000000fd3f0000..0x00000000fd3fefff 0 NOMAP 10: 0x00000000fd3ff000..0x00000000fd7fffff 0 NONE 11: 0x00000000fd800000..0x00000000fd901fff 0 NOMAP 12: 0x00000000fd902000..0x00000000fd909fff 0 NONE 13: 0x00000000fd90a000..0x00000000fd90bfff 0 NOMAP 14: 0x00000000fd90c000..0x00000000ffffffff 0 NONE 15: 0x0000000880000000..0x0000000affffffff 0 NONE Signed-off-by: Yuwei Guan Reviewed-by: Anshuman Khandual Reviewed-by: Kefeng Wang Link: https://lore.kernel.org/r/20230519105321.333-1-ssawgyw@gmail.com Signed-off-by: Mike Rapoport (IBM) --- mm/memblock.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 9e28c1286ec6..c5c80d9bcea3 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -2158,12 +2158,19 @@ void __init memblock_free_all(void) } #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK) +static const char * const flagname[] = { + [ilog2(MEMBLOCK_HOTPLUG)] = "HOTPLUG", + [ilog2(MEMBLOCK_MIRROR)] = "MIRROR", + [ilog2(MEMBLOCK_NOMAP)] = "NOMAP", + [ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG", +}; static int memblock_debug_show(struct seq_file *m, void *private) { struct memblock_type *type = m->private; struct memblock_region *reg; - int i; + int i, j; + unsigned int count = ARRAY_SIZE(flagname); phys_addr_t end; for (i = 0; i < type->cnt; i++) { @@ -2171,7 +2178,20 @@ static int memblock_debug_show(struct seq_file *m, void *private) end = reg->base + reg->size - 1; seq_printf(m, "%4d: ", i); - seq_printf(m, "%pa..%pa\n", ®->base, &end); + seq_printf(m, "%pa..%pa ", ®->base, &end); + seq_printf(m, "%4d ", memblock_get_region_node(reg)); + if (reg->flags) { + for (j = 0; j < count; j++) { + if (reg->flags & (1U << j)) { + seq_printf(m, "%s\n", flagname[j]); + break; + } + } + if (j == count) + seq_printf(m, "%s\n", "UNKNOWN"); + } else { + seq_printf(m, "%s\n", "NONE"); + } } return 0; } -- cgit v1.2.3 From d2e527f0d8d1124b1fab93e2e7b2c6a114c0e5a2 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 23 May 2023 09:26:35 +0200 Subject: mm/slab: remove HAVE_HARDENED_USERCOPY_ALLOCATOR With SLOB removed, both remaining allocators support hardened usercopy, so remove the config and associated #ifdef. 
Signed-off-by: Vlastimil Babka Reviewed-by: David Hildenbrand Reviewed-by: Lorenzo Stoakes Reviewed-by: Kees Cook Acked-by: David Rientjes Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> --- mm/Kconfig | 2 -- mm/slab.h | 8 -------- security/Kconfig | 8 -------- 3 files changed, 18 deletions(-) (limited to 'mm') diff --git a/mm/Kconfig b/mm/Kconfig index 7672a22647b4..041f0da42f2b 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -221,7 +221,6 @@ choice config SLAB bool "SLAB" depends on !PREEMPT_RT - select HAVE_HARDENED_USERCOPY_ALLOCATOR help The regular slab allocator that is established and known to work well in all environments. It organizes cache hot objects in @@ -229,7 +228,6 @@ config SLAB config SLUB bool "SLUB (Unqueued Allocator)" - select HAVE_HARDENED_USERCOPY_ALLOCATOR help SLUB is a slab allocator that minimizes cache line usage instead of managing queues of cached objects (SLAB approach). diff --git a/mm/slab.h b/mm/slab.h index f01ac256a8f5..ae41c0c50ba6 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -832,16 +832,8 @@ struct kmem_obj_info { void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab); #endif -#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR void __check_heap_object(const void *ptr, unsigned long n, const struct slab *slab, bool to_user); -#else -static inline -void __check_heap_object(const void *ptr, unsigned long n, - const struct slab *slab, bool to_user) -{ -} -#endif #ifdef CONFIG_SLUB_DEBUG void skip_orig_size_check(struct kmem_cache *s, const void *object); diff --git a/security/Kconfig b/security/Kconfig index 97abeb9b9a19..52c9af08ad35 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -127,16 +127,8 @@ config LSM_MMAP_MIN_ADDR this low address space will need the permission specific to the systems running LSM. -config HAVE_HARDENED_USERCOPY_ALLOCATOR - bool - help - The heap allocator implements __check_heap_object() for - validating memory ranges against heap object sizes in - support of CONFIG_HARDENED_USERCOPY. - config HARDENED_USERCOPY bool "Harden memory copies between kernel and userspace" - depends on HAVE_HARDENED_USERCOPY_ALLOCATOR imply STRICT_DEVMEM help This option checks for obviously wrong memory regions when -- cgit v1.2.3 From c37222082f23c456664d1c3182a714670ab8f9a4 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 14:49:48 +0100 Subject: splice: Fix filemap_splice_read() to use the correct inode Fix filemap_splice_read() to use file->f_mapping->host, not file->f_inode, as the source of the file size because in the case of a block device, file->f_inode points to the block-special file (which is typically 0 length) and not the backing store. 
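As a minimal sketch of the distinction (illustrative only, not taken from the patch; the helper name splice_source_size() is made up for this example):

	/*
	 * For a block device opened via its device node, file_inode(in) is the
	 * inode of the block-special file itself (typically 0 bytes long),
	 * while in->f_mapping->host is the inode that backs the pagecache and
	 * carries the device size, so size checks must use the latter.
	 */
	static inline loff_t splice_source_size(struct file *in)
	{
		return i_size_read(in->f_mapping->host);
	}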
Fixes: 07073eb01c5f ("splice: Add a func to do a splice from a buffered file without ITER_PIPE") Signed-off-by: David Howells Reviewed-by: Christoph Hellwig Reviewed-by: Christian Brauner cc: Steve French cc: Jens Axboe cc: Al Viro cc: David Hildenbrand cc: John Hubbard cc: linux-mm@kvack.org cc: linux-block@vger.kernel.org cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/20230522135018.2742245-2-dhowells@redhat.com Signed-off-by: Jens Axboe --- mm/filemap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index b4c9bd368b7e..a2006936a6ae 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2900,7 +2900,7 @@ ssize_t filemap_splice_read(struct file *in, loff_t *ppos, do { cond_resched(); - if (*ppos >= i_size_read(file_inode(in))) + if (*ppos >= i_size_read(in->f_mapping->host)) break; iocb.ki_pos = *ppos; @@ -2916,7 +2916,7 @@ ssize_t filemap_splice_read(struct file *in, loff_t *ppos, * part of the page is not copied back to userspace (unless * another truncate extends the file - this is desired though). */ - isize = i_size_read(file_inode(in)); + isize = i_size_read(in->f_mapping->host); if (unlikely(*ppos >= isize)) break; end_offset = min_t(loff_t, isize, *ppos + len); -- cgit v1.2.3 From 83aeff881e53fab5b46e4ceb5fb47fa0a22acba9 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 14:49:49 +0100 Subject: splice: Make filemap_splice_read() check s_maxbytes Make filemap_splice_read() check s_maxbytes analogously to filemap_read(). Signed-off-by: David Howells Reviewed-by: Christoph Hellwig Reviewed-by: Christian Brauner cc: Steve French cc: Jens Axboe cc: Al Viro cc: David Hildenbrand cc: John Hubbard cc: linux-mm@kvack.org cc: linux-block@vger.kernel.org cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/20230522135018.2742245-3-dhowells@redhat.com Signed-off-by: Jens Axboe --- mm/filemap.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index a2006936a6ae..0fcb0b80c2e2 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2887,6 +2887,9 @@ ssize_t filemap_splice_read(struct file *in, loff_t *ppos, bool writably_mapped; int i, error = 0; + if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes)) + return 0; + init_sync_kiocb(&iocb, in); iocb.ki_pos = *ppos; -- cgit v1.2.3 From bd194b187115da7b98b660b049315f6c9c8267d1 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 14:49:56 +0100 Subject: shmem: Implement splice-read The new filemap_splice_read() has an implicit expectation via filemap_get_pages() that ->read_folio() exists if ->readahead() doesn't fully populate the pagecache of the file it is reading from[1], potentially leading to a jump to NULL if this doesn't exist. shmem, however, (and by extension, tmpfs, ramfs and rootfs), doesn't have ->read_folio(), Work around this by equipping shmem with its own splice-read implementation, based on filemap_splice_read(), but able to paste in zero_page when there's a page missing. 
Signed-off-by: David Howells cc: Daniel Golle cc: Guenter Roeck cc: Christoph Hellwig cc: Jens Axboe cc: Al Viro cc: John Hubbard cc: David Hildenbrand cc: Matthew Wilcox cc: Hugh Dickins cc: linux-block@vger.kernel.org cc: linux-fsdevel@vger.kernel.org cc: linux-mm@kvack.org Link: https://lore.kernel.org/r/Y+pdHFFTk1TTEBsO@makrotopia.org/ [1] Link: https://lore.kernel.org/r/20230522135018.2742245-10-dhowells@redhat.com Signed-off-by: Jens Axboe --- mm/shmem.c | 134 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 133 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index e40a08c5c6d7..1f504ed982cf 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2731,6 +2731,138 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) return retval ? retval : error; } +static bool zero_pipe_buf_get(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + return true; +} + +static void zero_pipe_buf_release(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ +} + +static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + return false; +} + +static const struct pipe_buf_operations zero_pipe_buf_ops = { + .release = zero_pipe_buf_release, + .try_steal = zero_pipe_buf_try_steal, + .get = zero_pipe_buf_get, +}; + +static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe, + loff_t fpos, size_t size) +{ + size_t offset = fpos & ~PAGE_MASK; + + size = min_t(size_t, size, PAGE_SIZE - offset); + + if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { + struct pipe_buffer *buf = pipe_head_buf(pipe); + + *buf = (struct pipe_buffer) { + .ops = &zero_pipe_buf_ops, + .page = ZERO_PAGE(0), + .offset = offset, + .len = size, + }; + pipe->head++; + } + + return size; +} + +static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, + struct pipe_inode_info *pipe, + size_t len, unsigned int flags) +{ + struct inode *inode = file_inode(in); + struct address_space *mapping = inode->i_mapping; + struct folio *folio = NULL; + size_t total_spliced = 0, used, npages, n, part; + loff_t isize; + int error = 0; + + /* Work out how much data we can actually add into the pipe */ + used = pipe_occupancy(pipe->head, pipe->tail); + npages = max_t(ssize_t, pipe->max_usage - used, 0); + len = min_t(size_t, len, npages * PAGE_SIZE); + + do { + if (*ppos >= i_size_read(inode)) + break; + + error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio, SGP_READ); + if (error) { + if (error == -EINVAL) + error = 0; + break; + } + if (folio) { + folio_unlock(folio); + + if (folio_test_hwpoison(folio)) { + error = -EIO; + break; + } + } + + /* + * i_size must be checked after we know the pages are Uptodate. + * + * Checking i_size after the check allows us to calculate + * the correct value for "nr", which means the zero-filled + * part of the page is not copied back to userspace (unless + * another truncate extends the file - this is desired though). + */ + isize = i_size_read(inode); + if (unlikely(*ppos >= isize)) + break; + part = min_t(loff_t, isize - *ppos, len); + + if (folio) { + /* + * If users can be writing to this page using arbitrary + * virtual addresses, take care about potential aliasing + * before reading the page on the kernel side. + */ + if (mapping_writably_mapped(mapping)) + flush_dcache_folio(folio); + folio_mark_accessed(folio); + /* + * Ok, we have the page, and it's up-to-date, so we can + * now splice it into the pipe. 
+ */ + n = splice_folio_into_pipe(pipe, folio, *ppos, part); + folio_put(folio); + folio = NULL; + } else { + n = splice_zeropage_into_pipe(pipe, *ppos, len); + } + + if (!n) + break; + len -= n; + total_spliced += n; + *ppos += n; + in->f_ra.prev_pos = *ppos; + if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) + break; + + cond_resched(); + } while (len); + + if (folio) + folio_put(folio); + + file_accessed(in); + return total_spliced ? total_spliced : error; +} + static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) { struct address_space *mapping = file->f_mapping; @@ -3971,7 +4103,7 @@ static const struct file_operations shmem_file_operations = { .read_iter = shmem_file_read_iter, .write_iter = generic_file_write_iter, .fsync = noop_fsync, - .splice_read = generic_file_splice_read, + .splice_read = shmem_file_splice_read, .splice_write = iter_file_splice_write, .fallocate = shmem_fallocate, #endif -- cgit v1.2.3 From 3fc40265ae2b48a7475c41c5c0b256374c419f4b Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 14:50:17 +0100 Subject: iov_iter: Kill ITER_PIPE The ITER_PIPE-type iterator was only used by generic_file_splice_read() and that has been replaced and removed. This leaves ITER_PIPE unused - so remove it too. Signed-off-by: David Howells Reviewed-by: Christoph Hellwig Reviewed-by: Christian Brauner cc: Jens Axboe cc: Al Viro cc: David Hildenbrand cc: John Hubbard cc: linux-mm@kvack.org cc: linux-block@vger.kernel.org cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/20230522135018.2742245-31-dhowells@redhat.com Signed-off-by: Jens Axboe --- include/linux/uio.h | 14 -- lib/iov_iter.c | 431 +--------------------------------------------------- mm/filemap.c | 3 +- 3 files changed, 4 insertions(+), 444 deletions(-) (limited to 'mm') diff --git a/include/linux/uio.h b/include/linux/uio.h index 044c1d8c230c..60c342bb7ab8 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -11,7 +11,6 @@ #include struct page; -struct pipe_inode_info; typedef unsigned int __bitwise iov_iter_extraction_t; @@ -25,7 +24,6 @@ enum iter_type { ITER_IOVEC, ITER_KVEC, ITER_BVEC, - ITER_PIPE, ITER_XARRAY, ITER_DISCARD, ITER_UBUF, @@ -74,7 +72,6 @@ struct iov_iter { const struct kvec *kvec; const struct bio_vec *bvec; struct xarray *xarray; - struct pipe_inode_info *pipe; void __user *ubuf; }; size_t count; @@ -82,10 +79,6 @@ struct iov_iter { }; union { unsigned long nr_segs; - struct { - unsigned int head; - unsigned int start_head; - }; loff_t xarray_start; }; }; @@ -133,11 +126,6 @@ static inline bool iov_iter_is_bvec(const struct iov_iter *i) return iov_iter_type(i) == ITER_BVEC; } -static inline bool iov_iter_is_pipe(const struct iov_iter *i) -{ - return iov_iter_type(i) == ITER_PIPE; -} - static inline bool iov_iter_is_discard(const struct iov_iter *i) { return iov_iter_type(i) == ITER_DISCARD; @@ -286,8 +274,6 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec unsigned long nr_segs, size_t count); void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec, unsigned long nr_segs, size_t count); -void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe, - size_t count); void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count); void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray, loff_t start, size_t count); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 960223ed9199..f18138e0292a 
100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -14,8 +14,6 @@ #include #include -#define PIPE_PARANOIA /* for now */ - /* covers ubuf and kbuf alike */ #define iterate_buf(i, n, base, len, off, __p, STEP) { \ size_t __maybe_unused off = 0; \ @@ -198,150 +196,6 @@ static int copyin(void *to, const void __user *from, size_t n) return res; } -#ifdef PIPE_PARANOIA -static bool sanity(const struct iov_iter *i) -{ - struct pipe_inode_info *pipe = i->pipe; - unsigned int p_head = pipe->head; - unsigned int p_tail = pipe->tail; - unsigned int p_occupancy = pipe_occupancy(p_head, p_tail); - unsigned int i_head = i->head; - unsigned int idx; - - if (i->last_offset) { - struct pipe_buffer *p; - if (unlikely(p_occupancy == 0)) - goto Bad; // pipe must be non-empty - if (unlikely(i_head != p_head - 1)) - goto Bad; // must be at the last buffer... - - p = pipe_buf(pipe, i_head); - if (unlikely(p->offset + p->len != abs(i->last_offset))) - goto Bad; // ... at the end of segment - } else { - if (i_head != p_head) - goto Bad; // must be right after the last buffer - } - return true; -Bad: - printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset); - printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n", - p_head, p_tail, pipe->ring_size); - for (idx = 0; idx < pipe->ring_size; idx++) - printk(KERN_ERR "[%p %p %d %d]\n", - pipe->bufs[idx].ops, - pipe->bufs[idx].page, - pipe->bufs[idx].offset, - pipe->bufs[idx].len); - WARN_ON(1); - return false; -} -#else -#define sanity(i) true -#endif - -static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size) -{ - struct page *page = alloc_page(GFP_USER); - if (page) { - struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++); - *buf = (struct pipe_buffer) { - .ops = &default_pipe_buf_ops, - .page = page, - .offset = 0, - .len = size - }; - } - return page; -} - -static void push_page(struct pipe_inode_info *pipe, struct page *page, - unsigned int offset, unsigned int size) -{ - struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++); - *buf = (struct pipe_buffer) { - .ops = &page_cache_pipe_buf_ops, - .page = page, - .offset = offset, - .len = size - }; - get_page(page); -} - -static inline int last_offset(const struct pipe_buffer *buf) -{ - if (buf->ops == &default_pipe_buf_ops) - return buf->len; // buf->offset is 0 for those - else - return -(buf->offset + buf->len); -} - -static struct page *append_pipe(struct iov_iter *i, size_t size, - unsigned int *off) -{ - struct pipe_inode_info *pipe = i->pipe; - int offset = i->last_offset; - struct pipe_buffer *buf; - struct page *page; - - if (offset > 0 && offset < PAGE_SIZE) { - // some space in the last buffer; add to it - buf = pipe_buf(pipe, pipe->head - 1); - size = min_t(size_t, size, PAGE_SIZE - offset); - buf->len += size; - i->last_offset += size; - i->count -= size; - *off = offset; - return buf->page; - } - // OK, we need a new buffer - *off = 0; - size = min_t(size_t, size, PAGE_SIZE); - if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) - return NULL; - page = push_anon(pipe, size); - if (!page) - return NULL; - i->head = pipe->head - 1; - i->last_offset = size; - i->count -= size; - return page; -} - -static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes, - struct iov_iter *i) -{ - struct pipe_inode_info *pipe = i->pipe; - unsigned int head = pipe->head; - - if (unlikely(bytes > i->count)) - bytes = i->count; - - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - if (offset && i->last_offset == -offset) { // could we merge 
it? - struct pipe_buffer *buf = pipe_buf(pipe, head - 1); - if (buf->page == page) { - buf->len += bytes; - i->last_offset -= bytes; - i->count -= bytes; - return bytes; - } - } - if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) - return 0; - - push_page(pipe, page, offset, bytes); - i->last_offset = -(offset + bytes); - i->head = head; - i->count -= bytes; - return bytes; -} - /* * fault_in_iov_iter_readable - fault in iov iterator for reading * @i: iterator @@ -446,46 +300,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, } EXPORT_SYMBOL(iov_iter_init); -// returns the offset in partial buffer (if any) -static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages) -{ - struct pipe_inode_info *pipe = i->pipe; - int used = pipe->head - pipe->tail; - int off = i->last_offset; - - *npages = max((int)pipe->max_usage - used, 0); - - if (off > 0 && off < PAGE_SIZE) { // anon and not full - (*npages)++; - return off; - } - return 0; -} - -static size_t copy_pipe_to_iter(const void *addr, size_t bytes, - struct iov_iter *i) -{ - unsigned int off, chunk; - - if (unlikely(bytes > i->count)) - bytes = i->count; - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - for (size_t n = bytes; n; n -= chunk) { - struct page *page = append_pipe(i, n, &off); - chunk = min_t(size_t, n, PAGE_SIZE - off); - if (!page) - return bytes - n; - memcpy_to_page(page, off, addr, chunk); - addr += chunk; - } - return bytes; -} - static __wsum csum_and_memcpy(void *to, const void *from, size_t len, __wsum sum, size_t off) { @@ -493,44 +307,10 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len, return csum_block_add(sum, next, off); } -static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, - struct iov_iter *i, __wsum *sump) -{ - __wsum sum = *sump; - size_t off = 0; - unsigned int chunk, r; - - if (unlikely(bytes > i->count)) - bytes = i->count; - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - while (bytes) { - struct page *page = append_pipe(i, bytes, &r); - char *p; - - if (!page) - break; - chunk = min_t(size_t, bytes, PAGE_SIZE - r); - p = kmap_local_page(page); - sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off); - kunmap_local(p); - off += chunk; - bytes -= chunk; - } - *sump = sum; - return off; -} - size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(i->data_source)) return 0; - if (unlikely(iov_iter_is_pipe(i))) - return copy_pipe_to_iter(addr, bytes, i); if (user_backed_iter(i)) might_fault(); iterate_and_advance(i, bytes, base, len, off, @@ -552,42 +332,6 @@ static int copyout_mc(void __user *to, const void *from, size_t n) return n; } -static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, - struct iov_iter *i) -{ - size_t xfer = 0; - unsigned int off, chunk; - - if (unlikely(bytes > i->count)) - bytes = i->count; - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - while (bytes) { - struct page *page = append_pipe(i, bytes, &off); - unsigned long rem; - char *p; - - if (!page) - break; - chunk = min_t(size_t, bytes, PAGE_SIZE - off); - p = kmap_local_page(page); - rem = copy_mc_to_kernel(p + off, addr + xfer, chunk); - chunk -= rem; - kunmap_local(p); - xfer += chunk; - bytes -= chunk; - if (rem) { - iov_iter_revert(i, rem); - break; - } - } - return xfer; -} - /** * _copy_mc_to_iter - copy to iter with source memory error exception handling * @addr: source kernel address @@ -607,9 +351,8 @@ static size_t 
copy_mc_pipe_to_iter(const void *addr, size_t bytes, * alignment and poison alignment assumptions to avoid re-triggering * hardware exceptions. * - * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies. - * Compare to copy_to_iter() where only ITER_IOVEC attempts might return - * a short copy. + * * ITER_KVEC and ITER_BVEC can return short copies. Compare to + * copy_to_iter() where only ITER_IOVEC attempts might return a short copy. * * Return: number of bytes copied (may be %0) */ @@ -617,8 +360,6 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(i->data_source)) return 0; - if (unlikely(iov_iter_is_pipe(i))) - return copy_mc_pipe_to_iter(addr, bytes, i); if (user_backed_iter(i)) might_fault(); __iterate_and_advance(i, bytes, base, len, off, @@ -732,8 +473,6 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, return 0; if (WARN_ON_ONCE(i->data_source)) return 0; - if (unlikely(iov_iter_is_pipe(i))) - return copy_page_to_iter_pipe(page, offset, bytes, i); page += offset / PAGE_SIZE; // first subpage offset %= PAGE_SIZE; while (1) { @@ -764,8 +503,6 @@ size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t byte return 0; if (WARN_ON_ONCE(i->data_source)) return 0; - if (unlikely(iov_iter_is_pipe(i))) - return copy_page_to_iter_pipe(page, offset, bytes, i); page += offset / PAGE_SIZE; // first subpage offset %= PAGE_SIZE; while (1) { @@ -818,36 +555,8 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, } EXPORT_SYMBOL(copy_page_from_iter); -static size_t pipe_zero(size_t bytes, struct iov_iter *i) -{ - unsigned int chunk, off; - - if (unlikely(bytes > i->count)) - bytes = i->count; - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - for (size_t n = bytes; n; n -= chunk) { - struct page *page = append_pipe(i, n, &off); - char *p; - - if (!page) - return bytes - n; - chunk = min_t(size_t, n, PAGE_SIZE - off); - p = kmap_local_page(page); - memset(p + off, 0, chunk); - kunmap_local(p); - } - return bytes; -} - size_t iov_iter_zero(size_t bytes, struct iov_iter *i) { - if (unlikely(iov_iter_is_pipe(i))) - return pipe_zero(bytes, i); iterate_and_advance(i, bytes, base, len, count, clear_user(base, len), memset(base, 0, len) @@ -878,32 +587,6 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t byt } EXPORT_SYMBOL(copy_page_from_iter_atomic); -static void pipe_advance(struct iov_iter *i, size_t size) -{ - struct pipe_inode_info *pipe = i->pipe; - int off = i->last_offset; - - if (!off && !size) { - pipe_discard_from(pipe, i->start_head); // discard everything - return; - } - i->count -= size; - while (1) { - struct pipe_buffer *buf = pipe_buf(pipe, i->head); - if (off) /* make it relative to the beginning of buffer */ - size += abs(off) - buf->offset; - if (size <= buf->len) { - buf->len = size; - i->last_offset = last_offset(buf); - break; - } - size -= buf->len; - i->head++; - off = 0; - } - pipe_discard_from(pipe, i->head + 1); // discard everything past this one -} - static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) { const struct bio_vec *bvec, *end; @@ -955,8 +638,6 @@ void iov_iter_advance(struct iov_iter *i, size_t size) iov_iter_iovec_advance(i, size); } else if (iov_iter_is_bvec(i)) { iov_iter_bvec_advance(i, size); - } else if (iov_iter_is_pipe(i)) { - pipe_advance(i, size); } else if (iov_iter_is_discard(i)) { i->count -= size; } @@ -970,26 +651,6 @@ void iov_iter_revert(struct iov_iter *i, 
size_t unroll) if (WARN_ON(unroll > MAX_RW_COUNT)) return; i->count += unroll; - if (unlikely(iov_iter_is_pipe(i))) { - struct pipe_inode_info *pipe = i->pipe; - unsigned int head = pipe->head; - - while (head > i->start_head) { - struct pipe_buffer *b = pipe_buf(pipe, --head); - if (unroll < b->len) { - b->len -= unroll; - i->last_offset = last_offset(b); - i->head = head; - return; - } - unroll -= b->len; - pipe_buf_release(pipe, b); - pipe->head--; - } - i->last_offset = 0; - i->head = head; - return; - } if (unlikely(iov_iter_is_discard(i))) return; if (unroll <= i->iov_offset) { @@ -1079,24 +740,6 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction, } EXPORT_SYMBOL(iov_iter_bvec); -void iov_iter_pipe(struct iov_iter *i, unsigned int direction, - struct pipe_inode_info *pipe, - size_t count) -{ - BUG_ON(direction != READ); - WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size)); - *i = (struct iov_iter){ - .iter_type = ITER_PIPE, - .data_source = false, - .pipe = pipe, - .head = pipe->head, - .start_head = pipe->head, - .last_offset = 0, - .count = count - }; -} -EXPORT_SYMBOL(iov_iter_pipe); - /** * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray * @i: The iterator to initialise. @@ -1224,19 +867,6 @@ bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask, if (iov_iter_is_bvec(i)) return iov_iter_aligned_bvec(i, addr_mask, len_mask); - if (iov_iter_is_pipe(i)) { - size_t size = i->count; - - if (size & len_mask) - return false; - if (size && i->last_offset > 0) { - if (i->last_offset & addr_mask) - return false; - } - - return true; - } - if (iov_iter_is_xarray(i)) { if (i->count & len_mask) return false; @@ -1307,14 +937,6 @@ unsigned long iov_iter_alignment(const struct iov_iter *i) if (iov_iter_is_bvec(i)) return iov_iter_alignment_bvec(i); - if (iov_iter_is_pipe(i)) { - size_t size = i->count; - - if (size && i->last_offset > 0) - return size | i->last_offset; - return size; - } - if (iov_iter_is_xarray(i)) return (i->xarray_start + i->iov_offset) | i->count; @@ -1367,36 +989,6 @@ static int want_pages_array(struct page ***res, size_t size, return count; } -static ssize_t pipe_get_pages(struct iov_iter *i, - struct page ***pages, size_t maxsize, unsigned maxpages, - size_t *start) -{ - unsigned int npages, count, off, chunk; - struct page **p; - size_t left; - - if (!sanity(i)) - return -EFAULT; - - *start = off = pipe_npages(i, &npages); - if (!npages) - return -EFAULT; - count = want_pages_array(pages, maxsize, off, min(npages, maxpages)); - if (!count) - return -ENOMEM; - p = *pages; - for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) { - struct page *page = append_pipe(i, left, &off); - if (!page) - break; - chunk = min_t(size_t, left, PAGE_SIZE - off); - get_page(*p++ = page); - } - if (!npages) - return -EFAULT; - return maxsize - left; -} - static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, pgoff_t index, unsigned int nr_pages) { @@ -1547,8 +1139,6 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i, } return maxsize; } - if (iov_iter_is_pipe(i)) - return pipe_get_pages(i, pages, maxsize, maxpages, start); if (iov_iter_is_xarray(i)) return iter_xarray_get_pages(i, pages, maxsize, maxpages, start); return -EFAULT; @@ -1638,9 +1228,7 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, } sum = csum_shift(csstate->csum, csstate->off); - if (unlikely(iov_iter_is_pipe(i))) - bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, 
&sum); - else iterate_and_advance(i, bytes, base, len, off, ({ + iterate_and_advance(i, bytes, base, len, off, ({ next = csum_and_copy_to_user(addr + off, base, len); sum = csum_block_add(sum, next, off); next ? 0 : len; @@ -1725,15 +1313,6 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages) return iov_npages(i, maxpages); if (iov_iter_is_bvec(i)) return bvec_npages(i, maxpages); - if (iov_iter_is_pipe(i)) { - int npages; - - if (!sanity(i)) - return 0; - - pipe_npages(i, &npages); - return min(npages, maxpages); - } if (iov_iter_is_xarray(i)) { unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); @@ -1746,10 +1325,6 @@ EXPORT_SYMBOL(iov_iter_npages); const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) { *new = *old; - if (unlikely(iov_iter_is_pipe(new))) { - WARN_ON(1); - return NULL; - } if (iov_iter_is_bvec(new)) return new->bvec = kmemdup(new->bvec, new->nr_segs * sizeof(struct bio_vec), diff --git a/mm/filemap.c b/mm/filemap.c index 0fcb0b80c2e2..603b562d69b1 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2687,8 +2687,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter, if (unlikely(iocb->ki_pos >= i_size_read(inode))) break; - error = filemap_get_pages(iocb, iter->count, &fbatch, - iov_iter_is_pipe(iter)); + error = filemap_get_pages(iocb, iter->count, &fbatch, false); if (error < 0) break; -- cgit v1.2.3 From 9eee8bd81421c5e961cbb1a3c3fa1a06fad545e8 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 14:50:18 +0100 Subject: splice: kdoc for filemap_splice_read() and copy_splice_read() Provide kerneldoc comments for filemap_splice_read() and copy_splice_read(). Signed-off-by: David Howells cc: Christian Brauner cc: Christoph Hellwig cc: Jens Axboe cc: Steve French cc: Al Viro cc: linux-mm@kvack.org cc: linux-block@vger.kernel.org cc: linux-cifs@vger.kernel.org cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/20230522135018.2742245-32-dhowells@redhat.com Signed-off-by: Jens Axboe --- fs/splice.c | 21 +++++++++++++++++++-- mm/filemap.c | 21 ++++++++++++++++++--- 2 files changed, 37 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/fs/splice.c b/fs/splice.c index 9be4cb3b9879..2420ead610a7 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -299,8 +299,25 @@ void splice_shrink_spd(struct splice_pipe_desc *spd) kfree(spd->partial); } -/* - * Copy data from a file into pages and then splice those into the output pipe. +/** + * copy_splice_read - Copy data from a file and splice the copy into a pipe + * @in: The file to read from + * @ppos: Pointer to the file position to read from + * @pipe: The pipe to splice into + * @len: The amount to splice + * @flags: The SPLICE_F_* flags + * + * This function allocates a bunch of pages sufficient to hold the requested + * amount of data (but limited by the remaining pipe capacity), passes it to + * the file's ->read_iter() to read into and then splices the used pages into + * the pipe. + * + * Return: On success, the number of bytes read will be returned and *@ppos + * will be updated if appropriate; 0 will be returned if there is no more data + * to be read; -EAGAIN will be returned if the pipe had no space, and some + * other negative error code will be returned on error. A short read may occur + * if the pipe has insufficient space, we reach the end of the data or we hit a + * hole. 
*/ ssize_t copy_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, diff --git a/mm/filemap.c b/mm/filemap.c index 603b562d69b1..f87e2ad8cff1 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2871,9 +2871,24 @@ size_t splice_folio_into_pipe(struct pipe_inode_info *pipe, return spliced; } -/* - * Splice folios from the pagecache of a buffered (ie. non-O_DIRECT) file into - * a pipe. +/** + * filemap_splice_read - Splice data from a file's pagecache into a pipe + * @in: The file to read from + * @ppos: Pointer to the file position to read from + * @pipe: The pipe to splice into + * @len: The amount to splice + * @flags: The SPLICE_F_* flags + * + * This function gets folios from a file's pagecache and splices them into the + * pipe. Readahead will be called as necessary to fill more folios. This may + * be used for blockdevs also. + * + * Return: On success, the number of bytes read will be returned and *@ppos + * will be updated if appropriate; 0 will be returned if there is no more data + * to be read; -EAGAIN will be returned if the pipe had no space, and some + * other negative error code will be returned on error. A short read may occur + * if the pipe has insufficient space, we reach the end of the data or we hit a + * hole. */ ssize_t filemap_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, -- cgit v1.2.3 From eb07c4f39c3e858a7d0cc4bb15b8a304f83f0497 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 23 May 2023 09:06:34 +0200 Subject: mm/slab: rename CONFIG_SLAB to CONFIG_SLAB_DEPRECATED As discussed at LSF/MM [1] [2] and with no objections raised there, deprecate the SLAB allocator. Rename the user-visible option so that users with CONFIG_SLAB=y get a new prompt with explanation during make oldconfig, while make olddefconfig will just switch to SLUB. In all defconfigs with CONFIG_SLAB=y remove the line so those also switch to SLUB. Regressions due to the switch should be reported to linux-mm and slab maintainers. 
[1] https://lore.kernel.org/all/4b9fc9c6-b48c-198f-5f80-811a44737e5f@suse.cz/ [2] https://lwn.net/Articles/932201/ Signed-off-by: Vlastimil Babka Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Acked-by: David Rientjes Acked-by: Geert Uytterhoeven # m68k Acked-by: Helge Deller # parisc --- arch/arc/configs/axs103_smp_defconfig | 1 - arch/arc/configs/haps_hs_defconfig | 1 - arch/arc/configs/haps_hs_smp_defconfig | 1 - arch/arc/configs/hsdk_defconfig | 1 - arch/arc/configs/tb10x_defconfig | 1 - arch/arm/configs/am200epdkit_defconfig | 1 - arch/arm/configs/at91_dt_defconfig | 1 - arch/arm/configs/dove_defconfig | 1 - arch/arm/configs/ep93xx_defconfig | 1 - arch/arm/configs/imx_v4_v5_defconfig | 1 - arch/arm/configs/lpc32xx_defconfig | 1 - arch/arm/configs/mmp2_defconfig | 1 - arch/arm/configs/mvebu_v7_defconfig | 1 - arch/arm/configs/nhk8815_defconfig | 1 - arch/arm/configs/omap2plus_defconfig | 1 - arch/arm/configs/pxa168_defconfig | 1 - arch/arm/configs/pxa3xx_defconfig | 1 - arch/arm/configs/pxa910_defconfig | 1 - arch/arm/configs/realview_defconfig | 1 - arch/arm/configs/rpc_defconfig | 1 - arch/arm/configs/sama5_defconfig | 1 - arch/arm/configs/sama7_defconfig | 1 - arch/arm/configs/shmobile_defconfig | 1 - arch/arm/configs/sp7021_defconfig | 1 - arch/arm/configs/tegra_defconfig | 1 - arch/arm/configs/versatile_defconfig | 1 - arch/m68k/configs/amiga_defconfig | 1 - arch/m68k/configs/apollo_defconfig | 1 - arch/m68k/configs/atari_defconfig | 1 - arch/m68k/configs/bvme6000_defconfig | 1 - arch/m68k/configs/hp300_defconfig | 1 - arch/m68k/configs/mac_defconfig | 1 - arch/m68k/configs/multi_defconfig | 1 - arch/m68k/configs/mvme147_defconfig | 1 - arch/m68k/configs/mvme16x_defconfig | 1 - arch/m68k/configs/q40_defconfig | 1 - arch/m68k/configs/sun3_defconfig | 1 - arch/m68k/configs/sun3x_defconfig | 1 - arch/microblaze/configs/mmu_defconfig | 1 - arch/mips/configs/ar7_defconfig | 1 - arch/mips/configs/bcm47xx_defconfig | 1 - arch/mips/configs/bigsur_defconfig | 1 - arch/mips/configs/cavium_octeon_defconfig | 1 - arch/mips/configs/ci20_defconfig | 1 - arch/mips/configs/cu1000-neo_defconfig | 1 - arch/mips/configs/cu1830-neo_defconfig | 1 - arch/mips/configs/db1xxx_defconfig | 1 - arch/mips/configs/decstation_64_defconfig | 1 - arch/mips/configs/decstation_defconfig | 1 - arch/mips/configs/decstation_r4k_defconfig | 1 - arch/mips/configs/fuloong2e_defconfig | 1 - arch/mips/configs/gpr_defconfig | 1 - arch/mips/configs/ip22_defconfig | 1 - arch/mips/configs/ip27_defconfig | 1 - arch/mips/configs/ip28_defconfig | 1 - arch/mips/configs/ip32_defconfig | 1 - arch/mips/configs/jazz_defconfig | 1 - arch/mips/configs/malta_defconfig | 1 - arch/mips/configs/malta_kvm_defconfig | 1 - arch/mips/configs/malta_qemu_32r6_defconfig | 1 - arch/mips/configs/maltaaprp_defconfig | 1 - arch/mips/configs/maltasmvp_defconfig | 1 - arch/mips/configs/maltasmvp_eva_defconfig | 1 - arch/mips/configs/maltaup_defconfig | 1 - arch/mips/configs/maltaup_xpa_defconfig | 1 - arch/mips/configs/mtx1_defconfig | 1 - arch/mips/configs/pic32mzda_defconfig | 1 - arch/mips/configs/qi_lb60_defconfig | 1 - arch/mips/configs/rb532_defconfig | 1 - arch/mips/configs/rbtx49xx_defconfig | 1 - arch/mips/configs/rm200_defconfig | 1 - arch/mips/configs/rs90_defconfig | 1 - arch/mips/configs/sb1250_swarm_defconfig | 1 - arch/nios2/configs/10m50_defconfig | 1 - arch/nios2/configs/3c120_defconfig | 1 - arch/parisc/configs/generic-32bit_defconfig | 1 - arch/powerpc/configs/40x/klondike_defconfig | 1 - arch/powerpc/configs/52xx/pcm030_defconfig | 1 
- arch/powerpc/configs/83xx/kmeter1_defconfig | 1 - arch/powerpc/configs/83xx/mpc837x_rdb_defconfig | 1 - arch/powerpc/configs/85xx/ge_imp3a_defconfig | 1 - arch/powerpc/configs/ep8248e_defconfig | 1 - arch/powerpc/configs/gamecube_defconfig | 1 - arch/powerpc/configs/mgcoge_defconfig | 1 - arch/powerpc/configs/mpc512x_defconfig | 1 - arch/powerpc/configs/mpc83xx_defconfig | 1 - arch/powerpc/configs/ps3_defconfig | 1 - arch/powerpc/configs/wii_defconfig | 1 - arch/sh/configs/ap325rxa_defconfig | 1 - arch/sh/configs/apsh4a3a_defconfig | 1 - arch/sh/configs/apsh4ad0a_defconfig | 1 - arch/sh/configs/dreamcast_defconfig | 1 - arch/sh/configs/ecovec24-romimage_defconfig | 1 - arch/sh/configs/ecovec24_defconfig | 1 - arch/sh/configs/espt_defconfig | 1 - arch/sh/configs/hp6xx_defconfig | 1 - arch/sh/configs/kfr2r09-romimage_defconfig | 1 - arch/sh/configs/kfr2r09_defconfig | 1 - arch/sh/configs/landisk_defconfig | 1 - arch/sh/configs/lboxre2_defconfig | 1 - arch/sh/configs/magicpanelr2_defconfig | 1 - arch/sh/configs/microdev_defconfig | 1 - arch/sh/configs/migor_defconfig | 1 - arch/sh/configs/polaris_defconfig | 1 - arch/sh/configs/r7780mp_defconfig | 1 - arch/sh/configs/r7785rp_defconfig | 1 - arch/sh/configs/rsk7264_defconfig | 1 - arch/sh/configs/rsk7269_defconfig | 1 - arch/sh/configs/rts7751r2d1_defconfig | 1 - arch/sh/configs/rts7751r2dplus_defconfig | 1 - arch/sh/configs/sdk7786_defconfig | 1 - arch/sh/configs/se7343_defconfig | 1 - arch/sh/configs/se7619_defconfig | 1 - arch/sh/configs/se7705_defconfig | 1 - arch/sh/configs/se7712_defconfig | 1 - arch/sh/configs/se7721_defconfig | 1 - arch/sh/configs/se7724_defconfig | 1 - arch/sh/configs/se7750_defconfig | 1 - arch/sh/configs/se7751_defconfig | 1 - arch/sh/configs/se7780_defconfig | 1 - arch/sh/configs/secureedge5410_defconfig | 1 - arch/sh/configs/sh03_defconfig | 1 - arch/sh/configs/sh2007_defconfig | 1 - arch/sh/configs/sh7710voipgw_defconfig | 1 - arch/sh/configs/sh7757lcr_defconfig | 1 - arch/sh/configs/sh7763rdp_defconfig | 1 - arch/sh/configs/sh7785lcr_32bit_defconfig | 1 - arch/sh/configs/sh7785lcr_defconfig | 1 - arch/sh/configs/titan_defconfig | 1 - arch/sh/configs/urquell_defconfig | 1 - arch/sparc/configs/sparc32_defconfig | 1 - arch/sparc/configs/sparc64_defconfig | 1 - arch/um/configs/i386_defconfig | 1 - arch/um/configs/x86_64_defconfig | 1 - kernel/configs/tiny.config | 1 - mm/Kconfig | 16 ++++++++++++++-- 136 files changed, 14 insertions(+), 137 deletions(-) (limited to 'mm') diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index 2f336d99a8cf..07c89281c2e3 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -13,7 +13,6 @@ CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig index 899b2fd5c71d..8c3ed5d6e6c3 100644 --- a/arch/arc/configs/haps_hs_defconfig +++ b/arch/arc/configs/haps_hs_defconfig @@ -14,7 +14,6 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_ARC_BUILTIN_DTB_NAME="haps_hs" CONFIG_MODULES=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig index 0d32aac8069f..61107e8bac33 100644 --- a/arch/arc/configs/haps_hs_smp_defconfig +++ 
b/arch/arc/configs/haps_hs_smp_defconfig @@ -15,7 +15,6 @@ CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_SMP=y CONFIG_ARC_BUILTIN_DTB_NAME="haps_hs_idu" CONFIG_KPROBES=y diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index d18378d2c2a6..4ee2a1507b57 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig @@ -13,7 +13,6 @@ CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_ARC_SOC_HSDK=y diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index 6f0d2be9d926..941bbadd6bf2 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig @@ -19,7 +19,6 @@ CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set CONFIG_ISA_ARCOMPACT=y -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/arm/configs/am200epdkit_defconfig b/arch/arm/configs/am200epdkit_defconfig index 3f633eaf9770..a25834e4c901 100644 --- a/arch/arm/configs/am200epdkit_defconfig +++ b/arch/arm/configs/am200epdkit_defconfig @@ -15,7 +15,6 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y # CONFIG_SWAP is not set -CONFIG_SLAB=y # CONFIG_VM_EVENT_COUNTERS is not set CONFIG_NET=y CONFIG_PACKET=m diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig index 82bcf4dc7f54..8cc602853cc5 100644 --- a/arch/arm/configs/at91_dt_defconfig +++ b/arch/arm/configs/at91_dt_defconfig @@ -27,7 +27,6 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_SWAP is not set -CONFIG_SLAB=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig index c8d559f38f48..46859e6fee5b 100644 --- a/arch/arm/configs/dove_defconfig +++ b/arch/arm/configs/dove_defconfig @@ -17,7 +17,6 @@ CONFIG_VFP=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y -CONFIG_SLAB=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig index 3154125321c5..44e89a980d29 100644 --- a/arch/arm/configs/ep93xx_defconfig +++ b/arch/arm/configs/ep93xx_defconfig @@ -24,7 +24,6 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y -CONFIG_SLAB=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig index c9a602aee715..ec45e6225225 100644 --- a/arch/arm/configs/imx_v4_v5_defconfig +++ b/arch/arm/configs/imx_v4_v5_defconfig @@ -26,7 +26,6 @@ CONFIG_KPROBES=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_SWAP is not set -CONFIG_SLAB=y # CONFIG_COMPAT_BRK is not set CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig index fabb66a53350..d7df0486850f 100644 --- a/arch/arm/configs/lpc32xx_defconfig +++ b/arch/arm/configs/lpc32xx_defconfig @@ -23,7 +23,6 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_SLAB=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/mmp2_defconfig b/arch/arm/configs/mmp2_defconfig index 7984640e994e..3d1d6f3b592a 100644 --- a/arch/arm/configs/mmp2_defconfig +++ 
b/arch/arm/configs/mmp2_defconfig @@ -12,7 +12,6 @@ CONFIG_VFP=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_SLAB=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig index 68a18264f31b..2d2a4dc8f379 100644 --- a/arch/arm/configs/mvebu_v7_defconfig +++ b/arch/arm/configs/mvebu_v7_defconfig @@ -23,7 +23,6 @@ CONFIG_VFP=y CONFIG_NEON=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_SLAB=y # CONFIG_COMPACTION is not set CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig index 4171dafddc0a..ea28ed8991b4 100644 --- a/arch/arm/configs/nhk8815_defconfig +++ b/arch/arm/configs/nhk8815_defconfig @@ -16,7 +16,6 @@ CONFIG_AEABI=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_SWAP is not set -CONFIG_SLAB=y CONFIG_CMA=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index c4216c552100..9bd36dd39bd0 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -62,7 +62,6 @@ CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_PARTITION_ADVANCED=y CONFIG_BINFMT_MISC=y -CONFIG_SLAB=y CONFIG_CMA=y CONFIG_ZSMALLOC=m CONFIG_NET=y diff --git a/arch/arm/configs/pxa168_defconfig b/arch/arm/configs/pxa168_defconfig index 8422ddc9bab2..ec3a43f9c85e 100644 --- a/arch/arm/configs/pxa168_defconfig +++ b/arch/arm/configs/pxa168_defconfig @@ -12,7 +12,6 @@ CONFIG_FPE_NWFPE=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_SLAB=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/pxa3xx_defconfig b/arch/arm/configs/pxa3xx_defconfig index d1e83b52e03a..7f95fa273a7d 100644 --- a/arch/arm/configs/pxa3xx_defconfig +++ b/arch/arm/configs/pxa3xx_defconfig @@ -12,7 +12,6 @@ CONFIG_AEABI=y CONFIG_CMDLINE="root=/dev/nfs rootfstype=nfs nfsroot=192.168.1.100:/nfsroot/ ip=192.168.1.101:192.168.1.100::255.255.255.0::eth0:on console=ttyS0,115200 mem=64M debug" CONFIG_FPE_NWFPE=y CONFIG_MODULES=y -CONFIG_SLAB=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/pxa910_defconfig b/arch/arm/configs/pxa910_defconfig index 48e41ca582af..958d958377dc 100644 --- a/arch/arm/configs/pxa910_defconfig +++ b/arch/arm/configs/pxa910_defconfig @@ -13,7 +13,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_AEABI=y CONFIG_FPE_NWFPE=y -CONFIG_SLAB=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/realview_defconfig b/arch/arm/configs/realview_defconfig index 92f803c2805c..a221a99f6472 100644 --- a/arch/arm/configs/realview_defconfig +++ b/arch/arm/configs/realview_defconfig @@ -20,7 +20,6 @@ CONFIG_VFP=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_SWAP is not set -CONFIG_SLAB=y CONFIG_CMA=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/arm/configs/rpc_defconfig b/arch/arm/configs/rpc_defconfig index 210974364d61..65a3fded55be 100644 --- a/arch/arm/configs/rpc_defconfig +++ b/arch/arm/configs/rpc_defconfig @@ -11,7 +11,6 @@ CONFIG_CPU_SA110=y CONFIG_FPE_NWFPE=y CONFIG_PARTITION_ADVANCED=y CONFIG_BSD_DISKLABEL=y -CONFIG_SLAB=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig index c6aff6fb084d..56ee511210de 100644 --- a/arch/arm/configs/sama5_defconfig +++ b/arch/arm/configs/sama5_defconfig @@ -26,7 +26,6 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y # 
CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_SWAP is not set -CONFIG_SLAB=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/sama7_defconfig b/arch/arm/configs/sama7_defconfig index 954112041403..a89767c89d17 100644 --- a/arch/arm/configs/sama7_defconfig +++ b/arch/arm/configs/sama7_defconfig @@ -41,7 +41,6 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_EFI_PARTITION is not set # CONFIG_COREDUMP is not set # CONFIG_SWAP is not set -CONFIG_SLAB=y # CONFIG_COMPACTION is not set CONFIG_CMA=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig index 0b21c0a47582..1b08c94e541d 100644 --- a/arch/arm/configs/shmobile_defconfig +++ b/arch/arm/configs/shmobile_defconfig @@ -24,7 +24,6 @@ CONFIG_CPUFREQ_DT=y CONFIG_VFP=y CONFIG_NEON=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_SLAB=y CONFIG_CMA=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/arm/configs/sp7021_defconfig b/arch/arm/configs/sp7021_defconfig index c6448ac860b6..ec723401b440 100644 --- a/arch/arm/configs/sp7021_defconfig +++ b/arch/arm/configs/sp7021_defconfig @@ -24,7 +24,6 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_SLAB=y CONFIG_UEVENT_HELPER=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig index f32047e24b63..3c6af935e932 100644 --- a/arch/arm/configs/tegra_defconfig +++ b/arch/arm/configs/tegra_defconfig @@ -32,7 +32,6 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y -CONFIG_SLAB=y CONFIG_CMA=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig index 67e3f9138306..849118cbbb44 100644 --- a/arch/arm/configs/versatile_defconfig +++ b/arch/arm/configs/versatile_defconfig @@ -14,7 +14,6 @@ CONFIG_VFP=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_PARTITION_ADVANCED=y -CONFIG_SLAB=y CONFIG_CMA=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index b26469a65bc1..e9f364b7f743 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -41,7 +41,6 @@ CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m -CONFIG_SLAB=y # CONFIG_COMPACTION is not set CONFIG_USERFAULTFD=y CONFIG_NET=y diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 944a49a129be..fc1d2e5e35d7 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -37,7 +37,6 @@ CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m -CONFIG_SLAB=y # CONFIG_COMPACTION is not set CONFIG_USERFAULTFD=y CONFIG_NET=y diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index a32dd884fcce..c8d87c450804 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -44,7 +44,6 @@ CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m -CONFIG_SLAB=y # CONFIG_COMPACTION is not set CONFIG_USERFAULTFD=y CONFIG_NET=y diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 23b7805309bd..df174677145d 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ 
b/arch/m68k/configs/bvme6000_defconfig @@ -34,7 +34,6 @@ CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m -CONFIG_SLAB=y # CONFIG_COMPACTION is not set CONFIG_USERFAULTFD=y CONFIG_NET=y diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index 5605ab5c3dcf..c944c3e2697a 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -36,7 +36,6 @@ CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m -CONFIG_SLAB=y # CONFIG_COMPACTION is not set CONFIG_USERFAULTFD=y CONFIG_NET=y diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index d0d1f9c33756..8a3ef860d30d 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -35,7 +35,6 @@ CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m -CONFIG_SLAB=y # CONFIG_COMPACTION is not set CONFIG_USERFAULTFD=y CONFIG_NET=y diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 6d04314ce7ea..4cb49c77b66d 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -55,7 +55,6 @@ CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m -CONFIG_SLAB=y # CONFIG_COMPACTION is not set CONFIG_USERFAULTFD=y CONFIG_NET=y diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index e6f5ae526d08..59ee60ec1911 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -33,7 +33,6 @@ CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m -CONFIG_SLAB=y # CONFIG_COMPACTION is not set CONFIG_USERFAULTFD=y CONFIG_NET=y diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index f2d4dff4787a..cf6aff2cfbe6 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -34,7 +34,6 @@ CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m -CONFIG_SLAB=y # CONFIG_COMPACTION is not set CONFIG_USERFAULTFD=y CONFIG_NET=y diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 907eedecd040..b04de348ebb1 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -35,7 +35,6 @@ CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m -CONFIG_SLAB=y # CONFIG_COMPACTION is not set CONFIG_USERFAULTFD=y CONFIG_NET=y diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 9e3d47008f21..1a5532598ee6 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -31,7 +31,6 @@ CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m -CONFIG_SLAB=y # CONFIG_COMPACTION is not set CONFIG_USERFAULTFD=y CONFIG_NET=y diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index f6540078cb4b..4e606dc8ec17 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -31,7 +31,6 @@ CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m -CONFIG_SLAB=y # 
CONFIG_COMPACTION is not set CONFIG_USERFAULTFD=y CONFIG_NET=y diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig index 756feb9369a6..38dcdce367ec 100644 --- a/arch/microblaze/configs/mmu_defconfig +++ b/arch/microblaze/configs/mmu_defconfig @@ -8,7 +8,6 @@ CONFIG_SYSFS_DEPRECATED_V2=y # CONFIG_BASE_FULL is not set CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y -CONFIG_SLAB=y CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1 CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1 CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1 diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig index ed4a6388791e..329c60aa570a 100644 --- a/arch/mips/configs/ar7_defconfig +++ b/arch/mips/configs/ar7_defconfig @@ -11,7 +11,6 @@ CONFIG_EXPERT=y # CONFIG_KALLSYMS is not set # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_AR7=y CONFIG_HZ_100=y CONFIG_KEXEC=y diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig index 22ffde722bb9..62c462a23edc 100644 --- a/arch/mips/configs/bcm47xx_defconfig +++ b/arch/mips/configs/bcm47xx_defconfig @@ -3,7 +3,6 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_EMBEDDED=y -CONFIG_SLAB=y CONFIG_BCM47XX=y CONFIG_PCI=y # CONFIG_SUSPEND is not set diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig index d15961f00ece..b0cbfa781ffc 100644 --- a/arch/mips/configs/bigsur_defconfig +++ b/arch/mips/configs/bigsur_defconfig @@ -17,7 +17,6 @@ CONFIG_USER_NS=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y -CONFIG_SLAB=y CONFIG_SIBYTE_BIGSUR=y CONFIG_64BIT=y CONFIG_SMP=y diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index 0bc2e3cc573b..f523ee6f25bf 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig @@ -9,7 +9,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y -CONFIG_SLAB=y CONFIG_CAVIUM_OCTEON_SOC=y CONFIG_CAVIUM_CN63XXP1=y CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE=2 diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig index 11f08b6a3013..28eb15a1ffc4 100644 --- a/arch/mips/configs/ci20_defconfig +++ b/arch/mips/configs/ci20_defconfig @@ -21,7 +21,6 @@ CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_MACH_INGENIC_SOC=y CONFIG_JZ4780_CI20=y CONFIG_HIGHMEM=y diff --git a/arch/mips/configs/cu1000-neo_defconfig b/arch/mips/configs/cu1000-neo_defconfig index 1cbc9302e1d1..afe39ce7568e 100644 --- a/arch/mips/configs/cu1000-neo_defconfig +++ b/arch/mips/configs/cu1000-neo_defconfig @@ -18,7 +18,6 @@ CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_MACH_INGENIC_SOC=y CONFIG_X1000_CU1000_NEO=y CONFIG_HIGHMEM=y diff --git a/arch/mips/configs/cu1830-neo_defconfig b/arch/mips/configs/cu1830-neo_defconfig index a0f73f3cd6ce..347c9fd1c21d 100644 --- a/arch/mips/configs/cu1830-neo_defconfig +++ b/arch/mips/configs/cu1830-neo_defconfig @@ -18,7 +18,6 @@ CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_MACH_INGENIC_SOC=y CONFIG_X1830_CU1830_NEO=y CONFIG_HIGHMEM=y diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig index af070be1b583..08d29b56e175 100644 --- a/arch/mips/configs/db1xxx_defconfig 
+++ b/arch/mips/configs/db1xxx_defconfig @@ -18,7 +18,6 @@ CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y -CONFIG_SLAB=y CONFIG_MIPS_ALCHEMY=y CONFIG_HZ_100=y CONFIG_PCI=y diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig index 49ec1575234e..92a1d0aea38c 100644 --- a/arch/mips/configs/decstation_64_defconfig +++ b/arch/mips/configs/decstation_64_defconfig @@ -9,7 +9,6 @@ CONFIG_EXPERT=y # CONFIG_SYSFS_SYSCALL is not set CONFIG_BPF_SYSCALL=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_MACH_DECSTATION=y CONFIG_64BIT=y CONFIG_PAGE_SIZE_16KB=y diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig index 5cec4c096e2c..db214fcebcbe 100644 --- a/arch/mips/configs/decstation_defconfig +++ b/arch/mips/configs/decstation_defconfig @@ -9,7 +9,6 @@ CONFIG_EXPERT=y # CONFIG_SYSFS_SYSCALL is not set CONFIG_BPF_SYSCALL=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_MACH_DECSTATION=y CONFIG_CPU_R3000=y CONFIG_TC=y diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig index af37e26d9b5b..15b769e96d5b 100644 --- a/arch/mips/configs/decstation_r4k_defconfig +++ b/arch/mips/configs/decstation_r4k_defconfig @@ -9,7 +9,6 @@ CONFIG_EXPERT=y # CONFIG_SYSFS_SYSCALL is not set CONFIG_BPF_SYSCALL=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_MACH_DECSTATION=y CONFIG_TC=y # CONFIG_SUSPEND is not set diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig index 843d6a5a4f61..5458573286a2 100644 --- a/arch/mips/configs/fuloong2e_defconfig +++ b/arch/mips/configs/fuloong2e_defconfig @@ -13,7 +13,6 @@ CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_EXPERT=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MACH_LOONGSON2EF=y CONFIG_PCI=y diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index eb755650f821..92fc0edbac47 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig @@ -8,7 +8,6 @@ CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MIPS_ALCHEMY=y CONFIG_MIPS_GPR=y diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig index 13df29901237..897e55579af0 100644 --- a/arch/mips/configs/ip22_defconfig +++ b/arch/mips/configs/ip22_defconfig @@ -10,7 +10,6 @@ CONFIG_USER_NS=y CONFIG_RELAY=y CONFIG_EXPERT=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_SGI_IP22=y CONFIG_ARC_CONSOLE=y CONFIG_CPU_R5000=y diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index 3e86f8106ba0..b51f738a39a0 100644 --- a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig @@ -9,7 +9,6 @@ CONFIG_CGROUPS=y CONFIG_CPUSETS=y CONFIG_RELAY=y CONFIG_EXPERT=y -CONFIG_SLAB=y CONFIG_SGI_IP27=y CONFIG_NUMA=y CONFIG_SMP=y diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig index ba13eea0509f..e0040110a3ee 100644 --- a/arch/mips/configs/ip28_defconfig +++ b/arch/mips/configs/ip28_defconfig @@ -5,7 +5,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_EXPERT=y -CONFIG_SLAB=y CONFIG_SGI_IP28=y CONFIG_ARC_CONSOLE=y CONFIG_EISA=y diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig index 8ced2224c328..418a4181e2fe 100644 --- a/arch/mips/configs/ip32_defconfig +++ b/arch/mips/configs/ip32_defconfig @@ -7,7 +7,6 @@ 
CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_EXPERT=y -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_SGI_IP32=y # CONFIG_SECCOMP is not set diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig index 106b21cb677f..6b5d7e963ed9 100644 --- a/arch/mips/configs/jazz_defconfig +++ b/arch/mips/configs/jazz_defconfig @@ -7,7 +7,6 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_EXPERT=y -CONFIG_SLAB=y CONFIG_MACH_JAZZ=y CONFIG_OLIVETTI_M700=y CONFIG_MIPS_MAGNUM_4000=y diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index 265d38dffbf6..743209047792 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig @@ -7,7 +7,6 @@ CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig index 1d2b248c7cd3..dd2b9c181f32 100644 --- a/arch/mips/configs/malta_kvm_defconfig +++ b/arch/mips/configs/malta_kvm_defconfig @@ -7,7 +7,6 @@ CONFIG_RELAY=y CONFIG_EXPERT=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig index fd63a2b152f6..82183ec6bc31 100644 --- a/arch/mips/configs/malta_qemu_32r6_defconfig +++ b/arch/mips/configs/malta_qemu_32r6_defconfig @@ -6,7 +6,6 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=15 CONFIG_EMBEDDED=y -CONFIG_SLAB=y CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R6=y diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig index 1f07e354c954..9a199867a5e7 100644 --- a/arch/mips/configs/maltaaprp_defconfig +++ b/arch/mips/configs/maltaaprp_defconfig @@ -6,7 +6,6 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=15 CONFIG_EMBEDDED=y -CONFIG_SLAB=y CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig index 5cd3eca236de..e5502d66a474 100644 --- a/arch/mips/configs/maltasmvp_defconfig +++ b/arch/mips/configs/maltasmvp_defconfig @@ -6,7 +6,6 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=15 CONFIG_EMBEDDED=y -CONFIG_SLAB=y CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig index 45688e742a15..a378aad97138 100644 --- a/arch/mips/configs/maltasmvp_eva_defconfig +++ b/arch/mips/configs/maltasmvp_eva_defconfig @@ -6,7 +6,6 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=15 CONFIG_EMBEDDED=y -CONFIG_SLAB=y CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig index 136f965784db..fc6f88cae7be 100644 --- a/arch/mips/configs/maltaup_defconfig +++ b/arch/mips/configs/maltaup_defconfig @@ -7,7 +7,6 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=15 CONFIG_EMBEDDED=y -CONFIG_SLAB=y CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig index 75b8da8d9927..97c2d7f530b3 100644 --- 
a/arch/mips/configs/maltaup_xpa_defconfig +++ b/arch/mips/configs/maltaup_xpa_defconfig @@ -8,7 +8,6 @@ CONFIG_NAMESPACES=y CONFIG_RELAY=y CONFIG_EXPERT=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index a5c68e9b0482..b64172179160 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -8,7 +8,6 @@ CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MIPS_ALCHEMY=y CONFIG_MIPS_MTX1=y diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig index 48dd02d01ac1..8397f28355d5 100644 --- a/arch/mips/configs/pic32mzda_defconfig +++ b/arch/mips/configs/pic32mzda_defconfig @@ -9,7 +9,6 @@ CONFIG_RELAY=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_MACH_PIC32=y CONFIG_DTB_PIC32_MZDA_SK=y CONFIG_HZ_100=y diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig index 7e5d9741bd5d..c27c8c7151a1 100644 --- a/arch/mips/configs/qi_lb60_defconfig +++ b/arch/mips/configs/qi_lb60_defconfig @@ -6,7 +6,6 @@ CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_MACH_INGENIC_SOC=y CONFIG_JZ4740_QI_LB60=y CONFIG_HZ_100=y diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig index 93306f5e045b..02ec6c1a5116 100644 --- a/arch/mips/configs/rb532_defconfig +++ b/arch/mips/configs/rb532_defconfig @@ -11,7 +11,6 @@ CONFIG_EXPERT=y # CONFIG_ELF_CORE is not set # CONFIG_KALLSYMS is not set # CONFIG_VM_EVENT_COUNTERS is not set -CONFIG_SLAB=y CONFIG_MIKROTIK_RB532=y CONFIG_HZ_100=y # CONFIG_SECCOMP is not set diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig index 30c195f28278..03a7bbe28a53 100644 --- a/arch/mips/configs/rbtx49xx_defconfig +++ b/arch/mips/configs/rbtx49xx_defconfig @@ -7,7 +7,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_EPOLL is not set -CONFIG_SLAB=y CONFIG_MACH_TX49XX=y CONFIG_TOSHIBA_RBTX4927=y # CONFIG_SECCOMP is not set diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig index 9932a593e3c3..7475c2cbea89 100644 --- a/arch/mips/configs/rm200_defconfig +++ b/arch/mips/configs/rm200_defconfig @@ -7,7 +7,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_EXPERT=y -CONFIG_SLAB=y CONFIG_SNI_RM=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_ARC_CONSOLE=y diff --git a/arch/mips/configs/rs90_defconfig b/arch/mips/configs/rs90_defconfig index 7ce3b814fdc8..85ea2a6775f5 100644 --- a/arch/mips/configs/rs90_defconfig +++ b/arch/mips/configs/rs90_defconfig @@ -17,7 +17,6 @@ CONFIG_LD_DEAD_CODE_DATA_ELIMINATION=y # CONFIG_KALLSYMS is not set CONFIG_EMBEDDED=y # CONFIG_PERF_EVENTS is not set -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MACH_INGENIC_SOC=y CONFIG_JZ4740_RS90=y diff --git a/arch/mips/configs/sb1250_swarm_defconfig b/arch/mips/configs/sb1250_swarm_defconfig index 030186f89501..ce855b644bb0 100644 --- a/arch/mips/configs/sb1250_swarm_defconfig +++ b/arch/mips/configs/sb1250_swarm_defconfig @@ -10,7 +10,6 @@ CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_SIBYTE_SWARM=y CONFIG_CPU_SB1_PASS_2_2=y CONFIG_64BIT=y diff --git a/arch/nios2/configs/10m50_defconfig 
b/arch/nios2/configs/10m50_defconfig index 91c3fce4dc7f..63151ebf1470 100644 --- a/arch/nios2/configs/10m50_defconfig +++ b/arch/nios2/configs/10m50_defconfig @@ -10,7 +10,6 @@ CONFIG_LOG_BUF_SHIFT=14 # CONFIG_SHMEM is not set # CONFIG_AIO is not set CONFIG_EMBEDDED=y -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_NIOS2_MEM_BASE=0x8000000 diff --git a/arch/nios2/configs/3c120_defconfig b/arch/nios2/configs/3c120_defconfig index c42ad7e162a3..0daf3038d7aa 100644 --- a/arch/nios2/configs/3c120_defconfig +++ b/arch/nios2/configs/3c120_defconfig @@ -10,7 +10,6 @@ CONFIG_LOG_BUF_SHIFT=14 # CONFIG_SHMEM is not set # CONFIG_AIO is not set CONFIG_EMBEDDED=y -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_NIOS2_MEM_BASE=0x10000000 diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig index 8ce0ae370680..8c4d4844321f 100644 --- a/arch/parisc/configs/generic-32bit_defconfig +++ b/arch/parisc/configs/generic-32bit_defconfig @@ -12,7 +12,6 @@ CONFIG_USER_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_PERF_EVENTS=y -CONFIG_SLAB=y CONFIG_PA7100LC=y CONFIG_SMP=y CONFIG_HZ_100=y diff --git a/arch/powerpc/configs/40x/klondike_defconfig b/arch/powerpc/configs/40x/klondike_defconfig index 6a735ee75715..acafbb8f6808 100644 --- a/arch/powerpc/configs/40x/klondike_defconfig +++ b/arch/powerpc/configs/40x/klondike_defconfig @@ -5,7 +5,6 @@ CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y CONFIG_EMBEDDED=y -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_APM8018X=y diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig index 789622ffd844..88fbe0d42e11 100644 --- a/arch/powerpc/configs/52xx/pcm030_defconfig +++ b/arch/powerpc/configs/52xx/pcm030_defconfig @@ -10,7 +10,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_EXPERT=y # CONFIG_VM_EVENT_COUNTERS is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig index 24bf1bde1bb4..487e5e1bbf4c 100644 --- a/arch/powerpc/configs/83xx/kmeter1_defconfig +++ b/arch/powerpc/configs/83xx/kmeter1_defconfig @@ -5,7 +5,6 @@ CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_EXPERT=y -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig index 4e3373381ab6..58fae5131fa7 100644 --- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig @@ -2,7 +2,6 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig index 6cb7e90d52c1..da6fc203e2dc 100644 --- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig +++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig @@ -17,7 +17,6 @@ CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_PERF_EVENTS=y -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig index 8df6d3a293e3..b9dfa3a0713b 100644 --- a/arch/powerpc/configs/ep8248e_defconfig +++ 
b/arch/powerpc/configs/ep8248e_defconfig @@ -4,7 +4,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_EXPERT=y CONFIG_KALLSYMS_ALL=y -CONFIG_SLAB=y CONFIG_PARTITION_ADVANCED=y # CONFIG_PPC_CHRP is not set # CONFIG_PPC_PMAC is not set diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig index 91a1b99f4e8f..d77eeb525366 100644 --- a/arch/powerpc/configs/gamecube_defconfig +++ b/arch/powerpc/configs/gamecube_defconfig @@ -8,7 +8,6 @@ CONFIG_EXPERT=y # CONFIG_ELF_CORE is not set CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig index 498d35db7833..ce220ceeb91c 100644 --- a/arch/powerpc/configs/mgcoge_defconfig +++ b/arch/powerpc/configs/mgcoge_defconfig @@ -10,7 +10,6 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_KALLSYMS_ALL=y # CONFIG_PCSPKR_PLATFORM is not set CONFIG_EMBEDDED=y -CONFIG_SLAB=y CONFIG_PARTITION_ADVANCED=y # CONFIG_PPC_PMAC is not set CONFIG_PPC_82xx=y diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig index 10fe061c5e6d..d24457bc5791 100644 --- a/arch/powerpc/configs/mpc512x_defconfig +++ b/arch/powerpc/configs/mpc512x_defconfig @@ -4,7 +4,6 @@ CONFIG_NO_HZ=y CONFIG_LOG_BUF_SHIFT=16 CONFIG_BLK_DEV_INITRD=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig index 8779f03bced1..83c4710017e9 100644 --- a/arch/powerpc/configs/mpc83xx_defconfig +++ b/arch/powerpc/configs/mpc83xx_defconfig @@ -2,7 +2,6 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index 52a8c5450ecb..ea3ee0719bb2 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig @@ -33,7 +33,6 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=y -CONFIG_SLAB=y # CONFIG_COMPAT_BRK is not set # CONFIG_SPARSEMEM_VMEMMAP is not set # CONFIG_COMPACTION is not set diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig index 0ab78c51455d..5017a697b67b 100644 --- a/arch/powerpc/configs/wii_defconfig +++ b/arch/powerpc/configs/wii_defconfig @@ -9,7 +9,6 @@ CONFIG_EXPERT=y # CONFIG_ELF_CORE is not set CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_PPC_CHRP is not set diff --git a/arch/sh/configs/ap325rxa_defconfig b/arch/sh/configs/ap325rxa_defconfig index 4d83576b89c6..4464a2ad42ed 100644 --- a/arch/sh/configs/ap325rxa_defconfig +++ b/arch/sh/configs/ap325rxa_defconfig @@ -3,7 +3,6 @@ CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_KALLSYMS is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sh/configs/apsh4a3a_defconfig b/arch/sh/configs/apsh4a3a_defconfig index 99931a13a74d..cc909f347877 100644 --- a/arch/sh/configs/apsh4a3a_defconfig +++ b/arch/sh/configs/apsh4a3a_defconfig @@ -6,7 +6,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y -CONFIG_SLAB=y 
CONFIG_PROFILING=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig index d9fb124bf015..64558bf60e10 100644 --- a/arch/sh/configs/apsh4ad0a_defconfig +++ b/arch/sh/configs/apsh4ad0a_defconfig @@ -15,7 +15,6 @@ CONFIG_NAMESPACES=y CONFIG_BLK_DEV_INITRD=y CONFIG_KALLSYMS_ALL=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/sh/configs/dreamcast_defconfig b/arch/sh/configs/dreamcast_defconfig index 6a82c7b8ff32..0c9f2030bb7c 100644 --- a/arch/sh/configs/dreamcast_defconfig +++ b/arch/sh/configs/dreamcast_defconfig @@ -1,7 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/sh/configs/ecovec24-romimage_defconfig b/arch/sh/configs/ecovec24-romimage_defconfig index 5c60e71d839e..13fec4338416 100644 --- a/arch/sh/configs/ecovec24-romimage_defconfig +++ b/arch/sh/configs/ecovec24-romimage_defconfig @@ -6,7 +6,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_KALLSYMS is not set -CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH7724=y CONFIG_MEMORY_SIZE=0x10000000 diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig index 4d655e8d4d74..ee1b36682155 100644 --- a/arch/sh/configs/ecovec24_defconfig +++ b/arch/sh/configs/ecovec24_defconfig @@ -3,7 +3,6 @@ CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_KALLSYMS is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig index 2804cb760a76..623012d9af9d 100644 --- a/arch/sh/configs/espt_defconfig +++ b/arch/sh/configs/espt_defconfig @@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_NAMESPACES=y CONFIG_UTS_NS=y CONFIG_IPC_NS=y -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MODULES=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sh/configs/hp6xx_defconfig b/arch/sh/configs/hp6xx_defconfig index 70e6605d7f7e..0c45f2a0f9bd 100644 --- a/arch/sh/configs/hp6xx_defconfig +++ b/arch/sh/configs/hp6xx_defconfig @@ -3,7 +3,6 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH7709=y CONFIG_MEMORY_START=0x0d000000 diff --git a/arch/sh/configs/kfr2r09-romimage_defconfig b/arch/sh/configs/kfr2r09-romimage_defconfig index 04436b4fbd76..42bf34181a3e 100644 --- a/arch/sh/configs/kfr2r09-romimage_defconfig +++ b/arch/sh/configs/kfr2r09-romimage_defconfig @@ -6,7 +6,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_KALLSYMS is not set -CONFIG_SLAB=y # CONFIG_BLOCK is not set CONFIG_CPU_SUBTYPE_SH7724=y CONFIG_MEMORY_SIZE=0x08000000 diff --git a/arch/sh/configs/kfr2r09_defconfig b/arch/sh/configs/kfr2r09_defconfig index 833404490cfe..d80e83e7ec38 100644 --- a/arch/sh/configs/kfr2r09_defconfig +++ b/arch/sh/configs/kfr2r09_defconfig @@ -6,7 +6,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_KALLSYMS is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig index 7037320b654a..541082090918 100644 --- a/arch/sh/configs/landisk_defconfig +++ b/arch/sh/configs/landisk_defconfig @@ 
-1,7 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_KALLSYMS_EXTRA_PASS=y -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y diff --git a/arch/sh/configs/lboxre2_defconfig b/arch/sh/configs/lboxre2_defconfig index 05e4ac6fed5f..6a234761bfd7 100644 --- a/arch/sh/configs/lboxre2_defconfig +++ b/arch/sh/configs/lboxre2_defconfig @@ -1,7 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_KALLSYMS_EXTRA_PASS=y -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y diff --git a/arch/sh/configs/magicpanelr2_defconfig b/arch/sh/configs/magicpanelr2_defconfig index ef1d98e35c91..52937f9cc2ab 100644 --- a/arch/sh/configs/magicpanelr2_defconfig +++ b/arch/sh/configs/magicpanelr2_defconfig @@ -8,7 +8,6 @@ CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_KALLSYMS_ALL=y -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y diff --git a/arch/sh/configs/microdev_defconfig b/arch/sh/configs/microdev_defconfig index e9825196dd66..39a83d832cac 100644 --- a/arch/sh/configs/microdev_defconfig +++ b/arch/sh/configs/microdev_defconfig @@ -2,7 +2,6 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH4_202=y CONFIG_FLATMEM_MANUAL=y diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig index 4859cd30cfc4..2d1e65cad239 100644 --- a/arch/sh/configs/migor_defconfig +++ b/arch/sh/configs/migor_defconfig @@ -4,7 +4,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MODULES=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sh/configs/polaris_defconfig b/arch/sh/configs/polaris_defconfig index f42e4867ddc1..4f0396c2ac79 100644 --- a/arch/sh/configs/polaris_defconfig +++ b/arch/sh/configs/polaris_defconfig @@ -7,7 +7,6 @@ CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_AUDIT=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig index e527cd60a191..6bd6c0ae85d7 100644 --- a/arch/sh/configs/r7780mp_defconfig +++ b/arch/sh/configs/r7780mp_defconfig @@ -5,7 +5,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig index a3f952a83d97..cde668569cc1 100644 --- a/arch/sh/configs/r7785rp_defconfig +++ b/arch/sh/configs/r7785rp_defconfig @@ -7,7 +7,6 @@ CONFIG_RCU_TRACE=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_KPROBES=y CONFIG_MODULES=y diff --git a/arch/sh/configs/rsk7264_defconfig b/arch/sh/configs/rsk7264_defconfig index f7b9c528c6df..de9e350cdf28 100644 --- a/arch/sh/configs/rsk7264_defconfig +++ b/arch/sh/configs/rsk7264_defconfig @@ -12,7 +12,6 @@ CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_COUNTERS=y # CONFIG_VM_EVENT_COUNTERS is not set -CONFIG_SLAB=y CONFIG_MMAP_ALLOW_UNINITIALIZED=y CONFIG_PROFILING=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sh/configs/rsk7269_defconfig b/arch/sh/configs/rsk7269_defconfig index 4bff14fb185d..7ca6a5c25a9f 100644 --- a/arch/sh/configs/rsk7269_defconfig 
+++ b/arch/sh/configs/rsk7269_defconfig @@ -2,7 +2,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set -CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set CONFIG_SWAP_IO_SPACE=y CONFIG_CPU_SUBTYPE_SH7269=y diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig index 96263a4912b7..c863a11c7592 100644 --- a/arch/sh/configs/rts7751r2d1_defconfig +++ b/arch/sh/configs/rts7751r2d1_defconfig @@ -1,7 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MODULES=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig index 92e586e6c974..7e4f710d46c7 100644 --- a/arch/sh/configs/rts7751r2dplus_defconfig +++ b/arch/sh/configs/rts7751r2dplus_defconfig @@ -1,7 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MODULES=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig index 97b7356639ed..a080c5d05a7b 100644 --- a/arch/sh/configs/sdk7786_defconfig +++ b/arch/sh/configs/sdk7786_defconfig @@ -31,7 +31,6 @@ CONFIG_RD_BZIP2=y CONFIG_RD_LZMA=y CONFIG_RD_LZO=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_KPROBES=y CONFIG_MODULES=y diff --git a/arch/sh/configs/se7343_defconfig b/arch/sh/configs/se7343_defconfig index 5d6c19338ebf..75db12fb9ad1 100644 --- a/arch/sh/configs/se7343_defconfig +++ b/arch/sh/configs/se7343_defconfig @@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14 # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SHMEM is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y diff --git a/arch/sh/configs/se7619_defconfig b/arch/sh/configs/se7619_defconfig index 71a672c30716..14d0f5ead502 100644 --- a/arch/sh/configs/se7619_defconfig +++ b/arch/sh/configs/se7619_defconfig @@ -8,7 +8,6 @@ CONFIG_LOG_BUF_SHIFT=14 # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_VM_EVENT_COUNTERS is not set -CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set CONFIG_MEMORY_START=0x0c000000 CONFIG_FLATMEM_MANUAL=y diff --git a/arch/sh/configs/se7705_defconfig b/arch/sh/configs/se7705_defconfig index ed00a6eeadf5..16a0f72f0822 100644 --- a/arch/sh/configs/se7705_defconfig +++ b/arch/sh/configs/se7705_defconfig @@ -4,7 +4,6 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set # CONFIG_KALLSYMS is not set # CONFIG_HOTPLUG is not set -CONFIG_SLAB=y CONFIG_MODULES=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH7705=y diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig index 36356223d51c..dc854293da43 100644 --- a/arch/sh/configs/se7712_defconfig +++ b/arch/sh/configs/se7712_defconfig @@ -9,7 +9,6 @@ CONFIG_KALLSYMS_ALL=y # CONFIG_BUG is not set # CONFIG_BASE_FULL is not set # CONFIG_SHMEM is not set -CONFIG_SLAB=y CONFIG_MODULES=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH7712=y diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig index 46c5a263a239..c891945b8a90 100644 --- a/arch/sh/configs/se7721_defconfig +++ b/arch/sh/configs/se7721_defconfig @@ -9,7 +9,6 @@ CONFIG_KALLSYMS_ALL=y # CONFIG_BUG is not set # CONFIG_BASE_FULL is not set # CONFIG_SHMEM is not set -CONFIG_SLAB=y CONFIG_MODULES=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH7721=y diff --git 
a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig index d817df7cc4a7..96521271758c 100644 --- a/arch/sh/configs/se7724_defconfig +++ b/arch/sh/configs/se7724_defconfig @@ -3,7 +3,6 @@ CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_KALLSYMS is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sh/configs/se7750_defconfig b/arch/sh/configs/se7750_defconfig index 4defc7628a49..5fa6239ae4ea 100644 --- a/arch/sh/configs/se7750_defconfig +++ b/arch/sh/configs/se7750_defconfig @@ -6,7 +6,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set # CONFIG_HOTPLUG is not set -CONFIG_SLAB=y CONFIG_MODULES=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH7750=y diff --git a/arch/sh/configs/se7751_defconfig b/arch/sh/configs/se7751_defconfig index 4a024065bb75..8b5fe4ec16bc 100644 --- a/arch/sh/configs/se7751_defconfig +++ b/arch/sh/configs/se7751_defconfig @@ -4,7 +4,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set # CONFIG_HOTPLUG is not set -CONFIG_SLAB=y CONFIG_MODULES=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH7751=y diff --git a/arch/sh/configs/se7780_defconfig b/arch/sh/configs/se7780_defconfig index dcd85b858ac8..21303304eda2 100644 --- a/arch/sh/configs/se7780_defconfig +++ b/arch/sh/configs/se7780_defconfig @@ -6,7 +6,6 @@ CONFIG_LOG_BUF_SHIFT=14 # CONFIG_KALLSYMS is not set # CONFIG_HOTPLUG is not set # CONFIG_EPOLL is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_CPU_SUBTYPE_SH7780=y diff --git a/arch/sh/configs/secureedge5410_defconfig b/arch/sh/configs/secureedge5410_defconfig index 8422599cfb04..120176afe3f6 100644 --- a/arch/sh/configs/secureedge5410_defconfig +++ b/arch/sh/configs/secureedge5410_defconfig @@ -2,7 +2,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_HOTPLUG is not set -CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH7751R=y CONFIG_MEMORY_SIZE=0x01000000 diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig index 9fcf68b22dba..f661ef816f09 100644 --- a/arch/sh/configs/sh03_defconfig +++ b/arch/sh/configs/sh03_defconfig @@ -3,7 +3,6 @@ CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig index 259c69e3fa22..977ae405a75e 100644 --- a/arch/sh/configs/sh2007_defconfig +++ b/arch/sh/configs/sh2007_defconfig @@ -8,7 +8,6 @@ CONFIG_IKCONFIG=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_KALLSYMS_ALL=y -CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH7780=y CONFIG_MEMORY_SIZE=0x08000000 diff --git a/arch/sh/configs/sh7710voipgw_defconfig b/arch/sh/configs/sh7710voipgw_defconfig index 0d814770b07f..7f742729df69 100644 --- a/arch/sh/configs/sh7710voipgw_defconfig +++ b/arch/sh/configs/sh7710voipgw_defconfig @@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14 # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SHMEM is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig index 2579dc4bc0c8..48a0f9beb116 100644 --- a/arch/sh/configs/sh7757lcr_defconfig +++ b/arch/sh/configs/sh7757lcr_defconfig @@ -9,7 +9,6 @@ CONFIG_TASK_IO_ACCOUNTING=y 
CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y CONFIG_KALLSYMS_ALL=y -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig index 8a6a446f9eb8..ae726745d9a0 100644 --- a/arch/sh/configs/sh7763rdp_defconfig +++ b/arch/sh/configs/sh7763rdp_defconfig @@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_NAMESPACES=y CONFIG_UTS_NS=y CONFIG_IPC_NS=y -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MODULES=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig index 781ff13227fc..59262f42abe6 100644 --- a/arch/sh/configs/sh7785lcr_32bit_defconfig +++ b/arch/sh/configs/sh7785lcr_32bit_defconfig @@ -6,7 +6,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_GCOV_KERNEL=y CONFIG_MODULES=y diff --git a/arch/sh/configs/sh7785lcr_defconfig b/arch/sh/configs/sh7785lcr_defconfig index 1b88929083f7..94381f8268ff 100644 --- a/arch/sh/configs/sh7785lcr_defconfig +++ b/arch/sh/configs/sh7785lcr_defconfig @@ -3,7 +3,6 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig index 73a0d68b0de6..871092753591 100644 --- a/arch/sh/configs/titan_defconfig +++ b/arch/sh/configs/titan_defconfig @@ -6,7 +6,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig index 8fc687c98fd1..445bb451a5ec 100644 --- a/arch/sh/configs/urquell_defconfig +++ b/arch/sh/configs/urquell_defconfig @@ -17,7 +17,6 @@ CONFIG_CGROUP_MEMCG=y CONFIG_CGROUP_SCHED=y CONFIG_RT_GROUP_SCHED=y CONFIG_BLK_DEV_INITRD=y -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig index 7b3efe5edc1a..7c489e7f19b7 100644 --- a/arch/sparc/configs/sparc32_defconfig +++ b/arch/sparc/configs/sparc32_defconfig @@ -4,7 +4,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 18099099583e..0bb0d4da5227 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -6,7 +6,6 @@ CONFIG_LOG_BUF_SHIFT=18 CONFIG_BLK_DEV_INITRD=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_KPROBES=y CONFIG_MODULES=y diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig index c0162286d68b..05ae535ba0dc 100644 --- a/arch/um/configs/i386_defconfig +++ b/arch/um/configs/i386_defconfig @@ -22,7 +22,6 @@ CONFIG_BLK_CGROUP=y # CONFIG_PID_NS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig index bec6e5d95687..1c757238be7f 100644 --- a/arch/um/configs/x86_64_defconfig +++ 
b/arch/um/configs/x86_64_defconfig @@ -20,7 +20,6 @@ CONFIG_BLK_CGROUP=y # CONFIG_PID_NS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set diff --git a/kernel/configs/tiny.config b/kernel/configs/tiny.config index 144b2bd86b14..00009f7d0835 100644 --- a/kernel/configs/tiny.config +++ b/kernel/configs/tiny.config @@ -6,6 +6,5 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_KERNEL_XZ=y # CONFIG_KERNEL_LZO is not set # CONFIG_KERNEL_LZ4 is not set -# CONFIG_SLAB is not set CONFIG_SLUB=y CONFIG_SLUB_TINY=y diff --git a/mm/Kconfig b/mm/Kconfig index 7672a22647b4..b537c4436d18 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -218,11 +218,18 @@ choice help This option allows to select a slab allocator. -config SLAB - bool "SLAB" +config SLAB_DEPRECATED + bool "SLAB (DEPRECATED)" depends on !PREEMPT_RT select HAVE_HARDENED_USERCOPY_ALLOCATOR help + Deprecated and scheduled for removal in a few cycles. Replaced by + SLUB. + + If you cannot migrate to SLUB, please contact linux-mm@kvack.org + and the people listed in the SLAB ALLOCATOR section of MAINTAINERS + file, explaining why. + The regular slab allocator that is established and known to work well in all environments. It organizes cache hot objects in per cpu and per node queues. @@ -240,6 +247,11 @@ config SLUB endchoice +config SLAB + bool + default y + depends on SLAB_DEPRECATED + config SLUB_TINY bool "Configure SLUB for minimal memory footprint" depends on SLUB && EXPERT -- cgit v1.2.3 From 81a31a860bb61d54eb688af2568d9332ed9b8942 Mon Sep 17 00:00:00 2001 From: Ruihan Li Date: Mon, 15 May 2023 21:09:57 +0800 Subject: mm: page_table_check: Make it dependent on EXCLUSIVE_SYSTEM_RAM Without EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory regions into the userspace via /dev/mem. At the same time, pages may change their properties (e.g., from anonymous pages to named pages) while they are still being mapped in the userspace, leading to "corruption" detected by the page table check. To avoid these false positives, this patch makes PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. This dependency is understandable because PAGE_TABLE_CHECK is a hardening technique but /dev/mem without STRICT_DEVMEM (i.e., !EXCLUSIVE_SYSTEM_RAM) is itself a security problem. Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be mapped via /dev/mem. However, these pages are always considered as named pages, so they won't break the logic used in the page table check. Cc: # 5.17 Signed-off-by: Ruihan Li Acked-by: David Hildenbrand Acked-by: Pasha Tatashin Link: https://lore.kernel.org/r/20230515130958.32471-4-lrh2000@pku.edu.cn Signed-off-by: Greg Kroah-Hartman --- Documentation/mm/page_table_check.rst | 19 +++++++++++++++++++ mm/Kconfig.debug | 1 + 2 files changed, 20 insertions(+) (limited to 'mm') diff --git a/Documentation/mm/page_table_check.rst b/Documentation/mm/page_table_check.rst index cfd8f4117cf3..c12838ce6b8d 100644 --- a/Documentation/mm/page_table_check.rst +++ b/Documentation/mm/page_table_check.rst @@ -52,3 +52,22 @@ Build kernel with: Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page table support without extra kernel parameter. + +Implementation notes +==================== + +We specifically decided not to use VMA information in order to avoid relying on +MM states (except for limited "struct page" info). 
The page table check is a +separate from Linux-MM state machine that verifies that the user accessible +pages are not falsely shared. + +PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. The reason is that without +EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory +regions into the userspace via /dev/mem. At the same time, pages may change +their properties (e.g., from anonymous pages to named pages) while they are +still being mapped in the userspace, leading to "corruption" detected by the +page table check. + +Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be mapped via +/dev/mem. However, these pages are always considered as named pages, so they +won't break the logic used in the page table check. diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug index a925415b4d10..018a5bd2f576 100644 --- a/mm/Kconfig.debug +++ b/mm/Kconfig.debug @@ -98,6 +98,7 @@ config PAGE_OWNER config PAGE_TABLE_CHECK bool "Check for invalid mappings in user page tables" depends on ARCH_SUPPORTS_PAGE_TABLE_CHECK + depends on EXCLUSIVE_SYSTEM_RAM select PAGE_EXTENSION help Check that anonymous page is not being mapped twice with read write -- cgit v1.2.3 From 44d0fb387b53e56c8a050bac5c7d460e21eb226f Mon Sep 17 00:00:00 2001 From: Ruihan Li Date: Mon, 15 May 2023 21:09:58 +0800 Subject: mm: page_table_check: Ensure user pages are not slab pages The current uses of PageAnon in page table check functions can lead to type confusion bugs between struct page and slab [1], if slab pages are accidentally mapped into the user space. This is because slab reuses the bits in struct page to store its internal states, which renders PageAnon ineffective on slab pages. Since slab pages are not expected to be mapped into the user space, this patch adds BUG_ON(PageSlab(page)) checks to make sure that slab pages are not inadvertently mapped. Otherwise, there must be some bugs in the kernel. Reported-by: syzbot+fcf1a817ceb50935ce99@syzkaller.appspotmail.com Closes: https://lore.kernel.org/lkml/000000000000258e5e05fae79fc1@google.com/ [1] Fixes: df4e817b7108 ("mm: page table check") Cc: # 5.17 Signed-off-by: Ruihan Li Acked-by: Pasha Tatashin Link: https://lore.kernel.org/r/20230515130958.32471-5-lrh2000@pku.edu.cn Signed-off-by: Greg Kroah-Hartman --- include/linux/page-flags.h | 6 ++++++ mm/page_table_check.c | 6 ++++++ 2 files changed, 12 insertions(+) (limited to 'mm') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 1c68d67b832f..92a2063a0a23 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -617,6 +617,12 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) * Please note that, confusingly, "page_mapping" refers to the inode * address_space which maps the page from disk; whereas "page_mapped" * refers to user virtual address space into which the page is mapped. + * + * For slab pages, since slab reuses the bits in struct page to store its + * internal states, the page->mapping does not exist as such, nor do these + * flags below. So in order to avoid testing non-existent bits, please + * make sure that PageSlab(page) actually evaluates to false before calling + * the following functions (e.g., PageAnon). See mm/slab.h. 
*/ #define PAGE_MAPPING_ANON 0x1 #define PAGE_MAPPING_MOVABLE 0x2 diff --git a/mm/page_table_check.c b/mm/page_table_check.c index 25d8610c0042..f2baf97d5f38 100644 --- a/mm/page_table_check.c +++ b/mm/page_table_check.c @@ -71,6 +71,8 @@ static void page_table_check_clear(struct mm_struct *mm, unsigned long addr, page = pfn_to_page(pfn); page_ext = page_ext_get(page); + + BUG_ON(PageSlab(page)); anon = PageAnon(page); for (i = 0; i < pgcnt; i++) { @@ -107,6 +109,8 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr, page = pfn_to_page(pfn); page_ext = page_ext_get(page); + + BUG_ON(PageSlab(page)); anon = PageAnon(page); for (i = 0; i < pgcnt; i++) { @@ -133,6 +137,8 @@ void __page_table_check_zero(struct page *page, unsigned int order) struct page_ext *page_ext; unsigned long i; + BUG_ON(PageSlab(page)); + page_ext = page_ext_get(page); BUG_ON(!page_ext); for (i = 0; i < (1ul << order); i++) { -- cgit v1.2.3 From c8070b78751955e59b42457b974bea4a4fe00187 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 26 May 2023 22:41:40 +0100 Subject: mm: Don't pin ZERO_PAGE in pin_user_pages() Make pin_user_pages*() leave a ZERO_PAGE unpinned if it extracts a pointer to it from the page tables and make unpin_user_page*() correspondingly ignore a ZERO_PAGE when unpinning. We don't want to risk overrunning a zero page's refcount as we're only allowed ~2 million pins on it - something that userspace can conceivably trigger. Add a pair of functions to test whether a page or a folio is a ZERO_PAGE. Signed-off-by: David Howells cc: Christoph Hellwig cc: David Hildenbrand cc: Lorenzo Stoakes cc: Andrew Morton cc: Jens Axboe cc: Al Viro cc: Matthew Wilcox cc: Jan Kara cc: Jeff Layton cc: Jason Gunthorpe cc: Logan Gunthorpe cc: Hillf Danton cc: Christian Brauner cc: Linus Torvalds cc: linux-fsdevel@vger.kernel.org cc: linux-block@vger.kernel.org cc: linux-kernel@vger.kernel.org cc: linux-mm@kvack.org Reviewed-by: Lorenzo Stoakes Reviewed-by: Christoph Hellwig Acked-by: David Hildenbrand Link: https://lore.kernel.org/r/20230526214142.958751-2-dhowells@redhat.com Signed-off-by: Jens Axboe --- Documentation/core-api/pin_user_pages.rst | 6 ++++++ include/linux/mm.h | 26 ++++++++++++++++++++++++-- mm/gup.c | 31 ++++++++++++++++++++++++++++++- 3 files changed, 60 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/Documentation/core-api/pin_user_pages.rst b/Documentation/core-api/pin_user_pages.rst index 9fb0b1080d3b..d3c1f6d8c0e0 100644 --- a/Documentation/core-api/pin_user_pages.rst +++ b/Documentation/core-api/pin_user_pages.rst @@ -112,6 +112,12 @@ pages: This also leads to limitations: there are only 31-10==21 bits available for a counter that increments 10 bits at a time. +* Because of that limitation, special handling is applied to the zero pages + when using FOLL_PIN. We only pretend to pin a zero page - we don't alter its + refcount or pincount at all (it is permanent, so there's no need). The + unpinning functions also don't do anything to a zero page. This is + transparent to the caller. + * Callers must specifically request "dma-pinned tracking of pages". In other words, just calling get_user_pages() will not suffice; a new set of functions, pin_user_page() and related, must be used. 
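As a minimal sketch of what the documentation above means for a caller (hypothetical code, not part of this patch; the helper name is invented): a read-only FOLL_PIN request may hand back the shared zero page, which is only nominally pinned, and unpin_user_page() quietly ignores it, so the caller needs no special casing:

#include <linux/mm.h>

/* Hypothetical example: read-only pin of a user range that may include the zero page. */
static int example_pin_then_unpin(unsigned long uaddr, int nr_pages)
{
	struct page *pages[16];
	int pinned, i;

	if (nr_pages <= 0 || nr_pages > (int)ARRAY_SIZE(pages))
		return -EINVAL;

	/* No FOLL_WRITE, so the shared zero page can legitimately be returned. */
	pinned = pin_user_pages_fast(uaddr, nr_pages, 0, pages);
	if (pinned < 0)
		return pinned;

	/*
	 * Any zero pages in pages[] had neither their refcount nor their
	 * pincount touched; unpin_user_page() is simply a no-op for them.
	 */
	for (i = 0; i < pinned; i++)
		unpin_user_page(pages[i]);

	return pinned;
}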
diff --git a/include/linux/mm.h b/include/linux/mm.h index 27ce77080c79..3c2f6b452586 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1910,6 +1910,28 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, return page_maybe_dma_pinned(page); } +/** + * is_zero_page - Query if a page is a zero page + * @page: The page to query + * + * This returns true if @page is one of the permanent zero pages. + */ +static inline bool is_zero_page(const struct page *page) +{ + return is_zero_pfn(page_to_pfn(page)); +} + +/** + * is_zero_folio - Query if a folio is a zero page + * @folio: The folio to query + * + * This returns true if @folio is one of the permanent zero pages. + */ +static inline bool is_zero_folio(const struct folio *folio) +{ + return is_zero_page(&folio->page); +} + /* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */ #ifdef CONFIG_MIGRATION static inline bool is_longterm_pinnable_page(struct page *page) @@ -1920,8 +1942,8 @@ static inline bool is_longterm_pinnable_page(struct page *page) if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE) return false; #endif - /* The zero page may always be pinned */ - if (is_zero_pfn(page_to_pfn(page))) + /* The zero page can be "pinned" but gets special handling. */ + if (is_zero_page(page)) return true; /* Coherent device memory must always allow eviction. */ diff --git a/mm/gup.c b/mm/gup.c index bbe416236593..ad28261dcafd 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -51,7 +51,8 @@ static inline void sanity_check_pinned_pages(struct page **pages, struct page *page = *pages; struct folio *folio = page_folio(page); - if (!folio_test_anon(folio)) + if (is_zero_page(page) || + !folio_test_anon(folio)) continue; if (!folio_test_large(folio) || folio_test_hugetlb(folio)) VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page); @@ -131,6 +132,13 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags) else if (flags & FOLL_PIN) { struct folio *folio; + /* + * Don't take a pin on the zero page - it's not going anywhere + * and it is used in a *lot* of places. + */ + if (is_zero_page(page)) + return page_folio(page); + /* * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a * right zone, so fail and let the caller fall back to the slow @@ -180,6 +188,8 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags) static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) { if (flags & FOLL_PIN) { + if (is_zero_folio(folio)) + return; node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs); if (folio_test_large(folio)) atomic_sub(refs, &folio->_pincount); @@ -224,6 +234,13 @@ int __must_check try_grab_page(struct page *page, unsigned int flags) if (flags & FOLL_GET) folio_ref_inc(folio); else if (flags & FOLL_PIN) { + /* + * Don't take a pin on the zero page - it's not going anywhere + * and it is used in a *lot* of places. + */ + if (is_zero_page(page)) + return 0; + /* * Similar to try_grab_folio(): be sure to *also* * increment the normal page refcount field at least once, @@ -3079,6 +3096,9 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast); * * FOLL_PIN means that the pages must be released via unpin_user_page(). Please * see Documentation/core-api/pin_user_pages.rst for further details. + * + * Note that if a zero_page is amongst the returned pages, it will not have + * pins in it and unpin_user_page() will not remove pins from it. 
*/ int pin_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages) @@ -3110,6 +3130,9 @@ EXPORT_SYMBOL_GPL(pin_user_pages_fast); * * FOLL_PIN means that the pages must be released via unpin_user_page(). Please * see Documentation/core-api/pin_user_pages.rst for details. + * + * Note that if a zero_page is amongst the returned pages, it will not have + * pins in it and unpin_user_page*() will not remove pins from it. */ long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, @@ -3143,6 +3166,9 @@ EXPORT_SYMBOL(pin_user_pages_remote); * * FOLL_PIN means that the pages must be released via unpin_user_page(). Please * see Documentation/core-api/pin_user_pages.rst for details. + * + * Note that if a zero_page is amongst the returned pages, it will not have + * pins in it and unpin_user_page*() will not remove pins from it. */ long pin_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, @@ -3161,6 +3187,9 @@ EXPORT_SYMBOL(pin_user_pages); * pin_user_pages_unlocked() is the FOLL_PIN variant of * get_user_pages_unlocked(). Behavior is the same, except that this one sets * FOLL_PIN and rejects FOLL_GET. + * + * Note that if a zero_page is amongst the returned pages, it will not have + * pins in it and unpin_user_page*() will not remove pins from it. */ long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags) -- cgit v1.2.3 From 1101fb8f89e5fc548c4d0ad66750e98980291815 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 26 May 2023 22:41:41 +0100 Subject: mm: Provide a function to get an additional pin on a page Provide a function to get an additional pin on a page that we already have a pin on. This will be used in fs/direct-io.c when dispatching multiple bios to a page we've extracted from a user-backed iter rather than redoing the extraction. 
Signed-off-by: David Howells cc: Christoph Hellwig cc: David Hildenbrand cc: Lorenzo Stoakes cc: Andrew Morton cc: Jens Axboe cc: Al Viro cc: Matthew Wilcox cc: Jan Kara cc: Jeff Layton cc: Jason Gunthorpe cc: Logan Gunthorpe cc: Hillf Danton cc: Christian Brauner cc: Linus Torvalds cc: linux-fsdevel@vger.kernel.org cc: linux-block@vger.kernel.org cc: linux-kernel@vger.kernel.org cc: linux-mm@kvack.org Reviewed-by: Christoph Hellwig Acked-by: David Hildenbrand Link: https://lore.kernel.org/r/20230526214142.958751-3-dhowells@redhat.com Signed-off-by: Jens Axboe --- include/linux/mm.h | 1 + mm/gup.c | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index 3c2f6b452586..200068d98686 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2405,6 +2405,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages); int pin_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages); +void folio_add_pin(struct folio *folio); int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc); int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, diff --git a/mm/gup.c b/mm/gup.c index ad28261dcafd..0814576b7366 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -275,6 +275,33 @@ void unpin_user_page(struct page *page) } EXPORT_SYMBOL(unpin_user_page); +/** + * folio_add_pin - Try to get an additional pin on a pinned folio + * @folio: The folio to be pinned + * + * Get an additional pin on a folio we already have a pin on. Makes no change + * if the folio is a zero_page. + */ +void folio_add_pin(struct folio *folio) +{ + if (is_zero_folio(folio)) + return; + + /* + * Similar to try_grab_folio(): be sure to *also* increment the normal + * page refcount field at least once, so that the page really is + * pinned. + */ + if (folio_test_large(folio)) { + WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1); + folio_ref_inc(folio); + atomic_inc(&folio->_pincount); + } else { + WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS); + folio_ref_add(folio, GUP_PIN_COUNTING_BIAS); + } +} + static inline struct folio *gup_folio_range_next(struct page *start, unsigned long npages, unsigned long i, unsigned int *ntails) { -- cgit v1.2.3 From cb58bf91b138c1a8b18cca9503308789e26e3522 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Wed, 31 May 2023 04:50:24 -0700 Subject: swap: use __bio_add_page to add page to bio The swap code only adds a single page to a newly created bio. So use __bio_add_page() to add the page which is guaranteed to succeed in this case. This brings us closer to marking bio_add_page() as __must_check. 
Reviewed-by: Damien Le Moal Reviewed-by: Christoph Hellwig Signed-off-by: Johannes Thumshirn Link: https://lore.kernel.org/r/5bdafd9de806b2dab92302b30eb7a3a5f10c37d9.1685532726.git.johannes.thumshirn@wdc.com Signed-off-by: Jens Axboe --- mm/page_io.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/page_io.c b/mm/page_io.c index 87b682d18850..684cd3c7b59b 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -338,7 +338,7 @@ static void swap_writepage_bdev_sync(struct page *page, bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc)); bio.bi_iter.bi_sector = swap_page_sector(page); - bio_add_page(&bio, page, thp_size(page), 0); + __bio_add_page(&bio, page, thp_size(page), 0); bio_associate_blkg_from_page(&bio, page); count_swpout_vm_event(page); @@ -360,7 +360,7 @@ static void swap_writepage_bdev_async(struct page *page, GFP_NOIO); bio->bi_iter.bi_sector = swap_page_sector(page); bio->bi_end_io = end_swap_bio_write; - bio_add_page(bio, page, thp_size(page), 0); + __bio_add_page(bio, page, thp_size(page), 0); bio_associate_blkg_from_page(bio, page); count_swpout_vm_event(page); @@ -468,7 +468,7 @@ static void swap_readpage_bdev_sync(struct page *page, bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ); bio.bi_iter.bi_sector = swap_page_sector(page); - bio_add_page(&bio, page, thp_size(page), 0); + __bio_add_page(&bio, page, thp_size(page), 0); /* * Keep this task valid during swap readpage because the oom killer may * attempt to access it in the page fault retry time check. @@ -488,7 +488,7 @@ static void swap_readpage_bdev_async(struct page *page, bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL); bio->bi_iter.bi_sector = swap_page_sector(page); bio->bi_end_io = end_swap_bio_read; - bio_add_page(bio, page, thp_size(page), 0); + __bio_add_page(bio, page, thp_size(page), 0); count_vm_event(PSWPIN); submit_bio(bio); } -- cgit v1.2.3 From de649e7f5edb2e61dbd3d64deae44cb165e657ad Mon Sep 17 00:00:00 2001 From: Yuwei Guan Date: Thu, 1 Jun 2023 21:31:49 +0800 Subject: memblock: Update nid info in memblock debugfs The node id for memblock reserved regions will be wrong, so let's show 'x' for reg->nid == MAX_NUMNODES in debugfs to keep the output aligned.
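With this change, a reserved region whose node id was never set (reg->nid == MAX_NUMNODES) prints 'x' in the node column instead of a misleading number. A hypothetical excerpt of /sys/kernel/debug/memblock/reserved (addresses and node values invented purely for illustration) would look like:

   0: 0x0000000000001000..0x000000000009ffff    0
   1: 0x000000007c000000..0x000000007fffffff    x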
Suggested-by: Mike Rapoport (IBM) Co-developed-by: Kefeng Wang Signed-off-by: Kefeng Wang Signed-off-by: Yuwei Guan Link: https://lore.kernel.org/r/20230601133149.37160-1-ssawgyw@gmail.com Signed-off-by: Mike Rapoport (IBM) --- mm/memblock.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index c5c80d9bcea3..3d449aaba052 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -2169,17 +2169,21 @@ static int memblock_debug_show(struct seq_file *m, void *private) { struct memblock_type *type = m->private; struct memblock_region *reg; - int i, j; + int i, j, nid; unsigned int count = ARRAY_SIZE(flagname); phys_addr_t end; for (i = 0; i < type->cnt; i++) { reg = &type->regions[i]; end = reg->base + reg->size - 1; + nid = memblock_get_region_node(reg); seq_printf(m, "%4d: ", i); seq_printf(m, "%pa..%pa ", &reg->base, &end); - seq_printf(m, "%4d ", memblock_get_region_node(reg)); + if (nid != MAX_NUMNODES) + seq_printf(m, "%4d ", nid); + else + seq_printf(m, "%4c ", 'x'); if (reg->flags) { for (j = 0; j < count; j++) { if (reg->flags & (1U << j)) { -- cgit v1.2.3 From d0bf7d5759c1d89fb013aa41cca5832e00b9632a Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 17 Jan 2023 14:40:00 +0100 Subject: mm/slab: introduce kmem_cache flag SLAB_NO_MERGE Allow API users of kmem_cache_create to specify that they don't want any slab merge or aliasing (with similar sized objects). Use this in kfence_test. The SKB (sk_buff) kmem_cache slab is critical for network performance. The network stack uses the kmem_cache_{alloc,free}_bulk APIs to gain performance by amortising the alloc/free cost. For the bulk API to perform efficiently, slub fragmentation needs to be low. Especially for the SLUB allocator, the efficiency of the bulk free API depends on objects belonging to the same slab (page). When running different network performance microbenchmarks, I started to notice that performance was reduced (slightly) when machines had longer uptimes. I believe the cause was that 'skbuff_head_cache' got aliased/merged into the general slub cache for 256-byte sized objects (with my kernel config, without CONFIG_HARDENED_USERCOPY). For the SKB kmem_cache, the network stack has reasons for not merging, but they vary depending on kernel config (e.g. CONFIG_HARDENED_USERCOPY). We want to explicitly set SLAB_NO_MERGE for this kmem_cache. Another use case for the flag has been described by David Sterba [1]: > This can be used for more fine grained control over the caches or for > debugging builds where separate slabs can verify that no objects leak. > The slab_nomerge boot option is too coarse and would need to be > enabled on all testing hosts. There are some other ways how to disable > merging, e.g. a slab constructor but this disables poisoning besides > that it adds additional overhead. Other flags are internal and may > have other semantics. > A concrete example what motivates the flag. During 'btrfs balance' > slab top reported huge increase in caches like > 1330095 1330095 100% 0.10K 34105 39 136420K Acpi-ParseExt > 1734684 1734684 100% 0.14K 61953 28 247812K pid_namespace > 8244036 6873075 83% 0.11K 229001 36 916004K khugepaged_mm_slot > which was confusing and that it's because of slab merging was not the > first idea. After rebooting with slab_nomerge all the caches were > from btrfs_ namespace as expected.
[1] https://lore.kernel.org/all/20230524101748.30714-1-dsterba@suse.com/ [ vbabka@suse.cz: rename to SLAB_NO_MERGE, change the flag value to the one proposed by David so it does not collide with internal SLAB/SLUB flags, write a comment for the flag, expand changelog, drop the skbuff part to be handled spearately ] Link: https://lore.kernel.org/all/167396280045.539803.7540459812377220500.stgit@firesoul/ Reported-by: David Sterba Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Vlastimil Babka Acked-by: Jesper Dangaard Brouer Acked-by: Roman Gushchin --- include/linux/slab.h | 12 ++++++++++++ mm/kfence/kfence_test.c | 7 +++---- mm/slab.h | 5 +++-- mm/slab_common.c | 2 +- 4 files changed, 19 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/include/linux/slab.h b/include/linux/slab.h index 6b3e155b70bf..72bc906d8bc7 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -106,6 +106,18 @@ /* Avoid kmemleak tracing */ #define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U) +/* + * Prevent merging with compatible kmem caches. This flag should be used + * cautiously. Valid use cases: + * + * - caches created for self-tests (e.g. kunit) + * - general caches created and used by a subsystem, only when a + * (subsystem-specific) debug option is enabled + * - performance critical caches, should be very rare and consulted with slab + * maintainers, and not used together with CONFIG_SLUB_TINY + */ +#define SLAB_NO_MERGE ((slab_flags_t __force)0x01000000U) + /* Fault injection mark */ #ifdef CONFIG_FAILSLAB # define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U) diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c index 6aee19a79236..9e008a336d9f 100644 --- a/mm/kfence/kfence_test.c +++ b/mm/kfence/kfence_test.c @@ -191,11 +191,10 @@ static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t fla kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor); /* - * Use SLAB_NOLEAKTRACE to prevent merging with existing caches. Any - * other flag in SLAB_NEVER_MERGE also works. Use SLAB_ACCOUNT to - * allocate via memcg, if enabled. + * Use SLAB_NO_MERGE to prevent merging with existing caches. + * Use SLAB_ACCOUNT to allocate via memcg, if enabled. 
*/ - flags |= SLAB_NOLEAKTRACE | SLAB_ACCOUNT; + flags |= SLAB_NO_MERGE | SLAB_ACCOUNT; test_cache = kmem_cache_create("test", size, 1, flags, ctor); KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache"); diff --git a/mm/slab.h b/mm/slab.h index f01ac256a8f5..9005ddc51cf8 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -294,11 +294,11 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s) #if defined(CONFIG_SLAB) #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \ SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \ - SLAB_ACCOUNT) + SLAB_ACCOUNT | SLAB_NO_MERGE) #elif defined(CONFIG_SLUB) #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \ SLAB_TEMPORARY | SLAB_ACCOUNT | \ - SLAB_NO_USER_FLAGS | SLAB_KMALLOC) + SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE) #else #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE) #endif @@ -319,6 +319,7 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s) SLAB_TEMPORARY | \ SLAB_ACCOUNT | \ SLAB_KMALLOC | \ + SLAB_NO_MERGE | \ SLAB_NO_USER_FLAGS) bool __kmem_cache_empty(struct kmem_cache *); diff --git a/mm/slab_common.c b/mm/slab_common.c index 607249785c07..0e0a617eae7d 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -47,7 +47,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work, */ #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \ - SLAB_FAILSLAB | kasan_never_merge()) + SLAB_FAILSLAB | SLAB_NO_MERGE | kasan_never_merge()) #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \ SLAB_CACHE_DMA32 | SLAB_ACCOUNT) -- cgit v1.2.3 From 6801be4f2653e5fdddca73b527cf0728284ba8a3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 31 May 2023 15:08:43 +0200 Subject: slub: Replace cmpxchg_double() Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Arnd Bergmann Reviewed-by: Mark Rutland Acked-by: Vlastimil Babka Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Tested-by: Mark Rutland Link: https://lore.kernel.org/r/20230531132323.924677086@infradead.org --- include/linux/slub_def.h | 12 +++- mm/slab.h | 53 +++++++++++++++--- mm/slub.c | 139 ++++++++++++++++++++++++++++------------------- 3 files changed, 137 insertions(+), 67 deletions(-) (limited to 'mm') diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index f6df03f934e5..deb90cf4bffb 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -39,7 +39,8 @@ enum stat_item { CPU_PARTIAL_FREE, /* Refill cpu partial on free */ CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */ CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */ - NR_SLUB_STAT_ITEMS }; + NR_SLUB_STAT_ITEMS +}; #ifndef CONFIG_SLUB_TINY /* @@ -47,8 +48,13 @@ enum stat_item { * with this_cpu_cmpxchg_double() alignment requirements. 
*/ struct kmem_cache_cpu { - void **freelist; /* Pointer to next available object */ - unsigned long tid; /* Globally unique transaction id */ + union { + struct { + void **freelist; /* Pointer to next available object */ + unsigned long tid; /* Globally unique transaction id */ + }; + freelist_aba_t freelist_tid; + }; struct slab *slab; /* The slab from which we are allocating */ #ifdef CONFIG_SLUB_CPU_PARTIAL struct slab *partial; /* Partially allocated frozen slabs */ diff --git a/mm/slab.h b/mm/slab.h index f01ac256a8f5..bc36edd5ba4f 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -6,6 +6,38 @@ */ void __init kmem_cache_init(void); +#ifdef CONFIG_64BIT +# ifdef system_has_cmpxchg128 +# define system_has_freelist_aba() system_has_cmpxchg128() +# define try_cmpxchg_freelist try_cmpxchg128 +# endif +#define this_cpu_try_cmpxchg_freelist this_cpu_try_cmpxchg128 +typedef u128 freelist_full_t; +#else /* CONFIG_64BIT */ +# ifdef system_has_cmpxchg64 +# define system_has_freelist_aba() system_has_cmpxchg64() +# define try_cmpxchg_freelist try_cmpxchg64 +# endif +#define this_cpu_try_cmpxchg_freelist this_cpu_try_cmpxchg64 +typedef u64 freelist_full_t; +#endif /* CONFIG_64BIT */ + +#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) +#undef system_has_freelist_aba +#endif + +/* + * Freelist pointer and counter to cmpxchg together, avoids the typical ABA + * problems with cmpxchg of just a pointer. + */ +typedef union { + struct { + void *freelist; + unsigned long counter; + }; + freelist_full_t full; +} freelist_aba_t; + /* Reuses the bits in struct page */ struct slab { unsigned long __page_flags; @@ -38,14 +70,21 @@ struct slab { #endif }; /* Double-word boundary */ - void *freelist; /* first free object */ union { - unsigned long counters; struct { - unsigned inuse:16; - unsigned objects:15; - unsigned frozen:1; + void *freelist; /* first free object */ + union { + unsigned long counters; + struct { + unsigned inuse:16; + unsigned objects:15; + unsigned frozen:1; + }; + }; }; +#ifdef system_has_freelist_aba + freelist_aba_t freelist_counter; +#endif }; }; struct rcu_head rcu_head; @@ -72,8 +111,8 @@ SLAB_MATCH(memcg_data, memcg_data); #endif #undef SLAB_MATCH static_assert(sizeof(struct slab) <= sizeof(struct page)); -#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && defined(CONFIG_SLUB) -static_assert(IS_ALIGNED(offsetof(struct slab, freelist), 2*sizeof(void *))); +#if defined(system_has_freelist_aba) && defined(CONFIG_SLUB) +static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t))); #endif /** diff --git a/mm/slub.c b/mm/slub.c index c87628cd8a9a..7529626bbec2 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -292,7 +292,12 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) /* Poison object */ #define __OBJECT_POISON ((slab_flags_t __force)0x80000000U) /* Use cmpxchg_double */ + +#ifdef system_has_freelist_aba #define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U) +#else +#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0U) +#endif /* * Tracking user of a slab. 
@@ -512,6 +517,40 @@ static __always_inline void slab_unlock(struct slab *slab) __bit_spin_unlock(PG_locked, &page->flags); } +static inline bool +__update_freelist_fast(struct slab *slab, + void *freelist_old, unsigned long counters_old, + void *freelist_new, unsigned long counters_new) +{ +#ifdef system_has_freelist_aba + freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old }; + freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new }; + + return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full); +#else + return false; +#endif +} + +static inline bool +__update_freelist_slow(struct slab *slab, + void *freelist_old, unsigned long counters_old, + void *freelist_new, unsigned long counters_new) +{ + bool ret = false; + + slab_lock(slab); + if (slab->freelist == freelist_old && + slab->counters == counters_old) { + slab->freelist = freelist_new; + slab->counters = counters_new; + ret = true; + } + slab_unlock(slab); + + return ret; +} + /* * Interrupts must be disabled (for the fallback code to work right), typically * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is @@ -519,33 +558,25 @@ static __always_inline void slab_unlock(struct slab *slab) * allocation/ free operation in hardirq context. Therefore nothing can * interrupt the operation. */ -static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab, +static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab, void *freelist_old, unsigned long counters_old, void *freelist_new, unsigned long counters_new, const char *n) { + bool ret; + if (USE_LOCKLESS_FAST_PATH()) lockdep_assert_irqs_disabled(); -#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ - defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) + if (s->flags & __CMPXCHG_DOUBLE) { - if (cmpxchg_double(&slab->freelist, &slab->counters, - freelist_old, counters_old, - freelist_new, counters_new)) - return true; - } else -#endif - { - slab_lock(slab); - if (slab->freelist == freelist_old && - slab->counters == counters_old) { - slab->freelist = freelist_new; - slab->counters = counters_new; - slab_unlock(slab); - return true; - } - slab_unlock(slab); + ret = __update_freelist_fast(slab, freelist_old, counters_old, + freelist_new, counters_new); + } else { + ret = __update_freelist_slow(slab, freelist_old, counters_old, + freelist_new, counters_new); } + if (likely(ret)) + return true; cpu_relax(); stat(s, CMPXCHG_DOUBLE_FAIL); @@ -557,36 +588,26 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab return false; } -static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab, +static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab, void *freelist_old, unsigned long counters_old, void *freelist_new, unsigned long counters_new, const char *n) { -#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ - defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) + bool ret; + if (s->flags & __CMPXCHG_DOUBLE) { - if (cmpxchg_double(&slab->freelist, &slab->counters, - freelist_old, counters_old, - freelist_new, counters_new)) - return true; - } else -#endif - { + ret = __update_freelist_fast(slab, freelist_old, counters_old, + freelist_new, counters_new); + } else { unsigned long flags; local_irq_save(flags); - slab_lock(slab); - if (slab->freelist == freelist_old && - slab->counters == counters_old) { - slab->freelist = freelist_new; - slab->counters = counters_new; - slab_unlock(slab); - local_irq_restore(flags); - return true; - } 
- slab_unlock(slab); + ret = __update_freelist_slow(slab, freelist_old, counters_old, + freelist_new, counters_new); local_irq_restore(flags); } + if (likely(ret)) + return true; cpu_relax(); stat(s, CMPXCHG_DOUBLE_FAIL); @@ -2228,7 +2249,7 @@ static inline void *acquire_slab(struct kmem_cache *s, VM_BUG_ON(new.frozen); new.frozen = 1; - if (!__cmpxchg_double_slab(s, slab, + if (!__slab_update_freelist(s, slab, freelist, counters, new.freelist, new.counters, "acquire_slab")) @@ -2554,7 +2575,7 @@ redo: } - if (!cmpxchg_double_slab(s, slab, + if (!slab_update_freelist(s, slab, old.freelist, old.counters, new.freelist, new.counters, "unfreezing slab")) { @@ -2611,7 +2632,7 @@ static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab) new.frozen = 0; - } while (!__cmpxchg_double_slab(s, slab, + } while (!__slab_update_freelist(s, slab, old.freelist, old.counters, new.freelist, new.counters, "unfreezing slab")); @@ -3008,6 +3029,18 @@ static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags) } #ifndef CONFIG_SLUB_TINY +static inline bool +__update_cpu_freelist_fast(struct kmem_cache *s, + void *freelist_old, void *freelist_new, + unsigned long tid) +{ + freelist_aba_t old = { .freelist = freelist_old, .counter = tid }; + freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) }; + + return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full, + &old.full, new.full); +} + /* * Check the slab->freelist and either transfer the freelist to the * per cpu freelist or deactivate the slab. @@ -3034,7 +3067,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) new.inuse = slab->objects; new.frozen = freelist != NULL; - } while (!__cmpxchg_double_slab(s, slab, + } while (!__slab_update_freelist(s, slab, freelist, counters, NULL, new.counters, "get_freelist")); @@ -3359,11 +3392,7 @@ redo: * against code executing on this cpu *not* from access by * other cpus. */ - if (unlikely(!this_cpu_cmpxchg_double( - s->cpu_slab->freelist, s->cpu_slab->tid, - object, tid, - next_object, next_tid(tid)))) { - + if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) { note_cmpxchg_failure("slab_alloc", s, tid); goto redo; } @@ -3631,7 +3660,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab, } } - } while (!cmpxchg_double_slab(s, slab, + } while (!slab_update_freelist(s, slab, prior, counters, head, new.counters, "__slab_free")); @@ -3736,11 +3765,7 @@ redo: set_freepointer(s, tail_obj, freelist); - if (unlikely(!this_cpu_cmpxchg_double( - s->cpu_slab->freelist, s->cpu_slab->tid, - freelist, tid, - head, next_tid(tid)))) { - + if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) { note_cmpxchg_failure("slab_free", s, tid); goto redo; } @@ -4505,11 +4530,11 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) } } -#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ - defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) - if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) +#ifdef system_has_freelist_aba + if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) { /* Enable fast mode */ s->flags |= __CMPXCHG_DOUBLE; + } #endif /* -- cgit v1.2.3 From 0718afd47f70cf46877c39c25d06b786e1a3f36c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Jun 2023 11:44:52 +0200 Subject: block: introduce holder ops Add a new blk_holder_ops structure, which is passed to blkdev_get_by_* and installed in the block_device for exclusive claims. 
It will be used to allow the block layer to call back into the user of the block device for thing like notification of a removed device or a device resize. Signed-off-by: Christoph Hellwig Reviewed-by: Jan Kara Acked-by: Dave Chinner Reviewed-by: Dave Chinner Link: https://lore.kernel.org/r/20230601094459.1350643-10-hch@lst.de Signed-off-by: Jens Axboe --- block/bdev.c | 41 ++++++++++++++++++++++++++----------- block/fops.c | 2 +- block/genhd.c | 6 ++++-- block/ioctl.c | 3 ++- drivers/block/drbd/drbd_nl.c | 3 ++- drivers/block/loop.c | 2 +- drivers/block/pktcdvd.c | 5 +++-- drivers/block/rnbd/rnbd-srv.c | 2 +- drivers/block/xen-blkback/xenbus.c | 2 +- drivers/block/zram/zram_drv.c | 2 +- drivers/md/bcache/super.c | 2 +- drivers/md/dm.c | 2 +- drivers/md/md.c | 2 +- drivers/mtd/devices/block2mtd.c | 4 ++-- drivers/nvme/target/io-cmd-bdev.c | 2 +- drivers/s390/block/dasd_genhd.c | 2 +- drivers/target/target_core_iblock.c | 2 +- drivers/target/target_core_pscsi.c | 3 ++- fs/btrfs/dev-replace.c | 2 +- fs/btrfs/volumes.c | 6 +++--- fs/erofs/super.c | 2 +- fs/ext4/super.c | 3 ++- fs/f2fs/super.c | 4 ++-- fs/jfs/jfs_logmgr.c | 2 +- fs/nfs/blocklayout/dev.c | 5 +++-- fs/nilfs2/super.c | 2 +- fs/ocfs2/cluster/heartbeat.c | 2 +- fs/reiserfs/journal.c | 5 +++-- fs/super.c | 4 ++-- fs/xfs/xfs_super.c | 2 +- include/linux/blk_types.h | 2 ++ include/linux/blkdev.h | 11 +++++++--- kernel/power/swap.c | 4 ++-- mm/swapfile.c | 3 ++- 34 files changed, 90 insertions(+), 56 deletions(-) (limited to 'mm') diff --git a/block/bdev.c b/block/bdev.c index f5ffcac762e0..5c46ff107706 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -102,7 +102,7 @@ int truncate_bdev_range(struct block_device *bdev, fmode_t mode, * under live filesystem. */ if (!(mode & FMODE_EXCL)) { - int err = bd_prepare_to_claim(bdev, truncate_bdev_range); + int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL); if (err) goto invalidate; } @@ -415,6 +415,7 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno) bdev = I_BDEV(inode); mutex_init(&bdev->bd_fsfreeze_mutex); spin_lock_init(&bdev->bd_size_lock); + mutex_init(&bdev->bd_holder_lock); bdev->bd_partno = partno; bdev->bd_inode = inode; bdev->bd_queue = disk->queue; @@ -464,13 +465,15 @@ long nr_blockdev_pages(void) * bd_may_claim - test whether a block device can be claimed * @bdev: block device of interest * @holder: holder trying to claim @bdev + * @hops: holder ops * * Test whether @bdev can be claimed by @holder. * * RETURNS: * %true if @bdev can be claimed, %false otherwise. */ -static bool bd_may_claim(struct block_device *bdev, void *holder) +static bool bd_may_claim(struct block_device *bdev, void *holder, + const struct blk_holder_ops *hops) { struct block_device *whole = bdev_whole(bdev); @@ -480,8 +483,11 @@ static bool bd_may_claim(struct block_device *bdev, void *holder) /* * The same holder can always re-claim. */ - if (bdev->bd_holder == holder) + if (bdev->bd_holder == holder) { + if (WARN_ON_ONCE(bdev->bd_holder_ops != hops)) + return false; return true; + } return false; } @@ -499,6 +505,7 @@ static bool bd_may_claim(struct block_device *bdev, void *holder) * bd_prepare_to_claim - claim a block device * @bdev: block device of interest * @holder: holder trying to claim @bdev + * @hops: holder ops. * * Claim @bdev. This function fails if @bdev is already claimed by another * holder and waits if another claiming is in progress. 
return, the caller @@ -507,7 +514,8 @@ static bool bd_may_claim(struct block_device *bdev, void *holder) * RETURNS: * 0 if @bdev can be claimed, -EBUSY otherwise. */ -int bd_prepare_to_claim(struct block_device *bdev, void *holder) +int bd_prepare_to_claim(struct block_device *bdev, void *holder, + const struct blk_holder_ops *hops) { struct block_device *whole = bdev_whole(bdev); @@ -516,7 +524,7 @@ int bd_prepare_to_claim(struct block_device *bdev, void *holder) retry: mutex_lock(&bdev_lock); /* if someone else claimed, fail */ - if (!bd_may_claim(bdev, holder)) { + if (!bd_may_claim(bdev, holder, hops)) { mutex_unlock(&bdev_lock); return -EBUSY; } @@ -557,12 +565,13 @@ static void bd_clear_claiming(struct block_device *whole, void *holder) * Finish exclusive open of a block device. Mark the device as exlusively * open by the holder and wake up all waiters for exclusive open to finish. */ -static void bd_finish_claiming(struct block_device *bdev, void *holder) +static void bd_finish_claiming(struct block_device *bdev, void *holder, + const struct blk_holder_ops *hops) { struct block_device *whole = bdev_whole(bdev); mutex_lock(&bdev_lock); - BUG_ON(!bd_may_claim(bdev, holder)); + BUG_ON(!bd_may_claim(bdev, holder, hops)); /* * Note that for a whole device bd_holders will be incremented twice, * and bd_holder will be set to bd_may_claim before being set to holder @@ -570,7 +579,10 @@ static void bd_finish_claiming(struct block_device *bdev, void *holder) whole->bd_holders++; whole->bd_holder = bd_may_claim; bdev->bd_holders++; + mutex_lock(&bdev->bd_holder_lock); bdev->bd_holder = holder; + bdev->bd_holder_ops = hops; + mutex_unlock(&bdev->bd_holder_lock); bd_clear_claiming(whole, holder); mutex_unlock(&bdev_lock); } @@ -605,7 +617,10 @@ static void bd_end_claim(struct block_device *bdev) WARN_ON_ONCE(--bdev->bd_holders < 0); WARN_ON_ONCE(--whole->bd_holders < 0); if (!bdev->bd_holders) { + mutex_lock(&bdev->bd_holder_lock); bdev->bd_holder = NULL; + bdev->bd_holder_ops = NULL; + mutex_unlock(&bdev->bd_holder_lock); if (bdev->bd_write_holder) unblock = true; } @@ -735,6 +750,7 @@ void blkdev_put_no_open(struct block_device *bdev) * @dev: device number of block device to open * @mode: FMODE_* mask * @holder: exclusive holder identifier + * @hops: holder operations * * Open the block device described by device number @dev. If @mode includes * %FMODE_EXCL, the block device is opened with exclusive access. Specifying @@ -751,7 +767,8 @@ void blkdev_put_no_open(struct block_device *bdev) * RETURNS: * Reference to the block_device on success, ERR_PTR(-errno) on failure. */ -struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) +struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder, + const struct blk_holder_ops *hops) { bool unblock_events = true; struct block_device *bdev; @@ -771,7 +788,7 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) disk = bdev->bd_disk; if (mode & FMODE_EXCL) { - ret = bd_prepare_to_claim(bdev, holder); + ret = bd_prepare_to_claim(bdev, holder, hops); if (ret) goto put_blkdev; } @@ -791,7 +808,7 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) if (ret) goto put_module; if (mode & FMODE_EXCL) { - bd_finish_claiming(bdev, holder); + bd_finish_claiming(bdev, holder, hops); /* * Block event polling for write claims if requested. Any write @@ -842,7 +859,7 @@ EXPORT_SYMBOL(blkdev_get_by_dev); * Reference to the block_device on success, ERR_PTR(-errno) on failure. 
*/ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, - void *holder) + void *holder, const struct blk_holder_ops *hops) { struct block_device *bdev; dev_t dev; @@ -852,7 +869,7 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, if (error) return ERR_PTR(error); - bdev = blkdev_get_by_dev(dev, mode, holder); + bdev = blkdev_get_by_dev(dev, mode, holder, hops); if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { blkdev_put(bdev, mode); return ERR_PTR(-EACCES); diff --git a/block/fops.c b/block/fops.c index b12c4b2a3a69..6a3087b750a6 100644 --- a/block/fops.c +++ b/block/fops.c @@ -490,7 +490,7 @@ static int blkdev_open(struct inode *inode, struct file *filp) if ((filp->f_flags & O_ACCMODE) == 3) filp->f_mode |= FMODE_WRITE_IOCTL; - bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp); + bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp, NULL); if (IS_ERR(bdev)) return PTR_ERR(bdev); diff --git a/block/genhd.c b/block/genhd.c index a668d2f02087..b3bd58e9fbea 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -370,13 +370,15 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode) * scanners. */ if (!(mode & FMODE_EXCL)) { - ret = bd_prepare_to_claim(disk->part0, disk_scan_partitions); + ret = bd_prepare_to_claim(disk->part0, disk_scan_partitions, + NULL); if (ret) return ret; } set_bit(GD_NEED_PART_SCAN, &disk->state); - bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL); + bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL, + NULL); if (IS_ERR(bdev)) ret = PTR_ERR(bdev); else diff --git a/block/ioctl.c b/block/ioctl.c index 9c5f637ff153..c7d7d4345edb 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -454,7 +454,8 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode, if (mode & FMODE_EXCL) return set_blocksize(bdev, n); - if (IS_ERR(blkdev_get_by_dev(bdev->bd_dev, mode | FMODE_EXCL, &bdev))) + if (IS_ERR(blkdev_get_by_dev(bdev->bd_dev, mode | FMODE_EXCL, &bdev, + NULL))) return -EBUSY; ret = set_blocksize(bdev, n); blkdev_put(bdev, mode | FMODE_EXCL); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 1a5d3d72d91d..cab59dab3410 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1641,7 +1641,8 @@ static struct block_device *open_backing_dev(struct drbd_device *device, int err = 0; bdev = blkdev_get_by_path(bdev_path, - FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr); + FMODE_READ | FMODE_WRITE | FMODE_EXCL, + claim_ptr, NULL); if (IS_ERR(bdev)) { drbd_err(device, "open(\"%s\") failed with %ld\n", bdev_path, PTR_ERR(bdev)); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index bc31bb7072a2..a73c857f5bfe 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1015,7 +1015,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, * here to avoid changing device under exclusive owner. */ if (!(mode & FMODE_EXCL)) { - error = bd_prepare_to_claim(bdev, loop_configure); + error = bd_prepare_to_claim(bdev, loop_configure, NULL); if (error) goto out_putf; } diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index d5d7884cedd4..377f8b345352 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2125,7 +2125,8 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) * to read/write from/to it. It is already opened in O_NONBLOCK mode * so open should not fail. 
*/ - bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd); + bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd, + NULL); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); goto out; @@ -2530,7 +2531,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) } } - bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL); + bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL, NULL); if (IS_ERR(bdev)) return PTR_ERR(bdev); sdev = scsi_device_from_queue(bdev->bd_disk->queue); diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c index 2cfed2e58d64..cec22bbae2f9 100644 --- a/drivers/block/rnbd/rnbd-srv.c +++ b/drivers/block/rnbd/rnbd-srv.c @@ -719,7 +719,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess, goto reject; } - bdev = blkdev_get_by_path(full_path, open_flags, THIS_MODULE); + bdev = blkdev_get_by_path(full_path, open_flags, THIS_MODULE, NULL); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %d\n", diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 4807af1d5805..43b36da9b354 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -492,7 +492,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, vbd->pdevice = MKDEV(major, minor); bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ? - FMODE_READ : FMODE_WRITE, NULL); + FMODE_READ : FMODE_WRITE, NULL, NULL); if (IS_ERR(bdev)) { pr_warn("xen_vbd_create: device %08x could not be opened\n", diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index b86691d2133e..0bc779446c6f 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -508,7 +508,7 @@ static ssize_t backing_dev_store(struct device *dev, } bdev = blkdev_get_by_dev(inode->i_rdev, - FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram); + FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram, NULL); if (IS_ERR(bdev)) { err = PTR_ERR(bdev); bdev = NULL; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 7e9d19fd21dd..d84c09a73af8 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -2560,7 +2560,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, err = "failed to open device"; bdev = blkdev_get_by_path(strim(path), FMODE_READ|FMODE_WRITE|FMODE_EXCL, - sb); + sb, NULL); if (IS_ERR(bdev)) { if (bdev == ERR_PTR(-EBUSY)) { dev_t dev; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3b694ba3a106..d759f8bdb3df 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -746,7 +746,7 @@ static struct table_device *open_table_device(struct mapped_device *md, return ERR_PTR(-ENOMEM); refcount_set(&td->count, 1); - bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, _dm_claim_ptr); + bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, _dm_claim_ptr, NULL); if (IS_ERR(bdev)) { r = PTR_ERR(bdev); goto out_free_td; diff --git a/drivers/md/md.c b/drivers/md/md.c index 6a559a7e89c0..fabf9c543735 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3642,7 +3642,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe rdev->bdev = blkdev_get_by_dev(newdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, - super_format == -2 ? &claim_rdev : rdev); + super_format == -2 ? 
&claim_rdev : rdev, NULL); if (IS_ERR(rdev->bdev)) { pr_warn("md: could not open device unknown-block(%u,%u).\n", MAJOR(newdev), MINOR(newdev)); diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index 4cd37ec45762..7ac82c6fe350 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c @@ -235,7 +235,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size, return NULL; /* Get a handle on the device */ - bdev = blkdev_get_by_path(devname, mode, dev); + bdev = blkdev_get_by_path(devname, mode, dev, NULL); #ifndef MODULE /* @@ -257,7 +257,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size, devt = name_to_dev_t(devname); if (!devt) continue; - bdev = blkdev_get_by_dev(devt, mode, dev); + bdev = blkdev_get_by_dev(devt, mode, dev, NULL); } #endif diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index c2d6cea0236b..9b6d6d85c725 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -85,7 +85,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns) return -ENOTBLK; ns->bdev = blkdev_get_by_path(ns->device_path, - FMODE_READ | FMODE_WRITE, NULL); + FMODE_READ | FMODE_WRITE, NULL, NULL); if (IS_ERR(ns->bdev)) { ret = PTR_ERR(ns->bdev); if (ret != -ENOTBLK) { diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index 998a961e1704..f21198bc483e 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -130,7 +130,7 @@ int dasd_scan_partitions(struct dasd_block *block) struct block_device *bdev; int rc; - bdev = blkdev_get_by_dev(disk_devt(block->gdp), FMODE_READ, NULL); + bdev = blkdev_get_by_dev(disk_devt(block->gdp), FMODE_READ, NULL, NULL); if (IS_ERR(bdev)) { DBF_DEV_EVENT(DBF_ERR, block->base, "scan partitions error, blkdev_get returned %ld", diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index cc838ffd1294..a5cbbefa78ee 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -114,7 +114,7 @@ static int iblock_configure_device(struct se_device *dev) else dev->dev_flags |= DF_READ_ONLY; - bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); + bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev, NULL); if (IS_ERR(bd)) { ret = PTR_ERR(bd); goto out_free_bioset; diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index e7425549e39c..e3494e036c6c 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -367,7 +367,8 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) * for TYPE_DISK and TYPE_ZBC using supplied udev_path */ bd = blkdev_get_by_path(dev->udev_path, - FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); + FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv, + NULL); if (IS_ERR(bd)) { pr_err("pSCSI: blkdev_get_by_path() failed\n"); scsi_device_put(sd); diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 78696d331639..4de4984fa99b 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -258,7 +258,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, } bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, - fs_info->bdev_holder); + fs_info->bdev_holder, NULL); if (IS_ERR(bdev)) { btrfs_err(fs_info, "target device %s is invalid!", device_path); return PTR_ERR(bdev); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 841e799dece5..784ccc8f6c69 
100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -496,7 +496,7 @@ btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder, { int ret; - *bdev = blkdev_get_by_path(device_path, flags, holder); + *bdev = blkdev_get_by_path(device_path, flags, holder, NULL); if (IS_ERR(*bdev)) { ret = PTR_ERR(*bdev); @@ -1377,7 +1377,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags, * values temporarily, as the device paths of the fsid are the only * required information for assembling the volume. */ - bdev = blkdev_get_by_path(path, flags, holder); + bdev = blkdev_get_by_path(path, flags, holder, NULL); if (IS_ERR(bdev)) return ERR_CAST(bdev); @@ -2629,7 +2629,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path return -EROFS; bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, - fs_info->bdev_holder); + fs_info->bdev_holder, NULL); if (IS_ERR(bdev)) return PTR_ERR(bdev); diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 811ab66d805e..6c263e9cd38b 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -254,7 +254,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, dif->fscache = fscache; } else if (!sbi->devs->flatdev) { bdev = blkdev_get_by_path(dif->path, FMODE_READ | FMODE_EXCL, - sb->s_type); + sb->s_type, NULL); if (IS_ERR(bdev)) return PTR_ERR(bdev); dif->bdev = bdev; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 9680fe753e59..865625089ecc 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1103,7 +1103,8 @@ static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb) { struct block_device *bdev; - bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); + bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb, + NULL); if (IS_ERR(bdev)) goto fail; return bdev; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 9f15b03037db..7c34ab082f13 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -4025,7 +4025,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi) /* Single zoned block device mount */ FDEV(0).bdev = blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev, - sbi->sb->s_mode, sbi->sb->s_type); + sbi->sb->s_mode, sbi->sb->s_type, NULL); } else { /* Multi-device mount */ memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN); @@ -4044,7 +4044,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi) sbi->log_blocks_per_seg) - 1; } FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path, - sbi->sb->s_mode, sbi->sb->s_type); + sbi->sb->s_mode, sbi->sb->s_type, NULL); } if (IS_ERR(FDEV(i).bdev)) return PTR_ERR(FDEV(i).bdev); diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index 15c645827dec..46d393c8088a 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -1101,7 +1101,7 @@ int lmLogOpen(struct super_block *sb) */ bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, - log); + log, NULL); if (IS_ERR(bdev)) { rc = PTR_ERR(bdev); goto free; diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c index fea5f8821da5..38b066ca699e 100644 --- a/fs/nfs/blocklayout/dev.c +++ b/fs/nfs/blocklayout/dev.c @@ -243,7 +243,7 @@ bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d, if (!dev) return -EIO; - bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, NULL); + bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, NULL, NULL); if (IS_ERR(bdev)) { printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n", MAJOR(dev), MINOR(dev), PTR_ERR(bdev)); @@ -312,7 +312,8 @@ 
bl_open_path(struct pnfs_block_volume *v, const char *prefix) if (!devname) return ERR_PTR(-ENOMEM); - bdev = blkdev_get_by_path(devname, FMODE_READ | FMODE_WRITE, NULL); + bdev = blkdev_get_by_path(devname, FMODE_READ | FMODE_WRITE, NULL, + NULL); if (IS_ERR(bdev)) { pr_warn("pNFS: failed to open device %s (%ld)\n", devname, PTR_ERR(bdev)); diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 77f1e5778d1c..91bfbd973d1d 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -1285,7 +1285,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, if (!(flags & SB_RDONLY)) mode |= FMODE_WRITE; - sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type); + sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type, NULL); if (IS_ERR(sd.bdev)) return ERR_CAST(sd.bdev); diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 60b97c92e2b2..6b13b8c3f2b8 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -1786,7 +1786,7 @@ static ssize_t o2hb_region_dev_store(struct config_item *item, goto out2; reg->hr_bdev = blkdev_get_by_dev(f.file->f_mapping->host->i_rdev, - FMODE_WRITE | FMODE_READ, NULL); + FMODE_WRITE | FMODE_READ, NULL, NULL); if (IS_ERR(reg->hr_bdev)) { ret = PTR_ERR(reg->hr_bdev); reg->hr_bdev = NULL; diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 4d11d60f493c..5e4db9a0c8e5 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -2616,7 +2616,7 @@ static int journal_init_dev(struct super_block *super, if (jdev == super->s_dev) blkdev_mode &= ~FMODE_EXCL; journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode, - journal); + journal, NULL); journal->j_dev_mode = blkdev_mode; if (IS_ERR(journal->j_dev_bd)) { result = PTR_ERR(journal->j_dev_bd); @@ -2632,7 +2632,8 @@ static int journal_init_dev(struct super_block *super, } journal->j_dev_mode = blkdev_mode; - journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal); + journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal, + NULL); if (IS_ERR(journal->j_dev_bd)) { result = PTR_ERR(journal->j_dev_bd); journal->j_dev_bd = NULL; diff --git a/fs/super.c b/fs/super.c index 34afe411cf2b..012ce1400803 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1248,7 +1248,7 @@ int get_tree_bdev(struct fs_context *fc, if (!fc->source) return invalf(fc, "No source specified"); - bdev = blkdev_get_by_path(fc->source, mode, fc->fs_type); + bdev = blkdev_get_by_path(fc->source, mode, fc->fs_type, NULL); if (IS_ERR(bdev)) { errorf(fc, "%s: Can't open blockdev", fc->source); return PTR_ERR(bdev); @@ -1333,7 +1333,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, if (!(flags & SB_RDONLY)) mode |= FMODE_WRITE; - bdev = blkdev_get_by_path(dev_name, mode, fs_type); + bdev = blkdev_get_by_path(dev_name, mode, fs_type, NULL); if (IS_ERR(bdev)) return ERR_CAST(bdev); diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 7e706255f165..5684c538eb76 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -386,7 +386,7 @@ xfs_blkdev_get( int error = 0; *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, - mp); + mp, NULL); if (IS_ERR(*bdevp)) { error = PTR_ERR(*bdevp); xfs_warn(mp, "Invalid device [%s], error=%d", name, error); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 8ef209e3aa96..deb69eeab6bd 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -55,6 +55,8 @@ struct block_device { struct super_block * bd_super; void * bd_claiming; void * bd_holder; + const struct 
blk_holder_ops *bd_holder_ops; + struct mutex bd_holder_lock; /* The counter of freeze processes */ int bd_fsfreeze_count; int bd_holders; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d89c2da14698..44f2a8bc57e8 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1470,10 +1470,15 @@ void blkdev_show(struct seq_file *seqf, off_t offset); #define BLKDEV_MAJOR_MAX 0 #endif +struct blk_holder_ops { +}; + +struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder, + const struct blk_holder_ops *hops); struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, - void *holder); -struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder); -int bd_prepare_to_claim(struct block_device *bdev, void *holder); + void *holder, const struct blk_holder_ops *hops); +int bd_prepare_to_claim(struct block_device *bdev, void *holder, + const struct blk_holder_ops *hops); void bd_abort_claiming(struct block_device *bdev, void *holder); void blkdev_put(struct block_device *bdev, fmode_t mode); diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 92e41ed292ad..801c411530d1 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -357,7 +357,7 @@ static int swsusp_swap_check(void) root_swap = res; hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, FMODE_WRITE, - NULL); + NULL, NULL); if (IS_ERR(hib_resume_bdev)) return PTR_ERR(hib_resume_bdev); @@ -1524,7 +1524,7 @@ int swsusp_check(void) mode |= FMODE_EXCL; hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, - mode, &holder); + mode, &holder, NULL); if (!IS_ERR(hib_resume_bdev)) { set_blocksize(hib_resume_bdev, PAGE_SIZE); clear_page(swsusp_header); diff --git a/mm/swapfile.c b/mm/swapfile.c index 274bbf797480..cfbcf7d5705f 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2770,7 +2770,8 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) if (S_ISBLK(inode->i_mode)) { p->bdev = blkdev_get_by_dev(inode->i_rdev, - FMODE_READ | FMODE_WRITE | FMODE_EXCL, p); + FMODE_READ | FMODE_WRITE | FMODE_EXCL, p, + NULL); if (IS_ERR(p->bdev)) { error = PTR_ERR(p->bdev); p->bdev = NULL; -- cgit v1.2.3 From b9dad156af1fd6c66ffa40f007c09823a8319abe Mon Sep 17 00:00:00 2001 From: Zhen Lei Date: Tue, 6 Jun 2023 14:55:43 +0800 Subject: mm/slab_common: reduce an if statement in create_cache() Move the 'out:' statement block out of the successful path to avoid redundant check on 'err'. The value of 'err' is always zero on success and negative on failure. No functional changes, no performance improvements, just a little more readability. Signed-off-by: Zhen Lei Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Vlastimil Babka --- mm/slab_common.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/slab_common.c b/mm/slab_common.c index 607249785c07..f6fe35105774 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -236,14 +236,12 @@ static struct kmem_cache *create_cache(const char *name, s->refcount = 1; list_add(&s->list, &slab_caches); -out: - if (err) - return ERR_PTR(err); return s; out_free_cache: kmem_cache_free(kmem_cache, s); - goto out; +out: + return ERR_PTR(err); } /** -- cgit v1.2.3 From dcdfdd40fa82b6704d2841938e5c8ec3051eb0d6 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 6 Jun 2023 17:26:29 +0300 Subject: mm: Add support for unaccepted memory UEFI Specification version 2.9 introduces the concept of memory acceptance. 
Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP, require memory to be accepted before it can be used by the guest. Accepting happens via a protocol specific to the Virtual Machine platform. There are several ways the kernel can deal with unaccepted memory: 1. Accept all the memory during boot. It is easy to implement and it doesn't have a runtime cost once the system is booted. The downside is a very long boot time. Acceptance can be parallelized across multiple CPUs to keep it manageable (e.g. via DEFERRED_STRUCT_PAGE_INIT), but it tends to saturate memory bandwidth and does not scale beyond that point. 2. Accept a block of memory on first use. It requires more infrastructure and changes in the page allocator to make it work, but it provides good boot time. On-demand memory acceptance means latency spikes every time the kernel steps onto a new memory block. The spikes will go away once the workload's data set size stabilizes or all memory gets accepted. 3. Accept all memory in the background. Introduce a thread (or multiple threads) that accepts memory proactively. It minimizes the time the system experiences latency spikes on memory allocation while keeping boot time low. This approach cannot function on its own. It is an extension of #2: background memory acceptance requires a functional scheduler, but the page allocator may need to tap into unaccepted memory before that. The downside of the approach is that these threads also steal CPU cycles and memory bandwidth from the user's workload and may hurt the user experience. Implement #1 and #2 for now. #2 is the default. Some workloads may want to use #1 with accept_memory=eager on the kernel command line. #3 can be implemented later based on user demand. Support of unaccepted memory requires a few changes in core-mm code: - memblock accepts memory on allocation. It serves early boot memory allocations and doesn't limit them to the pre-accepted pool of memory. - the page allocator accepts memory on the first allocation of a page. When the kernel runs out of accepted memory, it accepts memory until the high watermark is reached. This helps to minimize fragmentation. EFI code will provide two helpers if the platform supports unaccepted memory: - accept_memory() makes a range of physical addresses accepted. - range_contains_unaccepted_memory() checks whether anything within the range of physical addresses requires acceptance. Signed-off-by: Kirill A. 
Shutemov Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Vlastimil Babka Acked-by: Mike Rapoport # memblock Link: https://lore.kernel.org/r/20230606142637.5171-2-kirill.shutemov@linux.intel.com --- drivers/base/node.c | 7 ++ fs/proc/meminfo.c | 5 ++ include/linux/mm.h | 19 ++++++ include/linux/mmzone.h | 8 +++ mm/memblock.c | 9 +++ mm/mm_init.c | 7 ++ mm/page_alloc.c | 173 +++++++++++++++++++++++++++++++++++++++++++++++++ mm/vmstat.c | 3 + 8 files changed, 231 insertions(+) (limited to 'mm') diff --git a/drivers/base/node.c b/drivers/base/node.c index b46db17124f3..655975946ef6 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -448,6 +448,9 @@ static ssize_t node_read_meminfo(struct device *dev, "Node %d ShmemPmdMapped: %8lu kB\n" "Node %d FileHugePages: %8lu kB\n" "Node %d FilePmdMapped: %8lu kB\n" +#endif +#ifdef CONFIG_UNACCEPTED_MEMORY + "Node %d Unaccepted: %8lu kB\n" #endif , nid, K(node_page_state(pgdat, NR_FILE_DIRTY)), @@ -477,6 +480,10 @@ static ssize_t node_read_meminfo(struct device *dev, nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), nid, K(node_page_state(pgdat, NR_FILE_THPS)), nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED)) +#endif +#ifdef CONFIG_UNACCEPTED_MEMORY + , + nid, K(sum_zone_node_page_state(nid, NR_UNACCEPTED)) #endif ); len += hugetlb_report_node_meminfo(buf, len, nid); diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index b43d0bd42762..8dca4d6d96c7 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -168,6 +168,11 @@ static int meminfo_proc_show(struct seq_file *m, void *v) global_zone_page_state(NR_FREE_CMA_PAGES)); #endif +#ifdef CONFIG_UNACCEPTED_MEMORY + show_val_kb(m, "Unaccepted: ", + global_zone_page_state(NR_UNACCEPTED)); +#endif + hugetlb_report_meminfo(m); arch_report_meminfo(m); diff --git a/include/linux/mm.h b/include/linux/mm.h index 27ce77080c79..d9174d464348 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3816,4 +3816,23 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start, } #endif +#ifdef CONFIG_UNACCEPTED_MEMORY + +bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end); +void accept_memory(phys_addr_t start, phys_addr_t end); + +#else + +static inline bool range_contains_unaccepted_memory(phys_addr_t start, + phys_addr_t end) +{ + return false; +} + +static inline void accept_memory(phys_addr_t start, phys_addr_t end) +{ +} + +#endif + #endif /* _LINUX_MM_H */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index a4889c9d4055..6c1c2fc13017 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -143,6 +143,9 @@ enum zone_stat_item { NR_ZSPAGES, /* allocated in zsmalloc */ #endif NR_FREE_CMA_PAGES, +#ifdef CONFIG_UNACCEPTED_MEMORY + NR_UNACCEPTED, +#endif NR_VM_ZONE_STAT_ITEMS }; enum node_stat_item { @@ -910,6 +913,11 @@ struct zone { /* free areas of different sizes */ struct free_area free_area[MAX_ORDER + 1]; +#ifdef CONFIG_UNACCEPTED_MEMORY + /* Pages to be accepted. All pages on the list are MAX_ORDER */ + struct list_head unaccepted_pages; +#endif + /* zone flags, see below */ unsigned long flags; diff --git a/mm/memblock.c b/mm/memblock.c index 3feafea06ab2..50b921119600 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1436,6 +1436,15 @@ done: */ kmemleak_alloc_phys(found, size, 0); + /* + * Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP, + * require memory to be accepted before it can be used by the + * guest. + * + * Accept the memory of the allocated buffer. 
+ */ + accept_memory(found, found + size); + return found; } diff --git a/mm/mm_init.c b/mm/mm_init.c index 7f7f9c677854..1cfc08e25f93 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1375,6 +1375,10 @@ static void __meminit zone_init_free_lists(struct zone *zone) INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); zone->free_area[order].nr_free = 0; } + +#ifdef CONFIG_UNACCEPTED_MEMORY + INIT_LIST_HEAD(&zone->unaccepted_pages); +#endif } void __meminit init_currently_empty_zone(struct zone *zone, @@ -1960,6 +1964,9 @@ static void __init deferred_free_range(unsigned long pfn, return; } + /* Accept chunks smaller than MAX_ORDER upfront */ + accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages)); + for (i = 0; i < nr_pages; i++, page++, pfn++) { if (pageblock_aligned(pfn)) set_pageblock_migratetype(page, MIGRATE_MOVABLE); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 47421bedc12b..d239fba3f31c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -387,6 +387,12 @@ EXPORT_SYMBOL(nr_node_ids); EXPORT_SYMBOL(nr_online_nodes); #endif +static bool page_contains_unaccepted(struct page *page, unsigned int order); +static void accept_page(struct page *page, unsigned int order); +static bool try_to_accept_memory(struct zone *zone, unsigned int order); +static inline bool has_unaccepted_memory(void); +static bool __free_unaccepted(struct page *page); + int page_group_by_mobility_disabled __read_mostly; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT @@ -1481,6 +1487,13 @@ void __free_pages_core(struct page *page, unsigned int order) atomic_long_add(nr_pages, &page_zone(page)->managed_pages); + if (page_contains_unaccepted(page, order)) { + if (order == MAX_ORDER && __free_unaccepted(page)) + return; + + accept_page(page, order); + } + /* * Bypass PCP and place fresh pages right to the tail, primarily * relevant for memory onlining. @@ -3159,6 +3172,9 @@ static inline long __zone_watermark_unusable_free(struct zone *z, if (!(alloc_flags & ALLOC_CMA)) unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); #endif +#ifdef CONFIG_UNACCEPTED_MEMORY + unusable_free += zone_page_state(z, NR_UNACCEPTED); +#endif return unusable_free; } @@ -3458,6 +3474,11 @@ retry: gfp_mask)) { int ret; + if (has_unaccepted_memory()) { + if (try_to_accept_memory(zone, order)) + goto try_this_zone; + } + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT /* * Watermark failed for this zone, but see if we can @@ -3510,6 +3531,11 @@ try_this_zone: return page; } else { + if (has_unaccepted_memory()) { + if (try_to_accept_memory(zone, order)) + goto try_this_zone; + } + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT /* Try again if zone has deferred pages */ if (deferred_pages_enabled()) { @@ -7215,3 +7241,150 @@ bool has_managed_dma(void) return false; } #endif /* CONFIG_ZONE_DMA */ + +#ifdef CONFIG_UNACCEPTED_MEMORY + +/* Counts number of zones with unaccepted pages. 
*/ +static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages); + +static bool lazy_accept = true; + +static int __init accept_memory_parse(char *p) +{ + if (!strcmp(p, "lazy")) { + lazy_accept = true; + return 0; + } else if (!strcmp(p, "eager")) { + lazy_accept = false; + return 0; + } else { + return -EINVAL; + } +} +early_param("accept_memory", accept_memory_parse); + +static bool page_contains_unaccepted(struct page *page, unsigned int order) +{ + phys_addr_t start = page_to_phys(page); + phys_addr_t end = start + (PAGE_SIZE << order); + + return range_contains_unaccepted_memory(start, end); +} + +static void accept_page(struct page *page, unsigned int order) +{ + phys_addr_t start = page_to_phys(page); + + accept_memory(start, start + (PAGE_SIZE << order)); +} + +static bool try_to_accept_memory_one(struct zone *zone) +{ + unsigned long flags; + struct page *page; + bool last; + + if (list_empty(&zone->unaccepted_pages)) + return false; + + spin_lock_irqsave(&zone->lock, flags); + page = list_first_entry_or_null(&zone->unaccepted_pages, + struct page, lru); + if (!page) { + spin_unlock_irqrestore(&zone->lock, flags); + return false; + } + + list_del(&page->lru); + last = list_empty(&zone->unaccepted_pages); + + __mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); + __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); + spin_unlock_irqrestore(&zone->lock, flags); + + accept_page(page, MAX_ORDER); + + __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL); + + if (last) + static_branch_dec(&zones_with_unaccepted_pages); + + return true; +} + +static bool try_to_accept_memory(struct zone *zone, unsigned int order) +{ + long to_accept; + int ret = false; + + /* How much to accept to get to high watermark? */ + to_accept = high_wmark_pages(zone) - + (zone_page_state(zone, NR_FREE_PAGES) - + __zone_watermark_unusable_free(zone, order, 0)); + + /* Accept at least one page */ + do { + if (!try_to_accept_memory_one(zone)) + break; + ret = true; + to_accept -= MAX_ORDER_NR_PAGES; + } while (to_accept > 0); + + return ret; +} + +static inline bool has_unaccepted_memory(void) +{ + return static_branch_unlikely(&zones_with_unaccepted_pages); +} + +static bool __free_unaccepted(struct page *page) +{ + struct zone *zone = page_zone(page); + unsigned long flags; + bool first = false; + + if (!lazy_accept) + return false; + + spin_lock_irqsave(&zone->lock, flags); + first = list_empty(&zone->unaccepted_pages); + list_add_tail(&page->lru, &zone->unaccepted_pages); + __mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); + __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); + spin_unlock_irqrestore(&zone->lock, flags); + + if (first) + static_branch_inc(&zones_with_unaccepted_pages); + + return true; +} + +#else + +static bool page_contains_unaccepted(struct page *page, unsigned int order) +{ + return false; +} + +static void accept_page(struct page *page, unsigned int order) +{ +} + +static bool try_to_accept_memory(struct zone *zone, unsigned int order) +{ + return false; +} + +static inline bool has_unaccepted_memory(void) +{ + return false; +} + +static bool __free_unaccepted(struct page *page) +{ + BUILD_BUG(); + return false; +} + +#endif /* CONFIG_UNACCEPTED_MEMORY */ diff --git a/mm/vmstat.c b/mm/vmstat.c index c28046371b45..282349cabf01 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1180,6 +1180,9 @@ const char * const vmstat_text[] = { "nr_zspages", #endif "nr_free_cma", +#ifdef CONFIG_UNACCEPTED_MEMORY + "nr_unaccepted", +#endif /* enum 
numa_stat_item counters */ #ifdef CONFIG_NUMA -- cgit v1.2.3 From 3b8abb3239530c423c0b97e42af7f7e856e1ee96 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Tue, 2 May 2023 09:08:38 -0700 Subject: mm: kmem: fix a NULL pointer dereference in obj_stock_flush_required() KCSAN found an issue in obj_stock_flush_required(): stock->cached_objcg can be reset between the check and dereference: ================================================================== BUG: KCSAN: data-race in drain_all_stock / drain_obj_stock write to 0xffff888237c2a2f8 of 8 bytes by task 19625 on cpu 0: drain_obj_stock+0x408/0x4e0 mm/memcontrol.c:3306 refill_obj_stock+0x9c/0x1e0 mm/memcontrol.c:3340 obj_cgroup_uncharge+0xe/0x10 mm/memcontrol.c:3408 memcg_slab_free_hook mm/slab.h:587 [inline] __cache_free mm/slab.c:3373 [inline] __do_kmem_cache_free mm/slab.c:3577 [inline] kmem_cache_free+0x105/0x280 mm/slab.c:3602 __d_free fs/dcache.c:298 [inline] dentry_free fs/dcache.c:375 [inline] __dentry_kill+0x422/0x4a0 fs/dcache.c:621 dentry_kill+0x8d/0x1e0 dput+0x118/0x1f0 fs/dcache.c:913 __fput+0x3bf/0x570 fs/file_table.c:329 ____fput+0x15/0x20 fs/file_table.c:349 task_work_run+0x123/0x160 kernel/task_work.c:179 resume_user_mode_work include/linux/resume_user_mode.h:49 [inline] exit_to_user_mode_loop+0xcf/0xe0 kernel/entry/common.c:171 exit_to_user_mode_prepare+0x6a/0xa0 kernel/entry/common.c:203 __syscall_exit_to_user_mode_work kernel/entry/common.c:285 [inline] syscall_exit_to_user_mode+0x26/0x140 kernel/entry/common.c:296 do_syscall_64+0x4d/0xc0 arch/x86/entry/common.c:86 entry_SYSCALL_64_after_hwframe+0x63/0xcd read to 0xffff888237c2a2f8 of 8 bytes by task 19632 on cpu 1: obj_stock_flush_required mm/memcontrol.c:3319 [inline] drain_all_stock+0x174/0x2a0 mm/memcontrol.c:2361 try_charge_memcg+0x6d0/0xd10 mm/memcontrol.c:2703 try_charge mm/memcontrol.c:2837 [inline] mem_cgroup_charge_skmem+0x51/0x140 mm/memcontrol.c:7290 sock_reserve_memory+0xb1/0x390 net/core/sock.c:1025 sk_setsockopt+0x800/0x1e70 net/core/sock.c:1525 udp_lib_setsockopt+0x99/0x6c0 net/ipv4/udp.c:2692 udp_setsockopt+0x73/0xa0 net/ipv4/udp.c:2817 sock_common_setsockopt+0x61/0x70 net/core/sock.c:3668 __sys_setsockopt+0x1c3/0x230 net/socket.c:2271 __do_sys_setsockopt net/socket.c:2282 [inline] __se_sys_setsockopt net/socket.c:2279 [inline] __x64_sys_setsockopt+0x66/0x80 net/socket.c:2279 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd value changed: 0xffff8881382d52c0 -> 0xffff888138893740 Reported by Kernel Concurrency Sanitizer on: CPU: 1 PID: 19632 Comm: syz-executor.0 Not tainted 6.3.0-rc2-syzkaller-00387-g534293368afa #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/02/2023 Fix it by using READ_ONCE()/WRITE_ONCE() for all accesses to stock->cached_objcg. 
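As a minimal userspace sketch of the pattern the fix applies (the struct, field and function names here are illustrative, not the kernel's): the lockless reader takes exactly one snapshot of the shared pointer and only ever dereferences that snapshot, so a concurrent writer clearing the field can no longer turn a passed NULL check into a NULL dereference.

#include <stdatomic.h>
#include <stddef.h>

struct objcg { int id; };

struct stock {
	_Atomic(struct objcg *) cached_objcg;	/* updated by other CPUs */
};

/* Reader side: analogous to the READ_ONCE() change in the patch. */
static int flush_required(struct stock *stock)
{
	struct objcg *objcg = atomic_load_explicit(&stock->cached_objcg,
						   memory_order_relaxed);

	/* 'objcg' is a private snapshot; it cannot become NULL under us. */
	return objcg != NULL && objcg->id != 0;
}

/* Writer side: analogous to the WRITE_ONCE() change in the patch. */
static void stock_reset(struct stock *stock)
{
	atomic_store_explicit(&stock->cached_objcg, NULL, memory_order_relaxed);
}

READ_ONCE()/WRITE_ONCE() play the role of the relaxed atomic accesses here: they add no ordering, they only guarantee that each marked load and store is performed exactly once and is not torn.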
Link: https://lkml.kernel.org/r/20230502160839.361544-1-roman.gushchin@linux.dev Fixes: bf4f059954dc ("mm: memcg/slab: obj_cgroup API") Signed-off-by: Roman Gushchin Reported-by: syzbot+774c29891415ab0fd29d@syzkaller.appspotmail.com Reported-by: Dmitry Vyukov Link: https://lore.kernel.org/linux-mm/CACT4Y+ZfucZhM60YPphWiCLJr6+SGFhT+jjm8k1P-a_8Kkxsjg@mail.gmail.com/T/#t Reviewed-by: Yosry Ahmed Acked-by: Shakeel Butt Reviewed-by: Dmitry Vyukov Signed-off-by: Andrew Morton --- mm/memcontrol.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 4b27e245a055..c823c35c2ed4 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3208,12 +3208,12 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, * accumulating over a page of vmstat data or when pgdat or idx * changes. */ - if (stock->cached_objcg != objcg) { + if (READ_ONCE(stock->cached_objcg) != objcg) { old = drain_obj_stock(stock); obj_cgroup_get(objcg); stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; - stock->cached_objcg = objcg; + WRITE_ONCE(stock->cached_objcg, objcg); stock->cached_pgdat = pgdat; } else if (stock->cached_pgdat != pgdat) { /* Flush the existing cached vmstat data */ @@ -3267,7 +3267,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) local_lock_irqsave(&memcg_stock.stock_lock, flags); stock = this_cpu_ptr(&memcg_stock); - if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { + if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) { stock->nr_bytes -= nr_bytes; ret = true; } @@ -3279,7 +3279,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock) { - struct obj_cgroup *old = stock->cached_objcg; + struct obj_cgroup *old = READ_ONCE(stock->cached_objcg); if (!old) return NULL; @@ -3332,7 +3332,7 @@ static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock) stock->cached_pgdat = NULL; } - stock->cached_objcg = NULL; + WRITE_ONCE(stock->cached_objcg, NULL); /* * The `old' objects needs to be released by the caller via * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock. @@ -3343,10 +3343,11 @@ static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock) static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, struct mem_cgroup *root_memcg) { + struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg); struct mem_cgroup *memcg; - if (stock->cached_objcg) { - memcg = obj_cgroup_memcg(stock->cached_objcg); + if (objcg) { + memcg = obj_cgroup_memcg(objcg); if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) return true; } @@ -3365,10 +3366,10 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, local_lock_irqsave(&memcg_stock.stock_lock, flags); stock = this_cpu_ptr(&memcg_stock); - if (stock->cached_objcg != objcg) { /* reset if necessary */ + if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ old = drain_obj_stock(stock); obj_cgroup_get(objcg); - stock->cached_objcg = objcg; + WRITE_ONCE(stock->cached_objcg, objcg); stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) ? 
atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; allow_uncharge = true; /* Allow uncharge when objcg changes */ -- cgit v1.2.3 From f785a8f21a9cc46fced9f53c51a6f2dc647ed484 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Tue, 2 May 2023 09:08:39 -0700 Subject: mm: memcg: use READ_ONCE()/WRITE_ONCE() to access stock->cached A memcg pointer in the percpu stock can be accessed by drain_all_stock() from another cpu in a lockless way. In theory it might lead to an issue, similar to the one which has been discovered with stock->cached_objcg, where the pointer was zeroed between the check for being NULL and dereferencing. In this case the issue is unlikely a real problem, but to make it bulletproof and similar to stock->cached_objcg, let's annotate all accesses to stock->cached with READ_ONCE()/WTRITE_ONCE(). Link: https://lkml.kernel.org/r/20230502160839.361544-2-roman.gushchin@linux.dev Signed-off-by: Roman Gushchin Acked-by: Shakeel Butt Cc: Dmitry Vyukov Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/memcontrol.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c823c35c2ed4..1e364ad495a3 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2275,7 +2275,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) local_lock_irqsave(&memcg_stock.stock_lock, flags); stock = this_cpu_ptr(&memcg_stock); - if (memcg == stock->cached && stock->nr_pages >= nr_pages) { + if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) { stock->nr_pages -= nr_pages; ret = true; } @@ -2290,7 +2290,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) */ static void drain_stock(struct memcg_stock_pcp *stock) { - struct mem_cgroup *old = stock->cached; + struct mem_cgroup *old = READ_ONCE(stock->cached); if (!old) return; @@ -2303,7 +2303,7 @@ static void drain_stock(struct memcg_stock_pcp *stock) } css_put(&old->css); - stock->cached = NULL; + WRITE_ONCE(stock->cached, NULL); } static void drain_local_stock(struct work_struct *dummy) @@ -2338,10 +2338,10 @@ static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) struct memcg_stock_pcp *stock; stock = this_cpu_ptr(&memcg_stock); - if (stock->cached != memcg) { /* reset if necessary */ + if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */ drain_stock(stock); css_get(&memcg->css); - stock->cached = memcg; + WRITE_ONCE(stock->cached, memcg); } stock->nr_pages += nr_pages; @@ -2383,7 +2383,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) bool flush = false; rcu_read_lock(); - memcg = stock->cached; + memcg = READ_ONCE(stock->cached); if (memcg && stock->nr_pages && mem_cgroup_is_descendant(memcg, root_memcg)) flush = true; -- cgit v1.2.3 From 4bb6dc79d987b243d65c70c5029e51e719cfb94b Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Fri, 28 Apr 2023 13:54:38 -0700 Subject: migrate_pages: avoid blocking for IO in MIGRATE_SYNC_LIGHT The MIGRATE_SYNC_LIGHT mode is intended to block for things that will finish quickly but not for things that will take a long time. Exactly how long is too long is not well defined, but waits of tens of milliseconds is likely non-ideal. When putting a Chromebook under memory pressure (opening over 90 tabs on a 4GB machine) it was fairly easy to see delays waiting for some locks in the kcompactd code path of > 100 ms. 
While the laptop wasn't amazingly usable in this state, it was still limping along and this state isn't something artificial. Sometimes we simply end up with a lot of memory pressure. Putting the same Chromebook under memory pressure while it was running Android apps (though not stressing them) showed a much worse result (NOTE: this was on a older kernel but the codepaths here are similar). Android apps on ChromeOS currently run from a 128K-block, zlib-compressed, loopback-mounted squashfs disk. If we get a page fault from something backed by the squashfs filesystem we could end up holding a folio lock while reading enough from disk to decompress 128K (and then decompressing it using the somewhat slow zlib algorithms). That reading goes through the ext4 subsystem (because it's a loopback mount) before eventually ending up in the block subsystem. This extra jaunt adds extra overhead. Without much work I could see cases where we ended up blocked on a folio lock for over a second. With more extreme memory pressure I could see up to 25 seconds. We considered adding a timeout in the case of MIGRATE_SYNC_LIGHT for the two locks that were seen to be slow [1] and that generated much discussion. After discussion, it was decided that we should avoid waiting for the two locks during MIGRATE_SYNC_LIGHT if they were being held for IO. We'll continue with the unbounded wait for the more full SYNC modes. With this change, I couldn't see any slow waits on these locks with my previous testcases. NOTE: The reason I stated digging into this originally isn't because some benchmark had gone awry, but because we've received in-the-field crash reports where we have a hung task waiting on the page lock (which is the equivalent code path on old kernels). While the root cause of those crashes is likely unrelated and won't be fixed by this patch, analyzing those crash reports did point out these very long waits seemed like something good to fix. With this patch we should no longer hang waiting on these locks, but presumably the system will still be in a bad shape and hang somewhere else. [1] https://lore.kernel.org/r/20230421151135.v2.1.I2b71e11264c5c214bc59744b9e13e4c353bc5714@changeid Link: https://lkml.kernel.org/r/20230428135414.v3.1.Ia86ccac02a303154a0b8bc60567e7a95d34c96d3@changeid Signed-off-by: Douglas Anderson Suggested-by: Matthew Wilcox Reviewed-by: Matthew Wilcox (Oracle) Acked-by: Mel Gorman Cc: Hillf Danton Cc: Gao Xiang Cc: Alexander Viro Cc: Christian Brauner Cc: Gao Xiang Cc: Huang Ying Cc: Vlastimil Babka Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/migrate.c | 49 ++++++++++++++++++++++++++----------------------- 1 file changed, 26 insertions(+), 23 deletions(-) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index 01cac26a3127..43d818fd1afd 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -692,37 +692,32 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head, enum migrate_mode mode) { struct buffer_head *bh = head; + struct buffer_head *failed_bh; - /* Simple case, sync compaction */ - if (mode != MIGRATE_ASYNC) { - do { - lock_buffer(bh); - bh = bh->b_this_page; - - } while (bh != head); - - return true; - } - - /* async case, we cannot block on lock_buffer so use trylock_buffer */ do { if (!trylock_buffer(bh)) { - /* - * We failed to lock the buffer and cannot stall in - * async migration. 
Release the taken locks - */ - struct buffer_head *failed_bh = bh; - bh = head; - while (bh != failed_bh) { - unlock_buffer(bh); - bh = bh->b_this_page; - } - return false; + if (mode == MIGRATE_ASYNC) + goto unlock; + if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh)) + goto unlock; + lock_buffer(bh); } bh = bh->b_this_page; } while (bh != head); + return true; + +unlock: + /* We failed to lock the buffer and cannot stall. */ + failed_bh = bh; + bh = head; + while (bh != failed_bh) { + unlock_buffer(bh); + bh = bh->b_this_page; + } + + return false; } static int __buffer_migrate_folio(struct address_space *mapping, @@ -1156,6 +1151,14 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page if (current->flags & PF_MEMALLOC) goto out; + /* + * In "light" mode, we can wait for transient locks (eg + * inserting a page into the page table), but it's not + * worth waiting for I/O. + */ + if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src)) + goto out; + folio_lock(src); } locked = true; -- cgit v1.2.3 From 5b42360c73b0679505dac6ec44d234cfec61120c Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Fri, 28 Apr 2023 13:24:05 +0000 Subject: memcg: use seq_buf_do_printk() with mem_cgroup_print_oom_meminfo() Currently, we format all the memcg stats into a buffer in mem_cgroup_print_oom_meminfo() and use pr_info() to dump it to the logs. However, this buffer is large in size. Although it is currently working as intended, ther is a dependency between the memcg stats buffer and the printk record size limit. If we add more stats in the future and the buffer becomes larger than the printk record size limit, or if the prink record size limit is reduced, the logs may be truncated. It is safer to use seq_buf_do_printk(), which will automatically break up the buffer at line breaks and issue small printk() calls. Refactor the code to move the seq_buf from memory_stat_format() to its callers, and use seq_buf_do_printk() to print the seq_buf in mem_cgroup_print_oom_meminfo(). Link: https://lkml.kernel.org/r/20230428132406.2540811-2-yosryahmed@google.com Signed-off-by: Yosry Ahmed Acked-by: Michal Hocko Reviewed-by: Sergey Senozhatsky Acked-by: Shakeel Butt Reviewed-by: Muchun Song Cc: Johannes Weiner Cc: Michal Hocko Cc: Petr Mladek Cc: Roman Gushchin Cc: Steven Rostedt (Google) Signed-off-by: Andrew Morton --- mm/memcontrol.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 1e364ad495a3..a79a47f7fc20 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1580,13 +1580,10 @@ static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, return memcg_page_state(memcg, item) * memcg_page_state_unit(item); } -static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize) +static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) { - struct seq_buf s; int i; - seq_buf_init(&s, buf, bufsize); - /* * Provide statistics on the state of the memory subsystem as * well as cumulative event counters that show past behavior. 
@@ -1603,21 +1600,21 @@ static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize) u64 size; size = memcg_page_state_output(memcg, memory_stats[i].idx); - seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size); + seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size); if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) { size += memcg_page_state_output(memcg, NR_SLAB_RECLAIMABLE_B); - seq_buf_printf(&s, "slab %llu\n", size); + seq_buf_printf(s, "slab %llu\n", size); } } /* Accumulated memory events */ - seq_buf_printf(&s, "pgscan %lu\n", + seq_buf_printf(s, "pgscan %lu\n", memcg_events(memcg, PGSCAN_KSWAPD) + memcg_events(memcg, PGSCAN_DIRECT) + memcg_events(memcg, PGSCAN_KHUGEPAGED)); - seq_buf_printf(&s, "pgsteal %lu\n", + seq_buf_printf(s, "pgsteal %lu\n", memcg_events(memcg, PGSTEAL_KSWAPD) + memcg_events(memcg, PGSTEAL_DIRECT) + memcg_events(memcg, PGSTEAL_KHUGEPAGED)); @@ -1627,13 +1624,13 @@ static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize) memcg_vm_event_stat[i] == PGPGOUT) continue; - seq_buf_printf(&s, "%s %lu\n", + seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg_vm_event_stat[i]), memcg_events(memcg, memcg_vm_event_stat[i])); } /* The above should easily fit into one page */ - WARN_ON_ONCE(seq_buf_has_overflowed(&s)); + WARN_ON_ONCE(seq_buf_has_overflowed(s)); } #define K(x) ((x) << (PAGE_SHIFT-10)) @@ -1671,6 +1668,7 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) { /* Use static buffer, for the caller is holding oom_lock. */ static char buf[PAGE_SIZE]; + struct seq_buf s; lockdep_assert_held(&oom_lock); @@ -1693,8 +1691,9 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) pr_info("Memory cgroup stats for "); pr_cont_cgroup_path(memcg->css.cgroup); pr_cont(":"); - memory_stat_format(memcg, buf, sizeof(buf)); - pr_info("%s", buf); + seq_buf_init(&s, buf, sizeof(buf)); + memory_stat_format(memcg, &s); + seq_buf_do_printk(&s, KERN_INFO); } /* @@ -6635,10 +6634,12 @@ static int memory_stat_show(struct seq_file *m, void *v) { struct mem_cgroup *memcg = mem_cgroup_from_seq(m); char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + struct seq_buf s; if (!buf) return -ENOMEM; - memory_stat_format(memcg, buf, PAGE_SIZE); + seq_buf_init(&s, buf, PAGE_SIZE); + memory_stat_format(memcg, &s); seq_puts(m, buf); kfree(buf); return 0; -- cgit v1.2.3 From dddb44ffa0d59c8a3f2a5cb9690ccebe3150810c Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Fri, 28 Apr 2023 13:24:06 +0000 Subject: memcg: dump memory.stat during cgroup OOM for v1 Patch series "memcg: OOM log improvements", v2. This short patch series brings back some cgroup v1 stats in OOM logs that were unnecessarily changed before. It also makes memcg OOM logs less reliant on printk() internals. This patch (of 2): Commit c8713d0b2312 ("mm: memcontrol: dump memory.stat during cgroup OOM") made sure we dump all the stats in memory.stat during a cgroup OOM, but it also introduced a slight behavioral change. The code used to print the non-hierarchical v1 cgroup stats for the entire cgroup subtree, now it only prints the v2 cgroup stats for the cgroup under OOM. For cgroup v1 users, this introduces a few problems: (a) The non-hierarchical stats of the memcg under OOM are no longer shown. (b) A couple of v1-only stats (e.g. pgpgin, pgpgout) are no longer shown. (c) We show the list of cgroup v2 stats, even in cgroup v1. This list of stats is not tracked with v1 in mind. 
While most of the stats seem to be working on v1, there may be some stats that are not fully or correctly tracked. Although OOM log is not set in stone, we should not change it for no reason. When upgrading the kernel version to a version including commit c8713d0b2312 ("mm: memcontrol: dump memory.stat during cgroup OOM"), these behavioral changes are noticed in cgroup v1. The fix is simple. Commit c8713d0b2312 ("mm: memcontrol: dump memory.stat during cgroup OOM") separated stats formatting from stats display for v2, to reuse the stats formatting in the OOM logs. Do the same for v1. Move the v2 specific formatting from memory_stat_format() to memcg_stat_format(), add memcg1_stat_format() for v1, and make memory_stat_format() select between them based on cgroup version. Since memory_stat_show() now works for both v1 & v2, drop memcg_stat_show(). Link: https://lkml.kernel.org/r/20230428132406.2540811-1-yosryahmed@google.com Link: https://lkml.kernel.org/r/20230428132406.2540811-3-yosryahmed@google.com Signed-off-by: Yosry Ahmed Acked-by: Shakeel Butt Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Muchun Song Cc: Petr Mladek Cc: Roman Gushchin Cc: Sergey Senozhatsky Cc: Steven Rostedt (Google) Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/memcontrol.c | 60 +++++++++++++++++++++++++++++++++------------------------ 1 file changed, 35 insertions(+), 25 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a79a47f7fc20..a05c53ab5238 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1580,7 +1580,7 @@ static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, return memcg_page_state(memcg, item) * memcg_page_state_unit(item); } -static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) +static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) { int i; @@ -1633,6 +1633,17 @@ static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) WARN_ON_ONCE(seq_buf_has_overflowed(s)); } +static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s); + +static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) +{ + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + memcg_stat_format(memcg, s); + else + memcg1_stat_format(memcg, s); + WARN_ON_ONCE(seq_buf_has_overflowed(s)); +} + #define K(x) ((x) << (PAGE_SHIFT-10)) /** * mem_cgroup_print_oom_context: Print OOM information relevant to @@ -4135,9 +4146,8 @@ static const unsigned int memcg1_events[] = { PGMAJFAULT, }; -static int memcg_stat_show(struct seq_file *m, void *v) +static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) { - struct mem_cgroup *memcg = mem_cgroup_from_seq(m); unsigned long memory, memsw; struct mem_cgroup *mi; unsigned int i; @@ -4152,18 +4162,18 @@ static int memcg_stat_show(struct seq_file *m, void *v) if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) continue; nr = memcg_page_state_local(memcg, memcg1_stats[i]); - seq_printf(m, "%s %lu\n", memcg1_stat_names[i], + seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i], nr * memcg_page_state_unit(memcg1_stats[i])); } for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) - seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), - memcg_events_local(memcg, memcg1_events[i])); + seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]), + memcg_events_local(memcg, memcg1_events[i])); for (i = 0; i < NR_LRU_LISTS; i++) - seq_printf(m, "%s %lu\n", lru_list_name(i), - memcg_page_state_local(memcg, NR_LRU_BASE + i) * - 
PAGE_SIZE); + seq_buf_printf(s, "%s %lu\n", lru_list_name(i), + memcg_page_state_local(memcg, NR_LRU_BASE + i) * + PAGE_SIZE); /* Hierarchical information */ memory = memsw = PAGE_COUNTER_MAX; @@ -4171,11 +4181,11 @@ static int memcg_stat_show(struct seq_file *m, void *v) memory = min(memory, READ_ONCE(mi->memory.max)); memsw = min(memsw, READ_ONCE(mi->memsw.max)); } - seq_printf(m, "hierarchical_memory_limit %llu\n", - (u64)memory * PAGE_SIZE); + seq_buf_printf(s, "hierarchical_memory_limit %llu\n", + (u64)memory * PAGE_SIZE); if (do_memsw_account()) - seq_printf(m, "hierarchical_memsw_limit %llu\n", - (u64)memsw * PAGE_SIZE); + seq_buf_printf(s, "hierarchical_memsw_limit %llu\n", + (u64)memsw * PAGE_SIZE); for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { unsigned long nr; @@ -4183,19 +4193,19 @@ static int memcg_stat_show(struct seq_file *m, void *v) if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) continue; nr = memcg_page_state(memcg, memcg1_stats[i]); - seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], + seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i], (u64)nr * memcg_page_state_unit(memcg1_stats[i])); } for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) - seq_printf(m, "total_%s %llu\n", - vm_event_name(memcg1_events[i]), - (u64)memcg_events(memcg, memcg1_events[i])); + seq_buf_printf(s, "total_%s %llu\n", + vm_event_name(memcg1_events[i]), + (u64)memcg_events(memcg, memcg1_events[i])); for (i = 0; i < NR_LRU_LISTS; i++) - seq_printf(m, "total_%s %llu\n", lru_list_name(i), - (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * - PAGE_SIZE); + seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i), + (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * + PAGE_SIZE); #ifdef CONFIG_DEBUG_VM { @@ -4210,12 +4220,10 @@ static int memcg_stat_show(struct seq_file *m, void *v) anon_cost += mz->lruvec.anon_cost; file_cost += mz->lruvec.file_cost; } - seq_printf(m, "anon_cost %lu\n", anon_cost); - seq_printf(m, "file_cost %lu\n", file_cost); + seq_buf_printf(s, "anon_cost %lu\n", anon_cost); + seq_buf_printf(s, "file_cost %lu\n", file_cost); } #endif - - return 0; } static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, @@ -5059,6 +5067,8 @@ static int mem_cgroup_slab_show(struct seq_file *m, void *p) } #endif +static int memory_stat_show(struct seq_file *m, void *v); + static struct cftype mem_cgroup_legacy_files[] = { { .name = "usage_in_bytes", @@ -5091,7 +5101,7 @@ static struct cftype mem_cgroup_legacy_files[] = { }, { .name = "stat", - .seq_show = memcg_stat_show, + .seq_show = memory_stat_show, }, { .name = "force_empty", -- cgit v1.2.3 From 8b9167cd9ef039d95b65ef9600a7507795173121 Mon Sep 17 00:00:00 2001 From: Wen Yang Date: Tue, 25 Apr 2023 23:52:35 +0800 Subject: mm: compaction: optimize compact_memory to comply with the admin-guide For the /proc/sys/vm/compact_memory file, the admin-guide states: When 1 is written to the file, all zones are compacted such that free memory is available in contiguous blocks where possible. This can be important for example in the allocation of huge pages although processes will also directly compact memory as required But it was not strictly followed, writing any value would cause all zones to be compacted. It has been slightly optimized to comply with the admin-guide. Enforce the 1 on the unlikely chance that the sysctl handler is ever extended to do something different. 
Commit ef4984384172 ("mm/compaction: remove unused variable sysctl_compact_memory") has also been optimized a bit here, as the declaration in the external header file has been eliminated, and sysctl_compact_memory also needs to be verified. [akpm@linux-foundation.org: add __read_mostly, per Mel] Link: https://lkml.kernel.org/r/tencent_DFF54DB2A60F3333F97D3F6B5441519B050A@qq.com Signed-off-by: Wen Yang Acked-by: Mel Gorman Cc: Oscar Salvador Cc: William Lam Cc: Pintu Kumar Cc: Fu Wei Signed-off-by: Andrew Morton --- mm/compaction.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index c8bcdea15f5f..5584fa5fa3d4 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1736,6 +1736,7 @@ static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNE */ static unsigned int __read_mostly sysctl_compaction_proactiveness = 20; static int sysctl_extfrag_threshold = 500; +static int __read_mostly sysctl_compact_memory; static inline void update_fast_start_pfn(struct compact_control *cc, unsigned long pfn) @@ -2780,6 +2781,15 @@ static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int static int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { + int ret; + + ret = proc_dointvec(table, write, buffer, length, ppos); + if (ret) + return ret; + + if (sysctl_compact_memory != 1) + return -EINVAL; + if (write) compact_nodes(); @@ -3095,7 +3105,7 @@ static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table, static struct ctl_table vm_compaction[] = { { .procname = "compact_memory", - .data = NULL, + .data = &sysctl_compact_memory, .maxlen = sizeof(int), .mode = 0200, .proc_handler = sysctl_compaction_handler, -- cgit v1.2.3 From 3c4322c94b9af33dc62e47cb80c057f9814fb595 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 24 Apr 2023 21:45:39 +0800 Subject: mm/page_alloc: drop the unnecessary pfn_valid() for start pfn __pageblock_pfn_to_page() currently performs both pfn_valid check and pfn_to_online_page(). The former one is redundant because the latter is a stronger check. Drop pfn_valid(). Link: https://lkml.kernel.org/r/c3868b58c6714c09a43440d7d02c7b4eed6e03f6.1682342634.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: David Hildenbrand Reviewed-by: "Huang, Ying" Acked-by: Michal Hocko Cc: Mel Gorman Cc: Mike Rapoport (IBM) Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 47421bedc12b..af9c995d3c1e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1521,7 +1521,7 @@ struct page *__pageblock_pfn_to_page(unsigned long start_pfn, /* end_pfn is one past the range we are checking */ end_pfn--; - if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn)) + if (!pfn_valid(end_pfn)) return NULL; start_page = pfn_to_online_page(start_pfn); -- cgit v1.2.3 From 190409caaf7e3eee6926943488e486590efe6fde Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Fri, 21 Apr 2023 17:40:17 +0000 Subject: memcg: flush stats non-atomically in mem_cgroup_wb_stats() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous patch moved the wb_over_bg_thresh()->mem_cgroup_wb_stats() code path in wb_writeback() outside the lock section. We no longer need to flush the stats atomically. Flush the stats non-atomically. 
Link: https://lkml.kernel.org/r/20230421174020.2994750-3-yosryahmed@google.com Signed-off-by: Yosry Ahmed Reviewed-by: Michal Koutný Acked-by: Shakeel Butt Acked-by: Tejun Heo Cc: Alexander Viro Cc: Christian Brauner Cc: Jan Kara Cc: Jens Axboe Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/memcontrol.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a05c53ab5238..929162c5f45f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4656,11 +4656,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); struct mem_cgroup *parent; - /* - * wb_writeback() takes a spinlock and calls - * wb_over_bg_thresh()->mem_cgroup_wb_stats(). Do not sleep. - */ - mem_cgroup_flush_stats_atomic(); + mem_cgroup_flush_stats(); *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); -- cgit v1.2.3 From f82a7a86dbfbd0ee81a4907ca41ba78bda1846f4 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Fri, 21 Apr 2023 17:40:18 +0000 Subject: memcg: calculate root usage from global state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, we approximate the root usage by adding the memcg stats for anon, file, and conditionally swap (for memsw). To read the memcg stats we need to invoke an rstat flush. rstat flushes can be expensive, they scale with the number of cpus and cgroups on the system. mem_cgroup_usage() is called by memcg_events()->mem_cgroup_threshold() with irqs disabled, so such an expensive operation with irqs disabled can cause problems. Instead, approximate the root usage from global state. This is not 100% accurate, but the root usage has always been ill-defined anyway. Link: https://lkml.kernel.org/r/20230421174020.2994750-4-yosryahmed@google.com Signed-off-by: Yosry Ahmed Reviewed-by: Michal Koutný Acked-by: Shakeel Butt Cc: Alexander Viro Cc: Christian Brauner Cc: Jan Kara Cc: Jens Axboe Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin Cc: Tejun Heo Signed-off-by: Andrew Morton --- mm/memcontrol.c | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 929162c5f45f..7474aa8e4026 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3710,27 +3710,13 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) if (mem_cgroup_is_root(memcg)) { /* - * We can reach here from irq context through: - * uncharge_batch() - * |--memcg_check_events() - * |--mem_cgroup_threshold() - * |--__mem_cgroup_threshold() - * |--mem_cgroup_usage - * - * rstat flushing is an expensive operation that should not be - * done from irq context; use stale stats in this case. - * Arguably, usage threshold events are not reliable on the root - * memcg anyway since its usage is ill-defined. - * - * Additionally, other call paths through memcg_check_events() - * disable irqs, so make sure we are flushing stats atomically. + * Approximate root's usage from global state. This isn't + * perfect, but the root usage was always an approximation. 
*/ - if (in_task()) - mem_cgroup_flush_stats_atomic(); - val = memcg_page_state(memcg, NR_FILE_PAGES) + - memcg_page_state(memcg, NR_ANON_MAPPED); + val = global_node_page_state(NR_FILE_PAGES) + + global_node_page_state(NR_ANON_MAPPED); if (swap) - val += memcg_page_state(memcg, MEMCG_SWAP); + val += total_swap_pages - get_nr_swap_pages(); } else { if (!swap) val = page_counter_read(&memcg->memory); -- cgit v1.2.3 From 35822fdae3bf509532b0954088070f17de51ff15 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Fri, 21 Apr 2023 17:40:19 +0000 Subject: memcg: remove mem_cgroup_flush_stats_atomic() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previous patches removed all callers of mem_cgroup_flush_stats_atomic(). Remove the function and simplify the code. Link: https://lkml.kernel.org/r/20230421174020.2994750-5-yosryahmed@google.com Signed-off-by: Yosry Ahmed Acked-by: Shakeel Butt Cc: Alexander Viro Cc: Christian Brauner Cc: Jan Kara Cc: Jens Axboe Cc: Johannes Weiner Cc: Michal Hocko Cc: Michal Koutný Cc: Muchun Song Cc: Roman Gushchin Cc: Tejun Heo Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 5 ----- mm/memcontrol.c | 24 +++++------------------- 2 files changed, 5 insertions(+), 24 deletions(-) (limited to 'mm') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 222d7370134c..00a88cf947e1 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1038,7 +1038,6 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, } void mem_cgroup_flush_stats(void); -void mem_cgroup_flush_stats_atomic(void); void mem_cgroup_flush_stats_ratelimited(void); void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, @@ -1537,10 +1536,6 @@ static inline void mem_cgroup_flush_stats(void) { } -static inline void mem_cgroup_flush_stats_atomic(void) -{ -} - static inline void mem_cgroup_flush_stats_ratelimited(void) { } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7474aa8e4026..2184a9c566f1 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -639,7 +639,7 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val) } } -static void do_flush_stats(bool atomic) +static void do_flush_stats(void) { /* * We always flush the entire tree, so concurrent flushers can just @@ -652,30 +652,16 @@ static void do_flush_stats(bool atomic) WRITE_ONCE(flush_next_time, jiffies_64 + 2*FLUSH_TIME); - if (atomic) - cgroup_rstat_flush_atomic(root_mem_cgroup->css.cgroup); - else - cgroup_rstat_flush(root_mem_cgroup->css.cgroup); + cgroup_rstat_flush(root_mem_cgroup->css.cgroup); atomic_set(&stats_flush_threshold, 0); atomic_set(&stats_flush_ongoing, 0); } -static bool should_flush_stats(void) -{ - return atomic_read(&stats_flush_threshold) > num_online_cpus(); -} - void mem_cgroup_flush_stats(void) { - if (should_flush_stats()) - do_flush_stats(false); -} - -void mem_cgroup_flush_stats_atomic(void) -{ - if (should_flush_stats()) - do_flush_stats(true); + if (atomic_read(&stats_flush_threshold) > num_online_cpus()) + do_flush_stats(); } void mem_cgroup_flush_stats_ratelimited(void) @@ -690,7 +676,7 @@ static void flush_memcg_stats_dwork(struct work_struct *w) * Always flush here so that flushing in latency-sensitive paths is * as cheap as possible. 
*/ - do_flush_stats(false); + do_flush_stats(); queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME); } -- cgit v1.2.3 From 857f21397f7113e4f764aa65fb0160eb7f404808 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Wed, 19 Apr 2023 03:07:38 +0000 Subject: memcg, oom: remove unnecessary check in mem_cgroup_oom_synchronize() mem_cgroup_oom_synchronize() is only used when the memcg oom handling is handed over to the edge of the #PF path. Since commit 29ef680ae7c2 ("memcg, oom: move out_of_memory back to the charge path") this is the case only when the kernel memcg oom killer is disabled (current->memcg_in_oom is only set if memcg->oom_kill_disable). Therefore a check for oom_kill_disable in mem_cgroup_oom_synchronize() is not required. Link: https://lkml.kernel.org/r/20230419030739.115845-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Suggested-by: Michal Hocko Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/memcontrol.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2184a9c566f1..e8aead97454b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2024,16 +2024,9 @@ bool mem_cgroup_oom_synchronize(bool handle) if (locked) mem_cgroup_oom_notify(memcg); - if (locked && !READ_ONCE(memcg->oom_kill_disable)) { - mem_cgroup_unmark_under_oom(memcg); - finish_wait(&memcg_oom_waitq, &owait.wait); - mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, - current->memcg_oom_order); - } else { - schedule(); - mem_cgroup_unmark_under_oom(memcg); - finish_wait(&memcg_oom_waitq, &owait.wait); - } + schedule(); + mem_cgroup_unmark_under_oom(memcg); + finish_wait(&memcg_oom_waitq, &owait.wait); if (locked) { mem_cgroup_oom_unlock(memcg); -- cgit v1.2.3 From 18b1d18bc2bd8f54e8df6bc8096185361a6d1b15 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Wed, 19 Apr 2023 03:07:39 +0000 Subject: memcg, oom: remove explicit wakeup in mem_cgroup_oom_synchronize() Before commit 29ef680ae7c2 ("memcg, oom: move out_of_memory back to the charge path"), all memcg oom killers were delayed to page fault path. And the explicit wakeup is used in this case: thread A: ... if (locked) { // complete oom-kill, hold the lock mem_cgroup_oom_unlock(memcg); ... } ... thread B: ... if (locked && !memcg->oom_kill_disable) { ... } else { schedule(); // can't acquire the lock ... } ... The reason is that thread A kicks off the OOM-killer, which leads to wakeups from the uncharges of the exiting task. But thread B is not guaranteed to see them if it enters the OOM path after the OOM kills but before thread A releases the lock. Now only oom_kill_disable case is handled from the #PF path. In that case it is userspace to trigger the wake up not the #PF path itself. All potential paths to free some charges are responsible to call memcg_oom_recover() , so the explicit wakeup is not needed in the mem_cgroup_oom_synchronize() path which doesn't release any memory itself. 
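The resulting division of labour can be pictured with a small userspace analog (a condition-variable sketch with made-up names, not the kernel's waitqueue code): the synchronize path only sleeps, and whichever path actually frees charges is the one that issues the wakeup via memcg_oom_recover().

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t oom_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t oom_waitq = PTHREAD_COND_INITIALIZER;
static bool under_oom = true;

/* Analog of mem_cgroup_oom_synchronize(): wait, but never wake anyone. */
static void oom_synchronize(void)
{
	pthread_mutex_lock(&oom_lock);
	while (under_oom)
		pthread_cond_wait(&oom_waitq, &oom_lock);
	pthread_mutex_unlock(&oom_lock);
}

/* Analog of a charge-freeing path ending in memcg_oom_recover(): the side
 * that releases memory is responsible for the wakeup. */
static void uncharge_and_recover(void)
{
	pthread_mutex_lock(&oom_lock);
	under_oom = false;
	pthread_cond_broadcast(&oom_waitq);
	pthread_mutex_unlock(&oom_lock);
}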
Link: https://lkml.kernel.org/r/20230419030739.115845-2-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Suggested-by: Michal Hocko Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/memcontrol.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e8aead97454b..d31fb1e2cb33 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2028,15 +2028,8 @@ bool mem_cgroup_oom_synchronize(bool handle) mem_cgroup_unmark_under_oom(memcg); finish_wait(&memcg_oom_waitq, &owait.wait); - if (locked) { + if (locked) mem_cgroup_oom_unlock(memcg); - /* - * There is no guarantee that an OOM-lock contender - * sees the wakeups triggered by the OOM kill - * uncharges. Wake any sleepers explicitly. - */ - memcg_oom_recover(memcg); - } cleanup: current->memcg_in_oom = NULL; css_put(&memcg->css); -- cgit v1.2.3 From ffcb5f5262b756a598eefb11e340bbd027cde037 Mon Sep 17 00:00:00 2001 From: Nhat Pham Date: Tue, 2 May 2023 18:36:06 -0700 Subject: workingset: refactor LRU refault to expose refault recency check Patch series "cachestat: a new syscall for page cache state of files", v13. There is currently no good way to query the page cache statistics of large files and directory trees. There is mincore(), but it scales poorly: the kernel writes out a lot of bitmap data that userspace has to aggregate, when the user really does not care about per-page information in that case. The user also needs to mmap and unmap each file as it goes along, which can be quite slow as well. Some use cases where this information could come in handy: * Allowing a database to decide whether to perform an index scan or direct table queries based on the in-memory cache state of the index. * Visibility into the writeback algorithm, for diagnosing performance issues. * Workload-aware writeback pacing: estimating IO fulfilled by the page cache (and IO to be done) within a range of a file, allowing for more frequent syncing when and where there is IO capacity, and batching when there is not. * Computing memory usage of large files/directory trees, analogous to the du tool for disk usage. More information about these use cases could be found in this thread: https://lore.kernel.org/lkml/20230315170934.GA97793@cmpxchg.org/ This series of patches introduces a new system call, cachestat, that summarizes the page cache statistics (number of cached pages, dirty pages, pages marked for writeback, evicted pages etc.) of a file, in a specified range of bytes. It also includes a selftest suite that tests some typical usage. Currently, the syscall is only wired in for the x86 architecture. This interface is inspired by past discussion and concerns with fincore, which has a similar design (and, as a result, similar issues) to mincore. Relevant links: https://lkml.indiana.edu/hypermail/linux/kernel/1302.1/04207.html https://lkml.indiana.edu/hypermail/linux/kernel/1302.1/04209.html I have also developed a small tool that computes the memory usage of files and directories, analogous to the du utility. Users can choose between mincore and cachestat (with cachestat exporting more information than mincore). 
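To make the comparison below concrete, this is roughly what the mincore() side of such a tool has to do per file (a simplified sketch, not the actual tool): map the file, request one byte per page, and aggregate the bitmap in userspace, which is exactly the per-page overhead cachestat is meant to avoid.

#include <fcntl.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

/* Count the resident page-cache pages of one file via mincore(). */
static long cached_pages(const char *path)
{
	long page = sysconf(_SC_PAGESIZE), resident = -1;
	int fd = open(path, O_RDONLY);
	struct stat st;

	if (fd < 0)
		return -1;
	if (fstat(fd, &st) == 0 && st.st_size > 0) {
		void *map = mmap(NULL, st.st_size, PROT_NONE, MAP_SHARED, fd, 0);
		long npages = (st.st_size + page - 1) / page;
		unsigned char *vec = malloc(npages);	/* one byte per page */

		if (map != MAP_FAILED && vec && mincore(map, st.st_size, vec) == 0) {
			resident = 0;
			for (long i = 0; i < npages; i++)
				resident += vec[i] & 1;
		}
		free(vec);
		if (map != MAP_FAILED)
			munmap(map, st.st_size);
	}
	close(fd);
	return resident;
}

cachestat() replaces the mapping and the per-page loop with a single call that returns aggregate counts for the requested byte range.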
To compare the performance of these two options, I benchmarked the tool on the root directory of a Meta's server machine, each for five runs: Using cachestat real -- Median: 33.377s, Average: 33.475s, Standard Deviation: 0.3602 user -- Median: 4.08s, Average: 4.1078s, Standard Deviation: 0.0742 sys -- Median: 28.823s, Average: 28.8866s, Standard Deviation: 0.2689 Using mincore: real -- Median: 102.352s, Average: 102.3442s, Standard Deviation: 0.2059 user -- Median: 10.149s, Average: 10.1482s, Standard Deviation: 0.0162 sys -- Median: 91.186s, Average: 91.2084s, Standard Deviation: 0.2046 I also ran both syscalls on a 2TB sparse file: Using cachestat: real 0m0.009s user 0m0.000s sys 0m0.009s Using mincore: real 0m37.510s user 0m2.934s sys 0m34.558s Very large files like this are the pathological case for mincore. In fact, to compute the stats for a single 2TB file, mincore takes as long as cachestat takes to compute the stats for the entire tree! This could easily happen inadvertently when we run it on subdirectories. Mincore is clearly not suitable for a general-purpose command line tool. Regarding security concerns, cachestat() should not pose any additional issues. The caller already has read permission to the file itself (since they need an fd to that file to call cachestat). This means that the caller can access the underlying data in its entirety, which is a much greater source of information (and as a result, a much greater security risk) than the cache status itself. The latest API change (in v13 of the patch series) is suggested by Jens Axboe. It allows for 64-bit length argument, even on 32-bit architecture (which is previously not possible due to the limit on the number of syscall arguments). Furthermore, it eliminates the need for compatibility handling - every user can use the same ABI. This patch (of 4): In preparation for computing recently evicted pages in cachestat, refactor workingset_refault and lru_gen_refault to expose a helper function that would test if an evicted page is recently evicted. 
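To make the proposed interface concrete, a minimal userspace sketch is shown below. It is not part of the series itself: it assumes the whole series is applied, that libc provides no wrapper yet (so it uses the raw syscall number wired up for x86 later in the series), and it simply mirrors the UAPI structs introduced there; the file and program names are illustrative only.

/* cachestat_demo.c - print page cache statistics for one file (illustrative).
 * Build: gcc -O2 -o cachestat_demo cachestat_demo.c
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_cachestat
#define __NR_cachestat 451	/* number wired up for x86 by this series */
#endif

/* Mirrors the structs this series adds to include/uapi/linux/mman.h. */
struct cachestat_range { uint64_t off; uint64_t len; };
struct cachestat {
	uint64_t nr_cache;
	uint64_t nr_dirty;
	uint64_t nr_writeback;
	uint64_t nr_evicted;
	uint64_t nr_recently_evicted;
};

int main(int argc, char **argv)
{
	struct cachestat_range range = { 0, 0 };	/* len == 0: query to end of file */
	struct cachestat cs;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* cachestat(fd, &range, &cs, flags); flags must be 0 for now */
	if (syscall(__NR_cachestat, fd, &range, &cs, 0)) {
		perror("cachestat");
		close(fd);
		return 1;
	}

	printf("cached %llu dirty %llu writeback %llu evicted %llu recently evicted %llu\n",
	       (unsigned long long)cs.nr_cache,
	       (unsigned long long)cs.nr_dirty,
	       (unsigned long long)cs.nr_writeback,
	       (unsigned long long)cs.nr_evicted,
	       (unsigned long long)cs.nr_recently_evicted);
	close(fd);
	return 0;
}

On a kernel without the series (or with the new config option disabled), the raw call fails with ENOSYS, so the sketch degrades gracefully.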
[penguin-kernel@I-love.SAKURA.ne.jp: add missing rcu_read_unlock() in lru_gen_refault()] Link: https://lkml.kernel.org/r/610781bc-cf11-fc89-a46f-87cb8235d439@I-love.SAKURA.ne.jp Link: https://lkml.kernel.org/r/20230503013608.2431726-1-nphamcs@gmail.com Link: https://lkml.kernel.org/r/20230503013608.2431726-2-nphamcs@gmail.com Signed-off-by: Nhat Pham Signed-off-by: Tetsuo Handa Acked-by: Johannes Weiner Cc: Brian Foster Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Cc: Michael Kerrisk Cc: Tetsuo Handa Signed-off-by: Andrew Morton --- include/linux/swap.h | 1 + mm/workingset.c | 150 ++++++++++++++++++++++++++++++++++----------------- 2 files changed, 103 insertions(+), 48 deletions(-) (limited to 'mm') diff --git a/include/linux/swap.h b/include/linux/swap.h index 3c69cb653cb9..b2128df5edea 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -368,6 +368,7 @@ static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry) } /* linux/mm/workingset.c */ +bool workingset_test_recent(void *shadow, bool file, bool *workingset); void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages); void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg); void workingset_refault(struct folio *folio, void *shadow); diff --git a/mm/workingset.c b/mm/workingset.c index 817758951886..90ae785d4c9c 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -255,6 +255,29 @@ static void *lru_gen_eviction(struct folio *folio) return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs); } +/* + * Tests if the shadow entry is for a folio that was recently evicted. + * Fills in @memcgid, @pglist_data, @token, @workingset with the values + * unpacked from shadow. + */ +static bool lru_gen_test_recent(void *shadow, bool file, int *memcgid, + struct pglist_data **pgdat, unsigned long *token, bool *workingset) +{ + struct mem_cgroup *eviction_memcg; + struct lruvec *lruvec; + struct lru_gen_folio *lrugen; + unsigned long min_seq; + + unpack_shadow(shadow, memcgid, pgdat, token, workingset); + eviction_memcg = mem_cgroup_from_id(*memcgid); + + lruvec = mem_cgroup_lruvec(eviction_memcg, *pgdat); + lrugen = &lruvec->lrugen; + + min_seq = READ_ONCE(lrugen->min_seq[file]); + return (*token >> LRU_REFS_WIDTH) == (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH)); +} + static void lru_gen_refault(struct folio *folio, void *shadow) { int hist, tier, refs; @@ -269,23 +292,22 @@ static void lru_gen_refault(struct folio *folio, void *shadow) int type = folio_is_file_lru(folio); int delta = folio_nr_pages(folio); - unpack_shadow(shadow, &memcg_id, &pgdat, &token, &workingset); - - if (pgdat != folio_pgdat(folio)) - return; - rcu_read_lock(); + if (!lru_gen_test_recent(shadow, type, &memcg_id, &pgdat, &token, + &workingset)) + goto unlock; + memcg = folio_memcg_rcu(folio); if (memcg_id != mem_cgroup_id(memcg)) goto unlock; + if (pgdat != folio_pgdat(folio)) + goto unlock; + lruvec = mem_cgroup_lruvec(memcg, pgdat); lrugen = &lruvec->lrugen; - min_seq = READ_ONCE(lrugen->min_seq[type]); - if ((token >> LRU_REFS_WIDTH) != (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH))) - goto unlock; hist = lru_hist_from_seq(min_seq); /* see the comment in folio_lru_refs() */ @@ -317,6 +339,12 @@ static void *lru_gen_eviction(struct folio *folio) return NULL; } +static bool lru_gen_test_recent(void *shadow, bool file, int *memcgid, + struct pglist_data **pgdat, unsigned long *token, bool *workingset) +{ + return false; +} + static void lru_gen_refault(struct folio *folio, void *shadow) { } @@ 
-385,42 +413,34 @@ void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg) } /** - * workingset_refault - Evaluate the refault of a previously evicted folio. - * @folio: The freshly allocated replacement folio. - * @shadow: Shadow entry of the evicted folio. - * - * Calculates and evaluates the refault distance of the previously - * evicted folio in the context of the node and the memcg whose memory - * pressure caused the eviction. + * workingset_test_recent - tests if the shadow entry is for a folio that was + * recently evicted. Also fills in @workingset with the value unpacked from + * shadow. + * @shadow: the shadow entry to be tested. + * @file: whether the corresponding folio is from the file lru. + * @workingset: where the workingset value unpacked from shadow should + * be stored. + * + * Return: true if the shadow is for a recently evicted folio; false otherwise. */ -void workingset_refault(struct folio *folio, void *shadow) +bool workingset_test_recent(void *shadow, bool file, bool *workingset) { - bool file = folio_is_file_lru(folio); struct mem_cgroup *eviction_memcg; struct lruvec *eviction_lruvec; unsigned long refault_distance; unsigned long workingset_size; - struct pglist_data *pgdat; - struct mem_cgroup *memcg; - unsigned long eviction; - struct lruvec *lruvec; unsigned long refault; - bool workingset; int memcgid; - long nr; + struct pglist_data *pgdat; + unsigned long eviction; - if (lru_gen_enabled()) { - lru_gen_refault(folio, shadow); - return; - } + if (lru_gen_enabled()) + return lru_gen_test_recent(shadow, file, &memcgid, &pgdat, &eviction, + workingset); - unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset); + unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset); eviction <<= bucket_order; - /* Flush stats (and potentially sleep) before holding RCU read lock */ - mem_cgroup_flush_stats_ratelimited(); - - rcu_read_lock(); /* * Look up the memcg associated with the stored ID. It might * have been deleted since the folio's eviction. @@ -439,7 +459,8 @@ void workingset_refault(struct folio *folio, void *shadow) */ eviction_memcg = mem_cgroup_from_id(memcgid); if (!mem_cgroup_disabled() && !eviction_memcg) - goto out; + return false; + eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat); refault = atomic_long_read(&eviction_lruvec->nonresident_age); @@ -461,20 +482,6 @@ void workingset_refault(struct folio *folio, void *shadow) */ refault_distance = (refault - eviction) & EVICTION_MASK; - /* - * The activation decision for this folio is made at the level - * where the eviction occurred, as that is where the LRU order - * during folio reclaim is being determined. - * - * However, the cgroup that will own the folio is the one that - * is actually experiencing the refault event. - */ - nr = folio_nr_pages(folio); - memcg = folio_memcg(folio); - pgdat = folio_pgdat(folio); - lruvec = mem_cgroup_lruvec(memcg, pgdat); - - mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr); /* * Compare the distance to the existing workingset size. We * don't activate pages that couldn't stay resident even if @@ -495,7 +502,54 @@ void workingset_refault(struct folio *folio, void *shadow) NR_INACTIVE_ANON); } } - if (refault_distance > workingset_size) + + return refault_distance <= workingset_size; +} + +/** + * workingset_refault - Evaluate the refault of a previously evicted folio. + * @folio: The freshly allocated replacement folio. + * @shadow: Shadow entry of the evicted folio. 
+ * + * Calculates and evaluates the refault distance of the previously + * evicted folio in the context of the node and the memcg whose memory + * pressure caused the eviction. + */ +void workingset_refault(struct folio *folio, void *shadow) +{ + bool file = folio_is_file_lru(folio); + struct pglist_data *pgdat; + struct mem_cgroup *memcg; + struct lruvec *lruvec; + bool workingset; + long nr; + + if (lru_gen_enabled()) { + lru_gen_refault(folio, shadow); + return; + } + + /* Flush stats (and potentially sleep) before holding RCU read lock */ + mem_cgroup_flush_stats_ratelimited(); + + rcu_read_lock(); + + /* + * The activation decision for this folio is made at the level + * where the eviction occurred, as that is where the LRU order + * during folio reclaim is being determined. + * + * However, the cgroup that will own the folio is the one that + * is actually experiencing the refault event. + */ + nr = folio_nr_pages(folio); + memcg = folio_memcg(folio); + pgdat = folio_pgdat(folio); + lruvec = mem_cgroup_lruvec(memcg, pgdat); + + mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr); + + if (!workingset_test_recent(shadow, file, &workingset)) goto out; folio_set_active(folio); -- cgit v1.2.3 From cf264e1329fb0307e044f7675849f9f38b44c11a Mon Sep 17 00:00:00 2001 From: Nhat Pham Date: Tue, 2 May 2023 18:36:07 -0700 Subject: cachestat: implement cachestat syscall There is currently no good way to query the page cache state of large file sets and directory trees. There is mincore(), but it scales poorly: the kernel writes out a lot of bitmap data that userspace has to aggregate, when the user really does not care about per-page information in that case. The user also needs to mmap and unmap each file as it goes along, which can be quite slow as well. Some use cases where this information could come in handy: * Allowing database to decide whether to perform an index scan or direct table queries based on the in-memory cache state of the index. * Visibility into the writeback algorithm, for performance issues diagnostic. * Workload-aware writeback pacing: estimating IO fulfilled by page cache (and IO to be done) within a range of a file, allowing for more frequent syncing when and where there is IO capacity, and batching when there is not. * Computing memory usage of large files/directory trees, analogous to the du tool for disk usage. More information about these use cases could be found in the following thread: https://lore.kernel.org/lkml/20230315170934.GA97793@cmpxchg.org/ This patch implements a new syscall that queries cache state of a file and summarizes the number of cached pages, number of dirty pages, number of pages marked for writeback, number of (recently) evicted pages, etc. in a given range. Currently, the syscall is only wired in for x86 architecture. NAME cachestat - query the page cache statistics of a file. SYNOPSIS #include struct cachestat_range { __u64 off; __u64 len; }; struct cachestat { __u64 nr_cache; __u64 nr_dirty; __u64 nr_writeback; __u64 nr_evicted; __u64 nr_recently_evicted; }; int cachestat(unsigned int fd, struct cachestat_range *cstat_range, struct cachestat *cstat, unsigned int flags); DESCRIPTION cachestat() queries the number of cached pages, number of dirty pages, number of pages marked for writeback, number of evicted pages, number of recently evicted pages, in the bytes range given by `off` and `len`. An evicted page is a page that is previously in the page cache but has been evicted since.
A page is recently evicted if its last eviction was recent enough that its reentry to the cache would indicate that it is actively being used by the system, and that there is memory pressure on the system. These values are returned in a cachestat struct, whose address is given by the `cstat` argument. The `off` and `len` arguments must be non-negative integers. If `len` > 0, the queried range is [`off`, `off` + `len`]. If `len` == 0, we will query in the range from `off` to the end of the file. The `flags` argument is unused for now, but is included for future extensibility. User should pass 0 (i.e no flag specified). Currently, hugetlbfs is not supported. Because the status of a page can change after cachestat() checks it but before it returns to the application, the returned values may contain stale information. RETURN VALUE On success, cachestat returns 0. On error, -1 is returned, and errno is set to indicate the error. ERRORS EFAULT cstat or cstat_args points to an invalid address. EINVAL invalid flags. EBADF invalid file descriptor. EOPNOTSUPP file descriptor is of a hugetlbfs file [nphamcs@gmail.com: replace rounddown logic with the existing helper] Link: https://lkml.kernel.org/r/20230504022044.3675469-1-nphamcs@gmail.com Link: https://lkml.kernel.org/r/20230503013608.2431726-3-nphamcs@gmail.com Signed-off-by: Nhat Pham Acked-by: Johannes Weiner Cc: Brian Foster Cc: Matthew Wilcox (Oracle) Cc: Michael Kerrisk Signed-off-by: Andrew Morton --- arch/x86/entry/syscalls/syscall_32.tbl | 1 + arch/x86/entry/syscalls/syscall_64.tbl | 1 + include/linux/syscalls.h | 5 + include/uapi/asm-generic/unistd.h | 5 +- include/uapi/linux/mman.h | 14 +++ init/Kconfig | 10 ++ kernel/sys_ni.c | 1 + mm/filemap.c | 171 +++++++++++++++++++++++++++++++++ 8 files changed, 207 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index 320480a8db4f..bc0a3c941b35 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -455,3 +455,4 @@ 448 i386 process_mrelease sys_process_mrelease 449 i386 futex_waitv sys_futex_waitv 450 i386 set_mempolicy_home_node sys_set_mempolicy_home_node +451 i386 cachestat sys_cachestat diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index c84d12608cd2..227538b0ce80 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -372,6 +372,7 @@ 448 common process_mrelease sys_process_mrelease 449 common futex_waitv sys_futex_waitv 450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat # # Due to a historical design error, certain syscalls are numbered differently diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 33a0ee3bcb2e..6648c07c4381 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -72,6 +72,8 @@ struct open_how; struct mount_attr; struct landlock_ruleset_attr; enum landlock_rule_type; +struct cachestat_range; +struct cachestat; #include #include @@ -1058,6 +1060,9 @@ asmlinkage long sys_memfd_secret(unsigned int flags); asmlinkage long sys_set_mempolicy_home_node(unsigned long start, unsigned long len, unsigned long home_node, unsigned long flags); +asmlinkage long sys_cachestat(unsigned int fd, + struct cachestat_range __user *cstat_range, + struct cachestat __user *cstat, unsigned int flags); /* * Architecture-specific system calls diff --git a/include/uapi/asm-generic/unistd.h 
b/include/uapi/asm-generic/unistd.h index 45fa180cc56a..cd639fae9086 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -886,8 +886,11 @@ __SYSCALL(__NR_futex_waitv, sys_futex_waitv) #define __NR_set_mempolicy_home_node 450 __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) +#define __NR_cachestat 451 +__SYSCALL(__NR_cachestat, sys_cachestat) + #undef __NR_syscalls -#define __NR_syscalls 451 +#define __NR_syscalls 452 /* * 32 bit systems traditionally used different diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h index f55bc680b5b0..a246e11988d5 100644 --- a/include/uapi/linux/mman.h +++ b/include/uapi/linux/mman.h @@ -4,6 +4,7 @@ #include #include +#include #define MREMAP_MAYMOVE 1 #define MREMAP_FIXED 2 @@ -41,4 +42,17 @@ #define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB #define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB +struct cachestat_range { + __u64 off; + __u64 len; +}; + +struct cachestat { + __u64 nr_cache; + __u64 nr_dirty; + __u64 nr_writeback; + __u64 nr_evicted; + __u64 nr_recently_evicted; +}; + #endif /* _UAPI_LINUX_MMAN_H */ diff --git a/init/Kconfig b/init/Kconfig index 32c24950c4ce..f7f65af4ee12 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1771,6 +1771,16 @@ config RSEQ If unsure, say Y. +config CACHESTAT_SYSCALL + bool "Enable cachestat() system call" if EXPERT + default y + help + Enable the cachestat system call, which queries the page cache + statistics of a file (number of cached pages, dirty pages, + pages marked for writeback, (recently) evicted pages). + + If unsure say Y here. + config DEBUG_RSEQ default n bool "Enabled debugging of rseq() system call" if EXPERT diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 860b2dcf3ac4..04bfb1e4d377 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -299,6 +299,7 @@ COND_SYSCALL(set_mempolicy); COND_SYSCALL(migrate_pages); COND_SYSCALL(move_pages); COND_SYSCALL(set_mempolicy_home_node); +COND_SYSCALL(cachestat); COND_SYSCALL(perf_event_open); COND_SYSCALL(accept4); diff --git a/mm/filemap.c b/mm/filemap.c index b4c9bd368b7e..2d3d70c64dfd 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -58,6 +59,8 @@ #include +#include "swap.h" + /* * Shared mappings implemented 30.11.1994. It's not fully working yet, * though. @@ -4119,3 +4122,171 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp) return try_to_free_buffers(folio); } EXPORT_SYMBOL(filemap_release_folio); + +#ifdef CONFIG_CACHESTAT_SYSCALL +/** + * filemap_cachestat() - compute the page cache statistics of a mapping + * @mapping: The mapping to compute the statistics for. + * @first_index: The starting page cache index. + * @last_index: The final page index (inclusive). + * @cs: the cachestat struct to write the result to. + * + * This will query the page cache statistics of a mapping in the + * page range of [first_index, last_index] (inclusive). The statistics + * queried include: number of dirty pages, number of pages marked for + * writeback, and the number of (recently) evicted pages. 
+ */ +static void filemap_cachestat(struct address_space *mapping, + pgoff_t first_index, pgoff_t last_index, struct cachestat *cs) +{ + XA_STATE(xas, &mapping->i_pages, first_index); + struct folio *folio; + + rcu_read_lock(); + xas_for_each(&xas, folio, last_index) { + unsigned long nr_pages; + pgoff_t folio_first_index, folio_last_index; + + if (xas_retry(&xas, folio)) + continue; + + if (xa_is_value(folio)) { + /* page is evicted */ + void *shadow = (void *)folio; + bool workingset; /* not used */ + int order = xa_get_order(xas.xa, xas.xa_index); + + nr_pages = 1 << order; + folio_first_index = round_down(xas.xa_index, 1 << order); + folio_last_index = folio_first_index + nr_pages - 1; + + /* Folios might straddle the range boundaries, only count covered pages */ + if (folio_first_index < first_index) + nr_pages -= first_index - folio_first_index; + + if (folio_last_index > last_index) + nr_pages -= folio_last_index - last_index; + + cs->nr_evicted += nr_pages; + +#ifdef CONFIG_SWAP /* implies CONFIG_MMU */ + if (shmem_mapping(mapping)) { + /* shmem file - in swap cache */ + swp_entry_t swp = radix_to_swp_entry(folio); + + shadow = get_shadow_from_swap_cache(swp); + } +#endif + if (workingset_test_recent(shadow, true, &workingset)) + cs->nr_recently_evicted += nr_pages; + + goto resched; + } + + nr_pages = folio_nr_pages(folio); + folio_first_index = folio_pgoff(folio); + folio_last_index = folio_first_index + nr_pages - 1; + + /* Folios might straddle the range boundaries, only count covered pages */ + if (folio_first_index < first_index) + nr_pages -= first_index - folio_first_index; + + if (folio_last_index > last_index) + nr_pages -= folio_last_index - last_index; + + /* page is in cache */ + cs->nr_cache += nr_pages; + + if (folio_test_dirty(folio)) + cs->nr_dirty += nr_pages; + + if (folio_test_writeback(folio)) + cs->nr_writeback += nr_pages; + +resched: + if (need_resched()) { + xas_pause(&xas); + cond_resched_rcu(); + } + } + rcu_read_unlock(); +} + +/* + * The cachestat(2) system call. + * + * cachestat() returns the page cache statistics of a file in the + * bytes range specified by `off` and `len`: number of cached pages, + * number of dirty pages, number of pages marked for writeback, + * number of evicted pages, and number of recently evicted pages. + * + * An evicted page is a page that is previously in the page cache + * but has been evicted since. A page is recently evicted if its last + * eviction was recent enough that its reentry to the cache would + * indicate that it is actively being used by the system, and that + * there is memory pressure on the system. + * + * `off` and `len` must be non-negative integers. If `len` > 0, + * the queried range is [`off`, `off` + `len`]. If `len` == 0, + * we will query in the range from `off` to the end of the file. + * + * The `flags` argument is unused for now, but is included for future + * extensibility. User should pass 0 (i.e no flag specified). + * + * Currently, hugetlbfs is not supported. + * + * Because the status of a page can change after cachestat() checks it + * but before it returns to the application, the returned values may + * contain stale information. 
+ * + * return values: + * zero - success + * -EFAULT - cstat or cstat_range points to an illegal address + * -EINVAL - invalid flags + * -EBADF - invalid file descriptor + * -EOPNOTSUPP - file descriptor is of a hugetlbfs file + */ +SYSCALL_DEFINE4(cachestat, unsigned int, fd, + struct cachestat_range __user *, cstat_range, + struct cachestat __user *, cstat, unsigned int, flags) +{ + struct fd f = fdget(fd); + struct address_space *mapping; + struct cachestat_range csr; + struct cachestat cs; + pgoff_t first_index, last_index; + + if (!f.file) + return -EBADF; + + if (copy_from_user(&csr, cstat_range, + sizeof(struct cachestat_range))) { + fdput(f); + return -EFAULT; + } + + /* hugetlbfs is not supported */ + if (is_file_hugepages(f.file)) { + fdput(f); + return -EOPNOTSUPP; + } + + if (flags != 0) { + fdput(f); + return -EINVAL; + } + + first_index = csr.off >> PAGE_SHIFT; + last_index = + csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT; + memset(&cs, 0, sizeof(struct cachestat)); + mapping = f.file->f_mapping; + filemap_cachestat(mapping, first_index, last_index, &cs); + fdput(f); + + if (copy_to_user(cstat, &cs, sizeof(struct cachestat))) + return -EFAULT; + + return 0; +} +#endif /* CONFIG_CACHESTAT_SYSCALL */ -- cgit v1.2.3 From 9f297db35667021925b67e27d81463e38445041c Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 26 Jan 2023 13:51:25 -0800 Subject: dmapool: create/destroy cleanup Set the 'empty' bool directly from the result of the function that determines its value instead of adding additional logic. Link: https://lkml.kernel.org/r/20230126215125.4069751-13-kbusch@meta.com Fixes: 2d55c16c0c54 ("dmapool: create/destroy cleanup") Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Cc: Matthew Wilcox Cc: Tony Battersby Signed-off-by: Andrew Morton --- mm/dmapool.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/dmapool.c b/mm/dmapool.c index d2b0f8fc9649..a151a21e571b 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -226,7 +226,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, { struct dma_pool *retval; size_t allocation; - bool empty = false; + bool empty; if (!dev) return NULL; @@ -276,8 +276,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, */ mutex_lock(&pools_reg_lock); mutex_lock(&pools_lock); - if (list_empty(&dev->dma_pools)) - empty = true; + empty = list_empty(&dev->dma_pools); list_add(&retval->pools, &dev->dma_pools); mutex_unlock(&pools_lock); if (empty) { @@ -361,7 +360,7 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) void dma_pool_destroy(struct dma_pool *pool) { struct dma_page *page, *tmp; - bool empty = false, busy = false; + bool empty, busy = false; if (unlikely(!pool)) return; @@ -369,8 +368,7 @@ void dma_pool_destroy(struct dma_pool *pool) mutex_lock(&pools_reg_lock); mutex_lock(&pools_lock); list_del(&pool->pools); - if (list_empty(&pool->dev->dma_pools)) - empty = true; + empty = list_empty(&pool->dev->dma_pools); mutex_unlock(&pools_lock); if (empty) device_remove_file(pool->dev, &dev_attr_pools); -- cgit v1.2.3 From 311150343e69ed4fe6380d2de74602a2a29df7c2 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Sat, 6 May 2023 15:05:25 +0100 Subject: mm/gup: add missing gup_must_unshare() check to gup_huge_pgd() All other instances of gup_huge_pXd() perform the unshare check, so update the PGD-specific function to do so as well. 
While checking pgd_write() might seem unusual, this function already performs such a check via pgd_access_permitted() so this is in line with the existing implementation. David said: : This change makes unshare handling across all GUP-fast variants : consistent, which is desirable as GUP-fast is complicated enough : already even when consistent. : : This function was the only one I seemed to have missed (or left out and : forgot why -- maybe because it's really dead code for now). The COW : selftest would identify the problem, so far there was no report. : Either the selftest wasn't run on corresponding architectures with that : hugetlb size, or that code is still dead code and unused by : architectures. : : the original commit(s) that added unsharing explain why we care about : these checks: : : a7f226604170acd6 ("mm/gup: trigger FAULT_FLAG_UNSHARE when R/O-pinning a possibly shared anonymous page") : 84209e87c6963f92 ("mm/gup: reliable R/O long-term pinning in COW mappings") Link: https://lkml.kernel.org/r/cb971ac8dd315df97058ea69442ecc007b9a364a.1683381545.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Suggested-by: David Hildenbrand Acked-by: David Hildenbrand Signed-off-by: Andrew Morton --- mm/gup.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index bbe416236593..e19b06a66229 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2755,6 +2755,11 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, return 0; } + if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { + gup_put_folio(folio, refs, flags); + return 0; + } + *nr += refs; folio_set_referenced(folio); return 1; -- cgit v1.2.3 From c963901197188189e85b4d768a059fe1bbc2a502 Mon Sep 17 00:00:00 2001 From: Pankaj Raghav Date: Wed, 10 May 2023 14:47:16 +0200 Subject: filemap: remove page_endio() page_endio() is not used anymore. Remove it. 
Link: https://lkml.kernel.org/r/20230510124716.73655-1-p.raghav@samsung.com Signed-off-by: Pankaj Raghav Reviewed-by: Christoph Hellwig Acked-by: Matthew Wilcox (Oracle) Cc: Luis Chamberlain Signed-off-by: Andrew Morton --- include/linux/pagemap.h | 2 -- mm/filemap.c | 30 ------------------------------ 2 files changed, 32 deletions(-) (limited to 'mm') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a56308a9d1a4..c1ae5ebc375f 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1078,8 +1078,6 @@ int filemap_migrate_folio(struct address_space *mapping, struct folio *dst, #else #define filemap_migrate_folio NULL #endif -void page_endio(struct page *page, bool is_write, int err); - void folio_end_private_2(struct folio *folio); void folio_wait_private_2(struct folio *folio); int folio_wait_private_2_killable(struct folio *folio); diff --git a/mm/filemap.c b/mm/filemap.c index 2d3d70c64dfd..570bc8c3db87 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1628,36 +1628,6 @@ void folio_end_writeback(struct folio *folio) } EXPORT_SYMBOL(folio_end_writeback); -/* - * After completing I/O on a page, call this routine to update the page - * flags appropriately - */ -void page_endio(struct page *page, bool is_write, int err) -{ - struct folio *folio = page_folio(page); - - if (!is_write) { - if (!err) { - folio_mark_uptodate(folio); - } else { - folio_clear_uptodate(folio); - folio_set_error(folio); - } - folio_unlock(folio); - } else { - if (err) { - struct address_space *mapping; - - folio_set_error(folio); - mapping = folio_mapping(folio); - if (mapping) - mapping_set_error(mapping, err); - } - folio_end_writeback(folio); - } -} -EXPORT_SYMBOL_GPL(page_endio); - /** * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it. 
* @folio: The folio to lock -- cgit v1.2.3 From 501350459b1fe7a0da6d089484fa112ff48f5252 Mon Sep 17 00:00:00 2001 From: Rick Wertenbroek Date: Wed, 10 May 2023 11:07:57 +0200 Subject: mm: memory_hotplug: fix format string in warnings The format string in __add_pages and __remove_pages has a typo and prints e.g., "Misaligned __add_pages start: 0xfc605 end: #fc609" instead of "Misaligned __add_pages start: 0xfc605 end: 0xfc609" Fix "#%lx" => "%#lx" Link: https://lkml.kernel.org/r/20230510090758.3537242-1-rick.wertenbroek@gmail.com Signed-off-by: Rick Wertenbroek Reviewed-by: David Hildenbrand Reviewed-by: Mike Rapoport (IBM) Cc: Oscar Salvador Signed-off-by: Andrew Morton --- mm/memory_hotplug.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 8e0fa209d533..9061ac69b1b6 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -325,7 +325,7 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, } if (check_pfn_span(pfn, nr_pages)) { - WARN(1, "Misaligned %s start: %#lx end: #%lx\n", __func__, pfn, pfn + nr_pages - 1); + WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1); return -EINVAL; } @@ -525,7 +525,7 @@ void __remove_pages(unsigned long pfn, unsigned long nr_pages, map_offset = vmem_altmap_offset(altmap); if (check_pfn_span(pfn, nr_pages)) { - WARN(1, "Misaligned %s start: %#lx end: #%lx\n", __func__, pfn, pfn + nr_pages - 1); + WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1); return; } -- cgit v1.2.3 From 124abced647306aa3badb5d472c3616de23f180a Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Wed, 10 May 2023 11:18:29 +0800 Subject: migrate_pages_batch: simplify retrying and failure counting of large folios After recent changes to the retrying and failure counting in migrate_pages_batch(), it was found that it is unnecessary to count retries and failures for normal, large, and THP folios separately, because the retry and failure numbers of large folios are not used directly. So, in this patch, we simplify the counting by tracking retries and failures of normal and large folios together, which also reduces the line count. Previously, migrate_pages_batch() needed to track whether the source folio was large/THP before splitting, so is_large was used to cache the folio_test_large() result. Now that retries and failures of large folios are no longer counted separately (only those of THP folios are), that variable is not needed any more, so is_large is removed to simplify the code. This is just code cleanup; no functional changes are expected. 
Link: https://lkml.kernel.org/r/20230510031829.11513-1-ying.huang@intel.com Signed-off-by: "Huang, Ying" Reviewed-by: Xin Hao Reviewed-by: Zi Yan Reviewed-by: Alistair Popple Cc: Yang Shi Cc: Baolin Wang Cc: Oscar Salvador Signed-off-by: Andrew Morton --- mm/migrate.c | 112 +++++++++++++++++++---------------------------------------- 1 file changed, 36 insertions(+), 76 deletions(-) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index 43d818fd1afd..cb292d2a90ce 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1617,13 +1617,10 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, int nr_pass) { int retry = 1; - int large_retry = 1; int thp_retry = 1; int nr_failed = 0; int nr_retry_pages = 0; - int nr_large_failed = 0; int pass = 0; - bool is_large = false; bool is_thp = false; struct folio *folio, *folio2, *dst = NULL, *dst2; int rc, rc_saved = 0, nr_pages; @@ -1634,20 +1631,13 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC && !list_empty(from) && !list_is_singular(from)); - for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) { + for (pass = 0; pass < nr_pass && retry; pass++) { retry = 0; - large_retry = 0; thp_retry = 0; nr_retry_pages = 0; list_for_each_entry_safe(folio, folio2, from, lru) { - /* - * Large folio statistics is based on the source large - * folio. Capture required information that might get - * lost during migration. - */ - is_large = folio_test_large(folio); - is_thp = is_large && folio_test_pmd_mappable(folio); + is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); nr_pages = folio_nr_pages(folio); cond_resched(); @@ -1663,7 +1653,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, * list is processed. */ if (!thp_migration_supported() && is_thp) { - nr_large_failed++; + nr_failed++; stats->nr_thp_failed++; if (!try_split_folio(folio, split_folios)) { stats->nr_thp_split++; @@ -1691,38 +1681,33 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, * When memory is low, don't bother to try to migrate * other folios, move unmapped folios, then exit. */ - if (is_large) { - nr_large_failed++; - stats->nr_thp_failed += is_thp; - /* Large folio NUMA faulting doesn't split to retry. */ - if (!nosplit) { - int ret = try_split_folio(folio, split_folios); - - if (!ret) { - stats->nr_thp_split += is_thp; - break; - } else if (reason == MR_LONGTERM_PIN && - ret == -EAGAIN) { - /* - * Try again to split large folio to - * mitigate the failure of longterm pinning. - */ - large_retry++; - thp_retry += is_thp; - nr_retry_pages += nr_pages; - /* Undo duplicated failure counting. */ - nr_large_failed--; - stats->nr_thp_failed -= is_thp; - break; - } + nr_failed++; + stats->nr_thp_failed += is_thp; + /* Large folio NUMA faulting doesn't split to retry. */ + if (folio_test_large(folio) && !nosplit) { + int ret = try_split_folio(folio, split_folios); + + if (!ret) { + stats->nr_thp_split += is_thp; + break; + } else if (reason == MR_LONGTERM_PIN && + ret == -EAGAIN) { + /* + * Try again to split large folio to + * mitigate the failure of longterm pinning. + */ + retry++; + thp_retry += is_thp; + nr_retry_pages += nr_pages; + /* Undo duplicated failure counting. 
*/ + nr_failed--; + stats->nr_thp_failed -= is_thp; + break; } - } else { - nr_failed++; } stats->nr_failed_pages += nr_pages + nr_retry_pages; /* nr_failed isn't updated for not used */ - nr_large_failed += large_retry; stats->nr_thp_failed += thp_retry; rc_saved = rc; if (list_empty(&unmap_folios)) @@ -1730,12 +1715,8 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, else goto move; case -EAGAIN: - if (is_large) { - large_retry++; - thp_retry += is_thp; - } else { - retry++; - } + retry++; + thp_retry += is_thp; nr_retry_pages += nr_pages; break; case MIGRATEPAGE_SUCCESS: @@ -1753,20 +1734,14 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, * removed from migration folio list and not * retried in the next outer loop. */ - if (is_large) { - nr_large_failed++; - stats->nr_thp_failed += is_thp; - } else { - nr_failed++; - } - + nr_failed++; + stats->nr_thp_failed += is_thp; stats->nr_failed_pages += nr_pages; break; } } } nr_failed += retry; - nr_large_failed += large_retry; stats->nr_thp_failed += thp_retry; stats->nr_failed_pages += nr_retry_pages; move: @@ -1774,17 +1749,15 @@ move: try_to_unmap_flush(); retry = 1; - for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) { + for (pass = 0; pass < nr_pass && retry; pass++) { retry = 0; - large_retry = 0; thp_retry = 0; nr_retry_pages = 0; dst = list_first_entry(&dst_folios, struct folio, lru); dst2 = list_next_entry(dst, lru); list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) { - is_large = folio_test_large(folio); - is_thp = is_large && folio_test_pmd_mappable(folio); + is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); nr_pages = folio_nr_pages(folio); cond_resched(); @@ -1800,12 +1773,8 @@ move: */ switch(rc) { case -EAGAIN: - if (is_large) { - large_retry++; - thp_retry += is_thp; - } else { - retry++; - } + retry++; + thp_retry += is_thp; nr_retry_pages += nr_pages; break; case MIGRATEPAGE_SUCCESS: @@ -1813,13 +1782,8 @@ move: stats->nr_thp_succeeded += is_thp; break; default: - if (is_large) { - nr_large_failed++; - stats->nr_thp_failed += is_thp; - } else { - nr_failed++; - } - + nr_failed++; + stats->nr_thp_failed += is_thp; stats->nr_failed_pages += nr_pages; break; } @@ -1828,14 +1792,10 @@ move: } } nr_failed += retry; - nr_large_failed += large_retry; stats->nr_thp_failed += thp_retry; stats->nr_failed_pages += nr_retry_pages; - if (rc_saved) - rc = rc_saved; - else - rc = nr_failed + nr_large_failed; + rc = rc_saved ? : nr_failed; out: /* Cleanup remaining folios */ dst = list_first_entry(&dst_folios, struct folio, lru); -- cgit v1.2.3 From fb646a4cd3f0ff27d19911bef7b6622263723df6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 9 May 2023 16:57:20 +0200 Subject: kasan: add kasan_tag_mismatch prototype The kasan sw-tags implementation contains one function that is only called from assembler and has no prototype in a header. This causes a W=1 warning: mm/kasan/sw_tags.c:171:6: warning: no previous prototype for 'kasan_tag_mismatch' [-Wmissing-prototypes] 171 | void kasan_tag_mismatch(unsigned long addr, unsigned long access_info, Add a prototype in the local header to get a clean build. 
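The warning class is easy to reproduce outside the kernel. As a rough standalone illustration (not kernel code; the function name here is made up), -Wmissing-prototypes complains about any non-static definition that has no prior declaration, which is exactly the situation for a function that is only called from assembly:

/* proto_demo.c - compile with: gcc -Wmissing-prototypes -c proto_demo.c */

/*
 * Without the declaration below (normally living in a header), gcc emits:
 *   warning: no previous prototype for 'called_from_asm' [-Wmissing-prototypes]
 * The definition cannot be made static because the (hypothetical) assembly
 * caller still needs an external symbol.
 */
void called_from_asm(unsigned long addr, unsigned long access_info,
		     unsigned long ret_ip);

void called_from_asm(unsigned long addr, unsigned long access_info,
		     unsigned long ret_ip)
{
	/* The body is irrelevant to the warning; only the declaration matters. */
	(void)addr;
	(void)access_info;
	(void)ret_ip;
}

Moving such declarations into a header that the defining file includes is the usual fix, which is what this patch does for kasan_tag_mismatch().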
Link: https://lkml.kernel.org/r/20230509145735.9263-1-arnd@kernel.org Signed-off-by: Arnd Bergmann Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Cc: Vincenzo Frascino Cc: Signed-off-by: Andrew Morton --- mm/kasan/kasan.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index f5e4f5f2ba20..cd846ca34f44 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -646,4 +646,7 @@ void *__hwasan_memset(void *addr, int c, size_t len); void *__hwasan_memmove(void *dest, const void *src, size_t len); void *__hwasan_memcpy(void *dest, const void *src, size_t len); +void kasan_tag_mismatch(unsigned long addr, unsigned long access_info, + unsigned long ret_ip); + #endif /* __MM_KASAN_KASAN_H */ -- cgit v1.2.3 From bb6e04a173f06e51819a4bb512e127dfbc50dcfa Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 9 May 2023 16:57:21 +0200 Subject: kasan: use internal prototypes matching gcc-13 builtins gcc-13 warns about function definitions for builtin interfaces that have a different prototype, e.g.: In file included from kasan_test.c:31: kasan.h:574:6: error: conflicting types for built-in function '__asan_register_globals'; expected 'void(void *, long int)' [-Werror=builtin-declaration-mismatch] 574 | void __asan_register_globals(struct kasan_global *globals, size_t size); kasan.h:577:6: error: conflicting types for built-in function '__asan_alloca_poison'; expected 'void(void *, long int)' [-Werror=builtin-declaration-mismatch] 577 | void __asan_alloca_poison(unsigned long addr, size_t size); kasan.h:580:6: error: conflicting types for built-in function '__asan_load1'; expected 'void(void *)' [-Werror=builtin-declaration-mismatch] 580 | void __asan_load1(unsigned long addr); kasan.h:581:6: error: conflicting types for built-in function '__asan_store1'; expected 'void(void *)' [-Werror=builtin-declaration-mismatch] 581 | void __asan_store1(unsigned long addr); kasan.h:643:6: error: conflicting types for built-in function '__hwasan_tag_memory'; expected 'void(void *, unsigned char, long int)' [-Werror=builtin-declaration-mismatch] 643 | void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size); The two problems are: - Addresses are passes as 'unsigned long' in the kernel, but gcc-13 expects a 'void *'. - sizes meant to use a signed ssize_t rather than size_t. Change all the prototypes to match these. Using 'void *' consistently for addresses gets rid of a couple of type casts, so push that down to the leaf functions where possible. This now passes all randconfig builds on arm, arm64 and x86, but I have not tested it on the other architectures that support kasan, since they tend to fail randconfig builds in other ways. This might fail if any of the 32-bit architectures expect a 'long' instead of 'int' for the size argument. The __asan_allocas_unpoison() function prototype is somewhat weird, since it uses a pointer for 'stack_top' and an size_t for 'stack_bottom'. This looks like it is meant to be 'addr' and 'size' like the others, but the implementation clearly treats them as 'top' and 'bottom'. 
Link: https://lkml.kernel.org/r/20230509145735.9263-2-arnd@kernel.org Signed-off-by: Arnd Bergmann Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Cc: Vincenzo Frascino Cc: Signed-off-by: Andrew Morton --- arch/arm64/kernel/traps.c | 2 +- arch/arm64/mm/fault.c | 2 +- include/linux/kasan.h | 2 +- mm/kasan/common.c | 2 +- mm/kasan/generic.c | 72 ++++++++++----------- mm/kasan/kasan.h | 160 +++++++++++++++++++++++----------------------- mm/kasan/report.c | 17 +++-- mm/kasan/report_generic.c | 12 ++-- mm/kasan/report_hw_tags.c | 2 +- mm/kasan/report_sw_tags.c | 2 +- mm/kasan/shadow.c | 36 +++++------ mm/kasan/sw_tags.c | 20 +++--- 12 files changed, 164 insertions(+), 165 deletions(-) (limited to 'mm') diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 4bb1b8f47298..7b889445e5c6 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -1044,7 +1044,7 @@ static int kasan_handler(struct pt_regs *regs, unsigned long esr) bool recover = esr & KASAN_ESR_RECOVER; bool write = esr & KASAN_ESR_WRITE; size_t size = KASAN_ESR_SIZE(esr); - u64 addr = regs->regs[0]; + void *addr = (void *)regs->regs[0]; u64 pc = regs->pc; kasan_report(addr, size, write, pc); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index cb21ccd7940d..d5047eef4295 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -317,7 +317,7 @@ static void report_tag_fault(unsigned long addr, unsigned long esr, * find out access size. */ bool is_write = !!(esr & ESR_ELx_WNR); - kasan_report(addr, 0, is_write, regs->pc); + kasan_report((void *)addr, 0, is_write, regs->pc); } #else /* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index f7ef70661ce2..819b6bc8ac08 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -343,7 +343,7 @@ static inline void *kasan_reset_tag(const void *addr) * @is_write: whether the bad access is a write or a read * @ip: instruction pointer for the accessibility check or the bad access itself */ -bool kasan_report(unsigned long addr, size_t size, +bool kasan_report(const void *addr, size_t size, bool is_write, unsigned long ip); #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ diff --git a/mm/kasan/common.c b/mm/kasan/common.c index b376a5d055e5..256930da578a 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -445,7 +445,7 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag bool __kasan_check_byte(const void *address, unsigned long ip) { if (!kasan_byte_accessible(address)) { - kasan_report((unsigned long)address, 1, false, ip); + kasan_report(address, 1, false, ip); return false; } return true; diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index e5eef670735e..224d161a5a22 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -40,39 +40,39 @@ * depending on memory access size X. 
*/ -static __always_inline bool memory_is_poisoned_1(unsigned long addr) +static __always_inline bool memory_is_poisoned_1(const void *addr) { - s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr); + s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr); if (unlikely(shadow_value)) { - s8 last_accessible_byte = addr & KASAN_GRANULE_MASK; + s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK; return unlikely(last_accessible_byte >= shadow_value); } return false; } -static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr, +static __always_inline bool memory_is_poisoned_2_4_8(const void *addr, unsigned long size) { - u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr); + u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr); /* * Access crosses 8(shadow size)-byte boundary. Such access maps * into 2 shadow bytes, so we need to check them both. */ - if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1)) + if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1)) return *shadow_addr || memory_is_poisoned_1(addr + size - 1); return memory_is_poisoned_1(addr + size - 1); } -static __always_inline bool memory_is_poisoned_16(unsigned long addr) +static __always_inline bool memory_is_poisoned_16(const void *addr) { - u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr); + u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr); /* Unaligned 16-bytes access maps into 3 shadow bytes. */ - if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE))) + if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE))) return *shadow_addr || memory_is_poisoned_1(addr + 15); return *shadow_addr; @@ -120,26 +120,25 @@ static __always_inline unsigned long memory_is_nonzero(const void *start, return bytes_is_nonzero(start, (end - start) % 8); } -static __always_inline bool memory_is_poisoned_n(unsigned long addr, - size_t size) +static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size) { unsigned long ret; - ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr), - kasan_mem_to_shadow((void *)addr + size - 1) + 1); + ret = memory_is_nonzero(kasan_mem_to_shadow(addr), + kasan_mem_to_shadow(addr + size - 1) + 1); if (unlikely(ret)) { - unsigned long last_byte = addr + size - 1; - s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte); + const void *last_byte = addr + size - 1; + s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte); if (unlikely(ret != (unsigned long)last_shadow || - ((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow))) + (((long)last_byte & KASAN_GRANULE_MASK) >= *last_shadow))) return true; } return false; } -static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size) +static __always_inline bool memory_is_poisoned(const void *addr, size_t size) { if (__builtin_constant_p(size)) { switch (size) { @@ -159,7 +158,7 @@ static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size) return memory_is_poisoned_n(addr, size); } -static __always_inline bool check_region_inline(unsigned long addr, +static __always_inline bool check_region_inline(const void *addr, size_t size, bool write, unsigned long ret_ip) { @@ -172,7 +171,7 @@ static __always_inline bool check_region_inline(unsigned long addr, if (unlikely(addr + size < addr)) return !kasan_report(addr, size, write, ret_ip); - if (unlikely(!addr_has_metadata((void *)addr))) + if (unlikely(!addr_has_metadata(addr))) return !kasan_report(addr, size, write, ret_ip); if (likely(!memory_is_poisoned(addr, 
size))) @@ -181,7 +180,7 @@ static __always_inline bool check_region_inline(unsigned long addr, return !kasan_report(addr, size, write, ret_ip); } -bool kasan_check_range(unsigned long addr, size_t size, bool write, +bool kasan_check_range(const void *addr, size_t size, bool write, unsigned long ret_ip) { return check_region_inline(addr, size, write, ret_ip); @@ -221,36 +220,37 @@ static void register_global(struct kasan_global *global) KASAN_GLOBAL_REDZONE, false); } -void __asan_register_globals(struct kasan_global *globals, size_t size) +void __asan_register_globals(void *ptr, ssize_t size) { int i; + struct kasan_global *globals = ptr; for (i = 0; i < size; i++) register_global(&globals[i]); } EXPORT_SYMBOL(__asan_register_globals); -void __asan_unregister_globals(struct kasan_global *globals, size_t size) +void __asan_unregister_globals(void *ptr, ssize_t size) { } EXPORT_SYMBOL(__asan_unregister_globals); #define DEFINE_ASAN_LOAD_STORE(size) \ - void __asan_load##size(unsigned long addr) \ + void __asan_load##size(void *addr) \ { \ check_region_inline(addr, size, false, _RET_IP_); \ } \ EXPORT_SYMBOL(__asan_load##size); \ __alias(__asan_load##size) \ - void __asan_load##size##_noabort(unsigned long); \ + void __asan_load##size##_noabort(void *); \ EXPORT_SYMBOL(__asan_load##size##_noabort); \ - void __asan_store##size(unsigned long addr) \ + void __asan_store##size(void *addr) \ { \ check_region_inline(addr, size, true, _RET_IP_); \ } \ EXPORT_SYMBOL(__asan_store##size); \ __alias(__asan_store##size) \ - void __asan_store##size##_noabort(unsigned long); \ + void __asan_store##size##_noabort(void *); \ EXPORT_SYMBOL(__asan_store##size##_noabort) DEFINE_ASAN_LOAD_STORE(1); @@ -259,24 +259,24 @@ DEFINE_ASAN_LOAD_STORE(4); DEFINE_ASAN_LOAD_STORE(8); DEFINE_ASAN_LOAD_STORE(16); -void __asan_loadN(unsigned long addr, size_t size) +void __asan_loadN(void *addr, ssize_t size) { kasan_check_range(addr, size, false, _RET_IP_); } EXPORT_SYMBOL(__asan_loadN); __alias(__asan_loadN) -void __asan_loadN_noabort(unsigned long, size_t); +void __asan_loadN_noabort(void *, ssize_t); EXPORT_SYMBOL(__asan_loadN_noabort); -void __asan_storeN(unsigned long addr, size_t size) +void __asan_storeN(void *addr, ssize_t size) { kasan_check_range(addr, size, true, _RET_IP_); } EXPORT_SYMBOL(__asan_storeN); __alias(__asan_storeN) -void __asan_storeN_noabort(unsigned long, size_t); +void __asan_storeN_noabort(void *, ssize_t); EXPORT_SYMBOL(__asan_storeN_noabort); /* to shut up compiler complaints */ @@ -284,7 +284,7 @@ void __asan_handle_no_return(void) {} EXPORT_SYMBOL(__asan_handle_no_return); /* Emitted by compiler to poison alloca()ed objects. */ -void __asan_alloca_poison(unsigned long addr, size_t size) +void __asan_alloca_poison(void *addr, ssize_t size) { size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE); size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) - @@ -295,7 +295,7 @@ void __asan_alloca_poison(unsigned long addr, size_t size) KASAN_ALLOCA_REDZONE_SIZE); const void *right_redzone = (const void *)(addr + rounded_up_size); - WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE)); + WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE)); kasan_unpoison((const void *)(addr + rounded_down_size), size - rounded_down_size, false); @@ -307,18 +307,18 @@ void __asan_alloca_poison(unsigned long addr, size_t size) EXPORT_SYMBOL(__asan_alloca_poison); /* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. 
*/ -void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom) +void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom) { - if (unlikely(!stack_top || stack_top > stack_bottom)) + if (unlikely(!stack_top || stack_top > (void *)stack_bottom)) return; - kasan_unpoison(stack_top, stack_bottom - stack_top, false); + kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false); } EXPORT_SYMBOL(__asan_allocas_unpoison); /* Emitted by the compiler to [un]poison local variables. */ #define DEFINE_ASAN_SET_SHADOW(byte) \ - void __asan_set_shadow_##byte(const void *addr, size_t size) \ + void __asan_set_shadow_##byte(const void *addr, ssize_t size) \ { \ __memset((void *)addr, 0x##byte, size); \ } \ diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index cd846ca34f44..b799f11e45dc 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -198,13 +198,13 @@ enum kasan_report_type { struct kasan_report_info { /* Filled in by kasan_report_*(). */ enum kasan_report_type type; - void *access_addr; + const void *access_addr; size_t access_size; bool is_write; unsigned long ip; /* Filled in by the common reporting code. */ - void *first_bad_addr; + const void *first_bad_addr; struct kmem_cache *cache; void *object; size_t alloc_size; @@ -311,7 +311,7 @@ static __always_inline bool addr_has_metadata(const void *addr) * @ret_ip: return address * @return: true if access was valid, false if invalid */ -bool kasan_check_range(unsigned long addr, size_t size, bool write, +bool kasan_check_range(const void *addr, size_t size, bool write, unsigned long ret_ip); #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ @@ -323,7 +323,7 @@ static __always_inline bool addr_has_metadata(const void *addr) #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ -void *kasan_find_first_bad_addr(void *addr, size_t size); +const void *kasan_find_first_bad_addr(const void *addr, size_t size); size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache); void kasan_complete_mode_report_info(struct kasan_report_info *info); void kasan_metadata_fetch_row(char *buffer, void *row); @@ -346,7 +346,7 @@ void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object); static inline void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object) { } #endif -bool kasan_report(unsigned long addr, size_t size, +bool kasan_report(const void *addr, size_t size, bool is_write, unsigned long ip); void kasan_report_invalid_free(void *object, unsigned long ip, enum kasan_report_type type); @@ -571,82 +571,82 @@ void kasan_restore_multi_shot(bool enabled); */ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark); -void __asan_register_globals(struct kasan_global *globals, size_t size); -void __asan_unregister_globals(struct kasan_global *globals, size_t size); +void __asan_register_globals(void *globals, ssize_t size); +void __asan_unregister_globals(void *globals, ssize_t size); void __asan_handle_no_return(void); -void __asan_alloca_poison(unsigned long addr, size_t size); -void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom); - -void __asan_load1(unsigned long addr); -void __asan_store1(unsigned long addr); -void __asan_load2(unsigned long addr); -void __asan_store2(unsigned long addr); -void __asan_load4(unsigned long addr); -void __asan_store4(unsigned long addr); -void __asan_load8(unsigned long addr); -void __asan_store8(unsigned long addr); -void __asan_load16(unsigned long addr); -void __asan_store16(unsigned long addr); -void 
__asan_loadN(unsigned long addr, size_t size); -void __asan_storeN(unsigned long addr, size_t size); - -void __asan_load1_noabort(unsigned long addr); -void __asan_store1_noabort(unsigned long addr); -void __asan_load2_noabort(unsigned long addr); -void __asan_store2_noabort(unsigned long addr); -void __asan_load4_noabort(unsigned long addr); -void __asan_store4_noabort(unsigned long addr); -void __asan_load8_noabort(unsigned long addr); -void __asan_store8_noabort(unsigned long addr); -void __asan_load16_noabort(unsigned long addr); -void __asan_store16_noabort(unsigned long addr); -void __asan_loadN_noabort(unsigned long addr, size_t size); -void __asan_storeN_noabort(unsigned long addr, size_t size); - -void __asan_report_load1_noabort(unsigned long addr); -void __asan_report_store1_noabort(unsigned long addr); -void __asan_report_load2_noabort(unsigned long addr); -void __asan_report_store2_noabort(unsigned long addr); -void __asan_report_load4_noabort(unsigned long addr); -void __asan_report_store4_noabort(unsigned long addr); -void __asan_report_load8_noabort(unsigned long addr); -void __asan_report_store8_noabort(unsigned long addr); -void __asan_report_load16_noabort(unsigned long addr); -void __asan_report_store16_noabort(unsigned long addr); -void __asan_report_load_n_noabort(unsigned long addr, size_t size); -void __asan_report_store_n_noabort(unsigned long addr, size_t size); - -void __asan_set_shadow_00(const void *addr, size_t size); -void __asan_set_shadow_f1(const void *addr, size_t size); -void __asan_set_shadow_f2(const void *addr, size_t size); -void __asan_set_shadow_f3(const void *addr, size_t size); -void __asan_set_shadow_f5(const void *addr, size_t size); -void __asan_set_shadow_f8(const void *addr, size_t size); - -void *__asan_memset(void *addr, int c, size_t len); -void *__asan_memmove(void *dest, const void *src, size_t len); -void *__asan_memcpy(void *dest, const void *src, size_t len); - -void __hwasan_load1_noabort(unsigned long addr); -void __hwasan_store1_noabort(unsigned long addr); -void __hwasan_load2_noabort(unsigned long addr); -void __hwasan_store2_noabort(unsigned long addr); -void __hwasan_load4_noabort(unsigned long addr); -void __hwasan_store4_noabort(unsigned long addr); -void __hwasan_load8_noabort(unsigned long addr); -void __hwasan_store8_noabort(unsigned long addr); -void __hwasan_load16_noabort(unsigned long addr); -void __hwasan_store16_noabort(unsigned long addr); -void __hwasan_loadN_noabort(unsigned long addr, size_t size); -void __hwasan_storeN_noabort(unsigned long addr, size_t size); - -void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size); - -void *__hwasan_memset(void *addr, int c, size_t len); -void *__hwasan_memmove(void *dest, const void *src, size_t len); -void *__hwasan_memcpy(void *dest, const void *src, size_t len); - -void kasan_tag_mismatch(unsigned long addr, unsigned long access_info, +void __asan_alloca_poison(void *, ssize_t size); +void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom); + +void __asan_load1(void *); +void __asan_store1(void *); +void __asan_load2(void *); +void __asan_store2(void *); +void __asan_load4(void *); +void __asan_store4(void *); +void __asan_load8(void *); +void __asan_store8(void *); +void __asan_load16(void *); +void __asan_store16(void *); +void __asan_loadN(void *, ssize_t size); +void __asan_storeN(void *, ssize_t size); + +void __asan_load1_noabort(void *); +void __asan_store1_noabort(void *); +void __asan_load2_noabort(void *); +void 
__asan_store2_noabort(void *); +void __asan_load4_noabort(void *); +void __asan_store4_noabort(void *); +void __asan_load8_noabort(void *); +void __asan_store8_noabort(void *); +void __asan_load16_noabort(void *); +void __asan_store16_noabort(void *); +void __asan_loadN_noabort(void *, ssize_t size); +void __asan_storeN_noabort(void *, ssize_t size); + +void __asan_report_load1_noabort(void *); +void __asan_report_store1_noabort(void *); +void __asan_report_load2_noabort(void *); +void __asan_report_store2_noabort(void *); +void __asan_report_load4_noabort(void *); +void __asan_report_store4_noabort(void *); +void __asan_report_load8_noabort(void *); +void __asan_report_store8_noabort(void *); +void __asan_report_load16_noabort(void *); +void __asan_report_store16_noabort(void *); +void __asan_report_load_n_noabort(void *, ssize_t size); +void __asan_report_store_n_noabort(void *, ssize_t size); + +void __asan_set_shadow_00(const void *addr, ssize_t size); +void __asan_set_shadow_f1(const void *addr, ssize_t size); +void __asan_set_shadow_f2(const void *addr, ssize_t size); +void __asan_set_shadow_f3(const void *addr, ssize_t size); +void __asan_set_shadow_f5(const void *addr, ssize_t size); +void __asan_set_shadow_f8(const void *addr, ssize_t size); + +void *__asan_memset(void *addr, int c, ssize_t len); +void *__asan_memmove(void *dest, const void *src, ssize_t len); +void *__asan_memcpy(void *dest, const void *src, ssize_t len); + +void __hwasan_load1_noabort(void *); +void __hwasan_store1_noabort(void *); +void __hwasan_load2_noabort(void *); +void __hwasan_store2_noabort(void *); +void __hwasan_load4_noabort(void *); +void __hwasan_store4_noabort(void *); +void __hwasan_load8_noabort(void *); +void __hwasan_store8_noabort(void *); +void __hwasan_load16_noabort(void *); +void __hwasan_store16_noabort(void *); +void __hwasan_loadN_noabort(void *, ssize_t size); +void __hwasan_storeN_noabort(void *, ssize_t size); + +void __hwasan_tag_memory(void *, u8 tag, ssize_t size); + +void *__hwasan_memset(void *addr, int c, ssize_t len); +void *__hwasan_memmove(void *dest, const void *src, ssize_t len); +void *__hwasan_memcpy(void *dest, const void *src, ssize_t len); + +void kasan_tag_mismatch(void *addr, unsigned long access_info, unsigned long ret_ip); #endif /* __MM_KASAN_KASAN_H */ diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 892a9dc9d4d3..84d9f3b37014 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -211,7 +211,7 @@ static void start_report(unsigned long *flags, bool sync) pr_err("==================================================================\n"); } -static void end_report(unsigned long *flags, void *addr) +static void end_report(unsigned long *flags, const void *addr) { if (addr) trace_error_report_end(ERROR_DETECTOR_KASAN, @@ -450,8 +450,8 @@ static void print_memory_metadata(const void *addr) static void print_report(struct kasan_report_info *info) { - void *addr = kasan_reset_tag(info->access_addr); - u8 tag = get_tag(info->access_addr); + void *addr = kasan_reset_tag((void *)info->access_addr); + u8 tag = get_tag((void *)info->access_addr); print_error_description(info); if (addr_has_metadata(addr)) @@ -468,12 +468,12 @@ static void print_report(struct kasan_report_info *info) static void complete_report_info(struct kasan_report_info *info) { - void *addr = kasan_reset_tag(info->access_addr); + void *addr = kasan_reset_tag((void *)info->access_addr); struct slab *slab; if (info->type == KASAN_REPORT_ACCESS) info->first_bad_addr = kasan_find_first_bad_addr( - 
info->access_addr, info->access_size); + (void *)info->access_addr, info->access_size); else info->first_bad_addr = addr; @@ -544,11 +544,10 @@ void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_ty * user_access_save/restore(): kasan_report_invalid_free() cannot be called * from a UACCESS region, and kasan_report_async() is not used on x86. */ -bool kasan_report(unsigned long addr, size_t size, bool is_write, +bool kasan_report(const void *addr, size_t size, bool is_write, unsigned long ip) { bool ret = true; - void *ptr = (void *)addr; unsigned long ua_flags = user_access_save(); unsigned long irq_flags; struct kasan_report_info info; @@ -562,7 +561,7 @@ bool kasan_report(unsigned long addr, size_t size, bool is_write, memset(&info, 0, sizeof(info)); info.type = KASAN_REPORT_ACCESS; - info.access_addr = ptr; + info.access_addr = addr; info.access_size = size; info.is_write = is_write; info.ip = ip; @@ -571,7 +570,7 @@ bool kasan_report(unsigned long addr, size_t size, bool is_write, print_report(&info); - end_report(&irq_flags, ptr); + end_report(&irq_flags, (void *)addr); out: user_access_restore(ua_flags); diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c index 87d39bc0a673..51a1e8a8877f 100644 --- a/mm/kasan/report_generic.c +++ b/mm/kasan/report_generic.c @@ -30,9 +30,9 @@ #include "kasan.h" #include "../slab.h" -void *kasan_find_first_bad_addr(void *addr, size_t size) +const void *kasan_find_first_bad_addr(const void *addr, size_t size) { - void *p = addr; + const void *p = addr; if (!addr_has_metadata(p)) return p; @@ -362,14 +362,14 @@ void kasan_print_address_stack_frame(const void *addr) #endif /* CONFIG_KASAN_STACK */ #define DEFINE_ASAN_REPORT_LOAD(size) \ -void __asan_report_load##size##_noabort(unsigned long addr) \ +void __asan_report_load##size##_noabort(void *addr) \ { \ kasan_report(addr, size, false, _RET_IP_); \ } \ EXPORT_SYMBOL(__asan_report_load##size##_noabort) #define DEFINE_ASAN_REPORT_STORE(size) \ -void __asan_report_store##size##_noabort(unsigned long addr) \ +void __asan_report_store##size##_noabort(void *addr) \ { \ kasan_report(addr, size, true, _RET_IP_); \ } \ @@ -386,13 +386,13 @@ DEFINE_ASAN_REPORT_STORE(4); DEFINE_ASAN_REPORT_STORE(8); DEFINE_ASAN_REPORT_STORE(16); -void __asan_report_load_n_noabort(unsigned long addr, size_t size) +void __asan_report_load_n_noabort(void *addr, ssize_t size) { kasan_report(addr, size, false, _RET_IP_); } EXPORT_SYMBOL(__asan_report_load_n_noabort); -void __asan_report_store_n_noabort(unsigned long addr, size_t size) +void __asan_report_store_n_noabort(void *addr, ssize_t size) { kasan_report(addr, size, true, _RET_IP_); } diff --git a/mm/kasan/report_hw_tags.c b/mm/kasan/report_hw_tags.c index 32e80f78de7d..065e1b2fc484 100644 --- a/mm/kasan/report_hw_tags.c +++ b/mm/kasan/report_hw_tags.c @@ -15,7 +15,7 @@ #include "kasan.h" -void *kasan_find_first_bad_addr(void *addr, size_t size) +const void *kasan_find_first_bad_addr(const void *addr, size_t size) { /* * Hardware Tag-Based KASAN only calls this function for normal memory diff --git a/mm/kasan/report_sw_tags.c b/mm/kasan/report_sw_tags.c index 8b1f5a73ee6d..689e94f9fe3c 100644 --- a/mm/kasan/report_sw_tags.c +++ b/mm/kasan/report_sw_tags.c @@ -30,7 +30,7 @@ #include "kasan.h" #include "../slab.h" -void *kasan_find_first_bad_addr(void *addr, size_t size) +const void *kasan_find_first_bad_addr(const void *addr, size_t size) { u8 tag = get_tag(addr); void *p = kasan_reset_tag(addr); diff --git a/mm/kasan/shadow.c 
b/mm/kasan/shadow.c index c8b86f3273b5..3e62728ae25d 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -28,13 +28,13 @@ bool __kasan_check_read(const volatile void *p, unsigned int size) { - return kasan_check_range((unsigned long)p, size, false, _RET_IP_); + return kasan_check_range((void *)p, size, false, _RET_IP_); } EXPORT_SYMBOL(__kasan_check_read); bool __kasan_check_write(const volatile void *p, unsigned int size) { - return kasan_check_range((unsigned long)p, size, true, _RET_IP_); + return kasan_check_range((void *)p, size, true, _RET_IP_); } EXPORT_SYMBOL(__kasan_check_write); @@ -50,7 +50,7 @@ EXPORT_SYMBOL(__kasan_check_write); #undef memset void *memset(void *addr, int c, size_t len) { - if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_)) + if (!kasan_check_range(addr, len, true, _RET_IP_)) return NULL; return __memset(addr, c, len); @@ -60,8 +60,8 @@ void *memset(void *addr, int c, size_t len) #undef memmove void *memmove(void *dest, const void *src, size_t len) { - if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) || - !kasan_check_range((unsigned long)dest, len, true, _RET_IP_)) + if (!kasan_check_range(src, len, false, _RET_IP_) || + !kasan_check_range(dest, len, true, _RET_IP_)) return NULL; return __memmove(dest, src, len); @@ -71,17 +71,17 @@ void *memmove(void *dest, const void *src, size_t len) #undef memcpy void *memcpy(void *dest, const void *src, size_t len) { - if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) || - !kasan_check_range((unsigned long)dest, len, true, _RET_IP_)) + if (!kasan_check_range(src, len, false, _RET_IP_) || + !kasan_check_range(dest, len, true, _RET_IP_)) return NULL; return __memcpy(dest, src, len); } #endif -void *__asan_memset(void *addr, int c, size_t len) +void *__asan_memset(void *addr, int c, ssize_t len) { - if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_)) + if (!kasan_check_range(addr, len, true, _RET_IP_)) return NULL; return __memset(addr, c, len); @@ -89,10 +89,10 @@ void *__asan_memset(void *addr, int c, size_t len) EXPORT_SYMBOL(__asan_memset); #ifdef __HAVE_ARCH_MEMMOVE -void *__asan_memmove(void *dest, const void *src, size_t len) +void *__asan_memmove(void *dest, const void *src, ssize_t len) { - if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) || - !kasan_check_range((unsigned long)dest, len, true, _RET_IP_)) + if (!kasan_check_range(src, len, false, _RET_IP_) || + !kasan_check_range(dest, len, true, _RET_IP_)) return NULL; return __memmove(dest, src, len); @@ -100,10 +100,10 @@ void *__asan_memmove(void *dest, const void *src, size_t len) EXPORT_SYMBOL(__asan_memmove); #endif -void *__asan_memcpy(void *dest, const void *src, size_t len) +void *__asan_memcpy(void *dest, const void *src, ssize_t len) { - if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) || - !kasan_check_range((unsigned long)dest, len, true, _RET_IP_)) + if (!kasan_check_range(src, len, false, _RET_IP_) || + !kasan_check_range(dest, len, true, _RET_IP_)) return NULL; return __memcpy(dest, src, len); @@ -111,13 +111,13 @@ void *__asan_memcpy(void *dest, const void *src, size_t len) EXPORT_SYMBOL(__asan_memcpy); #ifdef CONFIG_KASAN_SW_TAGS -void *__hwasan_memset(void *addr, int c, size_t len) __alias(__asan_memset); +void *__hwasan_memset(void *addr, int c, ssize_t len) __alias(__asan_memset); EXPORT_SYMBOL(__hwasan_memset); #ifdef __HAVE_ARCH_MEMMOVE -void *__hwasan_memmove(void *dest, const void *src, size_t len) __alias(__asan_memmove); +void 
*__hwasan_memmove(void *dest, const void *src, ssize_t len) __alias(__asan_memmove); EXPORT_SYMBOL(__hwasan_memmove); #endif -void *__hwasan_memcpy(void *dest, const void *src, size_t len) __alias(__asan_memcpy); +void *__hwasan_memcpy(void *dest, const void *src, ssize_t len) __alias(__asan_memcpy); EXPORT_SYMBOL(__hwasan_memcpy); #endif diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c index 30da65fa02a1..220b5d4c6876 100644 --- a/mm/kasan/sw_tags.c +++ b/mm/kasan/sw_tags.c @@ -70,8 +70,8 @@ u8 kasan_random_tag(void) return (u8)(state % (KASAN_TAG_MAX + 1)); } -bool kasan_check_range(unsigned long addr, size_t size, bool write, - unsigned long ret_ip) +bool kasan_check_range(const void *addr, size_t size, bool write, + unsigned long ret_ip) { u8 tag; u8 *shadow_first, *shadow_last, *shadow; @@ -133,12 +133,12 @@ bool kasan_byte_accessible(const void *addr) } #define DEFINE_HWASAN_LOAD_STORE(size) \ - void __hwasan_load##size##_noabort(unsigned long addr) \ + void __hwasan_load##size##_noabort(void *addr) \ { \ - kasan_check_range(addr, size, false, _RET_IP_); \ + kasan_check_range(addr, size, false, _RET_IP_); \ } \ EXPORT_SYMBOL(__hwasan_load##size##_noabort); \ - void __hwasan_store##size##_noabort(unsigned long addr) \ + void __hwasan_store##size##_noabort(void *addr) \ { \ kasan_check_range(addr, size, true, _RET_IP_); \ } \ @@ -150,25 +150,25 @@ DEFINE_HWASAN_LOAD_STORE(4); DEFINE_HWASAN_LOAD_STORE(8); DEFINE_HWASAN_LOAD_STORE(16); -void __hwasan_loadN_noabort(unsigned long addr, unsigned long size) +void __hwasan_loadN_noabort(void *addr, ssize_t size) { kasan_check_range(addr, size, false, _RET_IP_); } EXPORT_SYMBOL(__hwasan_loadN_noabort); -void __hwasan_storeN_noabort(unsigned long addr, unsigned long size) +void __hwasan_storeN_noabort(void *addr, ssize_t size) { kasan_check_range(addr, size, true, _RET_IP_); } EXPORT_SYMBOL(__hwasan_storeN_noabort); -void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size) +void __hwasan_tag_memory(void *addr, u8 tag, ssize_t size) { - kasan_poison((void *)addr, size, tag, false); + kasan_poison(addr, size, tag, false); } EXPORT_SYMBOL(__hwasan_tag_memory); -void kasan_tag_mismatch(unsigned long addr, unsigned long access_info, +void kasan_tag_mismatch(void *addr, unsigned long access_info, unsigned long ret_ip) { kasan_report(addr, 1 << (access_info & 0xf), access_info & 0x10, -- cgit v1.2.3 From eb83f6528b563550e9ba92f9accfd453ca28e828 Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Mon, 8 May 2023 23:40:59 +0000 Subject: mm: hugetlb_vmemmap: provide stronger vmemmap allocation guarantees HugeTLB pages have a struct page optimizations where struct pages for tail pages are freed. However, when HugeTLB pages are destroyed, the memory for struct pages (vmemmap) need to be allocated again. Currently, __GFP_NORETRY flag is used to allocate the memory for vmemmap, but given that this flag makes very little effort to actually reclaim memory the returning of huge pages back to the system can be problem. Lets use __GFP_RETRY_MAYFAIL instead. This flag is also performs graceful reclaim without causing ooms, but at least it may perform a few retries, and will fail only when there is genuinely little amount of unused memory in the system. Freeing a 1G page requires 16M of free memory. A machine might need to be reconfigured from one task to another, and release a large number of 1G pages back to the system if allocating 16M fails, the release won't work. 
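As a sanity check on the 16M figure above: a 1G HugeTLB page needs one struct page of vmemmap per base page. The standalone sketch below works that out; the 4K base page and 64-byte struct page sizes are assumptions typical of x86-64, not values taken from the patch.

#include <stdio.h>

/* Sketch only: the sizes below are assumptions, not taken from the patch. */
#define BASE_PAGE_SIZE   4096UL                     /* 4K base pages */
#define STRUCT_PAGE_SIZE 64UL                       /* common struct page size */
#define HUGE_1G          (1024UL * 1024UL * 1024UL)

int main(void)
{
	unsigned long nr_base_pages = HUGE_1G / BASE_PAGE_SIZE;      /* 262144 */
	unsigned long vmemmap_bytes = nr_base_pages * STRUCT_PAGE_SIZE;

	/* Prints 16 MiB: the allocation that must succeed to free one 1G page. */
	printf("%lu MiB of vmemmap per 1G page\n", vmemmap_bytes >> 20);
	return 0;
}

With __GFP_NORETRY that 16M allocation can fail under modest memory pressure even though a few reclaim retries would satisfy it, which is the case __GFP_RETRY_MAYFAIL is meant to cover.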
Link: https://lkml.kernel.org/r/20230508234059.2529638-1-pasha.tatashin@soleen.com Signed-off-by: Pasha Tatashin Suggested-by: David Rientjes Reviewed-by: Mike Kravetz Cc: Michal Hocko Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/hugetlb_vmemmap.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index 27f001e0f0a2..f42079b73f82 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -384,8 +384,9 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end, } static int alloc_vmemmap_page_list(unsigned long start, unsigned long end, - gfp_t gfp_mask, struct list_head *list) + struct list_head *list) { + gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_THISNODE; unsigned long nr_pages = (end - start) >> PAGE_SHIFT; int nid = page_to_nid((struct page *)start); struct page *page, *next; @@ -413,12 +414,11 @@ out: * @end: end address of the vmemmap virtual address range that we want to * remap. * @reuse: reuse address. - * @gfp_mask: GFP flag for allocating vmemmap pages. * * Return: %0 on success, negative error code otherwise. */ static int vmemmap_remap_alloc(unsigned long start, unsigned long end, - unsigned long reuse, gfp_t gfp_mask) + unsigned long reuse) { LIST_HEAD(vmemmap_pages); struct vmemmap_remap_walk walk = { @@ -430,7 +430,7 @@ static int vmemmap_remap_alloc(unsigned long start, unsigned long end, /* See the comment in the vmemmap_remap_free(). */ BUG_ON(start - reuse != PAGE_SIZE); - if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages)) + if (alloc_vmemmap_page_list(start, end, &vmemmap_pages)) return -ENOMEM; mmap_read_lock(&init_mm); @@ -476,8 +476,7 @@ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head) * When a HugeTLB page is freed to the buddy allocator, previously * discarded vmemmap pages must be allocated and remapping. */ - ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse, - GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE); + ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse); if (!ret) { ClearHPageVmemmapOptimized(head); static_branch_dec(&hugetlb_optimize_vmemmap_key); -- cgit v1.2.3 From 97de10a9932c363a4e4ee9bb5f2297e254fb1413 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Mon, 8 May 2023 19:41:28 +0800 Subject: mm: memory-failure: move sysctl register in memory_failure_init() There is already a memory_failure_init(), don't add a new initcall, move register_sysctl_init() into it to cleanup a bit. 
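The cleanup amounts to dropping the dedicated late_initcall() whose only job was the sysctl registration and doing that registration from the existing core_initcall() instead. A rough sketch of the resulting shape, not a standalone program; memory_failure_table and the per-CPU work setup are the existing ones in mm/memory-failure.c:

static int __init memory_failure_init(void)
{
	/* ... existing per-CPU memory_failure_work_func setup ... */

	/* Registered here rather than from a separate late_initcall(). */
	register_sysctl_init("vm", memory_failure_table);

	return 0;
}
core_initcall(memory_failure_init);

A side effect is that registration now happens at core_initcall time, earlier in boot than the old late_initcall; nothing in this table appears to depend on the later ordering.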
Link: https://lkml.kernel.org/r/20230508114128.37081-2-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Acked-by: Naoya Horiguchi Signed-off-by: Andrew Morton --- mm/memory-failure.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 5b663eca1f29..004a02f44271 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -123,7 +123,6 @@ const struct attribute_group memory_failure_attr_group = { .attrs = memory_failure_attr, }; -#ifdef CONFIG_SYSCTL static struct ctl_table memory_failure_table[] = { { .procname = "memory_failure_early_kill", @@ -146,14 +145,6 @@ static struct ctl_table memory_failure_table[] = { { } }; -static int __init memory_failure_sysctl_init(void) -{ - register_sysctl_init("vm", memory_failure_table); - return 0; -} -late_initcall(memory_failure_sysctl_init); -#endif /* CONFIG_SYSCTL */ - /* * Return values: * 1: the page is dissolved (if needed) and taken off from buddy, @@ -2441,6 +2432,8 @@ static int __init memory_failure_init(void) INIT_WORK(&mf_cpu->work, memory_failure_work_func); } + register_sysctl_init("vm", memory_failure_table); + return 0; } core_initcall(memory_failure_init); -- cgit v1.2.3 From 4822acb1369637938c1252d534d3356c5e313cde Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Mon, 8 May 2023 07:35:38 +0000 Subject: mm, oom: do not check 0 mask in out_of_memory() Since commit 60e2793d440a ("mm, oom: do not trigger out_of_memory from the #PF"), no user sets gfp_mask to 0. Remove the 0 mask check and update the comments. Link: https://lkml.kernel.org/r/20230508073538.1168-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/oom_kill.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 044e1eed720e..612b5597d3af 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -1130,12 +1130,10 @@ bool out_of_memory(struct oom_control *oc) /* * The OOM killer does not compensate for IO-less reclaim. - * pagefault_out_of_memory lost its gfp context so we have to - * make sure exclude 0 mask - all other users should have at least - * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to - * invoke the OOM killer even if it is a GFP_NOFS allocation. + * But mem_cgroup_oom() has to invoke the OOM killer even + * if it is a GFP_NOFS allocation. */ - if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc)) + if (!(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc)) return true; /* -- cgit v1.2.3 From 539aa041a9b1d98cd847b949ba8f91857c995f26 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 15 May 2023 12:33:41 +0100 Subject: mm: compaction: ensure rescanning only happens on partially scanned pageblocks Patch series "Follow-up "Fix excessive CPU usage during compaction"". The series "Fix excessive CPU usage during compaction" [1] attempted to fix a bug [2] but Vlastimil noted that the fix was incomplete [3]. While the series was merged, fast_find_migrateblock was still disabled. This series should fix the corner cases and allow 95e7a450b819 ("Revert "mm/compaction: fix set skip in fast_find_migrateblock"") to be safely reverted. Details on how many pageblocks are rescanned are in the changelog of the last patch. 
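For reference, the rescans discussed in this series are at pageblock granularity, typically order 9 on x86-64 (512 4K pages, i.e. 2MB), which is also the size of the pfn ranges quoted in the last patch's changelog. The standalone sketch below models the alignment check the first patch adds to the rescan condition; the sketch_* names and the pageblock order are assumptions of this example, not kernel symbols:

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_PAGEBLOCK_ORDER	9
#define SKETCH_PAGEBLOCK_PAGES	(1UL << SKETCH_PAGEBLOCK_ORDER)

static bool sketch_pageblock_aligned(unsigned long pfn)
{
	return (pfn & (SKETCH_PAGEBLOCK_PAGES - 1)) == 0;
}

/*
 * A pageblock is only worth rescanning if the migration scanner stopped
 * part-way through it, i.e. its resume pfn is not pageblock aligned.
 */
static bool sketch_partially_scanned(unsigned long migrate_pfn)
{
	return !sketch_pageblock_aligned(migrate_pfn);
}

int main(void)
{
	printf("%d\n", sketch_partially_scanned(0x10d600)); /* 0: block complete */
	printf("%d\n", sketch_partially_scanned(0x10d642)); /* 1: stopped mid-block */
	return 0;
}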
"Raghavendra K T" tested this and reported "decent improvement from perf perspective as well as compaction related data [4] [1] https://lore.kernel.org/r/20230125134434.18017-1-mgorman@techsingularity.net [2] https://bugzilla.suse.com/show_bug.cgi?id=1206848 [3] https://lore.kernel.org/r/a55cf026-a2f9-ef01-9a4c-398693e048ea@suse.cz [4] https://lkml.kernel.org/r/6d62686f-964d-342c-e085-0eae2555cc54@amd.com This patch (of 4): compact_zone() intends to rescan pageblocks if there is a failure to migrate "within the current order-aligned block". However, the pageblock scan may already be complete and moved to the next block causing the next pageblock to be "rescanned". Ensure only the most recent pageblock is rescanned. Link: https://lkml.kernel.org/r/20230515113344.6869-1-mgorman@techsingularity.net Link: https://lkml.kernel.org/r/20230515113344.6869-2-mgorman@techsingularity.net Signed-off-by: Mel Gorman Reported-by: Vlastimil Babka Tested-by: Raghavendra K T Acked-by: Vlastimil Babka Cc: Chuyi Zhou Cc: Jiri Slaby Cc: Maxim Levitsky Cc: Mel Gorman Cc: Michal Hocko Cc: Paolo Bonzini Cc: Pedro Falcato Signed-off-by: Andrew Morton --- mm/compaction.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 5584fa5fa3d4..d16b0fcd6db5 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2465,8 +2465,9 @@ rescan: * fast_find_migrateblock revisiting blocks that were * recently partially scanned. */ - if (cc->direct_compaction && !cc->finish_pageblock && - (cc->mode < MIGRATE_SYNC)) { + if (!pageblock_aligned(cc->migrate_pfn) && + cc->direct_compaction && !cc->finish_pageblock && + (cc->mode < MIGRATE_SYNC)) { cc->finish_pageblock = true; /* -- cgit v1.2.3 From 9ecc5fc50a9c75b0af252913163a9f1ac4206b17 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 15 May 2023 12:33:42 +0100 Subject: mm: compaction: only force pageblock scan completion when skip hints are obeyed fast_find_migrateblock relies on skip hints to avoid rescanning a recently selected pageblock but compact_zone() only forces the pageblock scan completion to set the skip hint if in direct compaction. While this prevents direct compaction repeatedly scanning a subset of blocks due to fast_find_migrateblock(), it does not prevent proactive compaction, node compaction and kcompactd encountering the same problem described in commit cfccd2e63e7e ("mm, compaction: finish pageblocks on complete migration failure"). Force the scan completion of a pageblock to set the skip hint if skip hints are obeyed to prevent fast_find_migrateblock() repeatedly selecting a subset of pageblocks. Link: https://lkml.kernel.org/r/20230515113344.6869-3-mgorman@techsingularity.net Signed-off-by: Mel Gorman Suggested-by: Vlastimil Babka Tested-by: Raghavendra K T Acked-by: Vlastimil Babka Cc: Chuyi Zhou Cc: Jiri Slaby Cc: Maxim Levitsky Cc: Michal Hocko Cc: Paolo Bonzini Cc: Pedro Falcato Signed-off-by: Andrew Morton --- mm/compaction.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index d16b0fcd6db5..009128d1e4ef 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2457,7 +2457,8 @@ rescan: } /* * If an ASYNC or SYNC_LIGHT fails to migrate a page - * within the current order-aligned block, scan the + * within the current order-aligned block and + * fast_find_migrateblock may be used then scan the * remainder of the pageblock. This will mark the * pageblock "skip" to avoid rescanning in the near * future. 
This will isolate more pages than necessary @@ -2466,7 +2467,7 @@ rescan: * recently partially scanned. */ if (!pageblock_aligned(cc->migrate_pfn) && - cc->direct_compaction && !cc->finish_pageblock && + !cc->ignore_skip_hint && !cc->finish_pageblock && (cc->mode < MIGRATE_SYNC)) { cc->finish_pageblock = true; -- cgit v1.2.3 From 590ccea80af950685de7f72ec43831765e5c8cb1 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 15 May 2023 12:33:43 +0100 Subject: mm: compaction: update pageblock skip when first migration candidate is not at the start isolate_migratepages_block should mark a pageblock as skip if scanning started on an aligned pageblock boundary but it only updates the skip flag if the first migration candidate is also aligned. Tracing during a compaction stress load (mmtests: workload-usemem-stress-numa-compact) that many pageblocks are not marked skip causing excessive scanning of blocks that had been recently checked. Update pageblock skip based on "valid_page" which is set if scanning started on a pageblock boundary. [mgorman@techsingularity.net: fix handling of skip bit] Link: https://lkml.kernel.org/r/20230602111622.swtxhn6lu2qwgrwq@techsingularity.net Link: https://lkml.kernel.org/r/20230515113344.6869-4-mgorman@techsingularity.net Signed-off-by: Mel Gorman Tested-by: Raghavendra K T Acked-by: Vlastimil Babka Cc: Chuyi Zhou Cc: Jiri Slaby Cc: Maxim Levitsky Cc: Michal Hocko Cc: Paolo Bonzini Cc: Pedro Falcato Signed-off-by: Andrew Morton --- mm/compaction.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 009128d1e4ef..02aa3788765d 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -392,18 +392,14 @@ void reset_isolation_suitable(pg_data_t *pgdat) * Sets the pageblock skip bit if it was clear. Note that this is a hint as * locks are not required for read/writers. Returns true if it was already set. */ -static bool test_and_set_skip(struct compact_control *cc, struct page *page, - unsigned long pfn) +static bool test_and_set_skip(struct compact_control *cc, struct page *page) { bool skip; - /* Do no update if skip hint is being ignored */ + /* Do not update if skip hint is being ignored */ if (cc->ignore_skip_hint) return false; - if (!pageblock_aligned(pfn)) - return false; - skip = get_pageblock_skip(page); if (!skip && !cc->no_set_skip_hint) set_pageblock_skip(page); @@ -470,8 +466,7 @@ static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) { } -static bool test_and_set_skip(struct compact_control *cc, struct page *page, - unsigned long pfn) +static bool test_and_set_skip(struct compact_control *cc, struct page *page) { return false; } @@ -1074,11 +1069,17 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, lruvec_memcg_debug(lruvec, page_folio(page)); - /* Try get exclusive access under lock */ - if (!skip_updated) { + /* + * Try get exclusive access under lock. If marked for + * skip, the scan is aborted unless the current context + * is a rescan to reach the end of the pageblock. 
+ */ + if (!skip_updated && valid_page) { skip_updated = true; - if (test_and_set_skip(cc, page, low_pfn)) + if (test_and_set_skip(cc, valid_page) && + !cc->finish_pageblock) { goto isolate_abort; + } } /* -- cgit v1.2.3 From 90ed667c03fe553a41d79057740ed5df951eead0 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 15 May 2023 12:33:44 +0100 Subject: Revert "Revert "mm/compaction: fix set skip in fast_find_migrateblock"" This reverts commit 95e7a450b819 ("Revert "mm/compaction: fix set skip in fast_find_migrateblock""). Commit 7efc3b726103 ("mm/compaction: fix set skip in fast_find_migrateblock") was reverted due to bug reports about khugepaged consuming large amounts of CPU without making progress. The underlying bug was partially fixed by commit cfccd2e63e7e ("mm, compaction: finish pageblocks on complete migration failure") but it only mitigated the problem and Vlastimil Babka pointing out the same issue could theoretically happen to kcompactd. As pageblocks containing pages that fail to migrate should now be forcibly rescanned to set the skip hint if skip hints are used, fast_find_migrateblock() should no longer loop on a small subset of pageblocks for prolonged periods of time. Revert the revert so fast_find_migrateblock() is effective again. Using the mmtests config workload-usemem-stress-numa-compact, the number of unique ranges scanned was analysed for both kcompactd and !kcompactd activity. 6.4.0-rc1-vanilla kcompactd 7 range=(0x10d600~0x10d800) 7 range=(0x110c00~0x110e00) 7 range=(0x110e00~0x111000) 7 range=(0x111800~0x111a00) 7 range=(0x111a00~0x111c00) !kcompactd 1 range=(0x113e00~0x114000) 1 range=(0x114000~0x114020) 1 range=(0x114400~0x114489) 1 range=(0x114489~0x1144aa) 1 range=(0x1144aa~0x114600) 6.4.0-rc1-mm-revertfastmigrate kcompactd 17 range=(0x104200~0x104400) 17 range=(0x104400~0x104600) 17 range=(0x104600~0x104800) 17 range=(0x104800~0x104a00) 17 range=(0x104a00~0x104c00) !kcompactd 1793 range=(0x15c200~0x15c400) 5436 range=(0x105800~0x105a00) 19826 range=(0x150a00~0x150c00) 19833 range=(0x150800~0x150a00) 19834 range=(0x11ce00~0x11d000) 6.4.0-rc1-mm-follupfastfind kcompactd 22 range=(0x107200~0x107400) 23 range=(0x107400~0x107600) 23 range=(0x107600~0x107800) 23 range=(0x107c00~0x107e00) 23 range=(0x107e00~0x108000) !kcompactd 3 range=(0x890240~0x890400) 5 range=(0x886e00~0x887000) 5 range=(0x88a400~0x88a600) 6 range=(0x88f800~0x88fa00) 9 range=(0x88a400~0x88a420) Note that the vanilla kernel and the full series had some duplication of ranges scanned but it was not severe and would be in line with compaction resets when the skip hints are cleared. Just a revert of commit 7efc3b726103 ("mm/compaction: fix set skip in fast_find_migrateblock") showed excessive rescans of the same ranges so the series should not reintroduce bug 1206848. 
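Why the revert is safe can be seen with a toy model of the skip hints: once a pageblock that failed migration has been fully rescanned and marked skip, a fast search that honours the hint cannot keep returning the same block. Everything below is invented for illustration (the sketch_* names, the 64-block zone); the real logic lives in isolate_migratepages_block() and fast_find_migrateblock():

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_NR_BLOCKS	64

static bool sketch_skip[SKETCH_NR_BLOCKS];	/* one skip hint per pageblock */

/* After a complete scan of a block fails to migrate anything, mark it. */
static void sketch_mark_block_failed(unsigned int block)
{
	sketch_skip[block] = true;
}

/* Simplified fast search: pick the first block whose skip hint is clear. */
static int sketch_fast_find(void)
{
	for (unsigned int i = 0; i < SKETCH_NR_BLOCKS; i++)
		if (!sketch_skip[i])
			return (int)i;
	return -1;
}

int main(void)
{
	printf("first pick: %d\n", sketch_fast_find());	/* 0 */
	sketch_mark_block_failed(0);			/* full rescan failed */
	printf("next pick:  %d\n", sketch_fast_find());	/* 1, not 0 again */
	return 0;
}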
Link: https://bugzilla.suse.com/show_bug.cgi?id=1206848 Link: https://lkml.kernel.org/r/20230515113344.6869-5-mgorman@techsingularity.net Signed-off-by: Mel Gorman Tested-by: Raghavendra K T Acked-by: Vlastimil Babka Cc: Chuyi Zhou Cc: Jiri Slaby Cc: Maxim Levitsky Cc: Michal Hocko Cc: Paolo Bonzini Cc: Pedro Falcato Signed-off-by: Andrew Morton --- mm/compaction.c | 1 - 1 file changed, 1 deletion(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 02aa3788765d..f6465ae74d3f 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1866,7 +1866,6 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc) pfn = cc->zone->zone_start_pfn; cc->fast_search_fail = 0; found_block = true; - set_pageblock_skip(freepage); break; } } -- cgit v1.2.3 From b758fe6df50daf68fef089d8f3c1cd49fc794ed2 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (IBM)" Date: Mon, 15 May 2023 11:34:00 +0300 Subject: mm/secretmem: make it on by default Following the discussion about direct map fragmentaion at LSF/MM [1], it appears that direct map fragmentation has a negligible effect on kernel data accesses. Since the only reason that warranted secretmem to be disabled by default was concern about performance regression caused by the direct map fragmentation, it makes perfect sense to lift this restriction and make secretmem enabled. secretmem obeys RLIMIT_MEMBLOCK and as such it is not expected to cause large fragmentation of the direct map or meaningfull increase in page tables allocated during split of the large mappings in the direct map. The secretmem.enable parameter is retained to allow system administrators to disable secretmem at boot. Switch the default setting of secretmem.enable parameter to 1. Link: https://lwn.net/Articles/931406/ [1] Link: https://lkml.kernel.org/r/20230515083400.3563974-1-rppt@kernel.org Signed-off-by: Mike Rapoport (IBM) Acked-by: David Hildenbrand Cc: Randy Dunlap Signed-off-by: Andrew Morton --- mm/secretmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/secretmem.c b/mm/secretmem.c index 0b502625cd30..974b32ba8b9d 100644 --- a/mm/secretmem.c +++ b/mm/secretmem.c @@ -35,7 +35,7 @@ #define SECRETMEM_MODE_MASK (0x0) #define SECRETMEM_FLAGS_MASK SECRETMEM_MODE_MASK -static bool secretmem_enable __ro_after_init; +static bool secretmem_enable __ro_after_init = 1; module_param_named(enable, secretmem_enable, bool, 0400); MODULE_PARM_DESC(secretmem_enable, "Enable secretmem and memfd_secret(2) system call"); -- cgit v1.2.3 From f24f66eef5c03e07120edddde3608477f10b3980 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 16 May 2023 12:50:29 +0300 Subject: mm/zsmalloc: get rid of PAGE_MASK Use offset_in_page() macro instead of 'val & ~PAGE_MASK' Link: https://lkml.kernel.org/r/20230516095029.49036-2-avromanov@sberdevices.ru Signed-off-by: Alexey Romanov Reviewed-by: Sergey Senozhatsky Cc: Minchan Kim Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 02f7f414aade..c0d433541636 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1341,7 +1341,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, spin_unlock(&pool->lock); class = zspage_class(pool, zspage); - off = (class->size * obj_idx) & ~PAGE_MASK; + off = offset_in_page(class->size * obj_idx); local_lock(&zs_map_area.lock); area = this_cpu_ptr(&zs_map_area); @@ -1381,7 +1381,7 @@ void zs_unmap_object(struct zs_pool *pool, 
unsigned long handle) obj_to_location(obj, &page, &obj_idx); zspage = get_zspage(page); class = zspage_class(pool, zspage); - off = (class->size * obj_idx) & ~PAGE_MASK; + off = offset_in_page(class->size * obj_idx); area = this_cpu_ptr(&zs_map_area); if (off + class->size <= PAGE_SIZE) @@ -1438,7 +1438,7 @@ static unsigned long obj_malloc(struct zs_pool *pool, offset = obj * class->size; nr_page = offset >> PAGE_SHIFT; - m_offset = offset & ~PAGE_MASK; + m_offset = offset_in_page(offset); m_page = get_first_page(zspage); for (i = 0; i < nr_page; i++) @@ -1548,7 +1548,7 @@ static void obj_free(int class_size, unsigned long obj, unsigned long *handle) void *vaddr; obj_to_location(obj, &f_page, &f_objidx); - f_offset = (class_size * f_objidx) & ~PAGE_MASK; + f_offset = offset_in_page(class_size * f_objidx); zspage = get_zspage(f_page); vaddr = kmap_atomic(f_page); @@ -1640,8 +1640,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst, obj_to_location(src, &s_page, &s_objidx); obj_to_location(dst, &d_page, &d_objidx); - s_off = (class->size * s_objidx) & ~PAGE_MASK; - d_off = (class->size * d_objidx) & ~PAGE_MASK; + s_off = offset_in_page(class->size * s_objidx); + d_off = offset_in_page(class->size * d_objidx); if (s_off + class->size > PAGE_SIZE) s_size = PAGE_SIZE - s_off; -- cgit v1.2.3 From 072ba380cefc7722c9442cc14a9c2810898c13ac Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:09 +0800 Subject: mm: page_alloc: move mirrored_kernelcore into mm_init.c Patch series "mm: page_alloc: misc cleanup and refactor", v2. This aims to reduce more space in page_alloc.c, also do some cleanup, no functional changes intended. This patch (of 13): Since commit 9420f89db2dd ("mm: move most of core MM initialization to mm/mm_init.c"), mirrored_kernelcore should be moved into mm_init.c, as most related codes are already there. Link: https://lkml.kernel.org/r/20230516063821.121844-1-wangkefeng.wang@huawei.com Link: https://lkml.kernel.org/r/20230516063821.121844-2-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Mike Rapoport (IBM) Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- mm/mm_init.c | 2 ++ mm/page_alloc.c | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index 7f7f9c677854..da162b7a044c 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -259,6 +259,8 @@ static int __init cmdline_parse_core(char *p, unsigned long *core, return 0; } +bool mirrored_kernelcore __initdata_memblock; + /* * kernelcore=size sets the amount of memory for use for allocations that * cannot be reclaimed or migrated. 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index af9c995d3c1e..d1086aeca8f2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -374,8 +373,6 @@ int user_min_free_kbytes = -1; int watermark_boost_factor __read_mostly = 15000; int watermark_scale_factor = 10; -bool mirrored_kernelcore __initdata_memblock; - /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ int movable_zone; EXPORT_SYMBOL(movable_zone); -- cgit v1.2.3 From 5e7d5da2f41c1d762cd1dbdd97758be6c414ea29 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:10 +0800 Subject: mm: page_alloc: move init_on_alloc/free() into mm_init.c Since commit f2fc4b44ec2b ("mm: move init_mem_debugging_and_hardening() to mm/mm_init.c"), the init_on_alloc() and init_on_free() define is better to move there too. Link: https://lkml.kernel.org/r/20230516063821.121844-3-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Mike Rapoport (IBM) Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- mm/mm_init.c | 6 ++++++ mm/page_alloc.c | 5 ----- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index da162b7a044c..15201887f8e0 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2543,6 +2543,12 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn, __free_pages_core(page, order); } +DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); +EXPORT_SYMBOL(init_on_alloc); + +DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); +EXPORT_SYMBOL(init_on_free); + static bool _init_on_alloc_enabled_early __read_mostly = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON); static int __init early_init_on_alloc(char *buf) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d1086aeca8f2..4f094ba7c8fb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -233,11 +233,6 @@ unsigned long totalcma_pages __read_mostly; int percpu_pagelist_high_fraction; gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; -DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); -EXPORT_SYMBOL(init_on_alloc); - -DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); -EXPORT_SYMBOL(init_on_free); /* * A cached value of the page's pageblock's migratetype, used when the page is -- cgit v1.2.3 From 904d58578fce531be07619a2bc2cdc16c9fd49b6 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:11 +0800 Subject: mm: page_alloc: move set_zone_contiguous() into mm_init.c set_zone_contiguous() is only used in mm init/hotplug, and clear_zone_contiguous() only used in hotplug, move them from page_alloc.c to the more appropriate file. Link: https://lkml.kernel.org/r/20230516063821.121844-4-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. 
Wysocki Signed-off-by: Andrew Morton --- include/linux/memory_hotplug.h | 3 --- mm/internal.h | 7 +++++++ mm/mm_init.c | 22 ++++++++++++++++++++++ mm/page_alloc.c | 27 --------------------------- 4 files changed, 29 insertions(+), 30 deletions(-) (limited to 'mm') diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 9fcbf5706595..04bc286eed42 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -326,9 +326,6 @@ static inline int remove_memory(u64 start, u64 size) static inline void __remove_memory(u64 start, u64 size) {} #endif /* CONFIG_MEMORY_HOTREMOVE */ -extern void set_zone_contiguous(struct zone *zone); -extern void clear_zone_contiguous(struct zone *zone); - #ifdef CONFIG_MEMORY_HOTPLUG extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat); extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); diff --git a/mm/internal.h b/mm/internal.h index 68410c6d97ac..c99da2cfac71 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -371,6 +371,13 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn, return __pageblock_pfn_to_page(start_pfn, end_pfn, zone); } +void set_zone_contiguous(struct zone *zone); + +static inline void clear_zone_contiguous(struct zone *zone) +{ + zone->contiguous = false; +} + extern int __isolate_free_page(struct page *page, unsigned int order); extern void __putback_isolated_page(struct page *page, unsigned int order, int mt); diff --git a/mm/mm_init.c b/mm/mm_init.c index 15201887f8e0..0fd4ddfdfb2e 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2330,6 +2330,28 @@ void __init init_cma_reserved_pageblock(struct page *page) } #endif +void set_zone_contiguous(struct zone *zone) +{ + unsigned long block_start_pfn = zone->zone_start_pfn; + unsigned long block_end_pfn; + + block_end_pfn = pageblock_end_pfn(block_start_pfn); + for (; block_start_pfn < zone_end_pfn(zone); + block_start_pfn = block_end_pfn, + block_end_pfn += pageblock_nr_pages) { + + block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); + + if (!__pageblock_pfn_to_page(block_start_pfn, + block_end_pfn, zone)) + return; + cond_resched(); + } + + /* We confirm that there is no hole */ + zone->contiguous = true; +} + void __init page_alloc_init_late(void) { struct zone *zone; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4f094ba7c8fb..7bb0d6abfe3d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1532,33 +1532,6 @@ struct page *__pageblock_pfn_to_page(unsigned long start_pfn, return start_page; } -void set_zone_contiguous(struct zone *zone) -{ - unsigned long block_start_pfn = zone->zone_start_pfn; - unsigned long block_end_pfn; - - block_end_pfn = pageblock_end_pfn(block_start_pfn); - for (; block_start_pfn < zone_end_pfn(zone); - block_start_pfn = block_end_pfn, - block_end_pfn += pageblock_nr_pages) { - - block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); - - if (!__pageblock_pfn_to_page(block_start_pfn, - block_end_pfn, zone)) - return; - cond_resched(); - } - - /* We confirm that there is no hole */ - zone->contiguous = true; -} - -void clear_zone_contiguous(struct zone *zone) -{ - zone->contiguous = false; -} - /* * The order of subdivision here is critical for the IO subsystem. 
* Please do not alter this order without good reasons and regression -- cgit v1.2.3 From e9aae1709264b2353d67d4846bd617c445a49fe0 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:12 +0800 Subject: mm: page_alloc: collect mem statistic into show_mem.c Let's move show_mem.c from lib to mm, as it belongs memory subsystem, also split some memory statistic related functions from page_alloc.c to show_mem.c, and we cleanup some unneeded include. There is no functional change. Link: https://lkml.kernel.org/r/20230516063821.121844-5-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- lib/Makefile | 2 +- lib/show_mem.c | 37 ----- mm/Makefile | 2 +- mm/page_alloc.c | 402 ---------------------------------------------------- mm/show_mem.c | 429 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 431 insertions(+), 441 deletions(-) delete mode 100644 lib/show_mem.c create mode 100644 mm/show_mem.c (limited to 'mm') diff --git a/lib/Makefile b/lib/Makefile index 876fcdeae34e..38f23f352736 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -30,7 +30,7 @@ endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o timerqueue.o xarray.o \ maple_tree.o idr.o extable.o irq_regs.o argv_split.o \ - flex_proportions.o ratelimit.o show_mem.o \ + flex_proportions.o ratelimit.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ nmi_backtrace.o win_minmax.o memcat_p.o \ diff --git a/lib/show_mem.c b/lib/show_mem.c deleted file mode 100644 index 1485c87be935..000000000000 --- a/lib/show_mem.c +++ /dev/null @@ -1,37 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Generic show_mem() implementation - * - * Copyright (C) 2008 Johannes Weiner - */ - -#include -#include - -void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) -{ - unsigned long total = 0, reserved = 0, highmem = 0; - struct zone *zone; - - printk("Mem-Info:\n"); - __show_free_areas(filter, nodemask, max_zone_idx); - - for_each_populated_zone(zone) { - - total += zone->present_pages; - reserved += zone->present_pages - zone_managed_pages(zone); - - if (is_highmem(zone)) - highmem += zone->present_pages; - } - - printk("%lu pages RAM\n", total); - printk("%lu pages HighMem/MovableOnly\n", highmem); - printk("%lu pages reserved\n", reserved); -#ifdef CONFIG_CMA - printk("%lu pages cma reserved\n", totalcma_pages); -#endif -#ifdef CONFIG_MEMORY_FAILURE - printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); -#endif -} diff --git a/mm/Makefile b/mm/Makefile index e29afc890cde..5262ce5baa28 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -51,7 +51,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \ readahead.o swap.o truncate.o vmscan.o shmem.o \ util.o mmzone.o vmstat.o backing-dev.o \ mm_init.o percpu.o slab_common.o \ - compaction.o \ + compaction.o show_mem.o\ interval_tree.o list_lru.o workingset.o \ debug.o gup.o mmap_lock.o $(mmu-y) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7bb0d6abfe3d..34a9fe934891 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -18,10 +18,7 @@ #include #include #include -#include -#include #include -#include #include #include #include @@ -30,8 +27,6 @@ #include #include #include -#include -#include #include #include #include @@ 
-40,19 +35,10 @@ #include #include #include -#include #include -#include -#include -#include -#include #include #include -#include #include -#include -#include -#include #include #include #include @@ -60,12 +46,9 @@ #include #include #include -#include -#include #include #include #include -#include #include #include #include @@ -73,13 +56,10 @@ #include #include #include -#include -#include #include #include "internal.h" #include "shuffle.h" #include "page_reporting.h" -#include "swap.h" /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ typedef int __bitwise fpi_t; @@ -226,11 +206,6 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = { }; EXPORT_SYMBOL(node_states); -atomic_long_t _totalram_pages __read_mostly; -EXPORT_SYMBOL(_totalram_pages); -unsigned long totalreserve_pages __read_mostly; -unsigned long totalcma_pages __read_mostly; - int percpu_pagelist_high_fraction; gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; @@ -5102,383 +5077,6 @@ unsigned long nr_free_buffer_pages(void) } EXPORT_SYMBOL_GPL(nr_free_buffer_pages); -static inline void show_node(struct zone *zone) -{ - if (IS_ENABLED(CONFIG_NUMA)) - printk("Node %d ", zone_to_nid(zone)); -} - -long si_mem_available(void) -{ - long available; - unsigned long pagecache; - unsigned long wmark_low = 0; - unsigned long pages[NR_LRU_LISTS]; - unsigned long reclaimable; - struct zone *zone; - int lru; - - for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) - pages[lru] = global_node_page_state(NR_LRU_BASE + lru); - - for_each_zone(zone) - wmark_low += low_wmark_pages(zone); - - /* - * Estimate the amount of memory available for userspace allocations, - * without causing swapping or OOM. - */ - available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; - - /* - * Not all the page cache can be freed, otherwise the system will - * start swapping or thrashing. Assume at least half of the page - * cache, or the low watermark worth of cache, needs to stay. - */ - pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; - pagecache -= min(pagecache / 2, wmark_low); - available += pagecache; - - /* - * Part of the reclaimable slab and other kernel memory consists of - * items that are in use, and cannot be freed. Cap this estimate at the - * low watermark. 
- */ - reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + - global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); - available += reclaimable - min(reclaimable / 2, wmark_low); - - if (available < 0) - available = 0; - return available; -} -EXPORT_SYMBOL_GPL(si_mem_available); - -void si_meminfo(struct sysinfo *val) -{ - val->totalram = totalram_pages(); - val->sharedram = global_node_page_state(NR_SHMEM); - val->freeram = global_zone_page_state(NR_FREE_PAGES); - val->bufferram = nr_blockdev_pages(); - val->totalhigh = totalhigh_pages(); - val->freehigh = nr_free_highpages(); - val->mem_unit = PAGE_SIZE; -} - -EXPORT_SYMBOL(si_meminfo); - -#ifdef CONFIG_NUMA -void si_meminfo_node(struct sysinfo *val, int nid) -{ - int zone_type; /* needs to be signed */ - unsigned long managed_pages = 0; - unsigned long managed_highpages = 0; - unsigned long free_highpages = 0; - pg_data_t *pgdat = NODE_DATA(nid); - - for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) - managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); - val->totalram = managed_pages; - val->sharedram = node_page_state(pgdat, NR_SHMEM); - val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); -#ifdef CONFIG_HIGHMEM - for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { - struct zone *zone = &pgdat->node_zones[zone_type]; - - if (is_highmem(zone)) { - managed_highpages += zone_managed_pages(zone); - free_highpages += zone_page_state(zone, NR_FREE_PAGES); - } - } - val->totalhigh = managed_highpages; - val->freehigh = free_highpages; -#else - val->totalhigh = managed_highpages; - val->freehigh = free_highpages; -#endif - val->mem_unit = PAGE_SIZE; -} -#endif - -/* - * Determine whether the node should be displayed or not, depending on whether - * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). - */ -static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) -{ - if (!(flags & SHOW_MEM_FILTER_NODES)) - return false; - - /* - * no node mask - aka implicit memory numa policy. Do not bother with - * the synchronization - read_mems_allowed_begin - because we do not - * have to be precise here. - */ - if (!nodemask) - nodemask = &cpuset_current_mems_allowed; - - return !node_isset(nid, *nodemask); -} - -static void show_migration_types(unsigned char type) -{ - static const char types[MIGRATE_TYPES] = { - [MIGRATE_UNMOVABLE] = 'U', - [MIGRATE_MOVABLE] = 'M', - [MIGRATE_RECLAIMABLE] = 'E', - [MIGRATE_HIGHATOMIC] = 'H', -#ifdef CONFIG_CMA - [MIGRATE_CMA] = 'C', -#endif -#ifdef CONFIG_MEMORY_ISOLATION - [MIGRATE_ISOLATE] = 'I', -#endif - }; - char tmp[MIGRATE_TYPES + 1]; - char *p = tmp; - int i; - - for (i = 0; i < MIGRATE_TYPES; i++) { - if (type & (1 << i)) - *p++ = types[i]; - } - - *p = '\0'; - printk(KERN_CONT "(%s) ", tmp); -} - -static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx) -{ - int zone_idx; - for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++) - if (zone_managed_pages(pgdat->node_zones + zone_idx)) - return true; - return false; -} - -/* - * Show free area list (used inside shift_scroll-lock stuff) - * We also calculate the percentage fragmentation. We do this by counting the - * memory on each free list with the exception of the first item on the list. - * - * Bits in @filter: - * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's - * cpuset. 
- */ -void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) -{ - unsigned long free_pcp = 0; - int cpu, nid; - struct zone *zone; - pg_data_t *pgdat; - - for_each_populated_zone(zone) { - if (zone_idx(zone) > max_zone_idx) - continue; - if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) - continue; - - for_each_online_cpu(cpu) - free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; - } - - printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" - " active_file:%lu inactive_file:%lu isolated_file:%lu\n" - " unevictable:%lu dirty:%lu writeback:%lu\n" - " slab_reclaimable:%lu slab_unreclaimable:%lu\n" - " mapped:%lu shmem:%lu pagetables:%lu\n" - " sec_pagetables:%lu bounce:%lu\n" - " kernel_misc_reclaimable:%lu\n" - " free:%lu free_pcp:%lu free_cma:%lu\n", - global_node_page_state(NR_ACTIVE_ANON), - global_node_page_state(NR_INACTIVE_ANON), - global_node_page_state(NR_ISOLATED_ANON), - global_node_page_state(NR_ACTIVE_FILE), - global_node_page_state(NR_INACTIVE_FILE), - global_node_page_state(NR_ISOLATED_FILE), - global_node_page_state(NR_UNEVICTABLE), - global_node_page_state(NR_FILE_DIRTY), - global_node_page_state(NR_WRITEBACK), - global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), - global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), - global_node_page_state(NR_FILE_MAPPED), - global_node_page_state(NR_SHMEM), - global_node_page_state(NR_PAGETABLE), - global_node_page_state(NR_SECONDARY_PAGETABLE), - global_zone_page_state(NR_BOUNCE), - global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE), - global_zone_page_state(NR_FREE_PAGES), - free_pcp, - global_zone_page_state(NR_FREE_CMA_PAGES)); - - for_each_online_pgdat(pgdat) { - if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) - continue; - if (!node_has_managed_zones(pgdat, max_zone_idx)) - continue; - - printk("Node %d" - " active_anon:%lukB" - " inactive_anon:%lukB" - " active_file:%lukB" - " inactive_file:%lukB" - " unevictable:%lukB" - " isolated(anon):%lukB" - " isolated(file):%lukB" - " mapped:%lukB" - " dirty:%lukB" - " writeback:%lukB" - " shmem:%lukB" -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - " shmem_thp: %lukB" - " shmem_pmdmapped: %lukB" - " anon_thp: %lukB" -#endif - " writeback_tmp:%lukB" - " kernel_stack:%lukB" -#ifdef CONFIG_SHADOW_CALL_STACK - " shadow_call_stack:%lukB" -#endif - " pagetables:%lukB" - " sec_pagetables:%lukB" - " all_unreclaimable? %s" - "\n", - pgdat->node_id, - K(node_page_state(pgdat, NR_ACTIVE_ANON)), - K(node_page_state(pgdat, NR_INACTIVE_ANON)), - K(node_page_state(pgdat, NR_ACTIVE_FILE)), - K(node_page_state(pgdat, NR_INACTIVE_FILE)), - K(node_page_state(pgdat, NR_UNEVICTABLE)), - K(node_page_state(pgdat, NR_ISOLATED_ANON)), - K(node_page_state(pgdat, NR_ISOLATED_FILE)), - K(node_page_state(pgdat, NR_FILE_MAPPED)), - K(node_page_state(pgdat, NR_FILE_DIRTY)), - K(node_page_state(pgdat, NR_WRITEBACK)), - K(node_page_state(pgdat, NR_SHMEM)), -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - K(node_page_state(pgdat, NR_SHMEM_THPS)), - K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), - K(node_page_state(pgdat, NR_ANON_THPS)), -#endif - K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), - node_page_state(pgdat, NR_KERNEL_STACK_KB), -#ifdef CONFIG_SHADOW_CALL_STACK - node_page_state(pgdat, NR_KERNEL_SCS_KB), -#endif - K(node_page_state(pgdat, NR_PAGETABLE)), - K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)), - pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? 
- "yes" : "no"); - } - - for_each_populated_zone(zone) { - int i; - - if (zone_idx(zone) > max_zone_idx) - continue; - if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) - continue; - - free_pcp = 0; - for_each_online_cpu(cpu) - free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; - - show_node(zone); - printk(KERN_CONT - "%s" - " free:%lukB" - " boost:%lukB" - " min:%lukB" - " low:%lukB" - " high:%lukB" - " reserved_highatomic:%luKB" - " active_anon:%lukB" - " inactive_anon:%lukB" - " active_file:%lukB" - " inactive_file:%lukB" - " unevictable:%lukB" - " writepending:%lukB" - " present:%lukB" - " managed:%lukB" - " mlocked:%lukB" - " bounce:%lukB" - " free_pcp:%lukB" - " local_pcp:%ukB" - " free_cma:%lukB" - "\n", - zone->name, - K(zone_page_state(zone, NR_FREE_PAGES)), - K(zone->watermark_boost), - K(min_wmark_pages(zone)), - K(low_wmark_pages(zone)), - K(high_wmark_pages(zone)), - K(zone->nr_reserved_highatomic), - K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), - K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), - K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), - K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), - K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), - K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), - K(zone->present_pages), - K(zone_managed_pages(zone)), - K(zone_page_state(zone, NR_MLOCK)), - K(zone_page_state(zone, NR_BOUNCE)), - K(free_pcp), - K(this_cpu_read(zone->per_cpu_pageset->count)), - K(zone_page_state(zone, NR_FREE_CMA_PAGES))); - printk("lowmem_reserve[]:"); - for (i = 0; i < MAX_NR_ZONES; i++) - printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); - printk(KERN_CONT "\n"); - } - - for_each_populated_zone(zone) { - unsigned int order; - unsigned long nr[MAX_ORDER + 1], flags, total = 0; - unsigned char types[MAX_ORDER + 1]; - - if (zone_idx(zone) > max_zone_idx) - continue; - if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) - continue; - show_node(zone); - printk(KERN_CONT "%s: ", zone->name); - - spin_lock_irqsave(&zone->lock, flags); - for (order = 0; order <= MAX_ORDER; order++) { - struct free_area *area = &zone->free_area[order]; - int type; - - nr[order] = area->nr_free; - total += nr[order] << order; - - types[order] = 0; - for (type = 0; type < MIGRATE_TYPES; type++) { - if (!free_area_empty(area, type)) - types[order] |= 1 << type; - } - } - spin_unlock_irqrestore(&zone->lock, flags); - for (order = 0; order <= MAX_ORDER; order++) { - printk(KERN_CONT "%lu*%lukB ", - nr[order], K(1UL) << order); - if (nr[order]) - show_migration_types(types[order]); - } - printk(KERN_CONT "= %lukB\n", K(total)); - } - - for_each_online_node(nid) { - if (show_mem_node_skip(filter, nid, nodemask)) - continue; - hugetlb_show_meminfo_node(nid); - } - - printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); - - show_swap_cache_info(); -} - static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) { zoneref->zone = zone; diff --git a/mm/show_mem.c b/mm/show_mem.c new file mode 100644 index 000000000000..01f8e9905817 --- /dev/null +++ b/mm/show_mem.c @@ -0,0 +1,429 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic show_mem() implementation + * + * Copyright (C) 2008 Johannes Weiner + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" +#include "swap.h" + +atomic_long_t _totalram_pages __read_mostly; +EXPORT_SYMBOL(_totalram_pages); +unsigned long totalreserve_pages __read_mostly; +unsigned long totalcma_pages __read_mostly; + +static 
inline void show_node(struct zone *zone) +{ + if (IS_ENABLED(CONFIG_NUMA)) + printk("Node %d ", zone_to_nid(zone)); +} + +long si_mem_available(void) +{ + long available; + unsigned long pagecache; + unsigned long wmark_low = 0; + unsigned long pages[NR_LRU_LISTS]; + unsigned long reclaimable; + struct zone *zone; + int lru; + + for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) + pages[lru] = global_node_page_state(NR_LRU_BASE + lru); + + for_each_zone(zone) + wmark_low += low_wmark_pages(zone); + + /* + * Estimate the amount of memory available for userspace allocations, + * without causing swapping or OOM. + */ + available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; + + /* + * Not all the page cache can be freed, otherwise the system will + * start swapping or thrashing. Assume at least half of the page + * cache, or the low watermark worth of cache, needs to stay. + */ + pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; + pagecache -= min(pagecache / 2, wmark_low); + available += pagecache; + + /* + * Part of the reclaimable slab and other kernel memory consists of + * items that are in use, and cannot be freed. Cap this estimate at the + * low watermark. + */ + reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + + global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); + available += reclaimable - min(reclaimable / 2, wmark_low); + + if (available < 0) + available = 0; + return available; +} +EXPORT_SYMBOL_GPL(si_mem_available); + +void si_meminfo(struct sysinfo *val) +{ + val->totalram = totalram_pages(); + val->sharedram = global_node_page_state(NR_SHMEM); + val->freeram = global_zone_page_state(NR_FREE_PAGES); + val->bufferram = nr_blockdev_pages(); + val->totalhigh = totalhigh_pages(); + val->freehigh = nr_free_highpages(); + val->mem_unit = PAGE_SIZE; +} + +EXPORT_SYMBOL(si_meminfo); + +#ifdef CONFIG_NUMA +void si_meminfo_node(struct sysinfo *val, int nid) +{ + int zone_type; /* needs to be signed */ + unsigned long managed_pages = 0; + unsigned long managed_highpages = 0; + unsigned long free_highpages = 0; + pg_data_t *pgdat = NODE_DATA(nid); + + for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) + managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); + val->totalram = managed_pages; + val->sharedram = node_page_state(pgdat, NR_SHMEM); + val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); +#ifdef CONFIG_HIGHMEM + for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { + struct zone *zone = &pgdat->node_zones[zone_type]; + + if (is_highmem(zone)) { + managed_highpages += zone_managed_pages(zone); + free_highpages += zone_page_state(zone, NR_FREE_PAGES); + } + } + val->totalhigh = managed_highpages; + val->freehigh = free_highpages; +#else + val->totalhigh = managed_highpages; + val->freehigh = free_highpages; +#endif + val->mem_unit = PAGE_SIZE; +} +#endif + +/* + * Determine whether the node should be displayed or not, depending on whether + * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). + */ +static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) +{ + if (!(flags & SHOW_MEM_FILTER_NODES)) + return false; + + /* + * no node mask - aka implicit memory numa policy. Do not bother with + * the synchronization - read_mems_allowed_begin - because we do not + * have to be precise here. 
+ */ + if (!nodemask) + nodemask = &cpuset_current_mems_allowed; + + return !node_isset(nid, *nodemask); +} + +static void show_migration_types(unsigned char type) +{ + static const char types[MIGRATE_TYPES] = { + [MIGRATE_UNMOVABLE] = 'U', + [MIGRATE_MOVABLE] = 'M', + [MIGRATE_RECLAIMABLE] = 'E', + [MIGRATE_HIGHATOMIC] = 'H', +#ifdef CONFIG_CMA + [MIGRATE_CMA] = 'C', +#endif +#ifdef CONFIG_MEMORY_ISOLATION + [MIGRATE_ISOLATE] = 'I', +#endif + }; + char tmp[MIGRATE_TYPES + 1]; + char *p = tmp; + int i; + + for (i = 0; i < MIGRATE_TYPES; i++) { + if (type & (1 << i)) + *p++ = types[i]; + } + + *p = '\0'; + printk(KERN_CONT "(%s) ", tmp); +} + +static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx) +{ + int zone_idx; + for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++) + if (zone_managed_pages(pgdat->node_zones + zone_idx)) + return true; + return false; +} + +/* + * Show free area list (used inside shift_scroll-lock stuff) + * We also calculate the percentage fragmentation. We do this by counting the + * memory on each free list with the exception of the first item on the list. + * + * Bits in @filter: + * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's + * cpuset. + */ +void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) +{ + unsigned long free_pcp = 0; + int cpu, nid; + struct zone *zone; + pg_data_t *pgdat; + + for_each_populated_zone(zone) { + if (zone_idx(zone) > max_zone_idx) + continue; + if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) + continue; + + for_each_online_cpu(cpu) + free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; + } + + printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" + " active_file:%lu inactive_file:%lu isolated_file:%lu\n" + " unevictable:%lu dirty:%lu writeback:%lu\n" + " slab_reclaimable:%lu slab_unreclaimable:%lu\n" + " mapped:%lu shmem:%lu pagetables:%lu\n" + " sec_pagetables:%lu bounce:%lu\n" + " kernel_misc_reclaimable:%lu\n" + " free:%lu free_pcp:%lu free_cma:%lu\n", + global_node_page_state(NR_ACTIVE_ANON), + global_node_page_state(NR_INACTIVE_ANON), + global_node_page_state(NR_ISOLATED_ANON), + global_node_page_state(NR_ACTIVE_FILE), + global_node_page_state(NR_INACTIVE_FILE), + global_node_page_state(NR_ISOLATED_FILE), + global_node_page_state(NR_UNEVICTABLE), + global_node_page_state(NR_FILE_DIRTY), + global_node_page_state(NR_WRITEBACK), + global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), + global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), + global_node_page_state(NR_FILE_MAPPED), + global_node_page_state(NR_SHMEM), + global_node_page_state(NR_PAGETABLE), + global_node_page_state(NR_SECONDARY_PAGETABLE), + global_zone_page_state(NR_BOUNCE), + global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE), + global_zone_page_state(NR_FREE_PAGES), + free_pcp, + global_zone_page_state(NR_FREE_CMA_PAGES)); + + for_each_online_pgdat(pgdat) { + if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) + continue; + if (!node_has_managed_zones(pgdat, max_zone_idx)) + continue; + + printk("Node %d" + " active_anon:%lukB" + " inactive_anon:%lukB" + " active_file:%lukB" + " inactive_file:%lukB" + " unevictable:%lukB" + " isolated(anon):%lukB" + " isolated(file):%lukB" + " mapped:%lukB" + " dirty:%lukB" + " writeback:%lukB" + " shmem:%lukB" +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + " shmem_thp: %lukB" + " shmem_pmdmapped: %lukB" + " anon_thp: %lukB" +#endif + " writeback_tmp:%lukB" + " kernel_stack:%lukB" +#ifdef CONFIG_SHADOW_CALL_STACK + " 
shadow_call_stack:%lukB" +#endif + " pagetables:%lukB" + " sec_pagetables:%lukB" + " all_unreclaimable? %s" + "\n", + pgdat->node_id, + K(node_page_state(pgdat, NR_ACTIVE_ANON)), + K(node_page_state(pgdat, NR_INACTIVE_ANON)), + K(node_page_state(pgdat, NR_ACTIVE_FILE)), + K(node_page_state(pgdat, NR_INACTIVE_FILE)), + K(node_page_state(pgdat, NR_UNEVICTABLE)), + K(node_page_state(pgdat, NR_ISOLATED_ANON)), + K(node_page_state(pgdat, NR_ISOLATED_FILE)), + K(node_page_state(pgdat, NR_FILE_MAPPED)), + K(node_page_state(pgdat, NR_FILE_DIRTY)), + K(node_page_state(pgdat, NR_WRITEBACK)), + K(node_page_state(pgdat, NR_SHMEM)), +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + K(node_page_state(pgdat, NR_SHMEM_THPS)), + K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), + K(node_page_state(pgdat, NR_ANON_THPS)), +#endif + K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), + node_page_state(pgdat, NR_KERNEL_STACK_KB), +#ifdef CONFIG_SHADOW_CALL_STACK + node_page_state(pgdat, NR_KERNEL_SCS_KB), +#endif + K(node_page_state(pgdat, NR_PAGETABLE)), + K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)), + pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? + "yes" : "no"); + } + + for_each_populated_zone(zone) { + int i; + + if (zone_idx(zone) > max_zone_idx) + continue; + if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) + continue; + + free_pcp = 0; + for_each_online_cpu(cpu) + free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; + + show_node(zone); + printk(KERN_CONT + "%s" + " free:%lukB" + " boost:%lukB" + " min:%lukB" + " low:%lukB" + " high:%lukB" + " reserved_highatomic:%luKB" + " active_anon:%lukB" + " inactive_anon:%lukB" + " active_file:%lukB" + " inactive_file:%lukB" + " unevictable:%lukB" + " writepending:%lukB" + " present:%lukB" + " managed:%lukB" + " mlocked:%lukB" + " bounce:%lukB" + " free_pcp:%lukB" + " local_pcp:%ukB" + " free_cma:%lukB" + "\n", + zone->name, + K(zone_page_state(zone, NR_FREE_PAGES)), + K(zone->watermark_boost), + K(min_wmark_pages(zone)), + K(low_wmark_pages(zone)), + K(high_wmark_pages(zone)), + K(zone->nr_reserved_highatomic), + K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), + K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), + K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), + K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), + K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), + K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), + K(zone->present_pages), + K(zone_managed_pages(zone)), + K(zone_page_state(zone, NR_MLOCK)), + K(zone_page_state(zone, NR_BOUNCE)), + K(free_pcp), + K(this_cpu_read(zone->per_cpu_pageset->count)), + K(zone_page_state(zone, NR_FREE_CMA_PAGES))); + printk("lowmem_reserve[]:"); + for (i = 0; i < MAX_NR_ZONES; i++) + printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); + printk(KERN_CONT "\n"); + } + + for_each_populated_zone(zone) { + unsigned int order; + unsigned long nr[MAX_ORDER + 1], flags, total = 0; + unsigned char types[MAX_ORDER + 1]; + + if (zone_idx(zone) > max_zone_idx) + continue; + if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) + continue; + show_node(zone); + printk(KERN_CONT "%s: ", zone->name); + + spin_lock_irqsave(&zone->lock, flags); + for (order = 0; order <= MAX_ORDER; order++) { + struct free_area *area = &zone->free_area[order]; + int type; + + nr[order] = area->nr_free; + total += nr[order] << order; + + types[order] = 0; + for (type = 0; type < MIGRATE_TYPES; type++) { + if (!free_area_empty(area, type)) + types[order] |= 1 << type; + } + } + spin_unlock_irqrestore(&zone->lock, flags); + for (order = 0; order <= 
MAX_ORDER; order++) { + printk(KERN_CONT "%lu*%lukB ", + nr[order], K(1UL) << order); + if (nr[order]) + show_migration_types(types[order]); + } + printk(KERN_CONT "= %lukB\n", K(total)); + } + + for_each_online_node(nid) { + if (show_mem_node_skip(filter, nid, nodemask)) + continue; + hugetlb_show_meminfo_node(nid); + } + + printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); + + show_swap_cache_info(); +} + +void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) +{ + unsigned long total = 0, reserved = 0, highmem = 0; + struct zone *zone; + + printk("Mem-Info:\n"); + __show_free_areas(filter, nodemask, max_zone_idx); + + for_each_populated_zone(zone) { + + total += zone->present_pages; + reserved += zone->present_pages - zone_managed_pages(zone); + + if (is_highmem(zone)) + highmem += zone->present_pages; + } + + printk("%lu pages RAM\n", total); + printk("%lu pages HighMem/MovableOnly\n", highmem); + printk("%lu pages reserved\n", reserved); +#ifdef CONFIG_CMA + printk("%lu pages cma reserved\n", totalcma_pages); +#endif +#ifdef CONFIG_MEMORY_FAILURE + printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); +#endif +} -- cgit v1.2.3 From 5b855aa37cf742960d4f8a6c5c8e16092d0463be Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:13 +0800 Subject: mm: page_alloc: squash page_is_consistent() Squash the page_is_consistent() into bad_range() as there is only one caller. Link: https://lkml.kernel.org/r/20230516063821.121844-6-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Mike Rapoport (IBM) Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- mm/page_alloc.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 34a9fe934891..ce0d81686de3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -517,13 +517,6 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page) return ret; } -static int page_is_consistent(struct zone *zone, struct page *page) -{ - if (zone != page_zone(page)) - return 0; - - return 1; -} /* * Temporary debugging check for pages not lying within a given zone. */ @@ -531,7 +524,7 @@ static int __maybe_unused bad_range(struct zone *zone, struct page *page) { if (page_outside_zone_boundaries(zone, page)) return 1; - if (!page_is_consistent(zone, page)) + if (zone != page_zone(page)) return 1; return 0; -- cgit v1.2.3 From e9f2b529e10f9ca8d25ac83e574e027d504de879 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:14 +0800 Subject: mm: page_alloc: remove alloc_contig_dump_pages() stub DEFINE_DYNAMIC_DEBUG_METADATA and DYNAMIC_DEBUG_BRANCH already has stub definitions without dynamic debug feature, remove unnecessary alloc_contig_dump_pages() stub. Link: https://lkml.kernel.org/r/20230516063821.121844-7-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Mike Rapoport (IBM) Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. 
Wysocki Signed-off-by: Andrew Morton --- mm/page_alloc.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ce0d81686de3..37ac82083229 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6213,8 +6213,6 @@ out: } #ifdef CONFIG_CONTIG_ALLOC -#if defined(CONFIG_DYNAMIC_DEBUG) || \ - (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) /* Usage: See admin-guide/dynamic-debug-howto.rst */ static void alloc_contig_dump_pages(struct list_head *page_list) { @@ -6228,11 +6226,6 @@ static void alloc_contig_dump_pages(struct list_head *page_list) dump_page(page, "migration failure"); } } -#else -static inline void alloc_contig_dump_pages(struct list_head *page_list) -{ -} -#endif /* [start, end) must belong to a single zone. */ int __alloc_contig_migrate_range(struct compact_control *cc, -- cgit v1.2.3 From 0866e82e40fba45dae07e6e8385929b574201752 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:15 +0800 Subject: mm: page_alloc: split out FAIL_PAGE_ALLOC ... to a single file to reduce a bit of page_alloc.c. Link: https://lkml.kernel.org/r/20230516063821.121844-8-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- include/linux/fault-inject.h | 9 ++++++ mm/Makefile | 1 + mm/fail_page_alloc.c | 66 +++++++++++++++++++++++++++++++++++++++ mm/page_alloc.c | 74 -------------------------------------------- 4 files changed, 76 insertions(+), 74 deletions(-) create mode 100644 mm/fail_page_alloc.c (limited to 'mm') diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 481abf530b3c..6d5edef09d45 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -93,6 +93,15 @@ struct kmem_cache; bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order); +#ifdef CONFIG_FAIL_PAGE_ALLOC +bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order); +#else +static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) +{ + return false; +} +#endif /* CONFIG_FAIL_PAGE_ALLOC */ + int should_failslab(struct kmem_cache *s, gfp_t gfpflags); #ifdef CONFIG_FAILSLAB extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags); diff --git a/mm/Makefile b/mm/Makefile index 5262ce5baa28..0eec4bc72d3f 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -89,6 +89,7 @@ obj-$(CONFIG_KASAN) += kasan/ obj-$(CONFIG_KFENCE) += kfence/ obj-$(CONFIG_KMSAN) += kmsan/ obj-$(CONFIG_FAILSLAB) += failslab.o +obj-$(CONFIG_FAIL_PAGE_ALLOC) += fail_page_alloc.o obj-$(CONFIG_MEMTEST) += memtest.o obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_NUMA) += memory-tiers.o diff --git a/mm/fail_page_alloc.c b/mm/fail_page_alloc.c new file mode 100644 index 000000000000..b1b09cce9394 --- /dev/null +++ b/mm/fail_page_alloc.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +static struct { + struct fault_attr attr; + + bool ignore_gfp_highmem; + bool ignore_gfp_reclaim; + u32 min_order; +} fail_page_alloc = { + .attr = FAULT_ATTR_INITIALIZER, + .ignore_gfp_reclaim = true, + .ignore_gfp_highmem = true, + .min_order = 1, +}; + +static int __init setup_fail_page_alloc(char *str) +{ + return setup_fault_attr(&fail_page_alloc.attr, str); +} +__setup("fail_page_alloc=", setup_fail_page_alloc); + +bool __should_fail_alloc_page(gfp_t 
gfp_mask, unsigned int order) +{ + int flags = 0; + + if (order < fail_page_alloc.min_order) + return false; + if (gfp_mask & __GFP_NOFAIL) + return false; + if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) + return false; + if (fail_page_alloc.ignore_gfp_reclaim && + (gfp_mask & __GFP_DIRECT_RECLAIM)) + return false; + + /* See comment in __should_failslab() */ + if (gfp_mask & __GFP_NOWARN) + flags |= FAULT_NOWARN; + + return should_fail_ex(&fail_page_alloc.attr, 1 << order, flags); +} + +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS + +static int __init fail_page_alloc_debugfs(void) +{ + umode_t mode = S_IFREG | 0600; + struct dentry *dir; + + dir = fault_create_debugfs_attr("fail_page_alloc", NULL, + &fail_page_alloc.attr); + + debugfs_create_bool("ignore-gfp-wait", mode, dir, + &fail_page_alloc.ignore_gfp_reclaim); + debugfs_create_bool("ignore-gfp-highmem", mode, dir, + &fail_page_alloc.ignore_gfp_highmem); + debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order); + + return 0; +} + +late_initcall(fail_page_alloc_debugfs); + +#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 37ac82083229..2b8e4a086c3d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2994,80 +2994,6 @@ out: return page; } -#ifdef CONFIG_FAIL_PAGE_ALLOC - -static struct { - struct fault_attr attr; - - bool ignore_gfp_highmem; - bool ignore_gfp_reclaim; - u32 min_order; -} fail_page_alloc = { - .attr = FAULT_ATTR_INITIALIZER, - .ignore_gfp_reclaim = true, - .ignore_gfp_highmem = true, - .min_order = 1, -}; - -static int __init setup_fail_page_alloc(char *str) -{ - return setup_fault_attr(&fail_page_alloc.attr, str); -} -__setup("fail_page_alloc=", setup_fail_page_alloc); - -static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) -{ - int flags = 0; - - if (order < fail_page_alloc.min_order) - return false; - if (gfp_mask & __GFP_NOFAIL) - return false; - if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) - return false; - if (fail_page_alloc.ignore_gfp_reclaim && - (gfp_mask & __GFP_DIRECT_RECLAIM)) - return false; - - /* See comment in __should_failslab() */ - if (gfp_mask & __GFP_NOWARN) - flags |= FAULT_NOWARN; - - return should_fail_ex(&fail_page_alloc.attr, 1 << order, flags); -} - -#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS - -static int __init fail_page_alloc_debugfs(void) -{ - umode_t mode = S_IFREG | 0600; - struct dentry *dir; - - dir = fault_create_debugfs_attr("fail_page_alloc", NULL, - &fail_page_alloc.attr); - - debugfs_create_bool("ignore-gfp-wait", mode, dir, - &fail_page_alloc.ignore_gfp_reclaim); - debugfs_create_bool("ignore-gfp-highmem", mode, dir, - &fail_page_alloc.ignore_gfp_highmem); - debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order); - - return 0; -} - -late_initcall(fail_page_alloc_debugfs); - -#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ - -#else /* CONFIG_FAIL_PAGE_ALLOC */ - -static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) -{ - return false; -} - -#endif /* CONFIG_FAIL_PAGE_ALLOC */ - noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) { return __should_fail_alloc_page(gfp_mask, order); -- cgit v1.2.3 From 884c175f12ce1fabff18ac113349628149fc6cf2 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:16 +0800 Subject: mm: page_alloc: split out DEBUG_PAGEALLOC Move DEBUG_PAGEALLOC related functions into a single file to reduce a bit of page_alloc.c. 
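The mechanical shape of this split is worth noting: only the static-key test stays inline in <linux/mm.h>, while the debug_guardpage_minorder() comparison and the actual page-flag and freepage-state updates move out of line into mm/debug_page_alloc.c, so builds without the feature (and the common fast path with it) pay essentially nothing. A rough, userspace-runnable sketch of that wrapper-plus-helper shape, with a plain bool standing in for the static key and every name invented for illustration rather than taken from the kernel:

  #include <stdbool.h>
  #include <stdio.h>

  static bool guard_enabled;            /* stand-in for the static key */
  static unsigned int guard_minorder;   /* stand-in for _debug_guardpage_minorder */

  /* out-of-line "slow path": only reached when the feature is enabled */
  static bool __set_guard(unsigned int order)
  {
          if (order >= guard_minorder)
                  return false;
          printf("order-%u block turned into a guard block\n", order);
          return true;
  }

  /* header-style inline wrapper: cheap enabled test, then call out of line */
  static inline bool set_guard(unsigned int order)
  {
          if (!guard_enabled)
                  return false;
          return __set_guard(order);
  }

  int main(void)
  {
          guard_enabled = true;
          guard_minorder = 2;
          set_guard(1);   /* below the minimum order, becomes a guard block */
          set_guard(4);   /* too large, left alone */
          return 0;
  }
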
Link: https://lkml.kernel.org/r/20230516063821.121844-9-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- include/linux/mm.h | 76 +++++++++++++++++++++++++++++++++------------------ mm/Makefile | 1 + mm/debug_page_alloc.c | 59 +++++++++++++++++++++++++++++++++++++++ mm/page_alloc.c | 69 ---------------------------------------------- 4 files changed, 109 insertions(+), 96 deletions(-) create mode 100644 mm/debug_page_alloc.c (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index f64bfbd53c65..2382eaf6fd81 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3471,9 +3471,58 @@ static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) if (debug_pagealloc_enabled_static()) __kernel_map_pages(page, numpages, 0); } + +extern unsigned int _debug_guardpage_minorder; +DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled); + +static inline unsigned int debug_guardpage_minorder(void) +{ + return _debug_guardpage_minorder; +} + +static inline bool debug_guardpage_enabled(void) +{ + return static_branch_unlikely(&_debug_guardpage_enabled); +} + +static inline bool page_is_guard(struct page *page) +{ + if (!debug_guardpage_enabled()) + return false; + + return PageGuard(page); +} + +bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order, + int migratetype); +static inline bool set_page_guard(struct zone *zone, struct page *page, + unsigned int order, int migratetype) +{ + if (!debug_guardpage_enabled()) + return false; + return __set_page_guard(zone, page, order, migratetype); +} + +void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order, + int migratetype); +static inline void clear_page_guard(struct zone *zone, struct page *page, + unsigned int order, int migratetype) +{ + if (!debug_guardpage_enabled()) + return; + __clear_page_guard(zone, page, order, migratetype); +} + #else /* CONFIG_DEBUG_PAGEALLOC */ static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {} static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {} +static inline unsigned int debug_guardpage_minorder(void) { return 0; } +static inline bool debug_guardpage_enabled(void) { return false; } +static inline bool page_is_guard(struct page *page) { return false; } +static inline bool set_page_guard(struct zone *zone, struct page *page, + unsigned int order, int migratetype) { return false; } +static inline void clear_page_guard(struct zone *zone, struct page *page, + unsigned int order, int migratetype) {} #endif /* CONFIG_DEBUG_PAGEALLOC */ #ifdef __HAVE_ARCH_GATE_AREA @@ -3711,33 +3760,6 @@ static inline bool vma_is_special_huge(const struct vm_area_struct *vma) #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ -#ifdef CONFIG_DEBUG_PAGEALLOC -extern unsigned int _debug_guardpage_minorder; -DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled); - -static inline unsigned int debug_guardpage_minorder(void) -{ - return _debug_guardpage_minorder; -} - -static inline bool debug_guardpage_enabled(void) -{ - return static_branch_unlikely(&_debug_guardpage_enabled); -} - -static inline bool page_is_guard(struct page *page) -{ - if (!debug_guardpage_enabled()) - return false; - - return PageGuard(page); -} -#else -static inline unsigned int debug_guardpage_minorder(void) { 
return 0; } -static inline bool debug_guardpage_enabled(void) { return false; } -static inline bool page_is_guard(struct page *page) { return false; } -#endif /* CONFIG_DEBUG_PAGEALLOC */ - #if MAX_NUMNODES > 1 void __init setup_nr_node_ids(void); #else diff --git a/mm/Makefile b/mm/Makefile index 0eec4bc72d3f..678530a07326 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -124,6 +124,7 @@ obj-$(CONFIG_SECRETMEM) += secretmem.o obj-$(CONFIG_CMA_SYSFS) += cma_sysfs.o obj-$(CONFIG_USERFAULTFD) += userfaultfd.o obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o +obj-$(CONFIG_DEBUG_PAGEALLOC) += debug_page_alloc.o obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o obj-$(CONFIG_DAMON) += damon/ obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o diff --git a/mm/debug_page_alloc.c b/mm/debug_page_alloc.c new file mode 100644 index 000000000000..f9d145730fd1 --- /dev/null +++ b/mm/debug_page_alloc.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +unsigned int _debug_guardpage_minorder; + +bool _debug_pagealloc_enabled_early __read_mostly + = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT); +EXPORT_SYMBOL(_debug_pagealloc_enabled_early); +DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); +EXPORT_SYMBOL(_debug_pagealloc_enabled); + +DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled); + +static int __init early_debug_pagealloc(char *buf) +{ + return kstrtobool(buf, &_debug_pagealloc_enabled_early); +} +early_param("debug_pagealloc", early_debug_pagealloc); + +static int __init debug_guardpage_minorder_setup(char *buf) +{ + unsigned long res; + + if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { + pr_err("Bad debug_guardpage_minorder value\n"); + return 0; + } + _debug_guardpage_minorder = res; + pr_info("Setting debug_guardpage_minorder to %lu\n", res); + return 0; +} +early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup); + +bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order, + int migratetype) +{ + if (order >= debug_guardpage_minorder()) + return false; + + __SetPageGuard(page); + INIT_LIST_HEAD(&page->buddy_list); + set_page_private(page, order); + /* Guard pages are not available for any usage */ + if (!is_migrate_isolate(migratetype)) + __mod_zone_freepage_state(zone, -(1 << order), migratetype); + + return true; +} + +void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order, + int migratetype) +{ + __ClearPageGuard(page); + + set_page_private(page, 0); + if (!is_migrate_isolate(migratetype)) + __mod_zone_freepage_state(zone, (1 << order), migratetype); +} diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2b8e4a086c3d..40fa763c5074 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -664,75 +664,6 @@ void destroy_large_folio(struct folio *folio) compound_page_dtors[dtor](&folio->page); } -#ifdef CONFIG_DEBUG_PAGEALLOC -unsigned int _debug_guardpage_minorder; - -bool _debug_pagealloc_enabled_early __read_mostly - = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT); -EXPORT_SYMBOL(_debug_pagealloc_enabled_early); -DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); -EXPORT_SYMBOL(_debug_pagealloc_enabled); - -DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled); - -static int __init early_debug_pagealloc(char *buf) -{ - return kstrtobool(buf, &_debug_pagealloc_enabled_early); -} -early_param("debug_pagealloc", early_debug_pagealloc); - -static int __init debug_guardpage_minorder_setup(char *buf) -{ - unsigned long res; - - if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { - pr_err("Bad 
debug_guardpage_minorder value\n"); - return 0; - } - _debug_guardpage_minorder = res; - pr_info("Setting debug_guardpage_minorder to %lu\n", res); - return 0; -} -early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup); - -static inline bool set_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) -{ - if (!debug_guardpage_enabled()) - return false; - - if (order >= debug_guardpage_minorder()) - return false; - - __SetPageGuard(page); - INIT_LIST_HEAD(&page->buddy_list); - set_page_private(page, order); - /* Guard pages are not available for any usage */ - if (!is_migrate_isolate(migratetype)) - __mod_zone_freepage_state(zone, -(1 << order), migratetype); - - return true; -} - -static inline void clear_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) -{ - if (!debug_guardpage_enabled()) - return; - - __ClearPageGuard(page); - - set_page_private(page, 0); - if (!is_migrate_isolate(migratetype)) - __mod_zone_freepage_state(zone, (1 << order), migratetype); -} -#else -static inline bool set_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) { return false; } -static inline void clear_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) {} -#endif - static inline void set_buddy_order(struct page *page, unsigned int order) { set_page_private(page, order); -- cgit v1.2.3 From 31a1b9d7fe768db521b12287ec6426983e9787e3 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:17 +0800 Subject: mm: page_alloc: move mark_free_page() into snapshot.c The mark_free_page() is only used in kernel/power/snapshot.c, move it out to reduce a bit of page_alloc.c Link: https://lkml.kernel.org/r/20230516063821.121844-10-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- include/linux/suspend.h | 3 --- kernel/power/snapshot.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++ mm/page_alloc.c | 55 ------------------------------------------------- 3 files changed, 52 insertions(+), 58 deletions(-) (limited to 'mm') diff --git a/include/linux/suspend.h b/include/linux/suspend.h index d0d4598a7b3f..3950a7bf33ae 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -364,9 +364,6 @@ struct pbe { struct pbe *next; }; -/* mm/page_alloc.c */ -extern void mark_free_pages(struct zone *zone); - /** * struct platform_hibernation_ops - hibernation platform support * diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index cd8b7b35f1e8..45ef0bf81c85 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1228,6 +1228,58 @@ unsigned int snapshot_additional_pages(struct zone *zone) return 2 * rtree; } +/* + * Touch the watchdog for every WD_PAGE_COUNT pages. 
+ */ +#define WD_PAGE_COUNT (128*1024) + +static void mark_free_pages(struct zone *zone) +{ + unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; + unsigned long flags; + unsigned int order, t; + struct page *page; + + if (zone_is_empty(zone)) + return; + + spin_lock_irqsave(&zone->lock, flags); + + max_zone_pfn = zone_end_pfn(zone); + for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) + if (pfn_valid(pfn)) { + page = pfn_to_page(pfn); + + if (!--page_count) { + touch_nmi_watchdog(); + page_count = WD_PAGE_COUNT; + } + + if (page_zone(page) != zone) + continue; + + if (!swsusp_page_is_forbidden(page)) + swsusp_unset_page_free(page); + } + + for_each_migratetype_order(order, t) { + list_for_each_entry(page, + &zone->free_area[order].free_list[t], buddy_list) { + unsigned long i; + + pfn = page_to_pfn(page); + for (i = 0; i < (1UL << order); i++) { + if (!--page_count) { + touch_nmi_watchdog(); + page_count = WD_PAGE_COUNT; + } + swsusp_set_page_free(pfn_to_page(pfn + i)); + } + } + } + spin_unlock_irqrestore(&zone->lock, flags); +} + #ifdef CONFIG_HIGHMEM /** * count_free_highmem_pages - Compute the total number of free highmem pages. diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 40fa763c5074..8d306203e555 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2365,61 +2365,6 @@ void drain_all_pages(struct zone *zone) __drain_all_pages(zone, false); } -#ifdef CONFIG_HIBERNATION - -/* - * Touch the watchdog for every WD_PAGE_COUNT pages. - */ -#define WD_PAGE_COUNT (128*1024) - -void mark_free_pages(struct zone *zone) -{ - unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; - unsigned long flags; - unsigned int order, t; - struct page *page; - - if (zone_is_empty(zone)) - return; - - spin_lock_irqsave(&zone->lock, flags); - - max_zone_pfn = zone_end_pfn(zone); - for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) - if (pfn_valid(pfn)) { - page = pfn_to_page(pfn); - - if (!--page_count) { - touch_nmi_watchdog(); - page_count = WD_PAGE_COUNT; - } - - if (page_zone(page) != zone) - continue; - - if (!swsusp_page_is_forbidden(page)) - swsusp_unset_page_free(page); - } - - for_each_migratetype_order(order, t) { - list_for_each_entry(page, - &zone->free_area[order].free_list[t], buddy_list) { - unsigned long i; - - pfn = page_to_pfn(page); - for (i = 0; i < (1UL << order); i++) { - if (!--page_count) { - touch_nmi_watchdog(); - page_count = WD_PAGE_COUNT; - } - swsusp_set_page_free(pfn_to_page(pfn + i)); - } - } - } - spin_unlock_irqrestore(&zone->lock, flags); -} -#endif /* CONFIG_PM */ - static bool free_unref_page_prepare(struct page *page, unsigned long pfn, unsigned int order) { -- cgit v1.2.3 From 07f44ac3c90c50a201307d3fe4dda120ee8394f5 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:18 +0800 Subject: mm: page_alloc: move pm_* function into power pm_restrict_gfp_mask()/pm_restore_gfp_mask() only used in power, let's move them out of page_alloc.c. Adding a general gfp_has_io_fs() function which return true if gfp with both __GFP_IO and __GFP_FS flags, then use it inside of pm_suspended_storage(), also the pm_suspended_storage() is moved into suspend.h. Link: https://lkml.kernel.org/r/20230516063821.121844-11-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. 
Wysocki Signed-off-by: Andrew Morton --- include/linux/gfp.h | 15 ++++----------- include/linux/suspend.h | 6 ++++++ kernel/power/main.c | 27 +++++++++++++++++++++++++++ kernel/power/power.h | 5 +++++ mm/page_alloc.c | 38 -------------------------------------- mm/swapfile.c | 1 + 6 files changed, 43 insertions(+), 49 deletions(-) (limited to 'mm') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index ed8cb537c6a7..665f06675c83 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -338,19 +338,12 @@ extern gfp_t gfp_allowed_mask; /* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask); -extern void pm_restrict_gfp_mask(void); -extern void pm_restore_gfp_mask(void); - -extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma); - -#ifdef CONFIG_PM_SLEEP -extern bool pm_suspended_storage(void); -#else -static inline bool pm_suspended_storage(void) +static inline bool gfp_has_io_fs(gfp_t gfp) { - return false; + return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS); } -#endif /* CONFIG_PM_SLEEP */ + +extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma); #ifdef CONFIG_CONTIG_ALLOC /* The below functions must be run on a range from a single zone. */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 3950a7bf33ae..76923051c03d 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -502,6 +502,11 @@ extern void pm_report_max_hw_sleep(u64 t); extern bool events_check_enabled; extern suspend_state_t pm_suspend_target_state; +static inline bool pm_suspended_storage(void) +{ + return !gfp_has_io_fs(gfp_allowed_mask); +} + extern bool pm_wakeup_pending(void); extern void pm_system_wakeup(void); extern void pm_system_cancel_wakeup(void); @@ -535,6 +540,7 @@ static inline void ksys_sync_helper(void) {} #define pm_notifier(fn, pri) do { (void)(fn); } while (0) +static inline bool pm_suspended_storage(void) { return false; } static inline bool pm_wakeup_pending(void) { return false; } static inline void pm_system_wakeup(void) {} static inline void pm_wakeup_clear(bool reset) {} diff --git a/kernel/power/main.c b/kernel/power/main.c index 3113ec2f1db4..34fc8359145b 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -21,6 +21,33 @@ #include "power.h" #ifdef CONFIG_PM_SLEEP +/* + * The following functions are used by the suspend/hibernate code to temporarily + * change gfp_allowed_mask in order to avoid using I/O during memory allocations + * while devices are suspended. To avoid races with the suspend/hibernate code, + * they should always be called with system_transition_mutex held + * (gfp_allowed_mask also should only be modified with system_transition_mutex + * held, unless the suspend/hibernate code is guaranteed not to run in parallel + * with that modification). 
+ */ +static gfp_t saved_gfp_mask; + +void pm_restore_gfp_mask(void) +{ + WARN_ON(!mutex_is_locked(&system_transition_mutex)); + if (saved_gfp_mask) { + gfp_allowed_mask = saved_gfp_mask; + saved_gfp_mask = 0; + } +} + +void pm_restrict_gfp_mask(void) +{ + WARN_ON(!mutex_is_locked(&system_transition_mutex)); + WARN_ON(saved_gfp_mask); + saved_gfp_mask = gfp_allowed_mask; + gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); +} unsigned int lock_system_sleep(void) { diff --git a/kernel/power/power.h b/kernel/power/power.h index b83c8d5e188d..ac14d1b463d1 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -216,6 +216,11 @@ static inline void suspend_test_finish(const char *label) {} /* kernel/power/main.c */ extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down); extern int pm_notifier_call_chain(unsigned long val); +void pm_restrict_gfp_mask(void); +void pm_restore_gfp_mask(void); +#else +static inline void pm_restrict_gfp_mask(void) {} +static inline void pm_restore_gfp_mask(void) {} #endif #ifdef CONFIG_HIGHMEM diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8d306203e555..005aa0202ae0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -227,44 +227,6 @@ static inline void set_pcppage_migratetype(struct page *page, int migratetype) page->index = migratetype; } -#ifdef CONFIG_PM_SLEEP -/* - * The following functions are used by the suspend/hibernate code to temporarily - * change gfp_allowed_mask in order to avoid using I/O during memory allocations - * while devices are suspended. To avoid races with the suspend/hibernate code, - * they should always be called with system_transition_mutex held - * (gfp_allowed_mask also should only be modified with system_transition_mutex - * held, unless the suspend/hibernate code is guaranteed not to run in parallel - * with that modification). - */ - -static gfp_t saved_gfp_mask; - -void pm_restore_gfp_mask(void) -{ - WARN_ON(!mutex_is_locked(&system_transition_mutex)); - if (saved_gfp_mask) { - gfp_allowed_mask = saved_gfp_mask; - saved_gfp_mask = 0; - } -} - -void pm_restrict_gfp_mask(void) -{ - WARN_ON(!mutex_is_locked(&system_transition_mutex)); - WARN_ON(saved_gfp_mask); - saved_gfp_mask = gfp_allowed_mask; - gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); -} - -bool pm_suspended_storage(void) -{ - if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) - return false; - return true; -} -#endif /* CONFIG_PM_SLEEP */ - #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE unsigned int pageblock_order __read_mostly; #endif diff --git a/mm/swapfile.c b/mm/swapfile.c index 274bbf797480..c74259001d5e 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include -- cgit v1.2.3 From 5221b5a89340f63dafa4d5f38537efb1ad506f15 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:19 +0800 Subject: mm: vmscan: use gfp_has_io_fs() Use gfp_has_io_fs() instead of open-code. Link: https://lkml.kernel.org/r/20230516063821.121844-12-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. 
Wysocki Signed-off-by: Andrew Morton --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 6d0cd2840cf0..15efbfbb1963 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2458,7 +2458,7 @@ static int too_many_isolated(struct pglist_data *pgdat, int file, * won't get blocked by normal direct-reclaimers, forming a circular * deadlock. */ - if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) + if (gfp_has_io_fs(sc->gfp_mask)) inactive >>= 3; too_many = isolated > inactive; -- cgit v1.2.3 From e95d372c4cd46b6ec4eeacc07adcb7260ab4cfa0 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:20 +0800 Subject: mm: page_alloc: move sysctls into it own fils This moves all page alloc related sysctls to its own file, as part of the kernel/sysctl.c spring cleaning, also move some functions declarations from mm.h into internal.h. Link: https://lkml.kernel.org/r/20230516063821.121844-13-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- include/linux/mm.h | 11 ------ include/linux/mmzone.h | 21 ---------- kernel/sysctl.c | 67 -------------------------------- mm/internal.h | 11 ++++++ mm/mm_init.c | 2 + mm/page_alloc.c | 103 ++++++++++++++++++++++++++++++++++++++++++------- 6 files changed, 102 insertions(+), 113 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index 2382eaf6fd81..6d7e03d83da7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2994,12 +2994,6 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn); #endif extern void set_dma_reserve(unsigned long new_dma_reserve); -extern void memmap_init_range(unsigned long, int, unsigned long, - unsigned long, unsigned long, enum meminit_context, - struct vmem_altmap *, int migratetype); -extern void setup_per_zone_wmarks(void); -extern void calculate_min_free_kbytes(void); -extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); extern void __init mmap_init(void); @@ -3020,11 +3014,6 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...); extern void setup_per_cpu_pageset(void); -/* page_alloc.c */ -extern int min_free_kbytes; -extern int watermark_boost_factor; -extern int watermark_scale_factor; - /* nommu.c */ extern atomic_long_t mmap_pages_allocated; extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index a4889c9d4055..3a68326c9989 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1512,27 +1512,6 @@ static inline bool has_managed_dma(void) } #endif -/* These two functions are used to setup the per zone pages min values */ -struct ctl_table; - -int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *, - loff_t *); -int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *, - size_t *, loff_t *); -extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; -int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *, - size_t *, loff_t *); -int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int, - void *, size_t *, loff_t *); -int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, - void *, size_t *, loff_t *); -int 
sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, - void *, size_t *, loff_t *); -int numa_zonelist_order_handler(struct ctl_table *, int, - void *, size_t *, loff_t *); -extern int percpu_pagelist_high_fraction; -extern char numa_zonelist_order[]; -#define NUMA_ZONELIST_ORDER_LEN 16 #ifndef CONFIG_NUMA diff --git a/kernel/sysctl.c b/kernel/sysctl.c index bfe53e835524..a57de67f032f 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2119,13 +2119,6 @@ static struct ctl_table vm_table[] = { .extra2 = SYSCTL_ONE, }, #endif - { - .procname = "lowmem_reserve_ratio", - .data = &sysctl_lowmem_reserve_ratio, - .maxlen = sizeof(sysctl_lowmem_reserve_ratio), - .mode = 0644, - .proc_handler = lowmem_reserve_ratio_sysctl_handler, - }, { .procname = "drop_caches", .data = &sysctl_drop_caches, @@ -2135,39 +2128,6 @@ static struct ctl_table vm_table[] = { .extra1 = SYSCTL_ONE, .extra2 = SYSCTL_FOUR, }, - { - .procname = "min_free_kbytes", - .data = &min_free_kbytes, - .maxlen = sizeof(min_free_kbytes), - .mode = 0644, - .proc_handler = min_free_kbytes_sysctl_handler, - .extra1 = SYSCTL_ZERO, - }, - { - .procname = "watermark_boost_factor", - .data = &watermark_boost_factor, - .maxlen = sizeof(watermark_boost_factor), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, - }, - { - .procname = "watermark_scale_factor", - .data = &watermark_scale_factor, - .maxlen = sizeof(watermark_scale_factor), - .mode = 0644, - .proc_handler = watermark_scale_factor_sysctl_handler, - .extra1 = SYSCTL_ONE, - .extra2 = SYSCTL_THREE_THOUSAND, - }, - { - .procname = "percpu_pagelist_high_fraction", - .data = &percpu_pagelist_high_fraction, - .maxlen = sizeof(percpu_pagelist_high_fraction), - .mode = 0644, - .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, - .extra1 = SYSCTL_ZERO, - }, { .procname = "page_lock_unfairness", .data = &sysctl_page_lock_unfairness, @@ -2223,24 +2183,6 @@ static struct ctl_table vm_table[] = { .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, }, - { - .procname = "min_unmapped_ratio", - .data = &sysctl_min_unmapped_ratio, - .maxlen = sizeof(sysctl_min_unmapped_ratio), - .mode = 0644, - .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE_HUNDRED, - }, - { - .procname = "min_slab_ratio", - .data = &sysctl_min_slab_ratio, - .maxlen = sizeof(sysctl_min_slab_ratio), - .mode = 0644, - .proc_handler = sysctl_min_slab_ratio_sysctl_handler, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE_HUNDRED, - }, #endif #ifdef CONFIG_SMP { @@ -2267,15 +2209,6 @@ static struct ctl_table vm_table[] = { .proc_handler = mmap_min_addr_handler, }, #endif -#ifdef CONFIG_NUMA - { - .procname = "numa_zonelist_order", - .data = &numa_zonelist_order, - .maxlen = NUMA_ZONELIST_ORDER_LEN, - .mode = 0644, - .proc_handler = numa_zonelist_order_handler, - }, -#endif #if (defined(CONFIG_X86_32) && !defined(CONFIG_UML))|| \ (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL)) { diff --git a/mm/internal.h b/mm/internal.h index c99da2cfac71..66d7ddf7e211 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -213,6 +213,13 @@ static inline bool is_check_pages_enabled(void) return static_branch_unlikely(&check_pages_enabled); } +extern int min_free_kbytes; + +void setup_per_zone_wmarks(void); +void calculate_min_free_kbytes(void); +int __meminit init_per_zone_wmark_min(void); +void page_alloc_sysctl_init(void); + /* * Structure for holding the mostly immutable allocation parameters passed * between functions involved in 
allocations, including the alloc_pages* @@ -423,6 +430,10 @@ extern void *memmap_alloc(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, int nid, bool exact_nid); +void memmap_init_range(unsigned long, int, unsigned long, unsigned long, + unsigned long, enum meminit_context, struct vmem_altmap *, int); + + int split_free_page(struct page *free_page, unsigned int order, unsigned long split_pfn_offset); diff --git a/mm/mm_init.c b/mm/mm_init.c index 0fd4ddfdfb2e..10bf560302c4 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2392,6 +2392,8 @@ void __init page_alloc_init_late(void) /* Initialize page ext after all struct pages are initialized. */ if (deferred_struct_pages) page_ext_init(); + + page_alloc_sysctl_init(); } #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 005aa0202ae0..d19a05264125 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -206,7 +206,6 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = { }; EXPORT_SYMBOL(node_states); -int percpu_pagelist_high_fraction; gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; /* @@ -302,8 +301,8 @@ compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = { int min_free_kbytes = 1024; int user_min_free_kbytes = -1; -int watermark_boost_factor __read_mostly = 15000; -int watermark_scale_factor = 10; +static int watermark_boost_factor __read_mostly = 15000; +static int watermark_scale_factor = 10; /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ int movable_zone; @@ -4880,12 +4879,12 @@ static int __parse_numa_zonelist_order(char *s) return 0; } -char numa_zonelist_order[] = "Node"; - +static char numa_zonelist_order[] = "Node"; +#define NUMA_ZONELIST_ORDER_LEN 16 /* * sysctl handler for numa_zonelist_order */ -int numa_zonelist_order_handler(struct ctl_table *table, int write, +static int numa_zonelist_order_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { if (write) @@ -4893,7 +4892,6 @@ int numa_zonelist_order_handler(struct ctl_table *table, int write, return proc_dostring(table, write, buffer, length, ppos); } - static int node_load[MAX_NUMNODES]; /** @@ -5296,6 +5294,7 @@ static int zone_batchsize(struct zone *zone) #endif } +static int percpu_pagelist_high_fraction; static int zone_highsize(struct zone *zone, int batch, int cpu_online) { #ifdef CONFIG_MMU @@ -5825,7 +5824,7 @@ postcore_initcall(init_per_zone_wmark_min) * that we can call two helper functions whenever min_free_kbytes * changes. 
*/ -int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, +static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc; @@ -5841,7 +5840,7 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, return 0; } -int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, +static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc; @@ -5871,7 +5870,7 @@ static void setup_min_unmapped_ratio(void) } -int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, +static int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc; @@ -5898,7 +5897,7 @@ static void setup_min_slab_ratio(void) sysctl_min_slab_ratio) / 100; } -int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, +static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc; @@ -5922,8 +5921,8 @@ int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, * minimum watermarks. The lowmem reserve ratio can only make sense * if in function of the boot time zone sizes. */ -int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, - void *buffer, size_t *length, loff_t *ppos) +static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, + int write, void *buffer, size_t *length, loff_t *ppos) { int i; @@ -5943,7 +5942,7 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, * cpu. It is the fraction of total pages in each zone that a hot per cpu * pagelist can have before it gets flushed back to buddy allocator. 
*/ -int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table, +static int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { struct zone *zone; @@ -5976,6 +5975,82 @@ out: return ret; } +static struct ctl_table page_alloc_sysctl_table[] = { + { + .procname = "min_free_kbytes", + .data = &min_free_kbytes, + .maxlen = sizeof(min_free_kbytes), + .mode = 0644, + .proc_handler = min_free_kbytes_sysctl_handler, + .extra1 = SYSCTL_ZERO, + }, + { + .procname = "watermark_boost_factor", + .data = &watermark_boost_factor, + .maxlen = sizeof(watermark_boost_factor), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + }, + { + .procname = "watermark_scale_factor", + .data = &watermark_scale_factor, + .maxlen = sizeof(watermark_scale_factor), + .mode = 0644, + .proc_handler = watermark_scale_factor_sysctl_handler, + .extra1 = SYSCTL_ONE, + .extra2 = SYSCTL_THREE_THOUSAND, + }, + { + .procname = "percpu_pagelist_high_fraction", + .data = &percpu_pagelist_high_fraction, + .maxlen = sizeof(percpu_pagelist_high_fraction), + .mode = 0644, + .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, + .extra1 = SYSCTL_ZERO, + }, + { + .procname = "lowmem_reserve_ratio", + .data = &sysctl_lowmem_reserve_ratio, + .maxlen = sizeof(sysctl_lowmem_reserve_ratio), + .mode = 0644, + .proc_handler = lowmem_reserve_ratio_sysctl_handler, + }, +#ifdef CONFIG_NUMA + { + .procname = "numa_zonelist_order", + .data = &numa_zonelist_order, + .maxlen = NUMA_ZONELIST_ORDER_LEN, + .mode = 0644, + .proc_handler = numa_zonelist_order_handler, + }, + { + .procname = "min_unmapped_ratio", + .data = &sysctl_min_unmapped_ratio, + .maxlen = sizeof(sysctl_min_unmapped_ratio), + .mode = 0644, + .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE_HUNDRED, + }, + { + .procname = "min_slab_ratio", + .data = &sysctl_min_slab_ratio, + .maxlen = sizeof(sysctl_min_slab_ratio), + .mode = 0644, + .proc_handler = sysctl_min_slab_ratio_sysctl_handler, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE_HUNDRED, + }, +#endif + {} +}; + +void __init page_alloc_sysctl_init(void) +{ + register_sysctl_init("vm", page_alloc_sysctl_table); +} + #ifdef CONFIG_CONTIG_ALLOC /* Usage: See admin-guide/dynamic-debug-howto.rst */ static void alloc_contig_dump_pages(struct list_head *page_list) -- cgit v1.2.3 From ecbb490d8ee38fd84a0d682282589ff723dc62c0 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:21 +0800 Subject: mm: page_alloc: move is_check_pages_enabled() into page_alloc.c The is_check_pages_enabled() only used in page_alloc.c, move it into page_alloc.c, also use it in free_tail_page_prepare(). Link: https://lkml.kernel.org/r/20230516063821.121844-14-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. 
Wysocki Signed-off-by: Andrew Morton --- mm/internal.h | 5 ----- mm/page_alloc.c | 7 ++++++- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/internal.h b/mm/internal.h index 66d7ddf7e211..ec55da813c13 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -208,11 +208,6 @@ extern char * const zone_names[MAX_NR_ZONES]; /* perform sanity checks on struct pages being allocated or freed */ DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled); -static inline bool is_check_pages_enabled(void) -{ - return static_branch_unlikely(&check_pages_enabled); -} - extern int min_free_kbytes; void setup_per_zone_wmarks(void); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d19a05264125..ee23ba9c0ca7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -983,6 +983,11 @@ static inline bool free_page_is_bad(struct page *page) return true; } +static inline bool is_check_pages_enabled(void) +{ + return static_branch_unlikely(&check_pages_enabled); +} + static int free_tail_page_prepare(struct page *head_page, struct page *page) { struct folio *folio = (struct folio *)head_page; @@ -994,7 +999,7 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) */ BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); - if (!static_branch_unlikely(&check_pages_enabled)) { + if (!is_check_pages_enabled()) { ret = 0; goto out; } -- cgit v1.2.3 From 54d020692b342f7bd02d7f5795fb5c401caecfcc Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 17 May 2023 20:25:33 +0100 Subject: mm/gup: remove unused vmas parameter from get_user_pages() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "remove the vmas parameter from GUP APIs", v6. (pin_/get)_user_pages[_remote]() each provide an optional output parameter for an array of VMA objects associated with each page in the input range. These provide the means for VMAs to be returned, as long as mm->mmap_lock is never released during the GUP operation (i.e. the internal flag FOLL_UNLOCKABLE is not specified). In addition, these VMAs can only be accessed with the mmap_lock held and become invalidated the moment it is released. The vast majority of invocations do not use this functionality and of those that do, all but one case retrieve a single VMA to perform checks upon. It is not egregious in the single VMA cases to simply replace the operation with a vma_lookup(). In these cases we duplicate the (fast) lookup on a slow path already under the mmap_lock, abstracted to a new get_user_page_vma_remote() inline helper function which also performs error checking and reference count maintenance. The special case is io_uring, where io_pin_pages() specifically needs to assert that the VMAs underlying the range do not result in broken long-term GUP file-backed mappings. As GUP now internally asserts that FOLL_LONGTERM mappings are not file-backed in a broken fashion (i.e. requiring dirty tracking) - as implemented in "mm/gup: disallow FOLL_LONGTERM GUP-nonfast writing to file-backed mappings" - this logic is no longer required and so we can simply remove it altogether from io_uring. Eliminating the vmas parameter eliminates an entire class of danging pointer errors that might have occured should the lock have been incorrectly released. In addition, the API is simplified and now clearly expresses what it is intended for - applying the specified GUP flags and (if pinning) returning pinned pages. 
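For the single-VMA cases described above, the replacement pattern is to keep holding mmap_lock and look the VMA up directly rather than asking GUP to hand it back. Roughly, as a hedged sketch (not a hunk from this series; mm, addr and the VM_MAYEXEC check stand for whatever the call site already has):

  struct vm_area_struct *vma;

  mmap_read_lock(mm);
  vma = vma_lookup(mm, addr);
  if (vma && (vma->vm_flags & VM_MAYEXEC)) {
          /* VMA-dependent checks and the GUP call itself happen here,
           * while mmap_lock is still held */
  }
  mmap_read_unlock(mm);

The key property is that the VMA pointer never outlives mmap_read_unlock(), which is precisely the dangling-pointer hazard the old vmas output parameter invited.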
This change additionally opens the door to further potential improvements in GUP and the possible marrying of disparate code paths. I have run this series against gup_test with no issues. Thanks to Matthew Wilcox for suggesting this refactoring! This patch (of 6): No invocation of get_user_pages() use the vmas parameter, so remove it. The GUP API is confusing and caveated. Recent changes have done much to improve that, however there is more we can do. Exporting vmas is a prime target as the caller has to be extremely careful to preclude their use after the mmap_lock has expired or otherwise be left with dangling pointers. Removing the vmas parameter focuses the GUP functions upon their primary purpose - pinning (and outputting) pages as well as performing the actions implied by the input flags. This is part of a patch series aiming to remove the vmas parameter altogether. Link: https://lkml.kernel.org/r/cover.1684350871.git.lstoakes@gmail.com Link: https://lkml.kernel.org/r/589e0c64794668ffc799651e8d85e703262b1e9d.1684350871.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Suggested-by: Matthew Wilcox (Oracle) Acked-by: Greg Kroah-Hartman Acked-by: David Hildenbrand Reviewed-by: Jason Gunthorpe Acked-by: Christian König (for radeon parts) Acked-by: Jarkko Sakkinen Reviewed-by: Christoph Hellwig Acked-by: Sean Christopherson (KVM) Cc: Catalin Marinas Cc: Dennis Dalessandro Cc: Janosch Frank Cc: Jens Axboe Cc: Sakari Ailus Signed-off-by: Andrew Morton --- arch/x86/kernel/cpu/sgx/ioctl.c | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 2 +- drivers/misc/sgi-gru/grufault.c | 2 +- include/linux/mm.h | 3 +-- mm/gup.c | 9 +++------ mm/gup_test.c | 5 ++--- virt/kvm/kvm_main.c | 2 +- 7 files changed, 10 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c index 21ca0a831b70..5d390df21440 100644 --- a/arch/x86/kernel/cpu/sgx/ioctl.c +++ b/arch/x86/kernel/cpu/sgx/ioctl.c @@ -214,7 +214,7 @@ static int __sgx_encl_add_page(struct sgx_encl *encl, if (!(vma->vm_flags & VM_MAYEXEC)) return -EACCES; - ret = get_user_pages(src, 1, 0, &src_page, NULL); + ret = get_user_pages(src, 1, 0, &src_page); if (ret < 1) return -EFAULT; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 2220cdf6a3f6..3a9db030f98f 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -359,7 +359,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm struct page **pages = ttm->pages + pinned; r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0, - pages, NULL); + pages); if (r < 0) goto release_pages; diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index b836936e9747..378cf02a2aa1 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -185,7 +185,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma, #else *pageshift = PAGE_SHIFT; #endif - if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0) + if (get_user_pages(vaddr, 1, write ? 
FOLL_WRITE : 0, &page) <= 0) return -EFAULT; *paddr = page_to_phys(page); put_page(page); diff --git a/include/linux/mm.h b/include/linux/mm.h index 6d7e03d83da7..6336253c18e2 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2369,8 +2369,7 @@ long pin_user_pages_remote(struct mm_struct *mm, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked); long get_user_pages(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas); + unsigned int gup_flags, struct page **pages); long pin_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas); diff --git a/mm/gup.c b/mm/gup.c index e19b06a66229..21daeee5f163 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2251,8 +2251,6 @@ long get_user_pages_remote(struct mm_struct *mm, * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. - * @vmas: array of pointers to vmas corresponding to each page. - * Or NULL if the caller does not require them. * * This is the same as get_user_pages_remote(), just with a less-flexible * calling convention where we assume that the mm being operated on belongs to @@ -2260,16 +2258,15 @@ long get_user_pages_remote(struct mm_struct *mm, * obviously don't pass FOLL_REMOTE in here. */ long get_user_pages(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas) + unsigned int gup_flags, struct page **pages) { int locked = 1; - if (!is_valid_gup_args(pages, vmas, NULL, &gup_flags, FOLL_TOUCH)) + if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_TOUCH)) return -EINVAL; return __get_user_pages_locked(current->mm, start, nr_pages, pages, - vmas, &locked, gup_flags); + NULL, &locked, gup_flags); } EXPORT_SYMBOL(get_user_pages); diff --git a/mm/gup_test.c b/mm/gup_test.c index 8ae7307a1bb6..9ba8ea23f84e 100644 --- a/mm/gup_test.c +++ b/mm/gup_test.c @@ -139,8 +139,7 @@ static int __gup_test_ioctl(unsigned int cmd, pages + i); break; case GUP_BASIC_TEST: - nr = get_user_pages(addr, nr, gup->gup_flags, pages + i, - NULL); + nr = get_user_pages(addr, nr, gup->gup_flags, pages + i); break; case PIN_FAST_BENCHMARK: nr = pin_user_pages_fast(addr, nr, gup->gup_flags, @@ -161,7 +160,7 @@ static int __gup_test_ioctl(unsigned int cmd, pages + i, NULL); else nr = get_user_pages(addr, nr, gup->gup_flags, - pages + i, NULL); + pages + i); break; default: ret = -EINVAL; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 479802a892d4..51e4882d0873 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2477,7 +2477,7 @@ static inline int check_user_page_hwpoison(unsigned long addr) { int rc, flags = FOLL_HWPOISON | FOLL_WRITE; - rc = get_user_pages(addr, 1, flags, NULL, NULL); + rc = get_user_pages(addr, 1, flags, NULL); return rc == -EHWPOISON; } -- cgit v1.2.3 From 0b295316b3a9b7858eafbebdc31b4827a6edde03 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 17 May 2023 20:25:36 +0100 Subject: mm/gup: remove unused vmas parameter from pin_user_pages_remote() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No invocation of pin_user_pages_remote() uses the vmas parameter, so remove it. This forms part of a larger patch set eliminating the use of the vmas parameters altogether. 
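
A minimal caller sketch under the updated prototype, loosely modelled on the vfio conversion in this patch (the wrapper name is hypothetical):

	static long pin_remote_range(struct mm_struct *mm, unsigned long uaddr,
				     unsigned long npages, struct page **pages)
	{
		long pinned;

		mmap_read_lock(mm);
		/* the vmas argument is gone; the locked pointer may still be NULL */
		pinned = pin_user_pages_remote(mm, uaddr, npages,
					       FOLL_WRITE | FOLL_LONGTERM,
					       pages, NULL);
		mmap_read_unlock(mm);

		/* number of pages pinned, or a negative errno */
		return pinned;
	}
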
Link: https://lkml.kernel.org/r/28f000beb81e45bf538a2aaa77c90f5482b67a32.1684350871.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Acked-by: David Hildenbrand Reviewed-by: Jason Gunthorpe Reviewed-by: Christoph Hellwig Cc: Catalin Marinas Cc: Christian König Cc: Dennis Dalessandro Cc: Greg Kroah-Hartman Cc: Janosch Frank Cc: Jarkko Sakkinen Cc: Jens Axboe Cc: Matthew Wilcox (Oracle) Cc: Sakari Ailus Cc: Sean Christopherson Signed-off-by: Andrew Morton --- drivers/iommu/iommufd/pages.c | 4 ++-- drivers/vfio/vfio_iommu_type1.c | 2 +- include/linux/mm.h | 2 +- kernel/trace/trace_events_user.c | 2 +- mm/gup.c | 8 +++----- mm/process_vm_access.c | 2 +- 6 files changed, 9 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c index 3c47846cc5ef..412ca96be128 100644 --- a/drivers/iommu/iommufd/pages.c +++ b/drivers/iommu/iommufd/pages.c @@ -786,7 +786,7 @@ static int pfn_reader_user_pin(struct pfn_reader_user *user, user->locked = 1; } rc = pin_user_pages_remote(pages->source_mm, uptr, npages, - user->gup_flags, user->upages, NULL, + user->gup_flags, user->upages, &user->locked); } if (rc <= 0) { @@ -1799,7 +1799,7 @@ static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index, rc = pin_user_pages_remote( pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE), 1, (flags & IOMMUFD_ACCESS_RW_WRITE) ? FOLL_WRITE : 0, &page, - NULL, NULL); + NULL); mmap_read_unlock(pages->source_mm); if (rc != 1) { if (WARN_ON(rc >= 0)) diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 0d2f805468e1..306e6f1d1c70 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -562,7 +562,7 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr, mmap_read_lock(mm); ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM, - pages, NULL, NULL); + pages, NULL); if (ret > 0) { int i; diff --git a/include/linux/mm.h b/include/linux/mm.h index 6336253c18e2..cf17ffdf4fbf 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2367,7 +2367,7 @@ long get_user_pages_remote(struct mm_struct *mm, long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas, int *locked); + int *locked); long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages); long pin_user_pages(unsigned long start, unsigned long nr_pages, diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c index b1ecd7677642..bdc2666e8d39 100644 --- a/kernel/trace/trace_events_user.c +++ b/kernel/trace/trace_events_user.c @@ -406,7 +406,7 @@ static int user_event_enabler_write(struct user_event_mm *mm, return -EBUSY; ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT, - &page, NULL, NULL); + &page, NULL); if (unlikely(ret <= 0)) { if (!fixup_fault) diff --git a/mm/gup.c b/mm/gup.c index 21daeee5f163..edf0fe2695b0 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -3100,8 +3100,6 @@ EXPORT_SYMBOL_GPL(pin_user_pages_fast); * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. - * @vmas: array of pointers to vmas corresponding to each page. - * Or NULL if the caller does not require them. 
* @locked: pointer to lock flag indicating whether lock is held and * subsequently whether VM_FAULT_RETRY functionality can be * utilised. Lock must initially be held. @@ -3116,14 +3114,14 @@ EXPORT_SYMBOL_GPL(pin_user_pages_fast); long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas, int *locked) + int *locked) { int local_locked = 1; - if (!is_valid_gup_args(pages, vmas, locked, &gup_flags, + if (!is_valid_gup_args(pages, NULL, locked, &gup_flags, FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE)) return 0; - return __gup_longterm_locked(mm, start, nr_pages, pages, vmas, + return __gup_longterm_locked(mm, start, nr_pages, pages, NULL, locked ? locked : &local_locked, gup_flags); } diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index 78dfaf9e8990..0523edab03a6 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c @@ -104,7 +104,7 @@ static int process_vm_rw_single_vec(unsigned long addr, mmap_read_lock(mm); pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages, flags, process_pages, - NULL, &locked); + &locked); if (locked) mmap_read_unlock(mm); if (pinned_pages <= 0) -- cgit v1.2.3 From ca5e863233e8f6acd1792fd85d6bc2729a1b2c10 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 17 May 2023 20:25:39 +0100 Subject: mm/gup: remove vmas parameter from get_user_pages_remote() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The only instances of get_user_pages_remote() invocations which used the vmas parameter were for a single page which can instead simply look up the VMA directly. In particular:- - __update_ref_ctr() looked up the VMA but did nothing with it so we simply remove it. - __access_remote_vm() was already using vma_lookup() when the original lookup failed so by doing the lookup directly this also de-duplicates the code. We are able to perform these VMA operations as we already hold the mmap_lock in order to be able to call get_user_pages_remote(). As part of this work we add get_user_page_vma_remote() which abstracts the VMA lookup, error handling and decrementing the page reference count should the VMA lookup fail. This forms part of a broader set of patches intended to eliminate the vmas parameter altogether. 
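
A sketch of a single-page user of the new helper follows; the wrapper name and the -EFAULT/-EOPNOTSUPP choices are illustrative, while the IS_ERR_OR_NULL() handling mirrors the arm64 and uprobes conversions in this patch:

	static int check_one_remote_page(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma;
		struct page *page;
		int err = 0;

		mmap_read_lock(mm);
		page = get_user_page_vma_remote(mm, addr, FOLL_WRITE, &vma);
		if (IS_ERR_OR_NULL(page)) {
			/* NULL: no page pinned; ERR_PTR(): error from GUP or the VMA lookup */
			err = page ? PTR_ERR(page) : -EFAULT;
			goto out;
		}

		/* page and vma are both valid while mmap_lock remains held */
		if (vma->vm_flags & VM_IO)
			err = -EOPNOTSUPP;

		put_page(page);
	out:
		mmap_read_unlock(mm);
		return err;
	}

Note that the helper drops the page reference itself when the VMA lookup fails, so the caller only needs put_page() on the success path.
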
[akpm@linux-foundation.org: avoid passing NULL to PTR_ERR] Link: https://lkml.kernel.org/r/d20128c849ecdbf4dd01cc828fcec32127ed939a.1684350871.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Catalin Marinas (for arm64) Acked-by: David Hildenbrand Reviewed-by: Janosch Frank (for s390) Reviewed-by: Christoph Hellwig Cc: Christian König Cc: Dennis Dalessandro Cc: Greg Kroah-Hartman Cc: Jarkko Sakkinen Cc: Jason Gunthorpe Cc: Jens Axboe Cc: Matthew Wilcox (Oracle) Cc: Sakari Ailus Cc: Sean Christopherson Signed-off-by: Andrew Morton --- arch/arm64/kernel/mte.c | 17 +++++++++-------- arch/s390/kvm/interrupt.c | 2 +- fs/exec.c | 2 +- include/linux/mm.h | 34 +++++++++++++++++++++++++++++++--- kernel/events/uprobes.c | 13 +++++-------- mm/gup.c | 12 ++++-------- mm/memory.c | 20 ++++++++++---------- mm/rmap.c | 2 +- security/tomoyo/domain.c | 2 +- virt/kvm/async_pf.c | 3 +-- 10 files changed, 64 insertions(+), 43 deletions(-) (limited to 'mm') diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 7e89968bd282..4c5ef9b20065 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -416,10 +416,9 @@ long get_mte_ctrl(struct task_struct *task) static int __access_remote_tags(struct mm_struct *mm, unsigned long addr, struct iovec *kiov, unsigned int gup_flags) { - struct vm_area_struct *vma; void __user *buf = kiov->iov_base; size_t len = kiov->iov_len; - int ret; + int err = 0; int write = gup_flags & FOLL_WRITE; if (!access_ok(buf, len)) @@ -429,14 +428,16 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr, return -EIO; while (len) { + struct vm_area_struct *vma; unsigned long tags, offset; void *maddr; - struct page *page = NULL; + struct page *page = get_user_page_vma_remote(mm, addr, + gup_flags, &vma); - ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page, - &vma, NULL); - if (ret <= 0) + if (IS_ERR_OR_NULL(page)) { + err = page == NULL ? -EIO : PTR_ERR(page); break; + } /* * Only copy tags if the page has been mapped as PROT_MTE @@ -446,7 +447,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr, * was never mapped with PROT_MTE. 
*/ if (!(vma->vm_flags & VM_MTE)) { - ret = -EOPNOTSUPP; + err = -EOPNOTSUPP; put_page(page); break; } @@ -479,7 +480,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr, kiov->iov_len = buf - kiov->iov_base; if (!kiov->iov_len) { /* check for error accessing the tracee's address space */ - if (ret <= 0) + if (err) return -EIO; else return -EFAULT; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index da6dac36e959..9bd0a873f3b1 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2777,7 +2777,7 @@ static struct page *get_map_page(struct kvm *kvm, u64 uaddr) mmap_read_lock(kvm->mm); get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE, - &page, NULL, NULL); + &page, NULL); mmap_read_unlock(kvm->mm); return page; } diff --git a/fs/exec.c b/fs/exec.c index a466e797c8e2..25c65b64544b 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -220,7 +220,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, */ mmap_read_lock(bprm->mm); ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags, - &page, NULL, NULL); + &page, NULL); mmap_read_unlock(bprm->mm); if (ret <= 0) return NULL; diff --git a/include/linux/mm.h b/include/linux/mm.h index cf17ffdf4fbf..fcbfb961b49f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2353,6 +2353,9 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, unmap_mapping_range(mapping, holebegin, holelen, 0); } +static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm, + unsigned long addr); + extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, unsigned int gup_flags); extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, @@ -2361,13 +2364,38 @@ extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags); long get_user_pages_remote(struct mm_struct *mm, - unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas, int *locked); + unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + int *locked); long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked); + +static inline struct page *get_user_page_vma_remote(struct mm_struct *mm, + unsigned long addr, + int gup_flags, + struct vm_area_struct **vmap) +{ + struct page *page; + struct vm_area_struct *vma; + int got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL); + + if (got < 0) + return ERR_PTR(got); + if (got == 0) + return NULL; + + vma = vma_lookup(mm, addr); + if (WARN_ON_ONCE(!vma)) { + put_page(page); + return ERR_PTR(-EINVAL); + } + + *vmap = vma; + return page; +} + long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages); long pin_user_pages(unsigned long start, unsigned long nr_pages, diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 59887c69d54c..607d742caa61 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -365,7 +365,6 @@ __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d) { void *kaddr; struct page *page; - struct vm_area_struct *vma; int ret; short *ptr; @@ -373,7 +372,7 @@ __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d) return -EINVAL; ret = get_user_pages_remote(mm, vaddr, 1, - FOLL_WRITE, &page, &vma, NULL); + FOLL_WRITE, &page, 
NULL); if (unlikely(ret <= 0)) { /* * We are asking for 1 page. If get_user_pages_remote() fails, @@ -474,10 +473,9 @@ retry: if (is_register) gup_flags |= FOLL_SPLIT_PMD; /* Read the page with vaddr into memory */ - ret = get_user_pages_remote(mm, vaddr, 1, gup_flags, - &old_page, &vma, NULL); - if (ret <= 0) - return ret; + old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma); + if (IS_ERR_OR_NULL(old_page)) + return old_page ? PTR_ERR(old_page) : 0; ret = verify_opcode(old_page, vaddr, &opcode); if (ret <= 0) @@ -2027,8 +2025,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) * but we treat this as a 'remote' access since it is * essentially a kernel access to the memory. */ - result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page, - NULL, NULL); + result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page, NULL); if (result < 0) return result; diff --git a/mm/gup.c b/mm/gup.c index edf0fe2695b0..764bf0c20827 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2165,8 +2165,6 @@ static bool is_valid_gup_args(struct page **pages, struct vm_area_struct **vmas, * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. - * @vmas: array of pointers to vmas corresponding to each page. - * Or NULL if the caller does not require them. * @locked: pointer to lock flag indicating whether lock is held and * subsequently whether VM_FAULT_RETRY functionality can be * utilised. Lock must initially be held. @@ -2181,8 +2179,6 @@ static bool is_valid_gup_args(struct page **pages, struct vm_area_struct **vmas, * * The caller is responsible for releasing returned @pages, via put_page(). * - * @vmas are valid only as long as mmap_lock is held. - * * Must be called with mmap_lock held for read or write. * * get_user_pages_remote walks a process's page tables and takes a reference @@ -2219,15 +2215,15 @@ static bool is_valid_gup_args(struct page **pages, struct vm_area_struct **vmas, long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas, int *locked) + int *locked) { int local_locked = 1; - if (!is_valid_gup_args(pages, vmas, locked, &gup_flags, + if (!is_valid_gup_args(pages, NULL, locked, &gup_flags, FOLL_TOUCH | FOLL_REMOTE)) return -EINVAL; - return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, + return __get_user_pages_locked(mm, start, nr_pages, pages, NULL, locked ? 
locked : &local_locked, gup_flags); } @@ -2237,7 +2233,7 @@ EXPORT_SYMBOL(get_user_pages_remote); long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas, int *locked) + int *locked) { return 0; } diff --git a/mm/memory.c b/mm/memory.c index f69fbc251198..4dd09f930c61 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5587,7 +5587,6 @@ EXPORT_SYMBOL_GPL(generic_access_phys); int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags) { - struct vm_area_struct *vma; void *old_buf = buf; int write = gup_flags & FOLL_WRITE; @@ -5596,29 +5595,30 @@ int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, /* ignore errors, just check how much was successfully transferred */ while (len) { - int bytes, ret, offset; + int bytes, offset; void *maddr; - struct page *page = NULL; + struct vm_area_struct *vma = NULL; + struct page *page = get_user_page_vma_remote(mm, addr, + gup_flags, &vma); - ret = get_user_pages_remote(mm, addr, 1, - gup_flags, &page, &vma, NULL); - if (ret <= 0) { + if (IS_ERR_OR_NULL(page)) { #ifndef CONFIG_HAVE_IOREMAP_PROT break; #else + int res = 0; + /* * Check if this is a VM_IO | VM_PFNMAP VMA, which * we can access using slightly different code. */ - vma = vma_lookup(mm, addr); if (!vma) break; if (vma->vm_ops && vma->vm_ops->access) - ret = vma->vm_ops->access(vma, addr, buf, + res = vma->vm_ops->access(vma, addr, buf, len, write); - if (ret <= 0) + if (res <= 0) break; - bytes = ret; + bytes = res; #endif } else { bytes = len; diff --git a/mm/rmap.c b/mm/rmap.c index 19392e090bec..cd918cb9a431 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2328,7 +2328,7 @@ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, npages = get_user_pages_remote(mm, start, npages, FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, - pages, NULL, NULL); + pages, NULL); if (npages < 0) return npages; diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 31af29f669d2..ac20c0bdff9d 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -916,7 +916,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos, */ mmap_read_lock(bprm->mm); ret = get_user_pages_remote(bprm->mm, pos, 1, - FOLL_FORCE, &page, NULL, NULL); + FOLL_FORCE, &page, NULL); mmap_read_unlock(bprm->mm); if (ret <= 0) return false; diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index 9bfe1d6f6529..e033c79d528e 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -61,8 +61,7 @@ static void async_pf_execute(struct work_struct *work) * access remotely. */ mmap_read_lock(mm); - get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL, - &locked); + get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, &locked); if (locked) mmap_read_unlock(mm); -- cgit v1.2.3 From 4c630f307455c06f99bdeca7f7a1ab5318604fe0 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 17 May 2023 20:25:45 +0100 Subject: mm/gup: remove vmas parameter from pin_user_pages() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are now in a position where no caller of pin_user_pages() requires the vmas parameter at all, so eliminate this parameter from the function and all callers. This clears the way to removing the vmas parameter from GUP altogether. 
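
For reference, a hypothetical wrapper (name invented for this sketch) showing the reduced argument list, patterned on the io_uring and xdp_umem call sites updated here:

	static long pin_user_buffer(unsigned long ubuf, unsigned long nr_pages,
				    struct page **pages)
	{
		long pinned;

		mmap_read_lock(current->mm);
		pinned = pin_user_pages(ubuf, nr_pages,
					FOLL_WRITE | FOLL_LONGTERM, pages);
		mmap_read_unlock(current->mm);

		/* pinned entries of pages[] are later released via unpin_user_pages() */
		return pinned;
	}
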
Link: https://lkml.kernel.org/r/195a99ae949c9f5cb589d2222b736ced96ec199a.1684350871.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Acked-by: David Hildenbrand Acked-by: Dennis Dalessandro [qib] Reviewed-by: Christoph Hellwig Acked-by: Sakari Ailus [drivers/media] Cc: Catalin Marinas Cc: Christian König Cc: Greg Kroah-Hartman Cc: Janosch Frank Cc: Jarkko Sakkinen Cc: Jason Gunthorpe Cc: Jens Axboe Cc: Matthew Wilcox (Oracle) Cc: Sean Christopherson Signed-off-by: Andrew Morton --- arch/powerpc/mm/book3s64/iommu_api.c | 2 +- drivers/infiniband/hw/qib/qib_user_pages.c | 2 +- drivers/infiniband/hw/usnic/usnic_uiom.c | 2 +- drivers/infiniband/sw/siw/siw_mem.c | 2 +- drivers/media/v4l2-core/videobuf-dma-sg.c | 2 +- drivers/vdpa/vdpa_user/vduse_dev.c | 2 +- drivers/vhost/vdpa.c | 2 +- include/linux/mm.h | 3 +-- io_uring/rsrc.c | 2 +- mm/gup.c | 9 +++------ mm/gup_test.c | 9 ++++----- net/xdp/xdp_umem.c | 2 +- 12 files changed, 17 insertions(+), 22 deletions(-) (limited to 'mm') diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c index 81d7185e2ae8..d19fb1f3007d 100644 --- a/arch/powerpc/mm/book3s64/iommu_api.c +++ b/arch/powerpc/mm/book3s64/iommu_api.c @@ -105,7 +105,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, ret = pin_user_pages(ua + (entry << PAGE_SHIFT), n, FOLL_WRITE | FOLL_LONGTERM, - mem->hpages + entry, NULL); + mem->hpages + entry); if (ret == n) { pinned += n; continue; diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index f693bc753b6b..1bb7507325bc 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -111,7 +111,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages, ret = pin_user_pages(start_page + got * PAGE_SIZE, num_pages - got, FOLL_LONGTERM | FOLL_WRITE, - p + got, NULL); + p + got); if (ret < 0) { mmap_read_unlock(current->mm); goto bail_release; diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 2a5cac2658ec..84e0f41e7dfa 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -140,7 +140,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, ret = pin_user_pages(cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof(struct page *)), - gup_flags, page_list, NULL); + gup_flags, page_list); if (ret < 0) goto out; diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c index f51ab2ccf151..e6e25f15567d 100644 --- a/drivers/infiniband/sw/siw/siw_mem.c +++ b/drivers/infiniband/sw/siw/siw_mem.c @@ -422,7 +422,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable) umem->page_chunk[i].plist = plist; while (nents) { rv = pin_user_pages(first_page_va, nents, foll_flags, - plist, NULL); + plist); if (rv < 0) goto out_sem_up; diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 53001532e8e3..405b89ea1054 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -180,7 +180,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, data, size, dma->nr_pages); err = pin_user_pages(data & PAGE_MASK, dma->nr_pages, gup_flags, - dma->pages, NULL); + dma->pages); if (err != dma->nr_pages) { dma->nr_pages = (err >= 0) ? 
err : 0; diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index de97e38c3b82..4d4405f058e8 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -1052,7 +1052,7 @@ static int vduse_dev_reg_umem(struct vduse_dev *dev, goto out; pinned = pin_user_pages(uaddr, npages, FOLL_LONGTERM | FOLL_WRITE, - page_list, NULL); + page_list); if (pinned != npages) { ret = pinned < 0 ? pinned : -ENOMEM; goto out; diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 8c1aefc865f0..61223fcbe82b 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -983,7 +983,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v, while (npages) { sz2pin = min_t(unsigned long, npages, list_size); pinned = pin_user_pages(cur_base, sz2pin, - gup_flags, page_list, NULL); + gup_flags, page_list); if (sz2pin != pinned) { if (pinned < 0) { ret = pinned; diff --git a/include/linux/mm.h b/include/linux/mm.h index fcbfb961b49f..280429ffa91d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2399,8 +2399,7 @@ static inline struct page *get_user_page_vma_remote(struct mm_struct *mm, long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages); long pin_user_pages(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas); + unsigned int gup_flags, struct page **pages); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c index b6451f8bc5d5..b56bda46a9eb 100644 --- a/io_uring/rsrc.c +++ b/io_uring/rsrc.c @@ -1044,7 +1044,7 @@ struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages) ret = 0; mmap_read_lock(current->mm); pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM, - pages, NULL); + pages); if (pret == nr_pages) *npages = nr_pages; else diff --git a/mm/gup.c b/mm/gup.c index 764bf0c20827..18e3bc2ee3f1 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -3131,8 +3131,6 @@ EXPORT_SYMBOL(pin_user_pages_remote); * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. - * @vmas: array of pointers to vmas corresponding to each page. - * Or NULL if the caller does not require them. * * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and * FOLL_PIN is set. @@ -3141,15 +3139,14 @@ EXPORT_SYMBOL(pin_user_pages_remote); * see Documentation/core-api/pin_user_pages.rst for details. 
*/ long pin_user_pages(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas) + unsigned int gup_flags, struct page **pages) { int locked = 1; - if (!is_valid_gup_args(pages, vmas, NULL, &gup_flags, FOLL_PIN)) + if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_PIN)) return 0; return __gup_longterm_locked(current->mm, start, nr_pages, - pages, vmas, &locked, gup_flags); + pages, NULL, &locked, gup_flags); } EXPORT_SYMBOL(pin_user_pages); diff --git a/mm/gup_test.c b/mm/gup_test.c index 9ba8ea23f84e..1668ce0e0783 100644 --- a/mm/gup_test.c +++ b/mm/gup_test.c @@ -146,18 +146,17 @@ static int __gup_test_ioctl(unsigned int cmd, pages + i); break; case PIN_BASIC_TEST: - nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i, - NULL); + nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i); break; case PIN_LONGTERM_BENCHMARK: nr = pin_user_pages(addr, nr, gup->gup_flags | FOLL_LONGTERM, - pages + i, NULL); + pages + i); break; case DUMP_USER_PAGES_TEST: if (gup->test_flags & GUP_TEST_FLAG_DUMP_PAGES_USE_PIN) nr = pin_user_pages(addr, nr, gup->gup_flags, - pages + i, NULL); + pages + i); else nr = get_user_pages(addr, nr, gup->gup_flags, pages + i); @@ -270,7 +269,7 @@ static inline int pin_longterm_test_start(unsigned long arg) gup_flags, pages); else cur_pages = pin_user_pages(addr, remaining_pages, - gup_flags, pages, NULL); + gup_flags, pages); if (cur_pages < 0) { pin_longterm_test_stop(); ret = cur_pages; diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 02207e852d79..06cead2b8e34 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -103,7 +103,7 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address) mmap_read_lock(current->mm); npgs = pin_user_pages(address, umem->npgs, - gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL); + gup_flags | FOLL_LONGTERM, &umem->pgs[0]); mmap_read_unlock(current->mm); if (npgs != umem->npgs) { -- cgit v1.2.3 From b2cac248191b7466c5819e0da617b0705a26e197 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 17 May 2023 20:25:48 +0100 Subject: mm/gup: remove vmas array from internal GUP functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now we have eliminated all callers to GUP APIs which use the vmas parameter, eliminate it altogether. This eliminates a class of bugs where vmas might have been kept around longer than the mmap_lock and thus we need not be concerned about locks being dropped during this operation leaving behind dangling pointers. This simplifies the GUP API and makes it considerably clearer as to its purpose - follow flags are applied and if pinning, an array of pages is returned. 
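
The class of bug being designed out is easiest to see in a deliberately broken, purely hypothetical caller written against the pre-series prototype (no real call site does this):

	static void broken_vma_cache(unsigned long addr)
	{
		struct vm_area_struct *vmas[1] = { NULL };
		struct page *page;

		mmap_read_lock(current->mm);
		/* pre-series prototype: get_user_pages(start, nr, flags, pages, vmas) */
		if (get_user_pages(addr, 1, FOLL_WRITE, &page, vmas) == 1)
			put_page(page);
		mmap_read_unlock(current->mm);

		/*
		 * BUG: mmap_lock has been dropped, so vmas[0] may refer to a
		 * VMA that has since been freed, split or merged; this is the
		 * dangling-pointer class described above.
		 */
		if (vmas[0] && (vmas[0]->vm_flags & VM_SHARED))
			pr_info("user range is in a shared mapping\n");
	}

With the vmas array gone from the internal functions, this pattern can no longer be expressed at all.
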
Link: https://lkml.kernel.org/r/6811b4b2b4b3baf3dd07f422bb18853bb2cd09fb.1684350871.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Acked-by: David Hildenbrand Reviewed-by: Christoph Hellwig Cc: Catalin Marinas Cc: Christian König Cc: Dennis Dalessandro Cc: Greg Kroah-Hartman Cc: Janosch Frank Cc: Jarkko Sakkinen Cc: Jason Gunthorpe Cc: Jens Axboe Cc: Matthew Wilcox (Oracle) Cc: Sakari Ailus Cc: Sean Christopherson Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 10 +++--- mm/gup.c | 83 ++++++++++++++++++------------------------------- mm/hugetlb.c | 24 ++++++-------- 3 files changed, 45 insertions(+), 72 deletions(-) (limited to 'mm') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index f1543a0568ff..21f942025fec 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -133,9 +133,8 @@ int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags); long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, - struct page **, struct vm_area_struct **, - unsigned long *, unsigned long *, long, unsigned int, - int *); + struct page **, unsigned long *, unsigned long *, + long, unsigned int, int *); void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long, struct page *, zap_flags_t); @@ -306,9 +305,8 @@ static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, static inline long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, - struct vm_area_struct **vmas, unsigned long *position, - unsigned long *nr_pages, long i, unsigned int flags, - int *nonblocking) + unsigned long *position, unsigned long *nr_pages, + long i, unsigned int flags, int *nonblocking) { BUG(); return 0; diff --git a/mm/gup.c b/mm/gup.c index 18e3bc2ee3f1..8db58305f4eb 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1024,8 +1024,6 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. - * @vmas: array of pointers to vmas corresponding to each page. - * Or NULL if the caller does not require them. * @locked: whether we're still with the mmap_lock held * * Returns either number of pages pinned (which may be less than the @@ -1039,8 +1037,6 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) * * The caller is responsible for releasing returned @pages, via put_page(). * - * @vmas are valid only as long as mmap_lock is held. - * * Must be called with mmap_lock held. It may be released. See below. 
* * __get_user_pages walks a process's page tables and takes a reference to @@ -1076,7 +1072,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) static long __get_user_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas, int *locked) + int *locked) { long ret = 0, i = 0; struct vm_area_struct *vma = NULL; @@ -1116,9 +1112,9 @@ static long __get_user_pages(struct mm_struct *mm, goto out; if (is_vm_hugetlb_page(vma)) { - i = follow_hugetlb_page(mm, vma, pages, vmas, - &start, &nr_pages, i, - gup_flags, locked); + i = follow_hugetlb_page(mm, vma, pages, + &start, &nr_pages, i, + gup_flags, locked); if (!*locked) { /* * We've got a VM_FAULT_RETRY @@ -1183,10 +1179,6 @@ retry: ctx.page_mask = 0; } next_page: - if (vmas) { - vmas[i] = vma; - ctx.page_mask = 0; - } page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); if (page_increm > nr_pages) page_increm = nr_pages; @@ -1341,7 +1333,6 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, - struct vm_area_struct **vmas, int *locked, unsigned int flags) { @@ -1379,7 +1370,7 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm, pages_done = 0; for (;;) { ret = __get_user_pages(mm, start, nr_pages, flags, pages, - vmas, locked); + locked); if (!(flags & FOLL_UNLOCKABLE)) { /* VM_FAULT_RETRY couldn't trigger, bypass */ pages_done = ret; @@ -1443,7 +1434,7 @@ retry: *locked = 1; ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED, - pages, NULL, locked); + pages, locked); if (!*locked) { /* Continue to retry until we succeeded */ BUG_ON(ret != 0); @@ -1541,7 +1532,7 @@ long populate_vma_page_range(struct vm_area_struct *vma, * not result in a stack expansion that recurses back here. */ ret = __get_user_pages(mm, start, nr_pages, gup_flags, - NULL, NULL, locked ? locked : &local_locked); + NULL, locked ? locked : &local_locked); lru_add_drain(); return ret; } @@ -1599,7 +1590,7 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start, return -EINVAL; ret = __get_user_pages(mm, start, nr_pages, gup_flags, - NULL, NULL, locked); + NULL, locked); lru_add_drain(); return ret; } @@ -1667,8 +1658,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) #else /* CONFIG_MMU */ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, - struct vm_area_struct **vmas, int *locked, - unsigned int foll_flags) + int *locked, unsigned int foll_flags) { struct vm_area_struct *vma; bool must_unlock = false; @@ -1712,8 +1702,7 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, if (pages[i]) get_page(pages[i]); } - if (vmas) - vmas[i] = vma; + start = (start + PAGE_SIZE) & PAGE_MASK; } @@ -1894,8 +1883,7 @@ struct page *get_dump_page(unsigned long addr) int locked = 0; int ret; - ret = __get_user_pages_locked(current->mm, addr, 1, &page, NULL, - &locked, + ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked, FOLL_FORCE | FOLL_DUMP | FOLL_GET); return (ret == 1) ? 
page : NULL; } @@ -2068,7 +2056,6 @@ static long __gup_longterm_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, - struct vm_area_struct **vmas, int *locked, unsigned int gup_flags) { @@ -2076,13 +2063,13 @@ static long __gup_longterm_locked(struct mm_struct *mm, long rc, nr_pinned_pages; if (!(gup_flags & FOLL_LONGTERM)) - return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, + return __get_user_pages_locked(mm, start, nr_pages, pages, locked, gup_flags); flags = memalloc_pin_save(); do { nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages, - pages, vmas, locked, + pages, locked, gup_flags); if (nr_pinned_pages <= 0) { rc = nr_pinned_pages; @@ -2100,9 +2087,8 @@ static long __gup_longterm_locked(struct mm_struct *mm, * Check that the given flags are valid for the exported gup/pup interface, and * update them with the required flags that the caller must have set. */ -static bool is_valid_gup_args(struct page **pages, struct vm_area_struct **vmas, - int *locked, unsigned int *gup_flags_p, - unsigned int to_set) +static bool is_valid_gup_args(struct page **pages, int *locked, + unsigned int *gup_flags_p, unsigned int to_set) { unsigned int gup_flags = *gup_flags_p; @@ -2144,13 +2130,6 @@ static bool is_valid_gup_args(struct page **pages, struct vm_area_struct **vmas, (gup_flags & FOLL_PCI_P2PDMA))) return false; - /* - * Can't use VMAs with locked, as locked allows GUP to unlock - * which invalidates the vmas array - */ - if (WARN_ON_ONCE(vmas && (gup_flags & FOLL_UNLOCKABLE))) - return false; - *gup_flags_p = gup_flags; return true; } @@ -2219,11 +2198,11 @@ long get_user_pages_remote(struct mm_struct *mm, { int local_locked = 1; - if (!is_valid_gup_args(pages, NULL, locked, &gup_flags, + if (!is_valid_gup_args(pages, locked, &gup_flags, FOLL_TOUCH | FOLL_REMOTE)) return -EINVAL; - return __get_user_pages_locked(mm, start, nr_pages, pages, NULL, + return __get_user_pages_locked(mm, start, nr_pages, pages, locked ? locked : &local_locked, gup_flags); } @@ -2258,11 +2237,11 @@ long get_user_pages(unsigned long start, unsigned long nr_pages, { int locked = 1; - if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_TOUCH)) + if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH)) return -EINVAL; return __get_user_pages_locked(current->mm, start, nr_pages, pages, - NULL, &locked, gup_flags); + &locked, gup_flags); } EXPORT_SYMBOL(get_user_pages); @@ -2286,12 +2265,12 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, { int locked = 0; - if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, + if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH | FOLL_UNLOCKABLE)) return -EINVAL; return __get_user_pages_locked(current->mm, start, nr_pages, pages, - NULL, &locked, gup_flags); + &locked, gup_flags); } EXPORT_SYMBOL(get_user_pages_unlocked); @@ -2981,7 +2960,7 @@ static int internal_get_user_pages_fast(unsigned long start, start += nr_pinned << PAGE_SHIFT; pages += nr_pinned; ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned, - pages, NULL, &locked, + pages, &locked, gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE); if (ret < 0) { /* @@ -3023,7 +3002,7 @@ int get_user_pages_fast_only(unsigned long start, int nr_pages, * FOLL_FAST_ONLY is required in order to match the API description of * this routine: no fall back to regular ("slow") GUP. 
*/ - if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, + if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET | FOLL_FAST_ONLY)) return -EINVAL; @@ -3056,7 +3035,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, * FOLL_GET, because gup fast is always a "pin with a +1 page refcount" * request. */ - if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_GET)) + if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) return -EINVAL; return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); } @@ -3081,7 +3060,7 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast); int pin_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages) { - if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_PIN)) + if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) return -EINVAL; return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); } @@ -3114,10 +3093,10 @@ long pin_user_pages_remote(struct mm_struct *mm, { int local_locked = 1; - if (!is_valid_gup_args(pages, NULL, locked, &gup_flags, + if (!is_valid_gup_args(pages, locked, &gup_flags, FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE)) return 0; - return __gup_longterm_locked(mm, start, nr_pages, pages, NULL, + return __gup_longterm_locked(mm, start, nr_pages, pages, locked ? locked : &local_locked, gup_flags); } @@ -3143,10 +3122,10 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages, { int locked = 1; - if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_PIN)) + if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) return 0; return __gup_longterm_locked(current->mm, start, nr_pages, - pages, NULL, &locked, gup_flags); + pages, &locked, gup_flags); } EXPORT_SYMBOL(pin_user_pages); @@ -3160,11 +3139,11 @@ long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, { int locked = 0; - if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, + if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE)) return 0; - return __gup_longterm_locked(current->mm, start, nr_pages, pages, NULL, + return __gup_longterm_locked(current->mm, start, nr_pages, pages, &locked, gup_flags); } EXPORT_SYMBOL(pin_user_pages_unlocked); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index f154019e6b84..ea24718db4af 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -6425,17 +6425,14 @@ out_release_nounlock: } #endif /* CONFIG_USERFAULTFD */ -static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma, - int refs, struct page **pages, - struct vm_area_struct **vmas) +static void record_subpages(struct page *page, struct vm_area_struct *vma, + int refs, struct page **pages) { int nr; for (nr = 0; nr < refs; nr++) { if (likely(pages)) pages[nr] = nth_page(page, nr); - if (vmas) - vmas[nr] = vma; } } @@ -6508,9 +6505,9 @@ out_unlock: } long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, - struct page **pages, struct vm_area_struct **vmas, - unsigned long *position, unsigned long *nr_pages, - long i, unsigned int flags, int *locked) + struct page **pages, unsigned long *position, + unsigned long *nr_pages, long i, unsigned int flags, + int *locked) { unsigned long pfn_offset; unsigned long vaddr = *position; @@ -6638,7 +6635,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, * If subpage information not requested, update counters * and skip the same_page loop below. 
*/ - if (!pages && !vmas && !pfn_offset && + if (!pages && !pfn_offset && (vaddr + huge_page_size(h) < vma->vm_end) && (remainder >= pages_per_huge_page(h))) { vaddr += huge_page_size(h); @@ -6653,11 +6650,10 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, refs = min3(pages_per_huge_page(h) - pfn_offset, remainder, (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT); - if (pages || vmas) - record_subpages_vmas(nth_page(page, pfn_offset), - vma, refs, - likely(pages) ? pages + i : NULL, - vmas ? vmas + i : NULL); + if (pages) + record_subpages(nth_page(page, pfn_offset), + vma, refs, + likely(pages) ? pages + i : NULL); if (pages) { /* -- cgit v1.2.3 From 4e096ae1801e24b338e02715c65c3ffa8883ba5d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 13 May 2023 01:11:01 +0100 Subject: mm: convert migrate_pages() to work on folios Almost all of the callers & implementors of migrate_pages() were already converted to use folios. compaction_alloc() & compaction_free() are trivial to convert a part of this patch and not worth splitting out. Link: https://lkml.kernel.org/r/20230513001101.276972-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: "Huang, Ying" Signed-off-by: Andrew Morton --- Documentation/mm/page_migration.rst | 7 +- .../translations/zh_CN/mm/page_migration.rst | 2 +- include/linux/migrate.h | 16 +- mm/compaction.c | 15 +- mm/mempolicy.c | 15 +- mm/migrate.c | 161 ++++++++++----------- mm/vmscan.c | 15 +- 7 files changed, 108 insertions(+), 123 deletions(-) (limited to 'mm') diff --git a/Documentation/mm/page_migration.rst b/Documentation/mm/page_migration.rst index 313dce18893e..e35af7805be5 100644 --- a/Documentation/mm/page_migration.rst +++ b/Documentation/mm/page_migration.rst @@ -73,14 +73,13 @@ In kernel use of migrate_pages() It also prevents the swapper or other scans from encountering the page. -2. We need to have a function of type new_page_t that can be +2. We need to have a function of type new_folio_t that can be passed to migrate_pages(). This function should figure out - how to allocate the correct new page given the old page. + how to allocate the correct new folio given the old folio. 3. The migrate_pages() function is called which attempts to do the migration. It will call the function to allocate - the new page for each page that is considered for - moving. + the new folio for each folio that is considered for moving. How migrate_pages() works ========================= diff --git a/Documentation/translations/zh_CN/mm/page_migration.rst b/Documentation/translations/zh_CN/mm/page_migration.rst index 076081dc1635..f95063826a15 100644 --- a/Documentation/translations/zh_CN/mm/page_migration.rst +++ b/Documentation/translations/zh_CN/mm/page_migration.rst @@ -55,7 +55,7 @@ mbind()设置一个新的内存策略。一个进程的页面也可以通过sys_ 消失。它还可以防止交换器或其他扫描器遇到该页。 -2. 我们需要有一个new_page_t类型的函数,可以传递给migrate_pages()。这个函数应该计算 +2. 我们需要有一个new_folio_t类型的函数,可以传递给migrate_pages()。这个函数应该计算 出如何在给定的旧页面中分配正确的新页面。 3. 
migrate_pages()函数被调用,它试图进行迁移。它将调用该函数为每个被考虑迁移的页面分 diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 6241a1596a75..6de5756d8533 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -7,8 +7,8 @@ #include #include -typedef struct page *new_page_t(struct page *page, unsigned long private); -typedef void free_page_t(struct page *page, unsigned long private); +typedef struct folio *new_folio_t(struct folio *folio, unsigned long private); +typedef void free_folio_t(struct folio *folio, unsigned long private); struct migration_target_control; @@ -67,10 +67,10 @@ int migrate_folio_extra(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode, int extra_count); int migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode); -int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, +int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free, unsigned long private, enum migrate_mode mode, int reason, unsigned int *ret_succeeded); -struct page *alloc_migration_target(struct page *page, unsigned long private); +struct folio *alloc_migration_target(struct folio *src, unsigned long private); bool isolate_movable_page(struct page *page, isolate_mode_t mode); int migrate_huge_page_move_mapping(struct address_space *mapping, @@ -85,11 +85,11 @@ int folio_migrate_mapping(struct address_space *mapping, #else static inline void putback_movable_pages(struct list_head *l) {} -static inline int migrate_pages(struct list_head *l, new_page_t new, - free_page_t free, unsigned long private, enum migrate_mode mode, - int reason, unsigned int *ret_succeeded) +static inline int migrate_pages(struct list_head *l, new_folio_t new, + free_folio_t free, unsigned long private, + enum migrate_mode mode, int reason, unsigned int *ret_succeeded) { return -ENOSYS; } -static inline struct page *alloc_migration_target(struct page *page, +static inline struct folio *alloc_migration_target(struct folio *src, unsigned long private) { return NULL; } static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode) diff --git a/mm/compaction.c b/mm/compaction.c index f6465ae74d3f..e23e00bec030 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1685,11 +1685,10 @@ splitmap: * This is a migrate-callback that "allocates" freepages by taking pages * from the isolated freelists in the block we are migrating to. */ -static struct page *compaction_alloc(struct page *migratepage, - unsigned long data) +static struct folio *compaction_alloc(struct folio *src, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; - struct page *freepage; + struct folio *dst; if (list_empty(&cc->freepages)) { isolate_freepages(cc); @@ -1698,11 +1697,11 @@ static struct page *compaction_alloc(struct page *migratepage, return NULL; } - freepage = list_entry(cc->freepages.next, struct page, lru); - list_del(&freepage->lru); + dst = list_entry(cc->freepages.next, struct folio, lru); + list_del(&dst->lru); cc->nr_freepages--; - return freepage; + return dst; } /* @@ -1710,11 +1709,11 @@ static struct page *compaction_alloc(struct page *migratepage, * freelist. All pages on the freelist are from the same zone, so there is no * special handling needed for NUMA. 
*/ -static void compaction_free(struct page *page, unsigned long data) +static void compaction_free(struct folio *dst, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; - list_add(&page->lru, &cc->freepages); + list_add(&dst->lru, &cc->freepages); cc->nr_freepages++; } diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 1756389a0609..f06ca8c18e62 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1195,24 +1195,22 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, * list of pages handed to migrate_pages()--which is how we get here-- * is in virtual address order. */ -static struct page *new_page(struct page *page, unsigned long start) +static struct folio *new_folio(struct folio *src, unsigned long start) { - struct folio *dst, *src = page_folio(page); struct vm_area_struct *vma; unsigned long address; VMA_ITERATOR(vmi, current->mm, start); gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL; for_each_vma(vmi, vma) { - address = page_address_in_vma(page, vma); + address = page_address_in_vma(&src->page, vma); if (address != -EFAULT) break; } if (folio_test_hugetlb(src)) { - dst = alloc_hugetlb_folio_vma(folio_hstate(src), + return alloc_hugetlb_folio_vma(folio_hstate(src), vma, address); - return &dst->page; } if (folio_test_large(src)) @@ -1221,9 +1219,8 @@ static struct page *new_page(struct page *page, unsigned long start) /* * if !vma, vma_alloc_folio() will use task or system default policy */ - dst = vma_alloc_folio(gfp, folio_order(src), vma, address, + return vma_alloc_folio(gfp, folio_order(src), vma, address, folio_test_large(src)); - return &dst->page; } #else @@ -1239,7 +1236,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, return -ENOSYS; } -static struct page *new_page(struct page *page, unsigned long start) +static struct folio *new_folio(struct folio *src, unsigned long start) { return NULL; } @@ -1334,7 +1331,7 @@ static long do_mbind(unsigned long start, unsigned long len, if (!list_empty(&pagelist)) { WARN_ON_ONCE(flags & MPOL_MF_LAZY); - nr_failed = migrate_pages(&pagelist, new_page, NULL, + nr_failed = migrate_pages(&pagelist, new_folio, NULL, start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL); if (nr_failed) putback_movable_pages(&pagelist); diff --git a/mm/migrate.c b/mm/migrate.c index cb292d2a90ce..30b5ce10935e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1067,15 +1067,13 @@ static void migrate_folio_undo_src(struct folio *src, } /* Restore the destination folio to the original state upon failure */ -static void migrate_folio_undo_dst(struct folio *dst, - bool locked, - free_page_t put_new_page, - unsigned long private) +static void migrate_folio_undo_dst(struct folio *dst, bool locked, + free_folio_t put_new_folio, unsigned long private) { if (locked) folio_unlock(dst); - if (put_new_page) - put_new_page(&dst->page, private); + if (put_new_folio) + put_new_folio(dst, private); else folio_put(dst); } @@ -1099,14 +1097,13 @@ static void migrate_folio_done(struct folio *src, } /* Obtain the lock on page, remove all ptes. 
*/ -static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page, - unsigned long private, struct folio *src, - struct folio **dstp, enum migrate_mode mode, - enum migrate_reason reason, struct list_head *ret) +static int migrate_folio_unmap(new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, + struct folio *src, struct folio **dstp, enum migrate_mode mode, + enum migrate_reason reason, struct list_head *ret) { struct folio *dst; int rc = -EAGAIN; - struct page *newpage = NULL; int page_was_mapped = 0; struct anon_vma *anon_vma = NULL; bool is_lru = !__PageMovable(&src->page); @@ -1123,10 +1120,9 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page return MIGRATEPAGE_SUCCESS; } - newpage = get_new_page(&src->page, private); - if (!newpage) + dst = get_new_folio(src, private); + if (!dst) return -ENOMEM; - dst = page_folio(newpage); *dstp = dst; dst->private = NULL; @@ -1254,13 +1250,13 @@ out: ret = NULL; migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret); - migrate_folio_undo_dst(dst, dst_locked, put_new_page, private); + migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private); return rc; } /* Migrate the folio to the newly allocated folio in dst. */ -static int migrate_folio_move(free_page_t put_new_page, unsigned long private, +static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private, struct folio *src, struct folio *dst, enum migrate_mode mode, enum migrate_reason reason, struct list_head *ret) @@ -1332,7 +1328,7 @@ out: } migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret); - migrate_folio_undo_dst(dst, true, put_new_page, private); + migrate_folio_undo_dst(dst, true, put_new_folio, private); return rc; } @@ -1355,16 +1351,14 @@ out: * because then pte is replaced with migration swap entry and direct I/O code * will wait in the page fault for migration to complete. */ -static int unmap_and_move_huge_page(new_page_t get_new_page, - free_page_t put_new_page, unsigned long private, - struct page *hpage, int force, - enum migrate_mode mode, int reason, - struct list_head *ret) +static int unmap_and_move_huge_page(new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, + struct folio *src, int force, enum migrate_mode mode, + int reason, struct list_head *ret) { - struct folio *dst, *src = page_folio(hpage); + struct folio *dst; int rc = -EAGAIN; int page_was_mapped = 0; - struct page *new_hpage; struct anon_vma *anon_vma = NULL; struct address_space *mapping = NULL; @@ -1374,10 +1368,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, return MIGRATEPAGE_SUCCESS; } - new_hpage = get_new_page(hpage, private); - if (!new_hpage) + dst = get_new_folio(src, private); + if (!dst) return -ENOMEM; - dst = page_folio(new_hpage); if (!folio_trylock(src)) { if (!force) @@ -1418,7 +1411,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, * semaphore in write mode here and set TTU_RMAP_LOCKED * to let lower levels know we have taken the lock. */ - mapping = hugetlb_page_mapping_lock_write(hpage); + mapping = hugetlb_page_mapping_lock_write(&src->page); if (unlikely(!mapping)) goto unlock_put_anon; @@ -1448,7 +1441,7 @@ put_anon: if (rc == MIGRATEPAGE_SUCCESS) { move_hugetlb_state(src, dst, reason); - put_new_page = NULL; + put_new_folio = NULL; } out_unlock: @@ -1464,8 +1457,8 @@ out: * it. Otherwise, put_page() will drop the reference grabbed during * isolation. 
*/ - if (put_new_page) - put_new_page(new_hpage, private); + if (put_new_folio) + put_new_folio(dst, private); else folio_putback_active_hugetlb(dst); @@ -1512,8 +1505,8 @@ struct migrate_pages_stats { * exist any more. It is caller's responsibility to call putback_movable_pages() * only if ret != 0. */ -static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page, - free_page_t put_new_page, unsigned long private, +static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, enum migrate_mode mode, int reason, struct migrate_pages_stats *stats, struct list_head *ret_folios) @@ -1551,9 +1544,9 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page, continue; } - rc = unmap_and_move_huge_page(get_new_page, - put_new_page, private, - &folio->page, pass > 2, mode, + rc = unmap_and_move_huge_page(get_new_folio, + put_new_folio, private, + folio, pass > 2, mode, reason, ret_folios); /* * The rules are: @@ -1610,11 +1603,11 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page, * deadlock (e.g., for loop device). So, if mode != MIGRATE_ASYNC, the * length of the from list must be <= 1. */ -static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, - free_page_t put_new_page, unsigned long private, - enum migrate_mode mode, int reason, struct list_head *ret_folios, - struct list_head *split_folios, struct migrate_pages_stats *stats, - int nr_pass) +static int migrate_pages_batch(struct list_head *from, + new_folio_t get_new_folio, free_folio_t put_new_folio, + unsigned long private, enum migrate_mode mode, int reason, + struct list_head *ret_folios, struct list_head *split_folios, + struct migrate_pages_stats *stats, int nr_pass) { int retry = 1; int thp_retry = 1; @@ -1664,8 +1657,9 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, continue; } - rc = migrate_folio_unmap(get_new_page, put_new_page, private, - folio, &dst, mode, reason, ret_folios); + rc = migrate_folio_unmap(get_new_folio, put_new_folio, + private, folio, &dst, mode, reason, + ret_folios); /* * The rules are: * Success: folio will be freed @@ -1762,7 +1756,7 @@ move: cond_resched(); - rc = migrate_folio_move(put_new_page, private, + rc = migrate_folio_move(put_new_folio, private, folio, dst, mode, reason, ret_folios); /* @@ -1808,7 +1802,7 @@ out: migrate_folio_undo_src(folio, page_was_mapped, anon_vma, true, ret_folios); list_del(&dst->lru); - migrate_folio_undo_dst(dst, true, put_new_page, private); + migrate_folio_undo_dst(dst, true, put_new_folio, private); dst = dst2; dst2 = list_next_entry(dst, lru); } @@ -1816,10 +1810,11 @@ out: return rc; } -static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page, - free_page_t put_new_page, unsigned long private, - enum migrate_mode mode, int reason, struct list_head *ret_folios, - struct list_head *split_folios, struct migrate_pages_stats *stats) +static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, + enum migrate_mode mode, int reason, + struct list_head *ret_folios, struct list_head *split_folios, + struct migrate_pages_stats *stats) { int rc, nr_failed = 0; LIST_HEAD(folios); @@ -1827,7 +1822,7 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page, memset(&astats, 0, sizeof(astats)); /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */ - rc = 
migrate_pages_batch(from, get_new_page, put_new_page, private, MIGRATE_ASYNC, + rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC, reason, &folios, split_folios, &astats, NR_MAX_MIGRATE_ASYNC_RETRY); stats->nr_succeeded += astats.nr_succeeded; @@ -1849,7 +1844,7 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page, list_splice_tail_init(&folios, from); while (!list_empty(from)) { list_move(from->next, &folios); - rc = migrate_pages_batch(&folios, get_new_page, put_new_page, + rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio, private, mode, reason, ret_folios, split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY); list_splice_tail_init(&folios, ret_folios); @@ -1866,11 +1861,11 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page, * supplied as the target for the page migration * * @from: The list of folios to be migrated. - * @get_new_page: The function used to allocate free folios to be used + * @get_new_folio: The function used to allocate free folios to be used * as the target of the folio migration. - * @put_new_page: The function used to free target folios if migration + * @put_new_folio: The function used to free target folios if migration * fails, or NULL if no special handling is necessary. - * @private: Private data to be passed on to get_new_page() + * @private: Private data to be passed on to get_new_folio() * @mode: The migration mode that specifies the constraints for * folio migration, if any. * @reason: The reason for folio migration. @@ -1887,8 +1882,8 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page, * considered as the number of non-migrated large folio, no matter how many * split folios of the large folio are migrated successfully. */ -int migrate_pages(struct list_head *from, new_page_t get_new_page, - free_page_t put_new_page, unsigned long private, +int migrate_pages(struct list_head *from, new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, enum migrate_mode mode, int reason, unsigned int *ret_succeeded) { int rc, rc_gather; @@ -1903,7 +1898,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page, memset(&stats, 0, sizeof(stats)); - rc_gather = migrate_hugetlbs(from, get_new_page, put_new_page, private, + rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private, mode, reason, &stats, &ret_folios); if (rc_gather < 0) goto out; @@ -1926,12 +1921,14 @@ again: else list_splice_init(from, &folios); if (mode == MIGRATE_ASYNC) - rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private, - mode, reason, &ret_folios, &split_folios, &stats, - NR_MAX_MIGRATE_PAGES_RETRY); + rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio, + private, mode, reason, &ret_folios, + &split_folios, &stats, + NR_MAX_MIGRATE_PAGES_RETRY); else - rc = migrate_pages_sync(&folios, get_new_page, put_new_page, private, - mode, reason, &ret_folios, &split_folios, &stats); + rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio, + private, mode, reason, &ret_folios, + &split_folios, &stats); list_splice_tail_init(&folios, &ret_folios); if (rc < 0) { rc_gather = rc; @@ -1944,8 +1941,9 @@ again: * is counted as 1 failure already. And, we only try to migrate * with minimal effort, force MIGRATE_ASYNC mode and retry once. 
*/ - migrate_pages_batch(&split_folios, get_new_page, put_new_page, private, - MIGRATE_ASYNC, reason, &ret_folios, NULL, &stats, 1); + migrate_pages_batch(&split_folios, get_new_folio, + put_new_folio, private, MIGRATE_ASYNC, reason, + &ret_folios, NULL, &stats, 1); list_splice_tail_init(&split_folios, &ret_folios); } rc_gather += rc; @@ -1980,14 +1978,11 @@ out: return rc_gather; } -struct page *alloc_migration_target(struct page *page, unsigned long private) +struct folio *alloc_migration_target(struct folio *src, unsigned long private) { - struct folio *folio = page_folio(page); struct migration_target_control *mtc; gfp_t gfp_mask; unsigned int order = 0; - struct folio *hugetlb_folio = NULL; - struct folio *new_folio = NULL; int nid; int zidx; @@ -1995,33 +1990,30 @@ struct page *alloc_migration_target(struct page *page, unsigned long private) gfp_mask = mtc->gfp_mask; nid = mtc->nid; if (nid == NUMA_NO_NODE) - nid = folio_nid(folio); + nid = folio_nid(src); - if (folio_test_hugetlb(folio)) { - struct hstate *h = folio_hstate(folio); + if (folio_test_hugetlb(src)) { + struct hstate *h = folio_hstate(src); gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); - hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid, + return alloc_hugetlb_folio_nodemask(h, nid, mtc->nmask, gfp_mask); - return &hugetlb_folio->page; } - if (folio_test_large(folio)) { + if (folio_test_large(src)) { /* * clear __GFP_RECLAIM to make the migration callback * consistent with regular THP allocations. */ gfp_mask &= ~__GFP_RECLAIM; gfp_mask |= GFP_TRANSHUGE; - order = folio_order(folio); + order = folio_order(src); } - zidx = zone_idx(folio_zone(folio)); + zidx = zone_idx(folio_zone(src)); if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE) gfp_mask |= __GFP_HIGHMEM; - new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask); - - return &new_folio->page; + return __folio_alloc(gfp_mask, order, nid, mtc->nmask); } #ifdef CONFIG_NUMA @@ -2472,13 +2464,12 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat, return false; } -static struct page *alloc_misplaced_dst_page(struct page *page, +static struct folio *alloc_misplaced_dst_folio(struct folio *src, unsigned long data) { int nid = (int) data; - int order = compound_order(page); + int order = folio_order(src); gfp_t gfp = __GFP_THISNODE; - struct folio *new; if (order > 0) gfp |= GFP_TRANSHUGE_LIGHT; @@ -2487,9 +2478,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page, __GFP_NOWARN; gfp &= ~__GFP_RECLAIM; } - new = __folio_alloc_node(gfp, order, nid); - - return &new->page; + return __folio_alloc_node(gfp, order, nid); } static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) @@ -2567,7 +2556,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, goto out; list_add(&page->lru, &migratepages); - nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page, + nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio, NULL, node, MIGRATE_ASYNC, MR_NUMA_MISPLACED, &nr_succeeded); if (nr_remaining) { diff --git a/mm/vmscan.c b/mm/vmscan.c index 15efbfbb1963..4637f6462e9c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1621,9 +1621,10 @@ static void folio_check_dirty_writeback(struct folio *folio, mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); } -static struct page *alloc_demote_page(struct page *page, unsigned long private) +static struct folio *alloc_demote_folio(struct folio *src, + unsigned long private) { - struct page *target_page; + struct folio *dst; nodemask_t 
*allowed_mask; struct migration_target_control *mtc; @@ -1641,14 +1642,14 @@ static struct page *alloc_demote_page(struct page *page, unsigned long private) */ mtc->nmask = NULL; mtc->gfp_mask |= __GFP_THISNODE; - target_page = alloc_migration_target(page, (unsigned long)mtc); - if (target_page) - return target_page; + dst = alloc_migration_target(src, (unsigned long)mtc); + if (dst) + return dst; mtc->gfp_mask &= ~__GFP_THISNODE; mtc->nmask = allowed_mask; - return alloc_migration_target(page, (unsigned long)mtc); + return alloc_migration_target(src, (unsigned long)mtc); } /* @@ -1683,7 +1684,7 @@ static unsigned int demote_folio_list(struct list_head *demote_folios, node_get_allowed_targets(pgdat, &allowed_mask); /* Demotion ignores all cpuset and mempolicy settings */ - migrate_pages(demote_folios, alloc_demote_page, NULL, + migrate_pages(demote_folios, alloc_demote_folio, NULL, (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION, &nr_succeeded); -- cgit v1.2.3 From 89f499f35c11af61ba7075ddc23209d10805a25a Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Thu, 18 May 2023 10:55:14 -0400 Subject: maple_tree: add format option to mt_dump() Allow different formatting strings to be used when dumping the tree. Currently supports hex and decimal. Link: https://lkml.kernel.org/r/20230518145544.1722059-6-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: David Binderman Cc: Peng Zhang Cc: Sergey Senozhatsky Cc: Vernon Yang Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/maple_tree.h | 9 ++++- lib/maple_tree.c | 87 ++++++++++++++++++++++++++-------------- lib/test_maple_tree.c | 10 ++--- mm/internal.h | 4 +- mm/mmap.c | 8 ++-- tools/testing/radix-tree/maple.c | 12 +++--- 6 files changed, 82 insertions(+), 48 deletions(-) (limited to 'mm') diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h index 1fadb5f5978b..140fb271be4a 100644 --- a/include/linux/maple_tree.h +++ b/include/linux/maple_tree.h @@ -670,10 +670,15 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max); #ifdef CONFIG_DEBUG_MAPLE_TREE +enum mt_dump_format { + mt_dump_dec, + mt_dump_hex, +}; + extern atomic_t maple_tree_tests_run; extern atomic_t maple_tree_tests_passed; -void mt_dump(const struct maple_tree *mt); +void mt_dump(const struct maple_tree *mt, enum mt_dump_format format); void mt_validate(struct maple_tree *mt); void mt_cache_shrink(void); #define MT_BUG_ON(__tree, __x) do { \ @@ -681,7 +686,7 @@ void mt_cache_shrink(void); if (__x) { \ pr_info("BUG at %s:%d (%u)\n", \ __func__, __LINE__, __x); \ - mt_dump(__tree); \ + mt_dump(__tree, mt_dump_hex); \ pr_info("Pass: %u Run:%u\n", \ atomic_read(&maple_tree_tests_passed), \ atomic_read(&maple_tree_tests_run)); \ diff --git a/lib/maple_tree.c b/lib/maple_tree.c index e095e2c39a1b..dfa0271101d2 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -5694,7 +5694,7 @@ void *mas_store(struct ma_state *mas, void *entry) trace_ma_write(__func__, mas, 0, entry); #ifdef CONFIG_DEBUG_MAPLE_TREE if (mas->index > mas->last) - pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry); + pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry); MT_BUG_ON(mas->tree, mas->index > mas->last); if (mas->index > mas->last) { mas_set_err(mas, -EINVAL); @@ -6748,22 +6748,33 @@ static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) /* Tree validations */ static void mt_dump_node(const struct maple_tree *mt, void *entry, - unsigned long min, unsigned long max, unsigned int depth); + unsigned long min, unsigned 
long max, unsigned int depth, + enum mt_dump_format format); static void mt_dump_range(unsigned long min, unsigned long max, - unsigned int depth) + unsigned int depth, enum mt_dump_format format) { static const char spaces[] = " "; - if (min == max) - pr_info("%.*s%lu: ", depth * 2, spaces, min); - else - pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max); + switch(format) { + case mt_dump_hex: + if (min == max) + pr_info("%.*s%lx: ", depth * 2, spaces, min); + else + pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max); + break; + default: + case mt_dump_dec: + if (min == max) + pr_info("%.*s%lu: ", depth * 2, spaces, min); + else + pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max); + } } static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, - unsigned int depth) + unsigned int depth, enum mt_dump_format format) { - mt_dump_range(min, max, depth); + mt_dump_range(min, max, depth, format); if (xa_is_value(entry)) pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry), @@ -6777,7 +6788,8 @@ static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, } static void mt_dump_range64(const struct maple_tree *mt, void *entry, - unsigned long min, unsigned long max, unsigned int depth) + unsigned long min, unsigned long max, unsigned int depth, + enum mt_dump_format format) { struct maple_range_64 *node = &mte_to_node(entry)->mr64; bool leaf = mte_is_leaf(entry); @@ -6785,8 +6797,16 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry, int i; pr_cont(" contents: "); - for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) - pr_cont("%p %lu ", node->slot[i], node->pivot[i]); + for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) { + switch(format) { + case mt_dump_hex: + pr_cont("%p %lX ", node->slot[i], node->pivot[i]); + break; + default: + case mt_dump_dec: + pr_cont("%p %lu ", node->slot[i], node->pivot[i]); + } + } pr_cont("%p\n", node->slot[i]); for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) { unsigned long last = max; @@ -6799,24 +6819,32 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry, break; if (leaf) mt_dump_entry(mt_slot(mt, node->slot, i), - first, last, depth + 1); + first, last, depth + 1, format); else if (node->slot[i]) mt_dump_node(mt, mt_slot(mt, node->slot, i), - first, last, depth + 1); + first, last, depth + 1, format); if (last == max) break; if (last > max) { - pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", + switch(format) { + case mt_dump_hex: + pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n", node, last, max, i); - break; + break; + default: + case mt_dump_dec: + pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", + node, last, max, i); + } } first = last + 1; } } static void mt_dump_arange64(const struct maple_tree *mt, void *entry, - unsigned long min, unsigned long max, unsigned int depth) + unsigned long min, unsigned long max, unsigned int depth, + enum mt_dump_format format) { struct maple_arange_64 *node = &mte_to_node(entry)->ma64; bool leaf = mte_is_leaf(entry); @@ -6841,10 +6869,10 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry, break; if (leaf) mt_dump_entry(mt_slot(mt, node->slot, i), - first, last, depth + 1); + first, last, depth + 1, format); else if (node->slot[i]) mt_dump_node(mt, mt_slot(mt, node->slot, i), - first, last, depth + 1); + first, last, depth + 1, format); if (last == max) break; @@ -6858,13 +6886,14 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry, } static void mt_dump_node(const struct maple_tree *mt, 
void *entry, - unsigned long min, unsigned long max, unsigned int depth) + unsigned long min, unsigned long max, unsigned int depth, + enum mt_dump_format format) { struct maple_node *node = mte_to_node(entry); unsigned int type = mte_node_type(entry); unsigned int i; - mt_dump_range(min, max, depth); + mt_dump_range(min, max, depth, format); pr_cont("node %p depth %d type %d parent %p", node, depth, type, node ? node->parent : NULL); @@ -6875,15 +6904,15 @@ static void mt_dump_node(const struct maple_tree *mt, void *entry, if (min + i > max) pr_cont("OUT OF RANGE: "); mt_dump_entry(mt_slot(mt, node->slot, i), - min + i, min + i, depth); + min + i, min + i, depth, format); } break; case maple_leaf_64: case maple_range_64: - mt_dump_range64(mt, entry, min, max, depth); + mt_dump_range64(mt, entry, min, max, depth, format); break; case maple_arange_64: - mt_dump_arange64(mt, entry, min, max, depth); + mt_dump_arange64(mt, entry, min, max, depth, format); break; default: @@ -6891,16 +6920,16 @@ static void mt_dump_node(const struct maple_tree *mt, void *entry, } } -void mt_dump(const struct maple_tree *mt) +void mt_dump(const struct maple_tree *mt, enum mt_dump_format format) { void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt)); pr_info("maple_tree(%p) flags %X, height %u root %p\n", mt, mt->ma_flags, mt_height(mt), entry); if (!xa_is_node(entry)) - mt_dump_entry(entry, 0, 0, 0); + mt_dump_entry(entry, 0, 0, 0, format); else if (entry) - mt_dump_node(mt, entry, 0, mt_node_max(entry), 0); + mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format); } EXPORT_SYMBOL_GPL(mt_dump); @@ -6953,7 +6982,7 @@ static void mas_validate_gaps(struct ma_state *mas) mas_mn(mas), i, mas_get_slot(mas, i), gap, p_end, p_start); - mt_dump(mas->tree); + mt_dump(mas->tree, mt_dump_hex); MT_BUG_ON(mas->tree, gap != p_end - p_start + 1); @@ -6986,7 +7015,7 @@ counted: MT_BUG_ON(mas->tree, max_gap > mas->max); if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) { pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap); - mt_dump(mas->tree); + mt_dump(mas->tree, mt_dump_hex); } MT_BUG_ON(mas->tree, diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index f1db333270e9..d6929270dd36 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -219,7 +219,7 @@ static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max, #ifndef __KERNEL__ if (verbose) { rcu_barrier(); - mt_dump(mt); + mt_dump(mt, mt_dump_dec); pr_info(" %s test of 0-%lu %luK in %d active (%d total)\n", __func__, max, mt_get_alloc_size()/1024, mt_nr_allocated(), mt_nr_tallocated()); @@ -248,7 +248,7 @@ static noinline void check_seq(struct maple_tree *mt, unsigned long max, #ifndef __KERNEL__ if (verbose) { rcu_barrier(); - mt_dump(mt); + mt_dump(mt, mt_dump_dec); pr_info(" seq test of 0-%lu %luK in %d active (%d total)\n", max, mt_get_alloc_size()/1024, mt_nr_allocated(), mt_nr_tallocated()); @@ -893,7 +893,7 @@ static noinline void check_alloc_range(struct maple_tree *mt) #if DEBUG_ALLOC_RANGE pr_debug("\tInsert %lu-%lu\n", range[i] >> 12, (range[i + 1] >> 12) - 1); - mt_dump(mt); + mt_dump(mt, mt_dump_hex); #endif check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1, xa_mk_value(range[i] >> 12), 0); @@ -934,7 +934,7 @@ static noinline void check_alloc_range(struct maple_tree *mt) xa_mk_value(req_range[i] >> 12)); /* pointer */ mt_validate(mt); #if DEBUG_ALLOC_RANGE - mt_dump(mt); + mt_dump(mt, mt_dump_hex); #endif } @@ -1572,7 +1572,7 @@ static noinline void check_node_overwrite(struct 
maple_tree *mt) mtree_test_store_range(mt, i*100, i*100 + 50, xa_mk_value(i*100)); mtree_test_store_range(mt, 319951, 367950, NULL); - /*mt_dump(mt); */ + /*mt_dump(mt, mt_dump_dec); */ mt_validate(mt); } diff --git a/mm/internal.h b/mm/internal.h index ec55da813c13..692498a84fde 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1064,13 +1064,13 @@ static inline void vma_iter_store(struct vma_iterator *vmi, printk("%lu > %lu\n", vmi->mas.index, vma->vm_start); printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end); printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last); - mt_dump(vmi->mas.tree); + mt_dump(vmi->mas.tree, mt_dump_hex); } if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last < vma->vm_start)) { printk("%lu < %lu\n", vmi->mas.last, vma->vm_start); printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end); printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last); - mt_dump(vmi->mas.tree); + mt_dump(vmi->mas.tree, mt_dump_hex); } #endif diff --git a/mm/mmap.c b/mm/mmap.c index 13678edaa22c..04bcf3b3c720 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -301,7 +301,7 @@ out: #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) extern void mt_validate(struct maple_tree *mt); -extern void mt_dump(const struct maple_tree *mt); +extern void mt_dump(const struct maple_tree *mt, enum mt_dump_format fmt); /* Validate the maple tree */ static void validate_mm_mt(struct mm_struct *mm) @@ -323,18 +323,18 @@ static void validate_mm_mt(struct mm_struct *mm) pr_emerg("mt vma: %p %lu - %lu\n", vma_mt, vma_mt->vm_start, vma_mt->vm_end); - mt_dump(mas.tree); + mt_dump(mas.tree, mt_dump_hex); if (vma_mt->vm_end != mas.last + 1) { pr_err("vma: %p vma_mt %lu-%lu\tmt %lu-%lu\n", mm, vma_mt->vm_start, vma_mt->vm_end, mas.index, mas.last); - mt_dump(mas.tree); + mt_dump(mas.tree, mt_dump_hex); } VM_BUG_ON_MM(vma_mt->vm_end != mas.last + 1, mm); if (vma_mt->vm_start != mas.index) { pr_err("vma: %p vma_mt %p %lu - %lu doesn't match\n", mm, vma_mt, vma_mt->vm_start, vma_mt->vm_end); - mt_dump(mas.tree); + mt_dump(mas.tree, mt_dump_hex); } VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm); } diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c index 75df543e019c..ebcb3faf85ea 100644 --- a/tools/testing/radix-tree/maple.c +++ b/tools/testing/radix-tree/maple.c @@ -1054,7 +1054,7 @@ static noinline void check_erase2_testset(struct maple_tree *mt, if (entry_count) MT_BUG_ON(mt, !mt_height(mt)); #if check_erase2_debug > 1 - mt_dump(mt); + mt_dump(mt, mt_dump_hex); #endif #if check_erase2_debug pr_err("Done\n"); @@ -1085,7 +1085,7 @@ static noinline void check_erase2_testset(struct maple_tree *mt, mas_for_each(&mas, foo, ULONG_MAX) { if (xa_is_zero(foo)) { if (addr == mas.index) { - mt_dump(mas.tree); + mt_dump(mas.tree, mt_dump_hex); pr_err("retry failed %lu - %lu\n", mas.index, mas.last); MT_BUG_ON(mt, 1); @@ -34513,7 +34513,7 @@ static void *rcu_reader_rev(void *ptr) if (mas.index != r_start) { alt = xa_mk_value(index + i * 2 + 1 + RCU_RANGE_COUNT); - mt_dump(test->mt); + mt_dump(test->mt, mt_dump_dec); printk("Error: %lu-%lu %p != %lu-%lu %p %p line %d i %d\n", mas.index, mas.last, entry, r_start, r_end, expected, alt, @@ -35784,10 +35784,10 @@ void farmer_tests(void) struct maple_node *node; DEFINE_MTREE(tree); - mt_dump(&tree); + mt_dump(&tree, mt_dump_dec); tree.ma_root = xa_mk_value(0); - mt_dump(&tree); + mt_dump(&tree, mt_dump_dec); node = mt_alloc_one(GFP_KERNEL); node->parent = (void *)((unsigned long)(&tree) | 1); @@ -35797,7 +35797,7 @@ void farmer_tests(void) 
node->mr64.pivot[1] = 1; node->mr64.pivot[2] = 0; tree.ma_root = mt_mk_node(node, maple_leaf_64); - mt_dump(&tree); + mt_dump(&tree, mt_dump_dec); node->parent = ma_parent_ptr(node); ma_free_rcu(node); -- cgit v1.2.3 From b50e195ff436625b26dcc9839bc52cc7c5bf1a54 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Thu, 18 May 2023 10:55:26 -0400 Subject: mm: update validate_mm() to use vma iterator Use the vma iterator in the validation code and combine the code to check the maple tree into the main validate_mm() function. Introduce a new function vma_iter_dump_tree() to dump the maple tree in hex layout. Replace all calls to validate_mm_mt() with validate_mm(). [Liam.Howlett@oracle.com: update validate_mm() to use vma iterator CONFIG flag] Link: https://lkml.kernel.org/r/20230606183538.588190-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20230518145544.1722059-18-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: David Binderman Cc: Peng Zhang Cc: Sergey Senozhatsky Cc: Vernon Yang Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mmdebug.h | 14 ++++++++ mm/debug.c | 9 +++++ mm/internal.h | 3 +- mm/mmap.c | 94 +++++++++++++++++++------------------------------ 4 files changed, 61 insertions(+), 59 deletions(-) (limited to 'mm') diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index b8728d11c949..7c3e7b0b0e8f 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -8,10 +8,12 @@ struct page; struct vm_area_struct; struct mm_struct; +struct vma_iterator; void dump_page(struct page *page, const char *reason); void dump_vma(const struct vm_area_struct *vma); void dump_mm(const struct mm_struct *mm); +void vma_iter_dump_tree(const struct vma_iterator *vmi); #ifdef CONFIG_DEBUG_VM #define VM_BUG_ON(cond) BUG_ON(cond) @@ -74,6 +76,17 @@ void dump_mm(const struct mm_struct *mm); } \ unlikely(__ret_warn_once); \ }) +#define VM_WARN_ON_ONCE_MM(cond, mm) ({ \ + static bool __section(".data.once") __warned; \ + int __ret_warn_once = !!(cond); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + dump_mm(mm); \ + __warned = true; \ + WARN_ON(1); \ + } \ + unlikely(__ret_warn_once); \ +}) #define VM_WARN_ON(cond) (void)WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond) @@ -90,6 +103,7 @@ void dump_mm(const struct mm_struct *mm); #define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN_ON_ONCE_MM(cond, mm) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) #define VM_WARN(cond, format...) 
BUILD_BUG_ON_INVALID(cond) #endif diff --git a/mm/debug.c b/mm/debug.c index c7b228097bd9..ee533a5ceb79 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -268,4 +268,13 @@ void page_init_poison(struct page *page, size_t size) if (page_init_poisoning) memset(page, PAGE_POISON_PATTERN, size); } + +void vma_iter_dump_tree(const struct vma_iterator *vmi) +{ +#if defined(CONFIG_DEBUG_VM_MAPLE_TREE) + mas_dump(&vmi->mas); + mt_dump(vmi->mas.tree, mt_dump_hex); +#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ +} + #endif /* CONFIG_DEBUG_VM */ diff --git a/mm/internal.h b/mm/internal.h index 692498a84fde..41cc5e6225fb 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1064,13 +1064,14 @@ static inline void vma_iter_store(struct vma_iterator *vmi, printk("%lu > %lu\n", vmi->mas.index, vma->vm_start); printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end); printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last); - mt_dump(vmi->mas.tree, mt_dump_hex); + vma_iter_dump_tree(vmi); } if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last < vma->vm_start)) { printk("%lu < %lu\n", vmi->mas.last, vma->vm_start); printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end); printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last); mt_dump(vmi->mas.tree, mt_dump_hex); + vma_iter_dump_tree(vmi); } #endif diff --git a/mm/mmap.c b/mm/mmap.c index 04bcf3b3c720..8f67d80c6dde 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -300,61 +300,40 @@ out: } #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) -extern void mt_validate(struct maple_tree *mt); -extern void mt_dump(const struct maple_tree *mt, enum mt_dump_format fmt); - -/* Validate the maple tree */ -static void validate_mm_mt(struct mm_struct *mm) -{ - struct maple_tree *mt = &mm->mm_mt; - struct vm_area_struct *vma_mt; - - MA_STATE(mas, mt, 0, 0); - - mt_validate(&mm->mm_mt); - mas_for_each(&mas, vma_mt, ULONG_MAX) { - if ((vma_mt->vm_start != mas.index) || - (vma_mt->vm_end - 1 != mas.last)) { - pr_emerg("issue in %s\n", current->comm); - dump_stack(); - dump_vma(vma_mt); - pr_emerg("mt piv: %p %lu - %lu\n", vma_mt, - mas.index, mas.last); - pr_emerg("mt vma: %p %lu - %lu\n", vma_mt, - vma_mt->vm_start, vma_mt->vm_end); - - mt_dump(mas.tree, mt_dump_hex); - if (vma_mt->vm_end != mas.last + 1) { - pr_err("vma: %p vma_mt %lu-%lu\tmt %lu-%lu\n", - mm, vma_mt->vm_start, vma_mt->vm_end, - mas.index, mas.last); - mt_dump(mas.tree, mt_dump_hex); - } - VM_BUG_ON_MM(vma_mt->vm_end != mas.last + 1, mm); - if (vma_mt->vm_start != mas.index) { - pr_err("vma: %p vma_mt %p %lu - %lu doesn't match\n", - mm, vma_mt, vma_mt->vm_start, vma_mt->vm_end); - mt_dump(mas.tree, mt_dump_hex); - } - VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm); - } - } -} - static void validate_mm(struct mm_struct *mm) { int bug = 0; int i = 0; struct vm_area_struct *vma; - MA_STATE(mas, &mm->mm_mt, 0, 0); - - validate_mm_mt(mm); + VMA_ITERATOR(vmi, mm, 0); - mas_for_each(&mas, vma, ULONG_MAX) { + mt_validate(&mm->mm_mt); + for_each_vma(vmi, vma) { #ifdef CONFIG_DEBUG_VM_RB struct anon_vma *anon_vma = vma->anon_vma; struct anon_vma_chain *avc; +#endif + unsigned long vmi_start, vmi_end; + bool warn = 0; + vmi_start = vma_iter_addr(&vmi); + vmi_end = vma_iter_end(&vmi); + if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm)) + warn = 1; + + if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm)) + warn = 1; + + if (warn) { + pr_emerg("issue in %s\n", current->comm); + dump_stack(); + dump_vma(vma); + pr_emerg("tree range: %px start %lx end %lx\n", vma, + vmi_start, vmi_end - 1); + vma_iter_dump_tree(&vmi); + } + +#ifdef 
CONFIG_DEBUG_VM_RB if (anon_vma) { anon_vma_lock_read(anon_vma); list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) @@ -365,14 +344,13 @@ static void validate_mm(struct mm_struct *mm) i++; } if (i != mm->map_count) { - pr_emerg("map_count %d mas_for_each %d\n", mm->map_count, i); + pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i); bug = 1; } VM_BUG_ON_MM(bug, mm); } #else /* !CONFIG_DEBUG_VM_MAPLE_TREE */ -#define validate_mm_mt(root) do { } while (0) #define validate_mm(mm) do { } while (0) #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ @@ -2234,7 +2212,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, struct vm_area_struct *new; int err; - validate_mm_mt(vma->vm_mm); + validate_mm(vma->vm_mm); WARN_ON(vma->vm_start >= addr); WARN_ON(vma->vm_end <= addr); @@ -2292,7 +2270,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, /* Success. */ if (new_below) vma_next(vmi); - validate_mm_mt(vma->vm_mm); + validate_mm(vma->vm_mm); return 0; out_free_mpol: @@ -2301,7 +2279,7 @@ out_free_vmi: vma_iter_free(vmi); out_free_vma: vm_area_free(new); - validate_mm_mt(vma->vm_mm); + validate_mm(vma->vm_mm); return err; } @@ -2936,7 +2914,7 @@ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, arch_unmap(mm, start, end); ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade); - validate_mm_mt(mm); + validate_mm(mm); return ret; } @@ -2958,7 +2936,7 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, struct mm_struct *mm = current->mm; struct vma_prepare vp; - validate_mm_mt(mm); + validate_mm(mm); /* * Check against address space limits by the changed size * Note: This happens *after* clearing old mappings in some code paths. @@ -3199,7 +3177,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, bool faulted_in_anon_vma = true; VMA_ITERATOR(vmi, mm, addr); - validate_mm_mt(mm); + validate_mm(mm); /* * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. @@ -3258,7 +3236,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, goto out_vma_link; *need_rmap_locks = false; } - validate_mm_mt(mm); + validate_mm(mm); return new_vma; out_vma_link: @@ -3274,7 +3252,7 @@ out_free_mempol: out_free_vma: vm_area_free(new_vma); out: - validate_mm_mt(mm); + validate_mm(mm); return NULL; } @@ -3411,7 +3389,7 @@ static struct vm_area_struct *__install_special_mapping( int ret; struct vm_area_struct *vma; - validate_mm_mt(mm); + validate_mm(mm); vma = vm_area_alloc(mm); if (unlikely(vma == NULL)) return ERR_PTR(-ENOMEM); @@ -3434,12 +3412,12 @@ static struct vm_area_struct *__install_special_mapping( perf_event_mmap(vma); - validate_mm_mt(mm); + validate_mm(mm); return vma; out: vm_area_free(vma); - validate_mm_mt(mm); + validate_mm(mm); return ERR_PTR(ret); } -- cgit v1.2.3 From 36bd931049bf93a4eaec4a558beca477db152881 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Thu, 18 May 2023 10:55:27 -0400 Subject: mm: update vma_iter_store() to use MAS_WARN_ON() MAS_WARN_ON() will provide more information on the maple state and can be more useful for debugging. Use this version of WARN_ON() in the debugging code when storing to the tree. Update the printk to a pr_warn(), but this will only be printed when maple tree debug is enabled anyways. Making all print statements into one will keep them together on a busy terminal. 
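As an aside on why folding the separate printk() calls into one pr_warn() keeps the diagnostic readable: each separate call is its own log record, and other CPUs can log in between them, while a single call emits the whole message contiguously. The userspace sketch below is only an analogy (not kernel code; the hex ranges are made-up placeholders) showing the same effect with stdio, where each fprintf() holds the stream lock for the duration of one call. Build with something like "cc -pthread sketch.c" and compare how the split and combined records come out on stderr.

#include <pthread.h>
#include <stdio.h>

static void *log_split(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++) {
		/* Three separate calls: another thread may print in between. */
		fprintf(stderr, "0x2000 > 0x1000\n");
		fprintf(stderr, "store of vma 0x1000-0x3000");
		fprintf(stderr, " into slot 0x2000-0x3000\n");
	}
	return NULL;
}

static void *log_noise(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++)
		fprintf(stderr, "[noise from another thread]\n");
	return NULL;
}

static void *log_combined(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++)
		/* One call: the whole record comes out contiguously. */
		fprintf(stderr,
			"0x2000 > 0x1000\nstore of vma 0x1000-0x3000 into slot 0x2000-0x3000\n");
	return NULL;
}

int main(void)
{
	pthread_t t[3];

	pthread_create(&t[0], NULL, log_split, NULL);
	pthread_create(&t[1], NULL, log_noise, NULL);
	pthread_create(&t[2], NULL, log_combined, NULL);
	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}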
Link: https://lkml.kernel.org/r/20230518145544.1722059-19-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Reviewed-by: Sergey Senozhatsky Cc: David Binderman Cc: Peng Zhang Cc: Vernon Yang Cc: Wei Yang Signed-off-by: Andrew Morton --- mm/internal.h | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/internal.h b/mm/internal.h index 41cc5e6225fb..bb6542279599 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1060,18 +1060,17 @@ static inline void vma_iter_store(struct vma_iterator *vmi, { #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) - if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.index > vma->vm_start)) { - printk("%lu > %lu\n", vmi->mas.index, vma->vm_start); - printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end); - printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last); - vma_iter_dump_tree(vmi); + if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START && + vmi->mas.index > vma->vm_start)) { + pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n", + vmi->mas.index, vma->vm_start, vma->vm_start, + vma->vm_end, vmi->mas.index, vmi->mas.last); } - if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last < vma->vm_start)) { - printk("%lu < %lu\n", vmi->mas.last, vma->vm_start); - printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end); - printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last); - mt_dump(vmi->mas.tree, mt_dump_hex); - vma_iter_dump_tree(vmi); + if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START && + vmi->mas.last < vma->vm_start)) { + pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n", + vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end, + vmi->mas.index, vmi->mas.last); } #endif -- cgit v1.2.3 From 15c0c60b8cee4a6db263585520f994654429fac3 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Thu, 18 May 2023 10:55:31 -0400 Subject: mm/mmap: change do_vmi_align_munmap() for maple tree iterator changes The maple tree iterator clean up is incompatible with the way do_vmi_align_munmap() expects it to behave. Update the expected behaviour to map now since the change will work currently. Link: https://lkml.kernel.org/r/20230518145544.1722059-23-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: David Binderman Cc: Peng Zhang Cc: Sergey Senozhatsky Cc: Vernon Yang Cc: Wei Yang Signed-off-by: Andrew Morton --- mm/mmap.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 8f67d80c6dde..19d3c843be0c 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2388,7 +2388,12 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, #endif } - next = vma_next(vmi); + if (vma_iter_end(vmi) > end) + next = vma_iter_load(vmi); + + if (!next) + next = vma_next(vmi); + if (unlikely(uf)) { /* * If userfaultfd_unmap_prep returns an error the vmas -- cgit v1.2.3 From 5c1c03de1b1636f041a275e777171307cac8d958 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Thu, 18 May 2023 10:55:44 -0400 Subject: mm: avoid rewalk in mmap_region If the iterator has moved to the previous entry, then step forward one range, back to the gap. Link: https://lkml.kernel.org/r/20230518145544.1722059-36-Liam.Howlett@oracle.com Signed-off-by: Liam R. 
Howlett Cc: David Binderman Cc: Peng Zhang Cc: Sergey Senozhatsky Cc: Vernon Yang Cc: Wei Yang Signed-off-by: Andrew Morton --- mm/mmap.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 19d3c843be0c..44be7fdfaac9 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2606,6 +2606,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr, } cannot_expand: + if (prev) + vma_iter_next_range(&vmi); + /* * Determine the object being mapped and call the appropriate * specific mapper. the address has already been validated, but -- cgit v1.2.3 From 62069aace145658bc8ce79cbf7b6cf611db4a22f Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Thu, 18 May 2023 10:11:19 -0400 Subject: mm: page_alloc: set sysctl_lowmem_reserve_ratio storage-class-specifier to static smatch reports mm/page_alloc.c:247:5: warning: symbol 'sysctl_lowmem_reserve_ratio' was not declared. Should it be static? This variable is only used in its defining file, so it should be static Link: https://lkml.kernel.org/r/20230518141119.927074-1-trix@redhat.com Signed-off-by: Tom Rix Signed-off-by: Andrew Morton --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ee23ba9c0ca7..b9a9ba2db9e9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -244,7 +244,7 @@ static void __free_pages_ok(struct page *page, unsigned int order, * TBD: should special case ZONE_DMA32 machines here - in those we normally * don't need any ZONE_NORMAL reservation */ -int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { +static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { #ifdef CONFIG_ZONE_DMA [ZONE_DMA] = 256, #endif -- cgit v1.2.3 From ecd8b2928f2efc7b678b361d51920c15b5ef3885 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 19 May 2023 14:39:55 +0200 Subject: mm: compaction: remove compaction result helpers Patch series "mm: compaction: cleanups & simplifications". These compaction cleanups are split out from the huge page allocator series[1], as requested by reviewer feedback. [1] https://lore.kernel.org/linux-mm/20230418191313.268131-1-hannes@cmpxchg.org/ This patch (of 5): The compaction result helpers encode quirks that are specific to the allocator's retry logic. E.g. COMPACT_SUCCESS and COMPACT_COMPLETE actually represent failures that should be retried upon, and so on. I frequently found myself pulling up the helper implementation in order to understand and work on the retry logic. They're not quite clean abstractions; rather they split the retry logic into two locations. Remove the helpers and inline the checks. Then comment on the result interpretations directly where the decision making happens. 
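For illustration only, here is a standalone, heavily simplified sketch of the pattern the patch moves to. It is not the kernel code and not the exact policy of should_compact_retry(); it only shows the shape of the refactor: the caller branches on the raw enum values and the reasoning is commented right at the decision point, instead of being hidden behind compaction_made_progress()-style predicate helpers.

#include <stdbool.h>
#include <stdio.h>

/* Result values mirroring the kernel enum; the retry policy below is simplified. */
enum compact_result {
	COMPACT_SKIPPED,		/* too few order-0 pages to migrate into */
	COMPACT_DEFERRED,		/* backed off after recent failures */
	COMPACT_CONTENDED,		/* async mode hit lock contention */
	COMPACT_PARTIAL_SKIPPED,	/* scanners met after a partial scan */
	COMPACT_COMPLETE,		/* whole zone scanned, no result */
	COMPACT_SUCCESS,		/* some pageblocks were coalesced */
};

static bool should_retry(enum compact_result result, int *retries, int max_retries)
{
	switch (result) {
	case COMPACT_SUCCESS:
		/* Progress was made but the allocation still failed (e.g. a
		 * race): retry a bounded number of times. */
		return ++(*retries) <= max_retries;
	case COMPACT_SKIPPED:
		/* Not enough free base pages to work with; in the kernel this
		 * is where reclaim is asked whether it can help. */
		return false;
	default:
		/* Deferred, contended, partially scanned or complete: retrying
		 * only makes sense with a higher compaction priority. */
		return true;
	}
}

int main(void)
{
	int retries = 0;

	printf("%d\n", should_retry(COMPACT_SUCCESS, &retries, 4));	/* 1 */
	printf("%d\n", should_retry(COMPACT_SKIPPED, &retries, 4));	/* 0 */
	printf("%d\n", should_retry(COMPACT_DEFERRED, &retries, 4));	/* 1 */
	return 0;
}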
Link: https://lkml.kernel.org/r/20230519123959.77335-1-hannes@cmpxchg.org Link: https://lkml.kernel.org/r/20230519123959.77335-2-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Michal Hocko Signed-off-by: Andrew Morton --- include/linux/compaction.h | 92 ------------------------------------------ include/trace/events/mmflags.h | 4 +- mm/page_alloc.c | 30 ++++++++------ 3 files changed, 19 insertions(+), 107 deletions(-) (limited to 'mm') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index a6e512cfb670..1f0328a2ba48 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -95,78 +95,6 @@ extern enum compact_result compaction_suitable(struct zone *zone, int order, extern void compaction_defer_reset(struct zone *zone, int order, bool alloc_success); -/* Compaction has made some progress and retrying makes sense */ -static inline bool compaction_made_progress(enum compact_result result) -{ - /* - * Even though this might sound confusing this in fact tells us - * that the compaction successfully isolated and migrated some - * pageblocks. - */ - if (result == COMPACT_SUCCESS) - return true; - - return false; -} - -/* Compaction has failed and it doesn't make much sense to keep retrying. */ -static inline bool compaction_failed(enum compact_result result) -{ - /* All zones were scanned completely and still not result. */ - if (result == COMPACT_COMPLETE) - return true; - - return false; -} - -/* Compaction needs reclaim to be performed first, so it can continue. */ -static inline bool compaction_needs_reclaim(enum compact_result result) -{ - /* - * Compaction backed off due to watermark checks for order-0 - * so the regular reclaim has to try harder and reclaim something. - */ - if (result == COMPACT_SKIPPED) - return true; - - return false; -} - -/* - * Compaction has backed off for some reason after doing some work or none - * at all. It might be throttling or lock contention. Retrying might be still - * worthwhile, but with a higher priority if allowed. - */ -static inline bool compaction_withdrawn(enum compact_result result) -{ - /* - * If compaction is deferred for high-order allocations, it is - * because sync compaction recently failed. If this is the case - * and the caller requested a THP allocation, we do not want - * to heavily disrupt the system, so we fail the allocation - * instead of entering direct reclaim. - */ - if (result == COMPACT_DEFERRED) - return true; - - /* - * If compaction in async mode encounters contention or blocks higher - * priority task we back off early rather than cause stalls. - */ - if (result == COMPACT_CONTENDED) - return true; - - /* - * Page scanners have met but we haven't scanned full zones so this - * is a back off in fact. 
- */ - if (result == COMPACT_PARTIAL_SKIPPED) - return true; - - return false; -} - - bool compaction_zonelist_suitable(struct alloc_context *ac, int order, int alloc_flags); @@ -185,26 +113,6 @@ static inline enum compact_result compaction_suitable(struct zone *zone, int ord return COMPACT_SKIPPED; } -static inline bool compaction_made_progress(enum compact_result result) -{ - return false; -} - -static inline bool compaction_failed(enum compact_result result) -{ - return false; -} - -static inline bool compaction_needs_reclaim(enum compact_result result) -{ - return false; -} - -static inline bool compaction_withdrawn(enum compact_result result) -{ - return true; -} - static inline void kcompactd_run(int nid) { } diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index b63e7c0fbbe5..1478b9dd05fa 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -223,8 +223,8 @@ IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \ #define compact_result_to_feedback(result) \ ({ \ enum compact_result __result = result; \ - (compaction_failed(__result)) ? COMPACTION_FAILED : \ - (compaction_withdrawn(__result)) ? COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \ + (__result == COMPACT_COMPLETE) ? COMPACTION_FAILED : \ + (__result == COMPACT_SUCCESS) ? COMPACTION_PROGRESS : COMPACTION_WITHDRAWN; \ }) #define COMPACTION_FEEDBACK \ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b9a9ba2db9e9..e3a3ebc2dfce 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3469,35 +3469,39 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, if (fatal_signal_pending(current)) return false; - if (compaction_made_progress(compact_result)) + /* + * Compaction managed to coalesce some page blocks, but the + * allocation failed presumably due to a race. Retry some. + */ + if (compact_result == COMPACT_SUCCESS) (*compaction_retries)++; /* - * compaction considers all the zone as desperately out of memory - * so it doesn't really make much sense to retry except when the + * All zones were scanned completely and still no result. It + * doesn't really make much sense to retry except when the * failure could be caused by insufficient priority */ - if (compaction_failed(compact_result)) + if (compact_result == COMPACT_COMPLETE) goto check_priority; /* - * compaction was skipped because there are not enough order-0 pages - * to work with, so we retry only if it looks like reclaim can help. + * Compaction was skipped due to a lack of free order-0 + * migration targets. Continue if reclaim can help. */ - if (compaction_needs_reclaim(compact_result)) { + if (compact_result == COMPACT_SKIPPED) { ret = compaction_zonelist_suitable(ac, order, alloc_flags); goto out; } /* - * make sure the compaction wasn't deferred or didn't bail out early - * due to locks contention before we declare that we should give up. - * But the next retry should use a higher priority if allowed, so - * we don't just keep bailing out endlessly. + * If compaction backed due to being deferred, due to + * contended locks in async mode, or due to scanners meeting + * after a partial scan, retry with increased priority. 
*/ - if (compaction_withdrawn(compact_result)) { + if (compact_result == COMPACT_DEFERRED || + compact_result == COMPACT_CONTENDED || + compact_result == COMPACT_PARTIAL_SKIPPED) goto check_priority; - } /* * !costly requests are much more important than __GFP_RETRY_MAYFAIL -- cgit v1.2.3 From 511a69b27fe6c2d7312789bd9e2e40b00e3903ef Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 19 May 2023 14:39:56 +0200 Subject: mm: compaction: simplify should_compact_retry() The different branches for retry are unnecessarily complicated. There are really only three outcomes: progress (retry n times), skipped (retry if reclaim can help), failed (retry with higher priority). Rearrange the branches and the retry counter to make it simpler. [hannes@cmpxchg.org: restore behavior when hitting max_retries] Link: https://lkml.kernel.org/r/20230602144705.GB161817@cmpxchg.org Link: https://lkml.kernel.org/r/20230519123959.77335-3-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/page_alloc.c | 57 +++++++++++++++++++-------------------------------------- 1 file changed, 19 insertions(+), 38 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e3a3ebc2dfce..7e8673504a3d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3469,21 +3469,6 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, if (fatal_signal_pending(current)) return false; - /* - * Compaction managed to coalesce some page blocks, but the - * allocation failed presumably due to a race. Retry some. - */ - if (compact_result == COMPACT_SUCCESS) - (*compaction_retries)++; - - /* - * All zones were scanned completely and still no result. It - * doesn't really make much sense to retry except when the - * failure could be caused by insufficient priority - */ - if (compact_result == COMPACT_COMPLETE) - goto check_priority; - /* * Compaction was skipped due to a lack of free order-0 * migration targets. Continue if reclaim can help. @@ -3494,35 +3479,31 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, } /* - * If compaction backed due to being deferred, due to - * contended locks in async mode, or due to scanners meeting - * after a partial scan, retry with increased priority. + * Compaction managed to coalesce some page blocks, but the + * allocation failed presumably due to a race. Retry some. */ - if (compact_result == COMPACT_DEFERRED || - compact_result == COMPACT_CONTENDED || - compact_result == COMPACT_PARTIAL_SKIPPED) - goto check_priority; + if (compact_result == COMPACT_SUCCESS) { + /* + * !costly requests are much more important than + * __GFP_RETRY_MAYFAIL costly ones because they are de + * facto nofail and invoke OOM killer to move on while + * costly can fail and users are ready to cope with + * that. 1/4 retries is rather arbitrary but we would + * need much more detailed feedback from compaction to + * make a better decision. + */ + if (order > PAGE_ALLOC_COSTLY_ORDER) + max_retries /= 4; - /* - * !costly requests are much more important than __GFP_RETRY_MAYFAIL - * costly ones because they are de facto nofail and invoke OOM - * killer to move on while costly can fail and users are ready - * to cope with that. 1/4 retries is rather arbitrary but we - * would need much more detailed feedback from compaction to - * make a better decision. 
- */ - if (order > PAGE_ALLOC_COSTLY_ORDER) - max_retries /= 4; - if (*compaction_retries <= max_retries) { - ret = true; - goto out; + if (++(*compaction_retries) <= max_retries) { + ret = true; + goto out; + } } /* - * Make sure there are attempts at the highest priority if we exhausted - * all retries or failed at the lower priorities. + * Compaction failed. Retry with increasing priority. */ -check_priority: min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; -- cgit v1.2.3 From e8606320e9af9774fd879e71c940fc9e5fd9b901 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 19 May 2023 14:39:57 +0200 Subject: mm: compaction: refactor __compaction_suitable() __compaction_suitable() is supposed to check for available migration targets. However, it also checks whether the operation was requested via /proc/sys/vm/compact_memory, and whether the original allocation request can already succeed. These don't apply to all callsites. Move the checks out to the callers, so that later patches can deal with them one by one. No functional change intended. [hannes@cmpxchg.org: fix comment, per Vlastimil] Link: https://lkml.kernel.org/r/20230602144942.GC161817@cmpxchg.org Link: https://lkml.kernel.org/r/20230519123959.77335-4-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Michal Hocko Signed-off-by: Andrew Morton --- include/linux/compaction.h | 4 +-- mm/compaction.c | 79 +++++++++++++++++++++++++++++----------------- mm/vmscan.c | 35 ++++++++++++-------- 3 files changed, 73 insertions(+), 45 deletions(-) (limited to 'mm') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 1f0328a2ba48..9f7cf3e1bf89 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -90,7 +90,7 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, struct page **page); extern void reset_isolation_suitable(pg_data_t *pgdat); extern enum compact_result compaction_suitable(struct zone *zone, int order, - unsigned int alloc_flags, int highest_zoneidx); + int highest_zoneidx); extern void compaction_defer_reset(struct zone *zone, int order, bool alloc_success); @@ -108,7 +108,7 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) } static inline enum compact_result compaction_suitable(struct zone *zone, int order, - int alloc_flags, int highest_zoneidx) + int highest_zoneidx) { return COMPACT_SKIPPED; } diff --git a/mm/compaction.c b/mm/compaction.c index e23e00bec030..bb9b76244a5d 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2194,24 +2194,10 @@ static enum compact_result compact_finished(struct compact_control *cc) } static enum compact_result __compaction_suitable(struct zone *zone, int order, - unsigned int alloc_flags, int highest_zoneidx, unsigned long wmark_target) { unsigned long watermark; - - if (is_via_compact_memory(order)) - return COMPACT_CONTINUE; - - watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); - /* - * If watermarks for high-order allocation are already met, there - * should be no need for compaction at all. - */ - if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, - alloc_flags)) - return COMPACT_SUCCESS; - /* * Watermarks for order-0 must be met for compaction to be able to * isolate free pages for migration targets. This means that the @@ -2240,17 +2226,15 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order, * compaction_suitable: Is this suitable to run compaction on this zone now? 
* Returns * COMPACT_SKIPPED - If there are too few free pages for compaction - * COMPACT_SUCCESS - If the allocation would succeed without compaction * COMPACT_CONTINUE - If compaction should run now */ enum compact_result compaction_suitable(struct zone *zone, int order, - unsigned int alloc_flags, int highest_zoneidx) { enum compact_result ret; int fragindex; - ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx, + ret = __compaction_suitable(zone, order, highest_zoneidx, zone_page_state(zone, NR_FREE_PAGES)); /* * fragmentation index determines if allocation failures are due to @@ -2294,7 +2278,16 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->highest_zoneidx, ac->nodemask) { unsigned long available; - enum compact_result compact_result; + unsigned long watermark; + + if (is_via_compact_memory(order)) + return true; + + /* Allocation can already succeed, nothing to do */ + watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); + if (zone_watermark_ok(zone, order, watermark, + ac->highest_zoneidx, alloc_flags)) + continue; /* * Do not consider all the reclaimable memory because we do not @@ -2304,9 +2297,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, */ available = zone_reclaimable_pages(zone) / order; available += zone_page_state_snapshot(zone, NR_FREE_PAGES); - compact_result = __compaction_suitable(zone, order, alloc_flags, - ac->highest_zoneidx, available); - if (compact_result == COMPACT_CONTINUE) + if (__compaction_suitable(zone, order, ac->highest_zoneidx, + available) == COMPACT_CONTINUE) return true; } @@ -2336,11 +2328,23 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) INIT_LIST_HEAD(&cc->migratepages); cc->migratetype = gfp_migratetype(cc->gfp_mask); - ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, - cc->highest_zoneidx); - /* Compaction is likely to fail */ - if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED) - return ret; + + if (!is_via_compact_memory(cc->order)) { + unsigned long watermark; + + /* Allocation can already succeed, nothing to do */ + watermark = wmark_pages(cc->zone, + cc->alloc_flags & ALLOC_WMARK_MASK); + if (zone_watermark_ok(cc->zone, cc->order, watermark, + cc->highest_zoneidx, cc->alloc_flags)) + return COMPACT_SUCCESS; + + ret = compaction_suitable(cc->zone, cc->order, + cc->highest_zoneidx); + /* Compaction is likely to fail */ + if (ret == COMPACT_SKIPPED) + return ret; + } /* * Clear pageblock skip if there were failures recently and compaction @@ -2844,7 +2848,16 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat) if (!populated_zone(zone)) continue; - if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, + if (is_via_compact_memory(pgdat->kcompactd_max_order)) + return true; + + /* Allocation can already succeed, check other zones */ + if (zone_watermark_ok(zone, pgdat->kcompactd_max_order, + min_wmark_pages(zone), + highest_zoneidx, 0)) + continue; + + if (compaction_suitable(zone, pgdat->kcompactd_max_order, highest_zoneidx) == COMPACT_CONTINUE) return true; } @@ -2882,10 +2895,18 @@ static void kcompactd_do_work(pg_data_t *pgdat) if (compaction_deferred(zone, cc.order)) continue; - if (compaction_suitable(zone, cc.order, 0, zoneid) != - COMPACT_CONTINUE) + if (is_via_compact_memory(cc.order)) + goto compact; + + /* Allocation can already succeed, nothing to do */ + if (zone_watermark_ok(zone, cc.order, + min_wmark_pages(zone), zoneid, 0)) continue; 
+ if (compaction_suitable(zone, cc.order, + zoneid) != COMPACT_CONTINUE) + continue; +compact: if (kthread_should_stop()) return; diff --git a/mm/vmscan.c b/mm/vmscan.c index 4637f6462e9c..9f8bfd1fcf58 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -6399,14 +6399,17 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat, if (!managed_zone(zone)) continue; - switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) { - case COMPACT_SUCCESS: - case COMPACT_CONTINUE: + if (sc->order == -1) /* is_via_compact_memory() */ + return false; + + /* Allocation can already succeed, nothing to do */ + if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), + sc->reclaim_idx, 0)) + return false; + + if (compaction_suitable(zone, sc->order, + sc->reclaim_idx) == COMPACT_CONTINUE) return false; - default: - /* check next zone */ - ; - } } /* @@ -6594,16 +6597,20 @@ again: static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) { unsigned long watermark; - enum compact_result suitable; - suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx); - if (suitable == COMPACT_SUCCESS) - /* Allocation should succeed already. Don't reclaim. */ + if (sc->order == -1) /* is_via_compact_memory() */ + goto suitable; + + /* Allocation can already succeed, nothing to do */ + if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), + sc->reclaim_idx, 0)) return true; - if (suitable == COMPACT_SKIPPED) - /* Compaction cannot yet proceed. Do reclaim. */ - return false; + /* Compaction cannot yet proceed. Do reclaim. */ + if (compaction_suitable(zone, sc->order, + sc->reclaim_idx) == COMPACT_SKIPPED) + return false; +suitable: /* * Compaction is already possible, but it takes time to run and there * are potentially other callers using the pages just freed. So proceed -- cgit v1.2.3 From f98a497e1f16ee411df72629e32e31cba4cfa9cf Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 19 May 2023 14:39:58 +0200 Subject: mm: compaction: remove unnecessary is_via_compact_memory() checks Remove from all paths not reachable via /proc/sys/vm/compact_memory. 
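For reference, the check being removed is the proactive-compaction test. A minimal sketch of the helper as found in mm/compaction.c, matching the "/* is_via_compact_memory() */" annotations dropped from mm/vmscan.c above:

	/* Writing to /proc/sys/vm/compact_memory triggers full compaction with order == -1 */
	static inline bool is_via_compact_memory(int order)
	{
		return order == -1;
	}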
Link: https://lkml.kernel.org/r/20230519123959.77335-5-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Michal Hocko Cc: Baolin Wang Signed-off-by: Andrew Morton --- mm/compaction.c | 11 +---------- mm/vmscan.c | 8 +------- 2 files changed, 2 insertions(+), 17 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index bb9b76244a5d..bc1f389ed378 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2280,9 +2280,6 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, unsigned long available; unsigned long watermark; - if (is_via_compact_memory(order)) - return true; - /* Allocation can already succeed, nothing to do */ watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); if (zone_watermark_ok(zone, order, watermark, @@ -2848,9 +2845,6 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat) if (!populated_zone(zone)) continue; - if (is_via_compact_memory(pgdat->kcompactd_max_order)) - return true; - /* Allocation can already succeed, check other zones */ if (zone_watermark_ok(zone, pgdat->kcompactd_max_order, min_wmark_pages(zone), @@ -2895,9 +2889,6 @@ static void kcompactd_do_work(pg_data_t *pgdat) if (compaction_deferred(zone, cc.order)) continue; - if (is_via_compact_memory(cc.order)) - goto compact; - /* Allocation can already succeed, nothing to do */ if (zone_watermark_ok(zone, cc.order, min_wmark_pages(zone), zoneid, 0)) @@ -2906,7 +2897,7 @@ static void kcompactd_do_work(pg_data_t *pgdat) if (compaction_suitable(zone, cc.order, zoneid) != COMPACT_CONTINUE) continue; -compact: + if (kthread_should_stop()) return; diff --git a/mm/vmscan.c b/mm/vmscan.c index 9f8bfd1fcf58..99e4ae44850d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -6399,9 +6399,6 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat, if (!managed_zone(zone)) continue; - if (sc->order == -1) /* is_via_compact_memory() */ - return false; - /* Allocation can already succeed, nothing to do */ if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), sc->reclaim_idx, 0)) @@ -6598,9 +6595,6 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) { unsigned long watermark; - if (sc->order == -1) /* is_via_compact_memory() */ - goto suitable; - /* Allocation can already succeed, nothing to do */ if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), sc->reclaim_idx, 0)) @@ -6610,7 +6604,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) if (compaction_suitable(zone, sc->order, sc->reclaim_idx) == COMPACT_SKIPPED) return false; -suitable: + /* * Compaction is already possible, but it takes time to run and there * are potentially other callers using the pages just freed. So proceed -- cgit v1.2.3 From 1c9568e806a589da84b7afbdf0619b2c1f6c102a Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 19 May 2023 14:39:59 +0200 Subject: mm: compaction: drop redundant watermark check in compaction_zonelist_suitable() The watermark check in compaction_zonelist_suitable(), called from should_compact_retry(), is sandwiched between two watermark checks already: before, there are freelist attempts as part of direct reclaim and direct compaction; after, there is a last-minute freelist attempt in __alloc_pages_may_oom(). The check in compaction_zonelist_suitable() isn't necessary. Kill it. 
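A rough sketch of the ordering described above (simplified, not the exact control flow; __alloc_pages_slowpath() is assumed as the slowpath entry point in mm/page_alloc.c, the other names are as mentioned in this message):

	/*
	 * __alloc_pages_slowpath(), simplified:
	 *
	 *   direct reclaim / direct compaction   ->  freelist attempts, i.e. watermark checks
	 *   should_compact_retry()
	 *     -> compaction_zonelist_suitable()  ->  the watermark check removed here sat in between
	 *   __alloc_pages_may_oom()              ->  last-minute freelist attempt, another watermark check
	 */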
Link: https://lkml.kernel.org/r/20230519123959.77335-6-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Michal Hocko Cc: Baolin Wang Signed-off-by: Andrew Morton --- mm/compaction.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index bc1f389ed378..470cfd24ef18 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2278,13 +2278,6 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->highest_zoneidx, ac->nodemask) { unsigned long available; - unsigned long watermark; - - /* Allocation can already succeed, nothing to do */ - watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); - if (zone_watermark_ok(zone, order, watermark, - ac->highest_zoneidx, alloc_flags)) - continue; /* * Do not consider all the reclaimable memory because we do not -- cgit v1.2.3 From 3cf04937529020e149666f56a41ebdeb226b69ed Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 2 Jun 2023 11:12:04 -0400 Subject: mm: compaction: have compaction_suitable() return bool Since it only returns COMPACT_CONTINUE or COMPACT_SKIPPED now, a bool return value simplifies the callsites. Link: https://lkml.kernel.org/r/20230602151204.GD161817@cmpxchg.org Signed-off-by: Johannes Weiner Suggested-by: Vlastimil Babka Acked-by: Vlastimil Babka Cc: Baolin Wang Cc: Mel Gorman Cc: Michal Hocko Signed-off-by: Andrew Morton --- include/linux/compaction.h | 6 ++--- mm/compaction.c | 64 ++++++++++++++++++++++------------------------ mm/vmscan.c | 6 ++--- 3 files changed, 36 insertions(+), 40 deletions(-) (limited to 'mm') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 9f7cf3e1bf89..57b16e69c19a 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -89,7 +89,7 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, const struct alloc_context *ac, enum compact_priority prio, struct page **page); extern void reset_isolation_suitable(pg_data_t *pgdat); -extern enum compact_result compaction_suitable(struct zone *zone, int order, +extern bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx); extern void compaction_defer_reset(struct zone *zone, int order, @@ -107,10 +107,10 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) { } -static inline enum compact_result compaction_suitable(struct zone *zone, int order, +static inline bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) { - return COMPACT_SKIPPED; + return false; } static inline void kcompactd_run(int nid) diff --git a/mm/compaction.c b/mm/compaction.c index 470cfd24ef18..9b550bfe900b 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2193,9 +2193,9 @@ static enum compact_result compact_finished(struct compact_control *cc) return ret; } -static enum compact_result __compaction_suitable(struct zone *zone, int order, - int highest_zoneidx, - unsigned long wmark_target) +static bool __compaction_suitable(struct zone *zone, int order, + int highest_zoneidx, + unsigned long wmark_target) { unsigned long watermark; /* @@ -2215,27 +2215,20 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order, watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
low_wmark_pages(zone) : min_wmark_pages(zone); watermark += compact_gap(order); - if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx, - ALLOC_CMA, wmark_target)) - return COMPACT_SKIPPED; - - return COMPACT_CONTINUE; + return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx, + ALLOC_CMA, wmark_target); } /* * compaction_suitable: Is this suitable to run compaction on this zone now? - * Returns - * COMPACT_SKIPPED - If there are too few free pages for compaction - * COMPACT_CONTINUE - If compaction should run now */ -enum compact_result compaction_suitable(struct zone *zone, int order, - int highest_zoneidx) +bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) { - enum compact_result ret; - int fragindex; + enum compact_result compact_result; + bool suitable; - ret = __compaction_suitable(zone, order, highest_zoneidx, - zone_page_state(zone, NR_FREE_PAGES)); + suitable = __compaction_suitable(zone, order, highest_zoneidx, + zone_page_state(zone, NR_FREE_PAGES)); /* * fragmentation index determines if allocation failures are due to * low memory or external fragmentation @@ -2252,17 +2245,24 @@ enum compact_result compaction_suitable(struct zone *zone, int order, * excessive compaction for costly orders, but it should not be at the * expense of system stability. */ - if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) { - fragindex = fragmentation_index(zone, order); - if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) - ret = COMPACT_NOT_SUITABLE_ZONE; + if (suitable) { + compact_result = COMPACT_CONTINUE; + if (order > PAGE_ALLOC_COSTLY_ORDER) { + int fragindex = fragmentation_index(zone, order); + + if (fragindex >= 0 && + fragindex <= sysctl_extfrag_threshold) { + suitable = false; + compact_result = COMPACT_NOT_SUITABLE_ZONE; + } + } + } else { + compact_result = COMPACT_SKIPPED; } - trace_mm_compaction_suitable(zone, order, ret); - if (ret == COMPACT_NOT_SUITABLE_ZONE) - ret = COMPACT_SKIPPED; + trace_mm_compaction_suitable(zone, order, compact_result); - return ret; + return suitable; } bool compaction_zonelist_suitable(struct alloc_context *ac, int order, @@ -2288,7 +2288,7 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, available = zone_reclaimable_pages(zone) / order; available += zone_page_state_snapshot(zone, NR_FREE_PAGES); if (__compaction_suitable(zone, order, ac->highest_zoneidx, - available) == COMPACT_CONTINUE) + available)) return true; } @@ -2329,11 +2329,10 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) cc->highest_zoneidx, cc->alloc_flags)) return COMPACT_SUCCESS; - ret = compaction_suitable(cc->zone, cc->order, - cc->highest_zoneidx); /* Compaction is likely to fail */ - if (ret == COMPACT_SKIPPED) - return ret; + if (!compaction_suitable(cc->zone, cc->order, + cc->highest_zoneidx)) + return COMPACT_SKIPPED; } /* @@ -2845,7 +2844,7 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat) continue; if (compaction_suitable(zone, pgdat->kcompactd_max_order, - highest_zoneidx) == COMPACT_CONTINUE) + highest_zoneidx)) return true; } @@ -2887,8 +2886,7 @@ static void kcompactd_do_work(pg_data_t *pgdat) min_wmark_pages(zone), zoneid, 0)) continue; - if (compaction_suitable(zone, cc.order, - zoneid) != COMPACT_CONTINUE) + if (!compaction_suitable(zone, cc.order, zoneid)) continue; if (kthread_should_stop()) diff --git a/mm/vmscan.c b/mm/vmscan.c index 99e4ae44850d..df7e52b522ec 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -6404,8 +6404,7 @@ static inline bool 
should_continue_reclaim(struct pglist_data *pgdat, sc->reclaim_idx, 0)) return false; - if (compaction_suitable(zone, sc->order, - sc->reclaim_idx) == COMPACT_CONTINUE) + if (compaction_suitable(zone, sc->order, sc->reclaim_idx)) return false; } @@ -6601,8 +6600,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) return true; /* Compaction cannot yet proceed. Do reclaim. */ - if (compaction_suitable(zone, sc->order, - sc->reclaim_idx) == COMPACT_SKIPPED) + if (!compaction_suitable(zone, sc->order, sc->reclaim_idx)) return false; /* -- cgit v1.2.3 From 4fbbb3fde3c69879ceebb33a8edd9d867008728b Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 19 May 2023 13:13:59 +0200 Subject: mm: compaction: avoid GFP_NOFS ABBA deadlock During stress testing with higher-order allocations, a deadlock scenario was observed in compaction: One GFP_NOFS allocation was sleeping on mm/compaction.c::too_many_isolated(), while all CPUs in the system were busy with compactors spinning on buffer locks held by the sleeping GFP_NOFS allocation. Reclaim is susceptible to this same deadlock; we fixed it by granting GFP_NOFS allocations additional LRU isolation headroom, to ensure it makes forward progress while holding fs locks that other reclaimers might acquire. Do the same here. This code has been like this since compaction was initially merged, and I only managed to trigger this with out-of-tree patches that dramatically increase the contexts that do GFP_NOFS compaction. While the issue is real, it seems theoretical in nature given existing allocation sites. Worth fixing now, but no Fixes tag or stable CC. Link: https://lkml.kernel.org/r/20230519111359.40475-1-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Mel Gorman Acked-by: Vlastimil Babka Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/compaction.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 9b550bfe900b..261071a07681 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -740,8 +740,9 @@ isolate_freepages_range(struct compact_control *cc, } /* Similar to reclaim, but different enough that they don't share logic */ -static bool too_many_isolated(pg_data_t *pgdat) +static bool too_many_isolated(struct compact_control *cc) { + pg_data_t *pgdat = cc->zone->zone_pgdat; bool too_many; unsigned long active, inactive, isolated; @@ -753,6 +754,17 @@ static bool too_many_isolated(pg_data_t *pgdat) isolated = node_page_state(pgdat, NR_ISOLATED_FILE) + node_page_state(pgdat, NR_ISOLATED_ANON); + /* + * Allow GFP_NOFS to isolate past the limit set for regular + * compaction runs. This prevents an ABBA deadlock when other + * compactors have already isolated to the limit, but are + * blocked on filesystem locks held by the GFP_NOFS thread. + */ + if (cc->gfp_mask & __GFP_FS) { + inactive >>= 3; + active >>= 3; + } + too_many = isolated > (inactive + active) / 2; if (!too_many) wake_throttle_isolated(pgdat); @@ -801,7 +813,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * list by either parallel reclaimers or compaction. 
If there are, * delay for some time until fewer pages are isolated */ - while (unlikely(too_many_isolated(pgdat))) { + while (unlikely(too_many_isolated(cc))) { /* stop isolation if there are still pages not migrated */ if (cc->nr_migratepages) return -EAGAIN; -- cgit v1.2.3 From 3c54a298db4c6b38bbd5f86216ce0f5ad4596ccf Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 22 May 2023 09:24:12 +0100 Subject: mm/mmap: refactor mlock_future_check() In all but one instance, mlock_future_check() is treated as a boolean function despite returning an error code. In one instance, this error code is ignored and replaced with -ENOMEM. This is confusing, and the inversion of true -> failure, false -> success is not warranted. Convert the function to a bool, lightly refactor and return true if the check passes, false if not. Link: https://lkml.kernel.org/r/20230522082412.56685-1-lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Acked-by: Vlastimil Babka Cc: Liam Howlett Cc: Mike Rapoport (IBM) Signed-off-by: Andrew Morton --- mm/internal.h | 4 ++-- mm/mmap.c | 33 +++++++++++++++++---------------- mm/mremap.c | 2 +- mm/secretmem.c | 2 +- 4 files changed, 21 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/internal.h b/mm/internal.h index bb6542279599..66dd214b302a 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -576,8 +576,8 @@ extern long populate_vma_page_range(struct vm_area_struct *vma, extern long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, bool write, int *locked); -extern int mlock_future_check(struct mm_struct *mm, unsigned long flags, - unsigned long len); +extern bool mlock_future_check(struct mm_struct *mm, unsigned long flags, + unsigned long bytes); /* * mlock_vma_folio() and munlock_vma_folio(): * should be called with vma's mmap_lock held for read or write, diff --git a/mm/mmap.c b/mm/mmap.c index 44be7fdfaac9..28d2c489a7e5 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -182,7 +182,8 @@ static int check_brk_limits(unsigned long addr, unsigned long len) if (IS_ERR_VALUE(mapped_addr)) return mapped_addr; - return mlock_future_check(current->mm, current->mm->def_flags, len); + return mlock_future_check(current->mm, current->mm->def_flags, len) + ? 0 : -EAGAIN; } static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma, unsigned long addr, unsigned long request, unsigned long flags); @@ -1145,21 +1146,21 @@ static inline unsigned long round_hint_to_min(unsigned long hint) return hint; } -int mlock_future_check(struct mm_struct *mm, unsigned long flags, - unsigned long len) +bool mlock_future_check(struct mm_struct *mm, unsigned long flags, + unsigned long bytes) { - unsigned long locked, lock_limit; + unsigned long locked_pages, limit_pages; - /* mlock MCL_FUTURE? 
*/ - if (flags & VM_LOCKED) { - locked = len >> PAGE_SHIFT; - locked += mm->locked_vm; - lock_limit = rlimit(RLIMIT_MEMLOCK); - lock_limit >>= PAGE_SHIFT; - if (locked > lock_limit && !capable(CAP_IPC_LOCK)) - return -EAGAIN; - } - return 0; + if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) + return true; + + locked_pages = bytes >> PAGE_SHIFT; + locked_pages += mm->locked_vm; + + limit_pages = rlimit(RLIMIT_MEMLOCK); + limit_pages >>= PAGE_SHIFT; + + return locked_pages <= limit_pages; } static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) @@ -1271,7 +1272,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, if (!can_do_mlock()) return -EPERM; - if (mlock_future_check(mm, vm_flags, len)) + if (!mlock_future_check(mm, vm_flags, len)) return -EAGAIN; if (file) { @@ -1889,7 +1890,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, return -ENOMEM; /* mlock limit tests */ - if (mlock_future_check(mm, vma->vm_flags, grow << PAGE_SHIFT)) + if (!mlock_future_check(mm, vma->vm_flags, grow << PAGE_SHIFT)) return -ENOMEM; /* Check to ensure the stack will not grow into a hugetlb-only region */ diff --git a/mm/mremap.c b/mm/mremap.c index b11ce6c92099..bcfcb8df5875 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -775,7 +775,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) return ERR_PTR(-EFAULT); - if (mlock_future_check(mm, vma->vm_flags, new_len - old_len)) + if (!mlock_future_check(mm, vma->vm_flags, new_len - old_len)) return ERR_PTR(-EAGAIN); if (!may_expand_vm(mm, vma->vm_flags, diff --git a/mm/secretmem.c b/mm/secretmem.c index 974b32ba8b9d..58d2af12df4f 100644 --- a/mm/secretmem.c +++ b/mm/secretmem.c @@ -125,7 +125,7 @@ static int secretmem_mmap(struct file *file, struct vm_area_struct *vma) if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) return -EINVAL; - if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len)) + if (!mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len)) return -EAGAIN; vm_flags_set(vma, VM_LOCKED | VM_DONTDUMP); -- cgit v1.2.3 From b0cc5e89caadc21c2d8f7dc4c97947761b358876 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 22 May 2023 13:52:10 -0700 Subject: mm/mlock: rename mlock_future_check() to mlock_future_ok() It is felt that the name mlock_future_check() is vague - it doesn't particularly convey the function's operation. mlock_future_ok() is a clearer name for a predicate function. 
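As a usage sketch with the new name (the call pattern is taken from the mm/secretmem.c hunk below; the numbers are only illustrative):

	/*
	 * Illustrative numbers: 4 KiB pages and RLIMIT_MEMLOCK = 64 KiB give
	 * limit_pages = 16. With mm->locked_vm already at 10 pages, locking
	 * another len = 24576 bytes (6 pages) still passes (10 + 6 <= 16);
	 * one more page fails unless the caller has CAP_IPC_LOCK.
	 */
	if (!mlock_future_ok(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
		return -EAGAIN;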
Acked-by: Vlastimil Babka Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Mike Rapoport (IBM) Signed-off-by: Andrew Morton --- mm/internal.h | 2 +- mm/mmap.c | 8 ++++---- mm/mremap.c | 2 +- mm/secretmem.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/internal.h b/mm/internal.h index 66dd214b302a..f45f5eb4514f 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -576,7 +576,7 @@ extern long populate_vma_page_range(struct vm_area_struct *vma, extern long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, bool write, int *locked); -extern bool mlock_future_check(struct mm_struct *mm, unsigned long flags, +extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, unsigned long bytes); /* * mlock_vma_folio() and munlock_vma_folio(): diff --git a/mm/mmap.c b/mm/mmap.c index 28d2c489a7e5..e1624cb2c04e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -182,7 +182,7 @@ static int check_brk_limits(unsigned long addr, unsigned long len) if (IS_ERR_VALUE(mapped_addr)) return mapped_addr; - return mlock_future_check(current->mm, current->mm->def_flags, len) + return mlock_future_ok(current->mm, current->mm->def_flags, len) ? 0 : -EAGAIN; } static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma, @@ -1146,7 +1146,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint) return hint; } -bool mlock_future_check(struct mm_struct *mm, unsigned long flags, +bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, unsigned long bytes) { unsigned long locked_pages, limit_pages; @@ -1272,7 +1272,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, if (!can_do_mlock()) return -EPERM; - if (!mlock_future_check(mm, vm_flags, len)) + if (!mlock_future_ok(mm, vm_flags, len)) return -EAGAIN; if (file) { @@ -1890,7 +1890,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, return -ENOMEM; /* mlock limit tests */ - if (!mlock_future_check(mm, vma->vm_flags, grow << PAGE_SHIFT)) + if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) return -ENOMEM; /* Check to ensure the stack will not grow into a hugetlb-only region */ diff --git a/mm/mremap.c b/mm/mremap.c index bcfcb8df5875..da107f2c71bf 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -775,7 +775,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) return ERR_PTR(-EFAULT); - if (!mlock_future_check(mm, vma->vm_flags, new_len - old_len)) + if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len)) return ERR_PTR(-EAGAIN); if (!may_expand_vm(mm, vma->vm_flags, diff --git a/mm/secretmem.c b/mm/secretmem.c index 58d2af12df4f..86442a15d12f 100644 --- a/mm/secretmem.c +++ b/mm/secretmem.c @@ -125,7 +125,7 @@ static int secretmem_mmap(struct file *file, struct vm_area_struct *vma) if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) return -EINVAL; - if (!mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len)) + if (!mlock_future_ok(vma->vm_mm, vma->vm_flags | VM_LOCKED, len)) return -EAGAIN; vm_flags_set(vma, VM_LOCKED | VM_DONTDUMP); -- cgit v1.2.3 From 08e0f49e9991b175f2cda7ba32a7e9f1320dcbad Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Mon, 22 May 2023 09:52:32 +0000 Subject: mm/memcontrol: fix typo in comment Replace 'then' with 'than'. 
Link: https://lkml.kernel.org/r/20230522095233.4246-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Cc: Johannes Weiner Cc: Michal Hocko Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/memcontrol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d31fb1e2cb33..7c681492b47b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6862,7 +6862,7 @@ static unsigned long effective_protection(unsigned long usage, protected = min(usage, setting); /* * If all cgroups at this level combined claim and use more - * protection then what the parent affords them, distribute + * protection than what the parent affords them, distribute * shares in proportion to utilization. * * We are using actual utilization rather than the statically -- cgit v1.2.3 From 0285762c6f161c3a93ffc75ba278aad21719460a Mon Sep 17 00:00:00 2001 From: "T.J. Alumbaugh" Date: Mon, 22 May 2023 11:20:55 +0000 Subject: mm: multi-gen LRU: use macro for bitmap Use DECLARE_BITMAP macro when possible. Link: https://lkml.kernel.org/r/20230522112058.2965866-1-talumbau@google.com Signed-off-by: T.J. Alumbaugh Reviewed-by: David Hildenbrand Reviewed-by: Yuanchu Xie Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index df7e52b522ec..cafb933d609f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4144,7 +4144,7 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end, unsigned long next; unsigned long addr; struct vm_area_struct *vma; - unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)]; + DECLARE_BITMAP(bitmap, MIN_LRU_BATCH); unsigned long first = -1; struct lru_gen_mm_walk *walk = args->private; -- cgit v1.2.3 From 5c7e7a0d79072eb02780a2c0dee730b23cde711d Mon Sep 17 00:00:00 2001 From: "T.J. Alumbaugh" Date: Mon, 22 May 2023 11:20:56 +0000 Subject: mm: multi-gen LRU: cleanup lru_gen_soft_reclaim() lru_gen_soft_reclaim() gets the lruvec from the memcg and node ID to keep a cleaner interface on the caller side. Link: https://lkml.kernel.org/r/20230522112058.2965866-2-talumbau@google.com Signed-off-by: T.J. 
Alumbaugh Reviewed-by: Yuanchu Xie Cc: David Hildenbrand Cc: Yu Zhao Signed-off-by: Andrew Morton --- include/linux/mmzone.h | 4 ++-- mm/memcontrol.c | 2 +- mm/vmscan.c | 4 +++- 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 3a68326c9989..5a7ada0413da 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -534,7 +534,7 @@ void lru_gen_exit_memcg(struct mem_cgroup *memcg); void lru_gen_online_memcg(struct mem_cgroup *memcg); void lru_gen_offline_memcg(struct mem_cgroup *memcg); void lru_gen_release_memcg(struct mem_cgroup *memcg); -void lru_gen_soft_reclaim(struct lruvec *lruvec); +void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid); #else /* !CONFIG_MEMCG */ @@ -585,7 +585,7 @@ static inline void lru_gen_release_memcg(struct mem_cgroup *memcg) { } -static inline void lru_gen_soft_reclaim(struct lruvec *lruvec) +static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) { } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7c681492b47b..6a3d4ce87b8a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -485,7 +485,7 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid) if (lru_gen_enabled()) { if (soft_limit_excess(memcg)) - lru_gen_soft_reclaim(&memcg->nodeinfo[nid]->lruvec); + lru_gen_soft_reclaim(memcg, nid); return; } diff --git a/mm/vmscan.c b/mm/vmscan.c index cafb933d609f..a51a7e0f8b63 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4846,8 +4846,10 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg) } } -void lru_gen_soft_reclaim(struct lruvec *lruvec) +void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) { + struct lruvec *lruvec = get_lruvec(memcg, nid); + /* see the comment on MEMCG_NR_GENS */ if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD) lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD); -- cgit v1.2.3 From bd02df412cbb9a63e945a647e3dbe4d6f9e06d19 Mon Sep 17 00:00:00 2001 From: "T.J. Alumbaugh" Date: Mon, 22 May 2023 11:20:57 +0000 Subject: mm: multi-gen LRU: add helpers in page table walks Add helpers to page table walking code: - Clarifies intent via name "should_walk_mmu" and "should_clear_pmd_young" - Avoids repeating same logic in two places Link: https://lkml.kernel.org/r/20230522112058.2965866-3-talumbau@google.com Signed-off-by: T.J. 
Alumbaugh Reviewed-by: Yuanchu Xie Cc: David Hildenbrand Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/vmscan.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index a51a7e0f8b63..dbbfcc631f5c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3234,6 +3234,16 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS); #define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap]) #endif +static bool should_walk_mmu(void) +{ + return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK); +} + +static bool should_clear_pmd_young(void) +{ + return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG); +} + /****************************************************************************** * shorthand helpers ******************************************************************************/ @@ -4098,7 +4108,7 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area goto next; if (!pmd_trans_huge(pmd[i])) { - if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG)) + if (should_clear_pmd_young()) pmdp_test_and_clear_young(vma, addr, pmd + i); goto next; } @@ -4191,7 +4201,7 @@ restart: #endif walk->mm_stats[MM_NONLEAF_TOTAL]++; - if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG)) { + if (should_clear_pmd_young()) { if (!pmd_young(val)) continue; @@ -4493,7 +4503,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, * handful of PTEs. Spreading the work out over a period of time usually * is less efficient, but it avoids bursty page faults. */ - if (!arch_has_hw_pte_young() || !get_cap(LRU_GEN_MM_WALK)) { + if (!should_walk_mmu()) { success = iterate_mm_list_nowalk(lruvec, max_seq); goto done; } @@ -5730,10 +5740,10 @@ static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, c if (get_cap(LRU_GEN_CORE)) caps |= BIT(LRU_GEN_CORE); - if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK)) + if (should_walk_mmu()) caps |= BIT(LRU_GEN_MM_WALK); - if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG)) + if (should_clear_pmd_young()) caps |= BIT(LRU_GEN_NONLEAF_YOUNG); return sysfs_emit(buf, "0x%04x\n", caps); -- cgit v1.2.3 From d7f1afd0e3ac4c5c703b67d6003b62f760e95ba8 Mon Sep 17 00:00:00 2001 From: "T.J. Alumbaugh" Date: Mon, 22 May 2023 11:20:58 +0000 Subject: mm: multi-gen LRU: cleanup lru_gen_test_recent() Avoid passing memcg* and pglist_data* to lru_gen_test_recent() since we only use the lruvec anyway. Link: https://lkml.kernel.org/r/20230522112058.2965866-4-talumbau@google.com Signed-off-by: T.J. Alumbaugh Reviewed-by: Yuanchu Xie Cc: David Hildenbrand Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/workingset.c | 46 ++++++++++++++++------------------------------ 1 file changed, 16 insertions(+), 30 deletions(-) (limited to 'mm') diff --git a/mm/workingset.c b/mm/workingset.c index 90ae785d4c9c..5796e927e6d7 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -257,59 +257,46 @@ static void *lru_gen_eviction(struct folio *folio) /* * Tests if the shadow entry is for a folio that was recently evicted. - * Fills in @memcgid, @pglist_data, @token, @workingset with the values - * unpacked from shadow. + * Fills in @lruvec, @token, @workingset with the values unpacked from shadow. 
*/ -static bool lru_gen_test_recent(void *shadow, bool file, int *memcgid, - struct pglist_data **pgdat, unsigned long *token, bool *workingset) +static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec, + unsigned long *token, bool *workingset) { - struct mem_cgroup *eviction_memcg; - struct lruvec *lruvec; - struct lru_gen_folio *lrugen; + int memcg_id; unsigned long min_seq; + struct mem_cgroup *memcg; + struct pglist_data *pgdat; - unpack_shadow(shadow, memcgid, pgdat, token, workingset); - eviction_memcg = mem_cgroup_from_id(*memcgid); + unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset); - lruvec = mem_cgroup_lruvec(eviction_memcg, *pgdat); - lrugen = &lruvec->lrugen; + memcg = mem_cgroup_from_id(memcg_id); + *lruvec = mem_cgroup_lruvec(memcg, pgdat); - min_seq = READ_ONCE(lrugen->min_seq[file]); + min_seq = READ_ONCE((*lruvec)->lrugen.min_seq[file]); return (*token >> LRU_REFS_WIDTH) == (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH)); } static void lru_gen_refault(struct folio *folio, void *shadow) { int hist, tier, refs; - int memcg_id; bool workingset; unsigned long token; - unsigned long min_seq; struct lruvec *lruvec; struct lru_gen_folio *lrugen; - struct mem_cgroup *memcg; - struct pglist_data *pgdat; int type = folio_is_file_lru(folio); int delta = folio_nr_pages(folio); rcu_read_lock(); - if (!lru_gen_test_recent(shadow, type, &memcg_id, &pgdat, &token, - &workingset)) - goto unlock; - - memcg = folio_memcg_rcu(folio); - if (memcg_id != mem_cgroup_id(memcg)) + if (!lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset)) goto unlock; - if (pgdat != folio_pgdat(folio)) + if (lruvec != folio_lruvec(folio)) goto unlock; - lruvec = mem_cgroup_lruvec(memcg, pgdat); lrugen = &lruvec->lrugen; - min_seq = READ_ONCE(lrugen->min_seq[type]); - hist = lru_hist_from_seq(min_seq); + hist = lru_hist_from_seq(READ_ONCE(lrugen->min_seq[type])); /* see the comment in folio_lru_refs() */ refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset; tier = lru_tier_from_refs(refs); @@ -339,8 +326,8 @@ static void *lru_gen_eviction(struct folio *folio) return NULL; } -static bool lru_gen_test_recent(void *shadow, bool file, int *memcgid, - struct pglist_data **pgdat, unsigned long *token, bool *workingset) +static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec, + unsigned long *token, bool *workingset) { return false; } @@ -435,8 +422,7 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset) unsigned long eviction; if (lru_gen_enabled()) - return lru_gen_test_recent(shadow, file, &memcgid, &pgdat, &eviction, - workingset); + return lru_gen_test_recent(shadow, file, &eviction_lruvec, &eviction, workingset); unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset); eviction <<= bucket_order; -- cgit v1.2.3 From fc1e0d980037e065441cd1d9a1a5e9c9117e4ba2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 May 2023 14:57:03 +0200 Subject: mm/vmalloc: prevent stale TLBs in fully utilized blocks Patch series "mm/vmalloc: Assorted fixes and improvements", v2. this series addresses the following issues: 1) Prevent the stale TLB problem related to fully utilized vmap blocks 2) Avoid the double per CPU list walk in _vm_unmap_aliases() 3) Avoid flushing dirty space over and over 4) Add a lockless quickcheck in vb_alloc() and add missing READ/WRITE_ONCE() annotations 5) Prevent overeager purging of usable vmap_blocks if not under memory/address space pressure. 
This patch (of 6): _vm_unmap_aliases() is used to ensure that no unflushed TLB entries for a page are left in the system. This is required due to the lazy TLB flush mechanism in vmalloc. This is attempted by walking the per CPU free lists, but those do not contain fully utilized vmap blocks because they are removed from the free list once the block's free space becomes zero. If the block is not fully unmapped, it is not on the purge list either. So neither the per CPU list iteration nor the purge list walk finds the block, and if the page was mapped via such a block and the TLB has not yet been flushed, the guarantee of _vm_unmap_aliases() that there are no stale TLBs after returning is broken: x = vb_alloc() // Removes vmap_block from free list because vb->free became 0 vb_free(x) // Unmaps page and marks in dirty_min/max range // Block still has mappings and is not put on purge list // Page is reused vm_unmap_aliases() // Can't find vmap block with the dirty space -> FAIL So instead of walking the per CPU free lists, walk the per CPU xarrays which hold pointers to _all_ active blocks in the system, including those removed from the free lists. Link: https://lkml.kernel.org/r/20230525122342.109672430@linutronix.de Link: https://lkml.kernel.org/r/20230525124504.573987880@linutronix.de Fixes: db64fe02258f ("mm: rewrite vmap layer") Signed-off-by: Thomas Gleixner Reviewed-by: Christoph Hellwig Reviewed-by: Lorenzo Stoakes Reviewed-by: Uladzislau Rezki (Sony) Reviewed-by: Baoquan He Signed-off-by: Andrew Morton --- mm/vmalloc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 9683573f1225..ac80369eb37a 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2236,9 +2236,10 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) for_each_possible_cpu(cpu) { struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); struct vmap_block *vb; + unsigned long idx; rcu_read_lock(); - list_for_each_entry_rcu(vb, &vbq->free, free_list) { + xa_for_each(&vbq->vmap_blocks, idx, vb) { spin_lock(&vb->lock); if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) { unsigned long va_start = vb->va->va_start; -- cgit v1.2.3 From ca5e46c3400badc418a8fbcaeba711ad60ff4e1b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 May 2023 14:57:04 +0200 Subject: mm/vmalloc: avoid iterating over per CPU vmap blocks twice _vm_unmap_aliases() walks the per CPU xarrays to find partially unmapped blocks and then walks the per CPU free lists to purge fragmented blocks. Arguably that's a waste of CPU cycles and cache lines, as the full xarray walk already touches every block. Avoid this double iteration: - Split out the code to purge one block and the code to free the local purge list into helper functions. - Try to purge the fragmented blocks in the xarray walk before looking at their dirty space. 
Link: https://lkml.kernel.org/r/20230525124504.633469722@linutronix.de Signed-off-by: Thomas Gleixner Reviewed-by: Christoph Hellwig Reviewed-by: Baoquan He Cc: Lorenzo Stoakes Cc: Uladzislau Rezki (Sony) Signed-off-by: Andrew Morton --- mm/vmalloc.c | 70 +++++++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 46 insertions(+), 24 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ac80369eb37a..eaef5e0400db 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2086,39 +2086,54 @@ static void free_vmap_block(struct vmap_block *vb) kfree_rcu(vb, rcu_head); } +static bool purge_fragmented_block(struct vmap_block *vb, + struct vmap_block_queue *vbq, struct list_head *purge_list) +{ + if (vb->free + vb->dirty != VMAP_BBMAP_BITS || + vb->dirty == VMAP_BBMAP_BITS) + return false; + + /* prevent further allocs after releasing lock */ + vb->free = 0; + /* prevent purging it again */ + vb->dirty = VMAP_BBMAP_BITS; + vb->dirty_min = 0; + vb->dirty_max = VMAP_BBMAP_BITS; + spin_lock(&vbq->lock); + list_del_rcu(&vb->free_list); + spin_unlock(&vbq->lock); + list_add_tail(&vb->purge, purge_list); + return true; +} + +static void free_purged_blocks(struct list_head *purge_list) +{ + struct vmap_block *vb, *n_vb; + + list_for_each_entry_safe(vb, n_vb, purge_list, purge) { + list_del(&vb->purge); + free_vmap_block(vb); + } +} + static void purge_fragmented_blocks(int cpu) { LIST_HEAD(purge); struct vmap_block *vb; - struct vmap_block *n_vb; struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); rcu_read_lock(); list_for_each_entry_rcu(vb, &vbq->free, free_list) { - - if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) + if (vb->free + vb->dirty != VMAP_BBMAP_BITS || + vb->dirty == VMAP_BBMAP_BITS) continue; spin_lock(&vb->lock); - if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { - vb->free = 0; /* prevent further allocs after releasing lock */ - vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ - vb->dirty_min = 0; - vb->dirty_max = VMAP_BBMAP_BITS; - spin_lock(&vbq->lock); - list_del_rcu(&vb->free_list); - spin_unlock(&vbq->lock); - spin_unlock(&vb->lock); - list_add_tail(&vb->purge, &purge); - } else - spin_unlock(&vb->lock); + purge_fragmented_block(vb, vbq, &purge); + spin_unlock(&vb->lock); } rcu_read_unlock(); - - list_for_each_entry_safe(vb, n_vb, &purge, purge) { - list_del(&vb->purge); - free_vmap_block(vb); - } + free_purged_blocks(&purge); } static void purge_fragmented_blocks_allcpus(void) @@ -2226,12 +2241,13 @@ static void vb_free(unsigned long addr, unsigned long size) static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) { + LIST_HEAD(purge_list); int cpu; if (unlikely(!vmap_initialized)) return; - might_sleep(); + mutex_lock(&vmap_purge_lock); for_each_possible_cpu(cpu) { struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); @@ -2241,7 +2257,14 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) rcu_read_lock(); xa_for_each(&vbq->vmap_blocks, idx, vb) { spin_lock(&vb->lock); - if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) { + + /* + * Try to purge a fragmented block first. If it's + * not purgeable, check whether there is dirty + * space to be flushed. 
+ */ + if (!purge_fragmented_block(vb, vbq, &purge_list) && + vb->dirty && vb->dirty != VMAP_BBMAP_BITS) { unsigned long va_start = vb->va->va_start; unsigned long s, e; @@ -2257,9 +2280,8 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) } rcu_read_unlock(); } + free_purged_blocks(&purge_list); - mutex_lock(&vmap_purge_lock); - purge_fragmented_blocks_allcpus(); if (!__purge_vmap_area_lazy(start, end) && flush) flush_tlb_kernel_range(start, end); mutex_unlock(&vmap_purge_lock); -- cgit v1.2.3 From a09fad96ffb1d0da007283727235a03b813f989b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 May 2023 14:57:05 +0200 Subject: mm/vmalloc: prevent flushing dirty space over and over vmap blocks which have active mappings cannot be purged. Allocations which have been freed are accounted for in vmap_block::dirty_min/max, so that they can be detected in _vm_unmap_aliases() as potentially stale TLBs. If there are several invocations of _vm_unmap_aliases() then each of them will flush the dirty range. That's pointless and just increases the probability of full TLB flushes. Avoid that by resetting the flush range after accounting for it. That's safe versus other invocations of _vm_unmap_aliases() because this is all serialized with vmap_purge_lock. Link: https://lkml.kernel.org/r/20230525124504.692056496@linutronix.de Signed-off-by: Thomas Gleixner Reviewed-by: Baoquan He Reviewed-by: Christoph Hellwig Reviewed-by: Lorenzo Stoakes Cc: Uladzislau Rezki (Sony) Signed-off-by: Andrew Morton --- mm/vmalloc.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index eaef5e0400db..ad9a1d9e314f 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2226,7 +2226,7 @@ static void vb_free(unsigned long addr, unsigned long size) spin_lock(&vb->lock); - /* Expand dirty range */ + /* Expand the not yet TLB flushed dirty range */ vb->dirty_min = min(vb->dirty_min, offset); vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); @@ -2264,7 +2264,7 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) * space to be flushed. */ if (!purge_fragmented_block(vb, vbq, &purge_list) && - vb->dirty && vb->dirty != VMAP_BBMAP_BITS) { + vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { unsigned long va_start = vb->va->va_start; unsigned long s, e; @@ -2274,6 +2274,10 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) start = min(s, start); end = max(e, end); + /* Prevent that this is flushed again */ + vb->dirty_min = VMAP_BBMAP_BITS; + vb->dirty_max = 0; + flush = 1; } spin_unlock(&vb->lock); -- cgit v1.2.3 From 43d7650234c62201ba3ca5b731226b0b189989a8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 May 2023 14:57:07 +0200 Subject: mm/vmalloc: check free space in vmap_block lockless vb_alloc() unconditionally locks a vmap_block on the free list to check the free space. This can be done locklessly because vmap_block::free never increases, it's only decreased on allocations. Check the free space lockless and only if that succeeds, recheck under the lock. 
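Why the recheck under vb->lock is still needed: two CPUs may pass the lockless test for the same block, and only one can win. A sketch of the race with hypothetical values (the actual hunk follows below):

	/*
	 *   CPU0                               CPU1
	 *   READ_ONCE(vb->free) == 4           READ_ONCE(vb->free) == 4
	 *   spin_lock(&vb->lock)
	 *   vb->free -= 1UL << order  (now 0)
	 *   spin_unlock(&vb->lock)
	 *                                      spin_lock(&vb->lock)
	 *                                      vb->free < (1UL << order) -> skip this block
	 *
	 * The lockless test can never wrongly skip a usable block, because
	 * vb->free only ever decreases while the block is on the free list.
	 */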
Link: https://lkml.kernel.org/r/20230525124504.750481992@linutronix.de Signed-off-by: Thomas Gleixner Reviewed-by: Uladzislau Rezki (Sony) Reviewed-by: Christoph Hellwig Reviewed-by: Lorenzo Stoakes Reviewed-by: Baoquan He Signed-off-by: Andrew Morton --- mm/vmalloc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ad9a1d9e314f..679112e2ffd2 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2168,6 +2168,9 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) list_for_each_entry_rcu(vb, &vbq->free, free_list) { unsigned long pages_off; + if (READ_ONCE(vb->free) < (1UL << order)) + continue; + spin_lock(&vb->lock); if (vb->free < (1UL << order)) { spin_unlock(&vb->lock); @@ -2176,7 +2179,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) pages_off = VMAP_BBMAP_BITS - vb->free; vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); - vb->free -= 1UL << order; + WRITE_ONCE(vb->free, vb->free - (1UL << order)); bitmap_set(vb->used_map, pages_off, (1UL << order)); if (vb->free == 0) { spin_lock(&vbq->lock); -- cgit v1.2.3 From 7f48121e9fa82bdaf0bd0f7a7e49f48803c6c0e8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 May 2023 14:57:08 +0200 Subject: mm/vmalloc: add missing READ/WRITE_ONCE() annotations purge_fragmented_blocks() accesses vmap_block::free and vmap_block::dirty lockless for a quick check. Add the missing READ/WRITE_ONCE() annotations. Link: https://lkml.kernel.org/r/20230525124504.807356682@linutronix.de Signed-off-by: Thomas Gleixner Reviewed-by: Uladzislau Rezki (Sony) Reviewed-by: Christoph Hellwig Reviewed-by: Baoquan He Cc: Lorenzo Stoakes Signed-off-by: Andrew Morton --- mm/vmalloc.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 679112e2ffd2..f643c7286c7a 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2094,9 +2094,9 @@ static bool purge_fragmented_block(struct vmap_block *vb, return false; /* prevent further allocs after releasing lock */ - vb->free = 0; + WRITE_ONCE(vb->free, 0); /* prevent purging it again */ - vb->dirty = VMAP_BBMAP_BITS; + WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS); vb->dirty_min = 0; vb->dirty_max = VMAP_BBMAP_BITS; spin_lock(&vbq->lock); @@ -2124,8 +2124,11 @@ static void purge_fragmented_blocks(int cpu) rcu_read_lock(); list_for_each_entry_rcu(vb, &vbq->free, free_list) { - if (vb->free + vb->dirty != VMAP_BBMAP_BITS || - vb->dirty == VMAP_BBMAP_BITS) + unsigned long free = READ_ONCE(vb->free); + unsigned long dirty = READ_ONCE(vb->dirty); + + if (free + dirty != VMAP_BBMAP_BITS || + dirty == VMAP_BBMAP_BITS) continue; spin_lock(&vb->lock); @@ -2233,7 +2236,7 @@ static void vb_free(unsigned long addr, unsigned long size) vb->dirty_min = min(vb->dirty_min, offset); vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); - vb->dirty += 1UL << order; + WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order)); if (vb->dirty == VMAP_BBMAP_BITS) { BUG_ON(vb->free); spin_unlock(&vb->lock); -- cgit v1.2.3 From 77e50af07f14ea7b53f82f9417ddf2fd96c78da3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 May 2023 14:57:09 +0200 Subject: mm/vmalloc: dont purge usable blocks unnecessarily Purging fragmented blocks is done unconditionally in several contexts: 1) From drain_vmap_area_work(), when the number of lazy to be freed vmap_areas reached the threshold 2) Reclaiming vmalloc address space from pcpu_get_vm_areas() 3) _vm_unmap_aliases() #1 There is no reason to zap 
fragmented vmap blocks unconditionally, simply because reclaiming all lazy areas drains at least 32MB * fls(num_online_cpus()) per invocation which is plenty. #2 Reclaiming when running out of space or due to memory pressure makes a lot of sense #3 _unmap_aliases() requires to touch everything because the caller has no clue which vmap_area used a particular page last and the vmap_area lost that information too. Except for the vfree + VM_FLUSH_RESET_PERMS case, which removes the vmap area first and then cares about the flush. That in turn requires a full walk of _all_ vmap areas including the one which was just added to the purge list. But as this has to be flushed anyway this is an opportunity to combine outstanding TLB flushes and do the housekeeping of purging freed areas, but like #1 there is no real good reason to zap usable vmap blocks unconditionally. Add a @force_purge argument to the newly split out block purge function and if not true only purge fragmented blocks which have less than 1/4 of their capacity left. Rename purge_vmap_area_lazy() to reclaim_and_purge_vmap_areas() to make it clear what the function does. [lstoakes@gmail.com: correct VMAP_PURGE_THRESHOLD check] Link: https://lkml.kernel.org/r/3e92ef61-b910-4576-88e7-cf43211fd4e7@lucifer.local Link: https://lkml.kernel.org/r/20230525124504.864005691@linutronix.de Signed-off-by: Thomas Gleixner Signed-off-by: Lorenzo Stoakes Reviewed-by: Baoquan He Cc: Christoph Hellwig Cc: Lorenzo Stoakes Cc: Uladzislau Rezki (Sony) Signed-off-by: Andrew Morton --- mm/vmalloc.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index f643c7286c7a..7c32435219b1 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -791,7 +791,7 @@ get_subtree_max_size(struct rb_node *node) RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb, struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size) -static void purge_vmap_area_lazy(void); +static void reclaim_and_purge_vmap_areas(void); static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); static void drain_vmap_area_work(struct work_struct *work); static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work); @@ -1649,7 +1649,7 @@ retry: overflow: if (!purged) { - purge_vmap_area_lazy(); + reclaim_and_purge_vmap_areas(); purged = 1; goto retry; } @@ -1785,9 +1785,10 @@ out: } /* - * Kick off a purge of the outstanding lazy areas. + * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list. */ -static void purge_vmap_area_lazy(void) +static void reclaim_and_purge_vmap_areas(void) + { mutex_lock(&vmap_purge_lock); purge_fragmented_blocks_allcpus(); @@ -1908,6 +1909,12 @@ static struct vmap_area *find_unlink_vmap_area(unsigned long addr) #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) +/* + * Purge threshold to prevent overeager purging of fragmented blocks for + * regular operations: Purge if vb->free is less than 1/4 of the capacity. 
+ */ +#define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4) + #define VMAP_RAM 0x1 /* indicates vm_map_ram area*/ #define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/ #define VMAP_FLAGS_MASK 0x3 @@ -2087,12 +2094,17 @@ static void free_vmap_block(struct vmap_block *vb) } static bool purge_fragmented_block(struct vmap_block *vb, - struct vmap_block_queue *vbq, struct list_head *purge_list) + struct vmap_block_queue *vbq, struct list_head *purge_list, + bool force_purge) { if (vb->free + vb->dirty != VMAP_BBMAP_BITS || vb->dirty == VMAP_BBMAP_BITS) return false; + /* Don't overeagerly purge usable blocks unless requested */ + if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD)) + return false; + /* prevent further allocs after releasing lock */ WRITE_ONCE(vb->free, 0); /* prevent purging it again */ @@ -2132,7 +2144,7 @@ static void purge_fragmented_blocks(int cpu) continue; spin_lock(&vb->lock); - purge_fragmented_block(vb, vbq, &purge); + purge_fragmented_block(vb, vbq, &purge, true); spin_unlock(&vb->lock); } rcu_read_unlock(); @@ -2269,7 +2281,7 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) * not purgeable, check whether there is dirty * space to be flushed. */ - if (!purge_fragmented_block(vb, vbq, &purge_list) && + if (!purge_fragmented_block(vb, vbq, &purge_list, false) && vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { unsigned long va_start = vb->va->va_start; unsigned long s, e; @@ -4175,7 +4187,7 @@ recovery: overflow: spin_unlock(&free_vmap_area_lock); if (!purged) { - purge_vmap_area_lazy(); + reclaim_and_purge_vmap_areas(); purged = true; /* Before "retry", check if we recover. */ -- cgit v1.2.3 From 75990f6459b9cf61a94e8a08d0f6a4aa0b8cf3b5 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 25 May 2023 20:53:56 +0800 Subject: mm: compaction: drop the redundant page validation in update_pageblock_skip() Patch series "Misc cleanups and improvements for compaction". This series contains some cleanups and improvements for compaction. This patch (of 6): The caller has validated the page before calling update_pageblock_skip(), thus drop the redundant page validation in update_pageblock_skip(). Link: https://lkml.kernel.org/r/5142e15b9295fe8c447dbb39b7907a20177a1413.1685018752.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Vlastimil Babka Cc: Mel Gorman Signed-off-by: Andrew Morton --- mm/compaction.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 261071a07681..83004c15715a 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -436,9 +436,6 @@ static void update_pageblock_skip(struct compact_control *cc, if (cc->no_set_skip_hint) return; - if (!page) - return; - set_pageblock_skip(page); /* Update where async and sync compaction should restart */ -- cgit v1.2.3 From 2dbd90054f965c899b9adb62b2d0d215f687d04b Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 25 May 2023 20:53:57 +0800 Subject: mm: compaction: change fast_isolate_freepages() to void type No caller cares about the return value of fast_isolate_freepages(), so make it void. 
Link: https://lkml.kernel.org/r/759fca20b22ebf4c81afa30496837b9e0fb2e53b.1685018752.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Vlastimil Babka Cc: Mel Gorman Signed-off-by: Andrew Morton --- mm/compaction.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 83004c15715a..d4cee5803214 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1413,8 +1413,7 @@ static int next_search_order(struct compact_control *cc, int order) return order; } -static unsigned long -fast_isolate_freepages(struct compact_control *cc) +static void fast_isolate_freepages(struct compact_control *cc) { unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1); unsigned int nr_scanned = 0; @@ -1427,7 +1426,7 @@ fast_isolate_freepages(struct compact_control *cc) /* Full compaction passes in a negative order */ if (cc->order <= 0) - return cc->free_pfn; + return; /* * If starting the scan, use a deeper search and use the highest @@ -1566,11 +1565,10 @@ fast_isolate_freepages(struct compact_control *cc) cc->total_free_scanned += nr_scanned; if (!page) - return cc->free_pfn; + return; low_pfn = page_to_pfn(page); fast_isolate_around(cc, low_pfn); - return low_pfn; } /* -- cgit v1.2.3 From cf650342f83ae655c6d05a1a74ae1672459973d0 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 25 May 2023 20:53:58 +0800 Subject: mm: compaction: skip more fully scanned pageblock In fast_isolate_around(), it assumes the pageblock is fully scanned if cc->nr_freepages < cc->nr_migratepages after trying to isolate some free pages, and will set skip flag to avoid scanning in future. However this can miss setting the skip flag for a fully scanned pageblock (returned 'start_pfn' is equal to 'end_pfn') in the case where cc->nr_freepages is larger than cc->nr_migratepages. So using the returned 'start_pfn' from isolate_freepages_block() and 'end_pfn' to decide if a pageblock is fully scanned makes more sense. It can also cover the case where cc->nr_freepages < cc->nr_migratepages, which means the 'start_pfn' is usually equal to 'end_pfn' except some uncommon fatal error occurs after non-strict mode isolation. Link: https://lkml.kernel.org/r/f4efd2fa08735794a6d809da3249b6715ba6ad38.1685018752.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Vlastimil Babka Cc: Mel Gorman Signed-off-by: Andrew Morton --- mm/compaction.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index d4cee5803214..c5f97bfd629c 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1389,7 +1389,7 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn) isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); /* Skip this pageblock in the future as it's full or nearly full */ - if (cc->nr_freepages < cc->nr_migratepages) + if (start_pfn == end_pfn) set_pageblock_skip(page); return; -- cgit v1.2.3 From 8b71b499ff98fdcda7efefc146841a8b4d26813d Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 25 May 2023 20:53:59 +0800 Subject: mm: compaction: only set skip flag if cc->no_set_skip_hint is false To keep the same logic as test_and_set_skip(), only set the skip flag if cc->no_set_skip_hint is false, which makes code more reasonable. 
Link: https://lkml.kernel.org/r/0eb2cd2407ffb259ae6e3071e10f70f2d41d0f3e.1685018752.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Vlastimil Babka Cc: Mel Gorman Signed-off-by: Andrew Morton --- mm/compaction.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index c5f97bfd629c..3b09d8d02581 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1201,7 +1201,7 @@ isolate_abort: * rescanned twice in a row. */ if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) { - if (valid_page && !skip_updated) + if (!cc->no_set_skip_hint && valid_page && !skip_updated) set_pageblock_skip(valid_page); update_cached_migrate(cc, low_pfn); } -- cgit v1.2.3 From 447ba88658faa8dbfd29d557daa38b7d88f460ec Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 25 May 2023 20:54:00 +0800 Subject: mm: compaction: add trace event for fast freepages isolation The fast_isolate_freepages() can also isolate freepages, but we can not know the fast isolation efficiency to understand the fast isolation pressure. So add a trace event to show some numbers to help to understand the efficiency for fast freepages isolation. Link: https://lkml.kernel.org/r/78d2932d0160d122c15372aceb3f2c45460a17fc.1685018752.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Vlastimil Babka Cc: Mel Gorman Signed-off-by: Andrew Morton --- include/trace/events/compaction.h | 11 +++++++++++ mm/compaction.c | 6 +++++- 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h index 3313eb83c117..2b2a975efd20 100644 --- a/include/trace/events/compaction.h +++ b/include/trace/events/compaction.h @@ -64,6 +64,17 @@ DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages, TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken) ); +DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_fast_isolate_freepages, + + TP_PROTO( + unsigned long start_pfn, + unsigned long end_pfn, + unsigned long nr_scanned, + unsigned long nr_taken), + + TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken) +); + #ifdef CONFIG_COMPACTION TRACE_EVENT(mm_compaction_migratepages, diff --git a/mm/compaction.c b/mm/compaction.c index 3b09d8d02581..ce6293bf9c4a 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1416,7 +1416,7 @@ static int next_search_order(struct compact_control *cc, int order) static void fast_isolate_freepages(struct compact_control *cc) { unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1); - unsigned int nr_scanned = 0; + unsigned int nr_scanned = 0, total_isolated = 0; unsigned long low_pfn, min_pfn, highest = 0; unsigned long nr_isolated = 0; unsigned long distance; @@ -1515,6 +1515,7 @@ static void fast_isolate_freepages(struct compact_control *cc) set_page_private(page, order); nr_isolated = 1 << order; nr_scanned += nr_isolated - 1; + total_isolated += nr_isolated; cc->nr_freepages += nr_isolated; list_add_tail(&page->lru, &cc->freepages); count_compact_events(COMPACTISOLATED, nr_isolated); @@ -1535,6 +1536,9 @@ static void fast_isolate_freepages(struct compact_control *cc) limit = max(1U, limit >> 1); } + trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn, + nr_scanned, total_isolated); + if (!page) { cc->fast_search_fail++; if (scan_start) { -- cgit v1.2.3 From a8d13355c660255266ece529e81e6cb26754941a Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 25 May 2023 20:54:01 +0800 Subject: mm: 
compaction: skip fast freepages isolation if enough freepages are isolated I've observed that fast isolation often isolates more pages than cc->migratepages, and the excess freepages will be released back to the buddy system. So skip fast freepages isolation if enough freepages are isolated to save some CPU cycles. Link: https://lkml.kernel.org/r/f39c2c07f2dba2732fd9c0843572e5bef96f7f67.1685018752.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Johannes Weiner Signed-off-by: Andrew Morton --- mm/compaction.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index ce6293bf9c4a..767b0815c874 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1528,6 +1528,10 @@ static void fast_isolate_freepages(struct compact_control *cc) spin_unlock_irqrestore(&cc->zone->lock, flags); + /* Skip fast search if enough freepages isolated */ + if (cc->nr_freepages >= cc->nr_migratepages) + break; + /* * Smaller scan on next order so the total scan is related * to freelist_scan_limit. -- cgit v1.2.3 From 283ebdee2da30f65cba04c8fe690b97acfc7f4c4 Mon Sep 17 00:00:00 2001 From: Tu Jinjiang Date: Thu, 25 May 2023 11:16:40 +0800 Subject: mm: shmem: fix UAF bug in shmem_show_options() shmem_show_options() uses sbinfo->mpol without adding it's refcnt. This may lead to race with replacement of the mpol by remount. The execution sequence is as follows. CPU0 CPU1 shmem_show_options() shmem_reconfigure() shmem_show_mpol(seq, sbinfo->mpol) mpol = sbinfo->mpol mpol_put(mpol) mpol->mode The KASAN report is as follows. BUG: KASAN: slab-use-after-free in shmem_show_options+0x21b/0x340 Read of size 2 at addr ffff888124324004 by task mount/2388 CPU: 2 PID: 2388 Comm: mount Not tainted 6.4.0-rc3-00017-g9d646009f65d-dirty #8 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014 Call Trace: dump_stack_lvl+0x37/0x50 print_report+0xd0/0x620 ? shmem_show_options+0x21b/0x340 ? __virt_addr_valid+0xf4/0x180 ? shmem_show_options+0x21b/0x340 kasan_report+0xb8/0xe0 ? shmem_show_options+0x21b/0x340 shmem_show_options+0x21b/0x340 ? __pfx_shmem_show_options+0x10/0x10 ? strchr+0x2c/0x50 ? strlen+0x23/0x40 ? seq_puts+0x7d/0x90 show_vfsmnt+0x1e6/0x260 ? __pfx_show_vfsmnt+0x10/0x10 ? __kasan_kmalloc+0x7f/0x90 seq_read_iter+0x57a/0x740 vfs_read+0x2e2/0x4a0 ? __pfx_vfs_read+0x10/0x10 ? down_write_killable+0xb8/0x140 ? __pfx_down_write_killable+0x10/0x10 ? __fget_light+0xa9/0x1e0 ? up_write+0x3f/0x80 ksys_read+0xb8/0x150 ? __pfx_ksys_read+0x10/0x10 ? fpregs_assert_state_consistent+0x55/0x60 ? 
exit_to_user_mode_prepare+0x2d/0x120 do_syscall_64+0x3c/0x90 entry_SYSCALL_64_after_hwframe+0x72/0xdc Allocated by task 2387: kasan_save_stack+0x22/0x50 kasan_set_track+0x25/0x30 __kasan_slab_alloc+0x59/0x70 kmem_cache_alloc+0xdd/0x220 mpol_new+0x83/0x150 mpol_parse_str+0x280/0x4a0 shmem_parse_one+0x364/0x520 vfs_parse_fs_param+0xf8/0x1a0 vfs_parse_fs_string+0xc9/0x130 shmem_parse_options+0xb2/0x110 path_mount+0x597/0xdf0 do_mount+0xcd/0xf0 __x64_sys_mount+0xbd/0x100 do_syscall_64+0x3c/0x90 entry_SYSCALL_64_after_hwframe+0x72/0xdc Freed by task 2389: kasan_save_stack+0x22/0x50 kasan_set_track+0x25/0x30 kasan_save_free_info+0x2e/0x50 __kasan_slab_free+0x10e/0x1a0 kmem_cache_free+0x9c/0x350 shmem_reconfigure+0x278/0x370 reconfigure_super+0x383/0x450 path_mount+0xcc5/0xdf0 do_mount+0xcd/0xf0 __x64_sys_mount+0xbd/0x100 do_syscall_64+0x3c/0x90 entry_SYSCALL_64_after_hwframe+0x72/0xdc The buggy address belongs to the object at ffff888124324000 which belongs to the cache numa_policy of size 32 The buggy address is located 4 bytes inside of freed 32-byte region [ffff888124324000, ffff888124324020) ================================================================== To fix the bug, shmem_get_sbmpol() / mpol_put() needs to be called before / after shmem_show_mpol() call. Link: https://lkml.kernel.org/r/20230525031640.593733-1-tujinjiang@huawei.com Signed-off-by: Tu Jinjiang Reviewed-by: Kefeng Wang Acked-by: Hugh Dickins Cc: Nanyong Sun Signed-off-by: Andrew Morton --- mm/shmem.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index e40a08c5c6d7..5e54ab5f61f2 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3726,6 +3726,7 @@ out: static int shmem_show_options(struct seq_file *seq, struct dentry *root) { struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); + struct mempolicy *mpol; if (sbinfo->max_blocks != shmem_default_max_blocks()) seq_printf(seq, ",size=%luk", @@ -3768,7 +3769,9 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root) if (sbinfo->huge) seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); #endif - shmem_show_mpol(seq, sbinfo->mpol); + mpol = shmem_get_sbmpol(sbinfo); + shmem_show_mpol(seq, mpol); + mpol_put(mpol); if (sbinfo->noswap) seq_printf(seq, ",noswap"); return 0; -- cgit v1.2.3 From e0e0b4126c1f1effd480777507a61bd09360dc8f Mon Sep 17 00:00:00 2001 From: "Lars R. Damerow" Date: Wed, 24 May 2023 11:17:33 -0700 Subject: mm/memcontrol: export memcg.swap watermark via sysfs for v2 memcg This patch is similar to commit 8e20d4b33266 ("mm/memcontrol: export memcg->watermark via sysfs for v2 memcg"), but exports the swap counter's watermark. We allocate jobs to our compute farm using heuristics determined by memory and swap usage from previous jobs. Tracking the peak swap usage for new jobs is important for determining when jobs are exceeding their expected bounds, or when our baseline metrics are getting outdated. Our toolset was written to use the "memory.memsw.max_usage_in_bytes" file in cgroups v1, and altering it to poll cgroups v2's "memory.swap.current" would give less accurate results as well as add complication to the code. Having this watermark exposed in sysfs is much preferred. Link: https://lkml.kernel.org/r/20230524181734.125696-1-lars@pixar.com Signed-off-by: Lars R. 
Damerow Acked-by: Johannes Weiner Cc: Jonathan Corbet Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin Cc: Shakeel Butt Cc: Tejun Heo Cc: Zefan Li Signed-off-by: Andrew Morton --- Documentation/admin-guide/cgroup-v2.rst | 7 +++++++ mm/memcontrol.c | 13 +++++++++++++ 2 files changed, 20 insertions(+) (limited to 'mm') diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index f67c0829350b..1ffe019483ac 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1582,6 +1582,13 @@ PAGE_SIZE multiple when read back. Healthy workloads are not expected to reach this limit. + memory.swap.peak + A read-only single value file which exists on non-root + cgroups. + + The max swap usage recorded for the cgroup and its + descendants since the creation of the cgroup. + memory.swap.max A read-write single value file which exists on non-root cgroups. The default is "max". diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6a3d4ce87b8a..6ee433be4c3b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -7622,6 +7622,14 @@ static u64 swap_current_read(struct cgroup_subsys_state *css, return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; } +static u64 swap_peak_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return (u64)memcg->swap.watermark * PAGE_SIZE; +} + static int swap_high_show(struct seq_file *m, void *v) { return seq_puts_memcg_tunable(m, @@ -7700,6 +7708,11 @@ static struct cftype swap_files[] = { .seq_show = swap_max_show, .write = swap_max_write, }, + { + .name = "swap.peak", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = swap_peak_read, + }, { .name = "swap.events", .flags = CFTYPE_NOT_ON_ROOT, -- cgit v1.2.3 From 3af0191a594d5ca5d6d2e3602b5d4284c6835e77 Mon Sep 17 00:00:00 2001 From: Kalesh Singh Date: Tue, 23 May 2023 13:59:21 -0700 Subject: Multi-gen LRU: fix workingset accounting On Android app cycle workloads, MGLRU showed a significant reduction in workingset refaults although pgpgin/pswpin remained relatively unchanged. This indicated MGLRU may be undercounting workingset refaults. This has impact on userspace programs, like Android's LMKD, that monitor workingset refault statistics to detect thrashing. It was found that refaults were only accounted if the MGLRU shadow entry was for a recently evicted folio. However, recently evicted folios should be accounted as workingset activation, and refaults should be accounted regardless of recency. Fix MGLRU's workingset refault and activation accounting to more closely match that of the conventional active/inactive LRU. 
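In short, the corrected rule is: count a refault whenever the shadow entry still resolves to the folio's lruvec, and only credit a workingset activation when the eviction was recent. A condensed sketch of that ordering (the actual change to lru_gen_refault() in mm/workingset.c is in the diff below):

        recent = lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset);
        if (lruvec != folio_lruvec(folio))
                goto unlock;

        /* refault: accounted regardless of how recently the folio was evicted */
        mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta);

        if (!recent)
                goto unlock;

        /* recently evicted: additionally accounted as a workingset activation */
        mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta);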
Link: https://lkml.kernel.org/r/20230523205922.3852731-1-kaleshsingh@google.com Fixes: ac35a4902374 ("mm: multi-gen LRU: minimal implementation") Signed-off-by: Kalesh Singh Reported-by: Charan Teja Kalla Acked-by: Yu Zhao Cc: Brian Geffon Cc: Jan Alexander Steffens (heftig) Cc: Oleksandr Natalenko Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- mm/vmscan.c | 1 - mm/workingset.c | 10 +++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index dbbfcc631f5c..3024b4619b65 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4925,7 +4925,6 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx) WRITE_ONCE(lrugen->protected[hist][type][tier - 1], lrugen->protected[hist][type][tier - 1] + delta); - __mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta); return true; } diff --git a/mm/workingset.c b/mm/workingset.c index 5796e927e6d7..4686ae363000 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -278,6 +278,7 @@ static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec, static void lru_gen_refault(struct folio *folio, void *shadow) { + bool recent; int hist, tier, refs; bool workingset; unsigned long token; @@ -288,10 +289,13 @@ static void lru_gen_refault(struct folio *folio, void *shadow) rcu_read_lock(); - if (!lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset)) + recent = lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset); + if (lruvec != folio_lruvec(folio)) goto unlock; - if (lruvec != folio_lruvec(folio)) + mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta); + + if (!recent) goto unlock; lrugen = &lruvec->lrugen; @@ -302,7 +306,7 @@ static void lru_gen_refault(struct folio *folio, void *shadow) tier = lru_tier_from_refs(refs); atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]); - mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta); + mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta); /* * Count the following two cases as stalls: -- cgit v1.2.3 From ba1b67c79cb3c5f5d11cb475bb7045929b235538 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Fri, 26 May 2023 08:52:50 +0000 Subject: mm/mm_init.c: introduce reset_memoryless_node_totalpages() Currently, no matter whether a node actually has memory or not, calculate_node_totalpages() is used to account number of pages in zone/node. However, for node without memory, these unnecessary calculations can be skipped. All the zone/node page counts can be set to 0 directly. So introduce reset_memoryless_node_totalpages() to perform this action. Furthermore, calculate_node_totalpages() only gets called for the node with memory. 
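Condensed, the resulting caller split in free_area_init_node() looks as follows (illustrative sketch only; the memory-present condition is paraphrased, and the exact hunk is in the diff below):

        if (start_pfn != end_pfn) {             /* node has memory */
                pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
                        (u64)start_pfn << PAGE_SHIFT,
                        end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
                calculate_node_totalpages(pgdat, start_pfn, end_pfn);
        } else {
                pr_info("Initmem setup node %d as memoryless\n", nid);
                /* zero all zone/node page counts instead of computing them */
                reset_memoryless_node_totalpages(pgdat);
        }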
Link: https://lkml.kernel.org/r/20230526085251.1977-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Suggested-by: Mike Rapoport Reviewed-by: Mike Rapoport (IBM) Cc: David Hildenbrand Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/mm_init.c | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index 10bf560302c4..6f7da396b67b 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1176,10 +1176,6 @@ static unsigned long __init zone_absent_pages_in_node(int nid, unsigned long zone_start_pfn, zone_end_pfn; unsigned long nr_absent; - /* When hotadd a new node from cpu_up(), the node should be empty */ - if (!node_start_pfn && !node_end_pfn) - return 0; - zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); @@ -1229,9 +1225,6 @@ static unsigned long __init zone_spanned_pages_in_node(int nid, { unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; - /* When hotadd a new node from cpu_up(), the node should be empty */ - if (!node_start_pfn && !node_end_pfn) - return 0; /* Get the start and end of the zone */ *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); @@ -1252,6 +1245,24 @@ static unsigned long __init zone_spanned_pages_in_node(int nid, return *zone_end_pfn - *zone_start_pfn; } +static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat) +{ + struct zone *z; + + for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) { + z->zone_start_pfn = 0; + z->spanned_pages = 0; + z->present_pages = 0; +#if defined(CONFIG_MEMORY_HOTPLUG) + z->present_early_pages = 0; +#endif + } + + pgdat->node_spanned_pages = 0; + pgdat->node_present_pages = 0; + pr_debug("On node %d totalpages: 0\n", pgdat->node_id); +} + static void __init calculate_node_totalpages(struct pglist_data *pgdat, unsigned long node_start_pfn, unsigned long node_end_pfn) @@ -1704,11 +1715,13 @@ static void __init free_area_init_node(int nid) pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, (u64)start_pfn << PAGE_SHIFT, end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); + + calculate_node_totalpages(pgdat, start_pfn, end_pfn); } else { pr_info("Initmem setup node %d as memoryless\n", nid); - } - calculate_node_totalpages(pgdat, start_pfn, end_pfn); + reset_memoryless_node_totalpages(pgdat); + } alloc_node_mem_map(pgdat); pgdat_set_deferred_range(pgdat); -- cgit v1.2.3 From 1c2d252f5b4289e1c6840bcf394157b70c639d6e Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Fri, 26 May 2023 08:52:51 +0000 Subject: mm/mm_init.c: do not calculate zone_start_pfn/zone_end_pfn in zone_absent_pages_in_node() In calculate_node_totalpages(), zone_start_pfn/zone_end_pfn are already calculated in zone_spanned_pages_in_node(), so use them as parameters instead of node_start_pfn/node_end_pfn and the duplicated calculation process can de dropped. 
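Condensed, the per-zone loop in calculate_node_totalpages() then reuses the already clamped zone range (sketch of the diff below):

        spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
                                             node_start_pfn, node_end_pfn,
                                             &zone_start_pfn, &zone_end_pfn);
        /* reuse zone_start_pfn/zone_end_pfn instead of clamping them again */
        absent = zone_absent_pages_in_node(pgdat->node_id, i,
                                           zone_start_pfn, zone_end_pfn);
        real_size = spanned - absent;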
Link: https://lkml.kernel.org/r/20230526085251.1977-2-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Suggested-by: Mike Rapoport Reviewed-by: Mike Rapoport (IBM) Cc: David Hildenbrand Cc: Haifeng Xu Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/mm_init.c | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index 6f7da396b67b..19652c1b3426 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1168,20 +1168,15 @@ unsigned long __init absent_pages_in_range(unsigned long start_pfn, /* Return the number of page frames in holes in a zone on a node */ static unsigned long __init zone_absent_pages_in_node(int nid, unsigned long zone_type, - unsigned long node_start_pfn, - unsigned long node_end_pfn) + unsigned long zone_start_pfn, + unsigned long zone_end_pfn) { - unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; - unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; - unsigned long zone_start_pfn, zone_end_pfn; unsigned long nr_absent; - zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); - zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); + /* zone is empty, we don't have any absent pages */ + if (zone_start_pfn == zone_end_pfn) + return 0; - adjust_zone_range_for_zone_movable(nid, zone_type, - node_start_pfn, node_end_pfn, - &zone_start_pfn, &zone_end_pfn); nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); /* @@ -1274,7 +1269,7 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat, struct zone *zone = pgdat->node_zones + i; unsigned long zone_start_pfn, zone_end_pfn; unsigned long spanned, absent; - unsigned long size, real_size; + unsigned long real_size; spanned = zone_spanned_pages_in_node(pgdat->node_id, i, node_start_pfn, @@ -1282,23 +1277,22 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat, &zone_start_pfn, &zone_end_pfn); absent = zone_absent_pages_in_node(pgdat->node_id, i, - node_start_pfn, - node_end_pfn); + zone_start_pfn, + zone_end_pfn); - size = spanned; - real_size = size - absent; + real_size = spanned - absent; - if (size) + if (spanned) zone->zone_start_pfn = zone_start_pfn; else zone->zone_start_pfn = 0; - zone->spanned_pages = size; + zone->spanned_pages = spanned; zone->present_pages = real_size; #if defined(CONFIG_MEMORY_HOTPLUG) zone->present_early_pages = real_size; #endif - totalpages += size; + totalpages += spanned; realtotalpages += real_size; } -- cgit v1.2.3 From f9f956b550b8ce6fb902af9f29ba31b3e0fd052d Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Mon, 29 May 2023 14:13:51 +0800 Subject: swap: remove get/put_swap_device() in __swap_count() Patch series "swap: cleanup get/put_swap_device() usage", v3. The general rule to use a swap entry is as follows. When we get a swap entry, if there aren't some other ways to prevent swapoff, such as the folio in swap cache is locked, page table lock is held, etc., the swap entry may become invalid because of swapoff. Then, we need to enclose all swap related functions with get_swap_device() and put_swap_device(), unless the swap functions call get/put_swap_device() by themselves. Based on the above rule, all get/put_swap_device() usage are checked and cleaned up if necessary. This patch (of 5): get/put_swap_device() are added to __swap_count() in commit eb085574a752 ("mm, swap: fix race between swapoff and some swap operations"). 
Later, in commit 2799e77529c2 ("swap: fix do_swap_page() race with swapoff"), get/put_swap_device() are added to do_swap_page(). And they enclose the only call site of __swap_count(). So, it's safe to remove get/put_swap_device() in __swap_count() now. Link: https://lkml.kernel.org/r/20230529061355.125791-1-ying.huang@intel.com Link: https://lkml.kernel.org/r/20230529061355.125791-2-ying.huang@intel.com Signed-off-by: "Huang, Ying" Reviewed-by: Yosry Ahmed Reviewed-by: David Hildenbrand Reviewed-by: Chris Li (Google) Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Minchan Kim Cc: Tim Chen Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/swapfile.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index c74259001d5e..cf8b16b6a98e 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1433,16 +1433,10 @@ void swapcache_free_entries(swp_entry_t *entries, int n) int __swap_count(swp_entry_t entry) { - struct swap_info_struct *si; + struct swap_info_struct *si = swp_swap_info(entry); pgoff_t offset = swp_offset(entry); - int count = 0; - si = get_swap_device(entry); - if (si) { - count = swap_count(si->swap_map[offset]); - put_swap_device(si); - } - return count; + return swap_count(si->swap_map[offset]); } /* -- cgit v1.2.3 From 46a774d3eae523fd9a1907ba90e9a116cd1c5ddc Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Mon, 29 May 2023 14:13:52 +0800 Subject: swap, __read_swap_cache_async(): enlarge get/put_swap_device protection range This makes the function a little easier to be understood because we don't need to consider swapoff. And this makes it possible to remove get/put_swap_device() calling in some functions called by __read_swap_cache_async(). Link: https://lkml.kernel.org/r/20230529061355.125791-3-ying.huang@intel.com Signed-off-by: "Huang, Ying" Reviewed-by: David Hildenbrand Reviewed-by: Chris Li (Google) Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Minchan Kim Cc: Tim Chen Cc: Yang Shi Cc: Yu Zhao Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/swap_state.c | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/swap_state.c b/mm/swap_state.c index b76a65ac28b3..a8450b4a110c 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -417,9 +417,13 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, { struct swap_info_struct *si; struct folio *folio; + struct page *page; void *shadow = NULL; *new_page_allocated = false; + si = get_swap_device(entry); + if (!si) + return NULL; for (;;) { int err; @@ -428,14 +432,12 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * called after swap_cache_get_folio() failed, re-calling * that would confuse statistics. */ - si = get_swap_device(entry); - if (!si) - return NULL; folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry)); - put_swap_device(si); - if (!IS_ERR(folio)) - return folio_file_page(folio, swp_offset(entry)); + if (!IS_ERR(folio)) { + page = folio_file_page(folio, swp_offset(entry)); + goto got_page; + } /* * Just skip read ahead for unused swap slot. @@ -446,7 +448,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * else swap_off will be aborted if we return NULL. */ if (!__swp_swapcount(entry) && swap_slot_cache_enabled) - return NULL; + goto fail_put_swap; /* * Get a new page to read into from swap. 
Allocate it now, @@ -455,7 +457,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, */ folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false); if (!folio) - return NULL; + goto fail_put_swap; /* * Swap entry may have been freed since our caller observed it. @@ -466,7 +468,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, folio_put(folio); if (err != -EEXIST) - return NULL; + goto fail_put_swap; /* * We might race against __delete_from_swap_cache(), and @@ -500,12 +502,17 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, /* Caller will initiate read into locked folio */ folio_add_lru(folio); *new_page_allocated = true; - return &folio->page; + page = &folio->page; +got_page: + put_swap_device(si); + return page; fail_unlock: put_swap_folio(folio, entry); folio_unlock(folio); folio_put(folio); +fail_put_swap: + put_swap_device(si); return NULL; } @@ -514,6 +521,10 @@ fail_unlock: * and reading the disk if it is not already cached. * A failure return means that either the page allocation failed or that * the swap entry is no longer in use. + * + * get/put_swap_device() aren't needed to call this function, because + * __read_swap_cache_async() call them and swap_readpage() holds the + * swap cache folio lock. */ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, -- cgit v1.2.3 From 3ecdeb0f876e91c4a7129ba2ba5baa530aa6c4f9 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Mon, 29 May 2023 14:13:53 +0800 Subject: swap: remove __swp_swapcount() __swp_swapcount() just encloses the calling to swap_swapcount() with get/put_swap_device(). It is called in __read_swap_cache_async() only, which encloses the calling with get/put_swap_device() already. So, __read_swap_cache_async() can call swap_swapcount() directly. Link: https://lkml.kernel.org/r/20230529061355.125791-4-ying.huang@intel.com Signed-off-by: "Huang, Ying" Reviewed-by: David Hildenbrand Reviewed-by: Chris Li (Google) Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Minchan Kim Cc: Tim Chen Cc: Yang Shi Cc: Yu Zhao Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/swap.h | 4 ++-- mm/swap_state.c | 2 +- mm/swapfile.c | 20 +------------------- 3 files changed, 4 insertions(+), 22 deletions(-) (limited to 'mm') diff --git a/include/linux/swap.h b/include/linux/swap.h index b2128df5edea..2ddbfd85f6c7 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -513,7 +513,7 @@ int find_first_swap(dev_t *device); extern unsigned int count_swap_pages(int, int); extern sector_t swapdev_block(int, pgoff_t); extern int __swap_count(swp_entry_t entry); -extern int __swp_swapcount(swp_entry_t entry); +extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); extern struct swap_info_struct *page_swap_info(struct page *); extern struct swap_info_struct *swp_swap_info(swp_entry_t entry); @@ -591,7 +591,7 @@ static inline int __swap_count(swp_entry_t entry) return 0; } -static inline int __swp_swapcount(swp_entry_t entry) +static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) { return 0; } diff --git a/mm/swap_state.c b/mm/swap_state.c index a8450b4a110c..ef32353c18a6 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -447,7 +447,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * as SWAP_HAS_CACHE. 
That's done in later part of code or * else swap_off will be aborted if we return NULL. */ - if (!__swp_swapcount(entry) && swap_slot_cache_enabled) + if (!swap_swapcount(si, entry) && swap_slot_cache_enabled) goto fail_put_swap; /* diff --git a/mm/swapfile.c b/mm/swapfile.c index cf8b16b6a98e..2d264efe90d2 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1444,7 +1444,7 @@ int __swap_count(swp_entry_t entry) * This does not give an exact answer when swap count is continued, * but does include the high COUNT_CONTINUED flag to allow for that. */ -static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) +int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) { pgoff_t offset = swp_offset(entry); struct swap_cluster_info *ci; @@ -1456,24 +1456,6 @@ static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) return count; } -/* - * How many references to @entry are currently swapped out? - * This does not give an exact answer when swap count is continued, - * but does include the high COUNT_CONTINUED flag to allow for that. - */ -int __swp_swapcount(swp_entry_t entry) -{ - int count = 0; - struct swap_info_struct *si; - - si = get_swap_device(entry); - if (si) { - count = swap_swapcount(si, entry); - put_swap_device(si); - } - return count; -} - /* * How many references to @entry are currently swapped out? * This considers COUNT_CONTINUED so it returns exact answer. -- cgit v1.2.3 From c07aee4f82af3c466509782b15658837fe53babc Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Mon, 29 May 2023 14:13:54 +0800 Subject: swap: remove get/put_swap_device() in __swap_duplicate() __swap_duplicate() is called by - swap_shmem_alloc(): the folio in swap cache is locked. - copy_nonpresent_pte() -> swap_duplicate() and try_to_unmap_one() -> swap_duplicate(): the page table lock is held. - __read_swap_cache_async() -> swapcache_prepare(): enclosed with get/put_swap_device() in __read_swap_cache_async() already. So, it's safe to remove get/put_swap_device() in __swap_duplicate(). Link: https://lkml.kernel.org/r/20230529061355.125791-5-ying.huang@intel.com Signed-off-by: "Huang, Ying" Reviewed-by: Yosry Ahmed Reviewed-by: David Hildenbrand Reviewed-by: Chris Li (Google) Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Minchan Kim Cc: Tim Chen Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/swapfile.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index 2d264efe90d2..bd035677f196 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3265,9 +3265,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage) unsigned char has_cache; int err; - p = get_swap_device(entry); - if (!p) - return -EINVAL; + p = swp_swap_info(entry); offset = swp_offset(entry); ci = lock_cluster_or_swap_info(p, offset); @@ -3314,7 +3312,6 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage) unlock_out: unlock_cluster_or_swap_info(p, ci); - put_swap_device(p); return err; } -- cgit v1.2.3 From a95722a047724ef75567381976a36f0e44230bd9 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Mon, 29 May 2023 14:13:55 +0800 Subject: swap: comments get_swap_device() with usage rule The general rule to use a swap entry is as follows. When we get a swap entry, if there aren't some other ways to prevent swapoff, such as the folio in swap cache is locked, page table lock is held, etc., the swap entry may become invalid because of swapoff. 
Then, we need to enclose all swap related functions with get_swap_device() and put_swap_device(), unless the swap functions call get/put_swap_device() by themselves. Add the rule as comments of get_swap_device(). Link: https://lkml.kernel.org/r/20230529061355.125791-6-ying.huang@intel.com Signed-off-by: "Huang, Ying" Reviewed-by: David Hildenbrand Reviewed-by: Yosry Ahmed Reviewed-by: Chris Li (Google) Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Minchan Kim Cc: Tim Chen Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/swapfile.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index bd035677f196..df312534e239 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1220,6 +1220,13 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p, } /* + * When we get a swap entry, if there aren't some other ways to + * prevent swapoff, such as the folio in swap cache is locked, page + * table lock is held, etc., the swap entry may become invalid because + * of swapoff. Then, we need to enclose all swap related functions + * with get_swap_device() and put_swap_device(), unless the swap + * functions call get/put_swap_device() by themselves. + * * Check whether swap entry is valid in the swap device. If so, * return pointer to swap_info_struct, and keep the swap entry valid * via preventing the swap device from being swapoff, until @@ -1228,9 +1235,8 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p, * Notice that swapoff or swapoff+swapon can still happen before the * percpu_ref_tryget_live() in get_swap_device() or after the * percpu_ref_put() in put_swap_device() if there isn't any other way - * to prevent swapoff, such as page lock, page table lock, etc. The - * caller must be prepared for that. For example, the following - * situation is possible. + * to prevent swapoff. The caller must be prepared for that. For + * example, the following situation is possible. * * CPU1 CPU2 * do_swap_page() -- cgit v1.2.3 From deedad80f660af8199ea3b3f70939f2d226b9154 Mon Sep 17 00:00:00 2001 From: Yin Fengwei Date: Sat, 29 Apr 2023 16:27:58 +0800 Subject: THP: avoid lock when check whether THP is in deferred list free_transhuge_page() acquires split queue lock then check whether the THP was added to deferred list or not. It brings high deferred queue lock contention. It's safe to check whether the THP is in deferred list or not without holding the deferred queue lock in free_transhuge_page() because when code hit free_transhuge_page(), there is no one tries to add the folio to _deferred_list. Running page_fault1 of will-it-scale + order 2 folio for anonymous mapping with 96 processes on an Ice Lake 48C/96T test box, we could see the 61% split_queue_lock contention: - 63.02% 0.01% page_fault1_pro [kernel.kallsyms] [k] free_transhuge_page - 63.01% free_transhuge_page + 62.91% _raw_spin_lock_irqsave With this patch applied, the split_queue_lock contention is less than 1%. Link: https://lkml.kernel.org/r/20230429082759.1600796-2-fengwei.yin@intel.com Signed-off-by: Yin Fengwei Acked-by: Kirill A. 
Shutemov Reviewed-by: "Huang, Ying" Cc: Matthew Wilcox Cc: Ryan Roberts Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/huge_memory.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 624671aaa60d..9ccdb3fe3244 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2792,12 +2792,19 @@ void free_transhuge_page(struct page *page) struct deferred_split *ds_queue = get_deferred_split_queue(folio); unsigned long flags; - spin_lock_irqsave(&ds_queue->split_queue_lock, flags); - if (!list_empty(&folio->_deferred_list)) { - ds_queue->split_queue_len--; - list_del(&folio->_deferred_list); + /* + * At this point, there is no one trying to add the folio to + * deferred_list. If folio is not in deferred_list, it's safe + * to check without acquiring the split_queue_lock. + */ + if (data_race(!list_empty(&folio->_deferred_list))) { + spin_lock_irqsave(&ds_queue->split_queue_lock, flags); + if (!list_empty(&folio->_deferred_list)) { + ds_queue->split_queue_len--; + list_del(&folio->_deferred_list); + } + spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); } - spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); free_compound_page(page); } -- cgit v1.2.3 From 837c2ba56d6fd1ecf7a1c5aa0cdc872f3b74185b Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Sun, 28 May 2023 04:57:20 +0000 Subject: mm/mm_init.c: remove free_area_init_memoryless_node() free_area_init_memoryless_node() is just a wrapper of free_area_init_node(), remove it to clean up. Link: https://lkml.kernel.org/r/20230528045720.4835-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Acked-by: Michal Hocko Cc: Mike Rapoport Signed-off-by: Andrew Morton --- mm/mm_init.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index 19652c1b3426..3ddd18a89b66 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1754,11 +1754,6 @@ void __init setup_nr_node_ids(void) } #endif -static void __init free_area_init_memoryless_node(int nid) -{ - free_area_init_node(nid); -} - /* * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For * such cases we allow max_zone_pfn sorted in the descending order @@ -1869,7 +1864,7 @@ void __init free_area_init(unsigned long *max_zone_pfn) panic("Cannot allocate %zuB for node %d.\n", sizeof(*pgdat), nid); arch_refresh_nodedata(nid, pgdat); - free_area_init_memoryless_node(nid); + free_area_init_node(nid); /* * We do not want to confuse userspace by sysfs -- cgit v1.2.3 From 3b11edf1f2398cac206a224308de6628ebeea924 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 14 May 2023 09:28:56 +0900 Subject: mm/page_alloc: don't wake kswapd from rmqueue() unless __GFP_KSWAPD_RECLAIM is specified Commit 73444bc4d8f9 ("mm, page_alloc: do not wake kswapd with zone lock held") moved wakeup_kswapd() from steal_suitable_fallback() to rmqueue() using ZONE_BOOSTED_WATERMARK flag. Only allocation contexts that include ALLOC_KSWAPD (which corresponds to __GFP_KSWAPD_RECLAIM) should wake kswapd, for callers are supposed to remove __GFP_KSWAPD_RECLAIM if trying to hold pgdat->kswapd_wait has a risk of deadlock. But since zone->flags is a shared variable, a thread doing !__GFP_KSWAPD_RECLAIM allocation request might observe this flag being set immediately after another thread doing __GFP_KSWAPD_RECLAIM allocation request set this flag, causing possibility of deadlock. 
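One possible interleaving, in the style of the race diagrams above (illustrative; the fix below simply makes the wakeup conditional on ALLOC_KSWAPD):

        CPU0 (__GFP_KSWAPD_RECLAIM)             CPU1 (!__GFP_KSWAPD_RECLAIM)
        rmqueue()
          steal_suitable_fallback()
            set ZONE_BOOSTED_WATERMARK
                                                rmqueue()
                                                  test_bit(ZONE_BOOSTED_WATERMARK)
                                                  wakeup_kswapd()
                                                    /* wakes pgdat->kswapd_wait, which
                                                       this caller tried to avoid by
                                                       dropping __GFP_KSWAPD_RECLAIM */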
Link: https://lkml.kernel.org/r/c3c3dacf-dd3b-77c9-f96a-d0982b4b2a4f@I-love.SAKURA.ne.jp Fixes: 73444bc4d8f9 ("mm, page_alloc: do not wake kswapd with zone lock held") Signed-off-by: Tetsuo Handa Acked-by: Mel Gorman Cc: "Huang, Ying" Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/page_alloc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7e8673504a3d..091a6cb44265 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2827,7 +2827,8 @@ struct page *rmqueue(struct zone *preferred_zone, out: /* Separate test+clear to avoid unnecessary atomics */ - if (unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { + if ((alloc_flags & ALLOC_KSWAPD) && + unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); wakeup_kswapd(zone, 0, 0, zone_idx(zone)); } -- cgit v1.2.3 From 16618670276a77480e274117992cec5e42ce66a9 Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Wed, 31 May 2023 17:58:17 +0800 Subject: mm: khugepaged: avoid pointless allocation for "struct mm_slot" In __khugepaged_enter(), if "mm->flags" with MMF_VM_HUGEPAGE bit is set, the "mm_slot" will be released and return, so we can call mm_slot_alloc() after test_and_set_bit(). Link: https://lkml.kernel.org/r/20230531095817.11012-1-xhao@linux.alibaba.com Signed-off-by: Xin Hao Reviewed-by: Andrew Morton Signed-off-by: Andrew Morton --- mm/khugepaged.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 6b9d39d65b73..3649ba12a235 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -422,19 +422,17 @@ void __khugepaged_enter(struct mm_struct *mm) struct mm_slot *slot; int wakeup; + /* __khugepaged_exit() must not run from under us */ + VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm); + if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) + return; + mm_slot = mm_slot_alloc(mm_slot_cache); if (!mm_slot) return; slot = &mm_slot->slot; - /* __khugepaged_exit() must not run from under us */ - VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm); - if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { - mm_slot_free(mm_slot_cache, mm_slot); - return; - } - spin_lock(&khugepaged_mm_lock); mm_slot_insert(mm_slots_hash, mm, slot); /* -- cgit v1.2.3 From e3d9b45fb17cfddb1c414b5981743d4245fcf486 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Thu, 1 Jun 2023 06:35:35 +0000 Subject: mm/mm_init.c: move set_pageblock_order() to free_area_init() pageblock_order only needs to be set once, there is no need to initialize it in every zone/node. 
Link: https://lkml.kernel.org/r/20230601063536.26882-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Reviewed-by: Mike Rapoport (IBM) Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/mm_init.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index 3ddd18a89b66..015355dfdc0b 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1585,7 +1585,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat) if (!size) continue; - set_pageblock_order(); setup_usemap(zone); init_currently_empty_zone(zone, zone->zone_start_pfn, size); } @@ -1852,6 +1851,8 @@ void __init free_area_init(unsigned long *max_zone_pfn) /* Initialise every node */ mminit_verify_pageflags_layout(); setup_nr_node_ids(); + set_pageblock_order(); + for_each_node(nid) { pg_data_t *pgdat; -- cgit v1.2.3 From e0228d590beb0d0af345c58a282f01afac5c57f3 Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Fri, 26 May 2023 20:32:27 +0200 Subject: mm: zswap: shrink until can accept This update addresses an issue with the zswap reclaim mechanism, which hinders the efficient offloading of cold pages to disk, thereby compromising the preservation of the LRU order and consequently diminishing, if not inverting, its performance benefits. The functioning of the zswap shrink worker was found to be inadequate, as shown by basic benchmark test. For the test, a kernel build was utilized as a reference, with its memory confined to 1G via a cgroup and a 5G swap file provided. The results are presented below, these are averages of three runs without the use of zswap: real 46m26s user 35m4s sys 7m37s With zswap (zbud) enabled and max_pool_percent set to 1 (in a 32G system), the results changed to: real 56m4s user 35m13s sys 8m43s written_back_pages: 18 reject_reclaim_fail: 0 pool_limit_hit:1478 Besides the evident regression, one thing to notice from this data is the extremely low number of written_back_pages and pool_limit_hit. The pool_limit_hit counter, which is increased in zswap_frontswap_store when zswap is completely full, doesn't account for a particular scenario: once zswap hits his limit, zswap_pool_reached_full is set to true; with this flag on, zswap_frontswap_store rejects pages if zswap is still above the acceptance threshold. Once we include the rejections due to zswap_pool_reached_full && !zswap_can_accept(), the number goes from 1478 to a significant 21578266. Zswap is stuck in an undesirable state where it rejects pages because it's above the acceptance threshold, yet fails to attempt memory reclaimation. This happens because the shrink work is only queued when zswap_frontswap_store detects that it's full and the work itself only reclaims one page per run. This state results in hot pages getting written directly to disk, while cold ones remain memory, waiting only to be invalidated. The LRU order is completely broken and zswap ends up being just an overhead without providing any benefits. This commit applies 2 changes: a) the shrink worker is set to reclaim pages until the acceptance threshold is met and b) the task is also enqueued when zswap is not full but still above the threshold. 
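A condensed, illustrative view of the two changes (paraphrased; the exact hunks are in the diff below, and zswap_is_full()/zswap_can_accept() are the existing pool-size checks against the max_pool_percent limit and the lower accept_threshold_percent watermark):

        /*
         * zswap_frontswap_store(): b) also kick the shrinker while usage is
         * still above the acceptance threshold, instead of only rejecting.
         */
        if (zswap_pool_reached_full && !zswap_can_accept()) {
                ret = -ENOMEM;
                goto shrink;            /* previously: goto reject */
        }

        /*
         * shrink_worker(): a) reclaim until the acceptance threshold is met
         * (failure/retry handling elided here, see the diff below).
         */
        do {
                if (zpool_shrink(pool->zpool, 1, NULL))
                        zswap_reject_reclaim_fail++;
                cond_resched();
        } while (!zswap_can_accept());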
Testing this suggested update showed much better numbers: real 36m37s user 35m8s sys 9m32s written_back_pages: 10459423 reject_reclaim_fail: 12896 pool_limit_hit: 75653 Link: https://lkml.kernel.org/r/20230526183227.793977-1-cerasuolodomenico@gmail.com Fixes: 45190f01dd40 ("mm/zswap.c: add allocation hysteresis if pool limit is hit") Signed-off-by: Domenico Cerasuolo Acked-by: Johannes Weiner Reviewed-by: Yosry Ahmed Reviewed-by: Vitaly Wool Cc: Dan Streetman Cc: Seth Jennings Signed-off-by: Andrew Morton --- mm/zswap.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/zswap.c b/mm/zswap.c index 59da2a415fbb..bcb82e09eb64 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -37,6 +37,7 @@ #include #include "swap.h" +#include "internal.h" /********************************* * statistics @@ -587,9 +588,19 @@ static void shrink_worker(struct work_struct *w) { struct zswap_pool *pool = container_of(w, typeof(*pool), shrink_work); + int ret, failures = 0; - if (zpool_shrink(pool->zpool, 1, NULL)) - zswap_reject_reclaim_fail++; + do { + ret = zpool_shrink(pool->zpool, 1, NULL); + if (ret) { + zswap_reject_reclaim_fail++; + if (ret != -EAGAIN) + break; + if (++failures == MAX_RECLAIM_RETRIES) + break; + } + cond_resched(); + } while (!zswap_can_accept()); zswap_pool_put(pool); } @@ -1188,7 +1199,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, if (zswap_pool_reached_full) { if (!zswap_can_accept()) { ret = -ENOMEM; - goto reject; + goto shrink; } else zswap_pool_reached_full = false; } -- cgit v1.2.3 From 0d625446d0a451a683a357799912b9e688629707 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Jun 2023 16:58:53 +0200 Subject: backing_dev: remove current->backing_dev_info Patch series "cleanup the filemap / direct I/O interaction", v4. This series cleans up some of the generic write helper calling conventions and the page cache writeback / invalidation for direct I/O. This is a spinoff from the no-bufferhead kernel project, for which we'll want to an use iomap based buffered write path in the block layer. This patch (of 12): The last user of current->backing_dev_info disappeared in commit b9b1335e6403 ("remove bdi_congested() and wb_congested() and related functions"). Remove the field and all assignments to it. Link: https://lkml.kernel.org/r/20230601145904.1385409-1-hch@lst.de Link: https://lkml.kernel.org/r/20230601145904.1385409-2-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Christian Brauner Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Reviewed-by: Darrick J. 
Wong Acked-by: Theodore Ts'o Cc: Al Viro Cc: Andreas Gruenbacher Cc: Anna Schumaker Cc: Chao Yu Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jens Axboe Cc: Matthew Wilcox Cc: Miklos Szeredi Cc: Miklos Szeredi Cc: Trond Myklebust Cc: Xiubo Li Signed-off-by: Andrew Morton --- fs/btrfs/file.c | 6 +----- fs/ceph/file.c | 4 ---- fs/ext4/file.c | 2 -- fs/f2fs/file.c | 2 -- fs/fuse/file.c | 4 ---- fs/gfs2/file.c | 2 -- fs/nfs/file.c | 5 +---- fs/ntfs/file.c | 2 -- fs/ntfs3/file.c | 3 --- fs/xfs/xfs_file.c | 4 ---- include/linux/sched.h | 3 --- mm/filemap.c | 3 --- 12 files changed, 2 insertions(+), 38 deletions(-) (limited to 'mm') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f649647392e0..ecd43ab66fa6 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1145,7 +1145,6 @@ static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, !(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC))) return -EAGAIN; - current->backing_dev_info = inode_to_bdi(inode); ret = file_remove_privs(file); if (ret) return ret; @@ -1165,10 +1164,8 @@ static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, loff_t end_pos = round_up(pos + count, fs_info->sectorsize); ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos); - if (ret) { - current->backing_dev_info = NULL; + if (ret) return ret; - } } return 0; @@ -1689,7 +1686,6 @@ ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from, if (sync) atomic_dec(&inode->sync_writers); - current->backing_dev_info = NULL; return num_written; } diff --git a/fs/ceph/file.c b/fs/ceph/file.c index f4d8bf7dec88..c8ef72f723ba 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1791,9 +1791,6 @@ retry_snap: else ceph_start_io_write(inode); - /* We can write back this queue in page reclaim */ - current->backing_dev_info = inode_to_bdi(inode); - if (iocb->ki_flags & IOCB_APPEND) { err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); if (err < 0) @@ -1940,7 +1937,6 @@ out: ceph_end_io_write(inode); out_unlocked: ceph_free_cap_flush(prealloc_cf); - current->backing_dev_info = NULL; return written ? 
written : err; } diff --git a/fs/ext4/file.c b/fs/ext4/file.c index d101b3b0c7da..bc430270c23c 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -285,9 +285,7 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb, if (ret <= 0) goto out; - current->backing_dev_info = inode_to_bdi(inode); ret = generic_perform_write(iocb, from); - current->backing_dev_info = NULL; out: inode_unlock(inode); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 5ac53d2627d2..4f423d367a44 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -4517,9 +4517,7 @@ static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb, if (iocb->ki_flags & IOCB_NOWAIT) return -EOPNOTSUPP; - current->backing_dev_info = inode_to_bdi(inode); ret = generic_perform_write(iocb, from); - current->backing_dev_info = NULL; if (ret > 0) { iocb->ki_pos += ret; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 89d97f6188e0..97d435874b14 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1362,9 +1362,6 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) writethrough: inode_lock(inode); - /* We can write back this queue in page reclaim */ - current->backing_dev_info = inode_to_bdi(inode); - err = generic_write_checks(iocb, from); if (err <= 0) goto out; @@ -1409,7 +1406,6 @@ writethrough: iocb->ki_pos += written; } out: - current->backing_dev_info = NULL; inode_unlock(inode); if (written > 0) written = generic_write_sync(iocb, written); diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 300844f50dcd..904a0d6ac1a1 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -1041,11 +1041,9 @@ retry: goto out_unlock; } - current->backing_dev_info = inode_to_bdi(inode); pagefault_disable(); ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops); pagefault_enable(); - current->backing_dev_info = NULL; if (ret > 0) { iocb->ki_pos += ret; written += ret; diff --git a/fs/nfs/file.c b/fs/nfs/file.c index f0edf5a36237..665ce3fc62ea 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -648,11 +648,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) since = filemap_sample_wb_err(file->f_mapping); nfs_start_io_write(inode); result = generic_write_checks(iocb, from); - if (result > 0) { - current->backing_dev_info = inode_to_bdi(inode); + if (result > 0) result = generic_perform_write(iocb, from); - current->backing_dev_info = NULL; - } nfs_end_io_write(inode); if (result <= 0) goto out; diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index c481b14e4fd9..e296f804a9c4 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -1911,11 +1911,9 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) inode_lock(vi); /* We can write back this queue in page reclaim. 
*/ - current->backing_dev_info = inode_to_bdi(vi); err = ntfs_prepare_file_for_write(iocb, from); if (iov_iter_count(from) && !err) written = ntfs_perform_write(file, from, iocb->ki_pos); - current->backing_dev_info = NULL; inode_unlock(vi); iocb->ki_pos += written; if (likely(written > 0)) diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c index 9a3d55c367d9..86d16a2c8339 100644 --- a/fs/ntfs3/file.c +++ b/fs/ntfs3/file.c @@ -820,7 +820,6 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from) if (!pages) return -ENOMEM; - current->backing_dev_info = inode_to_bdi(inode); err = file_remove_privs(file); if (err) goto out; @@ -993,8 +992,6 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from) out: kfree(pages); - current->backing_dev_info = NULL; - if (err < 0) return err; diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index aede746541f8..431c3fd0e2b5 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -717,9 +717,6 @@ write_retry: if (ret) goto out; - /* We can write back this queue in page reclaim */ - current->backing_dev_info = inode_to_bdi(inode); - trace_xfs_file_buffered_write(iocb, from); ret = iomap_file_buffered_write(iocb, from, &xfs_buffered_write_iomap_ops); @@ -753,7 +750,6 @@ write_retry: goto write_retry; } - current->backing_dev_info = NULL; out: if (iolock) xfs_iunlock(ip, iolock); diff --git a/include/linux/sched.h b/include/linux/sched.h index eed5d65b8d1f..54780571fe9a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -41,7 +41,6 @@ /* task_struct member predeclarations (sorted alphabetically): */ struct audit_context; -struct backing_dev_info; struct bio_list; struct blk_plug; struct bpf_local_storage; @@ -1186,8 +1185,6 @@ struct task_struct { /* VM state: */ struct reclaim_state *reclaim_state; - struct backing_dev_info *backing_dev_info; - struct io_context *io_context; #ifdef CONFIG_COMPACTION diff --git a/mm/filemap.c b/mm/filemap.c index 570bc8c3db87..0d371ed91a68 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3964,8 +3964,6 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) ssize_t err; ssize_t status; - /* We can write back this queue in page reclaim */ - current->backing_dev_info = inode_to_bdi(inode); err = file_remove_privs(file); if (err) goto out; @@ -4026,7 +4024,6 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) iocb->ki_pos += written; } out: - current->backing_dev_info = NULL; return written ? written : err; } EXPORT_SYMBOL(__generic_file_write_iter); -- cgit v1.2.3 From 182c25e9c157f37bd0ab5a82fe2417e2223df459 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Jun 2023 16:58:55 +0200 Subject: filemap: update ki_pos in generic_perform_write All callers of generic_perform_write need to updated ki_pos, move it into common code. Link: https://lkml.kernel.org/r/20230601145904.1385409-4-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Xiubo Li Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Acked-by: Theodore Ts'o Acked-by: Darrick J. 
Wong Cc: Al Viro Cc: Andreas Gruenbacher Cc: Anna Schumaker Cc: Chao Yu Cc: Christian Brauner Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jens Axboe Cc: Johannes Thumshirn Cc: Matthew Wilcox Cc: Miklos Szeredi Cc: Miklos Szeredi Cc: Trond Myklebust Signed-off-by: Andrew Morton --- fs/ceph/file.c | 2 -- fs/ext4/file.c | 9 +++------ fs/f2fs/file.c | 1 - fs/nfs/file.c | 1 - mm/filemap.c | 8 ++++---- 5 files changed, 7 insertions(+), 14 deletions(-) (limited to 'mm') diff --git a/fs/ceph/file.c b/fs/ceph/file.c index c8ef72f723ba..767f4dfe7def 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1891,8 +1891,6 @@ retry_snap: * can not run at the same time */ written = generic_perform_write(iocb, from); - if (likely(written >= 0)) - iocb->ki_pos = pos + written; ceph_end_io_write(inode); } diff --git a/fs/ext4/file.c b/fs/ext4/file.c index bc430270c23c..ea0ada3985cb 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -289,12 +289,9 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb, out: inode_unlock(inode); - if (likely(ret > 0)) { - iocb->ki_pos += ret; - ret = generic_write_sync(iocb, ret); - } - - return ret; + if (unlikely(ret <= 0)) + return ret; + return generic_write_sync(iocb, ret); } static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset, diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 4f423d367a44..7134fe8bd008 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -4520,7 +4520,6 @@ static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb, ret = generic_perform_write(iocb, from); if (ret > 0) { - iocb->ki_pos += ret; f2fs_update_iostat(F2FS_I_SB(inode), inode, APP_BUFFERED_IO, ret); } diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 665ce3fc62ea..e8bb4c48a321 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -655,7 +655,6 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) goto out; written = result; - iocb->ki_pos += written; nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written); if (mntflags & NFS_MOUNT_WRITE_EAGER) { diff --git a/mm/filemap.c b/mm/filemap.c index 0d371ed91a68..3a80a69fa9fa 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3930,7 +3930,10 @@ again: balance_dirty_pages_ratelimited(mapping); } while (iov_iter_count(i)); - return written ? written : status; + if (!written) + return status; + iocb->ki_pos += written; + return written; } EXPORT_SYMBOL(generic_perform_write); @@ -4007,7 +4010,6 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) endbyte = pos + status - 1; err = filemap_write_and_wait_range(mapping, pos, endbyte); if (err == 0) { - iocb->ki_pos = endbyte + 1; written += status; invalidate_mapping_pages(mapping, pos >> PAGE_SHIFT, @@ -4020,8 +4022,6 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) } } else { written = generic_perform_write(iocb, from); - if (likely(written > 0)) - iocb->ki_pos += written; } out: return written ? written : err; -- cgit v1.2.3 From 3c435a0fe35c220bec442dffff04a64aacf952b0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Jun 2023 16:58:56 +0200 Subject: filemap: add a kiocb_write_and_wait helper Factor out a helper that does filemap_write_and_wait_range for the range covered by a read kiocb, or returns -EAGAIN if the kiocb is marked as nowait and there would be pages to write. Link: https://lkml.kernel.org/r/20230601145904.1385409-5-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Acked-by: Darrick J. 
Wong Cc: Al Viro Cc: Andreas Gruenbacher Cc: Anna Schumaker Cc: Chao Yu Cc: Christian Brauner Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jens Axboe Cc: Johannes Thumshirn Cc: Matthew Wilcox Cc: Miklos Szeredi Cc: Miklos Szeredi Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Xiubo Li Signed-off-by: Andrew Morton --- block/fops.c | 18 +++--------------- include/linux/pagemap.h | 2 ++ mm/filemap.c | 30 ++++++++++++++++++------------ 3 files changed, 23 insertions(+), 27 deletions(-) (limited to 'mm') diff --git a/block/fops.c b/block/fops.c index 58d0aebc7313..575171049c5d 100644 --- a/block/fops.c +++ b/block/fops.c @@ -576,21 +576,9 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) goto reexpand; /* skip atime */ if (iocb->ki_flags & IOCB_DIRECT) { - struct address_space *mapping = iocb->ki_filp->f_mapping; - - if (iocb->ki_flags & IOCB_NOWAIT) { - if (filemap_range_needs_writeback(mapping, pos, - pos + count - 1)) { - ret = -EAGAIN; - goto reexpand; - } - } else { - ret = filemap_write_and_wait_range(mapping, pos, - pos + count - 1); - if (ret < 0) - goto reexpand; - } - + ret = kiocb_write_and_wait(iocb, count); + if (ret < 0) + goto reexpand; file_accessed(iocb->ki_filp); ret = blkdev_direct_IO(iocb, to); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index c1ae5ebc375f..b6a12ca108b7 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -30,6 +30,7 @@ static inline void invalidate_remote_inode(struct inode *inode) int invalidate_inode_pages2(struct address_space *mapping); int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); + int write_inode_now(struct inode *, int sync); int filemap_fdatawrite(struct address_space *); int filemap_flush(struct address_space *); @@ -54,6 +55,7 @@ int filemap_check_errors(struct address_space *mapping); void __filemap_set_wb_err(struct address_space *mapping, int err); int filemap_fdatawrite_wbc(struct address_space *mapping, struct writeback_control *wbc); +int kiocb_write_and_wait(struct kiocb *iocb, size_t count); static inline int filemap_write_and_wait(struct address_space *mapping) { diff --git a/mm/filemap.c b/mm/filemap.c index 3a80a69fa9fa..5566e10ca1a7 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2735,6 +2735,21 @@ put_folios: } EXPORT_SYMBOL_GPL(filemap_read); +int kiocb_write_and_wait(struct kiocb *iocb, size_t count) +{ + struct address_space *mapping = iocb->ki_filp->f_mapping; + loff_t pos = iocb->ki_pos; + loff_t end = pos + count - 1; + + if (iocb->ki_flags & IOCB_NOWAIT) { + if (filemap_range_needs_writeback(mapping, pos, end)) + return -EAGAIN; + return 0; + } + + return filemap_write_and_wait_range(mapping, pos, end); +} + /** * generic_file_read_iter - generic filesystem read routine * @iocb: kernel I/O control block @@ -2770,18 +2785,9 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; - if (iocb->ki_flags & IOCB_NOWAIT) { - if (filemap_range_needs_writeback(mapping, iocb->ki_pos, - iocb->ki_pos + count - 1)) - return -EAGAIN; - } else { - retval = filemap_write_and_wait_range(mapping, - iocb->ki_pos, - iocb->ki_pos + count - 1); - if (retval < 0) - return retval; - } - + retval = kiocb_write_and_wait(iocb, count); + if (retval < 0) + return retval; file_accessed(file); retval = mapping->a_ops->direct_IO(iocb, iter); -- cgit v1.2.3 From e003f74afbd2feadbb9ffbf9135e2d2fb5d320a5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Jun 
2023 16:58:57 +0200 Subject: filemap: add a kiocb_invalidate_pages helper Factor out a helper that calls filemap_write_and_wait_range and invalidate_inode_pages2_range for the range covered by a write kiocb or returns -EAGAIN if the kiocb is marked as nowait and there would be pages to write or invalidate. Link: https://lkml.kernel.org/r/20230601145904.1385409-6-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Acked-by: Darrick J. Wong Cc: Al Viro Cc: Andreas Gruenbacher Cc: Anna Schumaker Cc: Chao Yu Cc: Christian Brauner Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jens Axboe Cc: Johannes Thumshirn Cc: Matthew Wilcox Cc: Miklos Szeredi Cc: Miklos Szeredi Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Xiubo Li Signed-off-by: Andrew Morton --- include/linux/pagemap.h | 1 + mm/filemap.c | 48 ++++++++++++++++++++++++++++-------------------- 2 files changed, 29 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index b6a12ca108b7..7b66a67dba51 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -30,6 +30,7 @@ static inline void invalidate_remote_inode(struct inode *inode) int invalidate_inode_pages2(struct address_space *mapping); int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); +int kiocb_invalidate_pages(struct kiocb *iocb, size_t count); int write_inode_now(struct inode *, int sync); int filemap_fdatawrite(struct address_space *); diff --git a/mm/filemap.c b/mm/filemap.c index 5566e10ca1a7..6ba6233c4bbb 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2750,6 +2750,33 @@ int kiocb_write_and_wait(struct kiocb *iocb, size_t count) return filemap_write_and_wait_range(mapping, pos, end); } +int kiocb_invalidate_pages(struct kiocb *iocb, size_t count) +{ + struct address_space *mapping = iocb->ki_filp->f_mapping; + loff_t pos = iocb->ki_pos; + loff_t end = pos + count - 1; + int ret; + + if (iocb->ki_flags & IOCB_NOWAIT) { + /* we could block if there are any pages in the range */ + if (filemap_range_has_page(mapping, pos, end)) + return -EAGAIN; + } else { + ret = filemap_write_and_wait_range(mapping, pos, end); + if (ret) + return ret; + } + + /* + * After a write we want buffered reads to be sure to go to disk to get + * the new data. We invalidate clean cached page from the region we're + * about to write. We do this *before* the write so that we can return + * without clobbering -EIOCBQUEUED from ->direct_IO(). + */ + return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, + end >> PAGE_SHIFT); +} + /** * generic_file_read_iter - generic filesystem read routine * @iocb: kernel I/O control block @@ -3793,30 +3820,11 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) write_len = iov_iter_count(from); end = (pos + write_len - 1) >> PAGE_SHIFT; - if (iocb->ki_flags & IOCB_NOWAIT) { - /* If there are pages to writeback, return */ - if (filemap_range_has_page(file->f_mapping, pos, - pos + write_len - 1)) - return -EAGAIN; - } else { - written = filemap_write_and_wait_range(mapping, pos, - pos + write_len - 1); - if (written) - goto out; - } - - /* - * After a write we want buffered reads to be sure to go to disk to get - * the new data. We invalidate clean cached page from the region we're - * about to write. We do this *before* the write so that we can return - * without clobbering -EIOCBQUEUED from ->direct_IO(). 
- */ - written = invalidate_inode_pages2_range(mapping, - pos >> PAGE_SHIFT, end); /* * If a page can not be invalidated, return 0 to fall back * to buffered write. */ + written = kiocb_invalidate_pages(iocb, write_len); if (written) { if (written == -EBUSY) return 0; -- cgit v1.2.3 From c402a9a9430b670926decbb284b756ee6f47c1ec Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Jun 2023 16:58:58 +0200 Subject: filemap: add a kiocb_invalidate_post_direct_write helper Add a helper to invalidate page cache after a dio write. Link: https://lkml.kernel.org/r/20230601145904.1385409-7-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Acked-by: Darrick J. Wong Cc: Al Viro Cc: Andreas Gruenbacher Cc: Anna Schumaker Cc: Chao Yu Cc: Christian Brauner Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jens Axboe Cc: Johannes Thumshirn Cc: Matthew Wilcox Cc: Miklos Szeredi Cc: Miklos Szeredi Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Xiubo Li Signed-off-by: Andrew Morton --- fs/direct-io.c | 10 ++-------- fs/iomap/direct-io.c | 12 ++---------- include/linux/fs.h | 5 ----- include/linux/pagemap.h | 1 + mm/filemap.c | 37 ++++++++++++++++++++----------------- 5 files changed, 25 insertions(+), 40 deletions(-) (limited to 'mm') diff --git a/fs/direct-io.c b/fs/direct-io.c index 0b380bb8a81e..4f9069aee0fe 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -285,14 +285,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags) * zeros from unwritten extents. */ if (flags & DIO_COMPLETE_INVALIDATE && - ret > 0 && dio_op == REQ_OP_WRITE && - dio->inode->i_mapping->nrpages) { - err = invalidate_inode_pages2_range(dio->inode->i_mapping, - offset >> PAGE_SHIFT, - (offset + ret - 1) >> PAGE_SHIFT); - if (err) - dio_warn_stale_pagecache(dio->iocb->ki_filp); - } + ret > 0 && dio_op == REQ_OP_WRITE) + kiocb_invalidate_post_direct_write(dio->iocb, ret); inode_dio_end(dio->inode); diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 6207a59d2162..0795c54a745b 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -81,7 +81,6 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio) { const struct iomap_dio_ops *dops = dio->dops; struct kiocb *iocb = dio->iocb; - struct inode *inode = file_inode(iocb->ki_filp); loff_t offset = iocb->ki_pos; ssize_t ret = dio->error; @@ -108,15 +107,8 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio) * ->end_io() when necessary, otherwise a racing buffer read would cache * zeros from unwritten extents. */ - if (!dio->error && dio->size && - (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) { - int err; - err = invalidate_inode_pages2_range(inode->i_mapping, - offset >> PAGE_SHIFT, - (offset + dio->size - 1) >> PAGE_SHIFT); - if (err) - dio_warn_stale_pagecache(iocb->ki_filp); - } + if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE)) + kiocb_invalidate_post_direct_write(iocb, dio->size); inode_dio_end(file_inode(iocb->ki_filp)); diff --git a/include/linux/fs.h b/include/linux/fs.h index 86b50271b4f7..4f196f827d9d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2843,11 +2843,6 @@ static inline void inode_dio_end(struct inode *inode) wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); } -/* - * Warn about a page cache invalidation failure diring a direct I/O write. 
- */ -void dio_warn_stale_pagecache(struct file *filp); - extern void inode_set_flags(struct inode *inode, unsigned int flags, unsigned int mask); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 7b66a67dba51..716953ee1ebd 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -31,6 +31,7 @@ int invalidate_inode_pages2(struct address_space *mapping); int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); int kiocb_invalidate_pages(struct kiocb *iocb, size_t count); +void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count); int write_inode_now(struct inode *, int sync); int filemap_fdatawrite(struct address_space *); diff --git a/mm/filemap.c b/mm/filemap.c index 6ba6233c4bbb..b45506f74133 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3789,7 +3789,7 @@ EXPORT_SYMBOL(read_cache_page_gfp); /* * Warn about a page cache invalidation failure during a direct I/O write. */ -void dio_warn_stale_pagecache(struct file *filp) +static void dio_warn_stale_pagecache(struct file *filp) { static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST); char pathname[128]; @@ -3806,19 +3806,23 @@ void dio_warn_stale_pagecache(struct file *filp) } } +void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count) +{ + struct address_space *mapping = iocb->ki_filp->f_mapping; + + if (mapping->nrpages && + invalidate_inode_pages2_range(mapping, + iocb->ki_pos >> PAGE_SHIFT, + (iocb->ki_pos + count - 1) >> PAGE_SHIFT)) + dio_warn_stale_pagecache(iocb->ki_filp); +} + ssize_t generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) { - struct file *file = iocb->ki_filp; - struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; - loff_t pos = iocb->ki_pos; - ssize_t written; - size_t write_len; - pgoff_t end; - - write_len = iov_iter_count(from); - end = (pos + write_len - 1) >> PAGE_SHIFT; + struct address_space *mapping = iocb->ki_filp->f_mapping; + size_t write_len = iov_iter_count(from); + ssize_t written; /* * If a page can not be invalidated, return 0 to fall back @@ -3828,7 +3832,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) if (written) { if (written == -EBUSY) return 0; - goto out; + return written; } written = mapping->a_ops->direct_IO(iocb, from); @@ -3850,11 +3854,11 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) * * Skip invalidation for async writes or if mapping has no pages. */ - if (written > 0 && mapping->nrpages && - invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end)) - dio_warn_stale_pagecache(file); - if (written > 0) { + struct inode *inode = mapping->host; + loff_t pos = iocb->ki_pos; + + kiocb_invalidate_post_direct_write(iocb, written); pos += written; write_len -= written; if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { @@ -3865,7 +3869,6 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) } if (written != -EIOCBQUEUED) iov_iter_revert(from, write_len - iov_iter_count(from)); -out: return written; } EXPORT_SYMBOL(generic_file_direct_write); -- cgit v1.2.3 From 44fff0fa08ec5a6d9d5fb05443a36d854d0ece4d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Jun 2023 16:59:01 +0200 Subject: fs: factor out a direct_write_fallback helper Add a helper dealing with handling the syncing of a buffered write fallback for direct I/O. 
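The intended calling convention is easiest to see as a sketch (modelled on the __generic_file_write_iter() conversion later in this patch; variable setup and error handling are trimmed, so this is illustrative rather than complete): the filesystem attempts the direct write first, and only when it stops short does it run the buffered fallback and hand both byte counts to the helper, which reconciles them and syncs/invalidates the affected page cache range:

	ssize_t ret = generic_file_direct_write(iocb, from);

	/* Direct I/O failed outright, completed fully, or hit DAX: done. */
	if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode))
		return ret;
	/* Buffered fallback for the remainder; the helper merges the two
	 * counts and writes back/invalidates the range it covered. */
	return direct_write_fallback(iocb, from, ret,
			generic_perform_write(iocb, from));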
Link: https://lkml.kernel.org/r/20230601145904.1385409-10-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Miklos Szeredi Reviewed-by: Darrick J. Wong Cc: Al Viro Cc: Andreas Gruenbacher Cc: Anna Schumaker Cc: Chao Yu Cc: Christian Brauner Cc: Hannes Reinecke Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jens Axboe Cc: Johannes Thumshirn Cc: Matthew Wilcox Cc: Miklos Szeredi Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Xiubo Li Signed-off-by: Andrew Morton --- fs/libfs.c | 41 +++++++++++++++++++++++++++++++++ include/linux/fs.h | 2 ++ mm/filemap.c | 66 +++++++++++++----------------------------------------- 3 files changed, 58 insertions(+), 51 deletions(-) (limited to 'mm') diff --git a/fs/libfs.c b/fs/libfs.c index 89cf614a3271..5b851315eeed 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -1613,3 +1613,44 @@ u64 inode_query_iversion(struct inode *inode) return cur >> I_VERSION_QUERIED_SHIFT; } EXPORT_SYMBOL(inode_query_iversion); + +ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter, + ssize_t direct_written, ssize_t buffered_written) +{ + struct address_space *mapping = iocb->ki_filp->f_mapping; + loff_t pos = iocb->ki_pos - buffered_written; + loff_t end = iocb->ki_pos - 1; + int err; + + /* + * If the buffered write fallback returned an error, we want to return + * the number of bytes which were written by direct I/O, or the error + * code if that was zero. + * + * Note that this differs from normal direct-io semantics, which will + * return -EFOO even if some bytes were written. + */ + if (unlikely(buffered_written < 0)) { + if (direct_written) + return direct_written; + return buffered_written; + } + + /* + * We need to ensure that the page cache pages are written to disk and + * invalidated to preserve the expected O_DIRECT semantics. 
+ */ + err = filemap_write_and_wait_range(mapping, pos, end); + if (err < 0) { + /* + * We don't know how much we wrote, so just return the number of + * bytes which were direct-written + */ + if (direct_written) + return direct_written; + return err; + } + invalidate_mapping_pages(mapping, pos >> PAGE_SHIFT, end >> PAGE_SHIFT); + return direct_written + buffered_written; +} +EXPORT_SYMBOL_GPL(direct_write_fallback); diff --git a/include/linux/fs.h b/include/linux/fs.h index 4f196f827d9d..c363f8687c7e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2744,6 +2744,8 @@ extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); ssize_t generic_perform_write(struct kiocb *, struct iov_iter *); +ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter, + ssize_t direct_written, ssize_t buffered_written); ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, rwf_t flags); diff --git a/mm/filemap.c b/mm/filemap.c index b45506f74133..916b7c6444fe 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3979,23 +3979,19 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; - ssize_t written = 0; - ssize_t err; - ssize_t status; + struct inode *inode = mapping->host; + ssize_t ret; - err = file_remove_privs(file); - if (err) - goto out; + ret = file_remove_privs(file); + if (ret) + return ret; - err = file_update_time(file); - if (err) - goto out; + ret = file_update_time(file); + if (ret) + return ret; if (iocb->ki_flags & IOCB_DIRECT) { - loff_t pos, endbyte; - - written = generic_file_direct_write(iocb, from); + ret = generic_file_direct_write(iocb, from); /* * If the write stopped short of completing, fall back to * buffered writes. Some filesystems do this for writes to @@ -4003,45 +3999,13 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) * not succeed (even if it did, DAX does not handle dirty * page-cache pages correctly). */ - if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) - goto out; - - pos = iocb->ki_pos; - status = generic_perform_write(iocb, from); - /* - * If generic_perform_write() returned a synchronous error - * then we want to return the number of bytes which were - * direct-written, or the error code if that was zero. Note - * that this differs from normal direct-io semantics, which - * will return -EFOO even if some bytes were written. - */ - if (unlikely(status < 0)) { - err = status; - goto out; - } - /* - * We need to ensure that the page cache pages are written to - * disk and invalidated to preserve the expected O_DIRECT - * semantics. - */ - endbyte = pos + status - 1; - err = filemap_write_and_wait_range(mapping, pos, endbyte); - if (err == 0) { - written += status; - invalidate_mapping_pages(mapping, - pos >> PAGE_SHIFT, - endbyte >> PAGE_SHIFT); - } else { - /* - * We don't know how much we wrote, so just return - * the number of bytes which were direct-written - */ - } - } else { - written = generic_perform_write(iocb, from); + if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode)) + return ret; + return direct_write_fallback(iocb, from, ret, + generic_perform_write(iocb, from)); } -out: - return written ? 
written : err; + + return generic_perform_write(iocb, from); } EXPORT_SYMBOL(__generic_file_write_iter); -- cgit v1.2.3 From 501b26510ae3bbdf9333b83addcd4e5c4214346d Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 30 May 2023 11:52:35 -0300 Subject: vmstat: allow_direct_reclaim should use zone_page_state_snapshot A customer provided evidence indicating that a process was stalled in direct reclaim: - The process was trapped in throttle_direct_reclaim(). The function wait_event_killable() was called to wait condition allow_direct_reclaim(pgdat) for current node to be true. The allow_direct_reclaim(pgdat) examined the number of free pages on the node by zone_page_state() which just returns value in zone->vm_stat[NR_FREE_PAGES]. - On node #1, zone->vm_stat[NR_FREE_PAGES] was 0. However, the freelist on this node was not empty. - This inconsistent of vmstat value was caused by percpu vmstat on nohz_full cpus. Every increment/decrement of vmstat is performed on percpu vmstat counter at first, then pooled diffs are cumulated to the zone's vmstat counter in timely manner. However, on nohz_full cpus (in case of this customer's system, 48 of 52 cpus) these pooled diffs were not cumulated once the cpu had no event on it so that the cpu started sleeping infinitely. I checked percpu vmstat and found there were total 69 counts not cumulated to the zone's vmstat counter yet. - In this situation, kswapd did not help the trapped process. In pgdat_balanced(), zone_wakermark_ok_safe() examined the number of free pages on the node by zone_page_state_snapshot() which checks pending counts on percpu vmstat. Therefore kswapd could know there were 69 free pages correctly. Since zone->_watermark = {8, 20, 32}, kswapd did not work because 69 was greater than 32 as high watermark. Change allow_direct_reclaim to use zone_page_state_snapshot, which allows a more precise version of the vmstat counters to be used. allow_direct_reclaim will only be called from try_to_free_pages, which is not a hot path. Testing: Due to difficulties accessing the system, it has not been possible for the reproducer to test the patch (however its clear from available data and analysis that it should fix it). Link: https://lkml.kernel.org/r/20230530145335.677325196@redhat.com Reviewed-by: Michal Hocko Reviewed-by: Aaron Tomlin Signed-off-by: Marcelo Tosatti Cc: Christoph Lameter Cc: Frederic Weisbecker Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 3024b4619b65..5993c0bae162 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -6898,7 +6898,7 @@ static bool allow_direct_reclaim(pg_data_t *pgdat) continue; pfmemalloc_reserve += min_wmark_pages(zone); - free_pages += zone_page_state(zone, NR_FREE_PAGES); + free_pages += zone_page_state_snapshot(zone, NR_FREE_PAGES); } /* If there are no reserves (unexpected config) then do not throttle */ -- cgit v1.2.3 From b3f78e74986546a6da5d28c24e627d95d17f79ec Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Fri, 2 Jun 2023 10:29:46 +0100 Subject: mm: vmalloc must set pte via arch code Patch series "Fixes for pte encapsulation bypasses", v3. A series to improve the encapsulation of pte entries by disallowing non-arch code from directly dereferencing pte_t pointers. This patch (of 4): It is bad practice to directly set pte entries within a pte table. 
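The kind of store this refers to is a plain assignment through the pte_t pointer, as vmap_pfn_apply() did before this patch (simplified from the hunk below; pfn and prot stand in for the vmap_pfn_data fields):

	/* Writes the entry directly, bypassing the arch page-table hooks. */
	*pte = pte_mkspecial(pfn_pte(pfn, prot));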
Instead all modifications must go through arch-provided helpers such as set_pte_at() to give the arch code visibility and allow it to check (and potentially modify) the operation. Link: https://lkml.kernel.org/r/20230602092949.545577-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20230602092949.545577-2-ryan.roberts@arm.com Fixes: 3e9a9e256b1e ("mm: add a vmap_pfn function") Signed-off-by: Ryan Roberts Reviewed-by: Zi Yan Acked-by: Lorenzo Stoakes Reviewed-by: Christoph Hellwig Reviewed-by: Uladzislau Rezki (Sony) Reviewed-by: Mike Rapoport (IBM) Cc: Kirill A. Shutemov Cc: Matthew Wilcox (Oracle) Cc: SeongJae Park Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/vmalloc.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 7c32435219b1..9d64a4098c36 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2944,10 +2944,16 @@ struct vmap_pfn_data { static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) { struct vmap_pfn_data *data = private; + unsigned long pfn = data->pfns[data->idx]; + pte_t ptent; - if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx]))) + if (WARN_ON_ONCE(pfn_valid(pfn))) return -EINVAL; - *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot)); + + ptent = pte_mkspecial(pfn_pte(pfn, data->prot)); + set_pte_at(&init_mm, addr, pte, ptent); + + data->idx++; return 0; } -- cgit v1.2.3 From c11d34fa139e4b0fb4249a30f37b178353533fa1 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Fri, 2 Jun 2023 10:29:47 +0100 Subject: mm/damon/ops-common: atomically test and clear young on ptes and pmds It is racy to non-atomically read a pte, then clear the young bit, then write it back as this could discard dirty information. Further, it is bad practice to directly set a pte entry within a table. Instead clearing young must go through the arch-provided helper, ptep_test_and_clear_young() to ensure it is modified atomically and to give the arch code visibility and allow it to check (and potentially modify) the operation. Link: https://lkml.kernel.org/r/20230602092949.545577-3-ryan.roberts@arm.com Fixes: 3f49584b262c ("mm/damon: implement primitives for the virtual memory address spaces"). Signed-off-by: Ryan Roberts Reviewed-by: Zi Yan Reviewed-by: SeongJae Park Reviewed-by: Mike Rapoport (IBM) Cc: Christoph Hellwig Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Uladzislau Rezki (Sony) Cc: Yu Zhao Cc: Signed-off-by: Andrew Morton --- mm/damon/ops-common.c | 16 ++++++---------- mm/damon/ops-common.h | 4 ++-- mm/damon/paddr.c | 4 ++-- mm/damon/vaddr.c | 4 ++-- 4 files changed, 12 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c index cc63cf953636..acc264b97903 100644 --- a/mm/damon/ops-common.c +++ b/mm/damon/ops-common.c @@ -37,7 +37,7 @@ struct folio *damon_get_folio(unsigned long pfn) return folio; } -void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr) +void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr) { bool referenced = false; struct folio *folio = damon_get_folio(pte_pfn(*pte)); @@ -45,13 +45,11 @@ void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr) if (!folio) return; - if (pte_young(*pte)) { + if (ptep_test_and_clear_young(vma, addr, pte)) referenced = true; - *pte = pte_mkold(*pte); - } #ifdef CONFIG_MMU_NOTIFIER - if (mmu_notifier_clear_young(mm, addr, addr + PAGE_SIZE)) + if (mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE)) referenced = true; #endif /* CONFIG_MMU_NOTIFIER */ @@ -62,7 +60,7 @@ void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr) folio_put(folio); } -void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr) +void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE bool referenced = false; @@ -71,13 +69,11 @@ void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr) if (!folio) return; - if (pmd_young(*pmd)) { + if (pmdp_test_and_clear_young(vma, addr, pmd)) referenced = true; - *pmd = pmd_mkold(*pmd); - } #ifdef CONFIG_MMU_NOTIFIER - if (mmu_notifier_clear_young(mm, addr, addr + HPAGE_PMD_SIZE)) + if (mmu_notifier_clear_young(vma->vm_mm, addr, addr + HPAGE_PMD_SIZE)) referenced = true; #endif /* CONFIG_MMU_NOTIFIER */ diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h index 14f4bc69f29b..18d837d11bce 100644 --- a/mm/damon/ops-common.h +++ b/mm/damon/ops-common.h @@ -9,8 +9,8 @@ struct folio *damon_get_folio(unsigned long pfn); -void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr); -void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr); +void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr); +void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr); int damon_cold_score(struct damon_ctx *c, struct damon_region *r, struct damos *s); diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 467b99166b43..5b3a3463d078 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -24,9 +24,9 @@ static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma, while (page_vma_mapped_walk(&pvmw)) { addr = pvmw.address; if (pvmw.pte) - damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr); + damon_ptep_mkold(pvmw.pte, vma, addr); else - damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr); + damon_pmdp_mkold(pvmw.pmd, vma, addr); } return true; } diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 1fec16d7263e..37994fb6120c 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -311,7 +311,7 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr, } if (pmd_trans_huge(*pmd)) { - damon_pmdp_mkold(pmd, walk->mm, addr); + damon_pmdp_mkold(pmd, walk->vma, addr); spin_unlock(ptl); return 0; } @@ -323,7 
+323,7 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr, pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); if (!pte_present(*pte)) goto out; - damon_ptep_mkold(pte, walk->mm, addr); + damon_ptep_mkold(pte, walk->vma, addr); out: pte_unmap_unlock(pte, ptl); return 0; -- cgit v1.2.3 From fa8c919dac3f5f325b17f9fcf8ac7dd899992598 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Fri, 2 Jun 2023 10:29:48 +0100 Subject: mm/damon/ops-common: refactor to use {pte|pmd}p_clear_young_notify() With the fix in place to atomically test and clear young on ptes and pmds, simplify the code to handle the clearing for both the primary mmu and the mmu notifier with a single API call. Link: https://lkml.kernel.org/r/20230602092949.545577-4-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: Yu Zhao Reviewed-by: SeongJae Park Cc: Christoph Hellwig Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (IBM) Cc: Uladzislau Rezki (Sony) Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/damon/ops-common.c | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c index acc264b97903..d4ab81229136 100644 --- a/mm/damon/ops-common.c +++ b/mm/damon/ops-common.c @@ -39,21 +39,12 @@ struct folio *damon_get_folio(unsigned long pfn) void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr) { - bool referenced = false; struct folio *folio = damon_get_folio(pte_pfn(*pte)); if (!folio) return; - if (ptep_test_and_clear_young(vma, addr, pte)) - referenced = true; - -#ifdef CONFIG_MMU_NOTIFIER - if (mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE)) - referenced = true; -#endif /* CONFIG_MMU_NOTIFIER */ - - if (referenced) + if (ptep_clear_young_notify(vma, addr, pte)) folio_set_young(folio); folio_set_idle(folio); @@ -63,21 +54,12 @@ void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - bool referenced = false; struct folio *folio = damon_get_folio(pmd_pfn(*pmd)); if (!folio) return; - if (pmdp_test_and_clear_young(vma, addr, pmd)) - referenced = true; - -#ifdef CONFIG_MMU_NOTIFIER - if (mmu_notifier_clear_young(vma->vm_mm, addr, addr + HPAGE_PMD_SIZE)) - referenced = true; -#endif /* CONFIG_MMU_NOTIFIER */ - - if (referenced) + if (pmdp_clear_young_notify(vma, addr, pmd)) folio_set_young(folio); folio_set_idle(folio); -- cgit v1.2.3 From 3b65f437d9e8dd696a2b88e7afcd51385532ab35 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Fri, 2 Jun 2023 10:29:49 +0100 Subject: mm: fix failure to unmap pte on highmem systems The loser of a race to service a pte for a device private entry in the swap path previously unlocked the ptl, but failed to unmap the pte. This only affects highmem systems since unmapping a pte is a noop on non-highmem systems. Link: https://lkml.kernel.org/r/20230602092949.545577-5-ryan.roberts@arm.com Fixes: 16ce101db85d ("mm/memory.c: fix race when faulting a device private page") Signed-off-by: Ryan Roberts Reviewed-by: Zi Yan Reviewed-by: Mike Rapoport (IBM) Cc: Christoph Hellwig Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: SeongJae Park Cc: Uladzislau Rezki (Sony) Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/memory.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 4dd09f930c61..36082fd42df4 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3728,10 +3728,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) vmf->page = pfn_swap_entry_to_page(entry); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { - spin_unlock(vmf->ptl); - goto out; - } + if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) + goto unlock; /* * Get a page reference while we know the page can't be -- cgit v1.2.3 From 12dd992accd96c09ec765a3bb4706a3ed24ae5b3 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 3 Jun 2023 19:25:58 +0800 Subject: mm: page_alloc: remove unneeded header files Remove some unneeded header files. No functional change intended. Link: https://lkml.kernel.org/r/20230603112558.213694-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- mm/page_alloc.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 091a6cb44265..6a18f2232e3e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -36,8 +35,6 @@ #include #include #include -#include -#include #include #include #include @@ -52,7 +49,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 396faf88981917f975773d8589ce4f6e6c679e13 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 3 Jun 2023 15:21:16 +0800 Subject: memcg: use helper macro FLUSH_TIME Use helper macro FLUSH_TIME to indicate the flush time to improve the readability a bit. No functional change intended. 
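For reference, FLUSH_TIME is the flush-interval macro already defined for the periodic memcg stats work in mm/memcontrol.c; it is assumed here to expand to the same two-second period as the literal being replaced, i.e. roughly:

	#define FLUSH_TIME (2UL*HZ)

which is why the substitution is a pure readability change.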
Link: https://lkml.kernel.org/r/20230603072116.1101690-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Acked-by: Shakeel Butt Reviewed-by: Muchun Song Reviewed-by: David Hildenbrand Acked-by: Roman Gushchin Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/memcontrol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6ee433be4c3b..caf6ab55f8e3 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5428,7 +5428,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css) if (unlikely(mem_cgroup_is_root(memcg))) queue_delayed_work(system_unbound_wq, &stats_flush_dwork, - 2UL*HZ); + FLUSH_TIME); lru_gen_online_memcg(memcg); return 0; offline_kmem: -- cgit v1.2.3 From 5e07472583840308949ce807b11274de15cb79a0 Mon Sep 17 00:00:00 2001 From: Liam Ni Date: Sat, 3 Jun 2023 10:31:16 +0800 Subject: mm/early_ioremap.c: improve the execution efficiency of early_ioremap_setup() Reduce the number of invalid loops of the function early_ioremap_setup() to improve the efficiency of function execution Link: https://lkml.kernel.org/r/CACZJ9cU6t5sLoDwE6_XOg+UJLpZt4+qHfjYN2bA0s+3y9y6pQQ@mail.gmail.com Signed-off-by: LiamNi Signed-off-by: Andrew Morton --- mm/early_ioremap.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c index 9bc12e526ed0..ce06b2884789 100644 --- a/mm/early_ioremap.c +++ b/mm/early_ioremap.c @@ -72,12 +72,10 @@ void __init early_ioremap_setup(void) { int i; - for (i = 0; i < FIX_BTMAPS_SLOTS; i++) - if (WARN_ON(prev_map[i])) - break; - - for (i = 0; i < FIX_BTMAPS_SLOTS; i++) + for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { + WARN_ON_ONCE(prev_map[i]); slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); + } } static int __init check_early_ioremap_leak(void) -- cgit v1.2.3 From 54cbbbf3faf610fb4eba6f8d39d933bcbfc6f4de Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 4 May 2023 22:27:51 +0100 Subject: mm/mmap: separate writenotify and dirty tracking logic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "mm/gup: disallow GUP writing to file-backed mappings by default", v9. Writing to file-backed mappings which require folio dirty tracking using GUP is a fundamentally broken operation, as kernel write access to GUP mappings do not adhere to the semantics expected by a file system. A GUP caller uses the direct mapping to access the folio, which does not cause write notify to trigger, nor does it enforce that the caller marks the folio dirty. The problem arises when, after an initial write to the folio, writeback results in the folio being cleaned and then the caller, via the GUP interface, writes to the folio again. As a result of the use of this secondary, direct, mapping to the folio no write notify will occur, and if the caller does mark the folio dirty, this will be done so unexpectedly. For example, consider the following scenario:- 1. A folio is written to via GUP which write-faults the memory, notifying the file system and dirtying the folio. 2. Later, writeback is triggered, resulting in the folio being cleaned and the PTE being marked read-only. 3. The GUP caller writes to the folio, as it is mapped read/write via the direct mapping. 4. The GUP caller, now done with the page, unpins it and sets it dirty (though it does not have to). This change updates both the PUP FOLL_LONGTERM slow and fast APIs. 
As pin_user_pages_fast_only() does not exist, we can rely on a slightly imperfect whitelisting in the PUP-fast case and fall back to the slow case should this fail. This patch (of 3): vma_wants_writenotify() is specifically intended for setting PTE page table flags, accounting for existing page table flag state and whether the underlying filesystem performs dirty tracking for a file-backed mapping. Everything is predicated firstly on whether the mapping is shared writable, as this is the only instance where dirty tracking is pertinent - MAP_PRIVATE mappings will always be CoW'd and unshared, and read-only file-backed shared mappings cannot be written to, even with FOLL_FORCE. All other checks are in line with existing logic, though now separated into checks eplicitily for dirty tracking and those for determining how to set page table flags. We make this change so we can perform checks in the GUP logic to determine which mappings might be problematic when written to. Link: https://lkml.kernel.org/r/cover.1683235180.git.lstoakes@gmail.com Link: https://lkml.kernel.org/r/0f218370bd49b4e6bbfbb499f7c7b92c26ba1ceb.1683235180.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Reviewed-by: John Hubbard Reviewed-by: Mika Penttilä Reviewed-by: Jan Kara Reviewed-by: Jason Gunthorpe Acked-by: David Hildenbrand Cc: Kirill A . Shutemov Cc: Peter Zijlstra Signed-off-by: Andrew Morton --- include/linux/mm.h | 1 + mm/mmap.c | 58 +++++++++++++++++++++++++++++++++++++++++++----------- 2 files changed, 47 insertions(+), 12 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index 62bb3272e531..66032f0d515c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2461,6 +2461,7 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma, #define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \ MM_CP_UFFD_WP_RESOLVE) +bool vma_needs_dirty_tracking(struct vm_area_struct *vma); int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma) { diff --git a/mm/mmap.c b/mm/mmap.c index e1624cb2c04e..f084b7940431 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1454,6 +1454,48 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) } #endif /* __ARCH_WANT_SYS_OLD_MMAP */ +static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops) +{ + return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite); +} + +static bool vma_is_shared_writable(struct vm_area_struct *vma) +{ + return (vma->vm_flags & (VM_WRITE | VM_SHARED)) == + (VM_WRITE | VM_SHARED); +} + +static bool vma_fs_can_writeback(struct vm_area_struct *vma) +{ + /* No managed pages to writeback. */ + if (vma->vm_flags & VM_PFNMAP) + return false; + + return vma->vm_file && vma->vm_file->f_mapping && + mapping_can_writeback(vma->vm_file->f_mapping); +} + +/* + * Does this VMA require the underlying folios to have their dirty state + * tracked? + */ +bool vma_needs_dirty_tracking(struct vm_area_struct *vma) +{ + /* Only shared, writable VMAs require dirty tracking. */ + if (!vma_is_shared_writable(vma)) + return false; + + /* Does the filesystem need to be notified? */ + if (vm_ops_needs_writenotify(vma->vm_ops)) + return true; + + /* + * Even if the filesystem doesn't indicate a need for writenotify, if it + * can writeback, dirty tracking is still required. + */ + return vma_fs_can_writeback(vma); +} + /* * Some shared mappings will want the pages marked read-only * to track write events. 
If so, we'll downgrade vm_page_prot @@ -1462,21 +1504,18 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) */ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) { - vm_flags_t vm_flags = vma->vm_flags; - const struct vm_operations_struct *vm_ops = vma->vm_ops; - /* If it was private or non-writable, the write bit is already clear */ - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) + if (!vma_is_shared_writable(vma)) return 0; /* The backer wishes to know when pages are first written to? */ - if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite)) + if (vm_ops_needs_writenotify(vma->vm_ops)) return 1; /* The open routine did something to the protections that pgprot_modify * won't preserve? */ if (pgprot_val(vm_page_prot) != - pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags))) + pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags))) return 0; /* @@ -1490,13 +1529,8 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) if (userfaultfd_wp(vma)) return 1; - /* Specialty mapping? */ - if (vm_flags & VM_PFNMAP) - return 0; - /* Can the mapping track the dirty pages? */ - return vma->vm_file && vma->vm_file->f_mapping && - mapping_can_writeback(vma->vm_file->f_mapping); + return vma_fs_can_writeback(vma); } /* -- cgit v1.2.3 From 8ac268436e6d52f3bb4d212df9395aec97afaa00 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 4 May 2023 22:27:52 +0100 Subject: mm/gup: disallow FOLL_LONGTERM GUP-nonfast writing to file-backed mappings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Writing to file-backed mappings which require folio dirty tracking using GUP is a fundamentally broken operation, as kernel write access to GUP mappings do not adhere to the semantics expected by a file system. A GUP caller uses the direct mapping to access the folio, which does not cause write notify to trigger, nor does it enforce that the caller marks the folio dirty. The problem arises when, after an initial write to the folio, writeback results in the folio being cleaned and then the caller, via the GUP interface, writes to the folio again. As a result of the use of this secondary, direct, mapping to the folio no write notify will occur, and if the caller does mark the folio dirty, this will be done so unexpectedly. For example, consider the following scenario:- 1. A folio is written to via GUP which write-faults the memory, notifying the file system and dirtying the folio. 2. Later, writeback is triggered, resulting in the folio being cleaned and the PTE being marked read-only. 3. The GUP caller writes to the folio, as it is mapped read/write via the direct mapping. 4. The GUP caller, now done with the page, unpins it and sets it dirty (though it does not have to). This results in both data being written to a folio without writenotify, and the folio being dirtied unexpectedly (if the caller decides to do so). This issue was first reported by Jan Kara [1] in 2018, where the problem resulted in file system crashes. This is only relevant when the mappings are file-backed and the underlying file system requires folio dirty tracking. File systems which do not, such as shmem or hugetlb, are not at risk and therefore can be written to without issue. Unfortunately this limitation of GUP has been present for some time and requires future rework of the GUP API in order to provide correct write access to such mappings. 
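To make the affected case concrete, consider a hypothetical in-kernel user (not taken from this series; the pin_user_pages() argument list is simplified here and differs slightly between kernel versions):

	/* Long-term writable pin, e.g. for RDMA or DMA. If user_addr sits in
	 * a MAP_SHARED mapping of a dirty-tracked filesystem such as ext4,
	 * later writes through the pin bypass writenotify entirely. */
	pinned = pin_user_pages(user_addr, nr_pages,
				FOLL_WRITE | FOLL_LONGTERM, pages);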
However, for the time being we introduce this check to prevent the most egregious case of this occurring, use of the FOLL_LONGTERM pin. These mappings are considerably more likely to be written to after folios are cleaned and thus simply must not be permitted to do so. This patch changes only the slow-path GUP functions, a following patch adapts the GUP-fast path along similar lines. [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz/ Link: https://lkml.kernel.org/r/7282506742d2390c125949c2f9894722750bb68a.1683235180.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Suggested-by: Jason Gunthorpe Reviewed-by: John Hubbard Reviewed-by: Mika Penttilä Reviewed-by: Jan Kara Reviewed-by: Jason Gunthorpe Acked-by: David Hildenbrand Cc: Kirill A . Shutemov Cc: Peter Zijlstra Signed-off-by: Andrew Morton --- mm/gup.c | 44 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index 8db58305f4eb..67999dd43d83 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -959,16 +959,54 @@ static int faultin_page(struct vm_area_struct *vma, return 0; } +/* + * Writing to file-backed mappings which require folio dirty tracking using GUP + * is a fundamentally broken operation, as kernel write access to GUP mappings + * do not adhere to the semantics expected by a file system. + * + * Consider the following scenario:- + * + * 1. A folio is written to via GUP which write-faults the memory, notifying + * the file system and dirtying the folio. + * 2. Later, writeback is triggered, resulting in the folio being cleaned and + * the PTE being marked read-only. + * 3. The GUP caller writes to the folio, as it is mapped read/write via the + * direct mapping. + * 4. The GUP caller, now done with the page, unpins it and sets it dirty + * (though it does not have to). + * + * This results in both data being written to a folio without writenotify, and + * the folio being dirtied unexpectedly (if the caller decides to do so). + */ +static bool writable_file_mapping_allowed(struct vm_area_struct *vma, + unsigned long gup_flags) +{ + /* + * If we aren't pinning then no problematic write can occur. A long term + * pin is the most egregious case so this is the case we disallow. + */ + if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) != + (FOLL_PIN | FOLL_LONGTERM)) + return true; + + /* + * If the VMA does not require dirty tracking then no problematic write + * can occur either. 
+ */ + return !vma_needs_dirty_tracking(vma); +} + static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) { vm_flags_t vm_flags = vma->vm_flags; int write = (gup_flags & FOLL_WRITE); int foreign = (gup_flags & FOLL_REMOTE); + bool vma_anon = vma_is_anonymous(vma); if (vm_flags & (VM_IO | VM_PFNMAP)) return -EFAULT; - if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) + if ((gup_flags & FOLL_ANON) && !vma_anon) return -EFAULT; if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma)) @@ -978,6 +1016,10 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) return -EFAULT; if (write) { + if (!vma_anon && + !writable_file_mapping_allowed(vma, gup_flags)) + return -EFAULT; + if (!(vm_flags & VM_WRITE)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; -- cgit v1.2.3 From a6e79df92e4a371ba7b751a20e3a31fde0f528b8 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 4 May 2023 22:27:53 +0100 Subject: mm/gup: disallow FOLL_LONGTERM GUP-fast writing to file-backed mappings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Writing to file-backed dirty-tracked mappings via GUP is inherently broken as we cannot rule out folios being cleaned and then a GUP user writing to them again and possibly marking them dirty unexpectedly. This is especially egregious for long-term mappings (as indicated by the use of the FOLL_LONGTERM flag), so we disallow this case in GUP-fast as we have already done in the slow path. We have access to less information in the fast path as we cannot examine the VMA containing the mapping, however we can determine whether the folio is anonymous or belonging to a whitelisted filesystem - specifically hugetlb and shmem mappings. We take special care to ensure that both the folio and mapping are safe to access when performing these checks and document folio_fast_pin_allowed() accordingly. It's important to note that there are no APIs allowing users to specify FOLL_FAST_ONLY for a PUP-fast let alone with FOLL_LONGTERM, so we can always rely on the fact that if we fail to pin on the fast path, the code will fall back to the slow path which can perform the more thorough check. Link: https://lkml.kernel.org/r/a27d39b87ded7f3dad5fd4181edb106393660453.1683235180.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Suggested-by: David Hildenbrand Suggested-by: Kirill A . Shutemov Suggested-by: Peter Zijlstra Reviewed-by: Jan Kara Acked-by: David Hildenbrand Cc: Jason Gunthorpe Cc: John Hubbard Cc: Mika Penttilä Signed-off-by: Andrew Morton --- mm/gup.c | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index 67999dd43d83..a718b956edbe 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -2351,6 +2352,82 @@ EXPORT_SYMBOL(get_user_pages_unlocked); */ #ifdef CONFIG_HAVE_FAST_GUP +/* + * Used in the GUP-fast path to determine whether a pin is permitted for a + * specific folio. + * + * This call assumes the caller has pinned the folio, that the lowest page table + * level still points to this folio, and that interrupts have been disabled. + * + * Writing to pinned file-backed dirty tracked folios is inherently problematic + * (see comment describing the writable_file_mapping_allowed() function). We + * therefore try to avoid the most egregious case of a long-term mapping doing + * so. 
+ * + * This function cannot be as thorough as that one as the VMA is not available + * in the fast path, so instead we whitelist known good cases and if in doubt, + * fall back to the slow path. + */ +static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags) +{ + struct address_space *mapping; + unsigned long mapping_flags; + + /* + * If we aren't pinning then no problematic write can occur. A long term + * pin is the most egregious case so this is the one we disallow. + */ + if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) != + (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) + return true; + + /* The folio is pinned, so we can safely access folio fields. */ + + if (WARN_ON_ONCE(folio_test_slab(folio))) + return false; + + /* hugetlb mappings do not require dirty-tracking. */ + if (folio_test_hugetlb(folio)) + return true; + + /* + * GUP-fast disables IRQs. When IRQS are disabled, RCU grace periods + * cannot proceed, which means no actions performed under RCU can + * proceed either. + * + * inodes and thus their mappings are freed under RCU, which means the + * mapping cannot be freed beneath us and thus we can safely dereference + * it. + */ + lockdep_assert_irqs_disabled(); + + /* + * However, there may be operations which _alter_ the mapping, so ensure + * we read it once and only once. + */ + mapping = READ_ONCE(folio->mapping); + + /* + * The mapping may have been truncated, in any case we cannot determine + * if this mapping is safe - fall back to slow path to determine how to + * proceed. + */ + if (!mapping) + return false; + + /* Anonymous folios pose no problem. */ + mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS; + if (mapping_flags) + return mapping_flags & PAGE_MAPPING_ANON; + + /* + * At this point, we know the mapping is non-null and points to an + * address_space object. The only remaining whitelisted file system is + * shmem. 
+ */ + return shmem_mapping(mapping); +} + static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, unsigned int flags, struct page **pages) @@ -2436,6 +2513,11 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, goto pte_unmap; } + if (!folio_fast_pin_allowed(folio, flags)) { + gup_put_folio(folio, 1, flags); + goto pte_unmap; + } + if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) { gup_put_folio(folio, 1, flags); goto pte_unmap; @@ -2628,6 +2710,11 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, return 0; } + if (!folio_fast_pin_allowed(folio, flags)) { + gup_put_folio(folio, refs, flags); + return 0; + } + if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) { gup_put_folio(folio, refs, flags); return 0; @@ -2694,6 +2781,10 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, return 0; } + if (!folio_fast_pin_allowed(folio, flags)) { + gup_put_folio(folio, refs, flags); + return 0; + } if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { gup_put_folio(folio, refs, flags); return 0; @@ -2734,6 +2825,11 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, return 0; } + if (!folio_fast_pin_allowed(folio, flags)) { + gup_put_folio(folio, refs, flags); + return 0; + } + if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { gup_put_folio(folio, refs, flags); return 0; @@ -2774,6 +2870,11 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, return 0; } + if (!folio_fast_pin_allowed(folio, flags)) { + gup_put_folio(folio, refs, flags); + return 0; + } + *nr += refs; folio_set_referenced(folio); return 1; -- cgit v1.2.3 From 52bb85d64361e7861cc2d38cfbc85881bc57c0b6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 17 May 2023 15:10:51 +0200 Subject: mm: sparse: mark populate_section_memmap() static There are two definitions of this function, but the second one lacks the 'static' annotation: mm/sparse.c:704:25: error: no previous prototype for 'populate_section_memmap' [-Werror=missing-prototypes] Link: https://lkml.kernel.org/r/20230517131102.934196-4-arnd@kernel.org Signed-off-by: Arnd Bergmann Cc: Boqun Feng Cc: Catalin Marinas Cc: Christoph Lameter Cc: Dennis Zhou Cc: Eric Paris Cc: Heiko Carstens Cc: Helge Deller Cc: Ingo Molnar Cc: Michael Ellerman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Paul Moore Cc: Pavel Machek Cc: Peter Zijlstra Cc: Rafael J. Wysocki Cc: Russell King Cc: Tejun Heo Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Waiman Long Cc: Will Deacon Signed-off-by: Andrew Morton --- mm/sparse.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/sparse.c b/mm/sparse.c index c2afdb26039e..b8d5d58fe240 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -701,7 +701,7 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages) return rc; } #else -struct page * __meminit populate_section_memmap(unsigned long pfn, +static struct page * __meminit populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap, struct dev_pagemap *pgmap) { -- cgit v1.2.3 From 2736e8eeb0ccdc71d1f4256c9c9a28f58cc43307 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 8 Jun 2023 13:02:43 +0200 Subject: block: use the holder as indication for exclusive opens The current interface for exclusive opens is rather confusing as it requires both the FMODE_EXCL flag and a holder. 
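Before this patch an exclusive open therefore looked roughly like the following (sketch based on the drbd and filesystem callers converted below; error handling omitted):

	bdev = blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  holder, NULL);
	/* ... use the device ... */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

with the mode, including FMODE_EXCL, having to be repeated on release even though the holder already identified the exclusive opener.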
Remove the need to pass FMODE_EXCL and just key off the exclusive open off a non-NULL holder. For blkdev_put this requires adding the holder argument, which provides better debug checking that only the holder actually releases the hold, but at the same time allows removing the now superfluous mode argument. Signed-off-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Acked-by: Christian Brauner Acked-by: David Sterba [btrfs] Acked-by: Jack Wang [rnbd] Link: https://lore.kernel.org/r/20230608110258.189493-16-hch@lst.de Signed-off-by: Jens Axboe --- block/bdev.c | 37 ++++++++++++++++++++---------------- block/fops.c | 6 ++++-- block/genhd.c | 5 ++--- block/ioctl.c | 5 ++--- drivers/block/drbd/drbd_nl.c | 23 +++++++++++++--------- drivers/block/pktcdvd.c | 13 ++++++------- drivers/block/rnbd/rnbd-srv.c | 4 ++-- drivers/block/xen-blkback/xenbus.c | 2 +- drivers/block/zram/zram_drv.c | 8 ++++---- drivers/md/bcache/super.c | 15 +++++++-------- drivers/md/dm.c | 6 +++--- drivers/md/md.c | 38 +++++++++++++++++++------------------ drivers/mtd/devices/block2mtd.c | 4 ++-- drivers/nvme/target/io-cmd-bdev.c | 2 +- drivers/s390/block/dasd_genhd.c | 2 +- drivers/target/target_core_iblock.c | 6 +++--- drivers/target/target_core_pscsi.c | 8 +++----- fs/btrfs/dev-replace.c | 6 +++--- fs/btrfs/ioctl.c | 12 ++++++------ fs/btrfs/volumes.c | 28 +++++++++++++-------------- fs/btrfs/volumes.h | 6 +++--- fs/erofs/super.c | 7 ++++--- fs/ext4/super.c | 11 +++-------- fs/f2fs/super.c | 2 +- fs/jfs/jfs_logmgr.c | 6 +++--- fs/nfs/blocklayout/dev.c | 4 ++-- fs/nilfs2/super.c | 6 +++--- fs/ocfs2/cluster/heartbeat.c | 4 ++-- fs/reiserfs/journal.c | 19 +++++++++---------- fs/reiserfs/reiserfs.h | 1 - fs/super.c | 20 +++++++++---------- fs/xfs/xfs_super.c | 15 ++++++++------- include/linux/blkdev.h | 2 +- kernel/power/hibernate.c | 12 ++++-------- kernel/power/power.h | 2 +- kernel/power/swap.c | 21 +++++++++----------- mm/swapfile.c | 7 +++---- 37 files changed, 183 insertions(+), 192 deletions(-) (limited to 'mm') diff --git a/block/bdev.c b/block/bdev.c index 2c6888ceb378..db63e5bcc46f 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -604,7 +604,7 @@ void bd_abort_claiming(struct block_device *bdev, void *holder) } EXPORT_SYMBOL(bd_abort_claiming); -static void bd_end_claim(struct block_device *bdev) +static void bd_end_claim(struct block_device *bdev, void *holder) { struct block_device *whole = bdev_whole(bdev); bool unblock = false; @@ -614,6 +614,7 @@ static void bd_end_claim(struct block_device *bdev) * bdev_lock. open_mutex is used to synchronize disk_holder unlinking. */ mutex_lock(&bdev_lock); + WARN_ON_ONCE(bdev->bd_holder != holder); WARN_ON_ONCE(--bdev->bd_holders < 0); WARN_ON_ONCE(--whole->bd_holders < 0); if (!bdev->bd_holders) { @@ -750,10 +751,9 @@ void blkdev_put_no_open(struct block_device *bdev) * @holder: exclusive holder identifier * @hops: holder operations * - * Open the block device described by device number @dev. If @mode includes - * %FMODE_EXCL, the block device is opened with exclusive access. Specifying - * %FMODE_EXCL with a %NULL @holder is invalid. Exclusive opens may nest for - * the same @holder. + * Open the block device described by device number @dev. If @holder is not + * %NULL, the block device is opened with exclusive access. Exclusive opens may + * nest for the same @holder. * * Use this interface ONLY if you really do not have anything better - i.e. 
when * you are behind a truly sucky interface and all you are given is a device @@ -785,10 +785,16 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder, return ERR_PTR(-ENXIO); disk = bdev->bd_disk; - if (mode & FMODE_EXCL) { + if (holder) { + mode |= FMODE_EXCL; ret = bd_prepare_to_claim(bdev, holder, hops); if (ret) goto put_blkdev; + } else { + if (WARN_ON_ONCE(mode & FMODE_EXCL)) { + ret = -EIO; + goto put_blkdev; + } } disk_block_events(disk); @@ -805,7 +811,7 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder, ret = blkdev_get_whole(bdev, mode); if (ret) goto put_module; - if (mode & FMODE_EXCL) { + if (holder) { bd_finish_claiming(bdev, holder, hops); /* @@ -829,7 +835,7 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder, put_module: module_put(disk->fops->owner); abort_claiming: - if (mode & FMODE_EXCL) + if (holder) bd_abort_claiming(bdev, holder); mutex_unlock(&disk->open_mutex); disk_unblock_events(disk); @@ -845,10 +851,9 @@ EXPORT_SYMBOL(blkdev_get_by_dev); * @mode: FMODE_* mask * @holder: exclusive holder identifier * - * Open the block device described by the device file at @path. If @mode - * includes %FMODE_EXCL, the block device is opened with exclusive access. - * Specifying %FMODE_EXCL with a %NULL @holder is invalid. Exclusive opens may - * nest for the same @holder. + * Open the block device described by the device file at @path. If @holder is + * not %NULL, the block device is opened with exclusive access. Exclusive opens + * may nest for the same @holder. * * CONTEXT: * Might sleep. @@ -869,7 +874,7 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, bdev = blkdev_get_by_dev(dev, mode, holder, hops); if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { - blkdev_put(bdev, mode); + blkdev_put(bdev, holder); return ERR_PTR(-EACCES); } @@ -877,7 +882,7 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, } EXPORT_SYMBOL(blkdev_get_by_path); -void blkdev_put(struct block_device *bdev, fmode_t mode) +void blkdev_put(struct block_device *bdev, void *holder) { struct gendisk *disk = bdev->bd_disk; @@ -892,8 +897,8 @@ void blkdev_put(struct block_device *bdev, fmode_t mode) sync_blockdev(bdev); mutex_lock(&disk->open_mutex); - if (mode & FMODE_EXCL) - bd_end_claim(bdev); + if (holder) + bd_end_claim(bdev, holder); /* * Trigger event checking and tell drivers to flush MEDIA_CHANGE diff --git a/block/fops.c b/block/fops.c index 26af2b39c758..9f26e25bafa1 100644 --- a/block/fops.c +++ b/block/fops.c @@ -490,7 +490,9 @@ static int blkdev_open(struct inode *inode, struct file *filp) if ((filp->f_flags & O_ACCMODE) == 3) filp->f_mode |= FMODE_WRITE_IOCTL; - bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp, NULL); + bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, + (filp->f_mode & FMODE_EXCL) ? filp : NULL, + NULL); if (IS_ERR(bdev)) return PTR_ERR(bdev); @@ -504,7 +506,7 @@ static int blkdev_release(struct inode *inode, struct file *filp) { struct block_device *bdev = filp->private_data; - blkdev_put(bdev, filp->f_mode); + blkdev_put(bdev, (filp->f_mode & FMODE_EXCL) ? 
filp : NULL); return 0; } diff --git a/block/genhd.c b/block/genhd.c index 4e5fd6aaa883..b56f8b5c88b3 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -365,12 +365,11 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode) } set_bit(GD_NEED_PART_SCAN, &disk->state); - bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL, - NULL); + bdev = blkdev_get_by_dev(disk_devt(disk), mode, NULL, NULL); if (IS_ERR(bdev)) ret = PTR_ERR(bdev); else - blkdev_put(bdev, mode & ~FMODE_EXCL); + blkdev_put(bdev, NULL); /* * If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN is still set, diff --git a/block/ioctl.c b/block/ioctl.c index c7d7d4345edb..b39bd5b41ee4 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -454,11 +454,10 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode, if (mode & FMODE_EXCL) return set_blocksize(bdev, n); - if (IS_ERR(blkdev_get_by_dev(bdev->bd_dev, mode | FMODE_EXCL, &bdev, - NULL))) + if (IS_ERR(blkdev_get_by_dev(bdev->bd_dev, mode, &bdev, NULL))) return -EBUSY; ret = set_blocksize(bdev, n); - blkdev_put(bdev, mode | FMODE_EXCL); + blkdev_put(bdev, &bdev); return ret; } diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index cab59dab3410..10b1e5171332 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1640,8 +1640,7 @@ static struct block_device *open_backing_dev(struct drbd_device *device, struct block_device *bdev; int err = 0; - bdev = blkdev_get_by_path(bdev_path, - FMODE_READ | FMODE_WRITE | FMODE_EXCL, + bdev = blkdev_get_by_path(bdev_path, FMODE_READ | FMODE_WRITE, claim_ptr, NULL); if (IS_ERR(bdev)) { drbd_err(device, "open(\"%s\") failed with %ld\n", @@ -1654,7 +1653,7 @@ static struct block_device *open_backing_dev(struct drbd_device *device, err = bd_link_disk_holder(bdev, device->vdisk); if (err) { - blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(bdev, claim_ptr); drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n", bdev_path, err); bdev = ERR_PTR(err); @@ -1696,13 +1695,13 @@ static int open_backing_devices(struct drbd_device *device, } static void close_backing_dev(struct drbd_device *device, struct block_device *bdev, - bool do_bd_unlink) + void *claim_ptr, bool do_bd_unlink) { if (!bdev) return; if (do_bd_unlink) bd_unlink_disk_holder(bdev, device->vdisk); - blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(bdev, claim_ptr); } void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev) @@ -1710,8 +1709,11 @@ void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev * if (ldev == NULL) return; - close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev); - close_backing_dev(device, ldev->backing_bdev, true); + close_backing_dev(device, ldev->md_bdev, + ldev->md.meta_dev_idx < 0 ? + (void *)device : (void *)drbd_m_holder, + ldev->md_bdev != ldev->backing_bdev); + close_backing_dev(device, ldev->backing_bdev, device, true); kfree(ldev->disk_conf); kfree(ldev); @@ -2127,8 +2129,11 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) fail: conn_reconfig_done(connection); if (nbc) { - close_backing_dev(device, nbc->md_bdev, nbc->md_bdev != nbc->backing_bdev); - close_backing_dev(device, nbc->backing_bdev, true); + close_backing_dev(device, nbc->md_bdev, + nbc->disk_conf->meta_dev_idx < 0 ? 
+ (void *)device : (void *)drbd_m_holder, + nbc->md_bdev != nbc->backing_bdev); + close_backing_dev(device, nbc->backing_bdev, device, true); kfree(nbc); } kfree(new_disk_conf); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 7bfc058cb665..c3299e49edd5 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2167,8 +2167,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) * to read/write from/to it. It is already opened in O_NONBLOCK mode * so open should not fail. */ - bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd, - NULL); + bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ, pd, NULL); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); goto out; @@ -2215,7 +2214,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) return 0; out_putdev: - blkdev_put(bdev, FMODE_READ | FMODE_EXCL); + blkdev_put(bdev, pd); out: return ret; } @@ -2234,7 +2233,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush) pkt_lock_door(pd, 0); pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); - blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL); + blkdev_put(pd->bdev, pd); pkt_shrink_pktlist(pd); } @@ -2520,7 +2519,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) return PTR_ERR(bdev); sdev = scsi_device_from_queue(bdev->bd_disk->queue); if (!sdev) { - blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); + blkdev_put(bdev, NULL); return -EINVAL; } put_device(&sdev->sdev_gendev); @@ -2545,7 +2544,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) return 0; out_mem: - blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); + blkdev_put(bdev, NULL); /* This is safe: open() is still holding a reference. */ module_put(THIS_MODULE); return -ENOMEM; @@ -2751,7 +2750,7 @@ static int pkt_remove_dev(dev_t pkt_dev) pkt_debugfs_dev_remove(pd); pkt_sysfs_dev_remove(pd); - blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY); + blkdev_put(pd->bdev, NULL); remove_proc_entry(pd->disk->disk_name, pkt_proc); dev_notice(ddev, "writer unmapped\n"); diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c index a92a4289d0ec..a909f8763ce7 100644 --- a/drivers/block/rnbd/rnbd-srv.c +++ b/drivers/block/rnbd/rnbd-srv.c @@ -219,7 +219,7 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id) rnbd_put_sess_dev(sess_dev); wait_for_completion(&dc); /* wait for inflights to drop to zero */ - blkdev_put(sess_dev->bdev, sess_dev->open_flags); + blkdev_put(sess_dev->bdev, NULL); mutex_lock(&sess_dev->dev->lock); list_del(&sess_dev->dev_list); if (sess_dev->open_flags & FMODE_WRITE) @@ -791,7 +791,7 @@ srv_dev_put: } rnbd_put_srv_dev(srv_dev); blkdev_put: - blkdev_put(bdev, open_flags); + blkdev_put(bdev, NULL); free_path: kfree(full_path); reject: diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 43b36da9b354..141b60aad570 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -473,7 +473,7 @@ static void xenvbd_sysfs_delif(struct xenbus_device *dev) static void xen_vbd_free(struct xen_vbd *vbd) { if (vbd->bdev) - blkdev_put(vbd->bdev, vbd->readonly ? 
FMODE_READ : FMODE_WRITE); + blkdev_put(vbd->bdev, NULL); vbd->bdev = NULL; } diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index f5644c606040..21615d67a9bd 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -420,7 +420,7 @@ static void reset_bdev(struct zram *zram) return; bdev = zram->bdev; - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + blkdev_put(bdev, zram); /* hope filp_close flush all of IO */ filp_close(zram->backing_dev, NULL); zram->backing_dev = NULL; @@ -507,8 +507,8 @@ static ssize_t backing_dev_store(struct device *dev, goto out; } - bdev = blkdev_get_by_dev(inode->i_rdev, - FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram, NULL); + bdev = blkdev_get_by_dev(inode->i_rdev, FMODE_READ | FMODE_WRITE, zram, + NULL); if (IS_ERR(bdev)) { err = PTR_ERR(bdev); bdev = NULL; @@ -539,7 +539,7 @@ out: kvfree(bitmap); if (bdev) - blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(bdev, zram); if (backing_dev) filp_close(backing_dev, NULL); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 4a2aed047aec..7022fea396f2 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1369,7 +1369,7 @@ static void cached_dev_free(struct closure *cl) put_page(virt_to_page(dc->sb_disk)); if (!IS_ERR_OR_NULL(dc->bdev)) - blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + blkdev_put(dc->bdev, bcache_kobj); wake_up(&unregister_wait); @@ -2218,7 +2218,7 @@ void bch_cache_release(struct kobject *kobj) put_page(virt_to_page(ca->sb_disk)); if (!IS_ERR_OR_NULL(ca->bdev)) - blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + blkdev_put(ca->bdev, bcache_kobj); kfree(ca); module_put(THIS_MODULE); @@ -2359,7 +2359,7 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk, * call blkdev_put() to bdev in bch_cache_release(). So we * explicitly call blkdev_put() here. 
*/ - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + blkdev_put(bdev, bcache_kobj); if (ret == -ENOMEM) err = "cache_alloc(): -ENOMEM"; else if (ret == -EPERM) @@ -2461,7 +2461,7 @@ static void register_bdev_worker(struct work_struct *work) if (!dc) { fail = true; put_page(virt_to_page(args->sb_disk)); - blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(args->bdev, bcache_kobj); goto out; } @@ -2491,7 +2491,7 @@ static void register_cache_worker(struct work_struct *work) if (!ca) { fail = true; put_page(virt_to_page(args->sb_disk)); - blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(args->bdev, bcache_kobj); goto out; } @@ -2558,8 +2558,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, ret = -EINVAL; err = "failed to open device"; - bdev = blkdev_get_by_path(strim(path), - FMODE_READ|FMODE_WRITE|FMODE_EXCL, + bdev = blkdev_get_by_path(strim(path), FMODE_READ | FMODE_WRITE, bcache_kobj, NULL); if (IS_ERR(bdev)) { if (bdev == ERR_PTR(-EBUSY)) { @@ -2648,7 +2647,7 @@ async_done: out_put_sb_page: put_page(virt_to_page(sb_disk)); out_blkdev_put: - blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(bdev, register_bcache); out_free_sb: kfree(sb); out_free_path: diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 246b8f028a98..b16e37362c5a 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -746,7 +746,7 @@ static struct table_device *open_table_device(struct mapped_device *md, return ERR_PTR(-ENOMEM); refcount_set(&td->count, 1); - bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, _dm_claim_ptr, NULL); + bdev = blkdev_get_by_dev(dev, mode, _dm_claim_ptr, NULL); if (IS_ERR(bdev)) { r = PTR_ERR(bdev); goto out_free_td; @@ -771,7 +771,7 @@ static struct table_device *open_table_device(struct mapped_device *md, return td; out_blkdev_put: - blkdev_put(bdev, mode | FMODE_EXCL); + blkdev_put(bdev, _dm_claim_ptr); out_free_td: kfree(td); return ERR_PTR(r); @@ -784,7 +784,7 @@ static void close_table_device(struct table_device *td, struct mapped_device *md { if (md->disk->slave_dir) bd_unlink_disk_holder(td->dm_dev.bdev, md->disk); - blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); + blkdev_put(td->dm_dev.bdev, _dm_claim_ptr); put_dax(td->dm_dev.dax_dev); list_del(&td->list); kfree(td); diff --git a/drivers/md/md.c b/drivers/md/md.c index 159197dd7b6d..dad4a5539f9f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2449,7 +2449,10 @@ static void rdev_delayed_delete(struct work_struct *ws) void md_autodetect_dev(dev_t dev); -static void export_rdev(struct md_rdev *rdev) +/* just for claiming the bdev */ +static struct md_rdev claim_rdev; + +static void export_rdev(struct md_rdev *rdev, struct mddev *mddev) { pr_debug("md: export_rdev(%pg)\n", rdev->bdev); md_rdev_clear(rdev); @@ -2457,7 +2460,7 @@ static void export_rdev(struct md_rdev *rdev) if (test_bit(AutoDetected, &rdev->flags)) md_autodetect_dev(rdev->bdev->bd_dev); #endif - blkdev_put(rdev->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(rdev->bdev, mddev->major_version == -2 ? 
&claim_rdev : rdev); rdev->bdev = NULL; kobject_put(&rdev->kobj); } @@ -2485,7 +2488,7 @@ static void md_kick_rdev_from_array(struct md_rdev *rdev) INIT_WORK(&rdev->del_work, rdev_delayed_delete); kobject_get(&rdev->kobj); queue_work(md_rdev_misc_wq, &rdev->del_work); - export_rdev(rdev); + export_rdev(rdev, rdev->mddev); } static void export_array(struct mddev *mddev) @@ -3612,6 +3615,7 @@ int md_rdev_init(struct md_rdev *rdev) return badblocks_init(&rdev->badblocks, 0); } EXPORT_SYMBOL_GPL(md_rdev_init); + /* * Import a device. If 'super_format' >= 0, then sanity check the superblock * @@ -3624,7 +3628,6 @@ EXPORT_SYMBOL_GPL(md_rdev_init); */ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) { - static struct md_rdev claim_rdev; /* just for claiming the bdev */ struct md_rdev *rdev; sector_t size; int err; @@ -3640,8 +3643,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe if (err) goto out_clear_rdev; - rdev->bdev = blkdev_get_by_dev(newdev, - FMODE_READ | FMODE_WRITE | FMODE_EXCL, + rdev->bdev = blkdev_get_by_dev(newdev, FMODE_READ | FMODE_WRITE, super_format == -2 ? &claim_rdev : rdev, NULL); if (IS_ERR(rdev->bdev)) { pr_warn("md: could not open device unknown-block(%u,%u).\n", @@ -3679,7 +3681,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe return rdev; out_blkdev_put: - blkdev_put(rdev->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(rdev->bdev, super_format == -2 ? &claim_rdev : rdev); out_clear_rdev: md_rdev_clear(rdev); out_free_rdev: @@ -4560,7 +4562,7 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len) err = bind_rdev_to_array(rdev, mddev); out: if (err) - export_rdev(rdev); + export_rdev(rdev, mddev); mddev_unlock(mddev); if (!err) md_new_event(); @@ -6498,7 +6500,7 @@ static void autorun_devices(int part) rdev_for_each_list(rdev, tmp, &candidates) { list_del_init(&rdev->same_set); if (bind_rdev_to_array(rdev, mddev)) - export_rdev(rdev); + export_rdev(rdev, mddev); } autorun_array(mddev); mddev_unlock(mddev); @@ -6508,7 +6510,7 @@ static void autorun_devices(int part) */ rdev_for_each_list(rdev, tmp, &candidates) { list_del_init(&rdev->same_set); - export_rdev(rdev); + export_rdev(rdev, mddev); } mddev_put(mddev); } @@ -6696,13 +6698,13 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) pr_warn("md: %pg has different UUID to %pg\n", rdev->bdev, rdev0->bdev); - export_rdev(rdev); + export_rdev(rdev, mddev); return -EINVAL; } } err = bind_rdev_to_array(rdev, mddev); if (err) - export_rdev(rdev); + export_rdev(rdev, mddev); return err; } @@ -6746,7 +6748,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) /* This was a hot-add request, but events doesn't * match, so reject it. 
*/ - export_rdev(rdev); + export_rdev(rdev, mddev); return -EINVAL; } @@ -6772,7 +6774,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) } } if (has_journal || mddev->bitmap) { - export_rdev(rdev); + export_rdev(rdev, mddev); return -EBUSY; } set_bit(Journal, &rdev->flags); @@ -6787,7 +6789,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) /* --add initiated by this node */ err = md_cluster_ops->add_new_disk(mddev, rdev); if (err) { - export_rdev(rdev); + export_rdev(rdev, mddev); return err; } } @@ -6797,7 +6799,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) err = bind_rdev_to_array(rdev, mddev); if (err) - export_rdev(rdev); + export_rdev(rdev, mddev); if (mddev_is_clustered(mddev)) { if (info->state & (1 << MD_DISK_CANDIDATE)) { @@ -6860,7 +6862,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) err = bind_rdev_to_array(rdev, mddev); if (err) { - export_rdev(rdev); + export_rdev(rdev, mddev); return err; } } @@ -6985,7 +6987,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) return 0; abort_export: - export_rdev(rdev); + export_rdev(rdev, mddev); return err; } diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index 218eb2af564a..44fc23af4c3f 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c @@ -209,7 +209,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev) if (dev->blkdev) { invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, 0, -1); - blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + blkdev_put(dev->blkdev, NULL); } kfree(dev); @@ -261,7 +261,7 @@ static struct block_device __ref *mdtblock_early_get_bdev(const char *devname, static struct block2mtd_dev *add_device(char *devname, int erase_size, char *label, int timeout) { - const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL; + const fmode_t mode = FMODE_READ | FMODE_WRITE; struct block_device *bdev; struct block2mtd_dev *dev; char *name; diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 9b6d6d85c725..65ed2d478fac 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -51,7 +51,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) void nvmet_bdev_ns_disable(struct nvmet_ns *ns) { if (ns->bdev) { - blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ); + blkdev_put(ns->bdev, NULL); ns->bdev = NULL; } } diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index f21198bc483e..d2b27b84f854 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -179,7 +179,7 @@ void dasd_destroy_partitions(struct dasd_block *block) mutex_unlock(&bdev->bd_disk->open_mutex); /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. 
*/ - blkdev_put(bdev, FMODE_READ); + blkdev_put(bdev, NULL); } int dasd_gendisk_init(void) diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index a5cbbefa78ee..c62f961f46e3 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -108,7 +108,7 @@ static int iblock_configure_device(struct se_device *dev) pr_debug( "IBLOCK: Claiming struct block_device: %s\n", ib_dev->ibd_udev_path); - mode = FMODE_READ|FMODE_EXCL; + mode = FMODE_READ; if (!ib_dev->ibd_readonly) mode |= FMODE_WRITE; else @@ -175,7 +175,7 @@ static int iblock_configure_device(struct se_device *dev) return 0; out_blkdev_put: - blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); + blkdev_put(ib_dev->ibd_bd, ib_dev); out_free_bioset: bioset_exit(&ib_dev->ibd_bio_set); out: @@ -201,7 +201,7 @@ static void iblock_destroy_device(struct se_device *dev) struct iblock_dev *ib_dev = IBLOCK_DEV(dev); if (ib_dev->ibd_bd != NULL) - blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); + blkdev_put(ib_dev->ibd_bd, ib_dev); bioset_exit(&ib_dev->ibd_bio_set); } diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index e3494e036c6c..da3b5512d7ae 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -366,8 +366,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) * Claim exclusive struct block_device access to struct scsi_device * for TYPE_DISK and TYPE_ZBC using supplied udev_path */ - bd = blkdev_get_by_path(dev->udev_path, - FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv, + bd = blkdev_get_by_path(dev->udev_path, FMODE_WRITE | FMODE_READ, pdv, NULL); if (IS_ERR(bd)) { pr_err("pSCSI: blkdev_get_by_path() failed\n"); @@ -378,7 +377,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) ret = pscsi_add_device_to_list(dev, sd); if (ret) { - blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); + blkdev_put(pdv->pdv_bd, pdv); scsi_device_put(sd); return ret; } @@ -566,8 +565,7 @@ static void pscsi_destroy_device(struct se_device *dev) */ if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) && pdv->pdv_bd) { - blkdev_put(pdv->pdv_bd, - FMODE_WRITE|FMODE_READ|FMODE_EXCL); + blkdev_put(pdv->pdv_bd, pdv); pdv->pdv_bd = NULL; } /* diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 4de4984fa99b..677e9d9e1527 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -257,7 +257,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, return -EINVAL; } - bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, + bdev = blkdev_get_by_path(device_path, FMODE_WRITE, fs_info->bdev_holder, NULL); if (IS_ERR(bdev)) { btrfs_err(fs_info, "target device %s is invalid!", device_path); @@ -315,7 +315,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, device->bdev = bdev; set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); - device->mode = FMODE_EXCL; + device->holder = fs_info->bdev_holder; device->dev_stats_valid = 1; set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); device->fs_devices = fs_devices; @@ -334,7 +334,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, return 0; error: - blkdev_put(bdev, FMODE_EXCL); + blkdev_put(bdev, fs_info->bdev_holder); return ret; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 2fa36f694daa..d99376a79ef4 100644 --- 
a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2672,7 +2672,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg) struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_ioctl_vol_args_v2 *vol_args; struct block_device *bdev = NULL; - fmode_t mode; + void *holder; int ret; bool cancel = false; @@ -2709,7 +2709,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg) goto err_drop; /* Exclusive operation is now claimed */ - ret = btrfs_rm_device(fs_info, &args, &bdev, &mode); + ret = btrfs_rm_device(fs_info, &args, &bdev, &holder); btrfs_exclop_finish(fs_info); @@ -2724,7 +2724,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg) err_drop: mnt_drop_write_file(file); if (bdev) - blkdev_put(bdev, mode); + blkdev_put(bdev, holder); out: btrfs_put_dev_args_from_path(&args); kfree(vol_args); @@ -2738,7 +2738,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_ioctl_vol_args *vol_args; struct block_device *bdev = NULL; - fmode_t mode; + void *holder; int ret; bool cancel = false; @@ -2765,7 +2765,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE, cancel); if (ret == 0) { - ret = btrfs_rm_device(fs_info, &args, &bdev, &mode); + ret = btrfs_rm_device(fs_info, &args, &bdev, &holder); if (!ret) btrfs_info(fs_info, "disk deleted %s", vol_args->name); btrfs_exclop_finish(fs_info); @@ -2773,7 +2773,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) mnt_drop_write_file(file); if (bdev) - blkdev_put(bdev, mode); + blkdev_put(bdev, holder); out: btrfs_put_dev_args_from_path(&args); kfree(vol_args); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 035868cee3dd..7b12e05cdbf0 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -507,14 +507,14 @@ btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder, sync_blockdev(*bdev); ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE); if (ret) { - blkdev_put(*bdev, flags); + blkdev_put(*bdev, holder); goto error; } invalidate_bdev(*bdev); *disk_super = btrfs_read_dev_super(*bdev); if (IS_ERR(*disk_super)) { ret = PTR_ERR(*disk_super); - blkdev_put(*bdev, flags); + blkdev_put(*bdev, holder); goto error; } @@ -642,7 +642,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, device->bdev = bdev; clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); - device->mode = flags; + device->holder = holder; fs_devices->open_devices++; if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && @@ -656,7 +656,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, error_free_page: btrfs_release_disk_super(disk_super); - blkdev_put(bdev, flags); + blkdev_put(bdev, holder); return -EINVAL; } @@ -1057,7 +1057,7 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, continue; if (device->bdev) { - blkdev_put(device->bdev, device->mode); + blkdev_put(device->bdev, device->holder); device->bdev = NULL; fs_devices->open_devices--; } @@ -1103,7 +1103,7 @@ static void btrfs_close_bdev(struct btrfs_device *device) invalidate_bdev(device->bdev); } - blkdev_put(device->bdev, device->mode); + blkdev_put(device->bdev, device->holder); } static void btrfs_close_one_device(struct btrfs_device *device) @@ -1213,8 +1213,6 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices, struct btrfs_device *latest_dev = 
NULL; struct btrfs_device *tmp_device; - flags |= FMODE_EXCL; - list_for_each_entry_safe(device, tmp_device, &fs_devices->devices, dev_list) { int ret; @@ -1400,7 +1398,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags) btrfs_release_disk_super(disk_super); error_bdev_put: - blkdev_put(bdev, flags); + blkdev_put(bdev, NULL); return device; } @@ -2087,7 +2085,7 @@ void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, int btrfs_rm_device(struct btrfs_fs_info *fs_info, struct btrfs_dev_lookup_args *args, - struct block_device **bdev, fmode_t *mode) + struct block_device **bdev, void **holder) { struct btrfs_trans_handle *trans; struct btrfs_device *device; @@ -2226,7 +2224,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, } *bdev = device->bdev; - *mode = device->mode; + *holder = device->holder; synchronize_rcu(); btrfs_free_device(device); @@ -2394,7 +2392,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, else memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE); btrfs_release_disk_super(disk_super); - blkdev_put(bdev, FMODE_READ); + blkdev_put(bdev, NULL); return 0; } @@ -2627,7 +2625,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path if (sb_rdonly(sb) && !fs_devices->seeding) return -EROFS; - bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, + bdev = blkdev_get_by_path(device_path, FMODE_WRITE, fs_info->bdev_holder, NULL); if (IS_ERR(bdev)) return PTR_ERR(bdev); @@ -2690,7 +2688,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path device->commit_total_bytes = device->total_bytes; set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); - device->mode = FMODE_EXCL; + device->holder = fs_info->bdev_holder; device->dev_stats_valid = 1; set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); @@ -2848,7 +2846,7 @@ error_free_zone: error_free_device: btrfs_free_device(device); error: - blkdev_put(bdev, FMODE_EXCL); + blkdev_put(bdev, fs_info->bdev_holder); if (locked) { mutex_unlock(&uuid_mutex); up_write(&sb->s_umount); diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index eb97a397b3c3..840a8df39907 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -94,8 +94,8 @@ struct btrfs_device { struct btrfs_zoned_device_info *zone_info; - /* the mode sent to blkdev_get */ - fmode_t mode; + /* block device holder for blkdev_get/put */ + void *holder; /* * Device's major-minor number. Must be set even if the device is not @@ -619,7 +619,7 @@ void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args); void btrfs_free_device(struct btrfs_device *device); int btrfs_rm_device(struct btrfs_fs_info *fs_info, struct btrfs_dev_lookup_args *args, - struct block_device **bdev, fmode_t *mode); + struct block_device **bdev, void **holder); void __exit btrfs_cleanup_fs_uuids(void); int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len); int btrfs_grow_device(struct btrfs_trans_handle *trans, diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 6c263e9cd38b..54dba967a2d4 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -19,6 +19,7 @@ #include static struct kmem_cache *erofs_inode_cachep __read_mostly; +struct file_system_type erofs_fs_type; void _erofs_err(struct super_block *sb, const char *function, const char *fmt, ...) 
@@ -253,8 +254,8 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, return PTR_ERR(fscache); dif->fscache = fscache; } else if (!sbi->devs->flatdev) { - bdev = blkdev_get_by_path(dif->path, FMODE_READ | FMODE_EXCL, - sb->s_type, NULL); + bdev = blkdev_get_by_path(dif->path, FMODE_READ, sb->s_type, + NULL); if (IS_ERR(bdev)) return PTR_ERR(bdev); dif->bdev = bdev; @@ -877,7 +878,7 @@ static int erofs_release_device_info(int id, void *ptr, void *data) fs_put_dax(dif->dax_dev, NULL); if (dif->bdev) - blkdev_put(dif->bdev, FMODE_READ | FMODE_EXCL); + blkdev_put(dif->bdev, &erofs_fs_type); erofs_fscache_unregister_cookie(dif->fscache); dif->fscache = NULL; kfree(dif->path); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 9070ea9154d7..92dd699139a3 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1112,7 +1112,7 @@ static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb) { struct block_device *bdev; - bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb, + bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, sb, &ext4_holder_ops); if (IS_ERR(bdev)) goto fail; @@ -1128,17 +1128,12 @@ fail: /* * Release the journal device */ -static void ext4_blkdev_put(struct block_device *bdev) -{ - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); -} - static void ext4_blkdev_remove(struct ext4_sb_info *sbi) { struct block_device *bdev; bdev = sbi->s_journal_bdev; if (bdev) { - ext4_blkdev_put(bdev); + blkdev_put(bdev, sbi->s_es); sbi->s_journal_bdev = NULL; } } @@ -5915,7 +5910,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, out_journal: jbd2_journal_destroy(journal); out_bdev: - ext4_blkdev_put(bdev); + blkdev_put(bdev, sb); return NULL; } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 7c34ab082f13..a5adb1d316e3 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1538,7 +1538,7 @@ static void destroy_device_list(struct f2fs_sb_info *sbi) int i; for (i = 0; i < sbi->s_ndevs; i++) { - blkdev_put(FDEV(i).bdev, FMODE_EXCL); + blkdev_put(FDEV(i).bdev, sbi->sb->s_type); #ifdef CONFIG_BLK_DEV_ZONED kvfree(FDEV(i).blkz_seq); #endif diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index 46d393c8088a..82f70d46f4e5 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -1100,7 +1100,7 @@ int lmLogOpen(struct super_block *sb) * file systems to log may have n-to-1 relationship; */ - bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, + bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ | FMODE_WRITE, log, NULL); if (IS_ERR(bdev)) { rc = PTR_ERR(bdev); @@ -1141,7 +1141,7 @@ journal_found: lbmLogShutdown(log); close: /* close external log device */ - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + blkdev_put(bdev, log); free: /* free log descriptor */ mutex_unlock(&jfs_log_mutex); @@ -1485,7 +1485,7 @@ int lmLogClose(struct super_block *sb) bdev = log->bdev; rc = lmLogShutdown(log); - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + blkdev_put(bdev, log); kfree(log); diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c index 38b066ca699e..9be7f958f60e 100644 --- a/fs/nfs/blocklayout/dev.c +++ b/fs/nfs/blocklayout/dev.c @@ -35,7 +35,7 @@ bl_free_device(struct pnfs_block_dev *dev) } if (dev->bdev) - blkdev_put(dev->bdev, FMODE_READ | FMODE_WRITE); + blkdev_put(dev->bdev, NULL); } } @@ -374,7 +374,7 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d, return 0; out_blkdev_put: - blkdev_put(d->bdev, FMODE_READ | FMODE_WRITE); + blkdev_put(d->bdev, 
NULL); return error; } diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 91bfbd973d1d..61d5e79a5e81 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -1278,7 +1278,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, { struct nilfs_super_data sd; struct super_block *s; - fmode_t mode = FMODE_READ | FMODE_EXCL; + fmode_t mode = FMODE_READ; struct dentry *root_dentry; int err, s_new = false; @@ -1357,7 +1357,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, } if (!s_new) - blkdev_put(sd.bdev, mode); + blkdev_put(sd.bdev, fs_type); return root_dentry; @@ -1366,7 +1366,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, failed: if (!s_new) - blkdev_put(sd.bdev, mode); + blkdev_put(sd.bdev, fs_type); return ERR_PTR(err); } diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 6b13b8c3f2b8..c6ae9aee01ed 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -1503,7 +1503,7 @@ static void o2hb_region_release(struct config_item *item) } if (reg->hr_bdev) - blkdev_put(reg->hr_bdev, FMODE_READ|FMODE_WRITE); + blkdev_put(reg->hr_bdev, NULL); kfree(reg->hr_slots); @@ -1893,7 +1893,7 @@ static ssize_t o2hb_region_dev_store(struct config_item *item, out3: if (ret < 0) { - blkdev_put(reg->hr_bdev, FMODE_READ | FMODE_WRITE); + blkdev_put(reg->hr_bdev, NULL); reg->hr_bdev = NULL; } out2: diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 5e4db9a0c8e5..905297ea5545 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -2589,7 +2589,7 @@ static void release_journal_dev(struct super_block *super, struct reiserfs_journal *journal) { if (journal->j_dev_bd != NULL) { - blkdev_put(journal->j_dev_bd, journal->j_dev_mode); + blkdev_put(journal->j_dev_bd, journal); journal->j_dev_bd = NULL; } } @@ -2598,9 +2598,10 @@ static int journal_init_dev(struct super_block *super, struct reiserfs_journal *journal, const char *jdev_name) { + fmode_t blkdev_mode = FMODE_READ; + void *holder = journal; int result; dev_t jdev; - fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL; result = 0; @@ -2608,16 +2609,15 @@ static int journal_init_dev(struct super_block *super, jdev = SB_ONDISK_JOURNAL_DEVICE(super) ? 
new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev; - if (bdev_read_only(super->s_bdev)) - blkdev_mode = FMODE_READ; + if (!bdev_read_only(super->s_bdev)) + blkdev_mode |= FMODE_WRITE; /* there is no "jdev" option and journal is on separate device */ if ((!jdev_name || !jdev_name[0])) { if (jdev == super->s_dev) - blkdev_mode &= ~FMODE_EXCL; - journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode, - journal, NULL); - journal->j_dev_mode = blkdev_mode; + holder = NULL; + journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode, holder, + NULL); if (IS_ERR(journal->j_dev_bd)) { result = PTR_ERR(journal->j_dev_bd); journal->j_dev_bd = NULL; @@ -2631,8 +2631,7 @@ static int journal_init_dev(struct super_block *super, return 0; } - journal->j_dev_mode = blkdev_mode; - journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal, + journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, holder, NULL); if (IS_ERR(journal->j_dev_bd)) { result = PTR_ERR(journal->j_dev_bd); diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h index 1bccf6a2e908..55e85256aae8 100644 --- a/fs/reiserfs/reiserfs.h +++ b/fs/reiserfs/reiserfs.h @@ -300,7 +300,6 @@ struct reiserfs_journal { struct reiserfs_journal_cnode *j_first; struct block_device *j_dev_bd; - fmode_t j_dev_mode; /* first block on s_dev of reserved area journal */ int j_1st_reserved_block; diff --git a/fs/super.c b/fs/super.c index f127589700ab..8563794a8bc4 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1255,7 +1255,7 @@ int get_tree_bdev(struct fs_context *fc, { struct block_device *bdev; struct super_block *s; - fmode_t mode = FMODE_READ | FMODE_EXCL; + fmode_t mode = FMODE_READ; int error = 0; if (!(fc->sb_flags & SB_RDONLY)) @@ -1279,7 +1279,7 @@ int get_tree_bdev(struct fs_context *fc, if (bdev->bd_fsfreeze_count > 0) { mutex_unlock(&bdev->bd_fsfreeze_mutex); warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev); - blkdev_put(bdev, mode); + blkdev_put(bdev, fc->fs_type); return -EBUSY; } @@ -1288,7 +1288,7 @@ int get_tree_bdev(struct fs_context *fc, s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc); mutex_unlock(&bdev->bd_fsfreeze_mutex); if (IS_ERR(s)) { - blkdev_put(bdev, mode); + blkdev_put(bdev, fc->fs_type); return PTR_ERR(s); } @@ -1297,7 +1297,7 @@ int get_tree_bdev(struct fs_context *fc, if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) { warnf(fc, "%pg: Can't mount, would change RO state", bdev); deactivate_locked_super(s); - blkdev_put(bdev, mode); + blkdev_put(bdev, fc->fs_type); return -EBUSY; } @@ -1309,7 +1309,7 @@ int get_tree_bdev(struct fs_context *fc, * holding an active reference. */ up_write(&s->s_umount); - blkdev_put(bdev, mode); + blkdev_put(bdev, fc->fs_type); down_write(&s->s_umount); } else { s->s_mode = mode; @@ -1344,7 +1344,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, { struct block_device *bdev; struct super_block *s; - fmode_t mode = FMODE_READ | FMODE_EXCL; + fmode_t mode = FMODE_READ; int error = 0; if (!(flags & SB_RDONLY)) @@ -1386,7 +1386,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, * holding an active reference. 
*/ up_write(&s->s_umount); - blkdev_put(bdev, mode); + blkdev_put(bdev, fs_type); down_write(&s->s_umount); } else { s->s_mode = mode; @@ -1409,7 +1409,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, error_s: error = PTR_ERR(s); error_bdev: - blkdev_put(bdev, mode); + blkdev_put(bdev, fs_type); error: return ERR_PTR(error); } @@ -1418,13 +1418,11 @@ EXPORT_SYMBOL(mount_bdev); void kill_block_super(struct super_block *sb) { struct block_device *bdev = sb->s_bdev; - fmode_t mode = sb->s_mode; bdev->bd_super = NULL; generic_shutdown_super(sb); sync_blockdev(bdev); - WARN_ON_ONCE(!(mode & FMODE_EXCL)); - blkdev_put(bdev, mode | FMODE_EXCL); + blkdev_put(bdev, sb->s_type); } EXPORT_SYMBOL(kill_block_super); diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 1b4bd5c88f4a..3b7cf8268057 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -396,8 +396,8 @@ xfs_blkdev_get( { int error = 0; - *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, - mp, &xfs_holder_ops); + *bdevp = blkdev_get_by_path(name, FMODE_READ | FMODE_WRITE, mp, + &xfs_holder_ops); if (IS_ERR(*bdevp)) { error = PTR_ERR(*bdevp); xfs_warn(mp, "Invalid device [%s], error=%d", name, error); @@ -408,10 +408,11 @@ xfs_blkdev_get( STATIC void xfs_blkdev_put( + struct xfs_mount *mp, struct block_device *bdev) { if (bdev) - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); + blkdev_put(bdev, mp); } STATIC void @@ -422,13 +423,13 @@ xfs_close_devices( struct block_device *logdev = mp->m_logdev_targp->bt_bdev; xfs_free_buftarg(mp->m_logdev_targp); - xfs_blkdev_put(logdev); + xfs_blkdev_put(mp, logdev); } if (mp->m_rtdev_targp) { struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev; xfs_free_buftarg(mp->m_rtdev_targp); - xfs_blkdev_put(rtdev); + xfs_blkdev_put(mp, rtdev); } xfs_free_buftarg(mp->m_ddev_targp); } @@ -503,10 +504,10 @@ xfs_open_devices( out_free_ddev_targ: xfs_free_buftarg(mp->m_ddev_targp); out_close_rtdev: - xfs_blkdev_put(rtdev); + xfs_blkdev_put(mp, rtdev); out_close_logdev: if (logdev && logdev != ddev) - xfs_blkdev_put(logdev); + xfs_blkdev_put(mp, logdev); return error; } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 25bdd0cc74dc..d5b99796f12c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1480,7 +1480,7 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, int bd_prepare_to_claim(struct block_device *bdev, void *holder, const struct blk_holder_ops *hops); void bd_abort_claiming(struct block_device *bdev, void *holder); -void blkdev_put(struct block_device *bdev, fmode_t mode); +void blkdev_put(struct block_device *bdev, void *holder); /* just for blk-cgroup, don't use elsewhere */ struct block_device *blkdev_get_no_open(dev_t dev); diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 7ae95ec72f99..f62e89d0d906 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -688,22 +688,18 @@ static int load_image_and_restore(bool snapshot_test) { int error; unsigned int flags; - fmode_t mode = FMODE_READ; - - if (snapshot_test) - mode |= FMODE_EXCL; pm_pr_dbg("Loading hibernation image.\n"); lock_device_hotplug(); error = create_basic_memory_bitmaps(); if (error) { - swsusp_close(mode); + swsusp_close(snapshot_test); goto Unlock; } error = swsusp_read(&flags); - swsusp_close(mode); + swsusp_close(snapshot_test); if (!error) error = hibernation_restore(flags & SF_PLATFORM_MODE); @@ -956,7 +952,7 @@ static int software_resume(void) /* The snapshot device should not be opened while 
we're running */ if (!hibernate_acquire()) { error = -EBUSY; - swsusp_close(FMODE_READ | FMODE_EXCL); + swsusp_close(false); goto Unlock; } @@ -991,7 +987,7 @@ static int software_resume(void) pm_pr_dbg("Hibernation image not present or could not be loaded.\n"); return error; Close_Finish: - swsusp_close(FMODE_READ | FMODE_EXCL); + swsusp_close(false); goto Finish; } diff --git a/kernel/power/power.h b/kernel/power/power.h index 978189fcafd1..a8e0c44b804e 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -177,7 +177,7 @@ int swsusp_check(bool snapshot_test); extern void swsusp_free(void); extern int swsusp_read(unsigned int *flags_p); extern int swsusp_write(unsigned int flags); -extern void swsusp_close(fmode_t); +void swsusp_close(bool snapshot_test); #ifdef CONFIG_SUSPEND extern int swsusp_unmark(void); #endif diff --git a/kernel/power/swap.c b/kernel/power/swap.c index b03ff1a33c7f..cc9259307c94 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -363,7 +363,7 @@ static int swsusp_swap_check(void) res = set_blocksize(hib_resume_bdev, PAGE_SIZE); if (res < 0) - blkdev_put(hib_resume_bdev, FMODE_WRITE); + blkdev_put(hib_resume_bdev, NULL); return res; } @@ -443,7 +443,7 @@ static int get_swap_writer(struct swap_map_handle *handle) err_rel: release_swap_writer(handle); err_close: - swsusp_close(FMODE_WRITE); + swsusp_close(false); return ret; } @@ -508,7 +508,7 @@ static int swap_writer_finish(struct swap_map_handle *handle, if (error) free_all_swap_pages(root_swap); release_swap_writer(handle); - swsusp_close(FMODE_WRITE); + swsusp_close(false); return error; } @@ -1518,14 +1518,11 @@ static void *swsusp_holder; int swsusp_check(bool snapshot_test) { + void *holder = snapshot_test ? &swsusp_holder : NULL; int error; - fmode_t mode = FMODE_READ; - if (snapshot_test) - mode |= FMODE_EXCL; - - hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, - mode, &swsusp_holder, NULL); + hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, FMODE_READ, + holder, NULL); if (!IS_ERR(hib_resume_bdev)) { set_blocksize(hib_resume_bdev, PAGE_SIZE); clear_page(swsusp_header); @@ -1552,7 +1549,7 @@ int swsusp_check(bool snapshot_test) put: if (error) - blkdev_put(hib_resume_bdev, mode); + blkdev_put(hib_resume_bdev, holder); else pr_debug("Image signature found, resuming\n"); } else { @@ -1569,14 +1566,14 @@ put: * swsusp_close - close swap device. */ -void swsusp_close(fmode_t mode) +void swsusp_close(bool snapshot_test) { if (IS_ERR(hib_resume_bdev)) { pr_debug("Image device not initialised\n"); return; } - blkdev_put(hib_resume_bdev, mode); + blkdev_put(hib_resume_bdev, snapshot_test ? 
&swsusp_holder : NULL); } /** diff --git a/mm/swapfile.c b/mm/swapfile.c index cfbcf7d5705f..16554256be65 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2539,7 +2539,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) struct block_device *bdev = I_BDEV(inode); set_blocksize(bdev, old_block_size); - blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(bdev, p); } inode_lock(inode); @@ -2770,8 +2770,7 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) if (S_ISBLK(inode->i_mode)) { p->bdev = blkdev_get_by_dev(inode->i_rdev, - FMODE_READ | FMODE_WRITE | FMODE_EXCL, p, - NULL); + FMODE_READ | FMODE_WRITE, p, NULL); if (IS_ERR(p->bdev)) { error = PTR_ERR(p->bdev); p->bdev = NULL; @@ -3222,7 +3221,7 @@ bad_swap: p->cluster_next_cpu = NULL; if (inode && S_ISBLK(inode->i_mode) && p->bdev) { set_blocksize(p->bdev, p->old_block_size); - blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); + blkdev_put(p->bdev, p); } inode = NULL; destroy_swap_extents(p); -- cgit v1.2.3 From 05bdb9965305bbfdae79b31d22df03d1e2cfcb22 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 8 Jun 2023 13:02:55 +0200 Subject: block: replace fmode_t with a block-specific type for block open flags The only overlap between the block open flags mapped into the fmode_t and other uses of fmode_t are FMODE_READ and FMODE_WRITE. Define a new blk_mode_t instead for use in blkdev_get_by_{dev,path}, ->open and ->ioctl and stop abusing fmode_t. Signed-off-by: Christoph Hellwig Acked-by: Jack Wang [rnbd] Reviewed-by: Hannes Reinecke Reviewed-by: Christian Brauner Link: https://lore.kernel.org/r/20230608110258.189493-28-hch@lst.de Signed-off-by: Jens Axboe --- arch/um/drivers/ubd_kern.c | 8 +++---- arch/xtensa/platforms/iss/simdisk.c | 2 +- block/bdev.c | 32 ++++++++++++++-------------- block/blk-zoned.c | 8 +++---- block/blk.h | 11 +++++----- block/fops.c | 32 ++++++++++++++++++++++------ block/genhd.c | 8 +++---- block/ioctl.c | 42 +++++++++++-------------------------- drivers/block/amiflop.c | 12 +++++------ drivers/block/aoe/aoeblk.c | 4 ++-- drivers/block/ataflop.c | 25 +++++++++++----------- drivers/block/drbd/drbd_main.c | 7 ++++--- drivers/block/drbd/drbd_nl.c | 2 +- drivers/block/floppy.c | 28 ++++++++++++------------- drivers/block/loop.c | 22 +++++++++---------- drivers/block/mtip32xx/mtip32xx.c | 4 ++-- drivers/block/nbd.c | 4 ++-- drivers/block/pktcdvd.c | 17 ++++++++------- drivers/block/rbd.c | 2 +- drivers/block/rnbd/rnbd-clt.c | 4 ++-- drivers/block/rnbd/rnbd-srv.c | 4 ++-- drivers/block/sunvdc.c | 2 +- drivers/block/swim.c | 16 +++++++------- drivers/block/swim3.c | 24 ++++++++++----------- drivers/block/ublk_drv.c | 2 +- drivers/block/xen-blkback/xenbus.c | 2 +- drivers/block/xen-blkfront.c | 2 +- drivers/block/z2ram.c | 2 +- drivers/block/zram/zram_drv.c | 6 +++--- drivers/cdrom/cdrom.c | 6 +++--- drivers/cdrom/gdrom.c | 4 ++-- drivers/md/bcache/bcache.h | 2 +- drivers/md/bcache/request.c | 4 ++-- drivers/md/bcache/super.c | 6 +++--- drivers/md/dm-cache-target.c | 12 +++++------ drivers/md/dm-clone-target.c | 10 ++++----- drivers/md/dm-core.h | 7 +++---- drivers/md/dm-era-target.c | 6 ++++-- drivers/md/dm-ioctl.c | 10 ++++----- drivers/md/dm-snap.c | 4 ++-- drivers/md/dm-table.c | 11 +++++----- drivers/md/dm-thin.c | 9 ++++---- drivers/md/dm-verity-fec.c | 2 +- drivers/md/dm-verity-target.c | 6 +++--- drivers/md/dm.c | 10 ++++----- drivers/md/dm.h | 2 +- drivers/md/md.c | 8 +++---- drivers/mmc/core/block.c | 8 +++---- 
drivers/mtd/devices/block2mtd.c | 4 ++-- drivers/mtd/mtd_blkdevs.c | 4 ++-- drivers/mtd/ubi/block.c | 5 ++--- drivers/nvme/host/core.c | 2 +- drivers/nvme/host/ioctl.c | 8 +++---- drivers/nvme/host/multipath.c | 2 +- drivers/nvme/host/nvme.h | 4 ++-- drivers/nvme/target/io-cmd-bdev.c | 2 +- drivers/s390/block/dasd.c | 6 ++---- drivers/s390/block/dasd_genhd.c | 3 ++- drivers/s390/block/dasd_int.h | 3 ++- drivers/s390/block/dasd_ioctl.c | 2 +- drivers/s390/block/dcssblk.c | 4 ++-- drivers/scsi/sd.c | 19 +++++++++-------- drivers/scsi/sr.c | 10 ++++----- drivers/target/target_core_iblock.c | 5 ++--- drivers/target/target_core_pscsi.c | 4 ++-- fs/btrfs/dev-replace.c | 2 +- fs/btrfs/super.c | 8 +++---- fs/btrfs/volumes.c | 16 +++++++------- fs/btrfs/volumes.h | 4 ++-- fs/erofs/super.c | 2 +- fs/ext4/super.c | 2 +- fs/f2fs/super.c | 2 +- fs/jfs/jfs_logmgr.c | 2 +- fs/nfs/blocklayout/dev.c | 5 +++-- fs/ocfs2/cluster/heartbeat.c | 3 ++- fs/reiserfs/journal.c | 4 ++-- fs/xfs/xfs_super.c | 2 +- include/linux/blkdev.h | 30 +++++++++++++++++++------- include/linux/cdrom.h | 3 ++- include/linux/device-mapper.h | 8 +++---- kernel/power/swap.c | 6 +++--- mm/swapfile.c | 2 +- 82 files changed, 334 insertions(+), 315 deletions(-) (limited to 'mm') diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 20c1a16199c5..50206feac577 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -108,9 +108,9 @@ static inline void ubd_set_bit(__u64 bit, unsigned char *data) static DEFINE_MUTEX(ubd_lock); static DEFINE_MUTEX(ubd_mutex); /* replaces BKL, might not be needed */ -static int ubd_open(struct gendisk *disk, fmode_t mode); +static int ubd_open(struct gendisk *disk, blk_mode_t mode); static void ubd_release(struct gendisk *disk); -static int ubd_ioctl(struct block_device *bdev, fmode_t mode, +static int ubd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg); static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo); @@ -1154,7 +1154,7 @@ static int __init ubd_driver_init(void){ device_initcall(ubd_driver_init); -static int ubd_open(struct gendisk *disk, fmode_t mode) +static int ubd_open(struct gendisk *disk, blk_mode_t mode) { struct ubd *ubd_dev = disk->private_data; int err = 0; @@ -1389,7 +1389,7 @@ static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -static int ubd_ioctl(struct block_device *bdev, fmode_t mode, +static int ubd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct ubd *ubd_dev = bdev->bd_disk->private_data; diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c index 2ad9da3de0d9..178cf96ca10a 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c @@ -120,7 +120,7 @@ static void simdisk_submit_bio(struct bio *bio) bio_endio(bio); } -static int simdisk_open(struct gendisk *disk, fmode_t mode) +static int simdisk_open(struct gendisk *disk, blk_mode_t mode) { struct simdisk *dev = disk->private_data; diff --git a/block/bdev.c b/block/bdev.c index db63e5bcc46f..bd558a9ba3cd 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -93,7 +93,7 @@ EXPORT_SYMBOL(invalidate_bdev); * Drop all buffers & page cache for given bdev range. This function bails * with error if bdev has other exclusive owner (such as filesystem). 
*/ -int truncate_bdev_range(struct block_device *bdev, fmode_t mode, +int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode, loff_t lstart, loff_t lend) { /* @@ -101,14 +101,14 @@ int truncate_bdev_range(struct block_device *bdev, fmode_t mode, * while we discard the buffer cache to avoid discarding buffers * under live filesystem. */ - if (!(mode & FMODE_EXCL)) { + if (!(mode & BLK_OPEN_EXCL)) { int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL); if (err) goto invalidate; } truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend); - if (!(mode & FMODE_EXCL)) + if (!(mode & BLK_OPEN_EXCL)) bd_abort_claiming(bdev, truncate_bdev_range); return 0; @@ -647,7 +647,7 @@ static void blkdev_flush_mapping(struct block_device *bdev) bdev_write_inode(bdev); } -static int blkdev_get_whole(struct block_device *bdev, fmode_t mode) +static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode) { struct gendisk *disk = bdev->bd_disk; int ret; @@ -679,7 +679,7 @@ static void blkdev_put_whole(struct block_device *bdev) bdev->bd_disk->fops->release(bdev->bd_disk); } -static int blkdev_get_part(struct block_device *part, fmode_t mode) +static int blkdev_get_part(struct block_device *part, blk_mode_t mode) { struct gendisk *disk = part->bd_disk; int ret; @@ -743,11 +743,11 @@ void blkdev_put_no_open(struct block_device *bdev) { put_device(&bdev->bd_device); } - + /** * blkdev_get_by_dev - open a block device by device number * @dev: device number of block device to open - * @mode: FMODE_* mask + * @mode: open mode (BLK_OPEN_*) * @holder: exclusive holder identifier * @hops: holder operations * @@ -765,7 +765,7 @@ void blkdev_put_no_open(struct block_device *bdev) * RETURNS: * Reference to the block_device on success, ERR_PTR(-errno) on failure. */ -struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder, +struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops) { bool unblock_events = true; @@ -775,8 +775,8 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder, ret = devcgroup_check_permission(DEVCG_DEV_BLOCK, MAJOR(dev), MINOR(dev), - ((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) | - ((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0)); + ((mode & BLK_OPEN_READ) ? DEVCG_ACC_READ : 0) | + ((mode & BLK_OPEN_WRITE) ? DEVCG_ACC_WRITE : 0)); if (ret) return ERR_PTR(ret); @@ -786,12 +786,12 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder, disk = bdev->bd_disk; if (holder) { - mode |= FMODE_EXCL; + mode |= BLK_OPEN_EXCL; ret = bd_prepare_to_claim(bdev, holder, hops); if (ret) goto put_blkdev; } else { - if (WARN_ON_ONCE(mode & FMODE_EXCL)) { + if (WARN_ON_ONCE(mode & BLK_OPEN_EXCL)) { ret = -EIO; goto put_blkdev; } @@ -821,7 +821,7 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder, * writeable reference is too fragile given the way @mode is * used in blkdev_get/put(). */ - if ((mode & FMODE_WRITE) && !bdev->bd_write_holder && + if ((mode & BLK_OPEN_WRITE) && !bdev->bd_write_holder && (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) { bdev->bd_write_holder = true; unblock_events = false; @@ -848,7 +848,7 @@ EXPORT_SYMBOL(blkdev_get_by_dev); /** * blkdev_get_by_path - open a block device by name * @path: path to the block device to open - * @mode: FMODE_* mask + * @mode: open mode (BLK_OPEN_*) * @holder: exclusive holder identifier * * Open the block device described by the device file at @path. 
If @holder is @@ -861,7 +861,7 @@ EXPORT_SYMBOL(blkdev_get_by_dev); * RETURNS: * Reference to the block_device on success, ERR_PTR(-errno) on failure. */ -struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, +struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops) { struct block_device *bdev; @@ -873,7 +873,7 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, return ERR_PTR(error); bdev = blkdev_get_by_dev(dev, mode, holder, hops); - if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { + if (!IS_ERR(bdev) && (mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) { blkdev_put(bdev, holder); return ERR_PTR(-EACCES); } diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 02cc2c629ac9..0f9f97cdddd9 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -356,8 +356,8 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd, return 0; } -static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode, - const struct blk_zone_range *zrange) +static int blkdev_truncate_zone_range(struct block_device *bdev, + blk_mode_t mode, const struct blk_zone_range *zrange) { loff_t start, end; @@ -376,7 +376,7 @@ static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode, * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing. * Called from blkdev_ioctl. */ -int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode, +int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; @@ -390,7 +390,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode, if (!bdev_is_zoned(bdev)) return -ENOTTY; - if (!(mode & FMODE_WRITE)) + if (!(mode & BLK_OPEN_WRITE)) return -EBADF; if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range))) diff --git a/block/blk.h b/block/blk.h index e28d5d67d31a..768852a84fef 100644 --- a/block/blk.h +++ b/block/blk.h @@ -396,7 +396,7 @@ void disk_free_zone_bitmaps(struct gendisk *disk); void disk_clear_zone_settings(struct gendisk *disk); int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg); -int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode, +int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg); #else /* CONFIG_BLK_DEV_ZONED */ static inline void disk_free_zone_bitmaps(struct gendisk *disk) {} @@ -407,7 +407,7 @@ static inline int blkdev_report_zones_ioctl(struct block_device *bdev, return -ENOTTY; } static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev, - fmode_t mode, unsigned int cmd, unsigned long arg) + blk_mode_t mode, unsigned int cmd, unsigned long arg) { return -ENOTTY; } @@ -451,7 +451,7 @@ static inline void bio_release_page(struct bio *bio, struct page *page) struct request_queue *blk_alloc_queue(int node_id); -int disk_scan_partitions(struct gendisk *disk, fmode_t mode); +int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode); int disk_alloc_events(struct gendisk *disk); void disk_add_events(struct gendisk *disk); @@ -466,8 +466,9 @@ extern struct device_attribute dev_attr_events_poll_msecs; extern struct attribute_group blk_trace_attr_group; -int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart, - loff_t lend); +blk_mode_t file_to_blk_mode(struct file *file); +int truncate_bdev_range(struct 
block_device *bdev, blk_mode_t mode, + loff_t lstart, loff_t lend); long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg); long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg); diff --git a/block/fops.c b/block/fops.c index 9f26e25bafa1..086612103b9d 100644 --- a/block/fops.c +++ b/block/fops.c @@ -470,6 +470,30 @@ static int blkdev_fsync(struct file *filp, loff_t start, loff_t end, return error; } +blk_mode_t file_to_blk_mode(struct file *file) +{ + blk_mode_t mode = 0; + + if (file->f_mode & FMODE_READ) + mode |= BLK_OPEN_READ; + if (file->f_mode & FMODE_WRITE) + mode |= BLK_OPEN_WRITE; + if (file->f_mode & FMODE_EXCL) + mode |= BLK_OPEN_EXCL; + if (file->f_flags & O_NDELAY) + mode |= BLK_OPEN_NDELAY; + + /* + * If all bits in O_ACCMODE set (aka O_RDWR | O_WRONLY), the floppy + * driver has historically allowed ioctls as if the file was opened for + * writing, but does not allow and actual reads or writes. + */ + if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY)) + mode |= BLK_OPEN_WRITE_IOCTL; + + return mode; +} + static int blkdev_open(struct inode *inode, struct file *filp) { struct block_device *bdev; @@ -483,14 +507,10 @@ static int blkdev_open(struct inode *inode, struct file *filp) filp->f_flags |= O_LARGEFILE; filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC; - if (filp->f_flags & O_NDELAY) - filp->f_mode |= FMODE_NDELAY; if (filp->f_flags & O_EXCL) filp->f_mode |= FMODE_EXCL; - if ((filp->f_flags & O_ACCMODE) == 3) - filp->f_mode |= FMODE_WRITE_IOCTL; - bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, + bdev = blkdev_get_by_dev(inode->i_rdev, file_to_blk_mode(filp), (filp->f_mode & FMODE_EXCL) ? filp : NULL, NULL); if (IS_ERR(bdev)) @@ -648,7 +668,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start, filemap_invalidate_lock(inode->i_mapping); /* Invalidate the page cache, including dirty pages. */ - error = truncate_bdev_range(bdev, file->f_mode, start, end); + error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end); if (error) goto fail; diff --git a/block/genhd.c b/block/genhd.c index b56f8b5c88b3..2c2f9a716822 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -339,7 +339,7 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action) } EXPORT_SYMBOL_GPL(disk_uevent); -int disk_scan_partitions(struct gendisk *disk, fmode_t mode) +int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode) { struct block_device *bdev; int ret = 0; @@ -357,7 +357,7 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode) * synchronize with other exclusive openers and other partition * scanners. */ - if (!(mode & FMODE_EXCL)) { + if (!(mode & BLK_OPEN_EXCL)) { ret = bd_prepare_to_claim(disk->part0, disk_scan_partitions, NULL); if (ret) @@ -377,7 +377,7 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode) * creat partition for underlying disk. 
*/ clear_bit(GD_NEED_PART_SCAN, &disk->state); - if (!(mode & FMODE_EXCL)) + if (!(mode & BLK_OPEN_EXCL)) bd_abort_claiming(disk->part0, disk_scan_partitions); return ret; } @@ -505,7 +505,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk, bdev_add(disk->part0, ddev->devt); if (get_capacity(disk)) - disk_scan_partitions(disk, FMODE_READ); + disk_scan_partitions(disk, BLK_OPEN_READ); /* * Announce the disk and partitions after all partitions are diff --git a/block/ioctl.c b/block/ioctl.c index 3a10c34b8ef6..61bb94fd4281 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -82,7 +82,7 @@ static int compat_blkpg_ioctl(struct block_device *bdev, } #endif -static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode, +static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode, unsigned long arg) { uint64_t range[2]; @@ -90,7 +90,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode, struct inode *inode = bdev->bd_inode; int err; - if (!(mode & FMODE_WRITE)) + if (!(mode & BLK_OPEN_WRITE)) return -EBADF; if (!bdev_max_discard_sectors(bdev)) @@ -120,14 +120,14 @@ fail: return err; } -static int blk_ioctl_secure_erase(struct block_device *bdev, fmode_t mode, +static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode, void __user *argp) { uint64_t start, len; uint64_t range[2]; int err; - if (!(mode & FMODE_WRITE)) + if (!(mode & BLK_OPEN_WRITE)) return -EBADF; if (!bdev_max_secure_erase_sectors(bdev)) return -EOPNOTSUPP; @@ -151,7 +151,7 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, fmode_t mode, } -static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode, +static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode, unsigned long arg) { uint64_t range[2]; @@ -159,7 +159,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode, struct inode *inode = bdev->bd_inode; int err; - if (!(mode & FMODE_WRITE)) + if (!(mode & BLK_OPEN_WRITE)) return -EBADF; if (copy_from_user(range, (void __user *)arg, sizeof(range))) @@ -240,7 +240,7 @@ static int compat_put_ulong(compat_ulong_t __user *argp, compat_ulong_t val) * drivers that implement only commands that are completely compatible * between 32-bit and 64-bit user space */ -int blkdev_compat_ptr_ioctl(struct block_device *bdev, fmode_t mode, +int blkdev_compat_ptr_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned cmd, unsigned long arg) { struct gendisk *disk = bdev->bd_disk; @@ -439,7 +439,7 @@ static int compat_hdio_getgeo(struct block_device *bdev, #endif /* set the logical block size */ -static int blkdev_bszset(struct block_device *bdev, fmode_t mode, +static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode, int __user *argp) { int ret, n; @@ -451,7 +451,7 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode, if (get_user(n, argp)) return -EFAULT; - if (mode & FMODE_EXCL) + if (mode & BLK_OPEN_EXCL) return set_blocksize(bdev, n); if (IS_ERR(blkdev_get_by_dev(bdev->bd_dev, mode, &bdev, NULL))) @@ -467,7 +467,7 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode, * user space. Note the separate arg/argp parameters that are needed * to deal with the compat_ptr() conversion. 
*/ -static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode, +static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg, void __user *argp) { @@ -560,18 +560,9 @@ long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct block_device *bdev = I_BDEV(file->f_mapping->host); void __user *argp = (void __user *)arg; - fmode_t mode = file->f_mode; + blk_mode_t mode = file_to_blk_mode(file); int ret; - /* - * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have - * to updated it before every ioctl. - */ - if (file->f_flags & O_NDELAY) - mode |= FMODE_NDELAY; - else - mode &= ~FMODE_NDELAY; - switch (cmd) { /* These need separate implementations for the data structure */ case HDIO_GETGEO: @@ -630,16 +621,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) void __user *argp = compat_ptr(arg); struct block_device *bdev = I_BDEV(file->f_mapping->host); struct gendisk *disk = bdev->bd_disk; - fmode_t mode = file->f_mode; - - /* - * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have - * to updated it before every ioctl. - */ - if (file->f_flags & O_NDELAY) - mode |= FMODE_NDELAY; - else - mode &= ~FMODE_NDELAY; + blk_mode_t mode = file_to_blk_mode(file); switch (cmd) { /* These need separate implementations for the data structure */ diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 9a0e9dc74a8c..e460c9799d9f 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1532,7 +1532,7 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, +static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param) { struct amiga_floppy_struct *p = bdev->bd_disk->private_data; @@ -1607,7 +1607,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, return 0; } -static int fd_ioctl(struct block_device *bdev, fmode_t mode, +static int fd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param) { int ret; @@ -1654,7 +1654,7 @@ static void fd_probe(int dev) * /dev/PS0 etc), and disallows simultaneous access to the same * drive with different device numbers. 
*/ -static int floppy_open(struct gendisk *disk, fmode_t mode) +static int floppy_open(struct gendisk *disk, blk_mode_t mode) { int drive = disk->first_minor & 3; int system = (disk->first_minor & 4) >> 2; @@ -1673,10 +1673,9 @@ static int floppy_open(struct gendisk *disk, fmode_t mode) mutex_unlock(&amiflop_mutex); return -ENXIO; } - - if (mode & (FMODE_READ|FMODE_WRITE)) { + if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) { disk_check_media_change(disk); - if (mode & FMODE_WRITE) { + if (mode & BLK_OPEN_WRITE) { int wrprot; get_fdc(drive); @@ -1691,7 +1690,6 @@ static int floppy_open(struct gendisk *disk, fmode_t mode) } } } - local_irq_save(flags); fd_ref[drive]++; fd_device[drive] = system; diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index c3a39e02ab95..cf6883756155 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -204,7 +204,7 @@ aoedisk_rm_debugfs(struct aoedev *d) } static int -aoeblk_open(struct gendisk *disk, fmode_t mode) +aoeblk_open(struct gendisk *disk, blk_mode_t mode) { struct aoedev *d = disk->private_data; ulong flags; @@ -285,7 +285,7 @@ aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo) } static int -aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg) +aoeblk_ioctl(struct block_device *bdev, blk_mode_t mode, uint cmd, ulong arg) { struct aoedev *d; diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 66a3242bb062..cd738cab725f 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -442,12 +442,12 @@ static void fd_times_out(struct timer_list *unused); static void finish_fdc( void ); static void finish_fdc_done( int dummy ); static void setup_req_params( int drive ); -static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int - cmd, unsigned long param); +static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, unsigned long param); static void fd_probe( int drive ); static int fd_test_drive_present( int drive ); static void config_types( void ); -static int floppy_open(struct gendisk *disk, fmode_t mode); +static int floppy_open(struct gendisk *disk, blk_mode_t mode); static void floppy_release(struct gendisk *disk); /************************* End of Prototypes **************************/ @@ -1581,7 +1581,7 @@ out: return BLK_STS_OK; } -static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, +static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param) { struct gendisk *disk = bdev->bd_disk; @@ -1768,7 +1768,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, } } -static int fd_ioctl(struct block_device *bdev, fmode_t mode, +static int fd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { int ret; @@ -1915,7 +1915,7 @@ static void __init config_types( void ) * drive with different device numbers. 
*/ -static int floppy_open(struct gendisk *disk, fmode_t mode) +static int floppy_open(struct gendisk *disk, blk_mode_t mode) { struct atari_floppy_struct *p = disk->private_data; int type = disk->first_minor >> 2; @@ -1924,23 +1924,22 @@ static int floppy_open(struct gendisk *disk, fmode_t mode) if (p->ref && p->type != type) return -EBUSY; - if (p->ref == -1 || (p->ref && mode & FMODE_EXCL)) + if (p->ref == -1 || (p->ref && mode & BLK_OPEN_EXCL)) return -EBUSY; - - if (mode & FMODE_EXCL) + if (mode & BLK_OPEN_EXCL) p->ref = -1; else p->ref++; p->type = type; - if (mode & FMODE_NDELAY) + if (mode & BLK_OPEN_NDELAY) return 0; - if (mode & (FMODE_READ|FMODE_WRITE)) { + if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) { if (disk_check_media_change(disk)) floppy_revalidate(disk); - if (mode & FMODE_WRITE) { + if (mode & BLK_OPEN_WRITE) { if (p->wpstat) { if (p->ref < 0) p->ref = 0; @@ -1953,7 +1952,7 @@ static int floppy_open(struct gendisk *disk, fmode_t mode) return 0; } -static int floppy_unlocked_open(struct gendisk *disk, fmode_t mode) +static int floppy_unlocked_open(struct gendisk *disk, blk_mode_t mode) { int ret; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 7f3d7ca6ce6b..965f672557f2 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -49,7 +49,7 @@ #include "drbd_debugfs.h" static DEFINE_MUTEX(drbd_main_mutex); -static int drbd_open(struct gendisk *disk, fmode_t mode); +static int drbd_open(struct gendisk *disk, blk_mode_t mode); static void drbd_release(struct gendisk *gd); static void md_sync_timer_fn(struct timer_list *t); static int w_bitmap_io(struct drbd_work *w, int unused); @@ -1882,7 +1882,7 @@ int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void return 0; } -static int drbd_open(struct gendisk *disk, fmode_t mode) +static int drbd_open(struct gendisk *disk, blk_mode_t mode) { struct drbd_device *device = disk->private_data; unsigned long flags; @@ -1894,7 +1894,7 @@ static int drbd_open(struct gendisk *disk, fmode_t mode) * and no race with updating open_cnt */ if (device->state.role != R_PRIMARY) { - if (mode & FMODE_WRITE) + if (mode & BLK_OPEN_WRITE) rv = -EROFS; else if (!drbd_allow_oos) rv = -EMEDIUMTYPE; @@ -1911,6 +1911,7 @@ static int drbd_open(struct gendisk *disk, fmode_t mode) static void drbd_release(struct gendisk *gd) { struct drbd_device *device = gd->private_data; + mutex_lock(&drbd_main_mutex); device->open_cnt--; mutex_unlock(&drbd_main_mutex); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 10b1e5171332..cddae6f4b00f 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1640,7 +1640,7 @@ static struct block_device *open_backing_dev(struct drbd_device *device, struct block_device *bdev; int err = 0; - bdev = blkdev_get_by_path(bdev_path, FMODE_READ | FMODE_WRITE, + bdev = blkdev_get_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE, claim_ptr, NULL); if (IS_ERR(bdev)) { drbd_err(device, "open(\"%s\") failed with %ld\n", diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index d79fac288a73..2db9b186b977 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3394,8 +3394,8 @@ static bool valid_floppy_drive_params(const short autodetect[FD_AUTODETECT_SIZE] return true; } -static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, - unsigned long param) +static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, unsigned long 
param) { int drive = (long)bdev->bd_disk->private_data; int type = ITYPE(drive_state[drive].fd_device); @@ -3428,7 +3428,8 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int return ret; /* permission checks */ - if (((cmd & 0x40) && !(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL))) || + if (((cmd & 0x40) && + !(mode & (BLK_OPEN_WRITE | BLK_OPEN_WRITE_IOCTL))) || ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))) return -EPERM; @@ -3566,7 +3567,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int return 0; } -static int fd_ioctl(struct block_device *bdev, fmode_t mode, +static int fd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param) { int ret; @@ -3654,8 +3655,8 @@ struct compat_floppy_write_errors { #define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state) #define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors) -static int compat_set_geometry(struct block_device *bdev, fmode_t mode, unsigned int cmd, - struct compat_floppy_struct __user *arg) +static int compat_set_geometry(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, struct compat_floppy_struct __user *arg) { struct floppy_struct v; int drive, type; @@ -3664,7 +3665,7 @@ static int compat_set_geometry(struct block_device *bdev, fmode_t mode, unsigned BUILD_BUG_ON(offsetof(struct floppy_struct, name) != offsetof(struct compat_floppy_struct, name)); - if (!(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL))) + if (!(mode & (BLK_OPEN_WRITE | BLK_OPEN_WRITE_IOCTL))) return -EPERM; memset(&v, 0, sizeof(struct floppy_struct)); @@ -3861,8 +3862,8 @@ static int compat_werrorget(int drive, return 0; } -static int fd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, - unsigned long param) +static int fd_compat_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, unsigned long param) { int drive = (long)bdev->bd_disk->private_data; switch (cmd) { @@ -3984,7 +3985,7 @@ static void floppy_release(struct gendisk *disk) * /dev/PS0 etc), and disallows simultaneous access to the same * drive with different device numbers. 
*/ -static int floppy_open(struct gendisk *disk, fmode_t mode) +static int floppy_open(struct gendisk *disk, blk_mode_t mode) { int drive = (long)disk->private_data; int old_dev, new_dev; @@ -4049,9 +4050,8 @@ static int floppy_open(struct gendisk *disk, fmode_t mode) if (fdc_state[FDC(drive)].rawcmd == 1) fdc_state[FDC(drive)].rawcmd = 2; - - if (!(mode & FMODE_NDELAY)) { - if (mode & (FMODE_READ|FMODE_WRITE)) { + if (!(mode & BLK_OPEN_NDELAY)) { + if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) { drive_state[drive].last_checked = 0; clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags); @@ -4063,7 +4063,7 @@ static int floppy_open(struct gendisk *disk, fmode_t mode) goto out; } res = -EROFS; - if ((mode & FMODE_WRITE) && + if ((mode & BLK_OPEN_WRITE) && !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags)) goto out; } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index ca40d24572ae..37511d2b2caf 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -990,7 +990,7 @@ loop_set_status_from_info(struct loop_device *lo, return 0; } -static int loop_configure(struct loop_device *lo, fmode_t mode, +static int loop_configure(struct loop_device *lo, blk_mode_t mode, struct block_device *bdev, const struct loop_config *config) { @@ -1014,7 +1014,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, * If we don't hold exclusive handle for the device, upgrade to it * here to avoid changing device under exclusive owner. */ - if (!(mode & FMODE_EXCL)) { + if (!(mode & BLK_OPEN_EXCL)) { error = bd_prepare_to_claim(bdev, loop_configure, NULL); if (error) goto out_putf; @@ -1050,7 +1050,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, if (error) goto out_unlock; - if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || + if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) || !file->f_op->write_iter) lo->lo_flags |= LO_FLAGS_READ_ONLY; @@ -1116,7 +1116,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, if (partscan) loop_reread_partitions(lo); - if (!(mode & FMODE_EXCL)) + if (!(mode & BLK_OPEN_EXCL)) bd_abort_claiming(bdev, loop_configure); return 0; @@ -1124,7 +1124,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, out_unlock: loop_global_unlock(lo, is_loop); out_bdev: - if (!(mode & FMODE_EXCL)) + if (!(mode & BLK_OPEN_EXCL)) bd_abort_claiming(bdev, loop_configure); out_putf: fput(file); @@ -1528,7 +1528,7 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, return err; } -static int lo_ioctl(struct block_device *bdev, fmode_t mode, +static int lo_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct loop_device *lo = bdev->bd_disk->private_data; @@ -1563,24 +1563,22 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, return loop_clr_fd(lo); case LOOP_SET_STATUS: err = -EPERM; - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { + if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN)) err = loop_set_status_old(lo, argp); - } break; case LOOP_GET_STATUS: return loop_get_status_old(lo, argp); case LOOP_SET_STATUS64: err = -EPERM; - if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { + if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN)) err = loop_set_status64(lo, argp); - } break; case LOOP_GET_STATUS64: return loop_get_status64(lo, argp); case LOOP_SET_CAPACITY: case LOOP_SET_DIRECT_IO: case LOOP_SET_BLOCK_SIZE: - if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN)) + if (!(mode & BLK_OPEN_WRITE) && 
!capable(CAP_SYS_ADMIN)) return -EPERM; fallthrough; default: @@ -1691,7 +1689,7 @@ loop_get_status_compat(struct loop_device *lo, return err; } -static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, +static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct loop_device *lo = bdev->bd_disk->private_data; diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 815d77ba6381..b200950e8fb5 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3041,7 +3041,7 @@ static int rssd_disk_name_format(char *prefix, * structure pointer. */ static int mtip_block_ioctl(struct block_device *dev, - fmode_t mode, + blk_mode_t mode, unsigned cmd, unsigned long arg) { @@ -3079,7 +3079,7 @@ static int mtip_block_ioctl(struct block_device *dev, * structure pointer. */ static int mtip_block_compat_ioctl(struct block_device *dev, - fmode_t mode, + blk_mode_t mode, unsigned cmd, unsigned long arg) { diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index cfb835238684..8576d696c7a2 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1502,7 +1502,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, return -ENOTTY; } -static int nbd_ioctl(struct block_device *bdev, fmode_t mode, +static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct nbd_device *nbd = bdev->bd_disk->private_data; @@ -1553,7 +1553,7 @@ static struct nbd_config *nbd_alloc_config(void) return config; } -static int nbd_open(struct gendisk *disk, fmode_t mode) +static int nbd_open(struct gendisk *disk, blk_mode_t mode) { struct nbd_device *nbd; int ret = 0; diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index c3299e49edd5..a1428538bda5 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2154,7 +2154,7 @@ static int pkt_open_write(struct pktcdvd_device *pd) /* * called at open time. */ -static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) +static int pkt_open_dev(struct pktcdvd_device *pd, bool write) { struct device *ddev = disk_to_dev(pd->disk); int ret; @@ -2167,7 +2167,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) * to read/write from/to it. It is already opened in O_NONBLOCK mode * so open should not fail. 
*/ - bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ, pd, NULL); + bdev = blkdev_get_by_dev(pd->bdev->bd_dev, BLK_OPEN_READ, pd, NULL); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); goto out; @@ -2247,7 +2247,7 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor) return pkt_devs[dev_minor]; } -static int pkt_open(struct gendisk *disk, fmode_t mode) +static int pkt_open(struct gendisk *disk, blk_mode_t mode) { struct pktcdvd_device *pd = NULL; int ret; @@ -2263,13 +2263,13 @@ static int pkt_open(struct gendisk *disk, fmode_t mode) pd->refcnt++; if (pd->refcnt > 1) { - if ((mode & FMODE_WRITE) && + if ((mode & BLK_OPEN_WRITE) && !test_bit(PACKET_WRITABLE, &pd->flags)) { ret = -EBUSY; goto out_dec; } } else { - ret = pkt_open_dev(pd, mode & FMODE_WRITE); + ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE); if (ret) goto out_dec; /* @@ -2278,7 +2278,6 @@ static int pkt_open(struct gendisk *disk, fmode_t mode) */ set_blocksize(disk->part0, CD_FRAMESIZE); } - mutex_unlock(&ctl_mutex); mutex_unlock(&pktcdvd_mutex); return 0; @@ -2514,7 +2513,8 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) } } - bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL, NULL); + bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY, NULL, + NULL); if (IS_ERR(bdev)) return PTR_ERR(bdev); sdev = scsi_device_from_queue(bdev->bd_disk->queue); @@ -2550,7 +2550,8 @@ out_mem: return -ENOMEM; } -static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) +static int pkt_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned int cmd, unsigned long arg) { struct pktcdvd_device *pd = bdev->bd_disk->private_data; struct device *ddev = disk_to_dev(pd->disk); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 5215eff94fe9..39f2903fe25f 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -660,7 +660,7 @@ static bool pending_result_dec(struct pending_result *pending, int *result) return true; } -static int rbd_open(struct gendisk *disk, fmode_t mode) +static int rbd_open(struct gendisk *disk, blk_mode_t mode) { struct rbd_device *rbd_dev = disk->private_data; bool removing = false; diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index d5261d36d786..b0550b68645d 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -921,11 +921,11 @@ rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first) return sess; } -static int rnbd_client_open(struct gendisk *disk, fmode_t mode) +static int rnbd_client_open(struct gendisk *disk, blk_mode_t mode) { struct rnbd_clt_dev *dev = disk->private_data; - if (get_disk_ro(dev->gd) && (mode & FMODE_WRITE)) + if (get_disk_ro(dev->gd) && (mode & BLK_OPEN_WRITE)) return -EPERM; if (dev->dev_state == DEV_STATE_UNMAPPED || diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c index 591a1be370c4..c186df0ec641 100644 --- a/drivers/block/rnbd/rnbd-srv.c +++ b/drivers/block/rnbd/rnbd-srv.c @@ -677,14 +677,14 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess, struct rnbd_srv_sess_dev *srv_sess_dev; const struct rnbd_msg_open *open_msg = msg; struct block_device *bdev; - fmode_t open_flags = FMODE_READ; + blk_mode_t open_flags = BLK_OPEN_READ; char *full_path; struct rnbd_msg_open_rsp *rsp = data; trace_process_msg_open(srv_sess, open_msg); if (open_msg->access_mode != RNBD_ACCESS_RO) - open_flags |= FMODE_WRITE; + open_flags |= BLK_OPEN_WRITE; mutex_lock(&srv_sess->lock); diff --git 
a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 9fa821fa76b0..7bf4b48e2282 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -139,7 +139,7 @@ static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo) * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD. * Needed to be able to install inside an ldom from an iso image. */ -static int vdc_ioctl(struct block_device *bdev, fmode_t mode, +static int vdc_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned command, unsigned long argument) { struct vdc_port *port = bdev->bd_disk->private_data; diff --git a/drivers/block/swim.c b/drivers/block/swim.c index a629b38dec66..651009b3a601 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -608,20 +608,18 @@ static void setup_medium(struct floppy_state *fs) } } -static int floppy_open(struct gendisk *disk, fmode_t mode) +static int floppy_open(struct gendisk *disk, blk_mode_t mode) { struct floppy_state *fs = disk->private_data; struct swim __iomem *base = fs->swd->base; int err; - if (fs->ref_count == -1 || (fs->ref_count && mode & FMODE_EXCL)) + if (fs->ref_count == -1 || (fs->ref_count && mode & BLK_OPEN_EXCL)) return -EBUSY; - - if (mode & FMODE_EXCL) + if (mode & BLK_OPEN_EXCL) fs->ref_count = -1; else fs->ref_count++; - swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2); udelay(10); swim_drive(base, fs->location); @@ -636,10 +634,10 @@ static int floppy_open(struct gendisk *disk, fmode_t mode) set_capacity(fs->disk, fs->total_secs); - if (mode & FMODE_NDELAY) + if (mode & BLK_OPEN_NDELAY) return 0; - if (mode & (FMODE_READ|FMODE_WRITE)) { + if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) { if (disk_check_media_change(disk) && fs->disk_in) fs->ejected = 0; if ((mode & FMODE_WRITE) && fs->write_protected) { @@ -659,7 +657,7 @@ out: return err; } -static int floppy_unlocked_open(struct gendisk *disk, fmode_t mode) +static int floppy_unlocked_open(struct gendisk *disk, blk_mode_t mode) { int ret; @@ -686,7 +684,7 @@ static void floppy_release(struct gendisk *disk) mutex_unlock(&swim_mutex); } -static int floppy_ioctl(struct block_device *bdev, fmode_t mode, +static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param) { struct floppy_state *fs = bdev->bd_disk->private_data; diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index b696deff3d8b..945a03154250 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -246,9 +246,9 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state, int interruptible); static void release_drive(struct floppy_state *fs); static int fd_eject(struct floppy_state *fs); -static int floppy_ioctl(struct block_device *bdev, fmode_t mode, +static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param); -static int floppy_open(struct gendisk *disk, fmode_t mode); +static int floppy_open(struct gendisk *disk, blk_mode_t mode); static unsigned int floppy_check_events(struct gendisk *disk, unsigned int clearing); static int floppy_revalidate(struct gendisk *disk); @@ -882,7 +882,7 @@ static int fd_eject(struct floppy_state *fs) static struct floppy_struct floppy_type = { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */ -static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode, +static int floppy_locked_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long param) { struct floppy_state *fs = bdev->bd_disk->private_data; @@ -910,7 +910,7 @@ static int 
floppy_locked_ioctl(struct block_device *bdev, fmode_t mode, return -ENOTTY; } -static int floppy_ioctl(struct block_device *bdev, fmode_t mode, +static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long param) { int ret; @@ -922,7 +922,7 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode, return ret; } -static int floppy_open(struct gendisk *disk, fmode_t mode) +static int floppy_open(struct gendisk *disk, blk_mode_t mode) { struct floppy_state *fs = disk->private_data; struct swim3 __iomem *sw = fs->swim3; @@ -957,18 +957,18 @@ static int floppy_open(struct gendisk *disk, fmode_t mode) swim3_action(fs, SETMFM); swim3_select(fs, RELAX); - } else if (fs->ref_count == -1 || mode & FMODE_EXCL) + } else if (fs->ref_count == -1 || mode & BLK_OPEN_EXCL) return -EBUSY; - if (err == 0 && (mode & FMODE_NDELAY) == 0 - && (mode & (FMODE_READ|FMODE_WRITE))) { + if (err == 0 && !(mode & BLK_OPEN_NDELAY) && + (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE))) { if (disk_check_media_change(disk)) floppy_revalidate(disk); if (fs->ejected) err = -ENXIO; } - if (err == 0 && (mode & FMODE_WRITE)) { + if (err == 0 && (mode & BLK_OPEN_WRITE)) { if (fs->write_prot < 0) fs->write_prot = swim3_readbit(fs, WRITE_PROT); if (fs->write_prot) @@ -984,7 +984,7 @@ static int floppy_open(struct gendisk *disk, fmode_t mode) return err; } - if (mode & FMODE_EXCL) + if (mode & BLK_OPEN_EXCL) fs->ref_count = -1; else ++fs->ref_count; @@ -992,12 +992,12 @@ static int floppy_open(struct gendisk *disk, fmode_t mode) return 0; } -static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) +static int floppy_unlocked_open(struct gendisk *disk, blk_mode_t mode) { int ret; mutex_lock(&swim3_mutex); - ret = floppy_open(bdev, mode); + ret = floppy_open(disk, mode); mutex_unlock(&swim3_mutex); return ret; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 92c900ac2ebc..9fdc4c7f908d 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -447,7 +447,7 @@ static void ublk_store_owner_uid_gid(unsigned int *owner_uid, *owner_gid = from_kgid(&init_user_ns, gid); } -static int ublk_open(struct gendisk *disk, fmode_t mode) +static int ublk_open(struct gendisk *disk, blk_mode_t mode) { struct ublk_device *ub = disk->private_data; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 141b60aad570..bb66178c432b 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -492,7 +492,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, vbd->pdevice = MKDEV(major, minor); bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ? 
- FMODE_READ : FMODE_WRITE, NULL, NULL); + BLK_OPEN_READ : BLK_OPEN_WRITE, NULL, NULL); if (IS_ERR(bdev)) { pr_warn("xen_vbd_create: device %08x could not be opened\n", diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 23ed258b57f0..52e74adbaad6 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -509,7 +509,7 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) return 0; } -static int blkif_ioctl(struct block_device *bdev, fmode_t mode, +static int blkif_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned command, unsigned long argument) { struct blkfront_info *info = bdev->bd_disk->private_data; diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index a2e41cc084ca..11493167b0a8 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -140,7 +140,7 @@ static void get_chipram(void) return; } -static int z2_open(struct gendisk *disk, fmode_t mode) +static int z2_open(struct gendisk *disk, blk_mode_t mode) { int device = disk->first_minor; int max_z2_map = (Z2RAM_SIZE / Z2RAM_CHUNKSIZE) * sizeof(z2ram_map[0]); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 21615d67a9bd..1867f378b319 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -507,8 +507,8 @@ static ssize_t backing_dev_store(struct device *dev, goto out; } - bdev = blkdev_get_by_dev(inode->i_rdev, FMODE_READ | FMODE_WRITE, zram, - NULL); + bdev = blkdev_get_by_dev(inode->i_rdev, BLK_OPEN_READ | BLK_OPEN_WRITE, + zram, NULL); if (IS_ERR(bdev)) { err = PTR_ERR(bdev); bdev = NULL; @@ -2097,7 +2097,7 @@ static ssize_t reset_store(struct device *dev, return len; } -static int zram_open(struct gendisk *disk, fmode_t mode) +static int zram_open(struct gendisk *disk, blk_mode_t mode) { struct zram *zram = disk->private_data; diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 998b03fe976e..bd8cd59c758a 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -1146,7 +1146,7 @@ clean_up_and_return: * is in their own interest: device control becomes a lot easier * this way. */ -int cdrom_open(struct cdrom_device_info *cdi, fmode_t mode) +int cdrom_open(struct cdrom_device_info *cdi, blk_mode_t mode) { int ret; @@ -1155,7 +1155,7 @@ int cdrom_open(struct cdrom_device_info *cdi, fmode_t mode) /* if this was a O_NONBLOCK open and we should honor the flags, * do a quick open without drive/disc integrity checks. 
*/ cdi->use_count++; - if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) { + if ((mode & BLK_OPEN_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) { ret = cdi->ops->open(cdi, 1); } else { ret = open_for_data(cdi); @@ -1163,7 +1163,7 @@ int cdrom_open(struct cdrom_device_info *cdi, fmode_t mode) goto err; if (CDROM_CAN(CDC_GENERIC_PACKET)) cdrom_mmc3_profile(cdi); - if (mode & FMODE_WRITE) { + if (mode & BLK_OPEN_WRITE) { ret = -EROFS; if (cdrom_open_write(cdi)) goto err_release; diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index dac148d4d1fe..3a46e27479ff 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -474,7 +474,7 @@ static const struct cdrom_device_ops gdrom_ops = { CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R, }; -static int gdrom_bdops_open(struct gendisk *disk, fmode_t mode) +static int gdrom_bdops_open(struct gendisk *disk, blk_mode_t mode) { int ret; @@ -499,7 +499,7 @@ static unsigned int gdrom_bdops_check_events(struct gendisk *disk, return cdrom_check_events(gd.cd_info, clearing); } -static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode, +static int gdrom_bdops_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned cmd, unsigned long arg) { int ret; diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index aebb7ef10e63..700dc5588d5f 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -275,7 +275,7 @@ struct bcache_device { int (*cache_miss)(struct btree *b, struct search *s, struct bio *bio, unsigned int sectors); - int (*ioctl)(struct bcache_device *d, fmode_t mode, + int (*ioctl)(struct bcache_device *d, blk_mode_t mode, unsigned int cmd, unsigned long arg); }; diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 67a2e29e0b40..a9b1f3896249 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1228,7 +1228,7 @@ void cached_dev_submit_bio(struct bio *bio) detached_dev_do_request(d, bio, orig_bdev, start_time); } -static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode, +static int cached_dev_ioctl(struct bcache_device *d, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct cached_dev *dc = container_of(d, struct cached_dev, disk); @@ -1318,7 +1318,7 @@ void flash_dev_submit_bio(struct bio *bio) continue_at(cl, search_free, NULL); } -static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode, +static int flash_dev_ioctl(struct bcache_device *d, blk_mode_t mode, unsigned int cmd, unsigned long arg) { return -ENOTTY; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 7022fea396f2..1f829e74db0a 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -732,7 +732,7 @@ out: /* Bcache device */ -static int open_dev(struct gendisk *disk, fmode_t mode) +static int open_dev(struct gendisk *disk, blk_mode_t mode) { struct bcache_device *d = disk->private_data; @@ -750,7 +750,7 @@ static void release_dev(struct gendisk *b) closure_put(&d->cl); } -static int ioctl_dev(struct block_device *b, fmode_t mode, +static int ioctl_dev(struct block_device *b, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct bcache_device *d = b->bd_disk->private_data; @@ -2558,7 +2558,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, ret = -EINVAL; err = "failed to open device"; - bdev = blkdev_get_by_path(strim(path), FMODE_READ | FMODE_WRITE, + bdev = blkdev_get_by_path(strim(path), BLK_OPEN_READ | BLK_OPEN_WRITE, bcache_kobj, NULL); if (IS_ERR(bdev)) { if (bdev 
== ERR_PTR(-EBUSY)) { diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 872896218550..911f73f7ebba 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2051,8 +2051,8 @@ static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as, if (!at_least_one_arg(as, error)) return -EINVAL; - r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, - &ca->metadata_dev); + r = dm_get_device(ca->ti, dm_shift_arg(as), + BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->metadata_dev); if (r) { *error = "Error opening metadata device"; return r; @@ -2074,8 +2074,8 @@ static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as, if (!at_least_one_arg(as, error)) return -EINVAL; - r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, - &ca->cache_dev); + r = dm_get_device(ca->ti, dm_shift_arg(as), + BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->cache_dev); if (r) { *error = "Error opening cache device"; return r; @@ -2093,8 +2093,8 @@ static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as, if (!at_least_one_arg(as, error)) return -EINVAL; - r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, - &ca->origin_dev); + r = dm_get_device(ca->ti, dm_shift_arg(as), + BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->origin_dev); if (r) { *error = "Error opening origin device"; return r; diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index f467cdb5a022..94b2fc33f64b 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -1683,8 +1683,8 @@ static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char * int r; sector_t metadata_dev_size; - r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, - &clone->metadata_dev); + r = dm_get_device(clone->ti, dm_shift_arg(as), + BLK_OPEN_READ | BLK_OPEN_WRITE, &clone->metadata_dev); if (r) { *error = "Error opening metadata device"; return r; @@ -1703,8 +1703,8 @@ static int parse_dest_dev(struct clone *clone, struct dm_arg_set *as, char **err int r; sector_t dest_dev_size; - r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, - &clone->dest_dev); + r = dm_get_device(clone->ti, dm_shift_arg(as), + BLK_OPEN_READ | BLK_OPEN_WRITE, &clone->dest_dev); if (r) { *error = "Error opening destination device"; return r; @@ -1725,7 +1725,7 @@ static int parse_source_dev(struct clone *clone, struct dm_arg_set *as, char **e int r; sector_t source_dev_size; - r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ, + r = dm_get_device(clone->ti, dm_shift_arg(as), BLK_OPEN_READ, &clone->source_dev); if (r) { *error = "Error opening source device"; diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index aecab0c0720f..ce913ad91a52 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -207,11 +207,10 @@ struct dm_table { unsigned integrity_added:1; /* - * Indicates the rw permissions for the new logical - * device. This should be a combination of FMODE_READ - * and FMODE_WRITE. + * Indicates the rw permissions for the new logical device. This + * should be a combination of BLK_OPEN_READ and BLK_OPEN_WRITE. 
*/ - fmode_t mode; + blk_mode_t mode; /* a list of devices used by this table */ struct list_head devices; diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index 0d70914217ee..6acfa5bf97a4 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1482,14 +1482,16 @@ static int era_ctr(struct dm_target *ti, unsigned int argc, char **argv) era->ti = ti; - r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev); + r = dm_get_device(ti, argv[0], BLK_OPEN_READ | BLK_OPEN_WRITE, + &era->metadata_dev); if (r) { ti->error = "Error opening metadata device"; era_destroy(era); return -EINVAL; } - r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev); + r = dm_get_device(ti, argv[1], BLK_OPEN_READ | BLK_OPEN_WRITE, + &era->origin_dev); if (r) { ti->error = "Error opening data device"; era_destroy(era); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index cc77cf3d4109..8ba4cbb92351 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -861,7 +861,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param) table = dm_get_inactive_table(md, &srcu_idx); if (table) { - if (!(dm_table_get_mode(table) & FMODE_WRITE)) + if (!(dm_table_get_mode(table) & BLK_OPEN_WRITE)) param->flags |= DM_READONLY_FLAG; param->target_count = table->num_targets; } @@ -1192,7 +1192,7 @@ static int do_resume(struct dm_ioctl *param) if (old_size && new_size && old_size != new_size) need_resize_uevent = true; - if (dm_table_get_mode(new_map) & FMODE_WRITE) + if (dm_table_get_mode(new_map) & BLK_OPEN_WRITE) set_disk_ro(dm_disk(md), 0); else set_disk_ro(dm_disk(md), 1); @@ -1381,12 +1381,12 @@ static int dev_arm_poll(struct file *filp, struct dm_ioctl *param, size_t param_ return 0; } -static inline fmode_t get_mode(struct dm_ioctl *param) +static inline blk_mode_t get_mode(struct dm_ioctl *param) { - fmode_t mode = FMODE_READ | FMODE_WRITE; + blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE; if (param->flags & DM_READONLY_FLAG) - mode = FMODE_READ; + mode = BLK_OPEN_READ; return mode; } diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 7832974b73eb..bf7a574499a3 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1242,7 +1242,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) int r = -EINVAL; char *origin_path, *cow_path; unsigned int args_used, num_flush_bios = 1; - fmode_t origin_mode = FMODE_READ; + blk_mode_t origin_mode = BLK_OPEN_READ; if (argc < 4) { ti->error = "requires 4 or more arguments"; @@ -1252,7 +1252,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (dm_target_is_snapshot_merge(ti)) { num_flush_bios = 2; - origin_mode = FMODE_WRITE; + origin_mode = BLK_OPEN_WRITE; } s = kzalloc(sizeof(*s), GFP_KERNEL); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 2fd5826bfce1..7d208b2b1a19 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -126,7 +126,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num) return 0; } -int dm_table_create(struct dm_table **result, fmode_t mode, +int dm_table_create(struct dm_table **result, blk_mode_t mode, unsigned int num_targets, struct mapped_device *md) { struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); @@ -304,7 +304,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, * device and not to touch the existing bdev field in case * it is accessed concurrently. 
*/ -static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, +static int upgrade_mode(struct dm_dev_internal *dd, blk_mode_t new_mode, struct mapped_device *md) { int r; @@ -330,7 +330,7 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, * Note: the __ref annotation is because this function can call the __init * marked early_lookup_bdev when called during early boot code from dm-init.c. */ -int __ref dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, +int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode, struct dm_dev **result) { int r; @@ -662,7 +662,8 @@ int dm_table_add_target(struct dm_table *t, const char *type, t->singleton = true; } - if (dm_target_always_writeable(ti->type) && !(t->mode & FMODE_WRITE)) { + if (dm_target_always_writeable(ti->type) && + !(t->mode & BLK_OPEN_WRITE)) { ti->error = "target type may not be included in a read-only table"; goto bad; } @@ -2033,7 +2034,7 @@ struct list_head *dm_table_get_devices(struct dm_table *t) return &t->devices; } -fmode_t dm_table_get_mode(struct dm_table *t) +blk_mode_t dm_table_get_mode(struct dm_table *t) { return t->mode; } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 2b13c949bd72..464c6b678417 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -3301,7 +3301,7 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv) unsigned long block_size; dm_block_t low_water_blocks; struct dm_dev *metadata_dev; - fmode_t metadata_mode; + blk_mode_t metadata_mode; /* * FIXME Remove validation from scope of lock. @@ -3334,7 +3334,8 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (r) goto out_unlock; - metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE); + metadata_mode = BLK_OPEN_READ | + ((pf.mode == PM_READ_ONLY) ? 
0 : BLK_OPEN_WRITE); r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev); if (r) { ti->error = "Error opening metadata block device"; @@ -3342,7 +3343,7 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv) } warn_if_metadata_device_too_big(metadata_dev->bdev); - r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev); + r = dm_get_device(ti, argv[1], BLK_OPEN_READ | BLK_OPEN_WRITE, &data_dev); if (r) { ti->error = "Error getting data device"; goto out_metadata; @@ -4223,7 +4224,7 @@ static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_origin_dev; } - r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev); + r = dm_get_device(ti, argv[2], BLK_OPEN_READ, &origin_dev); if (r) { ti->error = "Error opening origin device"; goto bad_origin_dev; diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index a9ee2faa75a2..3ef9f018da60 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -607,7 +607,7 @@ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v, (*argc)--; if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) { - r = dm_get_device(ti, arg_value, FMODE_READ, &v->fec->dev); + r = dm_get_device(ti, arg_value, BLK_OPEN_READ, &v->fec->dev); if (r) { ti->error = "FEC device lookup failed"; return r; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index e35c16e06d06..26adcfea0302 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1196,7 +1196,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (r) goto bad; - if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) { + if ((dm_table_get_mode(ti->table) & ~BLK_OPEN_READ)) { ti->error = "Device must be readonly"; r = -EINVAL; goto bad; @@ -1225,13 +1225,13 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv) } v->version = num; - r = dm_get_device(ti, argv[1], FMODE_READ, &v->data_dev); + r = dm_get_device(ti, argv[1], BLK_OPEN_READ, &v->data_dev); if (r) { ti->error = "Data device lookup failed"; goto bad; } - r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev); + r = dm_get_device(ti, argv[2], BLK_OPEN_READ, &v->hash_dev); if (r) { ti->error = "Hash device lookup failed"; goto bad; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index b16e37362c5a..ca2dc079c3f4 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -310,7 +310,7 @@ int dm_deleting_md(struct mapped_device *md) return test_bit(DMF_DELETING, &md->flags); } -static int dm_blk_open(struct gendisk *disk, fmode_t mode) +static int dm_blk_open(struct gendisk *disk, blk_mode_t mode) { struct mapped_device *md; @@ -448,7 +448,7 @@ static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) dm_put_live_table(md, srcu_idx); } -static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, +static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct mapped_device *md = bdev->bd_disk->private_data; @@ -734,7 +734,7 @@ static char *_dm_claim_ptr = "I belong to device-mapper"; * Open a table device so we can use it as a map destination. 
*/ static struct table_device *open_table_device(struct mapped_device *md, - dev_t dev, fmode_t mode) + dev_t dev, blk_mode_t mode) { struct table_device *td; struct block_device *bdev; @@ -791,7 +791,7 @@ static void close_table_device(struct table_device *td, struct mapped_device *md } static struct table_device *find_table_device(struct list_head *l, dev_t dev, - fmode_t mode) + blk_mode_t mode) { struct table_device *td; @@ -802,7 +802,7 @@ static struct table_device *find_table_device(struct list_head *l, dev_t dev, return NULL; } -int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, +int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode, struct dm_dev **result) { struct table_device *td; diff --git a/drivers/md/dm.h b/drivers/md/dm.h index a856e0aee73b..63d9010d8e61 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -203,7 +203,7 @@ int dm_open_count(struct mapped_device *md); int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred); int dm_cancel_deferred_remove(struct mapped_device *md); int dm_request_based(struct mapped_device *md); -int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, +int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode, struct dm_dev **result); void dm_put_table_device(struct mapped_device *md, struct dm_dev *d); diff --git a/drivers/md/md.c b/drivers/md/md.c index dad4a5539f9f..ca0de7ddd943 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3643,7 +3643,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe if (err) goto out_clear_rdev; - rdev->bdev = blkdev_get_by_dev(newdev, FMODE_READ | FMODE_WRITE, + rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE, super_format == -2 ? 
&claim_rdev : rdev, NULL); if (IS_ERR(rdev->bdev)) { pr_warn("md: could not open device unknown-block(%u,%u).\n", @@ -7488,7 +7488,7 @@ static int __md_set_array_info(struct mddev *mddev, void __user *argp) return err; } -static int md_ioctl(struct block_device *bdev, fmode_t mode, +static int md_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { int err = 0; @@ -7720,7 +7720,7 @@ out: return err; } #ifdef CONFIG_COMPAT -static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, +static int md_compat_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { switch (cmd) { @@ -7769,7 +7769,7 @@ out_unlock: return err; } -static int md_open(struct gendisk *disk, fmode_t mode) +static int md_open(struct gendisk *disk, blk_mode_t mode) { struct mddev *mddev; int err; diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index b16eedf22d4e..2a33b5073cc4 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -357,7 +357,7 @@ static const struct attribute_group *mmc_disk_attr_groups[] = { NULL, }; -static int mmc_blk_open(struct gendisk *disk, fmode_t mode) +static int mmc_blk_open(struct gendisk *disk, blk_mode_t mode) { struct mmc_blk_data *md = mmc_blk_get(disk); int ret = -ENXIO; @@ -365,7 +365,7 @@ static int mmc_blk_open(struct gendisk *disk, fmode_t mode) mutex_lock(&block_mutex); if (md) { ret = 0; - if ((mode & FMODE_WRITE) && md->read_only) { + if ((mode & BLK_OPEN_WRITE) && md->read_only) { mmc_blk_put(md); ret = -EROFS; } @@ -754,7 +754,7 @@ static int mmc_blk_check_blkdev(struct block_device *bdev) return 0; } -static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, +static int mmc_blk_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct mmc_blk_data *md; @@ -791,7 +791,7 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, } #ifdef CONFIG_COMPAT -static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, +static int mmc_blk_compat_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index 44fc23af4c3f..be106dc20ff3 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c @@ -220,7 +220,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev) * early_lookup_bdev when called from the early boot code. 
*/ static struct block_device __ref *mdtblock_early_get_bdev(const char *devname, - fmode_t mode, int timeout, struct block2mtd_dev *dev) + blk_mode_t mode, int timeout, struct block2mtd_dev *dev) { struct block_device *bdev = ERR_PTR(-ENODEV); #ifndef MODULE @@ -261,7 +261,7 @@ static struct block_device __ref *mdtblock_early_get_bdev(const char *devname, static struct block2mtd_dev *add_device(char *devname, int erase_size, char *label, int timeout) { - const fmode_t mode = FMODE_READ | FMODE_WRITE; + const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE; struct block_device *bdev; struct block2mtd_dev *dev; char *name; diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index bd0b75453643..ff18636e0889 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -182,7 +182,7 @@ static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_STS_OK; } -static int blktrans_open(struct gendisk *disk, fmode_t mode) +static int blktrans_open(struct gendisk *disk, blk_mode_t mode) { struct mtd_blktrans_dev *dev = disk->private_data; int ret = 0; @@ -208,7 +208,7 @@ static int blktrans_open(struct gendisk *disk, fmode_t mode) ret = __get_mtd_device(dev->mtd); if (ret) goto error_release; - dev->writable = mode & FMODE_WRITE; + dev->writable = mode & BLK_OPEN_WRITE; unlock: dev->open++; diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index e85fb9de0b70..437c5b83ffe5 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -227,7 +227,7 @@ static blk_status_t ubiblock_read(struct request *req) return BLK_STS_OK; } -static int ubiblock_open(struct gendisk *disk, fmode_t mode) +static int ubiblock_open(struct gendisk *disk, blk_mode_t mode) { struct ubiblock *dev = disk->private_data; int ret; @@ -246,11 +246,10 @@ static int ubiblock_open(struct gendisk *disk, fmode_t mode) * It's just a paranoid check, as write requests will get rejected * in any case. 
*/ - if (mode & FMODE_WRITE) { + if (mode & BLK_OPEN_WRITE) { ret = -EROFS; goto out_unlock; } - dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY); if (IS_ERR(dev->desc)) { dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d", diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index fd7f8e6d66fd..c3d72fc677f7 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1591,7 +1591,7 @@ static void nvme_ns_release(struct nvme_ns *ns) nvme_put_ns(ns); } -static int nvme_open(struct gendisk *disk, fmode_t mode) +static int nvme_open(struct gendisk *disk, blk_mode_t mode) { return nvme_ns_open(disk->private_data); } diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 8bf09047348e..0fd0aa571cc9 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -709,11 +709,11 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd, } } -int nvme_ioctl(struct block_device *bdev, fmode_t mode, +int nvme_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct nvme_ns *ns = bdev->bd_disk->private_data; - bool open_for_write = mode & FMODE_WRITE; + bool open_for_write = mode & BLK_OPEN_WRITE; void __user *argp = (void __user *)arg; unsigned int flags = 0; @@ -817,11 +817,11 @@ static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, return ret; } -int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode, +int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct nvme_ns_head *head = bdev->bd_disk->private_data; - bool open_for_write = mode & FMODE_WRITE; + bool open_for_write = mode & BLK_OPEN_WRITE; void __user *argp = (void __user *)arg; struct nvme_ns *ns; int srcu_idx, ret = -EWOULDBLOCK; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 698c0e70bcfa..91a9a55227fa 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -402,7 +402,7 @@ static void nvme_ns_head_submit_bio(struct bio *bio) srcu_read_unlock(&head->srcu, srcu_idx); } -static int nvme_ns_head_open(struct gendisk *disk, fmode_t mode) +static int nvme_ns_head_open(struct gendisk *disk, blk_mode_t mode) { if (!nvme_tryget_ns_head(disk->private_data)) return -ENXIO; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index bf46f122e9e1..953e59f56139 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -836,10 +836,10 @@ void nvme_put_ns_head(struct nvme_ns_head *head); int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, const struct file_operations *fops, struct module *owner); void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device); -int nvme_ioctl(struct block_device *bdev, fmode_t mode, +int nvme_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg); long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode, +int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg); long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg); diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 65ed2d478fac..2733e0158585 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -85,7 +85,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns) return -ENOTBLK; ns->bdev = 
blkdev_get_by_path(ns->device_path, - FMODE_READ | FMODE_WRITE, NULL, NULL); + BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL); if (IS_ERR(ns->bdev)) { ret = PTR_ERR(ns->bdev); if (ret != -ENOTBLK) { diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 19295b2df470..45788955c4e6 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -3234,7 +3234,7 @@ struct blk_mq_ops dasd_mq_ops = { .exit_hctx = dasd_exit_hctx, }; -static int dasd_open(struct gendisk *disk, fmode_t mode) +static int dasd_open(struct gendisk *disk, blk_mode_t mode) { struct dasd_device *base; int rc; @@ -3268,14 +3268,12 @@ static int dasd_open(struct gendisk *disk, fmode_t mode) rc = -ENODEV; goto out; } - - if ((mode & FMODE_WRITE) && + if ((mode & BLK_OPEN_WRITE) && (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || (base->features & DASD_FEATURE_READONLY))) { rc = -EROFS; goto out; } - dasd_put_device(base); return 0; diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index d2b27b84f854..fe5108a1b332 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -130,7 +130,8 @@ int dasd_scan_partitions(struct dasd_block *block) struct block_device *bdev; int rc; - bdev = blkdev_get_by_dev(disk_devt(block->gdp), FMODE_READ, NULL, NULL); + bdev = blkdev_get_by_dev(disk_devt(block->gdp), BLK_OPEN_READ, NULL, + NULL); if (IS_ERR(bdev)) { DBF_DEV_EVENT(DBF_ERR, block->base, "scan partitions error, blkdev_get returned %ld", diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 33f812f0e515..0aa56351da72 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -965,7 +965,8 @@ int dasd_scan_partitions(struct dasd_block *); void dasd_destroy_partitions(struct dasd_block *); /* externals in dasd_ioctl.c */ -int dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long); +int dasd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, + unsigned long arg); int dasd_set_read_only(struct block_device *bdev, bool ro); /* externals in dasd_proc.c */ diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 9327dcdd6e5e..838c9f5313e6 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -612,7 +612,7 @@ static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd, return ret; } -int dasd_ioctl(struct block_device *bdev, fmode_t mode, +int dasd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct dasd_block *block; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 5aee3106bfda..200f88f0e451 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -28,7 +28,7 @@ #define DCSSBLK_PARM_LEN 400 #define DCSS_BUS_ID_SIZE 20 -static int dcssblk_open(struct gendisk *disk, fmode_t mode); +static int dcssblk_open(struct gendisk *disk, blk_mode_t mode); static void dcssblk_release(struct gendisk *disk); static void dcssblk_submit_bio(struct bio *bio); static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, @@ -809,7 +809,7 @@ out_buf: } static int -dcssblk_open(struct gendisk *disk, fmode_t mode) +dcssblk_open(struct gendisk *disk, blk_mode_t mode) { struct dcssblk_dev_info *dev_info = disk->private_data; int rc; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 02b6704ec2b4..ab216976dbdc 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1298,7 +1298,7 @@ static bool 
sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp) /** * sd_open - open a scsi disk device * @disk: disk to open - * @mode: FMODE_* mask + * @mode: open mode * * Returns 0 if successful. Returns a negated errno value in case * of error. @@ -1310,7 +1310,7 @@ static bool sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp) * * Locking: called with disk->open_mutex held. **/ -static int sd_open(struct gendisk *disk, fmode_t mode) +static int sd_open(struct gendisk *disk, blk_mode_t mode) { struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdev = sdkp->device; @@ -1336,7 +1336,8 @@ static int sd_open(struct gendisk *disk, fmode_t mode) * If the drive is empty, just let the open fail. */ retval = -ENOMEDIUM; - if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY)) + if (sdev->removable && !sdkp->media_present && + !(mode & BLK_OPEN_NDELAY)) goto error_out; /* @@ -1344,7 +1345,7 @@ static int sd_open(struct gendisk *disk, fmode_t mode) * if the user expects to be able to write to the thing. */ retval = -EROFS; - if (sdkp->write_prot && (mode & FMODE_WRITE)) + if (sdkp->write_prot && (mode & BLK_OPEN_WRITE)) goto error_out; /* @@ -1379,7 +1380,7 @@ error_out: * Note: may block (uninterruptible) if error recovery is underway * on this disk. * - * Locking: called with bdev->bd_disk->open_mutex held. + * Locking: called with disk->open_mutex held. **/ static void sd_release(struct gendisk *disk) { @@ -1424,7 +1425,7 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) /** * sd_ioctl - process an ioctl * @bdev: target block device - * @mode: FMODE_* mask + * @mode: open mode * @cmd: ioctl command number * @arg: this is third argument given to ioctl(2) system call. * Often contains a pointer. @@ -1435,7 +1436,7 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) * Note: most ioctls are forward onto the block subsystem or further * down in the scsi subsystem. **/ -static int sd_ioctl(struct block_device *bdev, fmode_t mode, +static int sd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct gendisk *disk = bdev->bd_disk; @@ -1457,13 +1458,13 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode, * access to the device is prohibited. 
*/ error = scsi_ioctl_block_when_processing_errors(sdp, cmd, - (mode & FMODE_NDELAY) != 0); + (mode & BLK_OPEN_NDELAY)); if (error) return error; if (is_sed_ioctl(cmd)) return sed_ioctl(sdkp->opal_dev, cmd, p); - return scsi_ioctl(sdp, mode & FMODE_WRITE, cmd, p); + return scsi_ioctl(sdp, mode & BLK_OPEN_WRITE, cmd, p); } static void set_media_not_present(struct scsi_disk *sdkp) diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 00aaafc8dd78..ce886c8c9dbe 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -484,7 +484,7 @@ static void sr_revalidate_disk(struct scsi_cd *cd) get_sectorsize(cd); } -static int sr_block_open(struct gendisk *disk, fmode_t mode) +static int sr_block_open(struct gendisk *disk, blk_mode_t mode) { struct scsi_cd *cd = scsi_cd(disk); struct scsi_device *sdev = cd->device; @@ -518,8 +518,8 @@ static void sr_block_release(struct gendisk *disk) scsi_device_put(cd->device); } -static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, - unsigned long arg) +static int sr_block_ioctl(struct block_device *bdev, blk_mode_t mode, + unsigned cmd, unsigned long arg) { struct scsi_cd *cd = scsi_cd(bdev->bd_disk); struct scsi_device *sdev = cd->device; @@ -532,7 +532,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, mutex_lock(&cd->lock); ret = scsi_ioctl_block_when_processing_errors(sdev, cmd, - (mode & FMODE_NDELAY) != 0); + (mode & BLK_OPEN_NDELAY)); if (ret) goto out; @@ -543,7 +543,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, if (ret != -ENOSYS) goto put; } - ret = scsi_ioctl(sdev, mode & FMODE_WRITE, cmd, argp); + ret = scsi_ioctl(sdev, mode & BLK_OPEN_WRITE, cmd, argp); put: scsi_autopm_put_device(sdev); diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index c62f961f46e3..3c462d69daca 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -90,7 +90,7 @@ static int iblock_configure_device(struct se_device *dev) struct request_queue *q; struct block_device *bd = NULL; struct blk_integrity *bi; - fmode_t mode; + blk_mode_t mode = BLK_OPEN_READ; unsigned int max_write_zeroes_sectors; int ret; @@ -108,9 +108,8 @@ static int iblock_configure_device(struct se_device *dev) pr_debug( "IBLOCK: Claiming struct block_device: %s\n", ib_dev->ibd_udev_path); - mode = FMODE_READ; if (!ib_dev->ibd_readonly) - mode |= FMODE_WRITE; + mode |= BLK_OPEN_WRITE; else dev->dev_flags |= DF_READ_ONLY; diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index da3b5512d7ae..0d4f09693ef4 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -366,8 +366,8 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) * Claim exclusive struct block_device access to struct scsi_device * for TYPE_DISK and TYPE_ZBC using supplied udev_path */ - bd = blkdev_get_by_path(dev->udev_path, FMODE_WRITE | FMODE_READ, pdv, - NULL); + bd = blkdev_get_by_path(dev->udev_path, BLK_OPEN_WRITE | BLK_OPEN_READ, + pdv, NULL); if (IS_ERR(bd)) { pr_err("pSCSI: blkdev_get_by_path() failed\n"); scsi_device_put(sd); diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 677e9d9e1527..2d00600ff413 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -257,7 +257,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, return -EINVAL; } - bdev = blkdev_get_by_path(device_path, FMODE_WRITE, + bdev = 
blkdev_get_by_path(device_path, BLK_OPEN_WRITE, fs_info->bdev_holder, NULL); if (IS_ERR(bdev)) { btrfs_err(fs_info, "target device %s is invalid!", device_path); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index fd02b92e3910..1c3c1d7ad68c 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -849,7 +849,7 @@ out: * All other options will be parsed on much later in the mount process and * only when we need to allocate a new super block. */ -static int btrfs_parse_device_options(const char *options, fmode_t flags) +static int btrfs_parse_device_options(const char *options, blk_mode_t flags) { substring_t args[MAX_OPT_ARGS]; char *device_name, *opts, *orig, *p; @@ -1440,7 +1440,7 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type, struct btrfs_fs_devices *fs_devices = NULL; struct btrfs_fs_info *fs_info = NULL; void *new_sec_opts = NULL; - fmode_t mode = sb_open_mode(flags); + blk_mode_t mode = sb_open_mode(flags); int error = 0; if (data) { @@ -2185,7 +2185,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd, switch (cmd) { case BTRFS_IOC_SCAN_DEV: mutex_lock(&uuid_mutex); - device = btrfs_scan_one_device(vol->name, FMODE_READ); + device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ); ret = PTR_ERR_OR_ZERO(device); mutex_unlock(&uuid_mutex); break; @@ -2199,7 +2199,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd, break; case BTRFS_IOC_DEVICES_READY: mutex_lock(&uuid_mutex); - device = btrfs_scan_one_device(vol->name, FMODE_READ); + device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ); if (IS_ERR(device)) { mutex_unlock(&uuid_mutex); ret = PTR_ERR(device); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 7b12e05cdbf0..c85e54f86035 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -490,7 +490,7 @@ static struct btrfs_fs_devices *find_fsid_with_metadata_uuid( static int -btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder, +btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder, int flush, struct block_device **bdev, struct btrfs_super_block **disk_super) { @@ -590,7 +590,7 @@ static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device * fs_devices->device_list_mutex here. */ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, - struct btrfs_device *device, fmode_t flags, + struct btrfs_device *device, blk_mode_t flags, void *holder) { struct block_device *bdev; @@ -1207,7 +1207,7 @@ void btrfs_close_devices(struct btrfs_fs_devices *fs_devices) } static int open_fs_devices(struct btrfs_fs_devices *fs_devices, - fmode_t flags, void *holder) + blk_mode_t flags, void *holder) { struct btrfs_device *device; struct btrfs_device *latest_dev = NULL; @@ -1255,7 +1255,7 @@ static int devid_cmp(void *priv, const struct list_head *a, } int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, - fmode_t flags, void *holder) + blk_mode_t flags, void *holder) { int ret; @@ -1346,7 +1346,7 @@ int btrfs_forget_devices(dev_t devt) * and we are not allowed to call set_blocksize during the scan. 
The superblock * is read via pagecache */ -struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags) +struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags) { struct btrfs_super_block *disk_super; bool new_device_added = false; @@ -2378,7 +2378,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info, return -ENOMEM; } - ret = btrfs_get_bdev_and_sb(path, FMODE_READ, NULL, 0, + ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0, &bdev, &disk_super); if (ret) { btrfs_put_dev_args_from_path(args); @@ -2625,7 +2625,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path if (sb_rdonly(sb) && !fs_devices->seeding) return -EROFS; - bdev = blkdev_get_by_path(device_path, FMODE_WRITE, + bdev = blkdev_get_by_path(device_path, BLK_OPEN_WRITE, fs_info->bdev_holder, NULL); if (IS_ERR(bdev)) return PTR_ERR(bdev); @@ -6907,7 +6907,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, if (IS_ERR(fs_devices)) return fs_devices; - ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); + ret = open_fs_devices(fs_devices, BLK_OPEN_READ, fs_info->bdev_holder); if (ret) { free_fs_devices(fs_devices); return ERR_PTR(ret); diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 840a8df39907..8227ba4d64b8 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -599,8 +599,8 @@ struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans, u64 type); void btrfs_mapping_tree_free(struct extent_map_tree *tree); int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, - fmode_t flags, void *holder); -struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags); + blk_mode_t flags, void *holder); +struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags); int btrfs_forget_devices(dev_t devt); void btrfs_close_devices(struct btrfs_fs_devices *fs_devices); void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices); diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 54dba967a2d4..3f080f0afc02 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -254,7 +254,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, return PTR_ERR(fscache); dif->fscache = fscache; } else if (!sbi->devs->flatdev) { - bdev = blkdev_get_by_path(dif->path, FMODE_READ, sb->s_type, + bdev = blkdev_get_by_path(dif->path, BLK_OPEN_READ, sb->s_type, NULL); if (IS_ERR(bdev)) return PTR_ERR(bdev); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 92dd699139a3..94a7b56ed876 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1112,7 +1112,7 @@ static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb) { struct block_device *bdev; - bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, sb, + bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE, sb, &ext4_holder_ops); if (IS_ERR(bdev)) goto fail; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 5a764fecd1c7..e34197a70dc1 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -3993,7 +3993,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi) struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); unsigned int max_devices = MAX_DEVICES; unsigned int logical_blksize; - fmode_t mode = sb_open_mode(sbi->sb->s_flags); + blk_mode_t mode = sb_open_mode(sbi->sb->s_flags); int i; /* Initialize single device information */ diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index 82f70d46f4e5..e855b8fde76c 100644 --- 
a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -1100,7 +1100,7 @@ int lmLogOpen(struct super_block *sb) * file systems to log may have n-to-1 relationship; */ - bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ | FMODE_WRITE, + bdev = blkdev_get_by_dev(sbi->logdev, BLK_OPEN_READ | BLK_OPEN_WRITE, log, NULL); if (IS_ERR(bdev)) { rc = PTR_ERR(bdev); diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c index 9be7f958f60e..70f5563a8e81 100644 --- a/fs/nfs/blocklayout/dev.c +++ b/fs/nfs/blocklayout/dev.c @@ -243,7 +243,8 @@ bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d, if (!dev) return -EIO; - bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, NULL, NULL); + bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, + NULL); if (IS_ERR(bdev)) { printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n", MAJOR(dev), MINOR(dev), PTR_ERR(bdev)); @@ -312,7 +313,7 @@ bl_open_path(struct pnfs_block_volume *v, const char *prefix) if (!devname) return ERR_PTR(-ENOMEM); - bdev = blkdev_get_by_path(devname, FMODE_READ | FMODE_WRITE, NULL, + bdev = blkdev_get_by_path(devname, BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL); if (IS_ERR(bdev)) { pr_warn("pNFS: failed to open device %s (%ld)\n", diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index c6ae9aee01ed..21472e3ed182 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -1786,7 +1786,8 @@ static ssize_t o2hb_region_dev_store(struct config_item *item, goto out2; reg->hr_bdev = blkdev_get_by_dev(f.file->f_mapping->host->i_rdev, - FMODE_WRITE | FMODE_READ, NULL, NULL); + BLK_OPEN_WRITE | BLK_OPEN_READ, NULL, + NULL); if (IS_ERR(reg->hr_bdev)) { ret = PTR_ERR(reg->hr_bdev); reg->hr_bdev = NULL; diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 905297ea5545..62beee3c62b6 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -2598,7 +2598,7 @@ static int journal_init_dev(struct super_block *super, struct reiserfs_journal *journal, const char *jdev_name) { - fmode_t blkdev_mode = FMODE_READ; + blk_mode_t blkdev_mode = BLK_OPEN_READ; void *holder = journal; int result; dev_t jdev; @@ -2610,7 +2610,7 @@ static int journal_init_dev(struct super_block *super, new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev; if (!bdev_read_only(super->s_bdev)) - blkdev_mode |= FMODE_WRITE; + blkdev_mode |= BLK_OPEN_WRITE; /* there is no "jdev" option and journal is on separate device */ if ((!jdev_name || !jdev_name[0])) { diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 3b7cf8268057..67ad1c937637 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -396,7 +396,7 @@ xfs_blkdev_get( { int error = 0; - *bdevp = blkdev_get_by_path(name, FMODE_READ | FMODE_WRITE, mp, + *bdevp = blkdev_get_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE, mp, &xfs_holder_ops); if (IS_ERR(*bdevp)) { error = PTR_ERR(*bdevp); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6b65623e447c..824e31dd752a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -112,6 +112,19 @@ struct blk_integrity { unsigned char tag_size; }; +typedef unsigned int __bitwise blk_mode_t; + +/* open for reading */ +#define BLK_OPEN_READ ((__force blk_mode_t)(1 << 0)) +/* open for writing */ +#define BLK_OPEN_WRITE ((__force blk_mode_t)(1 << 1)) +/* open exclusively (vs other exclusive openers */ +#define BLK_OPEN_EXCL ((__force blk_mode_t)(1 << 2)) +/* opened with O_NDELAY */ +#define BLK_OPEN_NDELAY ((__force blk_mode_t)(1 << 3)) +/* open 
for "writes" only for ioctls (specialy hack for floppy.c) */ +#define BLK_OPEN_WRITE_IOCTL ((__force blk_mode_t)(1 << 4)) + struct gendisk { /* * major/first_minor/minors should not be set by any new driver, the @@ -187,6 +200,7 @@ struct gendisk { struct badblocks *bb; struct lockdep_map lockdep_map; u64 diskseq; + blk_mode_t open_mode; /* * Independent sector access ranges. This is always NULL for @@ -1363,10 +1377,12 @@ struct block_device_operations { void (*submit_bio)(struct bio *bio); int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob, unsigned int flags); - int (*open)(struct gendisk *disk, fmode_t mode); + int (*open)(struct gendisk *disk, blk_mode_t mode); void (*release)(struct gendisk *disk); - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); + int (*ioctl)(struct block_device *bdev, blk_mode_t mode, + unsigned cmd, unsigned long arg); + int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode, + unsigned cmd, unsigned long arg); unsigned int (*check_events) (struct gendisk *disk, unsigned int clearing); void (*unlock_native_capacity) (struct gendisk *); @@ -1393,7 +1409,7 @@ struct block_device_operations { }; #ifdef CONFIG_COMPAT -extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t, +extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t, unsigned int, unsigned long); #else #define blkdev_compat_ptr_ioctl NULL @@ -1455,11 +1471,11 @@ struct blk_holder_ops { * as stored in sb->s_flags. */ #define sb_open_mode(flags) \ - (FMODE_READ | (((flags) & SB_RDONLY) ? 0 : FMODE_WRITE)) + (BLK_OPEN_READ | (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE)) -struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder, +struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops); -struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, +struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops); int bd_prepare_to_claim(struct block_device *bdev, void *holder, const struct blk_holder_ops *hops); diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index 3c253b29f4aa..98c6fd0b39b6 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -13,6 +13,7 @@ #include /* not really needed, later.. */ #include +#include #include #include @@ -101,7 +102,7 @@ int cdrom_read_tocentry(struct cdrom_device_info *cdi, struct cdrom_tocentry *entry); /* the general block_device operations structure: */ -int cdrom_open(struct cdrom_device_info *cdi, fmode_t mode); +int cdrom_open(struct cdrom_device_info *cdi, blk_mode_t mode); void cdrom_release(struct cdrom_device_info *cdi); int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, unsigned int cmd, unsigned long arg); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index c27b84002d83..69d0435c7ebb 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -166,7 +166,7 @@ void dm_error(const char *message); struct dm_dev { struct block_device *bdev; struct dax_device *dax_dev; - fmode_t mode; + blk_mode_t mode; char name[16]; }; @@ -174,7 +174,7 @@ struct dm_dev { * Constructors should call these functions to ensure destination devices * are opened/closed correctly. 
*/ -int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, +int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode, struct dm_dev **result); void dm_put_device(struct dm_target *ti, struct dm_dev *d); @@ -543,7 +543,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo); /* * First create an empty table. */ -int dm_table_create(struct dm_table **result, fmode_t mode, +int dm_table_create(struct dm_table **result, blk_mode_t mode, unsigned int num_targets, struct mapped_device *md); /* @@ -586,7 +586,7 @@ void dm_sync_table(struct mapped_device *md); * Queries */ sector_t dm_table_get_size(struct dm_table *t); -fmode_t dm_table_get_mode(struct dm_table *t); +blk_mode_t dm_table_get_mode(struct dm_table *t); struct mapped_device *dm_table_get_md(struct dm_table *t); const char *dm_table_device_name(struct dm_table *t); diff --git a/kernel/power/swap.c b/kernel/power/swap.c index cc9259307c94..f6ebcd00c410 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -356,8 +356,8 @@ static int swsusp_swap_check(void) return res; root_swap = res; - hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, FMODE_WRITE, - NULL, NULL); + hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, + BLK_OPEN_WRITE, NULL, NULL); if (IS_ERR(hib_resume_bdev)) return PTR_ERR(hib_resume_bdev); @@ -1521,7 +1521,7 @@ int swsusp_check(bool snapshot_test) void *holder = snapshot_test ? &swsusp_holder : NULL; int error; - hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, FMODE_READ, + hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, BLK_OPEN_READ, holder, NULL); if (!IS_ERR(hib_resume_bdev)) { set_blocksize(hib_resume_bdev, PAGE_SIZE); diff --git a/mm/swapfile.c b/mm/swapfile.c index 16554256be65..6bc83060df9a 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2770,7 +2770,7 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) if (S_ISBLK(inode->i_mode)) { p->bdev = blkdev_get_by_dev(inode->i_rdev, - FMODE_READ | FMODE_WRITE, p, NULL); + BLK_OPEN_READ | BLK_OPEN_WRITE, p, NULL); if (IS_ERR(p->bdev)) { error = PTR_ERR(p->bdev); p->bdev = NULL; -- cgit v1.2.3 From 4f572f0074b8be8a70bd150d96a749aa94c8d85f Mon Sep 17 00:00:00 2001 From: Haibo Li Date: Fri, 26 May 2023 10:21:25 +0800 Subject: mm/gup_test: fix ioctl fail for compat task When tools/testing/selftests/mm/gup_test.c is compiled as 32bit, then run on arm64 kernel, it reports "ioctl: Inappropriate ioctl for device". 
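For context, a rough sketch (not part of the patch) of the dispatch involved: a 32-bit task's ioctl() is routed through the driver's ->compat_ioctl handler, and a NULL handler is reported as -ENOTTY, which userspace prints as "Inappropriate ioctl for device". The stock compat_ptr_ioctl helper just converts the compat pointer argument and forwards to ->unlocked_ioctl, so wiring it up suffices when the ioctl arguments are plain pointers. The example_* names below are hypothetical stand-ins, not code from the patch.

#include <linux/fs.h>
#include <linux/compat.h>

static const struct file_operations example_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= example_ioctl,	/* hypothetical native handler */
	.compat_ioctl	= compat_ptr_ioctl,	/* without this, compat tasks get -ENOTTY */
	.release	= example_release,	/* hypothetical */
};
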
Fix it by filling compat_ioctl in gup_test_fops Link: https://lkml.kernel.org/r/20230526022125.175728-1-haibo.li@mediatek.com Signed-off-by: Haibo Li Acked-by: David Hildenbrand Cc: AngeloGioacchino Del Regno Cc: Matthias Brugger Cc: John Hubbard Cc: Signed-off-by: Andrew Morton --- mm/gup_test.c | 1 + 1 file changed, 1 insertion(+) (limited to 'mm') diff --git a/mm/gup_test.c b/mm/gup_test.c index 8ae7307a1bb6..c0421b786dcd 100644 --- a/mm/gup_test.c +++ b/mm/gup_test.c @@ -381,6 +381,7 @@ static int gup_test_release(struct inode *inode, struct file *file) static const struct file_operations gup_test_fops = { .open = nonseekable_open, .unlocked_ioctl = gup_test_ioctl, + .compat_ioctl = compat_ptr_ioctl, .release = gup_test_release, }; -- cgit v1.2.3 From 5ff6e2fff88ef9bf110c5e85a48e7b557bfc64c1 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Sat, 27 May 2023 11:21:01 +0800 Subject: mm/damon/core: fix divide error in damon_nr_accesses_to_accesses_bp() If 'aggr_interval' is smaller than 'sample_interval', max_nr_accesses in damon_nr_accesses_to_accesses_bp() becomes zero which leads to divide error, let's validate the values of them in damon_set_attrs() to fix it, which similar to others attrs check. Link: https://lkml.kernel.org/r/20230527032101.167788-1-wangkefeng.wang@huawei.com Fixes: 2f5bef5a590b ("mm/damon/core: update monitoring results for new monitoring attributes") Reported-by: syzbot+841a46899768ec7bec67@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=841a46899768ec7bec67 Link: https://lore.kernel.org/damon/00000000000055fc4e05fc975bc2@google.com/ Reviewed-by: SeongJae Park Signed-off-by: Kefeng Wang Cc: Signed-off-by: Andrew Morton --- mm/damon/core.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'mm') diff --git a/mm/damon/core.c b/mm/damon/core.c index d9ef62047bf5..91cff7f2997e 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -551,6 +551,8 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) return -EINVAL; if (attrs->min_nr_regions > attrs->max_nr_regions) return -EINVAL; + if (attrs->sample_interval > attrs->aggr_interval) + return -EINVAL; damon_update_monitoring_results(ctx, attrs); ctx->attrs = *attrs; -- cgit v1.2.3 From 9425c591e06a9ab27a145ba655fb50532cf0bcc9 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Fri, 2 Jun 2023 15:57:47 -0700 Subject: page cache: fix page_cache_next/prev_miss off by one Ackerley Tng reported an issue with hugetlbfs fallocate here[1]. The issue showed up after the conversion of hugetlb page cache lookup code to use page_cache_next_miss. Code in hugetlb fallocate, userfaultfd and GUP is now using page_cache_next_miss to determine if a page is present the page cache. The following statement is used. present = page_cache_next_miss(mapping, index, 1) != index; There are two issues with page_cache_next_miss when used in this way. 1) If the passed value for index is equal to the 'wrap-around' value, the same index will always be returned. This wrap-around value is 0, so 0 will be returned even if page is present at index 0. 2) If there is no gap in the range passed, the last index in the range will be returned. When passed a range of 1 as above, the passed index value will be returned even if the page is present. The end result is the statement above will NEVER indicate a page is present in the cache, even if it is. As noted by Ackerley in [1], users can see this by hugetlb fallocate incorrectly returning EEXIST if pages are already present in the file. 
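To make the failure mode concrete, a simplified sketch of the single-slot lookup pattern quoted above; the helper name is hypothetical and 'mapping'/'index' stand for whatever file offset is being probed.

#include <linux/pagemap.h>

/* Hypothetical helper mirroring the hugetlb fallocate/userfaultfd/GUP callers. */
static bool example_page_present(struct address_space *mapping, pgoff_t index)
{
	/*
	 * Before the fix this was never true: with max_scan == 1 and no gap
	 * the function returned the passed index itself (and always returned
	 * 0 when index == 0), so a cached page was reported as absent.
	 * After the fix a fully-populated range returns an index outside
	 * [index, index + max_scan), so the inequality behaves as intended.
	 */
	return page_cache_next_miss(mapping, index, 1) != index;
}
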
In addition, hugetlb pages will not be included in core dumps if they need to be brought in via GUP. userfaultfd UFFDIO_COPY also uses this code and will not notice pages already present in the cache. It may try to allocate a new page and potentially return ENOMEM as opposed to EEXIST. Both page_cache_next_miss and page_cache_prev_miss have similar issues. Fix by: - Check for index equal to 'wrap-around' value and do not exit early. - If no gap is found in range, return index outside range. - Update function description to say 'wrap-around' value could be returned if passed as index. [1] https://lore.kernel.org/linux-mm/cover.1683069252.git.ackerleytng@google.com/ Link: https://lkml.kernel.org/r/20230602225747.103865-2-mike.kravetz@oracle.com Fixes: d0ce0e47b323 ("mm/hugetlb: convert hugetlb fault paths to use alloc_hugetlb_folio()") Signed-off-by: Mike Kravetz Reported-by: Ackerley Tng Reviewed-by: Ackerley Tng Tested-by: Ackerley Tng Cc: Erdem Aktas Cc: Matthew Wilcox (Oracle) Cc: Mike Kravetz Cc: Muchun Song Cc: Sidhartha Kumar Cc: Vishal Annapurve Cc: Greg Kroah-Hartman Signed-off-by: Andrew Morton --- mm/filemap.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index b4c9bd368b7e..83dda76d1fc3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1760,7 +1760,9 @@ bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm, * * Return: The index of the gap if found, otherwise an index outside the * range specified (in which case 'return - index >= max_scan' will be true). - * In the rare case of index wrap-around, 0 will be returned. + * In the rare case of index wrap-around, 0 will be returned. 0 will also + * be returned if index == 0 and there is a gap at the index. We can not + * wrap-around if passed index == 0. */ pgoff_t page_cache_next_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan) @@ -1770,12 +1772,13 @@ pgoff_t page_cache_next_miss(struct address_space *mapping, while (max_scan--) { void *entry = xas_next(&xas); if (!entry || xa_is_value(entry)) - break; - if (xas.xa_index == 0) - break; + return xas.xa_index; + if (xas.xa_index == 0 && index != 0) + return xas.xa_index; } - return xas.xa_index; + /* No gaps in range and no wrap-around, return index beyond range */ + return xas.xa_index + 1; } EXPORT_SYMBOL(page_cache_next_miss); @@ -1796,7 +1799,9 @@ EXPORT_SYMBOL(page_cache_next_miss); * * Return: The index of the gap if found, otherwise an index outside the * range specified (in which case 'index - return >= max_scan' will be true). - * In the rare case of wrap-around, ULONG_MAX will be returned. + * In the rare case of wrap-around, ULONG_MAX will be returned. ULONG_MAX + * will also be returned if index == ULONG_MAX and there is a gap at the + * index. We can not wrap-around if passed index == ULONG_MAX. 
*/ pgoff_t page_cache_prev_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan) @@ -1806,12 +1811,13 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping, while (max_scan--) { void *entry = xas_prev(&xas); if (!entry || xa_is_value(entry)) - break; - if (xas.xa_index == ULONG_MAX) - break; + return xas.xa_index; + if (xas.xa_index == ULONG_MAX && index != ULONG_MAX) + return xas.xa_index; } - return xas.xa_index; + /* No gaps in range and no wrap-around, return index beyond range */ + return xas.xa_index - 1; } EXPORT_SYMBOL(page_cache_prev_miss); -- cgit v1.2.3 From 0bdf0efa180a9cb1361cbded4e2260a49306ac89 Mon Sep 17 00:00:00 2001 From: Nhat Pham Date: Tue, 30 May 2023 15:24:40 -0700 Subject: zswap: do not shrink if cgroup may not zswap Before storing a page, zswap first checks if the number of stored pages exceeds the limit specified by memory.zswap.max, for each cgroup in the hierarchy. If this limit is reached or exceeded, then zswap shrinking is triggered and short-circuits the store attempt. However, since the zswap's LRU is not memcg-aware, this can create the following pathological behavior: the cgroup whose zswap limit is 0 will evict pages from other cgroups continually, without lowering its own zswap usage. This means the shrinking will continue until the need for swap ceases or the pool becomes empty. As a result of this, we observe a disproportionate amount of zswap writeback and a perpetually small zswap pool in our experiments, even though the pool limit is never hit. More generally, a cgroup might unnecessarily evict pages from other cgroups before we drive the memcg back below its limit. This patch fixes the issue by rejecting zswap store attempt without shrinking the pool when obj_cgroup_may_zswap() returns false. [akpm@linux-foundation.org: fix return of unintialized value] [akpm@linux-foundation.org: s/ENOSPC/ENOMEM/] Link: https://lkml.kernel.org/r/20230530222440.2777700-1-nphamcs@gmail.com Link: https://lkml.kernel.org/r/20230530232435.3097106-1-nphamcs@gmail.com Fixes: f4840ccfca25 ("zswap: memcg accounting") Signed-off-by: Nhat Pham Cc: Dan Streetman Cc: Domenico Cerasuolo Cc: Johannes Weiner Cc: Seth Jennings Cc: Vitaly Wool Cc: Yosry Ahmed Cc: Signed-off-by: Andrew Morton --- mm/zswap.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/zswap.c b/mm/zswap.c index 59da2a415fbb..30092d9a3b23 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -1174,9 +1174,16 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, goto reject; } + /* + * XXX: zswap reclaim does not work with cgroups yet. Without a + * cgroup-aware entry LRU, we will push out entries system-wide based on + * local cgroup limits. + */ objcg = get_obj_cgroup_from_page(page); - if (objcg && !obj_cgroup_may_zswap(objcg)) - goto shrink; + if (objcg && !obj_cgroup_may_zswap(objcg)) { + ret = -ENOMEM; + goto reject; + } /* reclaim space if needed */ if (zswap_is_full()) { -- cgit v1.2.3 From d5bf485746991ce994994e5935b1cf223678f112 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 13 Jun 2023 12:28:21 +0200 Subject: mm/slab_common: use SLAB_NO_MERGE instead of negative refcount When CONFIG_MEMCG_KMEM is enabled, we disable cache merging for KMALLOC_NORMAL caches so they don't end up merged with a cache that uses ad-hoc __GFP_ACCOUNT when allocating. This was implemented by setting the refcount to -1, but now we have a proper SLAB_NO_MERGE flag, so use that instead. 
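For illustration, a minimal hypothetical use of the flag outside the kmalloc bootstrap path: any cache created with SLAB_NO_MERGE is skipped by slab merging, which is the effect the negative refcount used to emulate.

#include <linux/slab.h>

/* Hypothetical cache that must never be merged with other caches. */
static struct kmem_cache *example_cache;

static int __init example_cache_init(void)
{
	example_cache = kmem_cache_create("example_cache", 128, 0,
					  SLAB_NO_MERGE, NULL);
	return example_cache ? 0 : -ENOMEM;
}
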
Signed-off-by: Vlastimil Babka Acked-by: David Rientjes --- mm/slab_common.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/slab_common.c b/mm/slab_common.c index 0e0a617eae7d..67caf0bc3194 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -878,17 +878,17 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) flags |= SLAB_CACHE_DMA; } - kmalloc_caches[type][idx] = create_kmalloc_cache( - kmalloc_info[idx].name[type], - kmalloc_info[idx].size, flags, 0, - kmalloc_info[idx].size); - /* * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for * KMALLOC_NORMAL caches. */ if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL)) - kmalloc_caches[type][idx]->refcount = -1; + flags |= SLAB_NO_MERGE; + + kmalloc_caches[type][idx] = create_kmalloc_cache( + kmalloc_info[idx].name[type], + kmalloc_info[idx].size, flags, 0, + kmalloc_info[idx].size); } /* -- cgit v1.2.3 From 7a7f094635349a7d0314364ad50bdeb770b6df4f Mon Sep 17 00:00:00 2001 From: Arjun Roy Date: Fri, 16 Jun 2023 12:34:27 -0700 Subject: tcp: Use per-vma locking for receive zerocopy Per-VMA locking allows us to lock a struct vm_area_struct without taking the process-wide mmap lock in read mode. Consider a process workload where the mmap lock is taken constantly in write mode. In this scenario, all zerocopy receives are periodically blocked during that period of time - though in principle, the memory ranges being used by TCP are not touched by the operations that need the mmap write lock. This results in performance degradation. Now consider another workload where the mmap lock is never taken in write mode, but there are many TCP connections using receive zerocopy that are concurrently receiving. These connections all take the mmap lock in read mode, but this does induce a lot of contention and atomic ops for this process-wide lock. This results in additional CPU overhead caused by contending on the cache line for this lock. However, with per-vma locking, both of these problems can be avoided. As a test, I ran an RPC-style request/response workload with 4KB payloads and receive zerocopy enabled, with 100 simultaneous TCP connections. I measured perf cycles within the find_tcp_vma/mmap_read_lock/mmap_read_unlock codepath, with and without per-vma locking enabled. When using process-wide mmap semaphore read locking, about 1% of measured perf cycles were within this path. With per-VMA locking, this value dropped to about 0.45%. Signed-off-by: Arjun Roy Reviewed-by: Eric Dumazet Signed-off-by: David S. 
Miller --- MAINTAINERS | 1 + include/linux/net_mm.h | 17 +++++++++++++++++ include/net/tcp.h | 1 + mm/memory.c | 7 ++++--- net/ipv4/tcp.c | 45 +++++++++++++++++++++++++++++++++++++-------- 5 files changed, 60 insertions(+), 11 deletions(-) create mode 100644 include/linux/net_mm.h (limited to 'mm') diff --git a/MAINTAINERS b/MAINTAINERS index 7322963b0670..cb14589d14ab 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14743,6 +14743,7 @@ NETWORKING [TCP] M: Eric Dumazet L: netdev@vger.kernel.org S: Maintained +F: include/linux/net_mm.h F: include/linux/tcp.h F: include/net/tcp.h F: include/trace/events/tcp.h diff --git a/include/linux/net_mm.h b/include/linux/net_mm.h new file mode 100644 index 000000000000..b298998bd5a0 --- /dev/null +++ b/include/linux/net_mm.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifdef CONFIG_MMU + +#ifdef CONFIG_INET +extern const struct vm_operations_struct tcp_vm_ops; +static inline bool vma_is_tcp(const struct vm_area_struct *vma) +{ + return vma->vm_ops == &tcp_vm_ops; +} +#else +static inline bool vma_is_tcp(const struct vm_area_struct *vma) +{ + return false; +} +#endif /* CONFIG_INET*/ + +#endif /* CONFIG_MMU */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 9c08eab647a2..31b534370787 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -45,6 +45,7 @@ #include #include #include +#include extern struct inet_hashinfo tcp_hashinfo; diff --git a/mm/memory.c b/mm/memory.c index f69fbc251198..3e46b4d881dc 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -77,6 +77,7 @@ #include #include #include +#include #include @@ -5280,12 +5281,12 @@ retry: if (!vma) goto inval; - /* Only anonymous vmas are supported for now */ - if (!vma_is_anonymous(vma)) + /* Only anonymous and tcp vmas are supported for now */ + if (!vma_is_anonymous(vma) && !vma_is_tcp(vma)) goto inval; /* find_mergeable_anon_vma uses adjacent vmas which are not locked */ - if (!vma->anon_vma) + if (!vma->anon_vma && !vma_is_tcp(vma)) goto inval; if (!vma_start_read(vma)) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 0e21ea92dc1d..71b42eef9dbf 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1774,7 +1774,7 @@ void tcp_update_recv_tstamps(struct sk_buff *skb, } #ifdef CONFIG_MMU -static const struct vm_operations_struct tcp_vm_ops = { +const struct vm_operations_struct tcp_vm_ops = { }; int tcp_mmap(struct file *file, struct socket *sock, @@ -2073,6 +2073,34 @@ static void tcp_zc_finalize_rx_tstamp(struct sock *sk, } } +static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm, + unsigned long address, + bool *mmap_locked) +{ + struct vm_area_struct *vma = NULL; + +#ifdef CONFIG_PER_VMA_LOCK + vma = lock_vma_under_rcu(mm, address); +#endif + if (vma) { + if (!vma_is_tcp(vma)) { + vma_end_read(vma); + return NULL; + } + *mmap_locked = false; + return vma; + } + + mmap_read_lock(mm); + vma = vma_lookup(mm, address); + if (!vma || !vma_is_tcp(vma)) { + mmap_read_unlock(mm); + return NULL; + } + *mmap_locked = true; + return vma; +} + #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32 static int tcp_zerocopy_receive(struct sock *sk, struct tcp_zerocopy_receive *zc, @@ -2090,6 +2118,7 @@ static int tcp_zerocopy_receive(struct sock *sk, u32 seq = tp->copied_seq; u32 total_bytes_to_map; int inq = tcp_inq(sk); + bool mmap_locked; int ret; zc->copybuf_len = 0; @@ -2114,13 +2143,10 @@ static int tcp_zerocopy_receive(struct sock *sk, return 0; } - mmap_read_lock(current->mm); - - vma = vma_lookup(current->mm, address); - if (!vma || vma->vm_ops != &tcp_vm_ops) { - 
mmap_read_unlock(current->mm); + vma = find_tcp_vma(current->mm, address, &mmap_locked); + if (!vma) return -EINVAL; - } + vma_len = min_t(unsigned long, zc->length, vma->vm_end - address); avail_len = min_t(u32, vma_len, inq); total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1); @@ -2194,7 +2220,10 @@ static int tcp_zerocopy_receive(struct sock *sk, zc, total_bytes_to_map); } out: - mmap_read_unlock(current->mm); + if (mmap_locked) + mmap_read_unlock(current->mm); + else + vma_end_read(vma); /* Try to copy straggler data. */ if (!ret) copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss); -- cgit v1.2.3 From 606c812eb1d5b5fb0dd9e330ca94b52d7c227830 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Sat, 17 Jun 2023 20:47:08 -0400 Subject: mm/mmap: Fix error path in do_vmi_align_munmap() The error unrolling was leaving the VMAs detached in many cases and leaving the locked_vm statistic altered, and skipping the unrolling entirely in the case of the vma tree write failing. Fix the error path by re-attaching the detached VMAs and adding the necessary goto for the failed vma tree write, and fix the locked_vm statistic by only updating after the vma tree write succeeds. Fixes: 763ecb035029 ("mm: remove the vma linked list") Reported-by: Vegard Nossum Signed-off-by: Liam R. Howlett Signed-off-by: Linus Torvalds --- mm/mmap.c | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 13678edaa22c..d600404580b2 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2318,21 +2318,6 @@ int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, return __split_vma(vmi, vma, addr, new_below); } -static inline int munmap_sidetree(struct vm_area_struct *vma, - struct ma_state *mas_detach) -{ - vma_start_write(vma); - mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1); - if (mas_store_gfp(mas_detach, vma, GFP_KERNEL)) - return -ENOMEM; - - vma_mark_detached(vma, true); - if (vma->vm_flags & VM_LOCKED) - vma->vm_mm->locked_vm -= vma_pages(vma); - - return 0; -} - /* * do_vmi_align_munmap() - munmap the aligned region from @start to @end. 
* @vmi: The vma iterator @@ -2354,6 +2339,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, struct maple_tree mt_detach; int count = 0; int error = -ENOMEM; + unsigned long locked_vm = 0; MA_STATE(mas_detach, &mt_detach, 0, 0); mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK); mt_set_external_lock(&mt_detach, &mm->mmap_lock); @@ -2399,9 +2385,13 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, if (error) goto end_split_failed; } - error = munmap_sidetree(next, &mas_detach); - if (error) - goto munmap_sidetree_failed; + vma_start_write(next); + mas_set_range(&mas_detach, next->vm_start, next->vm_end - 1); + if (mas_store_gfp(&mas_detach, next, GFP_KERNEL)) + goto munmap_gather_failed; + vma_mark_detached(next, true); + if (next->vm_flags & VM_LOCKED) + locked_vm += vma_pages(next); count++; #ifdef CONFIG_DEBUG_VM_MAPLE_TREE @@ -2447,10 +2437,12 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, } #endif /* Point of no return */ + error = -ENOMEM; vma_iter_set(vmi, start); if (vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL)) - return -ENOMEM; + goto clear_tree_failed; + mm->locked_vm -= locked_vm; mm->map_count -= count; /* * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or @@ -2480,9 +2472,14 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, validate_mm(mm); return downgrade ? 1 : 0; +clear_tree_failed: userfaultfd_error: -munmap_sidetree_failed: +munmap_gather_failed: end_split_failed: + mas_set(&mas_detach, 0); + mas_for_each(&mas_detach, next, end) + vma_mark_detached(next, false); + __mt_destroy(&mt_detach); start_split_failed: map_count_exceeded: -- cgit v1.2.3 From 77795f900e2a07c1cbedc375789aefb43843b6c2 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 6 Jun 2023 14:29:12 -0400 Subject: mm/mprotect: fix do_mprotect_pkey() limit check The return of do_mprotect_pkey() can still be incorrectly returned as success if there is a gap that spans to or beyond the end address passed in. Update the check to ensure that the end address has indeed been seen. Link: https://lore.kernel.org/all/CABi2SkXjN+5iFoBhxk71t3cmunTk-s=rB4T7qo0UQRh17s49PQ@mail.gmail.com/ Link: https://lkml.kernel.org/r/20230606182912.586576-1-Liam.Howlett@oracle.com Fixes: 82f951340f25 ("mm/mprotect: fix do_mprotect_pkey() return on error") Signed-off-by: Liam R. Howlett Reported-by: Jeff Xu Reviewed-by: Lorenzo Stoakes Acked-by: David Hildenbrand Acked-by: Vlastimil Babka Cc: Signed-off-by: Andrew Morton --- mm/mprotect.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mprotect.c b/mm/mprotect.c index 92d3d3ca390a..c59e7561698c 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -867,7 +867,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len, } tlb_finish_mmu(&tlb); - if (!error && vma_iter_end(&vmi) < end) + if (!error && tmp < end) error = -ENOMEM; out: -- cgit v1.2.3 From 95a301eefa82057571207edd06ea36218985a75e Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 5 Jun 2023 21:11:07 +0100 Subject: mm/vmalloc: do not output a spurious warning when huge vmalloc() fails In __vmalloc_area_node() we always warn_alloc() when an allocation performed by vm_area_alloc_pages() fails unless it was due to a pending fatal signal. 
However, huge page allocations instigated either by vmalloc_huge() or __vmalloc_node_range() (or a caller that invokes this like kvmalloc() or kvmalloc_node()) always falls back to order-0 allocations if the huge page allocation fails. This renders the warning useless and noisy, especially as all callers appear to be aware that this may fallback. This has already resulted in at least one bug report from a user who was confused by this (see link). Therefore, simply update the code to only output this warning for order-0 pages when no fatal signal is pending. Link: https://bugzilla.suse.com/show_bug.cgi?id=1211410 Link: https://lkml.kernel.org/r/20230605201107.83298-1-lstoakes@gmail.com Fixes: 80b1d8fdfad1 ("mm: vmalloc: correct use of __GFP_NOWARN mask in __vmalloc_area_node()") Signed-off-by: Lorenzo Stoakes Acked-by: Vlastimil Babka Reviewed-by: Baoquan He Acked-by: Michal Hocko Reviewed-by: Uladzislau Rezki (Sony) Reviewed-by: David Hildenbrand Cc: Christoph Hellwig Cc: Signed-off-by: Andrew Morton --- mm/vmalloc.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 9683573f1225..1d13d71687d7 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3098,11 +3098,20 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, * allocation request, free them via vfree() if any. */ if (area->nr_pages != nr_small_pages) { - /* vm_area_alloc_pages() can also fail due to a fatal signal */ - if (!fatal_signal_pending(current)) + /* + * vm_area_alloc_pages() can fail due to insufficient memory but + * also:- + * + * - a pending fatal signal + * - insufficient huge page-order pages + * + * Since we always retry allocations at order-0 in the huge page + * case a warning for either is spurious. + */ + if (!fatal_signal_pending(current) && page_order == 0) warn_alloc(gfp_mask, NULL, - "vmalloc error: size %lu, page order %u, failed to allocate pages", - area->nr_pages * PAGE_SIZE, page_order); + "vmalloc error: size %lu, failed to allocate pages", + area->nr_pages * PAGE_SIZE); goto fail; } -- cgit v1.2.3 From 935d44acf621aa0688fef8312dec3e5940f38f4e Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 7 Jun 2023 15:24:27 +0200 Subject: memfd: check for non-NULL file_seals in memfd_create() syscall Ensure that file_seals is non-NULL before using it in the memfd_create() syscall. One situation in which memfd_file_seals_ptr() could return a NULL pointer when CONFIG_SHMEM=n, oopsing the kernel. 
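A hedged userspace sketch of the situation described above; the exact trigger conditions are an assumption on my part rather than taken from the patch. With CONFIG_SHMEM=n the memfd falls back to ramfs, memfd_file_seals_ptr() has no seals to point at, and the unconditional dereference in the sealing path oopses.

#define _GNU_SOURCE
#include <sys/mman.h>

int main(void)
{
	/*
	 * On a CONFIG_SHMEM=n kernel this could dereference a NULL seals
	 * pointer before the fix; with the fix it simply succeeds.
	 */
	int fd = memfd_create("example", MFD_ALLOW_SEALING);

	return fd < 0;
}
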
Link: https://lkml.kernel.org/r/20230607132427.2867435-1-roberto.sassu@huaweicloud.com Fixes: 47b9012ecdc7 ("shmem: add sealing support to hugetlb-backed memfd") Signed-off-by: Roberto Sassu Cc: Marc-Andr Lureau Cc: Mike Kravetz Cc: Signed-off-by: Andrew Morton --- mm/memfd.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/memfd.c b/mm/memfd.c index 69b90c31d38c..e763e76f1106 100644 --- a/mm/memfd.c +++ b/mm/memfd.c @@ -371,12 +371,15 @@ SYSCALL_DEFINE2(memfd_create, inode->i_mode &= ~0111; file_seals = memfd_file_seals_ptr(file); - *file_seals &= ~F_SEAL_SEAL; - *file_seals |= F_SEAL_EXEC; + if (file_seals) { + *file_seals &= ~F_SEAL_SEAL; + *file_seals |= F_SEAL_EXEC; + } } else if (flags & MFD_ALLOW_SEALING) { /* MFD_EXEC and MFD_ALLOW_SEALING are set */ file_seals = memfd_file_seals_ptr(file); - *file_seals &= ~F_SEAL_SEAL; + if (file_seals) + *file_seals &= ~F_SEAL_SEAL; } fd_install(fd, file); -- cgit v1.2.3 From c8a8f3b4a95ace7683b615ad9c9aa0eac59013ae Mon Sep 17 00:00:00 2001 From: David Stevens Date: Wed, 7 Jun 2023 14:31:35 +0900 Subject: mm/khugepaged: fix iteration in collapse_file Remove an unnecessary call to xas_set(index) when iterating over the target range in collapse_file. The extra call to xas_set reset the xas cursor to the top of the tree, causing the xas_next call on the next iteration to walk the tree to index instead of advancing to index+1. This returned the same page again, which would cause collapse_file to fail because the page is already locked. This bug was hidden when CONFIG_DEBUG_VM was set. When that config was used, the xas_load in a subsequent VM_BUG_ON assert would walk xas from the top of the tree to index, causing the xas_next call on the next loop iteration to advance the cursor as expected. Link: https://lkml.kernel.org/r/20230607053135.2087354-1-stevensd@google.com Fixes: a2e17cc2efc7 ("mm/khugepaged: maintain page cache uptodate flag") Signed-off-by: David Stevens Reviewed-by: Peter Xu Cc: David Hildenbrand Cc: Hugh Dickins Cc: Jiaqi Yan Cc: Kirill A . Shutemov Cc: Matthew Wilcox Cc: Yang Shi Cc: Mike Kravetz Signed-off-by: Andrew Morton --- mm/khugepaged.c | 1 - 1 file changed, 1 deletion(-) (limited to 'mm') diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 6b9d39d65b73..2d0d58fb4e7f 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2070,7 +2070,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH); xas_lock_irq(&xas); - xas_set(&xas, index); VM_BUG_ON_PAGE(page != xas_load(&xas), page); -- cgit v1.2.3 From 47a7c01c3efc6581f5dcca40928baeb38e1e40c2 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Fri, 9 Jun 2023 08:15:12 +0000 Subject: Revert "mm: shrinkers: convert shrinker_rwsem to mutex" Patch series "revert shrinker_srcu related changes". This patch (of 7): This reverts commit cf2e309ebca7bb0916771839f9b580b06c778530. Kernel test robot reports -88.8% regression in stress-ng.ramfs.ops_per_sec test case [1], which is caused by commit f95bdb700bc6 ("mm: vmscan: make global slab shrink lockless"). The root cause is that SRCU has to be careful to not frequently check for SRCU read-side critical section exits. Therefore, even if no one is currently in the SRCU read-side critical section, synchronize_srcu() cannot return quickly. That's why unregister_shrinker() has become slower. After discussion, we will try to use the refcount+RCU method [2] proposed by Dave Chinner to continue to re-implement the lockless slab shrink. 
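For background, a simplified sketch (assumptions, not code from this series) of the trade-off being reverted: with SRCU every unregister pays a full synchronize_srcu() grace period, while the rwsem variant only holds the writer lock across a brief list update.

#include <linux/rwsem.h>
#include <linux/srcu.h>

static DECLARE_RWSEM(example_shrinker_rwsem);
DEFINE_STATIC_SRCU(example_shrinker_srcu);

static void example_unregister_srcu(void)
{
	/* list_del_rcu(&shrinker->list); ... */
	synchronize_srcu(&example_shrinker_srcu);	/* may wait a long time */
}

static void example_unregister_rwsem(void)
{
	down_write(&example_shrinker_rwsem);		/* brief exclusive section */
	/* list_del(&shrinker->list); ... */
	up_write(&example_shrinker_rwsem);
}
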
So revert the shrinker_mutex back to shrinker_rwsem first. [1]. https://lore.kernel.org/lkml/202305230837.db2c233f-yujie.liu@intel.com/ [2]. https://lore.kernel.org/lkml/ZIJhou1d55d4H1s0@dread.disaster.area/ Link: https://lkml.kernel.org/r/20230609081518.3039120-1-qi.zheng@linux.dev Link: https://lkml.kernel.org/r/20230609081518.3039120-2-qi.zheng@linux.dev Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202305230837.db2c233f-yujie.liu@intel.com Signed-off-by: Qi Zheng Cc: Dave Chinner Cc: Kirill Tkhai Cc: Muchun Song Cc: Roman Gushchin Cc: Vlastimil Babka Cc: Yujie Liu Signed-off-by: Andrew Morton --- drivers/md/dm-cache-metadata.c | 2 +- drivers/md/dm-thin-metadata.c | 2 +- fs/super.c | 2 +- mm/shrinker_debug.c | 14 +++++++------- mm/vmscan.c | 34 +++++++++++++++++----------------- 5 files changed, 27 insertions(+), 27 deletions(-) (limited to 'mm') diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 9e0c69958587..acffed750e3e 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -1828,7 +1828,7 @@ int dm_cache_metadata_abort(struct dm_cache_metadata *cmd) * Replacement block manager (new_bm) is created and old_bm destroyed outside of * cmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of * shrinker associated with the block manager's bufio client vs cmd root_lock). - * - must take shrinker_mutex without holding cmd->root_lock + * - must take shrinker_rwsem without holding cmd->root_lock */ new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT, CACHE_MAX_CONCURRENT_LOCKS); diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 9f5cb52c5763..fd464fb024c3 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1887,7 +1887,7 @@ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd) * Replacement block manager (new_bm) is created and old_bm destroyed outside of * pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of * shrinker associated with the block manager's bufio client vs pmd root_lock). - * - must take shrinker_mutex without holding pmd->root_lock + * - must take shrinker_rwsem without holding pmd->root_lock */ new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT, THIN_MAX_CONCURRENT_LOCKS); diff --git a/fs/super.c b/fs/super.c index 34afe411cf2b..04bc62ab7dfe 100644 --- a/fs/super.c +++ b/fs/super.c @@ -54,7 +54,7 @@ static char *sb_writers_name[SB_FREEZE_LEVELS] = { * One thing we have to be careful of with a per-sb shrinker is that we don't * drop the last active reference to the superblock from within the shrinker. * If that happens we could trigger unregistering the shrinker from within the - * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we + * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we * take a passive reference to the superblock to avoid this from occurring. 
*/ static unsigned long super_cache_scan(struct shrinker *shrink, diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c index fe10436d9911..2be15b8a6d0b 100644 --- a/mm/shrinker_debug.c +++ b/mm/shrinker_debug.c @@ -8,7 +8,7 @@ #include /* defined in vmscan.c */ -extern struct mutex shrinker_mutex; +extern struct rw_semaphore shrinker_rwsem; extern struct list_head shrinker_list; extern struct srcu_struct shrinker_srcu; @@ -168,7 +168,7 @@ int shrinker_debugfs_add(struct shrinker *shrinker) char buf[128]; int id; - lockdep_assert_held(&shrinker_mutex); + lockdep_assert_held(&shrinker_rwsem); /* debugfs isn't initialized yet, add debugfs entries later. */ if (!shrinker_debugfs_root) @@ -211,7 +211,7 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...) if (!new) return -ENOMEM; - mutex_lock(&shrinker_mutex); + down_write(&shrinker_rwsem); old = shrinker->name; shrinker->name = new; @@ -229,7 +229,7 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...) shrinker->debugfs_entry = entry; } - mutex_unlock(&shrinker_mutex); + up_write(&shrinker_rwsem); kfree_const(old); @@ -242,7 +242,7 @@ struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker, { struct dentry *entry = shrinker->debugfs_entry; - lockdep_assert_held(&shrinker_mutex); + lockdep_assert_held(&shrinker_rwsem); kfree_const(shrinker->name); shrinker->name = NULL; @@ -271,14 +271,14 @@ static int __init shrinker_debugfs_init(void) shrinker_debugfs_root = dentry; /* Create debugfs entries for shrinkers registered at boot */ - mutex_lock(&shrinker_mutex); + down_write(&shrinker_rwsem); list_for_each_entry(shrinker, &shrinker_list, list) if (!shrinker->debugfs_entry) { ret = shrinker_debugfs_add(shrinker); if (ret) break; } - mutex_unlock(&shrinker_mutex); + up_write(&shrinker_rwsem); return ret; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 6d0cd2840cf0..4730dba253c8 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -35,7 +35,7 @@ #include #include #include -#include +#include #include #include #include @@ -190,7 +190,7 @@ struct scan_control { int vm_swappiness = 60; LIST_HEAD(shrinker_list); -DEFINE_MUTEX(shrinker_mutex); +DECLARE_RWSEM(shrinker_rwsem); DEFINE_SRCU(shrinker_srcu); static atomic_t shrinker_srcu_generation = ATOMIC_INIT(0); @@ -213,7 +213,7 @@ static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg, { return srcu_dereference_check(memcg->nodeinfo[nid]->shrinker_info, &shrinker_srcu, - lockdep_is_held(&shrinker_mutex)); + lockdep_is_held(&shrinker_rwsem)); } static struct shrinker_info *shrinker_info_srcu(struct mem_cgroup *memcg, @@ -292,7 +292,7 @@ int alloc_shrinker_info(struct mem_cgroup *memcg) int nid, size, ret = 0; int map_size, defer_size = 0; - mutex_lock(&shrinker_mutex); + down_write(&shrinker_rwsem); map_size = shrinker_map_size(shrinker_nr_max); defer_size = shrinker_defer_size(shrinker_nr_max); size = map_size + defer_size; @@ -308,7 +308,7 @@ int alloc_shrinker_info(struct mem_cgroup *memcg) info->map_nr_max = shrinker_nr_max; rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info); } - mutex_unlock(&shrinker_mutex); + up_write(&shrinker_rwsem); return ret; } @@ -324,7 +324,7 @@ static int expand_shrinker_info(int new_id) if (!root_mem_cgroup) goto out; - lockdep_assert_held(&shrinker_mutex); + lockdep_assert_held(&shrinker_rwsem); map_size = shrinker_map_size(new_nr_max); defer_size = shrinker_defer_size(new_nr_max); @@ -374,7 +374,7 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker) if 
(mem_cgroup_disabled()) return -ENOSYS; - mutex_lock(&shrinker_mutex); + down_write(&shrinker_rwsem); id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL); if (id < 0) goto unlock; @@ -388,7 +388,7 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker) shrinker->id = id; ret = 0; unlock: - mutex_unlock(&shrinker_mutex); + up_write(&shrinker_rwsem); return ret; } @@ -398,7 +398,7 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker) BUG_ON(id < 0); - lockdep_assert_held(&shrinker_mutex); + lockdep_assert_held(&shrinker_rwsem); idr_remove(&shrinker_idr, id); } @@ -433,7 +433,7 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg) parent = root_mem_cgroup; /* Prevent from concurrent shrinker_info expand */ - mutex_lock(&shrinker_mutex); + down_write(&shrinker_rwsem); for_each_node(nid) { child_info = shrinker_info_protected(memcg, nid); parent_info = shrinker_info_protected(parent, nid); @@ -442,7 +442,7 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg) atomic_long_add(nr, &parent_info->nr_deferred[i]); } } - mutex_unlock(&shrinker_mutex); + up_write(&shrinker_rwsem); } static bool cgroup_reclaim(struct scan_control *sc) @@ -743,9 +743,9 @@ void free_prealloced_shrinker(struct shrinker *shrinker) shrinker->name = NULL; #endif if (shrinker->flags & SHRINKER_MEMCG_AWARE) { - mutex_lock(&shrinker_mutex); + down_write(&shrinker_rwsem); unregister_memcg_shrinker(shrinker); - mutex_unlock(&shrinker_mutex); + up_write(&shrinker_rwsem); return; } @@ -755,11 +755,11 @@ void free_prealloced_shrinker(struct shrinker *shrinker) void register_shrinker_prepared(struct shrinker *shrinker) { - mutex_lock(&shrinker_mutex); + down_write(&shrinker_rwsem); list_add_tail_rcu(&shrinker->list, &shrinker_list); shrinker->flags |= SHRINKER_REGISTERED; shrinker_debugfs_add(shrinker); - mutex_unlock(&shrinker_mutex); + up_write(&shrinker_rwsem); } static int __register_shrinker(struct shrinker *shrinker) @@ -810,13 +810,13 @@ void unregister_shrinker(struct shrinker *shrinker) if (!(shrinker->flags & SHRINKER_REGISTERED)) return; - mutex_lock(&shrinker_mutex); + down_write(&shrinker_rwsem); list_del_rcu(&shrinker->list); shrinker->flags &= ~SHRINKER_REGISTERED; if (shrinker->flags & SHRINKER_MEMCG_AWARE) unregister_memcg_shrinker(shrinker); debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id); - mutex_unlock(&shrinker_mutex); + up_write(&shrinker_rwsem); atomic_inc(&shrinker_srcu_generation); synchronize_srcu(&shrinker_srcu); -- cgit v1.2.3 From 07252b0f97150b03050f74f42250f275566d70b9 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Fri, 9 Jun 2023 08:15:13 +0000 Subject: Revert "mm: vmscan: remove shrinker_rwsem from synchronize_shrinkers()" This reverts commit 1643db98d9b314e0a592d152603094fbf7ab906e. Kernel test robot reports -88.8% regression in stress-ng.ramfs.ops_per_sec test case [1], which is caused by commit f95bdb700bc6 ("mm: vmscan: make global slab shrink lockless"). The root cause is that SRCU has to be careful to not frequently check for SRCU read-side critical section exits. Therefore, even if no one is currently in the SRCU read-side critical section, synchronize_srcu() cannot return quickly. That's why unregister_shrinker() has become slower. We will try to use the refcount+RCU method [2] proposed by Dave Chinner to continue to re-implement the lockless slab shrink. So we still need shrinker_rwsem in synchronize_shrinkers() after reverting the shrinker_srcu related changes. [1]. 
https://lore.kernel.org/lkml/202305230837.db2c233f-yujie.liu@intel.com/ [2]. https://lore.kernel.org/lkml/ZIJhou1d55d4H1s0@dread.disaster.area/ Link: https://lkml.kernel.org/r/20230609081518.3039120-3-qi.zheng@linux.dev Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202305230837.db2c233f-yujie.liu@intel.com Signed-off-by: Qi Zheng Cc: Dave Chinner Cc: Kirill Tkhai Cc: Muchun Song Cc: Roman Gushchin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/vmscan.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 4730dba253c8..0ba0e1180f3f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -831,11 +831,15 @@ EXPORT_SYMBOL(unregister_shrinker); /** * synchronize_shrinkers - Wait for all running shrinkers to complete. * - * This is useful to guarantee that all shrinker invocations have seen an - * update, before freeing memory. + * This is equivalent to calling unregister_shrink() and register_shrinker(), + * but atomically and with less overhead. This is useful to guarantee that all + * shrinker invocations have seen an update, before freeing memory, similar to + * rcu. */ void synchronize_shrinkers(void) { + down_write(&shrinker_rwsem); + up_write(&shrinker_rwsem); atomic_inc(&shrinker_srcu_generation); synchronize_srcu(&shrinker_srcu); } -- cgit v1.2.3 From c534f7cca6b9b1c0dc97d6e9c5587858d4330cd9 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Fri, 9 Jun 2023 08:15:14 +0000 Subject: Revert "mm: vmscan: hold write lock to reparent shrinker nr_deferred" This reverts commit b3cabea3c9153fd42fe5cb851ac58b51ea2b32b8. Kernel test robot reports -88.8% regression in stress-ng.ramfs.ops_per_sec test case [1], which is caused by commit f95bdb700bc6 ("mm: vmscan: make global slab shrink lockless"). The root cause is that SRCU has to be careful to not frequently check for SRCU read-side critical section exits. Therefore, even if no one is currently in the SRCU read-side critical section, synchronize_srcu() cannot return quickly. That's why unregister_shrinker() has become slower. We will try to use the refcount+RCU method [2] proposed by Dave Chinner to continue to re-implement the lockless slab shrink. Because there will be other readers after reverting the shrinker_srcu related changes, so it is better to restore to hold read lock to reparent shrinker nr_deferred. [1]. https://lore.kernel.org/lkml/202305230837.db2c233f-yujie.liu@intel.com/ [2]. 
https://lore.kernel.org/lkml/ZIJhou1d55d4H1s0@dread.disaster.area/ Link: https://lkml.kernel.org/r/20230609081518.3039120-4-qi.zheng@linux.dev Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202305230837.db2c233f-yujie.liu@intel.com Signed-off-by: Qi Zheng Cc: Dave Chinner Cc: Kirill Tkhai Cc: Muchun Song Cc: Roman Gushchin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/vmscan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 0ba0e1180f3f..d1d309fc3212 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -433,7 +433,7 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg) parent = root_mem_cgroup; /* Prevent from concurrent shrinker_info expand */ - down_write(&shrinker_rwsem); + down_read(&shrinker_rwsem); for_each_node(nid) { child_info = shrinker_info_protected(memcg, nid); parent_info = shrinker_info_protected(parent, nid); @@ -442,7 +442,7 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg) atomic_long_add(nr, &parent_info->nr_deferred[i]); } } - up_write(&shrinker_rwsem); + up_read(&shrinker_rwsem); } static bool cgroup_reclaim(struct scan_control *sc) -- cgit v1.2.3 From 1a554ecc971406e291cea867112f7f2e377e810e Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Fri, 9 Jun 2023 08:15:15 +0000 Subject: Revert "mm: shrinkers: make count and scan in shrinker debugfs lockless" This reverts commit 20cd1892fcc3efc10a7ac327cc3790494bec46b5. Kernel test robot reports -88.8% regression in stress-ng.ramfs.ops_per_sec test case [1], which is caused by commit f95bdb700bc6 ("mm: vmscan: make global slab shrink lockless"). The root cause is that SRCU has to be careful to not frequently check for SRCU read-side critical section exits. Therefore, even if no one is currently in the SRCU read-side critical section, synchronize_srcu() cannot return quickly. That's why unregister_shrinker() has become slower. We will try to use the refcount+RCU method [2] proposed by Dave Chinner to continue to re-implement the lockless slab shrink. So revert the shrinker_srcu related changes first. [1]. https://lore.kernel.org/lkml/202305230837.db2c233f-yujie.liu@intel.com/ [2]. 
https://lore.kernel.org/lkml/ZIJhou1d55d4H1s0@dread.disaster.area/ Link: https://lkml.kernel.org/r/20230609081518.3039120-5-qi.zheng@linux.dev Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202305230837.db2c233f-yujie.liu@intel.com Signed-off-by: Qi Zheng Cc: Dave Chinner Cc: Kirill Tkhai Cc: Muchun Song Cc: Roman Gushchin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/shrinker_debug.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c index 2be15b8a6d0b..3ab53fad8876 100644 --- a/mm/shrinker_debug.c +++ b/mm/shrinker_debug.c @@ -5,12 +5,10 @@ #include #include #include -#include /* defined in vmscan.c */ extern struct rw_semaphore shrinker_rwsem; extern struct list_head shrinker_list; -extern struct srcu_struct shrinker_srcu; static DEFINE_IDA(shrinker_debugfs_ida); static struct dentry *shrinker_debugfs_root; @@ -51,13 +49,18 @@ static int shrinker_debugfs_count_show(struct seq_file *m, void *v) struct mem_cgroup *memcg; unsigned long total; bool memcg_aware; - int ret = 0, nid, srcu_idx; + int ret, nid; count_per_node = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); if (!count_per_node) return -ENOMEM; - srcu_idx = srcu_read_lock(&shrinker_srcu); + ret = down_read_killable(&shrinker_rwsem); + if (ret) { + kfree(count_per_node); + return ret; + } + rcu_read_lock(); memcg_aware = shrinker->flags & SHRINKER_MEMCG_AWARE; @@ -88,7 +91,8 @@ static int shrinker_debugfs_count_show(struct seq_file *m, void *v) } } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); - srcu_read_unlock(&shrinker_srcu, srcu_idx); + rcu_read_unlock(); + up_read(&shrinker_rwsem); kfree(count_per_node); return ret; @@ -111,8 +115,9 @@ static ssize_t shrinker_debugfs_scan_write(struct file *file, .gfp_mask = GFP_KERNEL, }; struct mem_cgroup *memcg = NULL; - int nid, srcu_idx; + int nid; char kbuf[72]; + ssize_t ret; read_len = size < (sizeof(kbuf) - 1) ? size : (sizeof(kbuf) - 1); if (copy_from_user(kbuf, buf, read_len)) @@ -141,7 +146,11 @@ static ssize_t shrinker_debugfs_scan_write(struct file *file, return -EINVAL; } - srcu_idx = srcu_read_lock(&shrinker_srcu); + ret = down_read_killable(&shrinker_rwsem); + if (ret) { + mem_cgroup_put(memcg); + return ret; + } sc.nid = nid; sc.memcg = memcg; @@ -150,7 +159,7 @@ static ssize_t shrinker_debugfs_scan_write(struct file *file, shrinker->scan_objects(shrinker, &sc); - srcu_read_unlock(&shrinker_srcu, srcu_idx); + up_read(&shrinker_rwsem); mem_cgroup_put(memcg); return size; -- cgit v1.2.3 From d6ecbcd70fffcffe79d45f3be7b8fcf5d8e04a3d Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Fri, 9 Jun 2023 08:15:16 +0000 Subject: Revert "mm: vmscan: add shrinker_srcu_generation" This reverts commit 475733dda5aedba9e086379aafe6b5ffd53e8f5e. Kernel test robot reports -88.8% regression in stress-ng.ramfs.ops_per_sec test case [1], which is caused by commit f95bdb700bc6 ("mm: vmscan: make global slab shrink lockless"). The root cause is that SRCU has to be careful to not frequently check for SRCU read-side critical section exits. Therefore, even if no one is currently in the SRCU read-side critical section, synchronize_srcu() cannot return quickly. That's why unregister_shrinker() has become slower. We will try to use the refcount+RCU method [2] proposed by Dave Chinner to continue to re-implement the lockless slab shrink. So revert the shrinker_srcu related changes first. [1]. 
https://lore.kernel.org/lkml/202305230837.db2c233f-yujie.liu@intel.com/ [2]. https://lore.kernel.org/lkml/ZIJhou1d55d4H1s0@dread.disaster.area/ Link: https://lkml.kernel.org/r/20230609081518.3039120-6-qi.zheng@linux.dev Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202305230837.db2c233f-yujie.liu@intel.com Signed-off-by: Qi Zheng Cc: Dave Chinner Cc: Kirill Tkhai Cc: Muchun Song Cc: Roman Gushchin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/vmscan.c | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index d1d309fc3212..50775b73d0c7 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -192,7 +192,6 @@ int vm_swappiness = 60; LIST_HEAD(shrinker_list); DECLARE_RWSEM(shrinker_rwsem); DEFINE_SRCU(shrinker_srcu); -static atomic_t shrinker_srcu_generation = ATOMIC_INIT(0); #ifdef CONFIG_MEMCG static int shrinker_nr_max; @@ -818,7 +817,6 @@ void unregister_shrinker(struct shrinker *shrinker) debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id); up_write(&shrinker_rwsem); - atomic_inc(&shrinker_srcu_generation); synchronize_srcu(&shrinker_srcu); shrinker_debugfs_remove(debugfs_entry, debugfs_id); @@ -840,7 +838,6 @@ void synchronize_shrinkers(void) { down_write(&shrinker_rwsem); up_write(&shrinker_rwsem); - atomic_inc(&shrinker_srcu_generation); synchronize_srcu(&shrinker_srcu); } EXPORT_SYMBOL(synchronize_shrinkers); @@ -950,20 +947,18 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, { struct shrinker_info *info; unsigned long ret, freed = 0; - int srcu_idx, generation; - int i = 0; + int srcu_idx; + int i; if (!mem_cgroup_online(memcg)) return 0; -again: srcu_idx = srcu_read_lock(&shrinker_srcu); info = shrinker_info_srcu(memcg, nid); if (unlikely(!info)) goto unlock; - generation = atomic_read(&shrinker_srcu_generation); - for_each_set_bit_from(i, info->map, info->map_nr_max) { + for_each_set_bit(i, info->map, info->map_nr_max) { struct shrink_control sc = { .gfp_mask = gfp_mask, .nid = nid, @@ -1009,11 +1004,6 @@ again: set_shrinker_bit(memcg, nid, i); } freed += ret; - if (atomic_read(&shrinker_srcu_generation) != generation) { - srcu_read_unlock(&shrinker_srcu, srcu_idx); - i++; - goto again; - } } unlock: srcu_read_unlock(&shrinker_srcu, srcu_idx); @@ -1053,7 +1043,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid, { unsigned long ret, freed = 0; struct shrinker *shrinker; - int srcu_idx, generation; + int srcu_idx; /* * The root memcg might be allocated even though memcg is disabled @@ -1067,7 +1057,6 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid, srcu_idx = srcu_read_lock(&shrinker_srcu); - generation = atomic_read(&shrinker_srcu_generation); list_for_each_entry_srcu(shrinker, &shrinker_list, list, srcu_read_lock_held(&shrinker_srcu)) { struct shrink_control sc = { @@ -1080,11 +1069,6 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid, if (ret == SHRINK_EMPTY) ret = 0; freed += ret; - - if (atomic_read(&shrinker_srcu_generation) != generation) { - freed = freed ? : 1; - break; - } } srcu_read_unlock(&shrinker_srcu, srcu_idx); -- cgit v1.2.3 From 7cee3603192a198b23c034a7372b599081cbee88 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Fri, 9 Jun 2023 08:15:17 +0000 Subject: Revert "mm: vmscan: make memcg slab shrink lockless" This reverts commit caa05325c9126c77ebf114edce51536a0d0a9a08. 
Kernel test robot reports -88.8% regression in stress-ng.ramfs.ops_per_sec test case [1], which is caused by commit f95bdb700bc6 ("mm: vmscan: make global slab shrink lockless"). The root cause is that SRCU has to be careful to not frequently check for SRCU read-side critical section exits. Therefore, even if no one is currently in the SRCU read-side critical section, synchronize_srcu() cannot return quickly. That's why unregister_shrinker() has become slower. After discussion, we will try to use the refcount+RCU method [2] proposed by Dave Chinner to continue to re-implement the lockless slab shrink. So revert the shrinker_srcu related changes first. [1]. https://lore.kernel.org/lkml/202305230837.db2c233f-yujie.liu@intel.com/ [2]. https://lore.kernel.org/lkml/ZIJhou1d55d4H1s0@dread.disaster.area/ Link: https://lkml.kernel.org/r/20230609081518.3039120-7-qi.zheng@linux.dev Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202305230837.db2c233f-yujie.liu@intel.com Signed-off-by: Qi Zheng Cc: Dave Chinner Cc: Kirill Tkhai Cc: Muchun Song Cc: Roman Gushchin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/vmscan.c | 45 +++++++++++++++++++-------------------------- 1 file changed, 19 insertions(+), 26 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 50775b73d0c7..a008d7f2d0fc 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -210,21 +210,8 @@ static inline int shrinker_defer_size(int nr_items) static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg, int nid) { - return srcu_dereference_check(memcg->nodeinfo[nid]->shrinker_info, - &shrinker_srcu, - lockdep_is_held(&shrinker_rwsem)); -} - -static struct shrinker_info *shrinker_info_srcu(struct mem_cgroup *memcg, - int nid) -{ - return srcu_dereference(memcg->nodeinfo[nid]->shrinker_info, - &shrinker_srcu); -} - -static void free_shrinker_info_rcu(struct rcu_head *head) -{ - kvfree(container_of(head, struct shrinker_info, rcu)); + return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info, + lockdep_is_held(&shrinker_rwsem)); } static int expand_one_shrinker_info(struct mem_cgroup *memcg, @@ -265,7 +252,7 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg, defer_size - old_defer_size); rcu_assign_pointer(pn->shrinker_info, new); - call_srcu(&shrinker_srcu, &old->rcu, free_shrinker_info_rcu); + kvfree_rcu(old, rcu); } return 0; @@ -351,16 +338,15 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) { if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { struct shrinker_info *info; - int srcu_idx; - srcu_idx = srcu_read_lock(&shrinker_srcu); - info = shrinker_info_srcu(memcg, nid); + rcu_read_lock(); + info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info); if (!WARN_ON_ONCE(shrinker_id >= info->map_nr_max)) { /* Pairs with smp mb in shrink_slab() */ smp_mb__before_atomic(); set_bit(shrinker_id, info->map); } - srcu_read_unlock(&shrinker_srcu, srcu_idx); + rcu_read_unlock(); } } @@ -374,6 +360,7 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker) return -ENOSYS; down_write(&shrinker_rwsem); + /* This may call shrinker, so it must use down_read_trylock() */ id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL); if (id < 0) goto unlock; @@ -407,7 +394,7 @@ static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker, { struct shrinker_info *info; - info = shrinker_info_srcu(memcg, nid); + info = shrinker_info_protected(memcg, nid); return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0); } 
@@ -416,7 +403,7 @@ static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker, { struct shrinker_info *info; - info = shrinker_info_srcu(memcg, nid); + info = shrinker_info_protected(memcg, nid); return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]); } @@ -947,14 +934,15 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, { struct shrinker_info *info; unsigned long ret, freed = 0; - int srcu_idx; int i; if (!mem_cgroup_online(memcg)) return 0; - srcu_idx = srcu_read_lock(&shrinker_srcu); - info = shrinker_info_srcu(memcg, nid); + if (!down_read_trylock(&shrinker_rwsem)) + return 0; + + info = shrinker_info_protected(memcg, nid); if (unlikely(!info)) goto unlock; @@ -1004,9 +992,14 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, set_shrinker_bit(memcg, nid, i); } freed += ret; + + if (rwsem_is_contended(&shrinker_rwsem)) { + freed = freed ? : 1; + break; + } } unlock: - srcu_read_unlock(&shrinker_srcu, srcu_idx); + up_read(&shrinker_rwsem); return freed; } #else /* CONFIG_MEMCG */ -- cgit v1.2.3 From 71c3ad65fabec9620d3f548b2da948c79c7ad9d5 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Fri, 9 Jun 2023 08:15:18 +0000 Subject: Revert "mm: vmscan: make global slab shrink lockless" This reverts commit f95bdb700bc6bb74e1199b1f5f90c613e152cfa7. Kernel test robot reports -88.8% regression in stress-ng.ramfs.ops_per_sec test case [1], which is caused by commit f95bdb700bc6 ("mm: vmscan: make global slab shrink lockless"). The root cause is that SRCU has to be careful to not frequently check for SRCU read-side critical section exits. Therefore, even if no one is currently in the SRCU read-side critical section, synchronize_srcu() cannot return quickly. That's why unregister_shrinker() has become slower. After discussion, we will try to use the refcount+RCU method [2] proposed by Dave Chinner to continue to re-implement the lockless slab shrink. So revert the shrinker_srcu related changes first. [1]. https://lore.kernel.org/lkml/202305230837.db2c233f-yujie.liu@intel.com/ [2]. 
https://lore.kernel.org/lkml/ZIJhou1d55d4H1s0@dread.disaster.area/ Link: https://lkml.kernel.org/r/20230609081518.3039120-8-qi.zheng@linux.dev Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202305230837.db2c233f-yujie.liu@intel.com Signed-off-by: Qi Zheng Cc: Dave Chinner Cc: Kirill Tkhai Cc: Muchun Song Cc: Roman Gushchin Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/vmscan.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index a008d7f2d0fc..5bf98d0a22c9 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -57,7 +57,6 @@ #include #include #include -#include #include #include @@ -191,7 +190,6 @@ int vm_swappiness = 60; LIST_HEAD(shrinker_list); DECLARE_RWSEM(shrinker_rwsem); -DEFINE_SRCU(shrinker_srcu); #ifdef CONFIG_MEMCG static int shrinker_nr_max; @@ -742,7 +740,7 @@ void free_prealloced_shrinker(struct shrinker *shrinker) void register_shrinker_prepared(struct shrinker *shrinker) { down_write(&shrinker_rwsem); - list_add_tail_rcu(&shrinker->list, &shrinker_list); + list_add_tail(&shrinker->list, &shrinker_list); shrinker->flags |= SHRINKER_REGISTERED; shrinker_debugfs_add(shrinker); up_write(&shrinker_rwsem); @@ -797,15 +795,13 @@ void unregister_shrinker(struct shrinker *shrinker) return; down_write(&shrinker_rwsem); - list_del_rcu(&shrinker->list); + list_del(&shrinker->list); shrinker->flags &= ~SHRINKER_REGISTERED; if (shrinker->flags & SHRINKER_MEMCG_AWARE) unregister_memcg_shrinker(shrinker); debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id); up_write(&shrinker_rwsem); - synchronize_srcu(&shrinker_srcu); - shrinker_debugfs_remove(debugfs_entry, debugfs_id); kfree(shrinker->nr_deferred); @@ -825,7 +821,6 @@ void synchronize_shrinkers(void) { down_write(&shrinker_rwsem); up_write(&shrinker_rwsem); - synchronize_srcu(&shrinker_srcu); } EXPORT_SYMBOL(synchronize_shrinkers); @@ -1036,7 +1031,6 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid, { unsigned long ret, freed = 0; struct shrinker *shrinker; - int srcu_idx; /* * The root memcg might be allocated even though memcg is disabled @@ -1048,10 +1042,10 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid, if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg)) return shrink_slab_memcg(gfp_mask, nid, memcg, priority); - srcu_idx = srcu_read_lock(&shrinker_srcu); + if (!down_read_trylock(&shrinker_rwsem)) + goto out; - list_for_each_entry_srcu(shrinker, &shrinker_list, list, - srcu_read_lock_held(&shrinker_srcu)) { + list_for_each_entry(shrinker, &shrinker_list, list) { struct shrink_control sc = { .gfp_mask = gfp_mask, .nid = nid, @@ -1062,9 +1056,19 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid, if (ret == SHRINK_EMPTY) ret = 0; freed += ret; + /* + * Bail out if someone want to register a new shrinker to + * prevent the registration from being stalled for long periods + * by parallel ongoing shrinking. + */ + if (rwsem_is_contended(&shrinker_rwsem)) { + freed = freed ? : 1; + break; + } } - srcu_read_unlock(&shrinker_srcu, srcu_idx); + up_read(&shrinker_rwsem); +out: cond_resched(); return freed; } -- cgit v1.2.3 From ce5df7764b3b2abaf3687c460a9a1922daaed5b7 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 19 May 2023 13:16:52 +0200 Subject: mm: page_isolation: write proper kerneldoc And remove the incorrect header comments. 
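As a usage illustration (a hedged sketch modeled on callers such as alloc_contig_range(); the migration step and most error handling are elided), the three functions documented below typically bracket a migration attempt:

        /* stop the allocator from handing out pages in [start_pfn, end_pfn) */
        ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
                                       0, GFP_KERNEL);
        if (ret)
                return ret;

        /* ... migrate whatever is still in use out of the range ... */

        /* 0 means every page in the range is now free and isolated */
        if (test_pages_isolated(start_pfn, end_pfn, 0))
                ret = -EBUSY;

        /* restore the original migratetype */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);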
[akpm@linux-foundation.org: s/lower/first/, s/upper/last/, per Mike] Link: https://lkml.kernel.org/r/20230519111652.40658-1-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Cc: Mike Rapoport Signed-off-by: Andrew Morton --- include/linux/page-isolation.h | 24 ++++++------------------ mm/page_isolation.c | 33 ++++++++++++++++++++++++++------- 2 files changed, 32 insertions(+), 25 deletions(-) (limited to 'mm') diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 5456b7be38ae..0ab089e89db4 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -37,24 +37,12 @@ void set_pageblock_migratetype(struct page *page, int migratetype); int move_freepages_block(struct zone *zone, struct page *page, int migratetype, int *num_movable); -/* - * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. - */ -int -start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, - int migratetype, int flags, gfp_t gfp_flags); - -/* - * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. - * target range is [start_pfn, end_pfn) - */ -void -undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, - int migratetype); - -/* - * Test all pages in [start_pfn, end_pfn) are isolated or not. - */ +int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, + int migratetype, int flags, gfp_t gfp_flags); + +void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, + int migratetype); + int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, int isol_flags); diff --git a/mm/page_isolation.c b/mm/page_isolation.c index c6f3605e37ab..6599cc965e21 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -481,10 +481,9 @@ failed: } /** - * start_isolate_page_range() - make page-allocation-type of range of pages to - * be MIGRATE_ISOLATE. - * @start_pfn: The lower PFN of the range to be isolated. - * @end_pfn: The upper PFN of the range to be isolated. + * start_isolate_page_range() - mark page range MIGRATE_ISOLATE + * @start_pfn: The first PFN of the range to be isolated. + * @end_pfn: The last PFN of the range to be isolated. * @migratetype: Migrate type to set in error recovery. * @flags: The following flags are allowed (they can be combined in * a bit mask) @@ -571,8 +570,14 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, return 0; } -/* - * Make isolated pages available again. +/** + * undo_isolate_page_range - undo effects of start_isolate_page_range() + * @start_pfn: The first PFN of the isolated range + * @end_pfn: The last PFN of the isolated range + * @migratetype: New migrate type to set on the range + * + * This finds every MIGRATE_ISOLATE page block in the given range + * and switches it to @migratetype. */ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, int migratetype) @@ -631,7 +636,21 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, return pfn; } -/* Caller should ensure that requested range is in a single zone */ +/** + * test_pages_isolated - check if pageblocks in range are isolated + * @start_pfn: The first PFN of the isolated range + * @end_pfn: The first PFN *after* the isolated range + * @isol_flags: Testing mode flags + * + * This tests if all in the specified range are free. + * + * If %MEMORY_OFFLINE is specified in @flags, it will consider + * poisoned and offlined pages free as well. + * + * Caller must ensure the requested range doesn't span zones. 
+ * + * Returns 0 if true, -EBUSY if one or more pages are in use. + */ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, int isol_flags) { -- cgit v1.2.3 From 5da226dbfce3a2f44978c2c7cf88166e69a6788b Mon Sep 17 00:00:00 2001 From: Zhaoyang Huang Date: Wed, 31 May 2023 10:51:01 +0800 Subject: mm: skip CMA pages when they are not available This patch fixes unproductive reclaiming of CMA pages by skipping them when they are not available for current context. It arises from the below OOM issue, which was caused by a large proportion of MIGRATE_CMA pages among free pages. [ 36.172486] [03-19 10:05:52.172] ActivityManager: page allocation failure: order:0, mode:0xc00(GFP_NOIO), nodemask=(null),cpuset=foreground,mems_allowed=0 [ 36.189447] [03-19 10:05:52.189] DMA32: 0*4kB 447*8kB (C) 217*16kB (C) 124*32kB (C) 136*64kB (C) 70*128kB (C) 22*256kB (C) 3*512kB (C) 0*1024kB 0*2048kB 0*4096kB = 35848kB [ 36.193125] [03-19 10:05:52.193] Normal: 231*4kB (UMEH) 49*8kB (MEH) 14*16kB (H) 13*32kB (H) 8*64kB (H) 2*128kB (H) 0*256kB 1*512kB (H) 0*1024kB 0*2048kB 0*4096kB = 3236kB ... [ 36.234447] [03-19 10:05:52.234] SLUB: Unable to allocate memory on node -1, gfp=0xa20(GFP_ATOMIC) [ 36.234455] [03-19 10:05:52.234] cache: ext4_io_end, object size: 64, buffer size: 64, default order: 0, min order: 0 [ 36.234459] [03-19 10:05:52.234] node 0: slabs: 53,objs: 3392, free: 0 This change further decreases the chance for wrong OOMs in the presence of a lot of CMA memory. [david@redhat.com: changelog addition] Link: https://lkml.kernel.org/r/1685501461-19290-1-git-send-email-zhaoyang.huang@unisoc.com Signed-off-by: Zhaoyang Huang Acked-by: David Hildenbrand Cc: ke.wang Cc: Matthew Wilcox Cc: Minchan Kim Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- mm/vmscan.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 5993c0bae162..9048739c41db 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2271,6 +2271,25 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec, } +#ifdef CONFIG_CMA +/* + * It is waste of effort to scan and reclaim CMA pages if it is not available + * for current allocation context. Kswapd can not be enrolled as it can not + * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL + */ +static bool skip_cma(struct folio *folio, struct scan_control *sc) +{ + return !current_is_kswapd() && + gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE && + get_pageblock_migratetype(&folio->page) == MIGRATE_CMA; +} +#else +static bool skip_cma(struct folio *folio, struct scan_control *sc) +{ + return false; +} +#endif + /* * Isolating page from the lruvec to fill in @dst list by nr_to_scan times. * @@ -2317,7 +2336,8 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan, nr_pages = folio_nr_pages(folio); total_scan += nr_pages; - if (folio_zonenum(folio) > sc->reclaim_idx) { + if (folio_zonenum(folio) > sc->reclaim_idx || + skip_cma(folio, sc)) { nr_skipped[folio_zonenum(folio)] += nr_pages; move_to = &folios_skipped; goto move; -- cgit v1.2.3 From e5797dc011182f8b25420bc977f37cd92fc6e755 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Tue, 6 Jun 2023 20:18:13 +0800 Subject: mm: vmscan: mark kswapd_run() and kswapd_stop() __meminit Add __meminit to kswapd_run() and kswapd_stop() to ensure they're default to __init when memory hotplug is not enabled. 
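For reference, the annotation works because of how __meminit is defined; roughly (as in include/linux/init.h, modulo minor attribute details):

        #if defined(CONFIG_MEMORY_HOTPLUG)
        #define __meminit
        #define __meminitdata
        #else
        /* no hotplug: "meminit" code is only needed during boot */
        #define __meminit       __init          /* lands in .init.text, freed after boot */
        #define __meminitdata   __initdata
        #endif

So with CONFIG_MEMORY_HOTPLUG=n, kswapd_run() and kswapd_stop() are discarded together with the rest of the init text once boot completes.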
Link: https://lkml.kernel.org/r/20230606121813.242163-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Acked-by: Yu Zhao Acked-by: David Hildenbrand Signed-off-by: Andrew Morton --- include/linux/swap.h | 4 ++-- mm/vmscan.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/include/linux/swap.h b/include/linux/swap.h index 2ddbfd85f6c7..b5f6f2916de1 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -460,8 +460,8 @@ static inline bool node_reclaim_enabled(void) void check_move_unevictable_folios(struct folio_batch *fbatch); void check_move_unevictable_pages(struct pagevec *pvec); -extern void kswapd_run(int nid); -extern void kswapd_stop(int nid); +extern void __meminit kswapd_run(int nid); +extern void __meminit kswapd_stop(int nid); #ifdef CONFIG_SWAP diff --git a/mm/vmscan.c b/mm/vmscan.c index 9048739c41db..a4e2936f6d35 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -7871,7 +7871,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim) /* * This kswapd start function will be called by init and node-hot-add. */ -void kswapd_run(int nid) +void __meminit kswapd_run(int nid) { pg_data_t *pgdat = NODE_DATA(nid); @@ -7892,7 +7892,7 @@ void kswapd_run(int nid) * Called by memory hotplug when all memory in a node is offlined. Caller must * be holding mem_hotplug_begin/done(). */ -void kswapd_stop(int nid) +void __meminit kswapd_stop(int nid) { pg_data_t *pgdat = NODE_DATA(nid); struct task_struct *kswapd; -- cgit v1.2.3 From e6c715abb4ee0ffe291c83e2e3d22866f387cda9 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Tue, 6 Jun 2023 20:07:24 +0800 Subject: memory tier: remove unneeded disable_all_demotion_targets() when !CONFIG_MIGRATION There's no caller of disable_all_demotion_targets() when CONFIG_MIGRATION is disabled. Remove it. Link: https://lkml.kernel.org/r/20230606120724.208552-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Signed-off-by: Andrew Morton --- mm/memory-tiers.c | 1 - 1 file changed, 1 deletion(-) (limited to 'mm') diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c index e593e56e530b..dd04f0ce5277 100644 --- a/mm/memory-tiers.c +++ b/mm/memory-tiers.c @@ -451,7 +451,6 @@ static void establish_demotion_targets(void) } #else -static inline void disable_all_demotion_targets(void) {} static inline void establish_demotion_targets(void) {} #endif /* CONFIG_MIGRATION */ -- cgit v1.2.3 From ad27ce206af731f6854b3d8a1760c573b217e363 Mon Sep 17 00:00:00 2001 From: ZhangPeng Date: Tue, 6 Jun 2023 14:20:11 +0800 Subject: mm/hugetlb: use a folio in copy_hugetlb_page_range() Patch series "Convert several functions in hugetlb.c to use a folio", v2. This patch series converts three functions in hugetlb.c to use a folio, which can remove several implicit calls to compound_head(). This patch (of 3): We can replace five implict calls to compound_head() with one by using pte_folio. The page we get back is always a head page, so we just convert ptepage to pte_folio. 
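To see where the saved compound_head() calls come from: the page-based helpers re-derive the head page internally on every call. In recent kernels get_page() boils down to roughly this (simplified sketch):

        static inline void get_page(struct page *page)
        {
                /* page_folio() performs the compound_head() lookup */
                folio_get(page_folio(page));
        }

Doing page_folio(pte_page(entry)) once and then using folio_get()/folio_put() on the result performs that head-page lookup a single time instead of once per helper call.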
Link: https://lkml.kernel.org/r/20230606062013.2947002-1-zhangpeng362@huawei.com Link: https://lkml.kernel.org/r/20230606062013.2947002-2-zhangpeng362@huawei.com Signed-off-by: ZhangPeng Suggested-by: Matthew Wilcox (Oracle) Reviewed-by: Muchun Song Reviewed-by: Sidhartha Kumar Reviewed-by: Matthew Wilcox (Oracle) Cc: Kefeng Wang Cc: Mike Kravetz Cc: Nanyong Sun Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/hugetlb.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index ea24718db4af..d6f6d19958a5 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5016,7 +5016,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *src_vma) { pte_t *src_pte, *dst_pte, entry; - struct page *ptepage; + struct folio *pte_folio; unsigned long addr; bool cow = is_cow_mapping(src_vma->vm_flags); struct hstate *h = hstate_vma(src_vma); @@ -5115,8 +5115,8 @@ again: set_huge_pte_at(dst, addr, dst_pte, entry); } else { entry = huge_ptep_get(src_pte); - ptepage = pte_page(entry); - get_page(ptepage); + pte_folio = page_folio(pte_page(entry)); + folio_get(pte_folio); /* * Failing to duplicate the anon rmap is a rare case @@ -5128,10 +5128,10 @@ again: * need to be without the pgtable locks since we could * sleep during the process. */ - if (!PageAnon(ptepage)) { - page_dup_file_rmap(ptepage, true); - } else if (page_try_dup_anon_rmap(ptepage, true, - src_vma)) { + if (!folio_test_anon(pte_folio)) { + page_dup_file_rmap(&pte_folio->page, true); + } else if (page_try_dup_anon_rmap(&pte_folio->page, + true, src_vma)) { pte_t src_pte_old = entry; struct folio *new_folio; @@ -5140,14 +5140,14 @@ again: /* Do not use reserve as it's private owned */ new_folio = alloc_hugetlb_folio(dst_vma, addr, 1); if (IS_ERR(new_folio)) { - put_page(ptepage); + folio_put(pte_folio); ret = PTR_ERR(new_folio); break; } ret = copy_user_large_folio(new_folio, - page_folio(ptepage), - addr, dst_vma); - put_page(ptepage); + pte_folio, + addr, dst_vma); + folio_put(pte_folio); if (ret) { folio_put(new_folio); break; -- cgit v1.2.3 From 959a78b6dd4526fb11d3cacf2de909479b06a4f4 Mon Sep 17 00:00:00 2001 From: ZhangPeng Date: Tue, 6 Jun 2023 14:20:12 +0800 Subject: mm/hugetlb: use a folio in hugetlb_wp() We can replace nine implict calls to compound_head() with one by using old_folio. The page we get back is always a head page, so we just convert old_page to old_folio. 
Link: https://lkml.kernel.org/r/20230606062013.2947002-3-zhangpeng362@huawei.com Signed-off-by: ZhangPeng Suggested-by: Matthew Wilcox (Oracle) Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Muchun Song Reviewed-by: Sidhartha Kumar Cc: Kefeng Wang Cc: Mike Kravetz Cc: Nanyong Sun Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/hugetlb.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d6f6d19958a5..e58f8001fd92 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5540,7 +5540,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, const bool unshare = flags & FAULT_FLAG_UNSHARE; pte_t pte = huge_ptep_get(ptep); struct hstate *h = hstate_vma(vma); - struct page *old_page; + struct folio *old_folio; struct folio *new_folio; int outside_reserve = 0; vm_fault_t ret = 0; @@ -5571,7 +5571,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, return 0; } - old_page = pte_page(pte); + old_folio = page_folio(pte_page(pte)); delayacct_wpcopy_start(); @@ -5580,17 +5580,17 @@ retry_avoidcopy: * If no-one else is actually using this page, we're the exclusive * owner and can reuse this page. */ - if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { - if (!PageAnonExclusive(old_page)) - page_move_anon_rmap(old_page, vma); + if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) { + if (!PageAnonExclusive(&old_folio->page)) + page_move_anon_rmap(&old_folio->page, vma); if (likely(!unshare)) set_huge_ptep_writable(vma, haddr, ptep); delayacct_wpcopy_end(); return 0; } - VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page), - old_page); + VM_BUG_ON_PAGE(folio_test_anon(old_folio) && + PageAnonExclusive(&old_folio->page), &old_folio->page); /* * If the process that created a MAP_PRIVATE mapping is about to @@ -5602,10 +5602,10 @@ retry_avoidcopy: * of the full address range. */ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && - page_folio(old_page) != pagecache_folio) + old_folio != pagecache_folio) outside_reserve = 1; - get_page(old_page); + folio_get(old_folio); /* * Drop page table lock as buddy allocator may be called. It will @@ -5627,7 +5627,7 @@ retry_avoidcopy: pgoff_t idx; u32 hash; - put_page(old_page); + folio_put(old_folio); /* * Drop hugetlb_fault_mutex and vma_lock before * unmapping. 
unmapping needs to hold vma_lock @@ -5642,7 +5642,7 @@ retry_avoidcopy: hugetlb_vma_unlock_read(vma); mutex_unlock(&hugetlb_fault_mutex_table[hash]); - unmap_ref_private(mm, vma, old_page, haddr); + unmap_ref_private(mm, vma, &old_folio->page, haddr); mutex_lock(&hugetlb_fault_mutex_table[hash]); hugetlb_vma_lock_read(vma); @@ -5672,7 +5672,7 @@ retry_avoidcopy: goto out_release_all; } - if (copy_user_large_folio(new_folio, page_folio(old_page), address, vma)) { + if (copy_user_large_folio(new_folio, old_folio, address, vma)) { ret = VM_FAULT_HWPOISON_LARGE; goto out_release_all; } @@ -5694,14 +5694,14 @@ retry_avoidcopy: /* Break COW or unshare */ huge_ptep_clear_flush(vma, haddr, ptep); mmu_notifier_invalidate_range(mm, range.start, range.end); - page_remove_rmap(old_page, vma, true); + page_remove_rmap(&old_folio->page, vma, true); hugepage_add_new_anon_rmap(new_folio, vma, haddr); if (huge_pte_uffd_wp(pte)) newpte = huge_pte_mkuffd_wp(newpte); set_huge_pte_at(mm, haddr, ptep, newpte); folio_set_hugetlb_migratable(new_folio); /* Make the old page be freed below */ - new_folio = page_folio(old_page); + new_folio = old_folio; } spin_unlock(ptl); mmu_notifier_invalidate_range_end(&range); @@ -5710,11 +5710,11 @@ out_release_all: * No restore in case of successful pagetable update (Break COW or * unshare) */ - if (new_folio != page_folio(old_page)) + if (new_folio != old_folio) restore_reserve_on_error(h, vma, haddr, new_folio); folio_put(new_folio); out_release_old: - put_page(old_page); + folio_put(old_folio); spin_lock(ptl); /* Caller expects lock to be held */ -- cgit v1.2.3 From 061e62e8180d3fab378a52d868e29ceebe2fe1d2 Mon Sep 17 00:00:00 2001 From: ZhangPeng Date: Tue, 6 Jun 2023 14:20:13 +0800 Subject: mm/hugetlb: use a folio in hugetlb_fault() We can replace seven implicit calls to compound_head() with one by using folio. [akpm@linux-foundation.org: update comment, per Sidhartha] Link: https://lkml.kernel.org/r/20230606062013.2947002-4-zhangpeng362@huawei.com Signed-off-by: ZhangPeng Reviewed-by Sidhartha Kumar Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Muchun Song Cc: Kefeng Wang Cc: Mike Kravetz Cc: Nanyong Sun Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/hugetlb.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e58f8001fd92..dfa412d8cb30 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -6062,7 +6062,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, vm_fault_t ret; u32 hash; pgoff_t idx; - struct page *page = NULL; + struct folio *folio = NULL; struct folio *pagecache_folio = NULL; struct hstate *h = hstate_vma(vma); struct address_space *mapping; @@ -6179,16 +6179,16 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, /* * hugetlb_wp() requires page locks of pte_page(entry) and * pagecache_folio, so here we need take the former one - * when page != pagecache_folio or !pagecache_folio. + * when folio != pagecache_folio or !pagecache_folio. 
*/ - page = pte_page(entry); - if (page_folio(page) != pagecache_folio) - if (!trylock_page(page)) { + folio = page_folio(pte_page(entry)); + if (folio != pagecache_folio) + if (!folio_trylock(folio)) { need_wait_lock = 1; goto out_ptl; } - get_page(page); + folio_get(folio); if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { if (!huge_pte_write(entry)) { @@ -6204,9 +6204,9 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, flags & FAULT_FLAG_WRITE)) update_mmu_cache(vma, haddr, ptep); out_put_page: - if (page_folio(page) != pagecache_folio) - unlock_page(page); - put_page(page); + if (folio != pagecache_folio) + folio_unlock(folio); + folio_put(folio); out_ptl: spin_unlock(ptl); @@ -6225,7 +6225,7 @@ out_mutex: * here without taking refcount. */ if (need_wait_lock) - wait_on_page_locked(page); + folio_wait_locked(folio); return ret; } -- cgit v1.2.3 From bd5f79ab39367665f40e10c2486aa15e7a841490 Mon Sep 17 00:00:00 2001 From: Yajun Deng Date: Wed, 7 Jun 2023 10:39:52 +0800 Subject: mm/sparse: remove unused parameters in sparse_remove_section() These parameters ms and map_offset are not used in sparse_remove_section(), so remove them. The __remove_section() is only called by __remove_pages(), remove it. And put the WARN_ON_ONCE() in sparse_remove_section(). Link: https://lkml.kernel.org/r/20230607023952.2247489-1-yajun.deng@linux.dev Signed-off-by: Yajun Deng Reviewed-by: David Hildenbrand Cc: Oscar Salvador Signed-off-by: Andrew Morton --- include/linux/memory_hotplug.h | 5 ++--- mm/memory_hotplug.c | 18 +----------------- mm/sparse.c | 10 +++++++--- 3 files changed, 10 insertions(+), 23 deletions(-) (limited to 'mm') diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 04bc286eed42..013c69753c91 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -344,9 +344,8 @@ extern void remove_pfn_range_from_zone(struct zone *zone, extern int sparse_add_section(int nid, unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap, struct dev_pagemap *pgmap); -extern void sparse_remove_section(struct mem_section *ms, - unsigned long pfn, unsigned long nr_pages, - unsigned long map_offset, struct vmem_altmap *altmap); +extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages, + struct vmem_altmap *altmap); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); extern struct zone *zone_for_pfn_range(int online_type, int nid, diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9061ac69b1b6..8877734b5f2f 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -492,18 +492,6 @@ void __ref remove_pfn_range_from_zone(struct zone *zone, set_zone_contiguous(zone); } -static void __remove_section(unsigned long pfn, unsigned long nr_pages, - unsigned long map_offset, - struct vmem_altmap *altmap) -{ - struct mem_section *ms = __pfn_to_section(pfn); - - if (WARN_ON_ONCE(!valid_section(ms))) - return; - - sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap); -} - /** * __remove_pages() - remove sections of pages * @pfn: starting pageframe (must be aligned to start of a section) @@ -520,9 +508,6 @@ void __remove_pages(unsigned long pfn, unsigned long nr_pages, { const unsigned long end_pfn = pfn + nr_pages; unsigned long cur_nr_pages; - unsigned long map_offset = 0; - - map_offset = vmem_altmap_offset(altmap); if (check_pfn_span(pfn, nr_pages)) { WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1); @@ -534,8 
+519,7 @@ void __remove_pages(unsigned long pfn, unsigned long nr_pages, /* Select all remaining pages up to the next section boundary */ cur_nr_pages = min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn); - __remove_section(pfn, cur_nr_pages, map_offset, altmap); - map_offset = 0; + sparse_remove_section(pfn, cur_nr_pages, altmap); } } diff --git a/mm/sparse.c b/mm/sparse.c index c2afdb26039e..7a29e10193fe 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -922,10 +922,14 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn, return 0; } -void sparse_remove_section(struct mem_section *ms, unsigned long pfn, - unsigned long nr_pages, unsigned long map_offset, - struct vmem_altmap *altmap) +void sparse_remove_section(unsigned long pfn, unsigned long nr_pages, + struct vmem_altmap *altmap) { + struct mem_section *ms = __pfn_to_section(pfn); + + if (WARN_ON_ONCE(!valid_section(ms))) + return; + section_deactivate(pfn, nr_pages, altmap); } #endif /* CONFIG_MEMORY_HOTPLUG */ -- cgit v1.2.3 From 91ff4d754a1895feb4216e94028edd76cbbc0770 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Wed, 7 Jun 2023 03:24:02 +0000 Subject: mm/mm_init.c: drop 'nid' parameter from check_for_memory() The node_id in pgdat has already been set in free_area_init_node(), so use it internally instead of passing a redundant parameter. Link: https://lkml.kernel.org/r/20230607032402.4679-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Reviewed-by: Mike Rapoport (IBM) Acked-by: Michal Hocko Signed-off-by: Andrew Morton --- mm/mm_init.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index 015355dfdc0b..25a585cb0c0a 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1724,7 +1724,7 @@ static void __init free_area_init_node(int nid) } /* Any regular or high memory on that node ? */ -static void check_for_memory(pg_data_t *pgdat, int nid) +static void check_for_memory(pg_data_t *pgdat) { enum zone_type zone_type; @@ -1732,9 +1732,9 @@ static void check_for_memory(pg_data_t *pgdat, int nid) struct zone *zone = &pgdat->node_zones[zone_type]; if (populated_zone(zone)) { if (IS_ENABLED(CONFIG_HIGHMEM)) - node_set_state(nid, N_HIGH_MEMORY); + node_set_state(pgdat->node_id, N_HIGH_MEMORY); if (zone_type <= ZONE_NORMAL) - node_set_state(nid, N_NORMAL_MEMORY); + node_set_state(pgdat->node_id, N_NORMAL_MEMORY); break; } } @@ -1886,7 +1886,7 @@ void __init free_area_init(unsigned long *max_zone_pfn) /* Any memory on that node */ if (pgdat->node_present_pages) node_set_state(nid, N_MEMORY); - check_for_memory(pgdat, nid); + check_for_memory(pgdat); } memmap_init(); -- cgit v1.2.3 From 36ce9d76b0a93bae799e27e4f5ac35478c676592 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 7 Jun 2023 18:15:23 +0200 Subject: shmem: use ramfs_kill_sb() for kill_sb method of ramfs-based tmpfs As the ramfs-based tmpfs uses ramfs_init_fs_context() for the init_fs_context method, which allocates fc->s_fs_info, use ramfs_kill_sb() to free it and avoid a memory leak. 
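For context, the allocation that ends up being leaked happens on the mount side, roughly (simplified from fs/ramfs/inode.c):

        int ramfs_init_fs_context(struct fs_context *fc)
        {
                struct ramfs_fs_info *fsi;

                fsi = kzalloc(sizeof(*fsi), GFP_KERNEL);
                if (!fsi)
                        return -ENOMEM;

                fsi->mount_opts.mode = RAMFS_DEFAULT_MODE;
                fc->s_fs_info = fsi;    /* becomes sb->s_fs_info at mount time */
                fc->ops = &ramfs_context_ops;
                return 0;
        }

kill_litter_super() never frees sb->s_fs_info, so the ramfs-based tmpfs needs the matching ramfs_kill_sb() (made available by the patch below) as its kill_sb method.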
Link: https://lkml.kernel.org/r/20230607161523.2876433-1-roberto.sassu@huaweicloud.com Fixes: c3b1b1cbf002 ("ramfs: add support for "mode=" mount option") Signed-off-by: Roberto Sassu Cc: Hugh Dickins Cc: David Howells Cc: Al Viro Cc: Signed-off-by: Andrew Morton --- fs/ramfs/inode.c | 2 +- include/linux/ramfs.h | 1 + mm/shmem.c | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index 5ba580c78835..fef477c78107 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -278,7 +278,7 @@ int ramfs_init_fs_context(struct fs_context *fc) return 0; } -static void ramfs_kill_sb(struct super_block *sb) +void ramfs_kill_sb(struct super_block *sb) { kfree(sb->s_fs_info); kill_litter_super(sb); diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index 917528d102c4..d506dc63dd47 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h @@ -7,6 +7,7 @@ struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir, umode_t mode, dev_t dev); extern int ramfs_init_fs_context(struct fs_context *fc); +extern void ramfs_kill_sb(struct super_block *sb); #ifdef CONFIG_MMU static inline int diff --git a/mm/shmem.c b/mm/shmem.c index 5e54ab5f61f2..c606ab89693a 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -4199,7 +4199,7 @@ static struct file_system_type shmem_fs_type = { .name = "tmpfs", .init_fs_context = ramfs_init_fs_context, .parameters = ramfs_fs_parameters, - .kill_sb = kill_litter_super, + .kill_sb = ramfs_kill_sb, .fs_flags = FS_USERNS_MOUNT, }; -- cgit v1.2.3 From a668968f84265e698a122656c433809ab9f023fa Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Wed, 7 Jun 2023 02:45:48 +0000 Subject: mm/memory_hotplug: remove reset_node_managed_pages() in hotadd_init_pgdat() managed pages has already been set to 0 in free_area_init_core_hotplug(), via zone_init_internals() on each zone. It's pointless to reset again. Furthermore, reset_node_managed_pages() no longer needs to be exposed outside of mm/memblock.c. Remove declaration in include/linux/memblock.h and define it as static. In addtion to this, the only caller of reset_node_managed_pages() is reset_all_zones_managed_pages(), which is annotated with __init, so it should be safe to also mark reset_node_managed_pages() as __init. 
Link: https://lkml.kernel.org/r/20230607024548.1240-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Suggested-by: David Hildenbrand Cc: Michal Hocko Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Signed-off-by: Andrew Morton --- include/linux/memblock.h | 1 - mm/memblock.c | 2 +- mm/memory_hotplug.c | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) (limited to 'mm') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index f82ee3fac1cd..f71ff9f0ec81 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -128,7 +128,6 @@ int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); void memblock_free_all(void); void memblock_free(void *ptr, size_t size); -void reset_node_managed_pages(pg_data_t *pgdat); void reset_all_zones_managed_pages(void); /* Low level functions */ diff --git a/mm/memblock.c b/mm/memblock.c index 3feafea06ab2..da4264528e1e 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -2122,7 +2122,7 @@ static unsigned long __init free_low_memory_core_early(void) static int reset_managed_pages_done __initdata; -void reset_node_managed_pages(pg_data_t *pgdat) +static void __init reset_node_managed_pages(pg_data_t *pgdat) { struct zone *z; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 8877734b5f2f..5248323fc0f7 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1194,7 +1194,6 @@ static pg_data_t __ref *hotadd_init_pgdat(int nid) * online_pages() and offline_pages(). * TODO: should be in free_area_init_core_hotplug? */ - reset_node_managed_pages(pgdat); reset_node_present_pages(pgdat); return pgdat; -- cgit v1.2.3 From 32b6a4a1745a46918f748f6fb7641e588fbec6f2 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Wed, 7 Jun 2023 02:50:56 +0000 Subject: mm/mm_init.c: remove reset_node_present_pages() reset_node_present_pages() only get called in hotadd_init_pgdat(), move the action that clear present pages to free_area_init_core_hotplug(), so the helper can be removed. Link: https://lkml.kernel.org/r/20230607025056.1348-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Suggested-by: David Hildenbrand Cc: Michal Hocko Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Signed-off-by: Andrew Morton --- mm/memory_hotplug.c | 18 ------------------ mm/mm_init.c | 15 +++++++++++++-- 2 files changed, 13 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 5248323fc0f7..35db4108bb15 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1156,16 +1156,6 @@ failed_addition: return ret; } -static void reset_node_present_pages(pg_data_t *pgdat) -{ - struct zone *z; - - for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) - z->present_pages = 0; - - pgdat->node_present_pages = 0; -} - /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ static pg_data_t __ref *hotadd_init_pgdat(int nid) { @@ -1188,14 +1178,6 @@ static pg_data_t __ref *hotadd_init_pgdat(int nid) */ build_all_zonelists(pgdat); - /* - * When memory is hot-added, all the memory is in offline state. So - * clear all zones' present_pages because they will be updated in - * online_pages() and offline_pages(). - * TODO: should be in free_area_init_core_hotplug? 
- */ - reset_node_present_pages(pgdat); - return pgdat; } diff --git a/mm/mm_init.c b/mm/mm_init.c index 25a585cb0c0a..122e9bf3fa73 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1509,6 +1509,8 @@ void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) pgdat->kswapd_order = 0; pgdat->kswapd_highest_zoneidx = 0; pgdat->node_start_pfn = 0; + pgdat->node_present_pages = 0; + for_each_online_cpu(cpu) { struct per_cpu_nodestat *p; @@ -1516,8 +1518,17 @@ void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) memset(p, 0, sizeof(*p)); } - for (z = 0; z < MAX_NR_ZONES; z++) - zone_init_internals(&pgdat->node_zones[z], z, nid, 0); + /* + * When memory is hot-added, all the memory is in offline state. So + * clear all zones' present_pages and managed_pages because they will + * be updated in online_pages() and offline_pages(). + */ + for (z = 0; z < MAX_NR_ZONES; z++) { + struct zone *zone = pgdat->node_zones + z; + + zone->present_pages = 0; + zone_init_internals(zone, z, nid, 0); + } } #endif -- cgit v1.2.3 From b9c91c43412f2e07a5287dfe7027acdd8fb0b1ef Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 7 Jun 2023 19:51:43 +0000 Subject: mm: zswap: support exclusive loads Commit 71024cb4a0bf ("frontswap: remove frontswap_tmem_exclusive_gets") removed support for exclusive loads from frontswap as it was not used. Bring back exclusive loads support to frontswap by adding an "exclusive" output parameter to frontswap_ops->load. On the zswap side, add a module parameter to enable/disable exclusive loads, and a config option to control the boot default value. Refactor zswap entry invalidation in zswap_frontswap_invalidate_page() into zswap_invalidate_entry() to reuse it in zswap_frontswap_load() if exclusive loads are enabled. With exclusive loads, we avoid having two copies of the same page in memory (compressed & uncompressed) after faulting it in from zswap. On the other hand, if the page is to be reclaimed again without being dirtied, it will be re-compressed. Compression is not usually slow, and a page that was just faulted in is less likely to be reclaimed again soon. Link: https://lkml.kernel.org/r/20230607195143.1473802-1-yosryahmed@google.com Signed-off-by: Yosry Ahmed Suggested-by: Yu Zhao Acked-by: Johannes Weiner Cc: Dan Streetman Cc: Domenico Cerasuolo Cc: Konrad Rzeszutek Wilk Cc: Nhat Pham Cc: Seth Jennings Cc: Vitaly Wool Signed-off-by: Andrew Morton --- include/linux/frontswap.h | 2 +- mm/Kconfig | 16 ++++++++++++++++ mm/frontswap.c | 10 ++++++++-- mm/zswap.c | 28 ++++++++++++++++++++-------- 4 files changed, 45 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index a631bac12220..eaa0ac5f9003 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h @@ -10,7 +10,7 @@ struct frontswap_ops { void (*init)(unsigned); /* this swap type was just swapon'ed */ int (*store)(unsigned, pgoff_t, struct page *); /* store a page */ - int (*load)(unsigned, pgoff_t, struct page *); /* load a page */ + int (*load)(unsigned, pgoff_t, struct page *, bool *); /* load a page */ void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */ void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */ }; diff --git a/mm/Kconfig b/mm/Kconfig index 7672a22647b4..12f32f8d26bf 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -46,6 +46,22 @@ config ZSWAP_DEFAULT_ON The selection made here can be overridden by using the kernel command line 'zswap.enabled=' option. 
+config ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON + bool "Invalidate zswap entries when pages are loaded" + depends on ZSWAP + help + If selected, exclusive loads for zswap will be enabled at boot, + otherwise it will be disabled. + + If exclusive loads are enabled, when a page is loaded from zswap, + the zswap entry is invalidated at once, as opposed to leaving it + in zswap until the swap entry is freed. + + This avoids having two copies of the same page in memory + (compressed and uncompressed) after faulting in a page from zswap. + The cost is that if the page was never dirtied and needs to be + swapped out again, it will be re-compressed. + choice prompt "Default compressor" depends on ZSWAP diff --git a/mm/frontswap.c b/mm/frontswap.c index 279e55b4ed87..2fb5df3384b8 100644 --- a/mm/frontswap.c +++ b/mm/frontswap.c @@ -206,6 +206,7 @@ int __frontswap_load(struct page *page) int type = swp_type(entry); struct swap_info_struct *sis = swap_info[type]; pgoff_t offset = swp_offset(entry); + bool exclusive = false; VM_BUG_ON(!frontswap_ops); VM_BUG_ON(!PageLocked(page)); @@ -215,9 +216,14 @@ int __frontswap_load(struct page *page) return -1; /* Try loading from each implementation, until one succeeds. */ - ret = frontswap_ops->load(type, offset, page); - if (ret == 0) + ret = frontswap_ops->load(type, offset, page, &exclusive); + if (ret == 0) { inc_frontswap_loads(); + if (exclusive) { + SetPageDirty(page); + __frontswap_clear(sis, offset); + } + } return ret; } diff --git a/mm/zswap.c b/mm/zswap.c index bcb82e09eb64..9fa86265f6dd 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -138,6 +138,10 @@ static bool zswap_non_same_filled_pages_enabled = true; module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled, bool, 0644); +static bool zswap_exclusive_loads_enabled = IS_ENABLED( + CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON); +module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644); + /********************************* * data structures **********************************/ @@ -1340,12 +1344,22 @@ shrink: goto reject; } +static void zswap_invalidate_entry(struct zswap_tree *tree, + struct zswap_entry *entry) +{ + /* remove from rbtree */ + zswap_rb_erase(&tree->rbroot, entry); + + /* drop the initial reference from entry creation */ + zswap_entry_put(tree, entry); +} + /* * returns 0 if the page was successfully decompressed * return -1 on entry not found or error */ static int zswap_frontswap_load(unsigned type, pgoff_t offset, - struct page *page) + struct page *page, bool *exclusive) { struct zswap_tree *tree = zswap_trees[type]; struct zswap_entry *entry; @@ -1415,6 +1429,10 @@ stats: freeentry: spin_lock(&tree->lock); zswap_entry_put(tree, entry); + if (!ret && zswap_exclusive_loads_enabled) { + zswap_invalidate_entry(tree, entry); + *exclusive = true; + } spin_unlock(&tree->lock); return ret; @@ -1434,13 +1452,7 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset) spin_unlock(&tree->lock); return; } - - /* remove from rbtree */ - zswap_rb_erase(&tree->rbroot, entry); - - /* drop the initial reference from entry creation */ - zswap_entry_put(tree, entry); - + zswap_invalidate_entry(tree, entry); spin_unlock(&tree->lock); } -- cgit v1.2.3 From be5e015d107d5336f298b74ea5a4f0b1773bc6f9 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 7 Jun 2023 17:28:07 -0300 Subject: vmstat: skip periodic vmstat update for isolated CPUs Problem: The interruption caused by vmstat_update is undesirable for certain applications. 
With workloads that are running on isolated cpus with nohz full mode to shield off any kernel interruption. For example, a VM running a time sensitive application with a 50us maximum acceptable interruption (use case: soft PLC). oslat 1094.456862: sys_mlock(start: 7f7ed0000b60, len: 1000) oslat 1094.456971: workqueue_queue_work: ... function=vmstat_update ... oslat 1094.456974: sched_switch: prev_comm=oslat ... ==> next_comm=kworker/5:1 ... kworker 1094.456978: sched_switch: prev_comm=kworker/5:1 ==> next_comm=oslat ... The example above shows an additional 7us for the oslat -> kworker -> oslat switches. In the case of a virtualized CPU, and the vmstat_update interruption in the host (of a qemu-kvm vcpu), the latency penalty observed in the guest is higher than 50us, violating the acceptable latency threshold. The isolated vCPU can perform operations that modify per-CPU page counters, for example to complete I/O operations: CPU 11/KVM-9540 [001] dNh1. 2314.248584: mod_zone_page_state <-__folio_end_writeback CPU 11/KVM-9540 [001] dNh1. 2314.248585: => 0xffffffffc042b083 => mod_zone_page_state => __folio_end_writeback => folio_end_writeback => iomap_finish_ioend => blk_mq_end_request_batch => nvme_irq => __handle_irq_event_percpu => handle_irq_event => handle_edge_irq => __common_interrupt => common_interrupt => asm_common_interrupt => vmx_do_interrupt_nmi_irqoff => vmx_handle_exit_irqoff => vcpu_enter_guest => vcpu_run => kvm_arch_vcpu_ioctl_run => kvm_vcpu_ioctl => __x64_sys_ioctl => do_syscall_64 => entry_SYSCALL_64_after_hwframe In kernel users of vmstat counters either require the precise value and they are using zone_page_state_snapshot interface or they can live with an imprecision as the regular flushing can happen at arbitrary time and cumulative error can grow (see calculate_normal_threshold). From that POV the regular flushing can be postponed for CPUs that have been isolated from the kernel interference without critical infrastructure ever noticing. Skip regular flushing from vmstat_shepherd for all isolated CPUs to avoid interference with the isolated workload. Suggested by Michal Hocko. Link: https://lkml.kernel.org/r/ZIDoV/zxFKVmQl7W@tpad Signed-off-by: Marcelo Tosatti Acked-by: Michal Hocko Cc: Frederic Weisbecker Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/vmstat.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'mm') diff --git a/mm/vmstat.c b/mm/vmstat.c index c28046371b45..c7499e3ee9d5 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "internal.h" @@ -2022,6 +2023,20 @@ static void vmstat_shepherd(struct work_struct *w) for_each_online_cpu(cpu) { struct delayed_work *dw = &per_cpu(vmstat_work, cpu); + /* + * In kernel users of vmstat counters either require the precise value and + * they are using zone_page_state_snapshot interface or they can live with + * an imprecision as the regular flushing can happen at arbitrary time and + * cumulative error can grow (see calculate_normal_threshold). + * + * From that POV the regular flushing can be postponed for CPUs that have + * been isolated from the kernel interference without critical + * infrastructure ever noticing. Skip regular flushing from vmstat_shepherd + * for all isolated CPUs to avoid interference with the isolated workload. 
+ */ + if (cpu_is_isolated(cpu)) + continue; + if (!delayed_work_pending(dw) && need_update(cpu)) queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0); -- cgit v1.2.3 From e3b7bf972d632288ccad95b116628e3141be676e Mon Sep 17 00:00:00 2001 From: Tarun Sahu Date: Fri, 9 Jun 2023 21:59:07 +0530 Subject: mm/folio: avoid special handling for order value 0 in folio_set_order folio_set_order(folio, 0) is used in kernel at two places __destroy_compound_gigantic_folio and __prep_compound_gigantic_folio. Currently, It is called to clear out the folio->_folio_nr_pages and folio->_folio_order. For __destroy_compound_gigantic_folio: In past, folio_set_order(folio, 0) was needed because page->mapping used to overlap with _folio_nr_pages and _folio_order. So if these fields were left uncleared during freeing gigantic hugepages, they were causing "BUG: bad page state" due to non-zero page->mapping. Now, After Commit a01f43901cfb ("hugetlb: be sure to free demoted CMA pages to CMA") page->mapping has explicitly been cleared out for tail pages. Also, _folio_order and _folio_nr_pages no longer overlaps with page->mapping. So, folio_set_order(folio, 0) can be removed from freeing gigantic folio path (__destroy_compound_gigantic_folio). Another place, folio_set_order(folio, 0) is called inside __prep_compound_gigantic_folio during error path. Here, folio_set_order(folio, 0) can also be removed if we move folio_set_order(folio, order) after for loop. The patch also moves _folio_set_head call in __prep_compound_gigantic_folio() such that we avoid clearing them in the error path. Also, as Mike pointed out: "It would actually be better to move the calls _folio_set_head and folio_set_order in __prep_compound_gigantic_folio() as suggested here. Why? In the current code, the ref count on the 'head page' is still 1 (or more) while those calls are made. So, someone could take a speculative ref on the page BEFORE the tail pages are set up." This way, folio_set_order(folio, 0) is no more needed. And it will also helps removing the confusion of folio order being set to 0 (as _folio_order field is part of first tail page). Testing: I have run LTP tests, which all passes. and also I have written the test in LTP which tests the bug caused by compound_nr and page->mapping overlapping. https://github.com/linux-test-project/ltp/blob/master/testcases/kernel/mem/hugetlb/hugemmap/hugemmap32.c Running on older kernel ( < 5.10-rc7) with the above bug this fails while on newer kernel and, also with this patch it passes. 
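To make the ordering argument concrete, the tail of __prep_compound_gigantic_folio() now looks roughly like the schematic below (loop body elided; the exact hunks are in the diff that follows). The head flag and order are published only after every tail page has been prepared, so a speculative reference taken on the head page cannot observe a half-initialized compound folio:

	for (i = 0; i < nr_pages; i++) {
		p = folio_page(folio, i);
		/* clear reserved, freeze the tail ref, set_compound_head(p, &folio->page), ... */
	}
	/* only now advertise a compound head of this order */
	__folio_set_head(folio);
	folio_set_order(folio, order);	/* order is never 0 here, so the 0 case can go */
	atomic_set(&folio->_entire_mapcount, -1);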
Link: https://lkml.kernel.org/r/20230609162907.111756-1-tsahu@linux.ibm.com Signed-off-by: Tarun Sahu Reviewed-by: Mike Kravetz Cc: Aneesh Kumar K.V Cc: Gerald Schaefer Cc: Matthew Wilcox Cc: Muchun Song Cc: Sidhartha Kumar Signed-off-by: Andrew Morton --- mm/hugetlb.c | 9 +++------ mm/internal.h | 8 ++------ 2 files changed, 5 insertions(+), 12 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index dfa412d8cb30..de96cd936b12 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1489,7 +1489,6 @@ static void __destroy_compound_gigantic_folio(struct folio *folio, set_page_refcounted(p); } - folio_set_order(folio, 0); __folio_clear_head(folio); } @@ -1951,9 +1950,6 @@ static bool __prep_compound_gigantic_folio(struct folio *folio, struct page *p; __folio_clear_reserved(folio); - __folio_set_head(folio); - /* we rely on prep_new_hugetlb_folio to set the destructor */ - folio_set_order(folio, order); for (i = 0; i < nr_pages; i++) { p = folio_page(folio, i); @@ -1999,6 +1995,9 @@ static bool __prep_compound_gigantic_folio(struct folio *folio, if (i != 0) set_compound_head(p, &folio->page); } + __folio_set_head(folio); + /* we rely on prep_new_hugetlb_folio to set the destructor */ + folio_set_order(folio, order); atomic_set(&folio->_entire_mapcount, -1); atomic_set(&folio->_nr_pages_mapped, 0); atomic_set(&folio->_pincount, 0); @@ -2017,8 +2016,6 @@ out_error: p = folio_page(folio, j); __ClearPageReserved(p); } - folio_set_order(folio, 0); - __folio_clear_head(folio); return false; } diff --git a/mm/internal.h b/mm/internal.h index f45f5eb4514f..faf0508d89a5 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -438,16 +438,12 @@ int split_free_page(struct page *free_page, */ static inline void folio_set_order(struct folio *folio, unsigned int order) { - if (WARN_ON_ONCE(!folio_test_large(folio))) + if (WARN_ON_ONCE(!order || !folio_test_large(folio))) return; folio->_folio_order = order; #ifdef CONFIG_64BIT - /* - * When hugetlb dissolves a folio, we need to clear the tail - * page, rather than setting nr_pages to 1. - */ - folio->_folio_nr_pages = order ? 1U << order : 0; + folio->_folio_nr_pages = 1U << order; #endif } -- cgit v1.2.3 From 0e4bc271110e0c58c010071a9bbf150f39851dac Mon Sep 17 00:00:00 2001 From: Lu Hongfei Date: Fri, 9 Jun 2023 17:30:57 +0800 Subject: mm/vmalloc: replace the ternary conditional operator with min() It would be better to replace the traditional ternary conditional operator with min() in zero_iter Link: https://lkml.kernel.org/r/20230609093057.27777-1-luhongfei@vivo.com Signed-off-by: Lu Hongfei Reviewed-by: Lorenzo Stoakes Reviewed-by: David Hildenbrand Cc: Christoph Hellwig Cc: Uladzislau Rezki (Sony) Signed-off-by: Andrew Morton --- mm/vmalloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 9d64a4098c36..d07d0140ebd3 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3562,7 +3562,7 @@ static size_t zero_iter(struct iov_iter *iter, size_t count) while (remains > 0) { size_t num, copied; - num = remains < PAGE_SIZE ? 
remains : PAGE_SIZE; + num = min_t(size_t, remains, PAGE_SIZE); copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter); remains -= copied; -- cgit v1.2.3 From 26e1a0c3277d7f43856ec424902423be212cc178 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:06:53 -0700 Subject: mm: use pmdp_get_lockless() without surplus barrier() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "mm: allow pte_offset_map[_lock]() to fail", v2. What is it all about? Some mmap_lock avoidance i.e. latency reduction. Initially just for the case of collapsing shmem or file pages to THPs; but likely to be relied upon later in other contexts e.g. freeing of empty page tables (but that's not work I'm doing). mmap_write_lock avoidance when collapsing to anon THPs? Perhaps, but again that's not work I've done: a quick attempt was not as easy as the shmem/file case. I would much prefer not to have to make these small but wide-ranging changes for such a niche case; but failed to find another way, and have heard that shmem MADV_COLLAPSE's usefulness is being limited by that mmap_write_lock it currently requires. These changes (though of course not these exact patches) have been in Google's data centre kernel for three years now: we do rely upon them. What is this preparatory series about? The current mmap locking will not be enough to guard against that tricky transition between pmd entry pointing to page table, and empty pmd entry, and pmd entry pointing to huge page: pte_offset_map() will have to validate the pmd entry for itself, returning NULL if no page table is there. What to do about that varies: sometimes nearby error handling indicates just to skip it; but in many cases an ACTION_AGAIN or "goto again" is appropriate (and if that risks an infinite loop, then there must have been an oops, or pfn 0 mistaken for page table, before). Given the likely extension to freeing empty page tables, I have not limited this set of changes to a THP config; and it has been easier, and sets a better example, if each site is given appropriate handling: even where deeper study might prove that failure could only happen if the pmd table were corrupted. Several of the patches are, or include, cleanup on the way; and by the end, pmd_trans_unstable() and suchlike are deleted: pte_offset_map() and pte_offset_map_lock() then handle those original races and more. Most uses of pte_lockptr() are deprecated, with pte_offset_map_nolock() taking its place. This patch (of 32): Use pmdp_get_lockless() in preference to READ_ONCE(*pmdp), to get a more reliable result with PAE (or READ_ONCE as before without PAE); and remove the unnecessary extra barrier()s which got left behind in its callers. HOWEVER: Note the small print in linux/pgtable.h, where it was designed specifically for fast GUP, and depends on interrupts being disabled for its full guarantee: most callers which have been added (here and before) do NOT have interrupts disabled, so there is still some need for caution. Link: https://lkml.kernel.org/r/f35279a9-9ac0-de22-d245-591afbfb4dc@google.com Signed-off-by: Hugh Dickins Acked-by: Yu Zhao Acked-by: Peter Xu Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Zack Rusin Signed-off-by: Andrew Morton --- fs/userfaultfd.c | 10 +--------- include/linux/pgtable.h | 17 ----------------- mm/gup.c | 6 +----- mm/hmm.c | 2 +- mm/khugepaged.c | 5 ----- mm/ksm.c | 3 +-- mm/memory.c | 14 ++------------ mm/mprotect.c | 5 ----- mm/page_vma_mapped.c | 2 +- 9 files changed, 7 insertions(+), 57 deletions(-) (limited to 'mm') diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 0fd96d6e39ce..f7a0817b1ec0 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -349,15 +349,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); - /* - * READ_ONCE must function as a barrier with narrower scope - * and it must be equivalent to: - * _pmd = *pmd; barrier(); - * - * This is to deal with the instability (as in - * pmd_trans_unstable) of the pmd. - */ - _pmd = READ_ONCE(*pmd); + _pmd = pmdp_get_lockless(pmd); if (pmd_none(_pmd)) goto out; diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index c5a51481bbb9..8ec27fe69dc8 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1344,23 +1344,6 @@ static inline int pud_trans_unstable(pud_t *pud) static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) { pmd_t pmdval = pmdp_get_lockless(pmd); - /* - * The barrier will stabilize the pmdval in a register or on - * the stack so that it will stop changing under the code. - * - * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE, - * pmdp_get_lockless is allowed to return a not atomic pmdval - * (for example pointing to an hugepage that has never been - * mapped in the pmd). The below checks will only care about - * the low part of the pmd with 32bit PAE x86 anyway, with the - * exception of pmd_none(). So the important thing is that if - * the low part of the pmd is found null, the high part will - * be also null or the pmd_none() check below would be - * confused. - */ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - barrier(); -#endif /* * !pmd_present() checks for pmd migration entries * diff --git a/mm/gup.c b/mm/gup.c index a718b956edbe..d448fd286b8c 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -654,11 +654,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma, struct mm_struct *mm = vma->vm_mm; pmd = pmd_offset(pudp, address); - /* - * The READ_ONCE() will stabilize the pmdval in a register or - * on the stack so that it will stop changing under the code. 
- */ - pmdval = READ_ONCE(*pmd); + pmdval = pmdp_get_lockless(pmd); if (pmd_none(pmdval)) return no_page_table(vma, flags); if (!pmd_present(pmdval)) diff --git a/mm/hmm.c b/mm/hmm.c index 6a151c09de5e..e23043345615 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -332,7 +332,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp, pmd_t pmd; again: - pmd = READ_ONCE(*pmdp); + pmd = pmdp_get_lockless(pmdp); if (pmd_none(pmd)) return hmm_vma_walk_hole(start, end, -1, walk); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 3649ba12a235..2d206e62d358 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -959,11 +959,6 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm, return SCAN_PMD_NULL; pmde = pmdp_get_lockless(*pmd); - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - /* See comments in pmd_none_or_trans_huge_or_clear_bad() */ - barrier(); -#endif if (pmd_none(pmde)) return SCAN_PMD_NONE; if (!pmd_present(pmde)) diff --git a/mm/ksm.c b/mm/ksm.c index 0156bded3a66..df2aa281d49d 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1194,8 +1194,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, * without holding anon_vma lock for write. So when looking for a * genuine pmde (in which to find pte), test present and !THP together. */ - pmde = *pmd; - barrier(); + pmde = pmdp_get_lockless(pmd); if (!pmd_present(pmde) || pmd_trans_huge(pmde)) goto out; diff --git a/mm/memory.c b/mm/memory.c index 36082fd42df4..221b21623644 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4923,18 +4923,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) * So now it's safe to run pte_offset_map(). */ vmf->pte = pte_offset_map(vmf->pmd, vmf->address); - vmf->orig_pte = *vmf->pte; + vmf->orig_pte = ptep_get_lockless(vmf->pte); vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; - /* - * some architectures can have larger ptes than wordsize, - * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and - * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic - * accesses. The code below just needs a consistent view - * for the ifs and we later double check anyway with the - * ptl lock held. So here a barrier will do. - */ - barrier(); if (pte_none(vmf->orig_pte)) { pte_unmap(vmf->pte); vmf->pte = NULL; @@ -5058,9 +5049,8 @@ retry_pud: if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { - vmf.orig_pmd = *vmf.pmd; + vmf.orig_pmd = pmdp_get_lockless(vmf.pmd); - barrier(); if (unlikely(is_swap_pmd(vmf.orig_pmd))) { VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(vmf.orig_pmd)); diff --git a/mm/mprotect.c b/mm/mprotect.c index 92d3d3ca390a..c5a13c0f1017 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -309,11 +309,6 @@ static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd) { pmd_t pmdval = pmdp_get_lockless(pmd); - /* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - barrier(); -#endif - if (pmd_none(pmdval)) return 1; if (pmd_trans_huge(pmdval)) diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 4e448cfbc6ef..64aff6718bdb 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -210,7 +210,7 @@ restart: * compiler and used as a stale value after we've observed a * subsequent update. 
*/ - pmde = READ_ONCE(*pvmw->pmd); + pmde = pmdp_get_lockless(pvmw->pmd); if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) || (pmd_present(pmde) && pmd_devmap(pmde))) { -- cgit v1.2.3 From 0cb8fd4d14165a7e654048e43983d86f75b90879 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:08:20 -0700 Subject: mm/migrate: remove cruft from migration_entry_wait()s MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit migration_entry_wait_on_locked() does not need to take a mapped pte pointer, its callers can do the unmap first. Annotate it with __releases(ptl) to reduce sparse warnings. Fold __migration_entry_wait_huge() into migration_entry_wait_huge(). Fold __migration_entry_wait() into migration_entry_wait(), preferring the tighter pte_offset_map_lock() to pte_offset_map() and pte_lockptr(). Link: https://lkml.kernel.org/r/b0e2a532-cdf2-561b-e999-f3b13b8d6d3@google.com Signed-off-by: Hugh Dickins Reviewed-by: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- include/linux/migrate.h | 4 ++-- include/linux/swapops.h | 17 +++-------------- mm/filemap.c | 13 ++++--------- mm/migrate.c | 37 +++++++++++++------------------------ 4 files changed, 22 insertions(+), 49 deletions(-) (limited to 'mm') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 6de5756d8533..711dd9412561 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -75,8 +75,8 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode); int migrate_huge_page_move_mapping(struct address_space *mapping, struct folio *dst, struct folio *src); -void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep, - spinlock_t *ptl); +void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl) + __releases(ptl); void folio_migrate_flags(struct folio *newfolio, struct folio *folio); void folio_migrate_copy(struct folio *newfolio, struct folio *folio); int folio_migrate_mapping(struct address_space *mapping, diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 3a451b7afcb3..4c932cb45e0b 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -332,15 +332,9 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry) return false; } -extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, - spinlock_t *ptl); extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address); -#ifdef CONFIG_HUGETLB_PAGE -extern void __migration_entry_wait_huge(struct vm_area_struct *vma, - pte_t *ptep, spinlock_t *ptl); extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte); -#endif /* CONFIG_HUGETLB_PAGE */ #else /* CONFIG_MIGRATION */ static inline swp_entry_t make_readable_migration_entry(pgoff_t offset) { @@ -362,15 +356,10 @@ static inline int is_migration_entry(swp_entry_t swp) return 0; } -static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, - spinlock_t *ptl) { } 
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, - unsigned long address) { } -#ifdef CONFIG_HUGETLB_PAGE -static inline void __migration_entry_wait_huge(struct vm_area_struct *vma, - pte_t *ptep, spinlock_t *ptl) { } -static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { } -#endif /* CONFIG_HUGETLB_PAGE */ + unsigned long address) { } +static inline void migration_entry_wait_huge(struct vm_area_struct *vma, + pte_t *pte) { } static inline int is_writable_migration_entry(swp_entry_t entry) { return 0; diff --git a/mm/filemap.c b/mm/filemap.c index 916b7c6444fe..e0259fb823a5 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1362,8 +1362,6 @@ repeat: /** * migration_entry_wait_on_locked - Wait for a migration entry to be removed * @entry: migration swap entry. - * @ptep: mapped pte pointer. Will return with the ptep unmapped. Only required - * for pte entries, pass NULL for pmd entries. * @ptl: already locked ptl. This function will drop the lock. * * Wait for a migration entry referencing the given page to be removed. This is @@ -1372,13 +1370,13 @@ repeat: * should be called while holding the ptl for the migration entry referencing * the page. * - * Returns after unmapping and unlocking the pte/ptl with pte_unmap_unlock(). + * Returns after unlocking the ptl. * * This follows the same logic as folio_wait_bit_common() so see the comments * there. */ -void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep, - spinlock_t *ptl) +void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl) + __releases(ptl) { struct wait_page_queue wait_page; wait_queue_entry_t *wait = &wait_page.wait; @@ -1412,10 +1410,7 @@ void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep, * a valid reference to the page, and it must take the ptl to remove the * migration entry. So the page is valid until the ptl is dropped. */ - if (ptep) - pte_unmap_unlock(ptep, ptl); - else - spin_unlock(ptl); + spin_unlock(ptl); for (;;) { unsigned int flags; diff --git a/mm/migrate.c b/mm/migrate.c index 30b5ce10935e..c1f2c40441e1 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -296,14 +296,18 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked) * get to the page and wait until migration is finished. * When we return from this function the fault will be retried. */ -void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, - spinlock_t *ptl) +void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, + unsigned long address) { + spinlock_t *ptl; + pte_t *ptep; pte_t pte; swp_entry_t entry; - spin_lock(ptl); + ptep = pte_offset_map_lock(mm, pmd, address, &ptl); pte = *ptep; + pte_unmap(ptep); + if (!is_swap_pte(pte)) goto out; @@ -311,18 +315,10 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, if (!is_migration_entry(entry)) goto out; - migration_entry_wait_on_locked(entry, ptep, ptl); + migration_entry_wait_on_locked(entry, ptl); return; out: - pte_unmap_unlock(ptep, ptl); -} - -void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, - unsigned long address) -{ - spinlock_t *ptl = pte_lockptr(mm, pmd); - pte_t *ptep = pte_offset_map(pmd, address); - __migration_entry_wait(mm, ptep, ptl); + spin_unlock(ptl); } #ifdef CONFIG_HUGETLB_PAGE @@ -332,9 +328,9 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, * * This function will release the vma lock before returning. 
*/ -void __migration_entry_wait_huge(struct vm_area_struct *vma, - pte_t *ptep, spinlock_t *ptl) +void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep) { + spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep); pte_t pte; hugetlb_vma_assert_locked(vma); @@ -352,16 +348,9 @@ void __migration_entry_wait_huge(struct vm_area_struct *vma, * lock release in migration_entry_wait_on_locked(). */ hugetlb_vma_unlock_read(vma); - migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl); + migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl); } } - -void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) -{ - spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte); - - __migration_entry_wait_huge(vma, pte, ptl); -} #endif #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION @@ -372,7 +361,7 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd) ptl = pmd_lock(mm, pmd); if (!is_pmd_migration_entry(*pmd)) goto unlock; - migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl); + migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl); return; unlock: spin_unlock(ptl); -- cgit v1.2.3 From 0d940a9b270b9220dcff74d8e9123c9788365751 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:10:32 -0700 Subject: mm/pgtable: allow pte_offset_map[_lock]() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make pte_offset_map() a wrapper for __pte_offset_map() (optionally outputs pmdval), pte_offset_map_lock() a sparse __cond_lock wrapper for __pte_offset_map_lock(): those __funcs added in mm/pgtable-generic.c. __pte_offset_map() do pmdval validation (including pmd_clear_bad() when pmd_bad()), returning NULL if pmdval is not for a page table. __pte_offset_map_lock() verify pmdval unchanged after getting the lock, trying again if it changed. No #ifdef CONFIG_TRANSPARENT_HUGEPAGE around them: that could be done to cover the imminent case, but we expect to generalize it later, and it makes a mess of where to do the pmd_bad() clearing. Add pte_offset_map_nolock(): outputs ptl like pte_offset_map_lock(), without actually taking the lock. This will be preferred to open uses of pte_lockptr(), because (when split ptlock is in page table's struct page) it points to the right lock for the returned pte pointer, even if *pmd gets changed racily afterwards. Update corresponding Documentation. Do not add the anticipated rcu_read_lock() and rcu_read_unlock()s yet: they have to wait until all architectures are balancing pte_offset_map()s with pte_unmap()s (as in the arch series posted earlier). But comment where they will go, so that it's easy to add them for experiments. And only when those are in place can transient racy failure cases be enabled. Add more safety for the PAE mismatched pmd_low pmd_high case at that time. Link: https://lkml.kernel.org/r/2929bfd-9893-a374-e463-4c3127ff9b9d@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- Documentation/mm/split_page_table_lock.rst | 17 +++++---- include/linux/mm.h | 27 +++++++++----- include/linux/pgtable.h | 22 ++++++++---- mm/pgtable-generic.c | 56 ++++++++++++++++++++++++++++++ 4 files changed, 101 insertions(+), 21 deletions(-) (limited to 'mm') diff --git a/Documentation/mm/split_page_table_lock.rst b/Documentation/mm/split_page_table_lock.rst index 50ee0dfc95be..a834fad9de12 100644 --- a/Documentation/mm/split_page_table_lock.rst +++ b/Documentation/mm/split_page_table_lock.rst @@ -14,15 +14,20 @@ tables. Access to higher level tables protected by mm->page_table_lock. There are helpers to lock/unlock a table and other accessor functions: - pte_offset_map_lock() - maps pte and takes PTE table lock, returns pointer to the taken - lock; + maps PTE and takes PTE table lock, returns pointer to PTE with + pointer to its PTE table lock, or returns NULL if no PTE table; + - pte_offset_map_nolock() + maps PTE, returns pointer to PTE with pointer to its PTE table + lock (not taken), or returns NULL if no PTE table; + - pte_offset_map() + maps PTE, returns pointer to PTE, or returns NULL if no PTE table; + - pte_unmap() + unmaps PTE table; - pte_unmap_unlock() unlocks and unmaps PTE table; - pte_alloc_map_lock() - allocates PTE table if needed and take the lock, returns pointer - to taken lock or NULL if allocation failed; - - pte_lockptr() - returns pointer to PTE table lock; + allocates PTE table if needed and takes its lock, returns pointer to + PTE with pointer to its lock, or returns NULL if allocation failed; - pmd_lock() takes PMD table lock, returns pointer to taken lock; - pmd_lockptr() diff --git a/include/linux/mm.h b/include/linux/mm.h index 66032f0d515c..a08dc8cc48fb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2827,14 +2827,25 @@ static inline void pgtable_pte_page_dtor(struct page *page) dec_lruvec_page_state(page, NR_PAGETABLE); } -#define pte_offset_map_lock(mm, pmd, address, ptlp) \ -({ \ - spinlock_t *__ptl = pte_lockptr(mm, pmd); \ - pte_t *__pte = pte_offset_map(pmd, address); \ - *(ptlp) = __ptl; \ - spin_lock(__ptl); \ - __pte; \ -}) +pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp); +static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr) +{ + return __pte_offset_map(pmd, addr, NULL); +} + +pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, spinlock_t **ptlp); +static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, spinlock_t **ptlp) +{ + pte_t *pte; + + __cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)); + return pte; +} + +pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, spinlock_t **ptlp); #define pte_unmap_unlock(pte, ptl) do { \ spin_unlock(ptl); \ diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 94235ff2706e..3fabbb018557 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -94,14 +94,22 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) #define pte_offset_kernel pte_offset_kernel 
#endif -#if defined(CONFIG_HIGHPTE) -#define pte_offset_map(dir, address) \ - ((pte_t *)kmap_local_page(pmd_page(*(dir))) + \ - pte_index((address))) -#define pte_unmap(pte) kunmap_local((pte)) +#ifdef CONFIG_HIGHPTE +#define __pte_map(pmd, address) \ + ((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address))) +#define pte_unmap(pte) do { \ + kunmap_local((pte)); \ + /* rcu_read_unlock() to be added later */ \ +} while (0) #else -#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) -#define pte_unmap(pte) ((void)(pte)) /* NOP */ +static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address) +{ + return pte_offset_kernel(pmd, address); +} +static inline void pte_unmap(pte_t *pte) +{ + /* rcu_read_unlock() to be added later */ +} #endif /* Find an entry in the second-level page table.. */ diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index d2fc52bffafc..c7ab18a5fb77 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c @@ -10,6 +10,8 @@ #include #include #include +#include +#include #include #include @@ -229,3 +231,57 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, } #endif #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp) +{ + pmd_t pmdval; + + /* rcu_read_lock() to be added later */ + pmdval = pmdp_get_lockless(pmd); + if (pmdvalp) + *pmdvalp = pmdval; + if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval))) + goto nomap; + if (unlikely(pmd_trans_huge(pmdval) || pmd_devmap(pmdval))) + goto nomap; + if (unlikely(pmd_bad(pmdval))) { + pmd_clear_bad(pmd); + goto nomap; + } + return __pte_map(&pmdval, addr); +nomap: + /* rcu_read_unlock() to be added later */ + return NULL; +} + +pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, spinlock_t **ptlp) +{ + pmd_t pmdval; + pte_t *pte; + + pte = __pte_offset_map(pmd, addr, &pmdval); + if (likely(pte)) + *ptlp = pte_lockptr(mm, &pmdval); + return pte; +} + +pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, spinlock_t **ptlp) +{ + spinlock_t *ptl; + pmd_t pmdval; + pte_t *pte; +again: + pte = __pte_offset_map(pmd, addr, &pmdval); + if (unlikely(!pte)) + return pte; + ptl = pte_lockptr(mm, &pmdval); + spin_lock(ptl); + if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) { + *ptlp = ptl; + return pte; + } + pte_unmap_unlock(pte, ptl); + goto again; +} -- cgit v1.2.3 From 65747aaf42b7db6acb8e57a2b8e9959928f404dd Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:11:29 -0700 Subject: mm/filemap: allow pte_offset_map_lock() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit filemap_map_pages() allow pte_offset_map_lock() to fail; and remove the pmd_devmap_trans_unstable() check from filemap_map_pmd(), which can safely return to filemap_map_pages() and let pte_offset_map_lock() discover that. Link: https://lkml.kernel.org/r/54607cf4-ddb6-7ef3-043-1d2de1a9a71@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/filemap.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index e0259fb823a5..1893048ec9ff 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3414,13 +3414,6 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio, if (pmd_none(*vmf->pmd)) pmd_install(mm, vmf->pmd, &vmf->prealloc_pte); - /* See comment in handle_pte_fault() */ - if (pmd_devmap_trans_unstable(vmf->pmd)) { - folio_unlock(folio); - folio_put(folio); - return true; - } - return false; } @@ -3507,6 +3500,11 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf, addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); + if (!vmf->pte) { + folio_unlock(folio); + folio_put(folio); + goto out; + } do { again: page = folio_file_page(folio, xas.xa_index); -- cgit v1.2.3 From 45fe85e9811ede2d65b21724cae50d6a0563e452 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:12:52 -0700 Subject: mm/page_vma_mapped: delete bogosity in page_vma_mapped_walk() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Revert commit a7a69d8ba88d ("mm/thp: another PVMW_SYNC fix in page_vma_mapped_walk()"): I was proud of that "Aha!" commit at the time, but in revisiting page_vma_mapped_walk() for pte_offset_map() failure, that block raised a doubt: and it now seems utterly bogus. The prior map_pte() has taken ptl unconditionally when PVMW_SYNC: I must have forgotten that when making the change. It did no harm, but could not have fixed a BUG or WARN, and is hard to reconcile with coming changes. Link: https://lkml.kernel.org/r/87475a22-e59e-2d8b-d78a-df376d314bd@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/page_vma_mapped.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'mm') diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 64aff6718bdb..007dc7456f0e 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -275,10 +275,6 @@ next_pte: goto restart; } pvmw->pte++; - if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) { - pvmw->ptl = pte_lockptr(mm, pvmw->pmd); - spin_lock(pvmw->ptl); - } } while (pte_none(*pvmw->pte)); if (!pvmw->ptl) { -- cgit v1.2.3 From 90f43b0a13cddb09e2686f4d976751c0a9b8b197 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:14:12 -0700 Subject: mm/page_vma_mapped: reformat map_pte() with less indentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No functional change here, but adjust the format of map_pte() so that the following commit will be easier to read: separate out the PVMW_SYNC case first, and remove two levels of indentation from the ZONE_DEVICE case. Link: https://lkml.kernel.org/r/bf723f59-e3fc-6839-1cc3-c0631ee248bc@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/page_vma_mapped.c | 65 +++++++++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 31 deletions(-) (limited to 'mm') diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 007dc7456f0e..947dc7491815 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -15,38 +15,41 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw) static bool map_pte(struct page_vma_mapped_walk *pvmw) { - pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address); - if (!(pvmw->flags & PVMW_SYNC)) { - if (pvmw->flags & PVMW_MIGRATION) { - if (!is_swap_pte(*pvmw->pte)) - return false; - } else { - /* - * We get here when we are trying to unmap a private - * device page from the process address space. Such - * page is not CPU accessible and thus is mapped as - * a special swap entry, nonetheless it still does - * count as a valid regular mapping for the page (and - * is accounted as such in page maps count). - * - * So handle this special case as if it was a normal - * page mapping ie lock CPU page table and returns - * true. - * - * For more details on device private memory see HMM - * (include/linux/hmm.h or mm/hmm.c). 
- */ - if (is_swap_pte(*pvmw->pte)) { - swp_entry_t entry; + if (pvmw->flags & PVMW_SYNC) { + /* Use the stricter lookup */ + pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd, + pvmw->address, &pvmw->ptl); + return true; + } - /* Handle un-addressable ZONE_DEVICE memory */ - entry = pte_to_swp_entry(*pvmw->pte); - if (!is_device_private_entry(entry) && - !is_device_exclusive_entry(entry)) - return false; - } else if (!pte_present(*pvmw->pte)) - return false; - } + pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address); + if (pvmw->flags & PVMW_MIGRATION) { + if (!is_swap_pte(*pvmw->pte)) + return false; + } else if (is_swap_pte(*pvmw->pte)) { + swp_entry_t entry; + /* + * Handle un-addressable ZONE_DEVICE memory. + * + * We get here when we are trying to unmap a private + * device page from the process address space. Such + * page is not CPU accessible and thus is mapped as + * a special swap entry, nonetheless it still does + * count as a valid regular mapping for the page + * (and is accounted as such in page maps count). + * + * So handle this special case as if it was a normal + * page mapping ie lock CPU page table and return true. + * + * For more details on device private memory see HMM + * (include/linux/hmm.h or mm/hmm.c). + */ + entry = pte_to_swp_entry(*pvmw->pte); + if (!is_device_private_entry(entry) && + !is_device_exclusive_entry(entry)) + return false; + } else if (!pte_present(*pvmw->pte)) { + return false; } pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd); spin_lock(pvmw->ptl); -- cgit v1.2.3 From 2798bbe75b9c2752b46d292e5c2a49f49da36418 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:15:43 -0700 Subject: mm/page_vma_mapped: pte_offset_map_nolock() not pte_lockptr() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit map_pte() use pte_offset_map_nolock(), to make sure of the ptl belonging to pte, even if pmd entry is then changed racily: page_vma_mapped_walk() use that instead of getting pte_lockptr() later, or restart if map_pte() found no page table. Link: https://lkml.kernel.org/r/cba186e0-5ed7-e81b-6cd-dade4c33c248@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/page_vma_mapped.c | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 947dc7491815..2af734274073 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -13,16 +13,28 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw) return false; } -static bool map_pte(struct page_vma_mapped_walk *pvmw) +static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp) { if (pvmw->flags & PVMW_SYNC) { /* Use the stricter lookup */ pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd, pvmw->address, &pvmw->ptl); - return true; + *ptlp = pvmw->ptl; + return !!pvmw->pte; } - pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address); + /* + * It is important to return the ptl corresponding to pte, + * in case *pvmw->pmd changes underneath us; so we need to + * return it even when choosing not to lock, in case caller + * proceeds to loop over next ptes, and finds a match later. + * Though, in most cases, page lock already protects this. + */ + pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd, + pvmw->address, ptlp); + if (!pvmw->pte) + return false; + if (pvmw->flags & PVMW_MIGRATION) { if (!is_swap_pte(*pvmw->pte)) return false; @@ -51,7 +63,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw) } else if (!pte_present(*pvmw->pte)) { return false; } - pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd); + pvmw->ptl = *ptlp; spin_lock(pvmw->ptl); return true; } @@ -156,6 +168,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) struct vm_area_struct *vma = pvmw->vma; struct mm_struct *mm = vma->vm_mm; unsigned long end; + spinlock_t *ptl; pgd_t *pgd; p4d_t *p4d; pud_t *pud; @@ -257,8 +270,11 @@ restart: step_forward(pvmw, PMD_SIZE); continue; } - if (!map_pte(pvmw)) + if (!map_pte(pvmw, &ptl)) { + if (!pvmw->pte) + goto restart; goto next_pte; + } this_pte: if (check_pte(pvmw)) return true; @@ -281,7 +297,7 @@ next_pte: } while (pte_none(*pvmw->pte)); if (!pvmw->ptl) { - pvmw->ptl = pte_lockptr(mm, pvmw->pmd); + pvmw->ptl = ptl; spin_lock(pvmw->ptl); } goto this_pte; -- cgit v1.2.3 From 7780d04046a2288ab85d88bedacc60fa4fad9971 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:17:26 -0700 Subject: mm/pagewalkers: ACTION_AGAIN if pte_offset_map_lock() fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Simple walk_page_range() users should set ACTION_AGAIN to retry when pte_offset_map_lock() fails. No need to check pmd_trans_unstable(): that was precisely to avoid the possiblity of calling pte_offset_map() on a racily removed or inserted THP entry, but such cases are now safely handled inside it. Likewise there is no need to check pmd_none() or pmd_bad() before calling it. 
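The conversion is mechanical; a schematic pmd_entry callback (example_pte_range is a made-up name, not one of the functions touched here — the real conversions follow in the diff) now takes this shape:

static int example_pte_range(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		/* page table vanished or a THP appeared: ask the walker to retry */
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		/* inspect *pte here */
	}
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}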
Link: https://lkml.kernel.org/r/c77d9d10-3aad-e3ce-4896-99e91c7947f3@google.com Signed-off-by: Hugh Dickins Reviewed-by: SeongJae Park for mm/damon part Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- fs/proc/task_mmu.c | 32 ++++++++++++++++---------------- mm/damon/vaddr.c | 12 ++++++++---- mm/mempolicy.c | 7 ++++--- mm/mincore.c | 9 ++++----- mm/mlock.c | 4 ++++ 5 files changed, 36 insertions(+), 28 deletions(-) (limited to 'mm') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 6259dd432eeb..0d63b6a0f0d8 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -631,14 +631,11 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, goto out; } - if (pmd_trans_unstable(pmd)) - goto out; - /* - * The mmap_lock held all the way back in m_start() is what - * keeps khugepaged out of here and from collapsing things - * in here. - */ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!pte) { + walk->action = ACTION_AGAIN; + return 0; + } for (; addr != end; pte++, addr += PAGE_SIZE) smaps_pte_entry(pte, addr, walk); pte_unmap_unlock(pte - 1, ptl); @@ -1191,10 +1188,11 @@ out: return 0; } - if (pmd_trans_unstable(pmd)) - return 0; - pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!pte) { + walk->action = ACTION_AGAIN; + return 0; + } for (; addr != end; pte++, addr += PAGE_SIZE) { ptent = *pte; @@ -1538,9 +1536,6 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, spin_unlock(ptl); return err; } - - if (pmd_trans_unstable(pmdp)) - return 0; #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* @@ -1548,6 +1543,10 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, * goes beyond vma->vm_end. 
*/ orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); + if (!pte) { + walk->action = ACTION_AGAIN; + return err; + } for (; addr < end; pte++, addr += PAGE_SIZE) { pagemap_entry_t pme; @@ -1887,11 +1886,12 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, spin_unlock(ptl); return 0; } - - if (pmd_trans_unstable(pmd)) - return 0; #endif orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + if (!pte) { + walk->action = ACTION_AGAIN; + return 0; + } do { struct page *page = can_gather_numa_stats(*pte, vma, addr); if (!page) diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 37994fb6120c..e814f66dfc2e 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -318,9 +318,11 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr, spin_unlock(ptl); } - if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) - return 0; pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + if (!pte) { + walk->action = ACTION_AGAIN; + return 0; + } if (!pte_present(*pte)) goto out; damon_ptep_mkold(pte, walk->vma, addr); @@ -464,9 +466,11 @@ huge_out: regular_page: #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) - return -EINVAL; pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + if (!pte) { + walk->action = ACTION_AGAIN; + return 0; + } if (!pte_present(*pte)) goto out; folio = damon_get_folio(pte_pfn(*pte)); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index f06ca8c18e62..0241bb64978b 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -514,10 +514,11 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr, if (ptl) return queue_folios_pmd(pmd, ptl, addr, end, walk); - if (pmd_trans_unstable(pmd)) - return 0; - mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + if (!pte) { + walk->action = ACTION_AGAIN; + return 0; + } for (; addr != end; pte++, addr += PAGE_SIZE) { if (!pte_present(*pte)) continue; diff --git a/mm/mincore.c b/mm/mincore.c index 2d5be013a25a..f33f6a0b1ded 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -113,12 +113,11 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, goto out; } - if (pmd_trans_unstable(pmd)) { - __mincore_unmapped_range(addr, end, vma, vec); - goto out; - } - ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + if (!ptep) { + walk->action = ACTION_AGAIN; + return 0; + } for (; addr != end; ptep++, addr += PAGE_SIZE) { pte_t pte = *ptep; diff --git a/mm/mlock.c b/mm/mlock.c index 40b43f8740df..9f2b1173b1b1 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -329,6 +329,10 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, } start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!start_pte) { + walk->action = ACTION_AGAIN; + return 0; + } for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) { if (!pte_present(*pte)) continue; -- cgit v1.2.3 From be872f83bf571f4f9a0ac25e2c9c36e905a36619 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:18:49 -0700 Subject: mm/pagewalk: walk_pte_range() allow for pte_offset_map() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit walk_pte_range() has a no_vma option to serve walk_page_range_novma(). I don't know of any problem, but it looks safer to check for init_mm, and use pte_offset_kernel() rather than pte_offset_map() in that case: pte_offset_map()'s pmdval validation is intended for userspace. Allow for its pte_offset_map() or pte_offset_map_lock() to fail, and retry with ACTION_AGAIN if so. 
Add a second check for ACTION_AGAIN in walk_pmd_range(), to catch it after return from walk_pte_range(). Remove the pmd_trans_unstable() check after split_huge_pmd() in walk_pmd_range(): walk_pte_range() now handles those cases safely (and they must fail powerpc's is_hugepd() check). Link: https://lkml.kernel.org/r/3eba6f0-2b-fb66-6bb6-2ee8533e221@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/pagewalk.c | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/pagewalk.c b/mm/pagewalk.c index cb23f8a15c13..64437105fe0d 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -46,15 +46,27 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, spinlock_t *ptl; if (walk->no_vma) { - pte = pte_offset_map(pmd, addr); - err = walk_pte_range_inner(pte, addr, end, walk); - pte_unmap(pte); + /* + * pte_offset_map() might apply user-specific validation. + */ + if (walk->mm == &init_mm) + pte = pte_offset_kernel(pmd, addr); + else + pte = pte_offset_map(pmd, addr); + if (pte) { + err = walk_pte_range_inner(pte, addr, end, walk); + if (walk->mm != &init_mm) + pte_unmap(pte); + } } else { pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); - err = walk_pte_range_inner(pte, addr, end, walk); - pte_unmap_unlock(pte, ptl); + if (pte) { + err = walk_pte_range_inner(pte, addr, end, walk); + pte_unmap_unlock(pte, ptl); + } } - + if (!pte) + walk->action = ACTION_AGAIN; return err; } @@ -141,11 +153,8 @@ again: !(ops->pte_entry)) continue; - if (walk->vma) { + if (walk->vma) split_huge_pmd(walk->vma, pmd, addr); - if (pmd_trans_unstable(pmd)) - goto again; - } if (is_hugepd(__hugepd(pmd_val(*pmd)))) err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, PMD_SHIFT); @@ -153,6 +162,10 @@ again: err = walk_pte_range(pmd, addr, next, walk); if (err) break; + + if (walk->action == ACTION_AGAIN) + goto again; + } while (pmd++, addr = next, addr != end); return err; -- cgit v1.2.3 From e5ad581c7f1c32d309ae4e895eea0cd1a3d9f363 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:20:04 -0700 Subject: mm/vmwgfx: simplify pmd & pud mapping dirty helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit wp_clean_pmd_entry() need not check pmd_trans_unstable() or pmd_none(), wp_clean_pud_entry() need not check pud_trans_unstable() or pud_none(): it's just the ACTION_CONTINUE when trans_huge or devmap that's needed to prevent splitting, and we're hoping to remove pmd_trans_unstable(). Is that PUD #ifdef necessary? Maybe some configs are missing a stub. Link: https://lkml.kernel.org/r/d3379c7-65db-26d3-1764-8e866490925f@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/mapping_dirty_helpers.c | 34 +++++++++------------------------- 1 file changed, 9 insertions(+), 25 deletions(-) (limited to 'mm') diff --git a/mm/mapping_dirty_helpers.c b/mm/mapping_dirty_helpers.c index e1eb33f49059..87b4beeda4fa 100644 --- a/mm/mapping_dirty_helpers.c +++ b/mm/mapping_dirty_helpers.c @@ -128,19 +128,11 @@ static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end, { pmd_t pmdval = pmdp_get_lockless(pmd); - if (!pmd_trans_unstable(&pmdval)) - return 0; - - if (pmd_none(pmdval)) { - walk->action = ACTION_AGAIN; - return 0; - } - - /* Huge pmd, present or migrated */ - walk->action = ACTION_CONTINUE; - if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval)) + /* Do not split a huge pmd, present or migrated */ + if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval)) { WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval)); - + walk->action = ACTION_CONTINUE; + } return 0; } @@ -156,23 +148,15 @@ static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end, static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end, struct mm_walk *walk) { +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD pud_t pudval = READ_ONCE(*pud); - if (!pud_trans_unstable(&pudval)) - return 0; - - if (pud_none(pudval)) { - walk->action = ACTION_AGAIN; - return 0; - } - -#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD - /* Huge pud */ - walk->action = ACTION_CONTINUE; - if (pud_trans_huge(pudval) || pud_devmap(pudval)) + /* Do not split a huge pud */ + if (pud_trans_huge(pudval) || pud_devmap(pudval)) { WARN_ON(pud_write(pudval) || pud_dirty(pudval)); + walk->action = ACTION_CONTINUE; + } #endif - return 0; } -- cgit v1.2.3 From 0d1c81edc61e553ed7a5db18fb8074c8b78e1538 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:21:41 -0700 Subject: mm/vmalloc: vmalloc_to_page() use pte_offset_kernel() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit vmalloc_to_page() was using pte_offset_map() (followed by pte_unmap()), but it's intended for userspace page tables: prefer pte_offset_kernel(). Link: https://lkml.kernel.org/r/696386a-84f8-b33c-82e5-f865ed6eb39@google.com Signed-off-by: Hugh Dickins Reviewed-by: Lorenzo Stoakes Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/vmalloc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index d07d0140ebd3..7382e0a60ce1 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -703,11 +703,10 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) if (WARN_ON_ONCE(pmd_bad(*pmd))) return NULL; - ptep = pte_offset_map(pmd, addr); + ptep = pte_offset_kernel(pmd, addr); pte = *ptep; if (pte_present(pte)) page = pte_page(pte); - pte_unmap(ptep); return page; } -- cgit v1.2.3 From 6ec1905f6ec7f9f79ca3eaeaf04584b4dcddd743 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:23:19 -0700 Subject: mm/hmm: retry if pte_offset_map() fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hmm_vma_walk_pmd() is called through mm_walk, but already has a goto again loop of its own, so take part in that if pte_offset_map() fails. Link: https://lkml.kernel.org/r/d6c6dd68-25d4-653b-f94b-a45c53ee04b@google.com Signed-off-by: Hugh Dickins Reviewed-by: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/hmm.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'mm') diff --git a/mm/hmm.c b/mm/hmm.c index e23043345615..b1a9159d7c92 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -381,6 +381,8 @@ again: } ptep = pte_offset_map(pmdp, addr); + if (!ptep) + goto again; for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) { int r; -- cgit v1.2.3 From 3622d3cde30898c1b6eafde281c122b994718c58 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:26:04 -0700 Subject: mm/userfaultfd: allow pte_offset_map_lock() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit mfill_atomic_install_pte() and mfill_atomic_pte_zeropage() treat failed pte_offset_map_lock() as -EAGAIN, which mfill_atomic() already returns to user for a similar race. Link: https://lkml.kernel.org/r/50cf3930-1bfa-4de9-a079-3da47b7ce17b@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/userfaultfd.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'mm') diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index e97a0b4889fc..5fd787158c70 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -76,7 +76,10 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd, if (flags & MFILL_ATOMIC_WP) _dst_pte = pte_mkuffd_wp(_dst_pte); + ret = -EAGAIN; dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); + if (!dst_pte) + goto out; if (vma_is_shmem(dst_vma)) { /* serialize against truncate with the page table lock */ @@ -121,6 +124,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd, ret = 0; out_unlock: pte_unmap_unlock(dst_pte, ptl); +out: return ret; } @@ -212,7 +216,10 @@ static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd, _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr), dst_vma->vm_page_prot)); + ret = -EAGAIN; dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl); + if (!dst_pte) + goto out; if (dst_vma->vm_file) { /* the shmem MAP_PRIVATE case requires checking the i_size */ inode = dst_vma->vm_file->f_inode; @@ -231,6 +238,7 @@ static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd, ret = 0; out_unlock: pte_unmap_unlock(dst_pte, ptl); +out: return ret; } -- cgit v1.2.3 From 9f2bad096d2f84751fd4559fcd4cdda1a2af1976 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:27:52 -0700 Subject: mm/debug_vm_pgtable,page_table_check: warn pte map fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Failures here would be surprising: pte_advanced_tests() and pte_clear_tests() and __page_table_check_pte_clear_range() each issue a warning if pte_offset_map() or pte_offset_map_lock() fails. Link: https://lkml.kernel.org/r/3ea9e4f-e5cf-d7d9-4c2-291b3c5a3636@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/debug_vm_pgtable.c | 9 ++++++++- mm/page_table_check.c | 2 ++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c index c54177aabebd..ee119e33fef1 100644 --- a/mm/debug_vm_pgtable.c +++ b/mm/debug_vm_pgtable.c @@ -138,6 +138,9 @@ static void __init pte_advanced_tests(struct pgtable_debug_args *args) return; pr_debug("Validating PTE advanced\n"); + if (WARN_ON(!args->ptep)) + return; + pte = pfn_pte(args->pte_pfn, args->page_prot); set_pte_at(args->mm, args->vaddr, args->ptep, pte); flush_dcache_page(page); @@ -619,6 +622,9 @@ static void __init pte_clear_tests(struct pgtable_debug_args *args) * the unexpected overhead of cache flushing is acceptable. */ pr_debug("Validating PTE clear\n"); + if (WARN_ON(!args->ptep)) + return; + #ifndef CONFIG_RISCV pte = __pte(pte_val(pte) | RANDOM_ORVALUE); #endif @@ -1377,7 +1383,8 @@ static int __init debug_vm_pgtable(void) args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl); pte_clear_tests(&args); pte_advanced_tests(&args); - pte_unmap_unlock(args.ptep, ptl); + if (args.ptep) + pte_unmap_unlock(args.ptep, ptl); ptl = pmd_lock(args.mm, args.pmdp); pmd_clear_tests(&args); diff --git a/mm/page_table_check.c b/mm/page_table_check.c index 25d8610c0042..0c511330dbc9 100644 --- a/mm/page_table_check.c +++ b/mm/page_table_check.c @@ -240,6 +240,8 @@ void __page_table_check_pte_clear_range(struct mm_struct *mm, pte_t *ptep = pte_offset_map(&pmd, addr); unsigned long i; + if (WARN_ON(!ptep)) + return; for (i = 0; i < PTRS_PER_PTE; i++) { __page_table_check_pte_clear(mm, addr, *ptep); addr += PAGE_SIZE; -- cgit v1.2.3 From 04dee9e85cf50a2f24738e456d66b88de109b806 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:29:22 -0700 Subject: mm/various: give up if pte_offset_map[_lock]() fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Following the examples of nearby code, various functions can just give up if pte_offset_map() or pte_offset_map_lock() fails. And there's no need for a preliminary pmd_trans_unstable() or other such check, since such cases are now safely handled inside. Link: https://lkml.kernel.org/r/7b9bd85d-1652-cbf2-159d-f503b45e5b@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/gup.c | 9 ++++++--- mm/ksm.c | 7 ++++--- mm/memcontrol.c | 8 ++++---- mm/memory-failure.c | 8 +++++--- mm/migrate.c | 3 +++ 5 files changed, 22 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index d448fd286b8c..598e8c98367b 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -545,10 +545,10 @@ static struct page *follow_page_pte(struct vm_area_struct *vma, if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == (FOLL_PIN | FOLL_GET))) return ERR_PTR(-EINVAL); - if (unlikely(pmd_bad(*pmd))) - return no_page_table(vma, flags); ptep = pte_offset_map_lock(mm, pmd, address, &ptl); + if (!ptep) + return no_page_table(vma, flags); pte = *ptep; if (!pte_present(pte)) goto no_page; @@ -852,8 +852,9 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return -EFAULT; - VM_BUG_ON(pmd_trans_huge(*pmd)); pte = pte_offset_map(pmd, address); + if (!pte) + return -EFAULT; if (pte_none(*pte)) goto unmap; *vma = get_gate_vma(mm); @@ -2468,6 +2469,8 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, pte_t *ptep, *ptem; ptem = ptep = pte_offset_map(&pmd, addr); + if (!ptep) + return 0; do { pte_t pte = ptep_get_lockless(ptep); struct page *page; diff --git a/mm/ksm.c b/mm/ksm.c index df2aa281d49d..3dc15459dd20 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -431,10 +431,9 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex pte_t *pte; int ret; - if (pmd_leaf(*pmd) || !pmd_present(*pmd)) - return 0; - pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + if (!pte) + return 0; if (pte_present(*pte)) { page = vm_normal_page(walk->vma, addr, *pte); } else if (!pte_none(*pte)) { @@ -1203,6 +1202,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, mmu_notifier_invalidate_range_start(&range); ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!ptep) + goto out_mn; if (!pte_same(*ptep, orig_pte)) { pte_unmap_unlock(ptep, ptl); goto out_mn; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index caf6ab55f8e3..77d8d2d14fcf 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6021,9 +6021,9 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, return 0; } - if (pmd_trans_unstable(pmd)) - return 0; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!pte) + return 0; for (; addr != end; pte++, addr += PAGE_SIZE) if (get_mctgt_type(vma, addr, *pte, NULL)) mc.precharge++; /* increment precharge temporarily */ @@ -6241,10 +6241,10 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, return 0; } - if (pmd_trans_unstable(pmd)) - return 0; retry: pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!pte) + return 0; for (; addr != end; addr += PAGE_SIZE) { pte_t ptent = *(pte++); bool device = false; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 004a02f44271..d5116f0eb1b6 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -405,6 +405,8 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, if (pmd_devmap(*pmd)) return PMD_SHIFT; pte = 
pte_offset_map(pmd, address); + if (!pte) + return 0; if (pte_present(*pte) && pte_devmap(*pte)) ret = PAGE_SHIFT; pte_unmap(pte); @@ -791,11 +793,11 @@ static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr, goto out; } - if (pmd_trans_unstable(pmdp)) - goto out; - mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp, addr, &ptl); + if (!ptep) + goto out; + for (; addr != end; ptep++, addr += PAGE_SIZE) { ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT, hwp->pfn, &hwp->tk); diff --git a/mm/migrate.c b/mm/migrate.c index c1f2c40441e1..363562992046 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -305,6 +305,9 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, swp_entry_t entry; ptep = pte_offset_map_lock(mm, pmd, address, &ptl); + if (!ptep) + return; + pte = *ptep; pte_unmap(ptep); -- cgit v1.2.3 From 670ddd8cdcbd1d07a4571266ae3517f821728c3a Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:30:48 -0700 Subject: mm/mprotect: delete pmd_none_or_clear_bad_unless_trans_huge() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit change_pmd_range() had special pmd_none_or_clear_bad_unless_trans_huge(), required to avoid "bad" choices when setting automatic NUMA hinting under mmap_read_lock(); but most of that is already covered in pte_offset_map() now. change_pmd_range() just wants a pmd_none() check before wasting time on MMU notifiers, then checks on the read-once _pmd value to work out what's needed for huge cases. If change_pte_range() returns -EAGAIN to retry if pte_offset_map_lock() fails, nothing more special is needed. Link: https://lkml.kernel.org/r/725a42a9-91e9-c868-925-e3a5fd40bb4f@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/mprotect.c | 74 ++++++++++++++--------------------------------------------- 1 file changed, 17 insertions(+), 57 deletions(-) (limited to 'mm') diff --git a/mm/mprotect.c b/mm/mprotect.c index c5a13c0f1017..64e1df0af514 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -93,22 +93,9 @@ static long change_pte_range(struct mmu_gather *tlb, bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; tlb_change_page_size(tlb, PAGE_SIZE); - - /* - * Can be called with only the mmap_lock for reading by - * prot_numa so we must check the pmd isn't constantly - * changing from under us from pmd_none to pmd_trans_huge - * and/or the other way around. - */ - if (pmd_trans_unstable(pmd)) - return 0; - - /* - * The pmd points to a regular pte so the pmd can't change - * from under us even if the mmap_lock is only hold for - * reading. 
- */ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!pte) + return -EAGAIN; /* Get target node for single threaded private VMAs */ if (prot_numa && !(vma->vm_flags & VM_SHARED) && @@ -301,26 +288,6 @@ static long change_pte_range(struct mmu_gather *tlb, return pages; } -/* - * Used when setting automatic NUMA hinting protection where it is - * critical that a numa hinting PMD is not confused with a bad PMD. - */ -static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd) -{ - pmd_t pmdval = pmdp_get_lockless(pmd); - - if (pmd_none(pmdval)) - return 1; - if (pmd_trans_huge(pmdval)) - return 0; - if (unlikely(pmd_bad(pmdval))) { - pmd_clear_bad(pmd); - return 1; - } - - return 0; -} - /* * Return true if we want to split THPs into PTE mappings in change * protection procedure, false otherwise. @@ -398,7 +365,8 @@ static inline long change_pmd_range(struct mmu_gather *tlb, pmd = pmd_offset(pud, addr); do { long ret; - + pmd_t _pmd; +again: next = pmd_addr_end(addr, end); ret = change_pmd_prepare(vma, pmd, cp_flags); @@ -406,16 +374,8 @@ static inline long change_pmd_range(struct mmu_gather *tlb, pages = ret; break; } - /* - * Automatic NUMA balancing walks the tables with mmap_lock - * held for read. It's possible a parallel update to occur - * between pmd_trans_huge() and a pmd_none_or_clear_bad() - * check leading to a false positive and clearing. - * Hence, it's necessary to atomically read the PMD value - * for all the checks. - */ - if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) && - pmd_none_or_clear_bad_unless_trans_huge(pmd)) + + if (pmd_none(*pmd)) goto next; /* invoke the mmu notifier if the pmd is populated */ @@ -426,7 +386,8 @@ static inline long change_pmd_range(struct mmu_gather *tlb, mmu_notifier_invalidate_range_start(&range); } - if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { + _pmd = pmdp_get_lockless(pmd); + if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) { if ((next - addr != HPAGE_PMD_SIZE) || pgtable_split_needed(vma, cp_flags)) { __split_huge_pmd(vma, pmd, addr, false, NULL); @@ -441,15 +402,10 @@ static inline long change_pmd_range(struct mmu_gather *tlb, break; } } else { - /* - * change_huge_pmd() does not defer TLB flushes, - * so no need to propagate the tlb argument. - */ - int nr_ptes = change_huge_pmd(tlb, vma, pmd, + ret = change_huge_pmd(tlb, vma, pmd, addr, newprot, cp_flags); - - if (nr_ptes) { - if (nr_ptes == HPAGE_PMD_NR) { + if (ret) { + if (ret == HPAGE_PMD_NR) { pages += HPAGE_PMD_NR; nr_huge_updates++; } @@ -460,8 +416,12 @@ static inline long change_pmd_range(struct mmu_gather *tlb, } /* fall through, the trans huge pmd just split */ } - pages += change_pte_range(tlb, vma, pmd, addr, next, - newprot, cp_flags); + + ret = change_pte_range(tlb, vma, pmd, addr, next, newprot, + cp_flags); + if (ret < 0) + goto again; + pages += ret; next: cond_resched(); } while (pmd++, addr = next, addr != end); -- cgit v1.2.3 From a5be621ee2925b6ee2db455c45c2af2d8a195b0c Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:32:47 -0700 Subject: mm/mremap: retry if either pte_offset_map_*lock() fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit move_ptes() return -EAGAIN if pte_offset_map_lock() of old fails, or if pte_offset_map_nolock() of new fails: move_page_tables() retry if so. 
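(For orientation, a minimal sketch of the pte_offset_map_nolock() contract assumed here, shown with a hypothetical helper: it maps the page table and returns the pte lock pointer through its last argument without taking the lock; a NULL return means the pmd changed underneath, so back out and let the caller retry.)

	/* Sketch only: hypothetical update helper using pte_offset_map_nolock() */
	static int example_update(struct mm_struct *mm, pmd_t *pmd,
				  unsigned long addr)
	{
		spinlock_t *ptl;
		pte_t *pte;

		pte = pte_offset_map_nolock(mm, pmd, addr, &ptl);
		if (!pte)
			return -EAGAIN;		/* caller retries this extent */
		spin_lock(ptl);
		/* ... update the page table entry for addr ... */
		pte_unmap_unlock(pte, ptl);
		return 0;
	}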
But that does need a pmd_none() check inside, to stop endless loop when huge shmem is truncated (thank you to syzbot); and move_huge_pmd() must tolerate that a page table might have been allocated there just before (of course it would be more satisfying to remove the empty page table, but this is not a path worth optimizing). Link: https://lkml.kernel.org/r/65e5e84a-f04-947-23f2-b97d3462e1e@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/huge_memory.c | 5 +++-- mm/mremap.c | 28 ++++++++++++++++++++-------- 2 files changed, 23 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 9ccdb3fe3244..e21b3e3eb994 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1760,9 +1760,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, /* * The destination pmd shouldn't be established, free_pgtables() - * should have release it. + * should have released it; but move_page_tables() might have already + * inserted a page table, if racing against shmem/file collapse. */ - if (WARN_ON(!pmd_none(*new_pmd))) { + if (!pmd_none(*new_pmd)) { VM_BUG_ON(pmd_trans_huge(*new_pmd)); return false; } diff --git a/mm/mremap.c b/mm/mremap.c index da107f2c71bf..bfc3d1902a94 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -133,7 +133,7 @@ static pte_t move_soft_dirty_pte(pte_t pte) return pte; } -static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, +static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, unsigned long old_addr, unsigned long old_end, struct vm_area_struct *new_vma, pmd_t *new_pmd, unsigned long new_addr, bool need_rmap_locks) @@ -143,6 +143,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, spinlock_t *old_ptl, *new_ptl; bool force_flush = false; unsigned long len = old_end - old_addr; + int err = 0; /* * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma @@ -170,8 +171,16 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, * pte locks because exclusive mmap_lock prevents deadlock. 
*/ old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl); - new_pte = pte_offset_map(new_pmd, new_addr); - new_ptl = pte_lockptr(mm, new_pmd); + if (!old_pte) { + err = -EAGAIN; + goto out; + } + new_pte = pte_offset_map_nolock(mm, new_pmd, new_addr, &new_ptl); + if (!new_pte) { + pte_unmap_unlock(old_pte, old_ptl); + err = -EAGAIN; + goto out; + } if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); flush_tlb_batched_pending(vma->vm_mm); @@ -208,8 +217,10 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, spin_unlock(new_ptl); pte_unmap(new_pte - 1); pte_unmap_unlock(old_pte - 1, old_ptl); +out: if (need_rmap_locks) drop_rmap_locks(vma); + return err; } #ifndef arch_supports_page_table_move @@ -537,6 +548,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma, new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); if (!new_pmd) break; +again: if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) { if (extent == HPAGE_PMD_SIZE && @@ -544,8 +556,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma, old_pmd, new_pmd, need_rmap_locks)) continue; split_huge_pmd(vma, old_pmd, old_addr); - if (pmd_trans_unstable(old_pmd)) - continue; } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) && extent == PMD_SIZE) { /* @@ -556,11 +566,13 @@ unsigned long move_page_tables(struct vm_area_struct *vma, old_pmd, new_pmd, true)) continue; } - + if (pmd_none(*old_pmd)) + continue; if (pte_alloc(new_vma->vm_mm, new_pmd)) break; - move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, - new_pmd, new_addr, need_rmap_locks); + if (move_ptes(vma, old_pmd, old_addr, old_addr + extent, + new_vma, new_pmd, new_addr, need_rmap_locks) < 0) + goto again; } mmu_notifier_invalidate_range_end(&range); -- cgit v1.2.3 From f3cd4ab0aabf0c7f25ad438b37954db970174731 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:34:03 -0700 Subject: mm/madvise: clean up pte_offset_map_lock() scans MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Came here to make madvise's several pte_offset_map_lock() scans advance to next extent on failure, and remove superfluous pmd_trans_unstable() and pmd_none_or_trans_huge_or_clear_bad() calls. But also did some nearby cleanup. swapin_walk_pmd_entry(): don't name an address "index"; don't drop the lock after every pte, only when calling out to read_swap_cache_async(). madvise_cold_or_pageout_pte_range() and madvise_free_pte_range(): prefer "start_pte" for pointer, orig_pte usually denotes a saved pte value; leave lazy MMU mode before unlocking; merge the success and failure paths after split_folio(). Link: https://lkml.kernel.org/r/cc4d9a88-9da6-362-50d9-6735c2b125c6@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/madvise.c | 122 +++++++++++++++++++++++++++++++++-------------------------- 1 file changed, 68 insertions(+), 54 deletions(-) (limited to 'mm') diff --git a/mm/madvise.c b/mm/madvise.c index b5ffbaf616f5..0af64c4a8f82 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -188,37 +188,43 @@ success: #ifdef CONFIG_SWAP static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, - unsigned long end, struct mm_walk *walk) + unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->private; - unsigned long index; struct swap_iocb *splug = NULL; + pte_t *ptep = NULL; + spinlock_t *ptl; + unsigned long addr; - if (pmd_none_or_trans_huge_or_clear_bad(pmd)) - return 0; - - for (index = start; index != end; index += PAGE_SIZE) { + for (addr = start; addr < end; addr += PAGE_SIZE) { pte_t pte; swp_entry_t entry; struct page *page; - spinlock_t *ptl; - pte_t *ptep; - ptep = pte_offset_map_lock(vma->vm_mm, pmd, index, &ptl); - pte = *ptep; - pte_unmap_unlock(ptep, ptl); + if (!ptep++) { + ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!ptep) + break; + } + pte = *ptep; if (!is_swap_pte(pte)) continue; entry = pte_to_swp_entry(pte); if (unlikely(non_swap_entry(entry))) continue; + pte_unmap_unlock(ptep, ptl); + ptep = NULL; + page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, - vma, index, false, &splug); + vma, addr, false, &splug); if (page) put_page(page); } + + if (ptep) + pte_unmap_unlock(ptep, ptl); swap_read_unplug(splug); cond_resched(); @@ -340,7 +346,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, bool pageout = private->pageout; struct mm_struct *mm = tlb->mm; struct vm_area_struct *vma = walk->vma; - pte_t *orig_pte, *pte, ptent; + pte_t *start_pte, *pte, ptent; spinlock_t *ptl; struct folio *folio = NULL; LIST_HEAD(folio_list); @@ -422,11 +428,11 @@ huge_unlock: } regular_folio: - if (pmd_trans_unstable(pmd)) - return 0; #endif tlb_change_page_size(tlb, PAGE_SIZE); - orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!start_pte) + return 0; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); for (; addr < end; pte++, addr += PAGE_SIZE) { @@ -447,25 +453,28 @@ regular_folio: * are sure it's worth. Split it if we are only owner. 
*/ if (folio_test_large(folio)) { + int err; + if (folio_mapcount(folio) != 1) break; if (pageout_anon_only_filter && !folio_test_anon(folio)) break; - folio_get(folio); - if (!folio_trylock(folio)) { - folio_put(folio); - break; - } - pte_unmap_unlock(orig_pte, ptl); - if (split_folio(folio)) { - folio_unlock(folio); - folio_put(folio); - orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!folio_trylock(folio)) break; - } + folio_get(folio); + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(start_pte, ptl); + start_pte = NULL; + err = split_folio(folio); folio_unlock(folio); folio_put(folio); - orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + if (err) + break; + start_pte = pte = + pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!start_pte) + break; + arch_enter_lazy_mmu_mode(); pte--; addr -= PAGE_SIZE; continue; @@ -510,8 +519,10 @@ regular_folio: folio_deactivate(folio); } - arch_leave_lazy_mmu_mode(); - pte_unmap_unlock(orig_pte, ptl); + if (start_pte) { + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(start_pte, ptl); + } if (pageout) reclaim_pages(&folio_list); cond_resched(); @@ -612,7 +623,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, struct mm_struct *mm = tlb->mm; struct vm_area_struct *vma = walk->vma; spinlock_t *ptl; - pte_t *orig_pte, *pte, ptent; + pte_t *start_pte, *pte, ptent; struct folio *folio; int nr_swap = 0; unsigned long next; @@ -620,13 +631,12 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, next = pmd_addr_end(addr, end); if (pmd_trans_huge(*pmd)) if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next)) - goto next; - - if (pmd_trans_unstable(pmd)) - return 0; + return 0; tlb_change_page_size(tlb, PAGE_SIZE); - orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!start_pte) + return 0; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); for (; addr != end; pte++, addr += PAGE_SIZE) { @@ -664,23 +674,26 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, * deactivate all pages. 
*/ if (folio_test_large(folio)) { + int err; + if (folio_mapcount(folio) != 1) - goto out; + break; + if (!folio_trylock(folio)) + break; folio_get(folio); - if (!folio_trylock(folio)) { - folio_put(folio); - goto out; - } - pte_unmap_unlock(orig_pte, ptl); - if (split_folio(folio)) { - folio_unlock(folio); - folio_put(folio); - orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); - goto out; - } + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(start_pte, ptl); + start_pte = NULL; + err = split_folio(folio); folio_unlock(folio); folio_put(folio); - orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + if (err) + break; + start_pte = pte = + pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!start_pte) + break; + arch_enter_lazy_mmu_mode(); pte--; addr -= PAGE_SIZE; continue; @@ -725,17 +738,18 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, } folio_mark_lazyfree(folio); } -out: + if (nr_swap) { if (current->mm == mm) sync_mm_rss(mm); - add_mm_counter(mm, MM_SWAPENTS, nr_swap); } - arch_leave_lazy_mmu_mode(); - pte_unmap_unlock(orig_pte, ptl); + if (start_pte) { + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(start_pte, ptl); + } cond_resched(); -next: + return 0; } -- cgit v1.2.3 From 179d3e4f3bfa5947821c1b1bc6aa49a4797b7f21 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:35:14 -0700 Subject: mm/madvise: clean up force_shm_swapin_readahead() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some nearby MADV_WILLNEED cleanup unrelated to pte_offset_map_lock(). shmem_swapin_range() is a better name than force_shm_swapin_readahead(). Fix unimportant off-by-one on end_index. Call the swp_entry_t "entry" rather than "swap": either is okay, but entry is the name used elsewhere in mm/madvise.c. Do not assume GFP_HIGHUSER_MOVABLE: that's right for anon swap, but shmem should take gfp from mapping. Pass the actual vma and address to read_swap_cache_async(), in case a NUMA mempolicy applies. lru_add_drain() at outer level, like madvise_willneed()'s other branch. Link: https://lkml.kernel.org/r/67e18875-ffb3-ec27-346-f350e07bed87@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/madvise.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/madvise.c b/mm/madvise.c index 0af64c4a8f82..9b3c9610052f 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -235,30 +235,34 @@ static const struct mm_walk_ops swapin_walk_ops = { .pmd_entry = swapin_walk_pmd_entry, }; -static void force_shm_swapin_readahead(struct vm_area_struct *vma, +static void shmem_swapin_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct address_space *mapping) { XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start)); - pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1); + pgoff_t end_index = linear_page_index(vma, end) - 1; struct page *page; struct swap_iocb *splug = NULL; rcu_read_lock(); xas_for_each(&xas, page, end_index) { - swp_entry_t swap; + unsigned long addr; + swp_entry_t entry; if (!xa_is_value(page)) continue; - swap = radix_to_swp_entry(page); + entry = radix_to_swp_entry(page); /* There might be swapin error entries in shmem mapping. */ - if (non_swap_entry(swap)) + if (non_swap_entry(entry)) continue; + + addr = vma->vm_start + + ((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT); xas_pause(&xas); rcu_read_unlock(); - page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE, - NULL, 0, false, &splug); + page = read_swap_cache_async(entry, mapping_gfp_mask(mapping), + vma, addr, false, &splug); if (page) put_page(page); @@ -266,8 +270,6 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma, } rcu_read_unlock(); swap_read_unplug(splug); - - lru_add_drain(); /* Push any new pages onto the LRU now */ } #endif /* CONFIG_SWAP */ @@ -291,8 +293,8 @@ static long madvise_willneed(struct vm_area_struct *vma, } if (shmem_mapping(file->f_mapping)) { - force_shm_swapin_readahead(vma, start, end, - file->f_mapping); + shmem_swapin_range(vma, start, end, file->f_mapping); + lru_add_drain(); /* Push any new pages onto the LRU now */ return 0; } #else -- cgit v1.2.3 From d850fa729873787a4030eed5fd875d00eb63946b Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:36:11 -0700 Subject: mm/swapoff: allow pte_offset_map[_lock]() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adjust unuse_pte() and unuse_pte_range() to allow pte_offset_map_lock() and pte_offset_map() failure; remove pmd_none_or_trans_huge_or_clear_bad() from unuse_pmd_range() now that pte_offset_map() does all that itself. Link: https://lkml.kernel.org/r/c4d831-13c3-9dfd-70c2-64514ad951fd@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/swapfile.c | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index df312534e239..74dd4d2337b7 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1757,7 +1757,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, hwposioned = true; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); - if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) { + if (unlikely(!pte || !pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) { ret = 0; goto out; } @@ -1810,7 +1810,8 @@ setpte: set_pte_at(vma->vm_mm, addr, pte, new_pte); swap_free(entry); out: - pte_unmap_unlock(pte, ptl); + if (pte) + pte_unmap_unlock(pte, ptl); if (page != swapcache) { unlock_page(page); put_page(page); @@ -1822,17 +1823,22 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned int type) { - swp_entry_t entry; - pte_t *pte; + pte_t *pte = NULL; struct swap_info_struct *si; - int ret = 0; si = swap_info[type]; - pte = pte_offset_map(pmd, addr); do { struct folio *folio; unsigned long offset; unsigned char swp_count; + swp_entry_t entry; + int ret; + + if (!pte++) { + pte = pte_offset_map(pmd, addr); + if (!pte) + break; + } if (!is_swap_pte(*pte)) continue; @@ -1843,6 +1849,8 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, offset = swp_offset(entry); pte_unmap(pte); + pte = NULL; + folio = swap_cache_get_folio(entry, vma, addr); if (!folio) { struct page *page; @@ -1861,8 +1869,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, if (!folio) { swp_count = READ_ONCE(si->swap_map[offset]); if (swp_count == 0 || swp_count == SWAP_MAP_BAD) - goto try_next; - + continue; return -ENOMEM; } @@ -1872,20 +1879,17 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, if (ret < 0) { folio_unlock(folio); folio_put(folio); - goto out; + return ret; } folio_free_swap(folio); folio_unlock(folio); folio_put(folio); -try_next: - pte = pte_offset_map(pmd, addr); - } while (pte++, addr += PAGE_SIZE, addr != end); - pte_unmap(pte - 1); + } while (addr += PAGE_SIZE, addr != end); - ret = 0; -out: - return ret; + if (pte) + pte_unmap(pte); + return 0; } static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, @@ -1900,8 +1904,6 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, do { cond_resched(); next = pmd_addr_end(addr, end); - if (pmd_none_or_trans_huge_or_clear_bad(pmd)) - continue; ret = unuse_pte_range(vma, pmd, addr, next, type); if (ret) return ret; -- cgit v1.2.3 From 52fc048320adf1b1c07a2627461dca9f7d7956ff Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:37:12 -0700 Subject: mm/mglru: allow pte_offset_map_nolock() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit MGLRU's walk_pte_range() use the safer pte_offset_map_nolock(), rather than pte_lockptr(), to get the ptl for its trylock. 
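(For orientation, a sketch of that combination, with a hypothetical scanner: the nolock lookup paired with a trylock, where either failure just means giving up on this extent.)

	/* Sketch only: hypothetical scanner pairing pte_offset_map_nolock() with a trylock */
	static bool example_try_scan(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, unsigned long end)
	{
		spinlock_t *ptl;
		pte_t *pte;

		pte = pte_offset_map_nolock(mm, pmd, addr, &ptl);
		if (!pte)
			return false;
		if (!spin_trylock(ptl)) {
			pte_unmap(pte);
			return false;
		}
		for (; addr != end; pte++, addr += PAGE_SIZE) {
			/* ... test and clear the young bit, etc. ... */
		}
		pte_unmap_unlock(pte - 1, ptl);
		return true;
	}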
Just return false and move on to next extent if it fails, like when the trylock fails. Remove the VM_WARN_ON_ONCE(pmd_leaf) since that will happen, rarely. Link: https://lkml.kernel.org/r/51ece73e-7398-2e4a-2384-56708c87844f@google.com Signed-off-by: Hugh Dickins Acked-by: Yu Zhao Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/vmscan.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index a4e2936f6d35..3f64c8d9f629 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4024,15 +4024,15 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end, struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); int old_gen, new_gen = lru_gen_from_seq(walk->max_seq); - VM_WARN_ON_ONCE(pmd_leaf(*pmd)); - - ptl = pte_lockptr(args->mm, pmd); - if (!spin_trylock(ptl)) + pte = pte_offset_map_nolock(args->mm, pmd, start & PMD_MASK, &ptl); + if (!pte) return false; + if (!spin_trylock(ptl)) { + pte_unmap(pte); + return false; + } arch_enter_lazy_mmu_mode(); - - pte = pte_offset_map(pmd, start & PMD_MASK); restart: for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) { unsigned long pfn; @@ -4073,10 +4073,8 @@ restart: if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end)) goto restart; - pte_unmap(pte); - arch_leave_lazy_mmu_mode(); - spin_unlock(ptl); + pte_unmap_unlock(pte, ptl); return suitable_to_scan(total, young); } -- cgit v1.2.3 From 4b56069c95d69bfce0c0ffb2531d08216268a972 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:38:17 -0700 Subject: mm/migrate_device: allow pte_offset_map_lock() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit migrate_vma_collect_pmd(): remove the pmd_trans_unstable() handling after splitting huge zero pmd, and the pmd_none() handling after successfully splitting huge page: those are now managed inside pte_offset_map_lock(), and by "goto again" when it fails. But the skip after unsuccessful split_huge_page() must stay: it avoids an endless loop. The skip when pmd_bad()? Remove that: it will be treated as a hole rather than a skip once cleared by pte_offset_map_lock(), but with different timing that would be so anyway; and it's arguably best to leave the pmd_bad() handling centralized there. migrate_vma_insert_page(): remove comment on the old pte_offset_map() and old locking limitations; remove the pmd_trans_unstable() check and just proceed to pte_offset_map_lock(), aborting when it fails (page has been charged to memcg, but as in other cases, it's uncharged when freed). Link: https://lkml.kernel.org/r/1131be62-2e84-da2f-8f45-807b2cbeeec5@google.com Signed-off-by: Hugh Dickins Reviewed-by: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/migrate_device.c | 31 ++++--------------------------- 1 file changed, 4 insertions(+), 27 deletions(-) (limited to 'mm') diff --git a/mm/migrate_device.c b/mm/migrate_device.c index d30c9de60b0d..a14af6b12b04 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -83,9 +83,6 @@ again: if (is_huge_zero_page(page)) { spin_unlock(ptl); split_huge_pmd(vma, pmdp, addr); - if (pmd_trans_unstable(pmdp)) - return migrate_vma_collect_skip(start, end, - walk); } else { int ret; @@ -100,16 +97,12 @@ again: if (ret) return migrate_vma_collect_skip(start, end, walk); - if (pmd_none(*pmdp)) - return migrate_vma_collect_hole(start, end, -1, - walk); } } - if (unlikely(pmd_bad(*pmdp))) - return migrate_vma_collect_skip(start, end, walk); - ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); + if (!ptep) + goto again; arch_enter_lazy_mmu_mode(); for (; addr < end; addr += PAGE_SIZE, ptep++) { @@ -595,27 +588,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, pmdp = pmd_alloc(mm, pudp, addr); if (!pmdp) goto abort; - if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp)) goto abort; - - /* - * Use pte_alloc() instead of pte_alloc_map(). We can't run - * pte_offset_map() on pmds where a huge pmd might be created - * from a different thread. - * - * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when - * parallel threads are excluded by other means. - * - * Here we only have mmap_read_lock(mm). - */ if (pte_alloc(mm, pmdp)) goto abort; - - /* See the comment in pte_alloc_one_map() */ - if (unlikely(pmd_trans_unstable(pmdp))) - goto abort; - if (unlikely(anon_vma_prepare(vma))) goto abort; if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) @@ -650,7 +626,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, } ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); - + if (!ptep) + goto abort; if (check_stable_address_space(mm)) goto unlock_abort; -- cgit v1.2.3 From 2378118bd9da1d96d8569ff675183e2cf8973799 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:39:59 -0700 Subject: mm/gup: remove FOLL_SPLIT_PMD use of pmd_trans_unstable() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is now no reason for follow_pmd_mask()'s FOLL_SPLIT_PMD block to distinguish huge_zero_page from a normal THP: follow_page_pte() handles any instability, and here it's a good idea to replace any pmd_none(*pmd) by a page table a.s.a.p, in the huge_zero_page case as for a normal THP; and this removes an unnecessary possibility of -EBUSY failure. (Hmm, couldn't the normal THP case have hit an unstably refaulted THP before? But there are only two, exceptional, users of FOLL_SPLIT_PMD.) Link: https://lkml.kernel.org/r/59fd15dd-4d39-5ec-2043-1d5117f7f85@google.com Signed-off-by: Hugh Dickins Reviewed-by: Yang Shi Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/gup.c | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index 598e8c98367b..838db6c0bfc2 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -682,21 +682,10 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma, return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } if (flags & FOLL_SPLIT_PMD) { - int ret; - page = pmd_page(*pmd); - if (is_huge_zero_page(page)) { - spin_unlock(ptl); - ret = 0; - split_huge_pmd(vma, pmd, address); - if (pmd_trans_unstable(pmd)) - ret = -EBUSY; - } else { - spin_unlock(ptl); - split_huge_pmd(vma, pmd, address); - ret = pte_alloc(mm, pmd) ? -ENOMEM : 0; - } - - return ret ? ERR_PTR(ret) : + spin_unlock(ptl); + split_huge_pmd(vma, pmd, address); + /* If pmd was left empty, stuff a page table in there quickly */ + return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) : follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } page = follow_trans_huge_pmd(vma, address, pmd, flags); -- cgit v1.2.3 From c9c1ee20ee84b1a827437ca6dba2f06fe475d9b1 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:41:31 -0700 Subject: mm/huge_memory: split huge pmd under one pte_offset_map() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit __split_huge_zero_page_pmd() use a single pte_offset_map() to sweep the extent: it's already under pmd_lock(), so this is no worse for latency; and since it's supposed to have full control of the just-withdrawn page table, here choose to VM_BUG_ON if it were to fail. And please don't increment haddr by PAGE_SIZE, that should remain huge aligned: declare a separate addr (not a bugfix, but it was deceptive). __split_huge_pmd_locked() likewise (but it had declared a separate addr); and change its BUG_ON(!pte_none) to VM_BUG_ON, for consistency with zero (those deposited page tables are sometimes victims of random corruption). Link: https://lkml.kernel.org/r/90cbed7f-90d9-b779-4a46-d2485baf9595@google.com Signed-off-by: Hugh Dickins Reviewed-by: Yang Shi Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/huge_memory.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e21b3e3eb994..31bc8fa768e3 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2037,6 +2037,8 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, struct mm_struct *mm = vma->vm_mm; pgtable_t pgtable; pmd_t _pmd, old_pmd; + unsigned long addr; + pte_t *pte; int i; /* @@ -2052,17 +2054,20 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, pgtable = pgtable_trans_huge_withdraw(mm, pmd); pmd_populate(mm, &_pmd, pgtable); - for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { - pte_t *pte, entry; - entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); + pte = pte_offset_map(&_pmd, haddr); + VM_BUG_ON(!pte); + for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { + pte_t entry; + + entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot); entry = pte_mkspecial(entry); if (pmd_uffd_wp(old_pmd)) entry = pte_mkuffd_wp(entry); - pte = pte_offset_map(&_pmd, haddr); VM_BUG_ON(!pte_none(*pte)); - set_pte_at(mm, haddr, pte, entry); - pte_unmap(pte); + set_pte_at(mm, addr, pte, entry); + pte++; } + pte_unmap(pte - 1); smp_wmb(); /* make pte visible before pmd */ pmd_populate(mm, pmd, pgtable); } @@ -2077,6 +2082,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; bool anon_exclusive = false, dirty = false; unsigned long addr; + pte_t *pte; int i; VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); @@ -2205,8 +2211,10 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, pgtable = pgtable_trans_huge_withdraw(mm, pmd); pmd_populate(mm, &_pmd, pgtable); + pte = pte_offset_map(&_pmd, haddr); + VM_BUG_ON(!pte); for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { - pte_t entry, *pte; + pte_t entry; /* * Note that NUMA hinting access restrictions are not * transferred to avoid any possibility of altering @@ -2249,11 +2257,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, entry = pte_mkuffd_wp(entry); page_add_anon_rmap(page + i, vma, addr, false); } - pte = pte_offset_map(&_pmd, addr); - BUG_ON(!pte_none(*pte)); + VM_BUG_ON(!pte_none(*pte)); set_pte_at(mm, addr, pte, entry); - pte_unmap(pte); + pte++; } + pte_unmap(pte - 1); if (!pmd_migration) page_remove_rmap(page, vma, true); -- cgit v1.2.3 From 895f5ee464cc90a5f38163720431c849e93ead97 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:42:40 -0700 Subject: mm/khugepaged: allow pte_offset_map[_lock]() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit __collapse_huge_page_swapin(): don't drop the map after every pte, it only has to be dropped by do_swap_page(); give up if pte_offset_map() fails; trace_mm_collapse_huge_page_swapin() at the end, with result; fix comment on returned result; fix vmf.pgoff, though it's not used. 
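(The resulting lazily-remapped scan, used here and in similar form in the swapoff and madvise patches, is roughly the sketch below; the function is hypothetical and the loop body illustrative. "!pte++" tests whether the page table is currently unmapped, and otherwise advances to the next entry; code that unmaps, e.g. before calling something that may sleep or fault, sets pte back to NULL so the next iteration remaps at addr.)

	/* Sketch only: lazily-remapped pte scan (hypothetical function) */
	static void example_scan(struct mm_struct *mm, pmd_t *pmd,
				 unsigned long addr, unsigned long end)
	{
		pte_t *pte = NULL;

		for (; addr < end; addr += PAGE_SIZE) {
			if (!pte++) {
				pte = pte_offset_map(pmd, addr);
				if (!pte)
					break;	/* pmd changed: give up on this extent */
			}
			/*
			 * ... examine *pte; pte_unmap() and set pte = NULL
			 * before any call that might sleep or fault ...
			 */
		}
		if (pte)
			pte_unmap(pte);
	}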
collapse_huge_page(): use pte_offset_map_lock() on the _pmd returned from clearing; allow failure, but it should be impossible there. hpage_collapse_scan_pmd() and collapse_pte_mapped_thp() allow for pte_offset_map_lock() failure. Link: https://lkml.kernel.org/r/6513e85-d798-34ec-3762-7c24ffb9329@google.com Signed-off-by: Hugh Dickins Reviewed-by: Yang Shi Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/khugepaged.c | 72 +++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 49 insertions(+), 23 deletions(-) (limited to 'mm') diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 2d206e62d358..04c22b5309d7 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -991,9 +991,8 @@ static int check_pmd_still_valid(struct mm_struct *mm, * Only done if hpage_collapse_scan_pmd believes it is worthwhile. * * Called and returns without pte mapped or spinlocks held. - * Note that if false is returned, mmap_lock will be released. + * Returns result: if not SCAN_SUCCEED, mmap_lock has been released. */ - static int __collapse_huge_page_swapin(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, @@ -1002,23 +1001,35 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm, int swapped_in = 0; vm_fault_t ret = 0; unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE); + int result; + pte_t *pte = NULL; for (address = haddr; address < end; address += PAGE_SIZE) { struct vm_fault vmf = { .vma = vma, .address = address, - .pgoff = linear_page_index(vma, haddr), + .pgoff = linear_page_index(vma, address), .flags = FAULT_FLAG_ALLOW_RETRY, .pmd = pmd, }; - vmf.pte = pte_offset_map(pmd, address); - vmf.orig_pte = *vmf.pte; - if (!is_swap_pte(vmf.orig_pte)) { - pte_unmap(vmf.pte); - continue; + if (!pte++) { + pte = pte_offset_map(pmd, address); + if (!pte) { + mmap_read_unlock(mm); + result = SCAN_PMD_NULL; + goto out; + } } + + vmf.orig_pte = *pte; + if (!is_swap_pte(vmf.orig_pte)) + continue; + + vmf.pte = pte; ret = do_swap_page(&vmf); + /* Which unmaps pte (after perhaps re-checking the entry) */ + pte = NULL; /* * do_swap_page returns VM_FAULT_RETRY with released mmap_lock. @@ -1027,24 +1038,29 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm, * resulting in later failure. 
*/ if (ret & VM_FAULT_RETRY) { - trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); /* Likely, but not guaranteed, that page lock failed */ - return SCAN_PAGE_LOCK; + result = SCAN_PAGE_LOCK; + goto out; } if (ret & VM_FAULT_ERROR) { mmap_read_unlock(mm); - trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); - return SCAN_FAIL; + result = SCAN_FAIL; + goto out; } swapped_in++; } + if (pte) + pte_unmap(pte); + /* Drain LRU add pagevec to remove extra pin on the swapped in pages */ if (swapped_in) lru_add_drain(); - trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1); - return SCAN_SUCCEED; + result = SCAN_SUCCEED; +out: + trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result); + return result; } static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm, @@ -1144,9 +1160,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, address + HPAGE_PMD_SIZE); mmu_notifier_invalidate_range_start(&range); - pte = pte_offset_map(pmd, address); - pte_ptl = pte_lockptr(mm, pmd); - pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ /* * This removes any huge TLB entry from the CPU so we won't allow @@ -1161,13 +1174,18 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, mmu_notifier_invalidate_range_end(&range); tlb_remove_table_sync_one(); - spin_lock(pte_ptl); - result = __collapse_huge_page_isolate(vma, address, pte, cc, - &compound_pagelist); - spin_unlock(pte_ptl); + pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl); + if (pte) { + result = __collapse_huge_page_isolate(vma, address, pte, cc, + &compound_pagelist); + spin_unlock(pte_ptl); + } else { + result = SCAN_PMD_NULL; + } if (unlikely(result != SCAN_SUCCEED)) { - pte_unmap(pte); + if (pte) + pte_unmap(pte); spin_lock(pmd_ptl); BUG_ON(!pmd_none(*pmd)); /* @@ -1251,6 +1269,11 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, memset(cc->node_load, 0, sizeof(cc->node_load)); nodes_clear(cc->alloc_nmask); pte = pte_offset_map_lock(mm, pmd, address, &ptl); + if (!pte) { + result = SCAN_PMD_NULL; + goto out; + } + for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, _address += PAGE_SIZE) { pte_t pteval = *_pte; @@ -1620,8 +1643,10 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, * lockless_pages_from_mm() and the hardware page walker can access page * tables while all the high-level locks are held in write mode. */ - start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); result = SCAN_FAIL; + start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); + if (!start_pte) + goto drop_immap; /* step 1: check all mapped PTEs are to the right huge page */ for (i = 0, addr = haddr, pte = start_pte; @@ -1695,6 +1720,7 @@ drop_hpage: abort: pte_unmap_unlock(start_pte, ptl); +drop_immap: i_mmap_unlock_write(vma->vm_file->f_mapping); goto drop_hpage; } -- cgit v1.2.3 From 3db82b9374ca921b8b820a75e83809d5c4133d8f Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:43:38 -0700 Subject: mm/memory: allow pte_offset_map[_lock]() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit copy_pte_range(): use pte_offset_map_nolock(), and allow for it to fail; but with a comment on some further assumptions that are being made there. 
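The resulting control flow in copy_pte_range(), as a minimal sketch of the hunk below (destination side unchanged, source side now tolerant of a vanished page table; surrounding declarations elided):

	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte) {
		ret = -ENOMEM;
		goto out;
	}
	src_pte = pte_offset_map_nolock(src_mm, src_pmd, addr, &src_ptl);
	if (!src_pte) {
		/* source page table disappeared: nothing to copy at this pmd */
		pte_unmap_unlock(dst_pte, dst_ptl);
		goto out;	/* ret == 0 */
	}
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
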
zap_pte_range() and zap_pmd_range(): adjust their interaction so that a pte_offset_map_lock() failure in zap_pte_range() leads to a retry in zap_pmd_range(); remove call to pmd_none_or_trans_huge_or_clear_bad(). Allow pte_offset_map_lock() to fail in many functions. Update comment on calling pte_alloc() in do_anonymous_page(). Remove redundant calls to pmd_trans_unstable(), pmd_devmap_trans_unstable(), pmd_none() and pmd_bad(); but leave pmd_none_or_clear_bad() calls in free_pmd_range() and copy_pmd_range(), those do simplify the next level down. Link: https://lkml.kernel.org/r/bb548d50-e99a-f29e-eab1-a43bef2a1287@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/memory.c | 167 +++++++++++++++++++++++++++++------------------------------- 1 file changed, 81 insertions(+), 86 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 221b21623644..4ab4de234b76 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1012,13 +1012,25 @@ again: progress = 0; init_rss_vec(rss); + /* + * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the + * error handling here, assume that exclusive mmap_lock on dst and src + * protects anon from unexpected THP transitions; with shmem and file + * protected by mmap_lock-less collapse skipping areas with anon_vma + * (whereas vma_needs_copy() skips areas without anon_vma). A rework + * can remove such assumptions later, but this is good enough for now. + */ dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); if (!dst_pte) { ret = -ENOMEM; goto out; } - src_pte = pte_offset_map(src_pmd, addr); - src_ptl = pte_lockptr(src_mm, src_pmd); + src_pte = pte_offset_map_nolock(src_mm, src_pmd, addr, &src_ptl); + if (!src_pte) { + pte_unmap_unlock(dst_pte, dst_ptl); + /* ret == 0 */ + goto out; + } spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); orig_src_pte = src_pte; orig_dst_pte = dst_pte; @@ -1083,8 +1095,7 @@ again: } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); - spin_unlock(src_ptl); - pte_unmap(orig_src_pte); + pte_unmap_unlock(orig_src_pte, src_ptl); add_mm_rss_vec(dst_mm, rss); pte_unmap_unlock(orig_dst_pte, dst_ptl); cond_resched(); @@ -1388,10 +1399,11 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, swp_entry_t entry; tlb_change_page_size(tlb, PAGE_SIZE); -again: init_rss_vec(rss); - start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); - pte = start_pte; + start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!pte) + return addr; + flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); do { @@ -1507,17 +1519,10 @@ again: * If we forced a TLB flush (either due to running out of * batch buffers or because we needed to flush dirty TLB * entries before releasing the ptl), free the batched - * memory too. Restart if we didn't do everything. + * memory too. Come back again if we didn't do everything. 
*/ - if (force_flush) { - force_flush = 0; + if (force_flush) tlb_flush_mmu(tlb); - } - - if (addr != end) { - cond_resched(); - goto again; - } return addr; } @@ -1536,8 +1541,10 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { if (next - addr != HPAGE_PMD_SIZE) __split_huge_pmd(vma, pmd, addr, false, NULL); - else if (zap_huge_pmd(tlb, vma, pmd, addr)) - goto next; + else if (zap_huge_pmd(tlb, vma, pmd, addr)) { + addr = next; + continue; + } /* fall through */ } else if (details && details->single_folio && folio_test_pmd_mappable(details->single_folio) && @@ -1550,20 +1557,14 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, */ spin_unlock(ptl); } - - /* - * Here there can be other concurrent MADV_DONTNEED or - * trans huge page faults running, and if the pmd is - * none or trans huge it can change under us. This is - * because MADV_DONTNEED holds the mmap_lock in read - * mode. - */ - if (pmd_none_or_trans_huge_or_clear_bad(pmd)) - goto next; - next = zap_pte_range(tlb, vma, pmd, addr, next, details); -next: - cond_resched(); - } while (pmd++, addr = next, addr != end); + if (pmd_none(*pmd)) { + addr = next; + continue; + } + addr = zap_pte_range(tlb, vma, pmd, addr, next, details); + if (addr != next) + pmd--; + } while (pmd++, cond_resched(), addr != end); return addr; } @@ -1905,6 +1906,10 @@ more: const int batch_size = min_t(int, pages_to_write_in_pmd, 8); start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); + if (!start_pte) { + ret = -EFAULT; + goto out; + } for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { int err = insert_page_in_batch_locked(vma, pte, addr, pages[curr_page_idx], prot); @@ -2572,10 +2577,10 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, mapped_pte = pte = (mm == &init_mm) ? pte_offset_kernel(pmd, addr) : pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!pte) + return -EINVAL; } - BUG_ON(pmd_huge(*pmd)); - arch_enter_lazy_mmu_mode(); if (fn) { @@ -2804,7 +2809,6 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, int ret; void *kaddr; void __user *uaddr; - bool locked = false; struct vm_area_struct *vma = vmf->vma; struct mm_struct *mm = vma->vm_mm; unsigned long addr = vmf->address; @@ -2830,12 +2834,12 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, * On architectures with software "accessed" bits, we would * take a double page fault, so mark it accessed here. */ + vmf->pte = NULL; if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) { pte_t entry; vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); - locked = true; - if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { + if (unlikely(!vmf->pte || !pte_same(*vmf->pte, vmf->orig_pte))) { /* * Other thread has already handled the fault * and update local tlb only @@ -2857,13 +2861,12 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, * zeroes. 
*/ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { - if (locked) + if (vmf->pte) goto warn; /* Re-validate under PTL if the page is still mapped */ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); - locked = true; - if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { + if (unlikely(!vmf->pte || !pte_same(*vmf->pte, vmf->orig_pte))) { /* The PTE changed under us, update local tlb */ update_mmu_tlb(vma, addr, vmf->pte); ret = -EAGAIN; @@ -2888,7 +2891,7 @@ warn: ret = 0; pte_unlock: - if (locked) + if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); kunmap_atomic(kaddr); flush_dcache_page(dst); @@ -3110,7 +3113,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * Re-check the pte - we dropped the lock */ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); - if (likely(pte_same(*vmf->pte, vmf->orig_pte))) { + if (likely(vmf->pte && pte_same(*vmf->pte, vmf->orig_pte))) { if (old_folio) { if (!folio_test_anon(old_folio)) { dec_mm_counter(mm, mm_counter_file(&old_folio->page)); @@ -3178,19 +3181,20 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) /* Free the old page.. */ new_folio = old_folio; page_copied = 1; - } else { + pte_unmap_unlock(vmf->pte, vmf->ptl); + } else if (vmf->pte) { update_mmu_tlb(vma, vmf->address, vmf->pte); + pte_unmap_unlock(vmf->pte, vmf->ptl); } - if (new_folio) - folio_put(new_folio); - - pte_unmap_unlock(vmf->pte, vmf->ptl); /* * No need to double call mmu_notifier->invalidate_range() callback as * the above ptep_clear_flush_notify() did already call it. */ mmu_notifier_invalidate_range_only_end(&range); + + if (new_folio) + folio_put(new_folio); if (old_folio) { if (page_copied) free_swap_cache(&old_folio->page); @@ -3230,6 +3234,8 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); + if (!vmf->pte) + return VM_FAULT_NOPAGE; /* * We might have raced with another page fault while we released the * pte_offset_map_lock. @@ -3591,10 +3597,11 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (likely(pte_same(*vmf->pte, vmf->orig_pte))) + if (likely(vmf->pte && pte_same(*vmf->pte, vmf->orig_pte))) restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); - pte_unmap_unlock(vmf->pte, vmf->ptl); + if (vmf->pte) + pte_unmap_unlock(vmf->pte, vmf->ptl); folio_unlock(folio); folio_put(folio); @@ -3625,6 +3632,8 @@ static vm_fault_t pte_marker_clear(struct vm_fault *vmf) { vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); + if (!vmf->pte) + return 0; /* * Be careful so that we will only recover a special uffd-wp pte into a * none pte. Otherwise it means the pte could have changed, so retry. 
@@ -3728,7 +3737,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) vmf->page = pfn_swap_entry_to_page(entry); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) + if (unlikely(!vmf->pte || + !pte_same(*vmf->pte, vmf->orig_pte))) goto unlock; /* @@ -3805,7 +3815,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) */ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (likely(pte_same(*vmf->pte, vmf->orig_pte))) + if (likely(vmf->pte && pte_same(*vmf->pte, vmf->orig_pte))) ret = VM_FAULT_OOM; goto unlock; } @@ -3875,7 +3885,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) */ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) + if (unlikely(!vmf->pte || !pte_same(*vmf->pte, vmf->orig_pte))) goto out_nomap; if (unlikely(!folio_test_uptodate(folio))) { @@ -4001,13 +4011,15 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, vmf->address, vmf->pte); unlock: - pte_unmap_unlock(vmf->pte, vmf->ptl); + if (vmf->pte) + pte_unmap_unlock(vmf->pte, vmf->ptl); out: if (si) put_swap_device(si); return ret; out_nomap: - pte_unmap_unlock(vmf->pte, vmf->ptl); + if (vmf->pte) + pte_unmap_unlock(vmf->pte, vmf->ptl); out_page: folio_unlock(folio); out_release: @@ -4039,22 +4051,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) return VM_FAULT_SIGBUS; /* - * Use pte_alloc() instead of pte_alloc_map(). We can't run - * pte_offset_map() on pmds where a huge pmd might be created - * from a different thread. - * - * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when - * parallel threads are excluded by other means. - * - * Here we only have mmap_read_lock(mm). + * Use pte_alloc() instead of pte_alloc_map(), so that OOM can + * be distinguished from a transient failure of pte_offset_map(). */ if (pte_alloc(vma->vm_mm, vmf->pmd)) return VM_FAULT_OOM; - /* See comment in handle_pte_fault() */ - if (unlikely(pmd_trans_unstable(vmf->pmd))) - return 0; - /* Use the zero-page for reads */ if (!(vmf->flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(vma->vm_mm)) { @@ -4062,6 +4064,8 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) vma->vm_page_prot)); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); + if (!vmf->pte) + goto unlock; if (vmf_pte_changed(vmf)) { update_mmu_tlb(vma, vmf->address, vmf->pte); goto unlock; @@ -4102,6 +4106,8 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); + if (!vmf->pte) + goto release; if (vmf_pte_changed(vmf)) { update_mmu_tlb(vma, vmf->address, vmf->pte); goto release; @@ -4129,7 +4135,8 @@ setpte: /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, vmf->address, vmf->pte); unlock: - pte_unmap_unlock(vmf->pte, vmf->ptl); + if (vmf->pte) + pte_unmap_unlock(vmf->pte, vmf->ptl); return ret; release: folio_put(folio); @@ -4378,15 +4385,10 @@ vm_fault_t finish_fault(struct vm_fault *vmf) return VM_FAULT_OOM; } - /* - * See comment in handle_pte_fault() for how this scenario happens, we - * need to return NOPAGE so that we drop this page. 
- */ - if (pmd_devmap_trans_unstable(vmf->pmd)) - return VM_FAULT_NOPAGE; - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); + if (!vmf->pte) + return VM_FAULT_NOPAGE; /* Re-check under ptl */ if (likely(!vmf_pte_changed(vmf))) { @@ -4628,17 +4630,11 @@ static vm_fault_t do_fault(struct vm_fault *vmf) * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ if (!vma->vm_ops->fault) { - /* - * If we find a migration pmd entry or a none pmd entry, which - * should never happen, return SIGBUS - */ - if (unlikely(!pmd_present(*vmf->pmd))) + vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + if (unlikely(!vmf->pte)) ret = VM_FAULT_SIGBUS; else { - vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, - vmf->pmd, - vmf->address, - &vmf->ptl); /* * Make sure this is not a temporary clearing of pte * by holding ptl and checking again. A R/M/W update @@ -5427,10 +5423,9 @@ int follow_pte(struct mm_struct *mm, unsigned long address, pmd = pmd_offset(pud, address); VM_BUG_ON(pmd_trans_huge(*pmd)); - if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) - goto out; - ptep = pte_offset_map_lock(mm, pmd, address, ptlp); + if (!ptep) + goto out; if (!pte_present(*ptep)) goto unlock; *ptepp = ptep; -- cgit v1.2.3 From c7ad08804fae5baa7f71c0790038e8259e1066a5 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:45:05 -0700 Subject: mm/memory: handle_pte_fault() use pte_offset_map_nolock() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit handle_pte_fault() use pte_offset_map_nolock() to get the vmf.ptl which corresponds to vmf.pte, instead of pte_lockptr() being used later, when there's a chance that the pmd entry might have changed, perhaps to none, or to a huge pmd, with no split ptlock in its struct page. Remove its pmd_devmap_trans_unstable() call: pte_offset_map_nolock() will handle that case by failing. Update the "morph" comment above, looking forward to when shmem or file collapse to THP may not take mmap_lock for write (or not at all). do_numa_page() use the vmf->ptl from handle_pte_fault() at first, but refresh it when refreshing vmf->pte. do_swap_page()'s pte_unmap_same() (the thing that takes ptl to verify a two-part PAE orig_pte) use the vmf->ptl from handle_pte_fault() too; but do_swap_page() is also used by anon THP's __collapse_huge_page_swapin(), so adjust that to set vmf->ptl by pte_offset_map_nolock(). Link: https://lkml.kernel.org/r/c1107654-3929-60ac-223e-6877cbb86065@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/khugepaged.c | 6 ++++-- mm/memory.c | 38 +++++++++++++------------------------- 2 files changed, 17 insertions(+), 27 deletions(-) (limited to 'mm') diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 04c22b5309d7..d1951ed572f8 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1003,6 +1003,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm, unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE); int result; pte_t *pte = NULL; + spinlock_t *ptl; for (address = haddr; address < end; address += PAGE_SIZE) { struct vm_fault vmf = { @@ -1014,7 +1015,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm, }; if (!pte++) { - pte = pte_offset_map(pmd, address); + pte = pte_offset_map_nolock(mm, pmd, address, &ptl); if (!pte) { mmap_read_unlock(mm); result = SCAN_PMD_NULL; @@ -1022,11 +1023,12 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm, } } - vmf.orig_pte = *pte; + vmf.orig_pte = ptep_get_lockless(pte); if (!is_swap_pte(vmf.orig_pte)) continue; vmf.pte = pte; + vmf.ptl = ptl; ret = do_swap_page(&vmf); /* Which unmaps pte (after perhaps re-checking the entry) */ pte = NULL; diff --git a/mm/memory.c b/mm/memory.c index 4ab4de234b76..11f221953690 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2786,10 +2786,9 @@ static inline int pte_unmap_same(struct vm_fault *vmf) int same = 1; #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION) if (sizeof(pte_t) > sizeof(unsigned long)) { - spinlock_t *ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); - spin_lock(ptl); + spin_lock(vmf->ptl); same = pte_same(*vmf->pte, vmf->orig_pte); - spin_unlock(ptl); + spin_unlock(vmf->ptl); } #endif pte_unmap(vmf->pte); @@ -4697,7 +4696,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) * validation through pte_unmap_same(). It's of NUMA type but * the pfn may be screwed if the read is non atomic. */ - vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); spin_lock(vmf->ptl); if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); @@ -4768,8 +4766,10 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) flags |= TNF_MIGRATED; } else { flags |= TNF_MIGRATE_FAIL; - vmf->pte = pte_offset_map(vmf->pmd, vmf->address); - spin_lock(vmf->ptl); + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + if (unlikely(!vmf->pte)) + goto out; if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); goto out; @@ -4898,27 +4898,16 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) vmf->pte = NULL; vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID; } else { - /* - * If a huge pmd materialized under us just retry later. Use - * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead - * of pmd_trans_huge() to ensure the pmd didn't become - * pmd_trans_huge under us and then back to pmd_none, as a - * result of MADV_DONTNEED running immediately after a huge pmd - * fault in a different thread of this mm, in turn leading to a - * misleading pmd_trans_huge() retval. 
All we have to ensure is - * that it is a regular pmd that we can walk with - * pte_offset_map() and we can do that through an atomic read - * in C, which is what pmd_trans_unstable() provides. - */ - if (pmd_devmap_trans_unstable(vmf->pmd)) - return 0; /* * A regular pmd is established and it can't morph into a huge - * pmd from under us anymore at this point because we hold the - * mmap_lock read mode and khugepaged takes it in write mode. - * So now it's safe to run pte_offset_map(). + * pmd by anon khugepaged, since that takes mmap_lock in write + * mode; but shmem or file collapse to THP could still morph + * it into a huge pmd: just retry later if so. */ - vmf->pte = pte_offset_map(vmf->pmd, vmf->address); + vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + if (unlikely(!vmf->pte)) + return 0; vmf->orig_pte = ptep_get_lockless(vmf->pte); vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; @@ -4937,7 +4926,6 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) return do_numa_page(vmf); - vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); spin_lock(vmf->ptl); entry = vmf->orig_pte; if (unlikely(!pte_same(*vmf->pte, entry))) { -- cgit v1.2.3 From feda5c393a6c843c7bf1fc49e1381e2d3822b564 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:50:37 -0700 Subject: mm/pgtable: delete pmd_trans_unstable() and friends MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Delete pmd_trans_unstable, pmd_none_or_trans_huge_or_clear_bad() and pmd_devmap_trans_unstable(), all now unused. With mixed feelings, delete all the comments on pmd_trans_unstable(). That was very good documentation of a subtle state, and this series does not even eliminate that state: but rather, normalizes and extends it, asking pte_offset_map[_lock]() callers to anticipate failure, without regard for whether mmap_read_lock() or mmap_write_lock() is held. Retain pud_trans_unstable(), which has one use in __handle_mm_fault(), but delete its equivalent pud_none_or_trans_huge_or_dev_or_clear_bad(). While there, move the default arch_needs_pgtable_deposit() definition up near where pgtable_trans_huge_deposit() and withdraw() are declared. Link: https://lkml.kernel.org/r/5abdab3-3136-b42e-274d-9c6281bfb79@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- include/linux/pgtable.h | 103 ++++-------------------------------------------- mm/khugepaged.c | 4 -- 2 files changed, 7 insertions(+), 100 deletions(-) (limited to 'mm') diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 3fabbb018557..a1326e61d7ee 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -599,6 +599,10 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); #endif +#ifndef arch_needs_pgtable_deposit +#define arch_needs_pgtable_deposit() (false) +#endif + #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * This is an implementation of pmdp_establish() that is only suitable for an @@ -1300,9 +1304,10 @@ static inline int pud_trans_huge(pud_t pud) } #endif -/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */ -static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud) +static inline int pud_trans_unstable(pud_t *pud) { +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) pud_t pudval = READ_ONCE(*pud); if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval)) @@ -1311,104 +1316,10 @@ static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud) pud_clear_bad(pud); return 1; } - return 0; -} - -/* See pmd_trans_unstable for discussion. */ -static inline int pud_trans_unstable(pud_t *pud) -{ -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ - defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) - return pud_none_or_trans_huge_or_dev_or_clear_bad(pud); -#else - return 0; #endif -} - -#ifndef arch_needs_pgtable_deposit -#define arch_needs_pgtable_deposit() (false) -#endif -/* - * This function is meant to be used by sites walking pagetables with - * the mmap_lock held in read mode to protect against MADV_DONTNEED and - * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd - * into a null pmd and the transhuge page fault can convert a null pmd - * into an hugepmd or into a regular pmd (if the hugepage allocation - * fails). While holding the mmap_lock in read mode the pmd becomes - * stable and stops changing under us only if it's not null and not a - * transhuge pmd. When those races occurs and this function makes a - * difference vs the standard pmd_none_or_clear_bad, the result is - * undefined so behaving like if the pmd was none is safe (because it - * can return none anyway). The compiler level barrier() is critically - * important to compute the two checks atomically on the same pmdval. - * - * For 32bit kernels with a 64bit large pmd_t this automatically takes - * care of reading the pmd atomically to avoid SMP race conditions - * against pmd_populate() when the mmap_lock is hold for reading by the - * caller (a special atomic read not done by "gcc" as in the generic - * version above, is also needed when THP is disabled because the page - * fault can populate the pmd from under us). 
- */ -static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) -{ - pmd_t pmdval = pmdp_get_lockless(pmd); - /* - * !pmd_present() checks for pmd migration entries - * - * The complete check uses is_pmd_migration_entry() in linux/swapops.h - * But using that requires moving current function and pmd_trans_unstable() - * to linux/swapops.h to resolve dependency, which is too much code move. - * - * !pmd_present() is equivalent to is_pmd_migration_entry() currently, - * because !pmd_present() pages can only be under migration not swapped - * out. - * - * pmd_none() is preserved for future condition checks on pmd migration - * entries and not confusing with this function name, although it is - * redundant with !pmd_present(). - */ - if (pmd_none(pmdval) || pmd_trans_huge(pmdval) || - (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval))) - return 1; - if (unlikely(pmd_bad(pmdval))) { - pmd_clear_bad(pmd); - return 1; - } return 0; } -/* - * This is a noop if Transparent Hugepage Support is not built into - * the kernel. Otherwise it is equivalent to - * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in - * places that already verified the pmd is not none and they want to - * walk ptes while holding the mmap sem in read mode (write mode don't - * need this). If THP is not enabled, the pmd can't go away under the - * code even if MADV_DONTNEED runs, but if THP is enabled we need to - * run a pmd_trans_unstable before walking the ptes after - * split_huge_pmd returns (because it may have run when the pmd become - * null, but then a page fault can map in a THP and not a regular page). - */ -static inline int pmd_trans_unstable(pmd_t *pmd) -{ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - return pmd_none_or_trans_huge_or_clear_bad(pmd); -#else - return 0; -#endif -} - -/* - * the ordering of these checks is important for pmds with _page_devmap set. - * if we check pmd_trans_unstable() first we will trip the bad_pmd() check - * inside of pmd_none_or_trans_huge_or_clear_bad(). this will end up correctly - * returning 1 but not before it spams dmesg with the pmd_clear_bad() output. - */ -static inline int pmd_devmap_trans_unstable(pmd_t *pmd) -{ - return pmd_devmap(*pmd) || pmd_trans_unstable(pmd); -} - #ifndef CONFIG_NUMA_BALANCING /* * Technically a PTE can be PROTNONE even when not doing NUMA balancing but diff --git a/mm/khugepaged.c b/mm/khugepaged.c index d1951ed572f8..881669e738c0 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -944,10 +944,6 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, return SCAN_SUCCEED; } -/* - * See pmd_trans_unstable() for how the result may change out from - * underneath us, even if we hold mmap_lock in read. - */ static int find_pmd_or_thp_or_none(struct mm_struct *mm, unsigned long address, pmd_t **pmd) -- cgit v1.2.3 From 4f8fcf4ced0b7184149045818dcc2f9e2689b775 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:52:17 -0700 Subject: mm/swap: swap_vma_readahead() do the pte_offset_map() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit swap_vma_readahead() has been proceeding in an unconventional way, its preliminary swap_ra_info() doing the pte_offset_map() and pte_unmap(), then relying on that pte pointer even after the pte_unmap() - in its CONFIG_64BIT case (I think !CONFIG_HIGHPTE was intended; whereas 32-bit copied ptes to stack while they were mapped, but had to limit how many). 
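In sketch form, the discipline the rework adopts is: map, copy the entry out while mapped, unmap, and never touch the pointer again (simplified fragment from the readahead loop; identifiers follow the hunks below):

	pte = pte_offset_map(vmf->pmd, addr);
	if (!pte)
		break;				/* page table not there (any more) */
	pentry = ptep_get_lockless(pte);	/* READ_ONCE() the entry while mapped */
	pte_unmap(pte);
	pte = NULL;
	/*
	 * 'pte' must not be dereferenced past this point: with RCU-freed
	 * page tables, the table itself may already have been released.
	 */
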
Though it would be difficult to construct a failing testcase, accessing page table after pte_unmap() will become bad practice, even on 64-bit: an rcu_read_unlock() in pte_unmap() will allow page table to be freed. Move relevant definitions from include/linux/swap.h to mm/swap_state.c, nothing else used them. Delete the CONFIG_64BIT distinction and buffer, delete all reference to ptes from swap_ra_info(), use pte_offset_map() repeatedly in swap_vma_readahead(), breaking from the loop if it fails. (Will the repeated "map" and "unmap" show up as a slowdown anywhere? If so, maybe modify __read_swap_cache_async() to do the pte_unmap() only when it does not find the page already in the swapcache.) Use ptep_get_lockless(), mainly for its READ_ONCE(). Correctly advance the address passed down to each call of __read__swap_cache_async(). Link: https://lkml.kernel.org/r/b7c64ab3-9e44-aac0-d2b-c57de578af1c@google.com Signed-off-by: Hugh Dickins Reviewed-by: "Huang, Ying" Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- include/linux/swap.h | 19 ------------------- mm/swap_state.c | 45 ++++++++++++++++++++++++--------------------- 2 files changed, 24 insertions(+), 40 deletions(-) (limited to 'mm') diff --git a/include/linux/swap.h b/include/linux/swap.h index b5f6f2916de1..ce7e82cf787f 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -337,25 +337,6 @@ struct swap_info_struct { */ }; -#ifdef CONFIG_64BIT -#define SWAP_RA_ORDER_CEILING 5 -#else -/* Avoid stack overflow, because we need to save part of page table */ -#define SWAP_RA_ORDER_CEILING 3 -#define SWAP_RA_PTE_CACHE_SIZE (1 << SWAP_RA_ORDER_CEILING) -#endif - -struct vma_swap_readahead { - unsigned short win; - unsigned short offset; - unsigned short nr_pte; -#ifdef CONFIG_64BIT - pte_t *ptes; -#else - pte_t ptes[SWAP_RA_PTE_CACHE_SIZE]; -#endif -}; - static inline swp_entry_t folio_swap_entry(struct folio *folio) { swp_entry_t entry = { .val = page_private(&folio->page) }; diff --git a/mm/swap_state.c b/mm/swap_state.c index ef32353c18a6..a33c60e0158f 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -709,6 +709,14 @@ void exit_swap_address_space(unsigned int type) swapper_spaces[type] = NULL; } +#define SWAP_RA_ORDER_CEILING 5 + +struct vma_swap_readahead { + unsigned short win; + unsigned short offset; + unsigned short nr_pte; +}; + static void swap_ra_info(struct vm_fault *vmf, struct vma_swap_readahead *ra_info) { @@ -716,11 +724,7 @@ static void swap_ra_info(struct vm_fault *vmf, unsigned long ra_val; unsigned long faddr, pfn, fpfn, lpfn, rpfn; unsigned long start, end; - pte_t *pte, *orig_pte; unsigned int max_win, hits, prev_win, win; -#ifndef CONFIG_64BIT - pte_t *tpte; -#endif max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING); @@ -739,12 +743,9 @@ static void swap_ra_info(struct vm_fault *vmf, max_win, prev_win); atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0)); - if (win == 1) return; - /* Copy the PTEs because 
the page table may be unmapped */ - orig_pte = pte = pte_offset_map(vmf->pmd, faddr); if (fpfn == pfn + 1) { lpfn = fpfn; rpfn = fpfn + win; @@ -764,15 +765,6 @@ static void swap_ra_info(struct vm_fault *vmf, ra_info->nr_pte = end - start; ra_info->offset = fpfn - start; - pte -= ra_info->offset; -#ifdef CONFIG_64BIT - ra_info->ptes = pte; -#else - tpte = ra_info->ptes; - for (pfn = start; pfn != end; pfn++) - *tpte++ = *pte++; -#endif - pte_unmap(orig_pte); } /** @@ -796,7 +788,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, struct swap_iocb *splug = NULL; struct vm_area_struct *vma = vmf->vma; struct page *page; - pte_t *pte, pentry; + pte_t *pte = NULL, pentry; + unsigned long addr; swp_entry_t entry; unsigned int i; bool page_allocated; @@ -808,17 +801,25 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, if (ra_info.win == 1) goto skip; + addr = vmf->address - (ra_info.offset * PAGE_SIZE); + blk_start_plug(&plug); - for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte; - i++, pte++) { - pentry = *pte; + for (i = 0; i < ra_info.nr_pte; i++, addr += PAGE_SIZE) { + if (!pte++) { + pte = pte_offset_map(vmf->pmd, addr); + if (!pte) + break; + } + pentry = ptep_get_lockless(pte); if (!is_swap_pte(pentry)) continue; entry = pte_to_swp_entry(pentry); if (unlikely(non_swap_entry(entry))) continue; + pte_unmap(pte); + pte = NULL; page = __read_swap_cache_async(entry, gfp_mask, vma, - vmf->address, &page_allocated); + addr, &page_allocated); if (!page) continue; if (page_allocated) { @@ -830,6 +831,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, } put_page(page); } + if (pte) + pte_unmap(pte); blk_finish_plug(&plug); swap_read_unplug(splug); lru_add_drain(); -- cgit v1.2.3 From a92cbb82c8d375d47fbaf0e1ad3fd4074a7cb156 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:53:23 -0700 Subject: perf/core: allow pte_offset_map() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In rare transient cases, not yet made possible, pte_offset_map() and pte_offet_map_lock() may not find a page table: handle appropriately. [hughd@google.com: __wp_page_copy_user(): don't call update_mmu_tlb() with NULL] Link: https://lkml.kernel.org/r/1a4db221-7872-3594-57ce-42369945ec8d@google.com Link: https://lkml.kernel.org/r/a194441b-63f3-adb6-5964-7ca3171ae7c2@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- kernel/events/core.c | 4 ++++ mm/memory.c | 6 ++++-- 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/kernel/events/core.c b/kernel/events/core.c index db016e418931..174be710f3b3 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7490,6 +7490,7 @@ static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr) return pud_leaf_size(pud); pmdp = pmd_offset_lockless(pudp, pud, addr); +again: pmd = pmdp_get_lockless(pmdp); if (!pmd_present(pmd)) return 0; @@ -7498,6 +7499,9 @@ static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr) return pmd_leaf_size(pmd); ptep = pte_offset_map(&pmd, addr); + if (!ptep) + goto again; + pte = ptep_get_lockless(ptep); if (pte_present(pte)) size = pte_leaf_size(pte); diff --git a/mm/memory.c b/mm/memory.c index 11f221953690..63c30f58142b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2843,7 +2843,8 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, * Other thread has already handled the fault * and update local tlb only */ - update_mmu_tlb(vma, addr, vmf->pte); + if (vmf->pte) + update_mmu_tlb(vma, addr, vmf->pte); ret = -EAGAIN; goto pte_unlock; } @@ -2867,7 +2868,8 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); if (unlikely(!vmf->pte || !pte_same(*vmf->pte, vmf->orig_pte))) { /* The PTE changed under us, update local tlb */ - update_mmu_tlb(vma, addr, vmf->pte); + if (vmf->pte) + update_mmu_tlb(vma, addr, vmf->pte); ret = -EAGAIN; goto pte_unlock; } -- cgit v1.2.3 From b95826c9aa48b2997b3973b42a8716ba132b920e Mon Sep 17 00:00:00 2001 From: Sidhartha Kumar Date: Mon, 12 Jun 2023 09:34:05 -0700 Subject: mm: remove set_compound_page_dtor() All users can use the folio equivalent so this function can be safely removed. 
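For illustration, the per-call-site conversion is mechanical (sketch; COMPOUND_PAGE_DTOR stands in for whichever destructor a given site sets, and page_folio() is one way to reach the folio):

	/* before */
	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);

	/* after: every remaining caller already has, or can cheaply get, the folio */
	folio_set_compound_dtor(page_folio(page), COMPOUND_PAGE_DTOR);
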
Link: https://lkml.kernel.org/r/20230612163405.99345-1-sidhartha.kumar@oracle.com Signed-off-by: Sidhartha Kumar Cc: Aneesh Kumar K.V Cc: Matthew Wilcox Cc: Tarun Sahu Signed-off-by: Andrew Morton --- include/linux/mm.h | 10 ---------- mm/huge_memory.c | 2 +- mm/internal.h | 2 +- 3 files changed, 2 insertions(+), 12 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index a08dc8cc48fb..8f40bf17d597 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1223,16 +1223,6 @@ enum compound_dtor_id { }; extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS]; -static inline void set_compound_page_dtor(struct page *page, - enum compound_dtor_id compound_dtor) -{ - struct folio *folio = (struct folio *)page; - - VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page); - VM_BUG_ON_PAGE(!PageHead(page), page); - folio->_folio_dtor = compound_dtor; -} - static inline void folio_set_compound_dtor(struct folio *folio, enum compound_dtor_id compound_dtor) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 31bc8fa768e3..76f970aa5b4d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -583,7 +583,7 @@ void prep_transhuge_page(struct page *page) VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio); INIT_LIST_HEAD(&folio->_deferred_list); - set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR); + folio_set_compound_dtor(folio, TRANSHUGE_PAGE_DTOR); } static inline bool is_transparent_hugepage(struct page *page) diff --git a/mm/internal.h b/mm/internal.h index faf0508d89a5..33b8b8f66af3 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -391,7 +391,7 @@ static inline void prep_compound_head(struct page *page, unsigned int order) { struct folio *folio = (struct folio *)page; - set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); + folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR); set_compound_order(page, order); atomic_set(&folio->_entire_mapcount, -1); atomic_set(&folio->_nr_pages_mapped, 0); -- cgit v1.2.3 From 349d1670008d3dab99a11b015bef51ad3f26fb4f Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Mon, 12 Jun 2023 12:04:20 -0400 Subject: mm/hugetlb: fix pgtable lock on pmd sharing Huge pmd sharing operates on PUD not PMD, huge_pte_lock() is not suitable in this case because it should only work for last level pte changes, while pmd sharing is always one level higher. Meanwhile, here we're locking over the spte pgtable lock which is even not a lock for current mm but someone else's. It seems even racy on operating on the lock, as after put_page() of the spte pgtable page logically the page can be released, so at least the spin_unlock() needs to be done after the put_page(). No report I am aware, I'm not even sure whether it'll just work on taking the spte pmd lock, because while we're holding i_mmap read lock it probably means the vma interval tree is frozen, all pte allocators over this pud entry could always find the specific svma and spte page, so maybe they'll serialize on this spte page lock? Even so, doesn't seem to be expected. It just seems to be an accident of cb900f412154. Fix it with the proper pud lock (which is the mm's page_table_lock). 
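In sketch form, the locking level the fix settles on: huge_pte_lock() is for last-level entries, whereas populating a pud - which is what pmd sharing does - serializes on the mm's page_table_lock (simplified from the hunk below; bookkeeping such as mm_inc_nr_pmds() is elided):

	spin_lock(&mm->page_table_lock);	/* pud level: no split ptlock here */
	if (pud_none(*pud))
		pud_populate(mm, pud,
			     (pmd_t *)((unsigned long)spte & PAGE_MASK));
	else
		put_page(virt_to_page(spte));	/* lost the race: drop the extra ref */
	spin_unlock(&mm->page_table_lock);
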
Link: https://lkml.kernel.org/r/20230612160420.809818-1-peterx@redhat.com Fixes: cb900f412154 ("mm, hugetlb: convert hugetlbfs to use split pmd lock") Signed-off-by: Peter Xu Reviewed-by: Mike Kravetz Cc: Naoya Horiguchi Signed-off-by: Andrew Morton --- mm/hugetlb.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index de96cd936b12..1d3d8a61b336 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -7130,7 +7130,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long saddr; pte_t *spte = NULL; pte_t *pte; - spinlock_t *ptl; i_mmap_lock_read(mapping); vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { @@ -7151,7 +7150,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, if (!spte) goto out; - ptl = huge_pte_lock(hstate_vma(vma), mm, spte); + spin_lock(&mm->page_table_lock); if (pud_none(*pud)) { pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK)); @@ -7159,7 +7158,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, } else { put_page(virt_to_page(spte)); } - spin_unlock(ptl); + spin_unlock(&mm->page_table_lock); out: pte = (pte_t *)pmd_alloc(mm, pud, addr); i_mmap_unlock_read(mapping); -- cgit v1.2.3 From 0c474d31a6378f20cbe83f62d4177ebdc099c7fc Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 12 Jun 2023 16:31:47 +0100 Subject: mm/slab: simplify create_kmalloc_cache() args and make it static In the slab variant of kmem_cache_init(), call new_kmalloc_cache() instead of initialising the kmalloc_caches array directly. With this, create_kmalloc_cache() is now only called from new_kmalloc_cache() in the same file, so make it static. In addition, the useroffset argument is always 0 while usersize is the same as size. Remove them. Link: https://lkml.kernel.org/r/20230612153201.554742-4-catalin.marinas@arm.com Signed-off-by: Catalin Marinas Reviewed-by: Vlastimil Babka Tested-by: Isaac J. Manjarres Cc: Alasdair Kergon Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Christoph Hellwig Cc: Daniel Vetter Cc: Greg Kroah-Hartman Cc: Herbert Xu Cc: Jerry Snitselaar Cc: Joerg Roedel Cc: Jonathan Cameron Cc: Jonathan Cameron Cc: Lars-Peter Clausen Cc: Logan Gunthorpe Cc: Marc Zyngier Cc: Mark Brown Cc: Mike Snitzer Cc: "Rafael J. Wysocki" Cc: Robin Murphy Cc: Saravana Kannan Cc: Will Deacon Signed-off-by: Andrew Morton --- mm/slab.c | 6 +----- mm/slab.h | 5 ++--- mm/slab_common.c | 14 ++++++-------- 3 files changed, 9 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index bb57f7fdbae1..b7817dcba63e 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1240,11 +1240,7 @@ void __init kmem_cache_init(void) * Initialize the caches that provide memory for the kmem_cache_node * structures first. Without this, further allocations will bug. 
*/ - kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache( - kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL], - kmalloc_info[INDEX_NODE].size, - ARCH_KMALLOC_FLAGS, 0, - kmalloc_info[INDEX_NODE].size); + new_kmalloc_cache(INDEX_NODE, KMALLOC_NORMAL, ARCH_KMALLOC_FLAGS); slab_state = PARTIAL_NODE; setup_kmalloc_cache_index_table(); diff --git a/mm/slab.h b/mm/slab.h index f01ac256a8f5..592590fcddae 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -255,9 +255,8 @@ gfp_t kmalloc_fix_flags(gfp_t flags); /* Functions provided by the slab allocators */ int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags); -struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size, - slab_flags_t flags, unsigned int useroffset, - unsigned int usersize); +void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type, + slab_flags_t flags); extern void create_boot_cache(struct kmem_cache *, const char *name, unsigned int size, slab_flags_t flags, unsigned int useroffset, unsigned int usersize); diff --git a/mm/slab_common.c b/mm/slab_common.c index 607249785c07..7f069159aee2 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -658,17 +658,16 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, s->refcount = -1; /* Exempt from merging for now */ } -struct kmem_cache *__init create_kmalloc_cache(const char *name, - unsigned int size, slab_flags_t flags, - unsigned int useroffset, unsigned int usersize) +static struct kmem_cache *__init create_kmalloc_cache(const char *name, + unsigned int size, + slab_flags_t flags) { struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); if (!s) panic("Out of memory when creating slab %s\n", name); - create_boot_cache(s, name, size, flags | SLAB_KMALLOC, useroffset, - usersize); + create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size); list_add(&s->list, &slab_caches); s->refcount = 1; return s; @@ -863,7 +862,7 @@ void __init setup_kmalloc_cache_index_table(void) } } -static void __init +void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) { if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) { @@ -880,8 +879,7 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) kmalloc_caches[type][idx] = create_kmalloc_cache( kmalloc_info[idx].name[type], - kmalloc_info[idx].size, flags, 0, - kmalloc_info[idx].size); + kmalloc_info[idx].size, flags); /* * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for -- cgit v1.2.3 From 963e84b0f262297466dff440ccfbb868ecb971b3 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 12 Jun 2023 16:31:48 +0100 Subject: mm/slab: limit kmalloc() minimum alignment to dma_get_cache_alignment() Do not create kmalloc() caches which are not aligned to dma_get_cache_alignment(). There is no functional change since for current architectures defining ARCH_DMA_MINALIGN, ARCH_KMALLOC_MINALIGN equals ARCH_DMA_MINALIGN (and dma_get_cache_alignment()). On architectures without a specific ARCH_DMA_MINALIGN, dma_get_cache_alignment() is 1, so no change to the kmalloc() caches. Link: https://lkml.kernel.org/r/20230612153201.554742-5-catalin.marinas@arm.com Signed-off-by: Catalin Marinas Reviewed-by: Vlastimil Babka Tested-by: Isaac J. 
Manjarres Cc: Christoph Hellwig Cc: Robin Murphy Cc: Alasdair Kergon Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Daniel Vetter Cc: Greg Kroah-Hartman Cc: Herbert Xu Cc: Jerry Snitselaar Cc: Joerg Roedel Cc: Jonathan Cameron Cc: Jonathan Cameron Cc: Lars-Peter Clausen Cc: Logan Gunthorpe Cc: Marc Zyngier Cc: Mark Brown Cc: Mike Snitzer Cc: "Rafael J. Wysocki" Cc: Saravana Kannan Cc: Will Deacon Signed-off-by: Andrew Morton --- mm/slab_common.c | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/slab_common.c b/mm/slab_common.c index 7f069159aee2..7c6475847fdf 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -862,9 +863,18 @@ void __init setup_kmalloc_cache_index_table(void) } } +static unsigned int __kmalloc_minalign(void) +{ + return dma_get_cache_alignment(); +} + void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) { + unsigned int minalign = __kmalloc_minalign(); + unsigned int aligned_size = kmalloc_info[idx].size; + int aligned_idx = idx; + if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) { flags |= SLAB_RECLAIM_ACCOUNT; } else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) { @@ -877,9 +887,17 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) flags |= SLAB_CACHE_DMA; } - kmalloc_caches[type][idx] = create_kmalloc_cache( - kmalloc_info[idx].name[type], - kmalloc_info[idx].size, flags); + if (minalign > ARCH_KMALLOC_MINALIGN) { + aligned_size = ALIGN(aligned_size, minalign); + aligned_idx = __kmalloc_index(aligned_size, false); + } + + if (!kmalloc_caches[type][aligned_idx]) + kmalloc_caches[type][aligned_idx] = create_kmalloc_cache( + kmalloc_info[aligned_idx].name[type], + aligned_size, flags); + if (idx != aligned_idx) + kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx]; /* * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for -- cgit v1.2.3 From b035f5a6d8521bc77448d1c61db6244d91da3325 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 12 Jun 2023 16:32:00 +0100 Subject: mm: slab: reduce the kmalloc() minimum alignment if DMA bouncing possible If an architecture opted in to DMA bouncing of unaligned kmalloc() buffers (ARCH_WANT_KMALLOC_DMA_BOUNCE), reduce the minimum kmalloc() cache alignment below cache-line size to ARCH_KMALLOC_MINALIGN. Link: https://lkml.kernel.org/r/20230612153201.554742-17-catalin.marinas@arm.com Signed-off-by: Catalin Marinas Reviewed-by: Vlastimil Babka Tested-by: Isaac J. Manjarres Cc: Christoph Hellwig Cc: Robin Murphy Cc: Alasdair Kergon Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Daniel Vetter Cc: Greg Kroah-Hartman Cc: Herbert Xu Cc: Jerry Snitselaar Cc: Joerg Roedel Cc: Jonathan Cameron Cc: Jonathan Cameron Cc: Lars-Peter Clausen Cc: Logan Gunthorpe Cc: Marc Zyngier Cc: Mark Brown Cc: Mike Snitzer Cc: "Rafael J. 
Wysocki" Cc: Saravana Kannan Cc: Will Deacon Signed-off-by: Andrew Morton --- mm/slab_common.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'mm') diff --git a/mm/slab_common.c b/mm/slab_common.c index 7c6475847fdf..43c008165f56 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -865,6 +866,10 @@ void __init setup_kmalloc_cache_index_table(void) static unsigned int __kmalloc_minalign(void) { +#ifdef CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC + if (io_tlb_default_mem.nslabs) + return ARCH_KMALLOC_MINALIGN; +#endif return dma_get_cache_alignment(); } -- cgit v1.2.3 From 426931e7e5d96664616794c7c36efc748a7497e2 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 12 Jun 2023 16:15:43 +0100 Subject: mm: ptdump should use ptep_get_lockless() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "Encapsulate PTE contents from non-arch code", v3. A series to improve the encapsulation of pte entries by disallowing non-arch code from directly dereferencing pte_t pointers. This means that by default, the accesses change from a C dereference to a READ_ONCE(). This is technically the correct thing to do since where pgtables are modified by HW (for access/dirty) they are volatile and therefore we should always ensure READ_ONCE() semantics. But more importantly, by always using the helper, it can be overridden by the architecture to fully encapsulate the contents of the pte. Arch code is deliberately not converted, as the arch code knows best. It is intended that arch code (arm64) will override the default with its own implementation that can (e.g.) hide certain bits from the core code, or determine young/dirty status by mixing in state from another source. This patch (of 3): The page table dumper uses walk_page_range_novma() to walk the page tables, which does not lock the PTL before calling the pte_entry() callback. Therefore, the page table dumper's callback must use ptep_get_lockless() rather than ptep_get() to ensure that the pte it reads is not torn or otherwise corrupt when racing with writers. Link: https://lkml.kernel.org/r/20230612151545.3317766-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20230612151545.3317766-2-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Cc: Adrian Hunter Cc: Alexander Potapenko Cc: Alexander Shishkin Cc: Alex Williamson Cc: Al Viro Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Christian Brauner Cc: Christoph Hellwig Cc: Daniel Vetter Cc: Dave Airlie Cc: Dimitri Sivanich Cc: Dmitry Vyukov Cc: Ian Rogers Cc: Jason Gunthorpe Cc: Jérôme Glisse Cc: Jiri Olsa Cc: Johannes Weiner Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Mark Rutland Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Michal Hocko Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Muchun Song Cc: Namhyung Kim Cc: Naoya Horiguchi Cc: Oleksandr Tyshchenko Cc: Pavel Tatashin Cc: Roman Gushchin Cc: SeongJae Park Cc: Shakeel Butt Cc: Uladzislau Rezki (Sony) Cc: Vincenzo Frascino Cc: Yu Zhao Cc: kernel test robot Signed-off-by: Andrew Morton --- mm/ptdump.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/ptdump.c b/mm/ptdump.c index 8adab455a68b..03c1bdae4a43 100644 --- a/mm/ptdump.c +++ b/mm/ptdump.c @@ -119,7 +119,7 @@ static int ptdump_pte_entry(pte_t *pte, unsigned long addr, unsigned long next, struct mm_walk *walk) { struct ptdump_state *st = walk->private; - pte_t val = ptep_get(pte); + pte_t val = ptep_get_lockless(pte); if (st->effective_prot) st->effective_prot(st, 4, pte_val(val)); -- cgit v1.2.3 From c33c794828f21217f72ce6fc140e0d34e0d56bff Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 12 Jun 2023 16:15:45 +0100 Subject: mm: ptep_get() conversion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert all instances of direct pte_t* dereferencing to instead use ptep_get() helper. This means that by default, the accesses change from a C dereference to a READ_ONCE(). This is technically the correct thing to do since where pgtables are modified by HW (for access/dirty) they are volatile and therefore we should always ensure READ_ONCE() semantics. But more importantly, by always using the helper, it can be overridden by the architecture to fully encapsulate the contents of the pte. Arch code is deliberately not converted, as the arch code knows best. It is intended that arch code (arm64) will override the default with its own implementation that can (e.g.) hide certain bits from the core code, or determine young/dirty status by mixing in state from another source. Conversion was done using Coccinelle: ---- // $ make coccicheck \ // COCCI=ptepget.cocci \ // SPFLAGS="--include-headers" \ // MODE=patch virtual patch @ depends on patch @ pte_t *v; @@ - *v + ptep_get(v) ---- Then reviewed and hand-edited to avoid multiple unnecessary calls to ptep_get(), instead opting to store the result of a single call in a variable, where it is correct to do so. This aims to negate any cost of READ_ONCE() and will benefit arch-overrides that may be more complex. Included is a fix for an issue in an earlier version of this patch that was pointed out by kernel test robot. The issue arose because config MMU=n elides definition of the ptep helper functions, including ptep_get(). HUGETLB_PAGE=n configs still define a simple huge_ptep_clear_flush() for linking purposes, which dereferences the ptep. So when both configs are disabled, this caused a build error because ptep_get() is not defined. Fix by continuing to do a direct dereference when MMU=n. This is safe because for this config the arch code cannot be trying to virtualize the ptes because none of the ptep helpers are defined. 
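To make the hand-edited pattern above concrete, a minimal before/after sketch follows (illustration only, not part of the diff below; example_pte_entry_old/new() and do_something() are made-up names). As described above, the default ptep_get() turns the access into a READ_ONCE(), so reading the entry once into a local both avoids torn reads and keeps the helper's cost to a single access: ----
#include <linux/pgtable.h>

extern void do_something(unsigned long pfn);	/* placeholder for the example */

/* Before: three separate dereferences of a pte that HW may update */
static void example_pte_entry_old(pte_t *pte)
{
	if (pte_present(*pte) && pte_young(*pte))
		do_something(pte_pfn(*pte));
}

/* After: read the entry once through the helper (READ_ONCE() by default) */
static void example_pte_entry_new(pte_t *pte)
{
	pte_t ptent = ptep_get(pte);

	if (pte_present(ptent) && pte_young(ptent))
		do_something(pte_pfn(ptent));
}
---- This mirrors the conversions in the diff below, e.g. in mm/damon/vaddr.c and fs/proc/task_mmu.c, where the result of a single ptep_get() call is stored in a local ptent and reused.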
Link: https://lkml.kernel.org/r/20230612151545.3317766-4-ryan.roberts@arm.com Reported-by: kernel test robot Link: https://lore.kernel.org/oe-kbuild-all/202305120142.yXsNEo6H-lkp@intel.com/ Signed-off-by: Ryan Roberts Cc: Adrian Hunter Cc: Alexander Potapenko Cc: Alexander Shishkin Cc: Alex Williamson Cc: Al Viro Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Christian Brauner Cc: Christoph Hellwig Cc: Daniel Vetter Cc: Dave Airlie Cc: Dimitri Sivanich Cc: Dmitry Vyukov Cc: Ian Rogers Cc: Jason Gunthorpe Cc: Jérôme Glisse Cc: Jiri Olsa Cc: Johannes Weiner Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Mark Rutland Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Michal Hocko Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Muchun Song Cc: Namhyung Kim Cc: Naoya Horiguchi Cc: Oleksandr Tyshchenko Cc: Pavel Tatashin Cc: Roman Gushchin Cc: SeongJae Park Cc: Shakeel Butt Cc: Uladzislau Rezki (Sony) Cc: Vincenzo Frascino Cc: Yu Zhao Signed-off-by: Andrew Morton --- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 8 +- drivers/misc/sgi-gru/grufault.c | 2 +- drivers/vfio/vfio_iommu_type1.c | 7 +- drivers/xen/privcmd.c | 2 +- fs/proc/task_mmu.c | 33 +++---- fs/userfaultfd.c | 6 +- include/linux/hugetlb.h | 4 + include/linux/mm_inline.h | 2 +- include/linux/pgtable.h | 6 +- kernel/events/uprobes.c | 2 +- mm/damon/ops-common.c | 2 +- mm/damon/paddr.c | 2 +- mm/damon/vaddr.c | 10 ++- mm/filemap.c | 2 +- mm/gup.c | 21 +++-- mm/highmem.c | 12 +-- mm/hmm.c | 2 +- mm/huge_memory.c | 4 +- mm/hugetlb.c | 2 +- mm/hugetlb_vmemmap.c | 6 +- mm/kasan/init.c | 9 +- mm/kasan/shadow.c | 10 +-- mm/khugepaged.c | 22 ++--- mm/ksm.c | 22 ++--- mm/madvise.c | 6 +- mm/mapping_dirty_helpers.c | 4 +- mm/memcontrol.c | 4 +- mm/memory-failure.c | 26 +++--- mm/memory.c | 100 +++++++++++---------- mm/mempolicy.c | 6 +- mm/migrate.c | 14 +-- mm/migrate_device.c | 15 ++-- mm/mincore.c | 2 +- mm/mlock.c | 6 +- mm/mprotect.c | 8 +- mm/mremap.c | 2 +- mm/page_table_check.c | 4 +- mm/page_vma_mapped.c | 27 +++--- mm/pgtable-generic.c | 2 +- mm/rmap.c | 34 ++++--- mm/sparse-vmemmap.c | 8 +- mm/swap_state.c | 8 +- mm/swapfile.c | 20 +++-- mm/userfaultfd.c | 4 +- mm/vmalloc.c | 6 +- mm/vmscan.c | 14 +-- virt/kvm/kvm_main.c | 11 ++- 47 files changed, 301 insertions(+), 228 deletions(-) (limited to 'mm') diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 56279908ed30..01e271b6ad21 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -1681,7 +1681,9 @@ static int igt_mmap_gpu(void *arg) static int check_present_pte(pte_t *pte, unsigned long addr, void *data) { - if (!pte_present(*pte) || pte_none(*pte)) { + pte_t ptent = ptep_get(pte); + + if (!pte_present(ptent) || pte_none(ptent)) { pr_err("missing PTE:%lx\n", (addr - (unsigned long)data) >> PAGE_SHIFT); return -EINVAL; @@ -1692,7 +1694,9 @@ static int check_present_pte(pte_t *pte, unsigned long addr, void *data) static int check_absent_pte(pte_t *pte, unsigned long addr, void *data) { - if (pte_present(*pte) && !pte_none(*pte)) { + pte_t ptent = ptep_get(pte); + + if (pte_present(ptent) && !pte_none(ptent)) { pr_err("present PTE:%lx; expected to be revoked\n", (addr - (unsigned long)data) >> PAGE_SHIFT); return -EINVAL; diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index 378cf02a2aa1..629edb6486de 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -228,7 +228,7 @@ static int 
atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr, goto err; #ifdef CONFIG_X86_64 if (unlikely(pmd_large(*pmdp))) - pte = *(pte_t *) pmdp; + pte = ptep_get((pte_t *)pmdp); else #endif pte = *pte_offset_kernel(pmdp, vaddr); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 306e6f1d1c70..ebe0ad31d0b0 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -514,6 +514,7 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, bool write_fault) { pte_t *ptep; + pte_t pte; spinlock_t *ptl; int ret; @@ -536,10 +537,12 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, return ret; } - if (write_fault && !pte_write(*ptep)) + pte = ptep_get(ptep); + + if (write_fault && !pte_write(pte)) ret = -EFAULT; else - *pfn = pte_pfn(*ptep); + *pfn = pte_pfn(pte); pte_unmap_unlock(ptep, ptl); return ret; diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index e2f580e30a86..f447cd37cc4c 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -949,7 +949,7 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma) */ static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data) { - return pte_none(*pte) ? 0 : -EBUSY; + return pte_none(ptep_get(pte)) ? 0 : -EBUSY; } static int privcmd_vma_range_is_mapped( diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 0d63b6a0f0d8..507cd4e59d07 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -538,13 +538,14 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, bool locked = !!(vma->vm_flags & VM_LOCKED); struct page *page = NULL; bool migration = false, young = false, dirty = false; + pte_t ptent = ptep_get(pte); - if (pte_present(*pte)) { - page = vm_normal_page(vma, addr, *pte); - young = pte_young(*pte); - dirty = pte_dirty(*pte); - } else if (is_swap_pte(*pte)) { - swp_entry_t swpent = pte_to_swp_entry(*pte); + if (pte_present(ptent)) { + page = vm_normal_page(vma, addr, ptent); + young = pte_young(ptent); + dirty = pte_dirty(ptent); + } else if (is_swap_pte(ptent)) { + swp_entry_t swpent = pte_to_swp_entry(ptent); if (!non_swap_entry(swpent)) { int mapcount; @@ -732,11 +733,12 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, struct mem_size_stats *mss = walk->private; struct vm_area_struct *vma = walk->vma; struct page *page = NULL; + pte_t ptent = ptep_get(pte); - if (pte_present(*pte)) { - page = vm_normal_page(vma, addr, *pte); - } else if (is_swap_pte(*pte)) { - swp_entry_t swpent = pte_to_swp_entry(*pte); + if (pte_present(ptent)) { + page = vm_normal_page(vma, addr, ptent); + } else if (is_swap_pte(ptent)) { + swp_entry_t swpent = pte_to_swp_entry(ptent); if (is_pfn_swap_entry(swpent)) page = pfn_swap_entry_to_page(swpent); @@ -1105,7 +1107,7 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma, * Documentation/admin-guide/mm/soft-dirty.rst for full description * of how soft-dirty works. 
*/ - pte_t ptent = *pte; + pte_t ptent = ptep_get(pte); if (pte_present(ptent)) { pte_t old_pte; @@ -1194,7 +1196,7 @@ out: return 0; } for (; addr != end; pte++, addr += PAGE_SIZE) { - ptent = *pte; + ptent = ptep_get(pte); if (cp->type == CLEAR_REFS_SOFT_DIRTY) { clear_soft_dirty(vma, addr, pte); @@ -1550,7 +1552,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, for (; addr < end; pte++, addr += PAGE_SIZE) { pagemap_entry_t pme; - pme = pte_to_pagemap_entry(pm, vma, addr, *pte); + pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte)); err = add_to_pagemap(addr, &pme, pm); if (err) break; @@ -1893,10 +1895,11 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, return 0; } do { - struct page *page = can_gather_numa_stats(*pte, vma, addr); + pte_t ptent = ptep_get(pte); + struct page *page = can_gather_numa_stats(ptent, vma, addr); if (!page) continue; - gather_stats(page, md, pte_dirty(*pte), 1); + gather_stats(page, md, pte_dirty(ptent), 1); } while (pte++, addr += PAGE_SIZE, addr != end); pte_unmap_unlock(orig_pte, ptl); diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index ca83423f8d54..478e2b169c13 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -335,6 +335,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, pud_t *pud; pmd_t *pmd, _pmd; pte_t *pte; + pte_t ptent; bool ret = true; mmap_assert_locked(mm); @@ -374,9 +375,10 @@ again: * changes under us. PTE markers should be handled the same as none * ptes here. */ - if (pte_none_mostly(*pte)) + ptent = ptep_get(pte); + if (pte_none_mostly(ptent)) ret = true; - if (!pte_write(*pte) && (reason & VM_UFFD_WP)) + if (!pte_write(ptent) && (reason & VM_UFFD_WP)) ret = true; pte_unmap(pte); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 21f942025fec..beb7c63d2871 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -1185,7 +1185,11 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm) static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { +#ifdef CONFIG_MMU + return ptep_get(ptep); +#else return *ptep; +#endif } static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 0e1d239a882c..08c2bcefcb2b 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -555,7 +555,7 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, bool arm_uffd_pte = false; /* The current status of the pte should be "cleared" before calling */ - WARN_ON_ONCE(!pte_none(*pte)); + WARN_ON_ONCE(!pte_none(ptep_get(pte))); /* * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index fc06f6419661..5063b482e34f 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -231,7 +231,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { - pte_t pte = *ptep; + pte_t pte = ptep_get(ptep); int r = 1; if (!pte_young(pte)) r = 0; @@ -318,7 +318,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long address, pte_t *ptep) { - pte_t pte = *ptep; + pte_t pte = ptep_get(ptep); pte_clear(mm, address, ptep); page_table_check_pte_clear(mm, address, pte); return pte; @@ -519,7 +519,7 @@ extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, struct mm_struct; static inline void ptep_set_wrprotect(struct 
mm_struct *mm, unsigned long address, pte_t *ptep) { - pte_t old_pte = *ptep; + pte_t old_pte = ptep_get(ptep); set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); } #endif diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 607d742caa61..f0ac5b874919 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -192,7 +192,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, inc_mm_counter(mm, MM_ANONPAGES); } - flush_cache_page(vma, addr, pte_pfn(*pvmw.pte)); + flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte))); ptep_clear_flush_notify(vma, addr, pvmw.pte); if (new_page) set_pte_at_notify(mm, addr, pvmw.pte, diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c index d4ab81229136..e940802a15a4 100644 --- a/mm/damon/ops-common.c +++ b/mm/damon/ops-common.c @@ -39,7 +39,7 @@ struct folio *damon_get_folio(unsigned long pfn) void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr) { - struct folio *folio = damon_get_folio(pte_pfn(*pte)); + struct folio *folio = damon_get_folio(pte_pfn(ptep_get(pte))); if (!folio) return; diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 5b3a3463d078..40801e38fcf0 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -89,7 +89,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma, while (page_vma_mapped_walk(&pvmw)) { addr = pvmw.address; if (pvmw.pte) { - *accessed = pte_young(*pvmw.pte) || + *accessed = pte_young(ptep_get(pvmw.pte)) || !folio_test_idle(folio) || mmu_notifier_test_young(vma->vm_mm, addr); } else { diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index e814f66dfc2e..2fcc9731528a 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -323,7 +323,7 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr, walk->action = ACTION_AGAIN; return 0; } - if (!pte_present(*pte)) + if (!pte_present(ptep_get(pte))) goto out; damon_ptep_mkold(pte, walk->vma, addr); out: @@ -433,6 +433,7 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk) { pte_t *pte; + pte_t ptent; spinlock_t *ptl; struct folio *folio; struct damon_young_walk_private *priv = walk->private; @@ -471,12 +472,13 @@ regular_page: walk->action = ACTION_AGAIN; return 0; } - if (!pte_present(*pte)) + ptent = ptep_get(pte); + if (!pte_present(ptent)) goto out; - folio = damon_get_folio(pte_pfn(*pte)); + folio = damon_get_folio(pte_pfn(ptent)); if (!folio) goto out; - if (pte_young(*pte) || !folio_test_idle(folio) || + if (pte_young(ptent) || !folio_test_idle(folio) || mmu_notifier_test_young(walk->mm, addr)) priv->young = true; *priv->folio_sz = folio_size(folio); diff --git a/mm/filemap.c b/mm/filemap.c index 1893048ec9ff..00933089b8b6 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3523,7 +3523,7 @@ again: * handled in the specific fault path, and it'll prohibit the * fault-around logic. 
*/ - if (!pte_none(*vmf->pte)) + if (!pte_none(ptep_get(vmf->pte))) goto unlock; /* We're about to handle the fault */ diff --git a/mm/gup.c b/mm/gup.c index 838db6c0bfc2..38986e522d34 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -477,13 +477,14 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, unsigned int flags) { if (flags & FOLL_TOUCH) { - pte_t entry = *pte; + pte_t orig_entry = ptep_get(pte); + pte_t entry = orig_entry; if (flags & FOLL_WRITE) entry = pte_mkdirty(entry); entry = pte_mkyoung(entry); - if (!pte_same(*pte, entry)) { + if (!pte_same(orig_entry, entry)) { set_pte_at(vma->vm_mm, address, pte, entry); update_mmu_cache(vma, address, pte); } @@ -549,7 +550,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma, ptep = pte_offset_map_lock(mm, pmd, address, &ptl); if (!ptep) return no_page_table(vma, flags); - pte = *ptep; + pte = ptep_get(ptep); if (!pte_present(pte)) goto no_page; if (pte_protnone(pte) && !gup_can_follow_protnone(flags)) @@ -821,6 +822,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, pud_t *pud; pmd_t *pmd; pte_t *pte; + pte_t entry; int ret = -EFAULT; /* user gate pages are read-only */ @@ -844,16 +846,17 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, pte = pte_offset_map(pmd, address); if (!pte) return -EFAULT; - if (pte_none(*pte)) + entry = ptep_get(pte); + if (pte_none(entry)) goto unmap; *vma = get_gate_vma(mm); if (!page) goto out; - *page = vm_normal_page(*vma, address, *pte); + *page = vm_normal_page(*vma, address, entry); if (!*page) { - if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte))) + if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry))) goto unmap; - *page = pte_page(*pte); + *page = pte_page(entry); } ret = try_grab_page(*page, gup_flags); if (unlikely(ret)) @@ -2496,7 +2499,7 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, } if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || - unlikely(pte_val(pte) != pte_val(*ptep))) { + unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { gup_put_folio(folio, 1, flags); goto pte_unmap; } @@ -2693,7 +2696,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, if (!folio) return 0; - if (unlikely(pte_val(pte) != pte_val(*ptep))) { + if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { gup_put_folio(folio, refs, flags); return 0; } diff --git a/mm/highmem.c b/mm/highmem.c index db251e77f98f..e19269093a93 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -161,7 +161,7 @@ struct page *__kmap_to_page(void *vaddr) /* kmap() mappings */ if (WARN_ON_ONCE(addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP))) - return pte_page(pkmap_page_table[PKMAP_NR(addr)]); + return pte_page(ptep_get(&pkmap_page_table[PKMAP_NR(addr)])); /* kmap_local_page() mappings */ if (WARN_ON_ONCE(base >= __fix_to_virt(FIX_KMAP_END) && @@ -191,6 +191,7 @@ static void flush_all_zero_pkmaps(void) for (i = 0; i < LAST_PKMAP; i++) { struct page *page; + pte_t ptent; /* * zero means we don't have anything to do, @@ -203,7 +204,8 @@ static void flush_all_zero_pkmaps(void) pkmap_count[i] = 0; /* sanity check */ - BUG_ON(pte_none(pkmap_page_table[i])); + ptent = ptep_get(&pkmap_page_table[i]); + BUG_ON(pte_none(ptent)); /* * Don't need an atomic fetch-and-clear op here; @@ -212,7 +214,7 @@ static void flush_all_zero_pkmaps(void) * getting the kmap_lock (which is held here). * So no dangers, even with speculative execution. 
*/ - page = pte_page(pkmap_page_table[i]); + page = pte_page(ptent); pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]); set_page_address(page, NULL); @@ -511,7 +513,7 @@ static inline bool kmap_high_unmap_local(unsigned long vaddr) { #ifdef ARCH_NEEDS_KMAP_HIGH_GET if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { - kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); + kunmap_high(pte_page(ptep_get(&pkmap_page_table[PKMAP_NR(vaddr)]))); return true; } #endif @@ -548,7 +550,7 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot) idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_pte = kmap_get_pte(vaddr, idx); - BUG_ON(!pte_none(*kmap_pte)); + BUG_ON(!pte_none(ptep_get(kmap_pte))); pteval = pfn_pte(pfn, prot); arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval); arch_kmap_local_post_map(vaddr, pteval); diff --git a/mm/hmm.c b/mm/hmm.c index b1a9159d7c92..855e25e59d8f 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -228,7 +228,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, struct hmm_range *range = hmm_vma_walk->range; unsigned int required_fault; unsigned long cpu_flags; - pte_t pte = *ptep; + pte_t pte = ptep_get(ptep); uint64_t pfn_req_flags = *hmm_pfn; if (pte_none_mostly(pte)) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 76f970aa5b4d..e94fe292f30a 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2063,7 +2063,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, entry = pte_mkspecial(entry); if (pmd_uffd_wp(old_pmd)) entry = pte_mkuffd_wp(entry); - VM_BUG_ON(!pte_none(*pte)); + VM_BUG_ON(!pte_none(ptep_get(pte))); set_pte_at(mm, addr, pte, entry); pte++; } @@ -2257,7 +2257,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, entry = pte_mkuffd_wp(entry); page_add_anon_rmap(page + i, vma, addr, false); } - VM_BUG_ON(!pte_none(*pte)); + VM_BUG_ON(!pte_none(ptep_get(pte))); set_pte_at(mm, addr, pte, entry); pte++; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 1d3d8a61b336..d76574425da3 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -7246,7 +7246,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, pte = (pte_t *)pmd_alloc(mm, pud, addr); } } - BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte)); + BUG_ON(pte && pte_present(ptep_get(pte)) && !pte_huge(ptep_get(pte))); return pte; } diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index f42079b73f82..c2007ef5e9b0 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -105,7 +105,7 @@ static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr, * remapping (which is calling @walk->remap_pte). */ if (!walk->reuse_page) { - walk->reuse_page = pte_page(*pte); + walk->reuse_page = pte_page(ptep_get(pte)); /* * Because the reuse address is part of the range that we are * walking, skip the reuse address range. @@ -239,7 +239,7 @@ static void vmemmap_remap_pte(pte_t *pte, unsigned long addr, * to the tail pages. 
*/ pgprot_t pgprot = PAGE_KERNEL_RO; - struct page *page = pte_page(*pte); + struct page *page = pte_page(ptep_get(pte)); pte_t entry; /* Remapping the head page requires r/w */ @@ -286,7 +286,7 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr, struct page *page; void *to; - BUG_ON(pte_page(*pte) != walk->reuse_page); + BUG_ON(pte_page(ptep_get(pte)) != walk->reuse_page); page = list_first_entry(walk->vmemmap_pages, struct page, lru); list_del(&page->lru); diff --git a/mm/kasan/init.c b/mm/kasan/init.c index cc64ed6858c6..dcfec277e839 100644 --- a/mm/kasan/init.c +++ b/mm/kasan/init.c @@ -286,7 +286,7 @@ static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd) for (i = 0; i < PTRS_PER_PTE; i++) { pte = pte_start + i; - if (!pte_none(*pte)) + if (!pte_none(ptep_get(pte))) return; } @@ -343,16 +343,19 @@ static void kasan_remove_pte_table(pte_t *pte, unsigned long addr, unsigned long end) { unsigned long next; + pte_t ptent; for (; addr < end; addr = next, pte++) { next = (addr + PAGE_SIZE) & PAGE_MASK; if (next > end) next = end; - if (!pte_present(*pte)) + ptent = ptep_get(pte); + + if (!pte_present(ptent)) continue; - if (WARN_ON(!kasan_early_shadow_page_entry(*pte))) + if (WARN_ON(!kasan_early_shadow_page_entry(ptent))) continue; pte_clear(&init_mm, addr, pte); } diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 3e62728ae25d..dd772f9d0f08 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -226,7 +226,7 @@ static bool shadow_mapped(unsigned long addr) if (pmd_bad(*pmd)) return true; pte = pte_offset_kernel(pmd, addr); - return !pte_none(*pte); + return !pte_none(ptep_get(pte)); } static int __meminit kasan_mem_notifier(struct notifier_block *nb, @@ -317,7 +317,7 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr, unsigned long page; pte_t pte; - if (likely(!pte_none(*ptep))) + if (likely(!pte_none(ptep_get(ptep)))) return 0; page = __get_free_page(GFP_KERNEL); @@ -328,7 +328,7 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr, pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL); spin_lock(&init_mm.page_table_lock); - if (likely(pte_none(*ptep))) { + if (likely(pte_none(ptep_get(ptep)))) { set_pte_at(&init_mm, addr, ptep, pte); page = 0; } @@ -418,11 +418,11 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr, { unsigned long page; - page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT); + page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT); spin_lock(&init_mm.page_table_lock); - if (likely(!pte_none(*ptep))) { + if (likely(!pte_none(ptep_get(ptep)))) { pte_clear(&init_mm, addr, ptep); free_page(page); } diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 881669e738c0..0b4f00712895 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -511,7 +511,7 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte, struct folio *folio, *tmp; while (--_pte >= pte) { - pte_t pteval = *_pte; + pte_t pteval = ptep_get(_pte); unsigned long pfn; if (pte_none(pteval)) @@ -555,7 +555,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, address += PAGE_SIZE) { - pte_t pteval = *_pte; + pte_t pteval = ptep_get(_pte); if (pte_none(pteval) || (pte_present(pteval) && is_zero_pfn(pte_pfn(pteval)))) { ++none_or_zero; @@ -699,7 +699,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte, for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, address += PAGE_SIZE) { - pteval = *_pte; + pteval = ptep_get(_pte); if 
(pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); if (is_zero_pfn(pte_pfn(pteval))) { @@ -797,7 +797,7 @@ static int __collapse_huge_page_copy(pte_t *pte, */ for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR; _pte++, page++, _address += PAGE_SIZE) { - pteval = *_pte; + pteval = ptep_get(_pte); if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { clear_user_highpage(page, _address); continue; @@ -1274,7 +1274,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, _address += PAGE_SIZE) { - pte_t pteval = *_pte; + pte_t pteval = ptep_get(_pte); if (is_swap_pte(pteval)) { ++unmapped; if (!cc->is_khugepaged || @@ -1650,18 +1650,19 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, for (i = 0, addr = haddr, pte = start_pte; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) { struct page *page; + pte_t ptent = ptep_get(pte); /* empty pte, skip */ - if (pte_none(*pte)) + if (pte_none(ptent)) continue; /* page swapped out, abort */ - if (!pte_present(*pte)) { + if (!pte_present(ptent)) { result = SCAN_PTE_NON_PRESENT; goto abort; } - page = vm_normal_page(vma, addr, *pte); + page = vm_normal_page(vma, addr, ptent); if (WARN_ON_ONCE(page && is_zone_device_page(page))) page = NULL; /* @@ -1677,10 +1678,11 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, for (i = 0, addr = haddr, pte = start_pte; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) { struct page *page; + pte_t ptent = ptep_get(pte); - if (pte_none(*pte)) + if (pte_none(ptent)) continue; - page = vm_normal_page(vma, addr, *pte); + page = vm_normal_page(vma, addr, ptent); if (WARN_ON_ONCE(page && is_zone_device_page(page))) goto abort; page_remove_rmap(page, vma, false); diff --git a/mm/ksm.c b/mm/ksm.c index 3dc15459dd20..d995779dc1fe 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -429,15 +429,17 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex struct page *page = NULL; spinlock_t *ptl; pte_t *pte; + pte_t ptent; int ret; pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); if (!pte) return 0; - if (pte_present(*pte)) { - page = vm_normal_page(walk->vma, addr, *pte); - } else if (!pte_none(*pte)) { - swp_entry_t entry = pte_to_swp_entry(*pte); + ptent = ptep_get(pte); + if (pte_present(ptent)) { + page = vm_normal_page(walk->vma, addr, ptent); + } else if (!pte_none(ptent)) { + swp_entry_t entry = pte_to_swp_entry(ptent); /* * As KSM pages remain KSM pages until freed, no need to wait @@ -1085,6 +1087,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, int err = -EFAULT; struct mmu_notifier_range range; bool anon_exclusive; + pte_t entry; pvmw.address = page_address_in_vma(page, vma); if (pvmw.address == -EFAULT) @@ -1102,10 +1105,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, goto out_unlock; anon_exclusive = PageAnonExclusive(page); - if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) || + entry = ptep_get(pvmw.pte); + if (pte_write(entry) || pte_dirty(entry) || anon_exclusive || mm_tlb_flush_pending(mm)) { - pte_t entry; - swapped = PageSwapCache(page); flush_cache_page(vma, pvmw.address, page_to_pfn(page)); /* @@ -1147,7 +1149,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry); } - *orig_pte = *pvmw.pte; + *orig_pte = entry; err = 0; out_unlock: @@ -1204,7 +1206,7 @@ static int 
replace_page(struct vm_area_struct *vma, struct page *page, ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!ptep) goto out_mn; - if (!pte_same(*ptep, orig_pte)) { + if (!pte_same(ptep_get(ptep), orig_pte)) { pte_unmap_unlock(ptep, ptl); goto out_mn; } @@ -1231,7 +1233,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, dec_mm_counter(mm, MM_ANONPAGES); } - flush_cache_page(vma, addr, pte_pfn(*ptep)); + flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep))); /* * No need to notify as we are replacing a read only page with another * read only page with the same content. diff --git a/mm/madvise.c b/mm/madvise.c index 9b3c9610052f..886f06066622 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -207,7 +207,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, break; } - pte = *ptep; + pte = ptep_get(ptep); if (!is_swap_pte(pte)) continue; entry = pte_to_swp_entry(pte); @@ -438,7 +438,7 @@ regular_folio: flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); for (; addr < end; pte++, addr += PAGE_SIZE) { - ptent = *pte; + ptent = ptep_get(pte); if (pte_none(ptent)) continue; @@ -642,7 +642,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); for (; addr != end; pte++, addr += PAGE_SIZE) { - ptent = *pte; + ptent = ptep_get(pte); if (pte_none(ptent)) continue; diff --git a/mm/mapping_dirty_helpers.c b/mm/mapping_dirty_helpers.c index 87b4beeda4fa..a26dd8bcfcdb 100644 --- a/mm/mapping_dirty_helpers.c +++ b/mm/mapping_dirty_helpers.c @@ -35,7 +35,7 @@ static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct wp_walk *wpwalk = walk->private; - pte_t ptent = *pte; + pte_t ptent = ptep_get(pte); if (pte_write(ptent)) { pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte); @@ -91,7 +91,7 @@ static int clean_record_pte(pte_t *pte, unsigned long addr, { struct wp_walk *wpwalk = walk->private; struct clean_walk *cwalk = to_clean_walk(wpwalk); - pte_t ptent = *pte; + pte_t ptent = ptep_get(pte); if (pte_dirty(ptent)) { pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) + diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 77d8d2d14fcf..93056918e956 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6025,7 +6025,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, if (!pte) return 0; for (; addr != end; pte++, addr += PAGE_SIZE) - if (get_mctgt_type(vma, addr, *pte, NULL)) + if (get_mctgt_type(vma, addr, ptep_get(pte), NULL)) mc.precharge++; /* increment precharge temporarily */ pte_unmap_unlock(pte - 1, ptl); cond_resched(); @@ -6246,7 +6246,7 @@ retry: if (!pte) return 0; for (; addr != end; addr += PAGE_SIZE) { - pte_t ptent = *(pte++); + pte_t ptent = ptep_get(pte++); bool device = false; swp_entry_t ent; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index d5116f0eb1b6..e245191e6b04 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -6,16 +6,16 @@ * High level machine check handler. Handles pages reported by the * hardware as being corrupted usually due to a multi-bit ECC memory or cache * failure. - * + * * In addition there is a "soft offline" entry point that allows stop using * not-yet-corrupted-by-suspicious pages without killing anything. * * Handles page cache pages in various states. The tricky part - * here is that we can access any page asynchronously in respect to - * other VM users, because memory failures could happen anytime and - * anywhere. 
This could violate some of their assumptions. This is why - * this code has to be extremely careful. Generally it tries to use - * normal locking rules, as in get the standard locks, even if that means + * here is that we can access any page asynchronously in respect to + * other VM users, because memory failures could happen anytime and + * anywhere. This could violate some of their assumptions. This is why + * this code has to be extremely careful. Generally it tries to use + * normal locking rules, as in get the standard locks, even if that means * the error handling takes potentially a long time. * * It can be very tempting to add handling for obscure cases here. @@ -25,12 +25,12 @@ * https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/ * - The case actually shows up as a frequent (top 10) page state in * tools/mm/page-types when running a real workload. - * + * * There are several operations here with exponential complexity because - * of unsuitable VM data structures. For example the operation to map back - * from RMAP chains to processes has to walk the complete process list and + * of unsuitable VM data structures. For example the operation to map back + * from RMAP chains to processes has to walk the complete process list and * has non linear complexity with the number. But since memory corruptions - * are rare we hope to get away with this. This avoids impacting the core + * are rare we hope to get away with this. This avoids impacting the core * VM. */ @@ -386,6 +386,7 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, pud_t *pud; pmd_t *pmd; pte_t *pte; + pte_t ptent; VM_BUG_ON_VMA(address == -EFAULT, vma); pgd = pgd_offset(vma->vm_mm, address); @@ -407,7 +408,8 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, pte = pte_offset_map(pmd, address); if (!pte) return 0; - if (pte_present(*pte) && pte_devmap(*pte)) + ptent = ptep_get(pte); + if (pte_present(ptent) && pte_devmap(ptent)) ret = PAGE_SHIFT; pte_unmap(pte); return ret; @@ -799,7 +801,7 @@ static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr, goto out; for (; addr != end; ptep++, addr += PAGE_SIZE) { - ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT, + ret = check_hwpoisoned_entry(ptep_get(ptep), addr, PAGE_SHIFT, hwp->pfn, &hwp->tk); if (ret == 1) break; diff --git a/mm/memory.c b/mm/memory.c index 63c30f58142b..3d78b552866d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -699,15 +699,17 @@ static void restore_exclusive_pte(struct vm_area_struct *vma, struct page *page, unsigned long address, pte_t *ptep) { + pte_t orig_pte; pte_t pte; swp_entry_t entry; + orig_pte = ptep_get(ptep); pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); - if (pte_swp_soft_dirty(*ptep)) + if (pte_swp_soft_dirty(orig_pte)) pte = pte_mksoft_dirty(pte); - entry = pte_to_swp_entry(*ptep); - if (pte_swp_uffd_wp(*ptep)) + entry = pte_to_swp_entry(orig_pte); + if (pte_swp_uffd_wp(orig_pte)) pte = pte_mkuffd_wp(pte); else if (is_writable_device_exclusive_entry(entry)) pte = maybe_mkwrite(pte_mkdirty(pte), vma); @@ -744,7 +746,7 @@ static int try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr) { - swp_entry_t entry = pte_to_swp_entry(*src_pte); + swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte)); struct page *page = pfn_swap_entry_to_page(entry); if (trylock_page(page)) { @@ -768,9 +770,10 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, struct vm_area_struct *src_vma, unsigned long addr, int *rss) { 
unsigned long vm_flags = dst_vma->vm_flags; - pte_t pte = *src_pte; + pte_t orig_pte = ptep_get(src_pte); + pte_t pte = orig_pte; struct page *page; - swp_entry_t entry = pte_to_swp_entry(pte); + swp_entry_t entry = pte_to_swp_entry(orig_pte); if (likely(!non_swap_entry(entry))) { if (swap_duplicate(entry) < 0) @@ -785,8 +788,8 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, spin_unlock(&mmlist_lock); } /* Mark the swap entry as shared. */ - if (pte_swp_exclusive(*src_pte)) { - pte = pte_swp_clear_exclusive(*src_pte); + if (pte_swp_exclusive(orig_pte)) { + pte = pte_swp_clear_exclusive(orig_pte); set_pte_at(src_mm, addr, src_pte, pte); } rss[MM_SWAPENTS]++; @@ -805,9 +808,9 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, entry = make_readable_migration_entry( swp_offset(entry)); pte = swp_entry_to_pte(entry); - if (pte_swp_soft_dirty(*src_pte)) + if (pte_swp_soft_dirty(orig_pte)) pte = pte_swp_mksoft_dirty(pte); - if (pte_swp_uffd_wp(*src_pte)) + if (pte_swp_uffd_wp(orig_pte)) pte = pte_swp_mkuffd_wp(pte); set_pte_at(src_mm, addr, src_pte, pte); } @@ -840,7 +843,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, entry = make_readable_device_private_entry( swp_offset(entry)); pte = swp_entry_to_pte(entry); - if (pte_swp_uffd_wp(*src_pte)) + if (pte_swp_uffd_wp(orig_pte)) pte = pte_swp_mkuffd_wp(pte); set_pte_at(src_mm, addr, src_pte, pte); } @@ -904,7 +907,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma /* All done, just insert the new page copy in the child */ pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot); pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma); - if (userfaultfd_pte_wp(dst_vma, *src_pte)) + if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte))) /* Uffd-wp needs to be delivered to dest pte as well */ pte = pte_mkuffd_wp(pte); set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); @@ -922,7 +925,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, { struct mm_struct *src_mm = src_vma->vm_mm; unsigned long vm_flags = src_vma->vm_flags; - pte_t pte = *src_pte; + pte_t pte = ptep_get(src_pte); struct page *page; struct folio *folio; @@ -1002,6 +1005,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, struct mm_struct *src_mm = src_vma->vm_mm; pte_t *orig_src_pte, *orig_dst_pte; pte_t *src_pte, *dst_pte; + pte_t ptent; spinlock_t *src_ptl, *dst_ptl; int progress, ret = 0; int rss[NR_MM_COUNTERS]; @@ -1047,17 +1051,18 @@ again: spin_needbreak(src_ptl) || spin_needbreak(dst_ptl)) break; } - if (pte_none(*src_pte)) { + ptent = ptep_get(src_pte); + if (pte_none(ptent)) { progress++; continue; } - if (unlikely(!pte_present(*src_pte))) { + if (unlikely(!pte_present(ptent))) { ret = copy_nonpresent_pte(dst_mm, src_mm, dst_pte, src_pte, dst_vma, src_vma, addr, rss); if (ret == -EIO) { - entry = pte_to_swp_entry(*src_pte); + entry = pte_to_swp_entry(ptep_get(src_pte)); break; } else if (ret == -EBUSY) { break; @@ -1407,7 +1412,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); do { - pte_t ptent = *pte; + pte_t ptent = ptep_get(pte); struct page *page; if (pte_none(ptent)) @@ -1822,7 +1827,7 @@ static int validate_page_before_insert(struct page *page) static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, unsigned long addr, struct page *page, pgprot_t prot) { - if (!pte_none(*pte)) + if (!pte_none(ptep_get(pte))) return 
-EBUSY; /* Ok, finally just insert the thing.. */ get_page(page); @@ -2116,7 +2121,8 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, pte = get_locked_pte(mm, addr, &ptl); if (!pte) return VM_FAULT_OOM; - if (!pte_none(*pte)) { + entry = ptep_get(pte); + if (!pte_none(entry)) { if (mkwrite) { /* * For read faults on private mappings the PFN passed @@ -2128,11 +2134,11 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, * allocation and mapping invalidation so just skip the * update. */ - if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) { - WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte))); + if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) { + WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry))); goto out_unlock; } - entry = pte_mkyoung(*pte); + entry = pte_mkyoung(entry); entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (ptep_set_access_flags(vma, addr, pte, entry, 1)) update_mmu_cache(vma, addr, pte); @@ -2344,7 +2350,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, return -ENOMEM; arch_enter_lazy_mmu_mode(); do { - BUG_ON(!pte_none(*pte)); + BUG_ON(!pte_none(ptep_get(pte))); if (!pfn_modify_allowed(pfn, prot)) { err = -EACCES; break; @@ -2585,7 +2591,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, if (fn) { do { - if (create || !pte_none(*pte)) { + if (create || !pte_none(ptep_get(pte))) { err = fn(pte++, addr, data); if (err) break; @@ -2787,7 +2793,7 @@ static inline int pte_unmap_same(struct vm_fault *vmf) #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION) if (sizeof(pte_t) > sizeof(unsigned long)) { spin_lock(vmf->ptl); - same = pte_same(*vmf->pte, vmf->orig_pte); + same = pte_same(ptep_get(vmf->pte), vmf->orig_pte); spin_unlock(vmf->ptl); } #endif @@ -2838,7 +2844,7 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, pte_t entry; vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); - if (unlikely(!vmf->pte || !pte_same(*vmf->pte, vmf->orig_pte))) { + if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { /* * Other thread has already handled the fault * and update local tlb only @@ -2866,7 +2872,7 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, /* Re-validate under PTL if the page is still mapped */ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); - if (unlikely(!vmf->pte || !pte_same(*vmf->pte, vmf->orig_pte))) { + if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { /* The PTE changed under us, update local tlb */ if (vmf->pte) update_mmu_tlb(vma, addr, vmf->pte); @@ -3114,7 +3120,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * Re-check the pte - we dropped the lock */ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); - if (likely(vmf->pte && pte_same(*vmf->pte, vmf->orig_pte))) { + if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { if (old_folio) { if (!folio_test_anon(old_folio)) { dec_mm_counter(mm, mm_counter_file(&old_folio->page)); @@ -3241,7 +3247,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) * We might have raced with another page fault while we released the * pte_offset_map_lock. 
*/ - if (!pte_same(*vmf->pte, vmf->orig_pte)) { + if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) { update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); return VM_FAULT_NOPAGE; @@ -3336,7 +3342,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) struct folio *folio = NULL; if (likely(!unshare)) { - if (userfaultfd_pte_wp(vma, *vmf->pte)) { + if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); return handle_userfault(vmf, VM_UFFD_WP); } @@ -3598,7 +3604,7 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (likely(vmf->pte && pte_same(*vmf->pte, vmf->orig_pte))) + if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); if (vmf->pte) @@ -3643,7 +3649,7 @@ static vm_fault_t pte_marker_clear(struct vm_fault *vmf) * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_SWAPIN_ERROR. * So is_pte_marker() check is not enough to safely drop the pte. */ - if (pte_same(vmf->orig_pte, *vmf->pte)) + if (pte_same(vmf->orig_pte, ptep_get(vmf->pte))) pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; @@ -3739,7 +3745,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte || - !pte_same(*vmf->pte, vmf->orig_pte))) + !pte_same(ptep_get(vmf->pte), + vmf->orig_pte))) goto unlock; /* @@ -3816,7 +3823,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) */ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (likely(vmf->pte && pte_same(*vmf->pte, vmf->orig_pte))) + if (likely(vmf->pte && + pte_same(ptep_get(vmf->pte), vmf->orig_pte))) ret = VM_FAULT_OOM; goto unlock; } @@ -3886,7 +3894,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) */ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (unlikely(!vmf->pte || !pte_same(*vmf->pte, vmf->orig_pte))) + if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) goto out_nomap; if (unlikely(!folio_test_uptodate(folio))) { @@ -4331,9 +4339,9 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr) static bool vmf_pte_changed(struct vm_fault *vmf) { if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID) - return !pte_same(*vmf->pte, vmf->orig_pte); + return !pte_same(ptep_get(vmf->pte), vmf->orig_pte); - return !pte_none(*vmf->pte); + return !pte_none(ptep_get(vmf->pte)); } /** @@ -4643,7 +4651,7 @@ static vm_fault_t do_fault(struct vm_fault *vmf) * we don't have concurrent modification by hardware * followed by an update. */ - if (unlikely(pte_none(*vmf->pte))) + if (unlikely(pte_none(ptep_get(vmf->pte)))) ret = VM_FAULT_SIGBUS; else ret = VM_FAULT_NOPAGE; @@ -4699,7 +4707,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) * the pfn may be screwed if the read is non atomic. 
*/ spin_lock(vmf->ptl); - if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { + if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); goto out; } @@ -4772,7 +4780,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) vmf->address, &vmf->ptl); if (unlikely(!vmf->pte)) goto out; - if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { + if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); goto out; } @@ -4930,7 +4938,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) spin_lock(vmf->ptl); entry = vmf->orig_pte; - if (unlikely(!pte_same(*vmf->pte, entry))) { + if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) { update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); goto unlock; } @@ -5416,7 +5424,7 @@ int follow_pte(struct mm_struct *mm, unsigned long address, ptep = pte_offset_map_lock(mm, pmd, address, ptlp); if (!ptep) goto out; - if (!pte_present(*ptep)) + if (!pte_present(ptep_get(ptep))) goto unlock; *ptepp = ptep; return 0; @@ -5453,7 +5461,7 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address, ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); if (ret) return ret; - *pfn = pte_pfn(*ptep); + *pfn = pte_pfn(ptep_get(ptep)); pte_unmap_unlock(ptep, ptl); return 0; } @@ -5473,7 +5481,7 @@ int follow_phys(struct vm_area_struct *vma, if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) goto out; - pte = *ptep; + pte = ptep_get(ptep); if ((flags & FOLL_WRITE) && !pte_write(pte)) goto unlock; @@ -5517,7 +5525,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, retry: if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) return -EINVAL; - pte = *ptep; + pte = ptep_get(ptep); pte_unmap_unlock(ptep, ptl); prot = pgprot_val(pte_pgprot(pte)); @@ -5533,7 +5541,7 @@ retry: if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) goto out_unmap; - if (!pte_same(pte, *ptep)) { + if (!pte_same(pte, ptep_get(ptep))) { pte_unmap_unlock(ptep, ptl); iounmap(maddr); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 0241bb64978b..edc25195f5bd 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -508,6 +508,7 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr, unsigned long flags = qp->flags; bool has_unmovable = false; pte_t *pte, *mapped_pte; + pte_t ptent; spinlock_t *ptl; ptl = pmd_trans_huge_lock(pmd, vma); @@ -520,9 +521,10 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr, return 0; } for (; addr != end; pte++, addr += PAGE_SIZE) { - if (!pte_present(*pte)) + ptent = ptep_get(pte); + if (!pte_present(ptent)) continue; - folio = vm_normal_folio(vma, addr, *pte); + folio = vm_normal_folio(vma, addr, ptent); if (!folio || folio_is_zone_device(folio)) continue; /* diff --git a/mm/migrate.c b/mm/migrate.c index 363562992046..ce35afdbc1e3 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -188,6 +188,7 @@ static bool remove_migration_pte(struct folio *folio, while (page_vma_mapped_walk(&pvmw)) { rmap_t rmap_flags = RMAP_NONE; + pte_t old_pte; pte_t pte; swp_entry_t entry; struct page *new; @@ -210,17 +211,18 @@ static bool remove_migration_pte(struct folio *folio, folio_get(folio); pte = mk_pte(new, READ_ONCE(vma->vm_page_prot)); - if (pte_swp_soft_dirty(*pvmw.pte)) + old_pte = ptep_get(pvmw.pte); + if (pte_swp_soft_dirty(old_pte)) pte = pte_mksoft_dirty(pte); - entry = pte_to_swp_entry(*pvmw.pte); + entry = pte_to_swp_entry(old_pte); if (!is_migration_entry_young(entry)) pte = pte_mkold(pte); if (folio_test_dirty(folio) && 
is_migration_entry_dirty(entry)) pte = pte_mkdirty(pte); if (is_writable_migration_entry(entry)) pte = pte_mkwrite(pte); - else if (pte_swp_uffd_wp(*pvmw.pte)) + else if (pte_swp_uffd_wp(old_pte)) pte = pte_mkuffd_wp(pte); if (folio_test_anon(folio) && !is_readable_migration_entry(entry)) @@ -234,9 +236,9 @@ static bool remove_migration_pte(struct folio *folio, entry = make_readable_device_private_entry( page_to_pfn(new)); pte = swp_entry_to_pte(entry); - if (pte_swp_soft_dirty(*pvmw.pte)) + if (pte_swp_soft_dirty(old_pte)) pte = pte_swp_mksoft_dirty(pte); - if (pte_swp_uffd_wp(*pvmw.pte)) + if (pte_swp_uffd_wp(old_pte)) pte = pte_swp_mkuffd_wp(pte); } @@ -308,7 +310,7 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, if (!ptep) return; - pte = *ptep; + pte = ptep_get(ptep); pte_unmap(ptep); if (!is_swap_pte(pte)) diff --git a/mm/migrate_device.c b/mm/migrate_device.c index a14af6b12b04..02d272b909b5 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -111,7 +111,7 @@ again: swp_entry_t entry; pte_t pte; - pte = *ptep; + pte = ptep_get(ptep); if (pte_none(pte)) { if (vma_is_anonymous(vma)) { @@ -194,7 +194,7 @@ again: bool anon_exclusive; pte_t swp_pte; - flush_cache_page(vma, addr, pte_pfn(*ptep)); + flush_cache_page(vma, addr, pte_pfn(pte)); anon_exclusive = PageAnon(page) && PageAnonExclusive(page); if (anon_exclusive) { pte = ptep_clear_flush(vma, addr, ptep); @@ -573,6 +573,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, pud_t *pudp; pmd_t *pmdp; pte_t *ptep; + pte_t orig_pte; /* Only allow populating anonymous memory */ if (!vma_is_anonymous(vma)) @@ -628,16 +629,18 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); if (!ptep) goto abort; + orig_pte = ptep_get(ptep); + if (check_stable_address_space(mm)) goto unlock_abort; - if (pte_present(*ptep)) { - unsigned long pfn = pte_pfn(*ptep); + if (pte_present(orig_pte)) { + unsigned long pfn = pte_pfn(orig_pte); if (!is_zero_pfn(pfn)) goto unlock_abort; flush = true; - } else if (!pte_none(*ptep)) + } else if (!pte_none(orig_pte)) goto unlock_abort; /* @@ -654,7 +657,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, get_page(page); if (flush) { - flush_cache_page(vma, addr, pte_pfn(*ptep)); + flush_cache_page(vma, addr, pte_pfn(orig_pte)); ptep_clear_flush_notify(vma, addr, ptep); set_pte_at_notify(mm, addr, ptep, entry); update_mmu_cache(vma, addr, ptep); diff --git a/mm/mincore.c b/mm/mincore.c index f33f6a0b1ded..b7f7a516b26c 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -119,7 +119,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, return 0; } for (; addr != end; ptep++, addr += PAGE_SIZE) { - pte_t pte = *ptep; + pte_t pte = ptep_get(ptep); /* We need to do cache lookup too for pte markers */ if (pte_none_mostly(pte)) diff --git a/mm/mlock.c b/mm/mlock.c index 9f2b1173b1b1..d7db94519884 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -312,6 +312,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, struct vm_area_struct *vma = walk->vma; spinlock_t *ptl; pte_t *start_pte, *pte; + pte_t ptent; struct folio *folio; ptl = pmd_trans_huge_lock(pmd, vma); @@ -334,9 +335,10 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, return 0; } for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) { - if (!pte_present(*pte)) + ptent = ptep_get(pte); + if (!pte_present(ptent)) continue; - folio = vm_normal_folio(vma, addr, *pte); + folio = vm_normal_folio(vma, 
addr, ptent); if (!folio || folio_is_zone_device(folio)) continue; if (folio_test_large(folio)) diff --git a/mm/mprotect.c b/mm/mprotect.c index 64e1df0af514..327a6eb90afb 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -105,7 +105,7 @@ static long change_pte_range(struct mmu_gather *tlb, flush_tlb_batched_pending(vma->vm_mm); arch_enter_lazy_mmu_mode(); do { - oldpte = *pte; + oldpte = ptep_get(pte); if (pte_present(oldpte)) { pte_t ptent; @@ -544,7 +544,8 @@ long change_protection(struct mmu_gather *tlb, static int prot_none_pte_entry(pte_t *pte, unsigned long addr, unsigned long next, struct mm_walk *walk) { - return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ? + return pfn_modify_allowed(pte_pfn(ptep_get(pte)), + *(pgprot_t *)(walk->private)) ? 0 : -EACCES; } @@ -552,7 +553,8 @@ static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long next, struct mm_walk *walk) { - return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ? + return pfn_modify_allowed(pte_pfn(ptep_get(pte)), + *(pgprot_t *)(walk->private)) ? 0 : -EACCES; } diff --git a/mm/mremap.c b/mm/mremap.c index bfc3d1902a94..8ec184ac90ff 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -188,7 +188,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE, new_pte++, new_addr += PAGE_SIZE) { - if (pte_none(*old_pte)) + if (pte_none(ptep_get(old_pte))) continue; pte = ptep_get_and_clear(mm, old_addr, old_pte); diff --git a/mm/page_table_check.c b/mm/page_table_check.c index 0c511330dbc9..8f89f9c8f0df 100644 --- a/mm/page_table_check.c +++ b/mm/page_table_check.c @@ -190,7 +190,7 @@ void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr, if (&init_mm == mm) return; - __page_table_check_pte_clear(mm, addr, *ptep); + __page_table_check_pte_clear(mm, addr, ptep_get(ptep)); if (pte_user_accessible_page(pte)) { page_table_check_set(mm, addr, pte_pfn(pte), PAGE_SIZE >> PAGE_SHIFT, @@ -243,7 +243,7 @@ void __page_table_check_pte_clear_range(struct mm_struct *mm, if (WARN_ON(!ptep)) return; for (i = 0; i < PTRS_PER_PTE; i++) { - __page_table_check_pte_clear(mm, addr, *ptep); + __page_table_check_pte_clear(mm, addr, ptep_get(ptep)); addr += PAGE_SIZE; ptep++; } diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 2af734274073..49e0d28f0379 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -15,6 +15,8 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw) static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp) { + pte_t ptent; + if (pvmw->flags & PVMW_SYNC) { /* Use the stricter lookup */ pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd, @@ -35,10 +37,12 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp) if (!pvmw->pte) return false; + ptent = ptep_get(pvmw->pte); + if (pvmw->flags & PVMW_MIGRATION) { - if (!is_swap_pte(*pvmw->pte)) + if (!is_swap_pte(ptent)) return false; - } else if (is_swap_pte(*pvmw->pte)) { + } else if (is_swap_pte(ptent)) { swp_entry_t entry; /* * Handle un-addressable ZONE_DEVICE memory. @@ -56,11 +60,11 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp) * For more details on device private memory see HMM * (include/linux/hmm.h or mm/hmm.c). 
*/ - entry = pte_to_swp_entry(*pvmw->pte); + entry = pte_to_swp_entry(ptent); if (!is_device_private_entry(entry) && !is_device_exclusive_entry(entry)) return false; - } else if (!pte_present(*pvmw->pte)) { + } else if (!pte_present(ptent)) { return false; } pvmw->ptl = *ptlp; @@ -90,33 +94,34 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp) static bool check_pte(struct page_vma_mapped_walk *pvmw) { unsigned long pfn; + pte_t ptent = ptep_get(pvmw->pte); if (pvmw->flags & PVMW_MIGRATION) { swp_entry_t entry; - if (!is_swap_pte(*pvmw->pte)) + if (!is_swap_pte(ptent)) return false; - entry = pte_to_swp_entry(*pvmw->pte); + entry = pte_to_swp_entry(ptent); if (!is_migration_entry(entry) && !is_device_exclusive_entry(entry)) return false; pfn = swp_offset_pfn(entry); - } else if (is_swap_pte(*pvmw->pte)) { + } else if (is_swap_pte(ptent)) { swp_entry_t entry; /* Handle un-addressable ZONE_DEVICE memory */ - entry = pte_to_swp_entry(*pvmw->pte); + entry = pte_to_swp_entry(ptent); if (!is_device_private_entry(entry) && !is_device_exclusive_entry(entry)) return false; pfn = swp_offset_pfn(entry); } else { - if (!pte_present(*pvmw->pte)) + if (!pte_present(ptent)) return false; - pfn = pte_pfn(*pvmw->pte); + pfn = pte_pfn(ptent); } return (pfn - pvmw->pfn) < pvmw->nr_pages; @@ -294,7 +299,7 @@ next_pte: goto restart; } pvmw->pte++; - } while (pte_none(*pvmw->pte)); + } while (pte_none(ptep_get(pvmw->pte))); if (!pvmw->ptl) { pvmw->ptl = ptl; diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index c7ab18a5fb77..4d454953046f 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c @@ -68,7 +68,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty) { - int changed = !pte_same(*ptep, entry); + int changed = !pte_same(ptep_get(ptep), entry); if (changed) { set_pte_at(vma->vm_mm, address, ptep, entry); flush_tlb_fix_spurious_fault(vma, address, ptep); diff --git a/mm/rmap.c b/mm/rmap.c index cd918cb9a431..0c0d8857dfce 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -826,7 +826,8 @@ static bool folio_referenced_one(struct folio *folio, } if (pvmw.pte) { - if (lru_gen_enabled() && pte_young(*pvmw.pte)) { + if (lru_gen_enabled() && + pte_young(ptep_get(pvmw.pte))) { lru_gen_look_around(&pvmw); referenced++; } @@ -956,13 +957,13 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) address = pvmw->address; if (pvmw->pte) { - pte_t entry; pte_t *pte = pvmw->pte; + pte_t entry = ptep_get(pte); - if (!pte_dirty(*pte) && !pte_write(*pte)) + if (!pte_dirty(entry) && !pte_write(entry)) continue; - flush_cache_page(vma, address, pte_pfn(*pte)); + flush_cache_page(vma, address, pte_pfn(entry)); entry = ptep_clear_flush(vma, address, pte); entry = pte_wrprotect(entry); entry = pte_mkclean(entry); @@ -1137,7 +1138,7 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) * @folio: Folio which contains page. * @page: Page to add to rmap. * @vma: VM area to add page to. - * @address: User virtual address of the mapping + * @address: User virtual address of the mapping * @exclusive: the page is exclusively owned by the current process */ static void __page_set_anon_rmap(struct folio *folio, struct page *page, @@ -1458,6 +1459,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, bool anon_exclusive, ret = true; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; + unsigned long pfn; /* * When racing against e.g. 
zap_pte_range() on another cpu, @@ -1508,8 +1510,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, break; } - subpage = folio_page(folio, - pte_pfn(*pvmw.pte) - folio_pfn(folio)); + pfn = pte_pfn(ptep_get(pvmw.pte)); + subpage = folio_page(folio, pfn - folio_pfn(folio)); address = pvmw.address; anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(subpage); @@ -1571,7 +1573,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, } pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); } else { - flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); + flush_cache_page(vma, address, pfn); /* Nuke the page table entry. */ if (should_defer_flush(mm, flags)) { /* @@ -1818,6 +1820,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, bool anon_exclusive, ret = true; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; + unsigned long pfn; /* * When racing against e.g. zap_pte_range() on another cpu, @@ -1877,6 +1880,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); + pfn = pte_pfn(ptep_get(pvmw.pte)); + if (folio_is_zone_device(folio)) { /* * Our PTE is a non-present device exclusive entry and @@ -1891,8 +1896,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); subpage = &folio->page; } else { - subpage = folio_page(folio, - pte_pfn(*pvmw.pte) - folio_pfn(folio)); + subpage = folio_page(folio, pfn - folio_pfn(folio)); } address = pvmw.address; anon_exclusive = folio_test_anon(folio) && @@ -1952,7 +1956,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, /* Nuke the hugetlb page table entry */ pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); } else { - flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); + flush_cache_page(vma, address, pfn); /* Nuke the page table entry. */ if (should_defer_flush(mm, flags)) { /* @@ -2187,6 +2191,7 @@ static bool page_make_device_exclusive_one(struct folio *folio, struct mmu_notifier_range range; swp_entry_t entry; pte_t swp_pte; + pte_t ptent; mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma->vm_mm, address, min(vma->vm_end, @@ -2198,18 +2203,19 @@ static bool page_make_device_exclusive_one(struct folio *folio, /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); - if (!pte_present(*pvmw.pte)) { + ptent = ptep_get(pvmw.pte); + if (!pte_present(ptent)) { ret = false; page_vma_mapped_walk_done(&pvmw); break; } subpage = folio_page(folio, - pte_pfn(*pvmw.pte) - folio_pfn(folio)); + pte_pfn(ptent) - folio_pfn(folio)); address = pvmw.address; /* Nuke the page table entry. */ - flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); + flush_cache_page(vma, address, pte_pfn(ptent)); pteval = ptep_clear_flush(vma, address, pvmw.pte); /* Set the dirty flag on the folio now the pte is gone. 
*/ diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 10d73a0dfcec..a044a130405b 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -133,7 +133,7 @@ static void * __meminit altmap_alloc_block_buf(unsigned long size, void __meminit vmemmap_verify(pte_t *pte, int node, unsigned long start, unsigned long end) { - unsigned long pfn = pte_pfn(*pte); + unsigned long pfn = pte_pfn(ptep_get(pte)); int actual_node = early_pfn_to_nid(pfn); if (node_distance(actual_node, node) > LOCAL_DISTANCE) @@ -146,7 +146,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, struct page *reuse) { pte_t *pte = pte_offset_kernel(pmd, addr); - if (pte_none(*pte)) { + if (pte_none(ptep_get(pte))) { pte_t entry; void *p; @@ -414,7 +414,7 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn, * with just tail struct pages. */ return vmemmap_populate_range(start, end, node, NULL, - pte_page(*pte)); + pte_page(ptep_get(pte))); } size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page)); @@ -438,7 +438,7 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn, */ next += PAGE_SIZE; rc = vmemmap_populate_range(next, last, node, NULL, - pte_page(*pte)); + pte_page(ptep_get(pte))); if (rc) return -ENOMEM; } diff --git a/mm/swap_state.c b/mm/swap_state.c index a33c60e0158f..4a5c7b748051 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -275,9 +275,9 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin, } } -/* - * If we are the only user, then try to free up the swap cache. - * +/* + * If we are the only user, then try to free up the swap cache. + * * Its ok to check the swapcache flag without the folio lock * here because we are going to recheck again inside * folio_free_swap() _with_ the lock. @@ -294,7 +294,7 @@ void free_swap_cache(struct page *page) } } -/* +/* * Perform a free_page(), also freeing any swap cache associated with * this page if it is the last user of the page. */ diff --git a/mm/swapfile.c b/mm/swapfile.c index 74dd4d2337b7..a6945c2e0d03 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1745,7 +1745,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, struct page *page = folio_file_page(folio, swp_offset(entry)); struct page *swapcache; spinlock_t *ptl; - pte_t *pte, new_pte; + pte_t *pte, new_pte, old_pte; bool hwposioned = false; int ret = 1; @@ -1757,11 +1757,14 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, hwposioned = true; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); - if (unlikely(!pte || !pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) { + if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte), + swp_entry_to_pte(entry)))) { ret = 0; goto out; } + old_pte = ptep_get(pte); + if (unlikely(hwposioned || !PageUptodate(page))) { swp_entry_t swp_entry; @@ -1793,7 +1796,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, * call and have the page locked. 
*/ VM_BUG_ON_PAGE(PageWriteback(page), page); - if (pte_swp_exclusive(*pte)) + if (pte_swp_exclusive(old_pte)) rmap_flags |= RMAP_EXCLUSIVE; page_add_anon_rmap(page, vma, addr, rmap_flags); @@ -1802,9 +1805,9 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, lru_cache_add_inactive_or_unevictable(page, vma); } new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot)); - if (pte_swp_soft_dirty(*pte)) + if (pte_swp_soft_dirty(old_pte)) new_pte = pte_mksoft_dirty(new_pte); - if (pte_swp_uffd_wp(*pte)) + if (pte_swp_uffd_wp(old_pte)) new_pte = pte_mkuffd_wp(new_pte); setpte: set_pte_at(vma->vm_mm, addr, pte, new_pte); @@ -1833,6 +1836,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned char swp_count; swp_entry_t entry; int ret; + pte_t ptent; if (!pte++) { pte = pte_offset_map(pmd, addr); @@ -1840,10 +1844,12 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, break; } - if (!is_swap_pte(*pte)) + ptent = ptep_get_lockless(pte); + + if (!is_swap_pte(ptent)) continue; - entry = pte_to_swp_entry(*pte); + entry = pte_to_swp_entry(ptent); if (swp_type(entry) != type) continue; diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 5fd787158c70..a2bf37ee276d 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -97,7 +97,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd, * registered, we firstly wr-protect a none pte which has no page cache * page backing it, then access the page. */ - if (!pte_none_mostly(*dst_pte)) + if (!pte_none_mostly(ptep_get(dst_pte))) goto out_unlock; folio = page_folio(page); @@ -230,7 +230,7 @@ static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd, goto out_unlock; } ret = -EEXIST; - if (!pte_none(*dst_pte)) + if (!pte_none(ptep_get(dst_pte))) goto out_unlock; set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte); /* No need to invalidate - it was non-present before */ diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 7382e0a60ce1..5a3bf408251b 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -103,7 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, if (!pte) return -ENOMEM; do { - BUG_ON(!pte_none(*pte)); + BUG_ON(!pte_none(ptep_get(pte))); #ifdef CONFIG_HUGETLB_PAGE size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift); @@ -472,7 +472,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, do { struct page *page = pages[*nr]; - if (WARN_ON(!pte_none(*pte))) + if (WARN_ON(!pte_none(ptep_get(pte)))) return -EBUSY; if (WARN_ON(!page)) return -ENOMEM; @@ -704,7 +704,7 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) return NULL; ptep = pte_offset_kernel(pmd, addr); - pte = *ptep; + pte = ptep_get(ptep); if (pte_present(pte)) page = pte_page(pte); diff --git a/mm/vmscan.c b/mm/vmscan.c index 3f64c8d9f629..e305c11ec8fc 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4037,15 +4037,16 @@ restart: for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) { unsigned long pfn; struct folio *folio; + pte_t ptent = ptep_get(pte + i); total++; walk->mm_stats[MM_LEAF_TOTAL]++; - pfn = get_pte_pfn(pte[i], args->vma, addr); + pfn = get_pte_pfn(ptent, args->vma, addr); if (pfn == -1) continue; - if (!pte_young(pte[i])) { + if (!pte_young(ptent)) { walk->mm_stats[MM_LEAF_OLD]++; continue; } @@ -4060,7 +4061,7 @@ restart: young++; walk->mm_stats[MM_LEAF_YOUNG]++; - if (pte_dirty(pte[i]) && !folio_test_dirty(folio) && + if (pte_dirty(ptent) && !folio_test_dirty(folio) && !(folio_test_anon(folio) && folio_test_swapbacked(folio) && 
!folio_test_swapcache(folio))) folio_mark_dirty(folio); @@ -4703,12 +4704,13 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) { unsigned long pfn; + pte_t ptent = ptep_get(pte + i); - pfn = get_pte_pfn(pte[i], pvmw->vma, addr); + pfn = get_pte_pfn(ptent, pvmw->vma, addr); if (pfn == -1) continue; - if (!pte_young(pte[i])) + if (!pte_young(ptent)) continue; folio = get_pfn_folio(pfn, memcg, pgdat, !walk || walk->can_swap); @@ -4720,7 +4722,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) young++; - if (pte_dirty(pte[i]) && !folio_test_dirty(folio) && + if (pte_dirty(ptent) && !folio_test_dirty(folio) && !(folio_test_anon(folio) && folio_test_swapbacked(folio) && !folio_test_swapcache(folio))) folio_mark_dirty(folio); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 51e4882d0873..fb37adecfc91 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2578,6 +2578,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, { kvm_pfn_t pfn; pte_t *ptep; + pte_t pte; spinlock_t *ptl; int r; @@ -2601,14 +2602,16 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, return r; } - if (write_fault && !pte_write(*ptep)) { + pte = ptep_get(ptep); + + if (write_fault && !pte_write(pte)) { pfn = KVM_PFN_ERR_RO_FAULT; goto out; } if (writable) - *writable = pte_write(*ptep); - pfn = pte_pfn(*ptep); + *writable = pte_write(pte); + pfn = pte_pfn(pte); /* * Get a reference here because callers of *hva_to_pfn* and @@ -2626,7 +2629,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, * tail pages of non-compound higher order allocations, which * would then underflow the refcount when the caller does the * required put_page. Don't allow those pages here. - */ + */ if (!kvm_try_get_pfn(pfn)) r = -EFAULT; -- cgit v1.2.3 From f999f38b4e6f6fd444acdc289a156ad781919a9c Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Mon, 12 Jun 2023 11:38:09 +0200 Subject: mm: zswap: add pool shrinking mechanism Patch series "mm: zswap: move writeback LRU from zpool to zswap", v3. This series aims to improve the zswap reclaim mechanism by reorganizing the LRU management. In the current implementation, the LRU is maintained within each zpool driver, resulting in duplicated code across the three drivers. The proposed change consists in moving the LRU management from the individual implementations up to the zswap layer. The primary objective of this refactoring effort is to simplify the codebase. By unifying the reclaim loop and consolidating LRU handling within zswap, we can eliminate redundant code and improve maintainability. Additionally, this change enables the reclamation of stored pages in their actual LRU order. Presently, the zpool drivers link backing pages in an LRU, causing compressed pages with different LRU positions to be written back simultaneously. The series consists of several patches. The first patch implements the LRU and the reclaim loop in zswap, but it is not used yet because all three driver implementations are marked as zpool_evictable. The following three commits modify each zpool driver to be not zpool_evictable, allowing the use of the reclaim loop in zswap. As the drivers removed their shrink functions, the zpool interface is then trimmed by removing zpool_evictable, zpool_ops, and zpool_shrink. Finally, the code in zswap is further cleaned up by simplifying the writeback function and removing the now unnecessary zswap_header. 
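For orientation, here is a minimal sketch of the pool-wide LRU scheme this cover letter describes, using placeholder demo_* names (they are assumptions for illustration only; the authoritative fields and the reclaim loop are in the mm/zswap.c hunk of the first patch below, as zswap_pool.lru, zswap_pool.lru_lock and zswap_reclaim_entry()):

#include <linux/list.h>
#include <linux/spinlock.h>

/* Sketch only: one LRU per pool, owned by the zswap layer instead of the
 * zpool backend. Entries go to the head on store and come off the tail
 * on reclaim. */
struct demo_entry {
	struct list_head lru;
	unsigned long handle;
};

struct demo_pool {
	struct list_head lru;		/* head = most recently stored */
	spinlock_t lru_lock;
};

static void demo_lru_add(struct demo_pool *pool, struct demo_entry *entry)
{
	spin_lock(&pool->lru_lock);
	list_add(&entry->lru, &pool->lru);
	spin_unlock(&pool->lru_lock);
}

/* Pop the coldest entry; the caller then writes it back to swap. */
static struct demo_entry *demo_lru_pop(struct demo_pool *pool)
{
	struct demo_entry *entry = NULL;

	spin_lock(&pool->lru_lock);
	if (!list_empty(&pool->lru)) {
		entry = list_last_entry(&pool->lru, struct demo_entry, lru);
		list_del_init(&entry->lru);
	}
	spin_unlock(&pool->lru_lock);
	return entry;
}

Because the pop is tail-first on a single pool-wide list, writeback proceeds in the actual LRU order of stored pages, which is the ordering improvement the cover letter claims over the per-backend LRUs being removed.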
This patch (of 7): Each zpool driver (zbud, z3fold and zsmalloc) implements its own shrink function, which is called from zpool_shrink. However, with this commit, a unified shrink function is added to zswap. The ultimate goal is to eliminate the need for zpool_shrink once all zpool implementations have dropped their shrink code. To ensure the functionality of each commit, this change focuses solely on adding the mechanism itself. No modifications are made to the backends, meaning that functionally, there are no immediate changes. The zswap mechanism will only come into effect once the backends have removed their shrink code. The subsequent commits will address the modifications needed in the backends. Link: https://lkml.kernel.org/r/20230612093815.133504-1-cerasuolodomenico@gmail.com Link: https://lkml.kernel.org/r/20230612093815.133504-2-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Acked-by: Nhat Pham Tested-by: Yosry Ahmed Acked-by: Johannes Weiner Reviewed-by: Yosry Ahmed Reviewed-by: Sergey Senozhatsky Cc: Dan Streetman Cc: Minchan Kim Cc: Seth Jennings Cc: Vitaly Wool Signed-off-by: Andrew Morton --- mm/zswap.c | 97 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 92 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/zswap.c b/mm/zswap.c index 9fa86265f6dd..0024ec5ed574 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -154,6 +154,12 @@ struct crypto_acomp_ctx { struct mutex *mutex; }; +/* + * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock. + * The only case where lru_lock is not acquired while holding tree.lock is + * when a zswap_entry is taken off the lru for writeback, in that case it + * needs to be verified that it's still valid in the tree. + */ struct zswap_pool { struct zpool *zpool; struct crypto_acomp_ctx __percpu *acomp_ctx; @@ -163,6 +169,8 @@ struct zswap_pool { struct work_struct shrink_work; struct hlist_node node; char tfm_name[CRYPTO_MAX_ALG_NAME]; + struct list_head lru; + spinlock_t lru_lock; }; /* @@ -180,10 +188,12 @@ struct zswap_pool { * be held while changing the refcount. Since the lock must * be held, there is no reason to also make refcount atomic. * length - the length in bytes of the compressed page data. Needed during - * decompression. For a same value filled page length is 0. + * decompression. For a same value filled page length is 0, and both + * pool and lru are invalid and must be ignored. * pool - the zswap_pool the entry's data is in * handle - zpool allocation handle that stores the compressed page data * value - value of the same-value filled pages which have same content + * lru - handle to the pool's lru used to evict pages. 
*/ struct zswap_entry { struct rb_node rbnode; @@ -196,6 +206,7 @@ struct zswap_entry { unsigned long value; }; struct obj_cgroup *objcg; + struct list_head lru; }; struct zswap_header { @@ -368,6 +379,12 @@ static void zswap_free_entry(struct zswap_entry *entry) if (!entry->length) atomic_dec(&zswap_same_filled_pages); else { + /* zpool_evictable will be removed once all 3 backends have migrated */ + if (!zpool_evictable(entry->pool->zpool)) { + spin_lock(&entry->pool->lru_lock); + list_del(&entry->lru); + spin_unlock(&entry->pool->lru_lock); + } zpool_free(entry->pool->zpool, entry->handle); zswap_pool_put(entry->pool); } @@ -588,14 +605,72 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) return NULL; } +static int zswap_reclaim_entry(struct zswap_pool *pool) +{ + struct zswap_header *zhdr; + struct zswap_entry *entry; + struct zswap_tree *tree; + pgoff_t swpoffset; + int ret; + + /* Get an entry off the LRU */ + spin_lock(&pool->lru_lock); + if (list_empty(&pool->lru)) { + spin_unlock(&pool->lru_lock); + return -EINVAL; + } + entry = list_last_entry(&pool->lru, struct zswap_entry, lru); + list_del_init(&entry->lru); + zhdr = zpool_map_handle(pool->zpool, entry->handle, ZPOOL_MM_RO); + tree = zswap_trees[swp_type(zhdr->swpentry)]; + zpool_unmap_handle(pool->zpool, entry->handle); + /* + * Once the lru lock is dropped, the entry might get freed. The + * swpoffset is copied to the stack, and entry isn't deref'd again + * until the entry is verified to still be alive in the tree. + */ + swpoffset = swp_offset(zhdr->swpentry); + spin_unlock(&pool->lru_lock); + + /* Check for invalidate() race */ + spin_lock(&tree->lock); + if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) { + ret = -EAGAIN; + goto unlock; + } + /* Hold a reference to prevent a free during writeback */ + zswap_entry_get(entry); + spin_unlock(&tree->lock); + + ret = zswap_writeback_entry(pool->zpool, entry->handle); + + spin_lock(&tree->lock); + if (ret) { + /* Writeback failed, put entry back on LRU */ + spin_lock(&pool->lru_lock); + list_move(&entry->lru, &pool->lru); + spin_unlock(&pool->lru_lock); + } + + /* Drop local reference */ + zswap_entry_put(tree, entry); +unlock: + spin_unlock(&tree->lock); + return ret ? -EAGAIN : 0; +} + static void shrink_worker(struct work_struct *w) { struct zswap_pool *pool = container_of(w, typeof(*pool), shrink_work); int ret, failures = 0; + /* zpool_evictable will be removed once all 3 backends have migrated */ do { - ret = zpool_shrink(pool->zpool, 1, NULL); + if (zpool_evictable(pool->zpool)) + ret = zpool_shrink(pool->zpool, 1, NULL); + else + ret = zswap_reclaim_entry(pool); if (ret) { zswap_reject_reclaim_fail++; if (ret != -EAGAIN) @@ -659,6 +734,8 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor) */ kref_init(&pool->kref); INIT_LIST_HEAD(&pool->list); + INIT_LIST_HEAD(&pool->lru); + spin_lock_init(&pool->lru_lock); INIT_WORK(&pool->shrink_work, shrink_worker); zswap_pool_debug("created", pool); @@ -1274,7 +1351,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, } /* store */ - hlen = zpool_evictable(entry->pool->zpool) ? 
sizeof(zhdr) : 0; + hlen = sizeof(zhdr); gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; if (zpool_malloc_support_movable(entry->pool->zpool)) gfp |= __GFP_HIGHMEM | __GFP_MOVABLE; @@ -1317,6 +1394,12 @@ insert_entry: zswap_entry_put(tree, dupentry); } } while (ret == -EEXIST); + /* zpool_evictable will be removed once all 3 backends have migrated */ + if (entry->length && !zpool_evictable(entry->pool->zpool)) { + spin_lock(&entry->pool->lru_lock); + list_add(&entry->lru, &entry->pool->lru); + spin_unlock(&entry->pool->lru_lock); + } spin_unlock(&tree->lock); /* update stats */ @@ -1398,8 +1481,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset, /* decompress */ dlen = PAGE_SIZE; src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO); - if (zpool_evictable(entry->pool->zpool)) - src += sizeof(struct zswap_header); + src += sizeof(struct zswap_header); if (!zpool_can_sleep_mapped(entry->pool->zpool)) { memcpy(tmp, src, entry->length); @@ -1432,6 +1514,11 @@ freeentry: if (!ret && zswap_exclusive_loads_enabled) { zswap_invalidate_entry(tree, entry); *exclusive = true; + } else if (entry->length && !zpool_evictable(entry->pool->zpool)) { + /* zpool_evictable will be removed once all 3 backends have migrated */ + spin_lock(&entry->pool->lru_lock); + list_move(&entry->lru, &entry->pool->lru); + spin_unlock(&entry->pool->lru_lock); } spin_unlock(&tree->lock); -- cgit v1.2.3 From 1be537c6451b7203776063a8655c9096ebc50790 Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Mon, 12 Jun 2023 11:38:10 +0200 Subject: mm: zswap: remove page reclaim logic from zbud Switch zbud to the new generic zswap LRU and remove its custom implementation. Link: https://lkml.kernel.org/r/20230612093815.133504-3-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Acked-by: Johannes Weiner Cc: Dan Streetman Cc: Minchan Kim Cc: Nhat Pham Cc: Sergey Senozhatsky Cc: Seth Jennings Cc: Vitaly Wool Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/zbud.c | 163 +------------------------------------------------------------- 1 file changed, 1 insertion(+), 162 deletions(-) (limited to 'mm') diff --git a/mm/zbud.c b/mm/zbud.c index 3acd26193920..9d35fd4091ed 100644 --- a/mm/zbud.c +++ b/mm/zbud.c @@ -83,11 +83,7 @@ struct zbud_pool; * its free region. * @buddied: list tracking the zbud pages that contain two buddies; * these zbud pages are full - * @lru: list tracking the zbud pages in LRU order by most recently - * added buddy. * @pages_nr: number of zbud pages in the pool. - * @zpool: zpool driver - * @zpool_ops: zpool operations structure with an evict callback * * This structure is allocated at pool creation time and maintains metadata * pertaining to a particular zbud pool. @@ -102,26 +98,20 @@ struct zbud_pool { struct list_head buddied; struct list_head unbuddied[NCHUNKS]; }; - struct list_head lru; u64 pages_nr; - struct zpool *zpool; - const struct zpool_ops *zpool_ops; }; /* * struct zbud_header - zbud page metadata occupying the first chunk of each * zbud page. 
* @buddy: links the zbud page into the unbuddied/buddied lists in the pool - * @lru: links the zbud page into the lru list in the pool * @first_chunks: the size of the first buddy in chunks, 0 if free * @last_chunks: the size of the last buddy in chunks, 0 if free */ struct zbud_header { struct list_head buddy; - struct list_head lru; unsigned int first_chunks; unsigned int last_chunks; - bool under_reclaim; }; /***************** @@ -149,8 +139,6 @@ static struct zbud_header *init_zbud_page(struct page *page) zhdr->first_chunks = 0; zhdr->last_chunks = 0; INIT_LIST_HEAD(&zhdr->buddy); - INIT_LIST_HEAD(&zhdr->lru); - zhdr->under_reclaim = false; return zhdr; } @@ -221,7 +209,6 @@ static struct zbud_pool *zbud_create_pool(gfp_t gfp) for_each_unbuddied_list(i, 0) INIT_LIST_HEAD(&pool->unbuddied[i]); INIT_LIST_HEAD(&pool->buddied); - INIT_LIST_HEAD(&pool->lru); pool->pages_nr = 0; return pool; } @@ -310,11 +297,6 @@ found: list_add(&zhdr->buddy, &pool->buddied); } - /* Add/move zbud page to beginning of LRU */ - if (!list_empty(&zhdr->lru)) - list_del(&zhdr->lru); - list_add(&zhdr->lru, &pool->lru); - *handle = encode_handle(zhdr, bud); spin_unlock(&pool->lock); @@ -325,11 +307,6 @@ found: * zbud_free() - frees the allocation associated with the given handle * @pool: pool in which the allocation resided * @handle: handle associated with the allocation returned by zbud_alloc() - * - * In the case that the zbud page in which the allocation resides is under - * reclaim, as indicated by the PG_reclaim flag being set, this function - * only sets the first|last_chunks to 0. The page is actually freed - * once both buddies are evicted (see zbud_reclaim_page() below). */ static void zbud_free(struct zbud_pool *pool, unsigned long handle) { @@ -345,18 +322,11 @@ static void zbud_free(struct zbud_pool *pool, unsigned long handle) else zhdr->first_chunks = 0; - if (zhdr->under_reclaim) { - /* zbud page is under reclaim, reclaim will free */ - spin_unlock(&pool->lock); - return; - } - /* Remove from existing buddy list */ list_del(&zhdr->buddy); if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { /* zbud page is empty, free */ - list_del(&zhdr->lru); free_zbud_page(zhdr); pool->pages_nr--; } else { @@ -368,110 +338,6 @@ static void zbud_free(struct zbud_pool *pool, unsigned long handle) spin_unlock(&pool->lock); } -/** - * zbud_reclaim_page() - evicts allocations from a pool page and frees it - * @pool: pool from which a page will attempt to be evicted - * @retries: number of pages on the LRU list for which eviction will - * be attempted before failing - * - * zbud reclaim is different from normal system reclaim in that the reclaim is - * done from the bottom, up. This is because only the bottom layer, zbud, has - * information on how the allocations are organized within each zbud page. This - * has the potential to create interesting locking situations between zbud and - * the user, however. - * - * To avoid these, this is how zbud_reclaim_page() should be called: - * - * The user detects a page should be reclaimed and calls zbud_reclaim_page(). - * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call - * the user-defined eviction handler with the pool and handle as arguments. - * - * If the handle can not be evicted, the eviction handler should return - * non-zero. zbud_reclaim_page() will add the zbud page back to the - * appropriate list and try the next zbud page on the LRU up to - * a user defined number of retries. 
- * - * If the handle is successfully evicted, the eviction handler should - * return 0 _and_ should have called zbud_free() on the handle. zbud_free() - * contains logic to delay freeing the page if the page is under reclaim, - * as indicated by the setting of the PG_reclaim flag on the underlying page. - * - * If all buddies in the zbud page are successfully evicted, then the - * zbud page can be freed. - * - * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are - * no pages to evict or an eviction handler is not registered, -EAGAIN if - * the retry limit was hit. - */ -static int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries) -{ - int i, ret, freechunks; - struct zbud_header *zhdr; - unsigned long first_handle = 0, last_handle = 0; - - spin_lock(&pool->lock); - if (list_empty(&pool->lru)) { - spin_unlock(&pool->lock); - return -EINVAL; - } - for (i = 0; i < retries; i++) { - zhdr = list_last_entry(&pool->lru, struct zbud_header, lru); - list_del(&zhdr->lru); - list_del(&zhdr->buddy); - /* Protect zbud page against free */ - zhdr->under_reclaim = true; - /* - * We need encode the handles before unlocking, since we can - * race with free that will set (first|last)_chunks to 0 - */ - first_handle = 0; - last_handle = 0; - if (zhdr->first_chunks) - first_handle = encode_handle(zhdr, FIRST); - if (zhdr->last_chunks) - last_handle = encode_handle(zhdr, LAST); - spin_unlock(&pool->lock); - - /* Issue the eviction callback(s) */ - if (first_handle) { - ret = pool->zpool_ops->evict(pool->zpool, first_handle); - if (ret) - goto next; - } - if (last_handle) { - ret = pool->zpool_ops->evict(pool->zpool, last_handle); - if (ret) - goto next; - } -next: - spin_lock(&pool->lock); - zhdr->under_reclaim = false; - if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { - /* - * Both buddies are now free, free the zbud page and - * return success. 
- */ - free_zbud_page(zhdr); - pool->pages_nr--; - spin_unlock(&pool->lock); - return 0; - } else if (zhdr->first_chunks == 0 || - zhdr->last_chunks == 0) { - /* add to unbuddied list */ - freechunks = num_free_chunks(zhdr); - list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); - } else { - /* add to buddied list */ - list_add(&zhdr->buddy, &pool->buddied); - } - - /* add to beginning of LRU */ - list_add(&zhdr->lru, &pool->lru); - } - spin_unlock(&pool->lock); - return -EAGAIN; -} - /** * zbud_map() - maps the allocation associated with the given handle * @pool: pool in which the allocation resides @@ -518,14 +384,7 @@ static void *zbud_zpool_create(const char *name, gfp_t gfp, const struct zpool_ops *zpool_ops, struct zpool *zpool) { - struct zbud_pool *pool; - - pool = zbud_create_pool(gfp); - if (pool) { - pool->zpool = zpool; - pool->zpool_ops = zpool_ops; - } - return pool; + return zbud_create_pool(gfp); } static void zbud_zpool_destroy(void *pool) @@ -543,25 +402,6 @@ static void zbud_zpool_free(void *pool, unsigned long handle) zbud_free(pool, handle); } -static int zbud_zpool_shrink(void *pool, unsigned int pages, - unsigned int *reclaimed) -{ - unsigned int total = 0; - int ret = -EINVAL; - - while (total < pages) { - ret = zbud_reclaim_page(pool, 8); - if (ret < 0) - break; - total++; - } - - if (reclaimed) - *reclaimed = total; - - return ret; -} - static void *zbud_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm) { @@ -585,7 +425,6 @@ static struct zpool_driver zbud_zpool_driver = { .destroy = zbud_zpool_destroy, .malloc = zbud_zpool_malloc, .free = zbud_zpool_free, - .shrink = zbud_zpool_shrink, .map = zbud_zpool_map, .unmap = zbud_zpool_unmap, .total_size = zbud_zpool_total_size, -- cgit v1.2.3 From e774a7bc7f0adba785d814ce9a329b6c5c32706a Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Mon, 12 Jun 2023 11:38:11 +0200 Subject: mm: zswap: remove page reclaim logic from z3fold Switch z3fold to the new generic zswap LRU and remove its custom implementation. Link: https://lkml.kernel.org/r/20230612093815.133504-4-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Acked-by: Johannes Weiner Cc: Dan Streetman Cc: Minchan Kim Cc: Nhat Pham Cc: Sergey Senozhatsky Cc: Seth Jennings Cc: Vitaly Wool Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/z3fold.c | 245 +----------------------------------------------------------- 1 file changed, 2 insertions(+), 243 deletions(-) (limited to 'mm') diff --git a/mm/z3fold.c b/mm/z3fold.c index 0cef845d397b..238a214de59f 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -125,13 +125,11 @@ struct z3fold_header { /** * struct z3fold_pool - stores metadata for each z3fold pool * @name: pool name - * @lock: protects pool unbuddied/lru lists + * @lock: protects pool unbuddied lists * @stale_lock: protects pool stale page list * @unbuddied: per-cpu array of lists tracking z3fold pages that contain 2- * buddies; the list each z3fold page is added to depends on * the size of its free region. - * @lru: list tracking the z3fold pages in LRU order by most recently - * added buddy. * @stale: list of pages marked for freeing * @pages_nr: number of z3fold pages in the pool. 
* @c_handle: cache for z3fold_buddy_slots allocation @@ -149,12 +147,9 @@ struct z3fold_pool { spinlock_t lock; spinlock_t stale_lock; struct list_head *unbuddied; - struct list_head lru; struct list_head stale; atomic64_t pages_nr; struct kmem_cache *c_handle; - struct zpool *zpool; - const struct zpool_ops *zpool_ops; struct workqueue_struct *compact_wq; struct workqueue_struct *release_wq; struct work_struct work; @@ -329,7 +324,6 @@ static struct z3fold_header *init_z3fold_page(struct page *page, bool headless, struct z3fold_header *zhdr = page_address(page); struct z3fold_buddy_slots *slots; - INIT_LIST_HEAD(&page->lru); clear_bit(PAGE_HEADLESS, &page->private); clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); clear_bit(NEEDS_COMPACTING, &page->private); @@ -451,8 +445,6 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) set_bit(PAGE_STALE, &page->private); clear_bit(NEEDS_COMPACTING, &page->private); spin_lock(&pool->lock); - if (!list_empty(&page->lru)) - list_del_init(&page->lru); spin_unlock(&pool->lock); if (locked) @@ -930,7 +922,6 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp) for_each_unbuddied_list(i, 0) INIT_LIST_HEAD(&unbuddied[i]); } - INIT_LIST_HEAD(&pool->lru); INIT_LIST_HEAD(&pool->stale); atomic64_set(&pool->pages_nr, 0); pool->name = name; @@ -1073,12 +1064,6 @@ found: headless: spin_lock(&pool->lock); - /* Add/move z3fold page to beginning of LRU */ - if (!list_empty(&page->lru)) - list_del(&page->lru); - - list_add(&page->lru, &pool->lru); - *handle = encode_handle(zhdr, bud); spin_unlock(&pool->lock); if (bud != HEADLESS) @@ -1115,9 +1100,6 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) * immediately so we don't care about its value any more. */ if (!page_claimed) { - spin_lock(&pool->lock); - list_del(&page->lru); - spin_unlock(&pool->lock); put_z3fold_header(zhdr); free_z3fold_page(page, true); atomic64_dec(&pool->pages_nr); @@ -1172,194 +1154,6 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) put_z3fold_header(zhdr); } -/** - * z3fold_reclaim_page() - evicts allocations from a pool page and frees it - * @pool: pool from which a page will attempt to be evicted - * @retries: number of pages on the LRU list for which eviction will - * be attempted before failing - * - * z3fold reclaim is different from normal system reclaim in that it is done - * from the bottom, up. This is because only the bottom layer, z3fold, has - * information on how the allocations are organized within each z3fold page. - * This has the potential to create interesting locking situations between - * z3fold and the user, however. - * - * To avoid these, this is how z3fold_reclaim_page() should be called: - * - * The user detects a page should be reclaimed and calls z3fold_reclaim_page(). - * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and - * call the user-defined eviction handler with the pool and handle as - * arguments. - * - * If the handle can not be evicted, the eviction handler should return - * non-zero. z3fold_reclaim_page() will add the z3fold page back to the - * appropriate list and try the next z3fold page on the LRU up to - * a user defined number of retries. - * - * If the handle is successfully evicted, the eviction handler should - * return 0 _and_ should have called z3fold_free() on the handle. 
z3fold_free() - * contains logic to delay freeing the page if the page is under reclaim, - * as indicated by the setting of the PG_reclaim flag on the underlying page. - * - * If all buddies in the z3fold page are successfully evicted, then the - * z3fold page can be freed. - * - * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are - * no pages to evict or an eviction handler is not registered, -EAGAIN if - * the retry limit was hit. - */ -static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) -{ - int i, ret = -1; - struct z3fold_header *zhdr = NULL; - struct page *page = NULL; - struct list_head *pos; - unsigned long first_handle = 0, middle_handle = 0, last_handle = 0; - struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN))); - - rwlock_init(&slots.lock); - slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE); - - spin_lock(&pool->lock); - for (i = 0; i < retries; i++) { - if (list_empty(&pool->lru)) { - spin_unlock(&pool->lock); - return -EINVAL; - } - list_for_each_prev(pos, &pool->lru) { - page = list_entry(pos, struct page, lru); - - zhdr = page_address(page); - if (test_bit(PAGE_HEADLESS, &page->private)) { - /* - * For non-headless pages, we wait to do this - * until we have the page lock to avoid racing - * with __z3fold_alloc(). Headless pages don't - * have a lock (and __z3fold_alloc() will never - * see them), but we still need to test and set - * PAGE_CLAIMED to avoid racing with - * z3fold_free(), so just do it now before - * leaving the loop. - */ - if (test_and_set_bit(PAGE_CLAIMED, &page->private)) - continue; - - break; - } - - if (!z3fold_page_trylock(zhdr)) { - zhdr = NULL; - continue; /* can't evict at this point */ - } - - /* test_and_set_bit is of course atomic, but we still - * need to do it under page lock, otherwise checking - * that bit in __z3fold_alloc wouldn't make sense - */ - if (zhdr->foreign_handles || - test_and_set_bit(PAGE_CLAIMED, &page->private)) { - z3fold_page_unlock(zhdr); - zhdr = NULL; - continue; /* can't evict such page */ - } - list_del_init(&zhdr->buddy); - zhdr->cpu = -1; - /* See comment in __z3fold_alloc. 
*/ - kref_get(&zhdr->refcount); - break; - } - - if (!zhdr) - break; - - list_del_init(&page->lru); - spin_unlock(&pool->lock); - - if (!test_bit(PAGE_HEADLESS, &page->private)) { - /* - * We need encode the handles before unlocking, and - * use our local slots structure because z3fold_free - * can zero out zhdr->slots and we can't do much - * about that - */ - first_handle = 0; - last_handle = 0; - middle_handle = 0; - memset(slots.slot, 0, sizeof(slots.slot)); - if (zhdr->first_chunks) - first_handle = __encode_handle(zhdr, &slots, - FIRST); - if (zhdr->middle_chunks) - middle_handle = __encode_handle(zhdr, &slots, - MIDDLE); - if (zhdr->last_chunks) - last_handle = __encode_handle(zhdr, &slots, - LAST); - /* - * it's safe to unlock here because we hold a - * reference to this page - */ - z3fold_page_unlock(zhdr); - } else { - first_handle = encode_handle(zhdr, HEADLESS); - last_handle = middle_handle = 0; - } - /* Issue the eviction callback(s) */ - if (middle_handle) { - ret = pool->zpool_ops->evict(pool->zpool, middle_handle); - if (ret) - goto next; - } - if (first_handle) { - ret = pool->zpool_ops->evict(pool->zpool, first_handle); - if (ret) - goto next; - } - if (last_handle) { - ret = pool->zpool_ops->evict(pool->zpool, last_handle); - if (ret) - goto next; - } -next: - if (test_bit(PAGE_HEADLESS, &page->private)) { - if (ret == 0) { - free_z3fold_page(page, true); - atomic64_dec(&pool->pages_nr); - return 0; - } - spin_lock(&pool->lock); - list_add(&page->lru, &pool->lru); - spin_unlock(&pool->lock); - clear_bit(PAGE_CLAIMED, &page->private); - } else { - struct z3fold_buddy_slots *slots = zhdr->slots; - z3fold_page_lock(zhdr); - if (kref_put(&zhdr->refcount, - release_z3fold_page_locked)) { - kmem_cache_free(pool->c_handle, slots); - return 0; - } - /* - * if we are here, the page is still not completely - * free. 
Take the global pool lock then to be able - * to add it back to the lru list - */ - spin_lock(&pool->lock); - list_add(&page->lru, &pool->lru); - spin_unlock(&pool->lock); - if (list_empty(&zhdr->buddy)) - add_to_unbuddied(pool, zhdr); - clear_bit(PAGE_CLAIMED, &page->private); - z3fold_page_unlock(zhdr); - } - - /* We started off locked to we need to lock the pool back */ - spin_lock(&pool->lock); - } - spin_unlock(&pool->lock); - return -EAGAIN; -} - /** * z3fold_map() - maps the allocation associated with the given handle * @pool: pool in which the allocation resides @@ -1470,8 +1264,6 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) spin_lock(&pool->lock); if (!list_empty(&zhdr->buddy)) list_del_init(&zhdr->buddy); - if (!list_empty(&page->lru)) - list_del_init(&page->lru); spin_unlock(&pool->lock); kref_get(&zhdr->refcount); @@ -1531,9 +1323,6 @@ static int z3fold_page_migrate(struct page *newpage, struct page *page, encode_handle(new_zhdr, MIDDLE); set_bit(NEEDS_COMPACTING, &newpage->private); new_zhdr->cpu = smp_processor_id(); - spin_lock(&pool->lock); - list_add(&newpage->lru, &pool->lru); - spin_unlock(&pool->lock); __SetPageMovable(newpage, &z3fold_mops); z3fold_page_unlock(new_zhdr); @@ -1559,9 +1348,6 @@ static void z3fold_page_putback(struct page *page) INIT_LIST_HEAD(&page->lru); if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) return; - spin_lock(&pool->lock); - list_add(&page->lru, &pool->lru); - spin_unlock(&pool->lock); if (list_empty(&zhdr->buddy)) add_to_unbuddied(pool, zhdr); clear_bit(PAGE_CLAIMED, &page->private); @@ -1582,14 +1368,7 @@ static void *z3fold_zpool_create(const char *name, gfp_t gfp, const struct zpool_ops *zpool_ops, struct zpool *zpool) { - struct z3fold_pool *pool; - - pool = z3fold_create_pool(name, gfp); - if (pool) { - pool->zpool = zpool; - pool->zpool_ops = zpool_ops; - } - return pool; + return z3fold_create_pool(name, gfp); } static void z3fold_zpool_destroy(void *pool) @@ -1607,25 +1386,6 @@ static void z3fold_zpool_free(void *pool, unsigned long handle) z3fold_free(pool, handle); } -static int z3fold_zpool_shrink(void *pool, unsigned int pages, - unsigned int *reclaimed) -{ - unsigned int total = 0; - int ret = -EINVAL; - - while (total < pages) { - ret = z3fold_reclaim_page(pool, 8); - if (ret < 0) - break; - total++; - } - - if (reclaimed) - *reclaimed = total; - - return ret; -} - static void *z3fold_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm) { @@ -1649,7 +1409,6 @@ static struct zpool_driver z3fold_zpool_driver = { .destroy = z3fold_zpool_destroy, .malloc = z3fold_zpool_malloc, .free = z3fold_zpool_free, - .shrink = z3fold_zpool_shrink, .map = z3fold_zpool_map, .unmap = z3fold_zpool_unmap, .total_size = z3fold_zpool_total_size, -- cgit v1.2.3 From b3067742ae36b36b959835a33937c4dc458f8183 Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Mon, 12 Jun 2023 11:38:12 +0200 Subject: mm: zswap: remove page reclaim logic from zsmalloc Switch zsmalloc to the new generic zswap LRU and remove its custom implementation. 
Link: https://lkml.kernel.org/r/20230612093815.133504-5-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Acked-by: Johannes Weiner Acked-by: Nhat Pham Acked-by: Minchan Kim Tested-by: Yosry Ahmed Acked-by: Sergey Senozhatsky Cc: Dan Streetman Cc: Seth Jennings Cc: Vitaly Wool Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 392 ++-------------------------------------------------------- 1 file changed, 12 insertions(+), 380 deletions(-) (limited to 'mm') diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index c0d433541636..e4d1ad521738 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -107,21 +107,8 @@ */ #define OBJ_ALLOCATED_TAG 1 -#ifdef CONFIG_ZPOOL -/* - * The second least-significant bit in the object's header identifies if the - * value stored at the header is a deferred handle from the last reclaim - * attempt. - * - * As noted above, this is valid because we have room for two bits. - */ -#define OBJ_DEFERRED_HANDLE_TAG 2 -#define OBJ_TAG_BITS 2 -#define OBJ_TAG_MASK (OBJ_ALLOCATED_TAG | OBJ_DEFERRED_HANDLE_TAG) -#else #define OBJ_TAG_BITS 1 #define OBJ_TAG_MASK OBJ_ALLOCATED_TAG -#endif /* CONFIG_ZPOOL */ #define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS) #define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1) @@ -227,12 +214,6 @@ struct link_free { * Handle of allocated object. */ unsigned long handle; -#ifdef CONFIG_ZPOOL - /* - * Deferred handle of a reclaimed object. - */ - unsigned long deferred_handle; -#endif }; }; @@ -250,13 +231,6 @@ struct zs_pool { /* Compact classes */ struct shrinker shrinker; -#ifdef CONFIG_ZPOOL - /* List tracking the zspages in LRU order by most recently added object */ - struct list_head lru; - struct zpool *zpool; - const struct zpool_ops *zpool_ops; -#endif - #ifdef CONFIG_ZSMALLOC_STAT struct dentry *stat_dentry; #endif @@ -279,13 +253,6 @@ struct zspage { unsigned int freeobj; struct page *first_page; struct list_head list; /* fullness list */ - -#ifdef CONFIG_ZPOOL - /* links the zspage to the lru list in the pool */ - struct list_head lru; - bool under_reclaim; -#endif - struct zs_pool *pool; rwlock_t lock; }; @@ -393,14 +360,7 @@ static void *zs_zpool_create(const char *name, gfp_t gfp, * different contexts and its caller must provide a valid * gfp mask. 
*/ - struct zs_pool *pool = zs_create_pool(name); - - if (pool) { - pool->zpool = zpool; - pool->zpool_ops = zpool_ops; - } - - return pool; + return zs_create_pool(name); } static void zs_zpool_destroy(void *pool) @@ -422,27 +382,6 @@ static void zs_zpool_free(void *pool, unsigned long handle) zs_free(pool, handle); } -static int zs_reclaim_page(struct zs_pool *pool, unsigned int retries); - -static int zs_zpool_shrink(void *pool, unsigned int pages, - unsigned int *reclaimed) -{ - unsigned int total = 0; - int ret = -EINVAL; - - while (total < pages) { - ret = zs_reclaim_page(pool, 8); - if (ret < 0) - break; - total++; - } - - if (reclaimed) - *reclaimed = total; - - return ret; -} - static void *zs_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm) { @@ -481,7 +420,6 @@ static struct zpool_driver zs_zpool_driver = { .malloc_support_movable = true, .malloc = zs_zpool_malloc, .free = zs_zpool_free, - .shrink = zs_zpool_shrink, .map = zs_zpool_map, .unmap = zs_zpool_unmap, .total_size = zs_zpool_total_size, @@ -884,14 +822,6 @@ static inline bool obj_allocated(struct page *page, void *obj, unsigned long *ph return obj_tagged(page, obj, phandle, OBJ_ALLOCATED_TAG); } -#ifdef CONFIG_ZPOOL -static bool obj_stores_deferred_handle(struct page *page, void *obj, - unsigned long *phandle) -{ - return obj_tagged(page, obj, phandle, OBJ_DEFERRED_HANDLE_TAG); -} -#endif - static void reset_page(struct page *page) { __ClearPageMovable(page); @@ -922,39 +852,6 @@ unlock: return 0; } -#ifdef CONFIG_ZPOOL -static unsigned long find_deferred_handle_obj(struct size_class *class, - struct page *page, int *obj_idx); - -/* - * Free all the deferred handles whose objects are freed in zs_free. - */ -static void free_handles(struct zs_pool *pool, struct size_class *class, - struct zspage *zspage) -{ - int obj_idx = 0; - struct page *page = get_first_page(zspage); - unsigned long handle; - - while (1) { - handle = find_deferred_handle_obj(class, page, &obj_idx); - if (!handle) { - page = get_next_page(page); - if (!page) - break; - obj_idx = 0; - continue; - } - - cache_free_handle(pool, handle); - obj_idx++; - } -} -#else -static inline void free_handles(struct zs_pool *pool, struct size_class *class, - struct zspage *zspage) {} -#endif - static void __free_zspage(struct zs_pool *pool, struct size_class *class, struct zspage *zspage) { @@ -969,9 +866,6 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class, VM_BUG_ON(get_zspage_inuse(zspage)); VM_BUG_ON(fg != ZS_INUSE_RATIO_0); - /* Free all deferred handles from zs_free */ - free_handles(pool, class, zspage); - next = page = get_first_page(zspage); do { VM_BUG_ON_PAGE(!PageLocked(page), page); @@ -1006,9 +900,6 @@ static void free_zspage(struct zs_pool *pool, struct size_class *class, } remove_zspage(class, zspage, ZS_INUSE_RATIO_0); -#ifdef CONFIG_ZPOOL - list_del(&zspage->lru); -#endif __free_zspage(pool, class, zspage); } @@ -1054,11 +945,6 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) off %= PAGE_SIZE; } -#ifdef CONFIG_ZPOOL - INIT_LIST_HEAD(&zspage->lru); - zspage->under_reclaim = false; -#endif - set_freeobj(zspage, 0); } @@ -1525,20 +1411,13 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) /* We completely set up zspage so mark them as movable */ SetZsPageMovable(pool, zspage); out: -#ifdef CONFIG_ZPOOL - /* Add/move zspage to beginning of LRU */ - if (!list_empty(&zspage->lru)) - list_del(&zspage->lru); - list_add(&zspage->lru, &pool->lru); -#endif - 
spin_unlock(&pool->lock); return handle; } EXPORT_SYMBOL_GPL(zs_malloc); -static void obj_free(int class_size, unsigned long obj, unsigned long *handle) +static void obj_free(int class_size, unsigned long obj) { struct link_free *link; struct zspage *zspage; @@ -1554,25 +1433,12 @@ static void obj_free(int class_size, unsigned long obj, unsigned long *handle) vaddr = kmap_atomic(f_page); link = (struct link_free *)(vaddr + f_offset); - if (handle) { -#ifdef CONFIG_ZPOOL - /* Stores the (deferred) handle in the object's header */ - *handle |= OBJ_DEFERRED_HANDLE_TAG; - *handle &= ~OBJ_ALLOCATED_TAG; - - if (likely(!ZsHugePage(zspage))) - link->deferred_handle = *handle; - else - f_page->index = *handle; -#endif - } else { - /* Insert this object in containing zspage's freelist */ - if (likely(!ZsHugePage(zspage))) - link->next = get_freeobj(zspage) << OBJ_TAG_BITS; - else - f_page->index = 0; - set_freeobj(zspage, f_objidx); - } + /* Insert this object in containing zspage's freelist */ + if (likely(!ZsHugePage(zspage))) + link->next = get_freeobj(zspage) << OBJ_TAG_BITS; + else + f_page->index = 0; + set_freeobj(zspage, f_objidx); kunmap_atomic(vaddr); mod_zspage_inuse(zspage, -1); @@ -1600,21 +1466,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle) class = zspage_class(pool, zspage); class_stat_dec(class, ZS_OBJS_INUSE, 1); - -#ifdef CONFIG_ZPOOL - if (zspage->under_reclaim) { - /* - * Reclaim needs the handles during writeback. It'll free - * them along with the zspage when it's done with them. - * - * Record current deferred handle in the object's header. - */ - obj_free(class->size, obj, &handle); - spin_unlock(&pool->lock); - return; - } -#endif - obj_free(class->size, obj, NULL); + obj_free(class->size, obj); fullness = fix_fullness_group(class, zspage); if (fullness == ZS_INUSE_RATIO_0) @@ -1735,18 +1587,6 @@ static unsigned long find_alloced_obj(struct size_class *class, return find_tagged_obj(class, page, obj_idx, OBJ_ALLOCATED_TAG); } -#ifdef CONFIG_ZPOOL -/* - * Find object storing a deferred handle in header in zspage from index object - * and return handle. - */ -static unsigned long find_deferred_handle_obj(struct size_class *class, - struct page *page, int *obj_idx) -{ - return find_tagged_obj(class, page, obj_idx, OBJ_DEFERRED_HANDLE_TAG); -} -#endif - struct zs_compact_control { /* Source spage for migration which could be a subpage of zspage */ struct page *s_page; @@ -1786,7 +1626,7 @@ static void migrate_zspage(struct zs_pool *pool, struct size_class *class, zs_object_copy(class, free_obj, used_obj); obj_idx++; record_obj(handle, free_obj); - obj_free(class->size, used_obj, NULL); + obj_free(class->size, used_obj); } /* Remember last position in this iteration */ @@ -1846,7 +1686,7 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage) return fullness; } -#if defined(CONFIG_ZPOOL) || defined(CONFIG_COMPACTION) +#ifdef CONFIG_COMPACTION /* * To prevent zspage destroy during migration, zspage freeing should * hold locks of all pages in the zspage. @@ -1888,24 +1728,7 @@ static void lock_zspage(struct zspage *zspage) } migrate_read_unlock(zspage); } -#endif /* defined(CONFIG_ZPOOL) || defined(CONFIG_COMPACTION) */ - -#ifdef CONFIG_ZPOOL -/* - * Unlocks all the pages of the zspage. - * - * pool->lock must be held before this function is called - * to prevent the underlying pages from migrating. 
- */ -static void unlock_zspage(struct zspage *zspage) -{ - struct page *page = get_first_page(zspage); - - do { - unlock_page(page); - } while ((page = get_next_page(page)) != NULL); -} -#endif /* CONFIG_ZPOOL */ +#endif /* CONFIG_COMPACTION */ static void migrate_lock_init(struct zspage *zspage) { @@ -2126,9 +1949,6 @@ static void async_free_zspage(struct work_struct *work) VM_BUG_ON(fullness != ZS_INUSE_RATIO_0); class = pool->size_class[class_idx]; spin_lock(&pool->lock); -#ifdef CONFIG_ZPOOL - list_del(&zspage->lru); -#endif __free_zspage(pool, class, zspage); spin_unlock(&pool->lock); } @@ -2474,10 +2294,6 @@ struct zs_pool *zs_create_pool(const char *name) */ zs_register_shrinker(pool); -#ifdef CONFIG_ZPOOL - INIT_LIST_HEAD(&pool->lru); -#endif - return pool; err: @@ -2520,190 +2336,6 @@ void zs_destroy_pool(struct zs_pool *pool) } EXPORT_SYMBOL_GPL(zs_destroy_pool); -#ifdef CONFIG_ZPOOL -static void restore_freelist(struct zs_pool *pool, struct size_class *class, - struct zspage *zspage) -{ - unsigned int obj_idx = 0; - unsigned long handle, off = 0; /* off is within-page offset */ - struct page *page = get_first_page(zspage); - struct link_free *prev_free = NULL; - void *prev_page_vaddr = NULL; - - /* in case no free object found */ - set_freeobj(zspage, (unsigned int)(-1UL)); - - while (page) { - void *vaddr = kmap_atomic(page); - struct page *next_page; - - while (off < PAGE_SIZE) { - void *obj_addr = vaddr + off; - - /* skip allocated object */ - if (obj_allocated(page, obj_addr, &handle)) { - obj_idx++; - off += class->size; - continue; - } - - /* free deferred handle from reclaim attempt */ - if (obj_stores_deferred_handle(page, obj_addr, &handle)) - cache_free_handle(pool, handle); - - if (prev_free) - prev_free->next = obj_idx << OBJ_TAG_BITS; - else /* first free object found */ - set_freeobj(zspage, obj_idx); - - prev_free = (struct link_free *)vaddr + off / sizeof(*prev_free); - /* if last free object in a previous page, need to unmap */ - if (prev_page_vaddr) { - kunmap_atomic(prev_page_vaddr); - prev_page_vaddr = NULL; - } - - obj_idx++; - off += class->size; - } - - /* - * Handle the last (full or partial) object on this page. - */ - next_page = get_next_page(page); - if (next_page) { - if (!prev_free || prev_page_vaddr) { - /* - * There is no free object in this page, so we can safely - * unmap it. - */ - kunmap_atomic(vaddr); - } else { - /* update prev_page_vaddr since prev_free is on this page */ - prev_page_vaddr = vaddr; - } - } else { /* this is the last page */ - if (prev_free) { - /* - * Reset OBJ_TAG_BITS bit to last link to tell - * whether it's allocated object or not. 
- */ - prev_free->next = -1UL << OBJ_TAG_BITS; - } - - /* unmap previous page (if not done yet) */ - if (prev_page_vaddr) { - kunmap_atomic(prev_page_vaddr); - prev_page_vaddr = NULL; - } - - kunmap_atomic(vaddr); - } - - page = next_page; - off %= PAGE_SIZE; - } -} - -static int zs_reclaim_page(struct zs_pool *pool, unsigned int retries) -{ - int i, obj_idx, ret = 0; - unsigned long handle; - struct zspage *zspage; - struct page *page; - int fullness; - - /* Lock LRU and fullness list */ - spin_lock(&pool->lock); - if (list_empty(&pool->lru)) { - spin_unlock(&pool->lock); - return -EINVAL; - } - - for (i = 0; i < retries; i++) { - struct size_class *class; - - zspage = list_last_entry(&pool->lru, struct zspage, lru); - list_del(&zspage->lru); - - /* zs_free may free objects, but not the zspage and handles */ - zspage->under_reclaim = true; - - class = zspage_class(pool, zspage); - fullness = get_fullness_group(class, zspage); - - /* Lock out object allocations and object compaction */ - remove_zspage(class, zspage, fullness); - - spin_unlock(&pool->lock); - cond_resched(); - - /* Lock backing pages into place */ - lock_zspage(zspage); - - obj_idx = 0; - page = get_first_page(zspage); - while (1) { - handle = find_alloced_obj(class, page, &obj_idx); - if (!handle) { - page = get_next_page(page); - if (!page) - break; - obj_idx = 0; - continue; - } - - /* - * This will write the object and call zs_free. - * - * zs_free will free the object, but the - * under_reclaim flag prevents it from freeing - * the zspage altogether. This is necessary so - * that we can continue working with the - * zspage potentially after the last object - * has been freed. - */ - ret = pool->zpool_ops->evict(pool->zpool, handle); - if (ret) - goto next; - - obj_idx++; - } - -next: - /* For freeing the zspage, or putting it back in the pool and LRU list. */ - spin_lock(&pool->lock); - zspage->under_reclaim = false; - - if (!get_zspage_inuse(zspage)) { - /* - * Fullness went stale as zs_free() won't touch it - * while the page is removed from the pool. Fix it - * up for the check in __free_zspage(). - */ - zspage->fullness = ZS_INUSE_RATIO_0; - - __free_zspage(pool, class, zspage); - spin_unlock(&pool->lock); - return 0; - } - - /* - * Eviction fails on one of the handles, so we need to restore zspage. - * We need to rebuild its freelist (and free stored deferred handles), - * put it back to the correct size class, and add it to the LRU list. - */ - restore_freelist(pool, class, zspage); - putback_zspage(class, zspage); - list_add(&zspage->lru, &pool->lru); - unlock_zspage(zspage); - } - - spin_unlock(&pool->lock); - return -EAGAIN; -} -#endif /* CONFIG_ZPOOL */ - static int __init zs_init(void) { int ret; -- cgit v1.2.3 From 35499e2b79ffc51ea704c3268a5830164825a43e Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Mon, 12 Jun 2023 11:38:13 +0200 Subject: mm: zswap: remove shrink from zpool interface Now that all three zswap backends have removed their shrink code, it is no longer necessary for the zpool interface to include shrink/writeback endpoints. 
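For illustration only, a backend's registration after this interface trim might look like the sketch below; the demo_* names are placeholders, not part of the patch, and the remaining callbacks are assumed to exist unchanged. The point is that create() no longer receives a zpool_ops argument and the driver table has no .shrink entry:

/* Hypothetical backend, showing only the parts this patch changes. */
static void *demo_zpool_create(const char *name, gfp_t gfp)
{
	/* backend-private constructor (assumed); no eviction callbacks
	 * are passed in any more */
	return demo_create_pool(name, gfp);
}

static struct zpool_driver demo_zpool_driver = {
	.type	= "demo",
	.owner	= THIS_MODULE,
	.create	= demo_zpool_create,
	/* .destroy, .malloc, .free, .map, .unmap, .total_size stay as
	 * before; the .shrink callback is simply gone. */
};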
Link: https://lkml.kernel.org/r/20230612093815.133504-6-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Reviewed-by: Yosry Ahmed Acked-by: Nhat Pham Acked-by: Johannes Weiner Reviewed-by: Sergey Senozhatsky Cc: Dan Streetman Cc: Minchan Kim Cc: Seth Jennings Cc: Vitaly Wool Signed-off-by: Andrew Morton --- include/linux/zpool.h | 20 ++------------------ mm/z3fold.c | 4 +--- mm/zbud.c | 4 +--- mm/zpool.c | 48 ++---------------------------------------------- mm/zsmalloc.c | 4 +--- mm/zswap.c | 27 +++++++-------------------- 6 files changed, 14 insertions(+), 93 deletions(-) (limited to 'mm') diff --git a/include/linux/zpool.h b/include/linux/zpool.h index e8997010612a..3296438eec06 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -14,10 +14,6 @@ struct zpool; -struct zpool_ops { - int (*evict)(struct zpool *pool, unsigned long handle); -}; - /* * Control how a handle is mapped. It will be ignored if the * implementation does not support it. Its use is optional. @@ -39,8 +35,7 @@ enum zpool_mapmode { bool zpool_has_pool(char *type); -struct zpool *zpool_create_pool(const char *type, const char *name, - gfp_t gfp, const struct zpool_ops *ops); +struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp); const char *zpool_get_type(struct zpool *pool); @@ -53,9 +48,6 @@ int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp, void zpool_free(struct zpool *pool, unsigned long handle); -int zpool_shrink(struct zpool *pool, unsigned int pages, - unsigned int *reclaimed); - void *zpool_map_handle(struct zpool *pool, unsigned long handle, enum zpool_mapmode mm); @@ -72,7 +64,6 @@ u64 zpool_get_total_size(struct zpool *pool); * @destroy: destroy a pool. * @malloc: allocate mem from a pool. * @free: free mem from a pool. - * @shrink: shrink the pool. * @sleep_mapped: whether zpool driver can sleep during map. * @map: map a handle. * @unmap: unmap a handle. 
@@ -87,10 +78,7 @@ struct zpool_driver { atomic_t refcount; struct list_head list; - void *(*create)(const char *name, - gfp_t gfp, - const struct zpool_ops *ops, - struct zpool *zpool); + void *(*create)(const char *name, gfp_t gfp); void (*destroy)(void *pool); bool malloc_support_movable; @@ -98,9 +86,6 @@ struct zpool_driver { unsigned long *handle); void (*free)(void *pool, unsigned long handle); - int (*shrink)(void *pool, unsigned int pages, - unsigned int *reclaimed); - bool sleep_mapped; void *(*map)(void *pool, unsigned long handle, enum zpool_mapmode mm); @@ -113,7 +98,6 @@ void zpool_register_driver(struct zpool_driver *driver); int zpool_unregister_driver(struct zpool_driver *driver); -bool zpool_evictable(struct zpool *pool); bool zpool_can_sleep_mapped(struct zpool *pool); #endif diff --git a/mm/z3fold.c b/mm/z3fold.c index 238a214de59f..e84de91ecccb 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -1364,9 +1364,7 @@ static const struct movable_operations z3fold_mops = { * zpool ****************/ -static void *z3fold_zpool_create(const char *name, gfp_t gfp, - const struct zpool_ops *zpool_ops, - struct zpool *zpool) +static void *z3fold_zpool_create(const char *name, gfp_t gfp) { return z3fold_create_pool(name, gfp); } diff --git a/mm/zbud.c b/mm/zbud.c index 9d35fd4091ed..2190cc1f37b3 100644 --- a/mm/zbud.c +++ b/mm/zbud.c @@ -380,9 +380,7 @@ static u64 zbud_get_pool_size(struct zbud_pool *pool) * zpool ****************/ -static void *zbud_zpool_create(const char *name, gfp_t gfp, - const struct zpool_ops *zpool_ops, - struct zpool *zpool) +static void *zbud_zpool_create(const char *name, gfp_t gfp) { return zbud_create_pool(gfp); } diff --git a/mm/zpool.c b/mm/zpool.c index 6a19c4a58f77..846410479c2f 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -133,7 +133,6 @@ EXPORT_SYMBOL(zpool_has_pool); * @type: The type of the zpool to create (e.g. zbud, zsmalloc) * @name: The name of the zpool (e.g. zram0, zswap) * @gfp: The GFP flags to use when allocating the pool. - * @ops: The optional ops callback. * * This creates a new zpool of the specified type. The gfp flags will be * used when allocating memory, if the implementation supports it. If the @@ -145,8 +144,7 @@ EXPORT_SYMBOL(zpool_has_pool); * * Returns: New zpool on success, NULL on failure. */ -struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp, - const struct zpool_ops *ops) +struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp) { struct zpool_driver *driver; struct zpool *zpool; @@ -173,7 +171,7 @@ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp, } zpool->driver = driver; - zpool->pool = driver->create(name, gfp, ops, zpool); + zpool->pool = driver->create(name, gfp); if (!zpool->pool) { pr_err("couldn't create %s pool\n", type); @@ -279,30 +277,6 @@ void zpool_free(struct zpool *zpool, unsigned long handle) zpool->driver->free(zpool->pool, handle); } -/** - * zpool_shrink() - Shrink the pool size - * @zpool: The zpool to shrink. - * @pages: The number of pages to shrink the pool. - * @reclaimed: The number of pages successfully evicted. - * - * This attempts to shrink the actual memory size of the pool - * by evicting currently used handle(s). If the pool was - * created with no zpool_ops, or the evict call fails for any - * of the handles, this will fail. If non-NULL, the @reclaimed - * parameter will be set to the number of pages reclaimed, - * which may be more than the number of pages requested. 
- * - * Implementations must guarantee this to be thread-safe. - * - * Returns: 0 on success, negative value on error/failure. - */ -int zpool_shrink(struct zpool *zpool, unsigned int pages, - unsigned int *reclaimed) -{ - return zpool->driver->shrink ? - zpool->driver->shrink(zpool->pool, pages, reclaimed) : -EINVAL; -} - /** * zpool_map_handle() - Map a previously allocated handle into memory * @zpool: The zpool that the handle was allocated from @@ -359,24 +333,6 @@ u64 zpool_get_total_size(struct zpool *zpool) return zpool->driver->total_size(zpool->pool); } -/** - * zpool_evictable() - Test if zpool is potentially evictable - * @zpool: The zpool to test - * - * Zpool is only potentially evictable when it's created with struct - * zpool_ops.evict and its driver implements struct zpool_driver.shrink. - * - * However, it doesn't necessarily mean driver will use zpool_ops.evict - * in its implementation of zpool_driver.shrink. It could do internal - * defragmentation instead. - * - * Returns: true if potentially evictable; false otherwise. - */ -bool zpool_evictable(struct zpool *zpool) -{ - return zpool->driver->shrink; -} - /** * zpool_can_sleep_mapped - Test if zpool can sleep when do mapped. * @zpool: The zpool to test diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index e4d1ad521738..3f057970504e 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -351,9 +351,7 @@ static void record_obj(unsigned long handle, unsigned long obj) #ifdef CONFIG_ZPOOL -static void *zs_zpool_create(const char *name, gfp_t gfp, - const struct zpool_ops *zpool_ops, - struct zpool *zpool) +static void *zs_zpool_create(const char *name, gfp_t gfp) { /* * Ignore global gfp flags: zs_malloc() may be invoked from diff --git a/mm/zswap.c b/mm/zswap.c index 0024ec5ed574..a4f8c20e161b 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -258,10 +258,6 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle); static int zswap_pool_get(struct zswap_pool *pool); static void zswap_pool_put(struct zswap_pool *pool); -static const struct zpool_ops zswap_zpool_ops = { - .evict = zswap_writeback_entry -}; - static bool zswap_is_full(void) { return totalram_pages() * zswap_max_pool_percent / 100 < @@ -379,12 +375,9 @@ static void zswap_free_entry(struct zswap_entry *entry) if (!entry->length) atomic_dec(&zswap_same_filled_pages); else { - /* zpool_evictable will be removed once all 3 backends have migrated */ - if (!zpool_evictable(entry->pool->zpool)) { - spin_lock(&entry->pool->lru_lock); - list_del(&entry->lru); - spin_unlock(&entry->pool->lru_lock); - } + spin_lock(&entry->pool->lru_lock); + list_del(&entry->lru); + spin_unlock(&entry->pool->lru_lock); zpool_free(entry->pool->zpool, entry->handle); zswap_pool_put(entry->pool); } @@ -665,12 +658,8 @@ static void shrink_worker(struct work_struct *w) shrink_work); int ret, failures = 0; - /* zpool_evictable will be removed once all 3 backends have migrated */ do { - if (zpool_evictable(pool->zpool)) - ret = zpool_shrink(pool->zpool, 1, NULL); - else - ret = zswap_reclaim_entry(pool); + ret = zswap_reclaim_entry(pool); if (ret) { zswap_reject_reclaim_fail++; if (ret != -EAGAIN) @@ -708,7 +697,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor) /* unique name for each pool specifically required by zsmalloc */ snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count)); - pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops); + pool->zpool = zpool_create_pool(type, name, gfp); if (!pool->zpool) { pr_err("%s zpool not 
available\n", type); goto error; @@ -1394,8 +1383,7 @@ insert_entry: zswap_entry_put(tree, dupentry); } } while (ret == -EEXIST); - /* zpool_evictable will be removed once all 3 backends have migrated */ - if (entry->length && !zpool_evictable(entry->pool->zpool)) { + if (entry->length) { spin_lock(&entry->pool->lru_lock); list_add(&entry->lru, &entry->pool->lru); spin_unlock(&entry->pool->lru_lock); @@ -1514,8 +1502,7 @@ freeentry: if (!ret && zswap_exclusive_loads_enabled) { zswap_invalidate_entry(tree, entry); *exclusive = true; - } else if (entry->length && !zpool_evictable(entry->pool->zpool)) { - /* zpool_evictable will be removed once all 3 backends have migrated */ + } else if (entry->length) { spin_lock(&entry->pool->lru_lock); list_move(&entry->lru, &entry->pool->lru); spin_unlock(&entry->pool->lru_lock); -- cgit v1.2.3 From ff9d5ba202f98db53da98330b46b68a9228c63e4 Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Mon, 12 Jun 2023 11:38:14 +0200 Subject: mm: zswap: simplify writeback function zswap_writeback_entry() used to be a callback for the backends, which don't know about struct zswap_entry. Now that the only user is the generic zswap LRU reclaimer, it can be simplified: pass the pinned zswap_entry directly, and consolidate the refcount management in the shrink function. Link: https://lkml.kernel.org/r/20230612093815.133504-7-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Tested-by: Yosry Ahmed Acked-by: Johannes Weiner Cc: Dan Streetman Cc: Minchan Kim Cc: Nhat Pham Cc: Sergey Senozhatsky Cc: Seth Jennings Cc: Vitaly Wool Signed-off-by: Andrew Morton --- mm/zswap.c | 70 +++++++++++++++++--------------------------------------------- 1 file changed, 19 insertions(+), 51 deletions(-) (limited to 'mm') diff --git a/mm/zswap.c b/mm/zswap.c index a4f8c20e161b..3a6b07a19262 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -254,7 +254,8 @@ static bool zswap_has_pool; pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \ zpool_get_type((p)->zpool)) -static int zswap_writeback_entry(struct zpool *pool, unsigned long handle); +static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header *zhdr, + struct zswap_tree *tree); static int zswap_pool_get(struct zswap_pool *pool); static void zswap_pool_put(struct zswap_pool *pool); @@ -635,7 +636,7 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) zswap_entry_get(entry); spin_unlock(&tree->lock); - ret = zswap_writeback_entry(pool->zpool, entry->handle); + ret = zswap_writeback_entry(entry, zhdr, tree); spin_lock(&tree->lock); if (ret) { @@ -643,8 +644,17 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) spin_lock(&pool->lru_lock); list_move(&entry->lru, &pool->lru); spin_unlock(&pool->lru_lock); + goto put_unlock; } + /* Check for invalidate() race */ + if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) + goto put_unlock; + + /* Drop base reference */ + zswap_entry_put(tree, entry); + +put_unlock: /* Drop local reference */ zswap_entry_put(tree, entry); unlock: @@ -1045,16 +1055,14 @@ static int zswap_get_swap_cache_page(swp_entry_t entry, * the swap cache, the compressed version stored by zswap can be * freed. 
*/ -static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) +static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header *zhdr, + struct zswap_tree *tree) { - struct zswap_header *zhdr; - swp_entry_t swpentry; - struct zswap_tree *tree; - pgoff_t offset; - struct zswap_entry *entry; + swp_entry_t swpentry = zhdr->swpentry; struct page *page; struct scatterlist input, output; struct crypto_acomp_ctx *acomp_ctx; + struct zpool *pool = entry->pool->zpool; u8 *src, *tmp = NULL; unsigned int dlen; @@ -1069,25 +1077,6 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) return -ENOMEM; } - /* extract swpentry from data */ - zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO); - swpentry = zhdr->swpentry; /* here */ - tree = zswap_trees[swp_type(swpentry)]; - offset = swp_offset(swpentry); - zpool_unmap_handle(pool, handle); - - /* find and ref zswap entry */ - spin_lock(&tree->lock); - entry = zswap_entry_find_get(&tree->rbroot, offset); - if (!entry) { - /* entry was invalidated */ - spin_unlock(&tree->lock); - kfree(tmp); - return 0; - } - spin_unlock(&tree->lock); - BUG_ON(offset != entry->offset); - /* try to allocate swap cache page */ switch (zswap_get_swap_cache_page(swpentry, &page)) { case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */ @@ -1121,12 +1110,12 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx); dlen = PAGE_SIZE; - zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO); + zhdr = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO); src = (u8 *)zhdr + sizeof(struct zswap_header); if (!zpool_can_sleep_mapped(pool)) { memcpy(tmp, src, entry->length); src = tmp; - zpool_unmap_handle(pool, handle); + zpool_unmap_handle(pool, entry->handle); } mutex_lock(acomp_ctx->mutex); @@ -1141,7 +1130,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) if (!zpool_can_sleep_mapped(pool)) kfree(tmp); else - zpool_unmap_handle(pool, handle); + zpool_unmap_handle(pool, entry->handle); BUG_ON(ret); BUG_ON(dlen != PAGE_SIZE); @@ -1158,23 +1147,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) put_page(page); zswap_written_back_pages++; - spin_lock(&tree->lock); - /* drop local reference */ - zswap_entry_put(tree, entry); - - /* - * There are two possible situations for entry here: - * (1) refcount is 1(normal case), entry is valid and on the tree - * (2) refcount is 0, entry is freed and not on the tree - * because invalidate happened during writeback - * search the tree and free the entry if find entry - */ - if (entry == zswap_rb_search(&tree->rbroot, offset)) - zswap_entry_put(tree, entry); - spin_unlock(&tree->lock); - return ret; - fail: if (!zpool_can_sleep_mapped(pool)) kfree(tmp); @@ -1183,13 +1156,8 @@ fail: * if we get here due to ZSWAP_SWAPCACHE_EXIST * a load may be happening concurrently. * it is safe and okay to not free the entry. - * if we free the entry in the following put * it is also okay to return !0 */ - spin_lock(&tree->lock); - zswap_entry_put(tree, entry); - spin_unlock(&tree->lock); - return ret; } -- cgit v1.2.3 From 0bb488498c989e0d912d38b624df31922027c8c5 Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Mon, 12 Jun 2023 11:38:15 +0200 Subject: mm: zswap: remove zswap_header Previously, zswap_header served the purpose of storing the swpentry within zpool pages. This allowed zpool implementations to pass relevant information to the writeback function. 
However, with the current implementation, writeback is directly handled within zswap. Consequently, there is no longer a necessity for zswap_header, as the swp_entry_t can be stored directly in zswap_entry. Link: https://lkml.kernel.org/r/20230612093815.133504-8-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Tested-by: Yosry Ahmed Suggested-by: Yosry Ahmed Acked-by: Johannes Weiner Cc: Dan Streetman Cc: Minchan Kim Cc: Nhat Pham Cc: Sergey Senozhatsky Cc: Seth Jennings Cc: Vitaly Wool Signed-off-by: Andrew Morton --- mm/zswap.c | 52 ++++++++++++++++++++++------------------------------ 1 file changed, 22 insertions(+), 30 deletions(-) (limited to 'mm') diff --git a/mm/zswap.c b/mm/zswap.c index 3a6b07a19262..02d0a6f46f4a 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -197,7 +197,7 @@ struct zswap_pool { */ struct zswap_entry { struct rb_node rbnode; - pgoff_t offset; + swp_entry_t swpentry; int refcount; unsigned int length; struct zswap_pool *pool; @@ -209,10 +209,6 @@ struct zswap_entry { struct list_head lru; }; -struct zswap_header { - swp_entry_t swpentry; -}; - /* * The tree lock in the zswap_tree struct protects a few things: * - the rbtree @@ -254,7 +250,7 @@ static bool zswap_has_pool; pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \ zpool_get_type((p)->zpool)) -static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header *zhdr, +static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_tree *tree); static int zswap_pool_get(struct zswap_pool *pool); static void zswap_pool_put(struct zswap_pool *pool); @@ -315,12 +311,14 @@ static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset) { struct rb_node *node = root->rb_node; struct zswap_entry *entry; + pgoff_t entry_offset; while (node) { entry = rb_entry(node, struct zswap_entry, rbnode); - if (entry->offset > offset) + entry_offset = swp_offset(entry->swpentry); + if (entry_offset > offset) node = node->rb_left; - else if (entry->offset < offset) + else if (entry_offset < offset) node = node->rb_right; else return entry; @@ -337,13 +335,15 @@ static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry, { struct rb_node **link = &root->rb_node, *parent = NULL; struct zswap_entry *myentry; + pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry); while (*link) { parent = *link; myentry = rb_entry(parent, struct zswap_entry, rbnode); - if (myentry->offset > entry->offset) + myentry_offset = swp_offset(myentry->swpentry); + if (myentry_offset > entry_offset) link = &(*link)->rb_left; - else if (myentry->offset < entry->offset) + else if (myentry_offset < entry_offset) link = &(*link)->rb_right; else { *dupentry = myentry; @@ -601,7 +601,6 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) static int zswap_reclaim_entry(struct zswap_pool *pool) { - struct zswap_header *zhdr; struct zswap_entry *entry; struct zswap_tree *tree; pgoff_t swpoffset; @@ -615,15 +614,13 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) } entry = list_last_entry(&pool->lru, struct zswap_entry, lru); list_del_init(&entry->lru); - zhdr = zpool_map_handle(pool->zpool, entry->handle, ZPOOL_MM_RO); - tree = zswap_trees[swp_type(zhdr->swpentry)]; - zpool_unmap_handle(pool->zpool, entry->handle); /* * Once the lru lock is dropped, the entry might get freed. The * swpoffset is copied to the stack, and entry isn't deref'd again * until the entry is verified to still be alive in the tree. 
*/ - swpoffset = swp_offset(zhdr->swpentry); + swpoffset = swp_offset(entry->swpentry); + tree = zswap_trees[swp_type(entry->swpentry)]; spin_unlock(&pool->lru_lock); /* Check for invalidate() race */ @@ -636,7 +633,7 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) zswap_entry_get(entry); spin_unlock(&tree->lock); - ret = zswap_writeback_entry(entry, zhdr, tree); + ret = zswap_writeback_entry(entry, tree); spin_lock(&tree->lock); if (ret) { @@ -1055,10 +1052,10 @@ static int zswap_get_swap_cache_page(swp_entry_t entry, * the swap cache, the compressed version stored by zswap can be * freed. */ -static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header *zhdr, +static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_tree *tree) { - swp_entry_t swpentry = zhdr->swpentry; + swp_entry_t swpentry = entry->swpentry; struct page *page; struct scatterlist input, output; struct crypto_acomp_ctx *acomp_ctx; @@ -1098,7 +1095,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header * writing. */ spin_lock(&tree->lock); - if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) { + if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) { spin_unlock(&tree->lock); delete_from_swap_cache(page_folio(page)); ret = -ENOMEM; @@ -1110,8 +1107,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx); dlen = PAGE_SIZE; - zhdr = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO); - src = (u8 *)zhdr + sizeof(struct zswap_header); + src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO); if (!zpool_can_sleep_mapped(pool)) { memcpy(tmp, src, entry->length); src = tmp; @@ -1205,11 +1201,10 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, struct obj_cgroup *objcg = NULL; struct zswap_pool *pool; int ret; - unsigned int hlen, dlen = PAGE_SIZE; + unsigned int dlen = PAGE_SIZE; unsigned long handle, value; char *buf; u8 *src, *dst; - struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) }; gfp_t gfp; /* THP isn't supported */ @@ -1254,7 +1249,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, src = kmap_atomic(page); if (zswap_is_page_same_filled(src, &value)) { kunmap_atomic(src); - entry->offset = offset; + entry->swpentry = swp_entry(type, offset); entry->length = 0; entry->value = value; atomic_inc(&zswap_same_filled_pages); @@ -1308,11 +1303,10 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, } /* store */ - hlen = sizeof(zhdr); gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; if (zpool_malloc_support_movable(entry->pool->zpool)) gfp |= __GFP_HIGHMEM | __GFP_MOVABLE; - ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle); + ret = zpool_malloc(entry->pool->zpool, dlen, gfp, &handle); if (ret == -ENOSPC) { zswap_reject_compress_poor++; goto put_dstmem; @@ -1322,13 +1316,12 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, goto put_dstmem; } buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_WO); - memcpy(buf, &zhdr, hlen); - memcpy(buf + hlen, dst, dlen); + memcpy(buf, dst, dlen); zpool_unmap_handle(entry->pool->zpool, handle); mutex_unlock(acomp_ctx->mutex); /* populate entry */ - entry->offset = offset; + entry->swpentry = swp_entry(type, offset); entry->handle = handle; entry->length = dlen; @@ -1437,7 +1430,6 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset, /* decompress */ dlen = PAGE_SIZE; src = 
zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO); - src += sizeof(struct zswap_header); if (!zpool_can_sleep_mapped(entry->pool->zpool)) { memcpy(tmp, src, entry->length); -- cgit v1.2.3 From 1e3be4856f49d55c60b6cd500297b06acfe216a9 Mon Sep 17 00:00:00 2001 From: Tarun Sahu Date: Mon, 12 Jun 2023 15:05:14 +0530 Subject: mm/folio: replace set_compound_order with folio_set_order The patch ("mm/folio: Avoid special handling for order value 0 in folio_set_order") [1] removed the need for special handling of order = 0 in folio_set_order. Now, folio_set_order and set_compound_order becomes similar function. This patch removes the set_compound_order and uses folio_set_order instead. [1] https://lore.kernel.org/all/20230609183032.13E08C433D2@smtp.kernel.org/ Link: https://lkml.kernel.org/r/20230612093514.689846-1-tsahu@linux.ibm.com Signed-off-by: Tarun Sahu Reviewed-by Sidhartha Kumar Reviewed-by: Muchun Song Cc: Aneesh Kumar K.V Cc: Gerald Schaefer Cc: Matthew Wilcox Cc: Mike Kravetz Signed-off-by: Andrew Morton --- include/linux/mm.h | 10 ---------- mm/internal.h | 32 ++++++++++++++++---------------- 2 files changed, 16 insertions(+), 26 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index 8f40bf17d597..ab04756b2240 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1232,16 +1232,6 @@ static inline void folio_set_compound_dtor(struct folio *folio, void destroy_large_folio(struct folio *folio); -static inline void set_compound_order(struct page *page, unsigned int order) -{ - struct folio *folio = (struct folio *)page; - - folio->_folio_order = order; -#ifdef CONFIG_64BIT - folio->_folio_nr_pages = 1U << order; -#endif -} - /* Returns the number of bytes in this potentially compound page. */ static inline unsigned long page_size(struct page *page) { diff --git a/mm/internal.h b/mm/internal.h index 33b8b8f66af3..b0d8778dd910 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -387,12 +387,27 @@ extern void memblock_free_pages(struct page *page, unsigned long pfn, unsigned int order); extern void __free_pages_core(struct page *page, unsigned int order); +/* + * This will have no effect, other than possibly generating a warning, if the + * caller passes in a non-large folio. + */ +static inline void folio_set_order(struct folio *folio, unsigned int order) +{ + if (WARN_ON_ONCE(!order || !folio_test_large(folio))) + return; + + folio->_folio_order = order; +#ifdef CONFIG_64BIT + folio->_folio_nr_pages = 1U << order; +#endif +} + static inline void prep_compound_head(struct page *page, unsigned int order) { struct folio *folio = (struct folio *)page; folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR); - set_compound_order(page, order); + folio_set_order(folio, order); atomic_set(&folio->_entire_mapcount, -1); atomic_set(&folio->_nr_pages_mapped, 0); atomic_set(&folio->_pincount, 0); @@ -432,21 +447,6 @@ void memmap_init_range(unsigned long, int, unsigned long, unsigned long, int split_free_page(struct page *free_page, unsigned int order, unsigned long split_pfn_offset); -/* - * This will have no effect, other than possibly generating a warning, if the - * caller passes in a non-large folio. 
- */ -static inline void folio_set_order(struct folio *folio, unsigned int order) -{ - if (WARN_ON_ONCE(!order || !folio_test_large(folio))) - return; - - folio->_folio_order = order; -#ifdef CONFIG_64BIT - folio->_folio_nr_pages = 1U << order; -#endif -} - #if defined CONFIG_COMPACTION || defined CONFIG_CMA /* -- cgit v1.2.3 From 65ac132027a884c411b8f9f96d240ba2dde34dec Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Wed, 31 May 2023 21:54:02 -0400 Subject: userfaultfd: fix regression in userfaultfd_unmap_prep() Android reported a performance regression in the userfaultfd unmap path. A closer inspection on the userfaultfd_unmap_prep() change showed that a second tree walk would be necessary in the reworked code. Fix the regression by passing each VMA that will be unmapped through to the userfaultfd_unmap_prep() function as they are added to the unmap list, instead of re-walking the tree for the VMA. Link: https://lkml.kernel.org/r/20230601015402.2819343-1-Liam.Howlett@oracle.com Fixes: 69dbe6daf104 ("userfaultfd: use maple tree iterator to iterate VMAs") Signed-off-by: Liam R. Howlett Reported-by: Suren Baghdasaryan Suggested-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- fs/userfaultfd.c | 35 +++++++++++++++-------------------- include/linux/userfaultfd_k.h | 6 +++--- mm/mmap.c | 31 +++++++++++++++---------------- 3 files changed, 33 insertions(+), 39 deletions(-) (limited to 'mm') diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 478e2b169c13..0aa5caac5164 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -852,31 +852,26 @@ static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps, return false; } -int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start, +int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *unmaps) { - VMA_ITERATOR(vmi, mm, start); - struct vm_area_struct *vma; - - for_each_vma_range(vmi, vma, end) { - struct userfaultfd_unmap_ctx *unmap_ctx; - struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; + struct userfaultfd_unmap_ctx *unmap_ctx; + struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; - if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) || - has_unmap_ctx(ctx, unmaps, start, end)) - continue; + if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) || + has_unmap_ctx(ctx, unmaps, start, end)) + return 0; - unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL); - if (!unmap_ctx) - return -ENOMEM; + unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL); + if (!unmap_ctx) + return -ENOMEM; - userfaultfd_ctx_get(ctx); - atomic_inc(&ctx->mmap_changing); - unmap_ctx->ctx = ctx; - unmap_ctx->start = start; - unmap_ctx->end = end; - list_add_tail(&unmap_ctx->list, unmaps); - } + userfaultfd_ctx_get(ctx); + atomic_inc(&ctx->mmap_changing); + unmap_ctx->ctx = ctx; + unmap_ctx->start = start; + unmap_ctx->end = end; + list_add_tail(&unmap_ctx->list, unmaps); return 0; } diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index d78b01524349..ac7b0c96d351 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -188,8 +188,8 @@ extern bool userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, unsigned long end); -extern int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start, - unsigned long end, struct list_head *uf); +extern int userfaultfd_unmap_prep(struct vm_area_struct *vma, + unsigned long start, unsigned long end, struct list_head *uf); extern void 
userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf); extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma); @@ -271,7 +271,7 @@ static inline bool userfaultfd_remove(struct vm_area_struct *vma, return true; } -static inline int userfaultfd_unmap_prep(struct mm_struct *mm, +static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *uf) { diff --git a/mm/mmap.c b/mm/mmap.c index f084b7940431..4fc496bc5b95 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2417,6 +2417,21 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, goto munmap_sidetree_failed; count++; + if (unlikely(uf)) { + /* + * If userfaultfd_unmap_prep returns an error the vmas + * will remain split, but userland will get a + * highly unexpected error anyway. This is no + * different than the case where the first of the two + * __split_vma fails, but we don't undo the first + * split, despite we could. This is unlikely enough + * failure that it's not worth optimizing it for. + */ + error = userfaultfd_unmap_prep(next, start, end, uf); + + if (error) + goto userfaultfd_error; + } #ifdef CONFIG_DEBUG_VM_MAPLE_TREE BUG_ON(next->vm_start < start); BUG_ON(next->vm_start > end); @@ -2429,22 +2444,6 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, if (!next) next = vma_next(vmi); - if (unlikely(uf)) { - /* - * If userfaultfd_unmap_prep returns an error the vmas - * will remain split, but userland will get a - * highly unexpected error anyway. This is no - * different than the case where the first of the two - * __split_vma fails, but we don't undo the first - * split, despite we could. This is unlikely enough - * failure that it's not worth optimizing it for. - */ - error = userfaultfd_unmap_prep(mm, start, end, uf); - - if (error) - goto userfaultfd_error; - } - #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) /* Make sure no VMAs are about to be lost. */ { -- cgit v1.2.3 From 833dfc0090b3f8017ddac82d818b2d8e5ceb61db Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 10 Jun 2023 11:46:15 +0800 Subject: mm: compaction: mark kcompactd_run() and kcompactd_stop() __meminit Add __meminit to kcompactd_run() and kcompactd_stop() to ensure they're default to __init when memory hotplug is not enabled. Link: https://lkml.kernel.org/r/20230610034615.997813-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Baolin Wang Signed-off-by: Andrew Morton --- include/linux/compaction.h | 4 ++-- mm/compaction.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 57b16e69c19a..e94776496049 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -98,8 +98,8 @@ extern void compaction_defer_reset(struct zone *zone, int order, bool compaction_zonelist_suitable(struct alloc_context *ac, int order, int alloc_flags); -extern void kcompactd_run(int nid); -extern void kcompactd_stop(int nid); +extern void __meminit kcompactd_run(int nid); +extern void __meminit kcompactd_stop(int nid); extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx); #else diff --git a/mm/compaction.c b/mm/compaction.c index 767b0815c874..6149a2d324be 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -3050,7 +3050,7 @@ static int kcompactd(void *p) * This kcompactd start function will be called by init and node-hot-add. * On node-hot-add, kcompactd will moved to proper cpus if cpus are hot-added. 
*/ -void kcompactd_run(int nid) +void __meminit kcompactd_run(int nid) { pg_data_t *pgdat = NODE_DATA(nid); @@ -3068,7 +3068,7 @@ void kcompactd_run(int nid) * Called by memory hotplug when all memory in a node is offlined. Caller must * be holding mem_hotplug_begin/done(). */ -void kcompactd_stop(int nid) +void __meminit kcompactd_stop(int nid) { struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; -- cgit v1.2.3 From 33ee4f185827d99fd0957ab3cea00b2266fc5abc Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 10 Jun 2023 11:41:14 +0800 Subject: memory tier: remove unneeded !IS_ENABLED(CONFIG_MIGRATION) check establish_demotion_targets() is defined while CONFIG_MIGRATION is enabled. There's no need to check it again. Link: https://lkml.kernel.org/r/20230610034114.981861-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: David Hildenbrand Reviewed-by: Yang Shi Signed-off-by: Andrew Morton --- mm/memory-tiers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c index dd04f0ce5277..a516e303e304 100644 --- a/mm/memory-tiers.c +++ b/mm/memory-tiers.c @@ -366,7 +366,7 @@ static void establish_demotion_targets(void) lockdep_assert_held_once(&memory_tier_lock); - if (!node_demotion || !IS_ENABLED(CONFIG_MIGRATION)) + if (!node_demotion) return; disable_all_demotion_targets(); -- cgit v1.2.3 From 3a6358c0dbe6a286a4f4504ba392a6039a9fbd12 Mon Sep 17 00:00:00 2001 From: Yu Ma Date: Fri, 9 Jun 2023 23:07:30 -0400 Subject: percpu-internal/pcpu_chunk: re-layout pcpu_chunk structure to reduce false sharing When running UnixBench/Execl throughput case, false sharing is observed due to frequent read on base_addr and write on free_bytes, chunk_md. UnixBench/Execl represents a class of workload where bash scripts are spawned frequently to do some short jobs. It will do system call on execl frequently, and execl will call mm_init to initialize mm_struct of the process. mm_init will call __percpu_counter_init for percpu_counters initialization. Then pcpu_alloc is called to read the base_addr of pcpu_chunk for memory allocation. Inside pcpu_alloc, it will call pcpu_alloc_area to allocate memory from a specified chunk. This function will update "free_bytes" and "chunk_md" to record the rest free bytes and other meta data for this chunk. Correspondingly, pcpu_free_area will also update these 2 members when free memory. Call trace from perf is as below: + 57.15% 0.01% execl [kernel.kallsyms] [k] __percpu_counter_init + 57.13% 0.91% execl [kernel.kallsyms] [k] pcpu_alloc - 55.27% 54.51% execl [kernel.kallsyms] [k] osq_lock - 53.54% 0x654278696e552f34 main __execve entry_SYSCALL_64_after_hwframe do_syscall_64 __x64_sys_execve do_execveat_common.isra.47 alloc_bprm mm_init __percpu_counter_init pcpu_alloc - __mutex_lock.isra.17 In current pcpu_chunk layout, `base_addr' is in the same cache line with `free_bytes' and `chunk_md', and `base_addr' is at the last 8 bytes. This patch moves `bound_map' up to `base_addr', to let `base_addr' locate in a new cacheline. With this change, on Intel Sapphire Rapids 112C/224T platform, based on v6.4-rc4, the 160 parallel score improves by 24%. The pcpu_chunk struct is a backing data structure per chunk, so the additional memory should not be dramatic. A chunk covers ballpark between 64kb and 512kb memory depending on some config and boot time stuff, so I believe the additional memory used here is nominal at best. Working the #s on my desktop: Percpu: 58624 kB 28 cores -> ~2.1MB of percpu memory. 
At say ~128KB per chunk -> 33 chunks, generously 40 chunks. Adding alignment might bump the chunk size ~64 bytes, so in total ~2KB of overhead? I believe we can do a little better to avoid eating that full padding, so likely less than that. [dennis@kernel.org: changelog details] Link: https://lkml.kernel.org/r/20230610030730.110074-1-yu.ma@intel.com Signed-off-by: Yu Ma Reviewed-by: Tim Chen Acked-by: Dennis Zhou Cc: Dan Williams Cc: Dave Hansen Cc: Liam R. Howlett Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/percpu-internal.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h index f9847c131998..cdd0aa597a81 100644 --- a/mm/percpu-internal.h +++ b/mm/percpu-internal.h @@ -41,10 +41,17 @@ struct pcpu_chunk { struct list_head list; /* linked to pcpu_slot lists */ int free_bytes; /* free bytes in the chunk */ struct pcpu_block_md chunk_md; - void *base_addr; /* base address of this chunk */ + unsigned long *bound_map; /* boundary map */ + + /* + * base_addr is the base address of this chunk. + * To reduce false sharing, current layout is optimized to make sure + * base_addr locate in the different cacheline with free_bytes and + * chunk_md. + */ + void *base_addr ____cacheline_aligned_in_smp; unsigned long *alloc_map; /* allocation map */ - unsigned long *bound_map; /* boundary map */ struct pcpu_block_md *md_blocks; /* metadata blocks */ void *data; /* chunk data */ -- cgit v1.2.3 From e1ad3e66676479d6a0af6be953767f865c902111 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Fri, 9 Jun 2023 16:44:45 -0700 Subject: mm/khugepaged: use DEFINE_READ_MOSTLY_HASHTABLE macro These are equivalent, but DEFINE_READ_MOSTLY_HASHTABLE exists to define a hashtable in the .data..read_mostly section. 
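For reference, the helper being switched to expands to roughly the following; this is paraphrased from include/linux/hashtable.h rather than quoted from the patch, so treat it as a sketch:

	/* Approximate expansion: the section attribute is folded into the
	 * definition, so callers do not spell out __read_mostly separately.
	 */
	#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits)			\
		struct hlist_head name[1 << (bits)] __read_mostly =		\
			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }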
Link: https://lkml.kernel.org/r/20230609-khugepage-v1-1-dad4e8382298@google.com Signed-off-by: Nick Desaulniers Reviewed-by: Yang Shi Signed-off-by: Andrew Morton --- mm/khugepaged.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 0b4f00712895..87e962c79a52 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -88,7 +88,7 @@ static unsigned int khugepaged_max_ptes_swap __read_mostly; static unsigned int khugepaged_max_ptes_shared __read_mostly; #define MM_SLOTS_HASH_BITS 10 -static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); +static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); static struct kmem_cache *mm_slot_cache __read_mostly; -- cgit v1.2.3 From 399fd496c49b399ff9b6fc87fefadb66e156aa5e Mon Sep 17 00:00:00 2001 From: Kassey Li Date: Tue, 13 Jun 2023 17:25:33 +0800 Subject: mm/page_owner/cma: show pfn in cma/page_owner with hex format cma: display pfn as well as pfn_to_page(pfn) page_owner: display pfn in hex rather than decimal Link: https://lkml.kernel.org/r/20230613092533.15449-1-quic_yingangl@quicinc.com Signed-off-by: Kassey Li Cc: Joonsoo Kim Cc: Minchan Kim Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/cma.c | 4 ++-- mm/page_owner.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/cma.c b/mm/cma.c index 6268d6620254..a4cfe995e11e 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -483,8 +483,8 @@ struct page *cma_alloc(struct cma *cma, unsigned long count, if (ret != -EBUSY) break; - pr_debug("%s(): memory range at %p is busy, retrying\n", - __func__, pfn_to_page(pfn)); + pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n", + __func__, pfn, pfn_to_page(pfn)); trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn), count, align); diff --git a/mm/page_owner.c b/mm/page_owner.c index 31169b3e7f06..c93baef0148f 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -418,7 +418,7 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn, pageblock_mt = get_pageblock_migratetype(page); page_mt = gfp_migratetype(page_owner->gfp_mask); ret += scnprintf(kbuf + ret, count - ret, - "PFN %lu type %s Block %lu type %s Flags %pGp\n", + "PFN 0x%lx type %s Block %lu type %s Flags %pGp\n", pfn, migratetype_names[page_mt], pfn >> pageblock_order, -- cgit v1.2.3 From 6c77b607ee26472fb945aa41734281c39d06d68f Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 14 Jun 2023 22:36:12 +0800 Subject: mm: kill lock|unlock_page_memcg() Since commit c7c3dec1c9db ("mm: rmap: remove lock_page_memcg()"), no more user, kill lock_page_memcg() and unlock_page_memcg(). 
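For any remaining out-of-tree callers the conversion is mechanical; a hedged before/after sketch with a hypothetical caller (not taken from this patch), based on the removed helpers being thin wrappers around the folio API:

	/* before */
	lock_page_memcg(page);
	/* ... update memcg-charged page state ... */
	unlock_page_memcg(page);

	/* after: operate on the folio directly */
	folio_memcg_lock(page_folio(page));
	/* ... update memcg-charged page state ... */
	folio_memcg_unlock(page_folio(page));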
Link: https://lkml.kernel.org/r/20230614143612.62575-1-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Acked-by: Johannes Weiner Reviewed-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- Documentation/admin-guide/cgroup-v1/memory.rst | 2 +- include/linux/memcontrol.h | 12 +----------- mm/filemap.c | 2 +- mm/memcontrol.c | 18 ++++-------------- mm/page-writeback.c | 6 +++--- 5 files changed, 10 insertions(+), 30 deletions(-) (limited to 'mm') diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst index 47d1d7d932a8..fabaad3fd9c2 100644 --- a/Documentation/admin-guide/cgroup-v1/memory.rst +++ b/Documentation/admin-guide/cgroup-v1/memory.rst @@ -297,7 +297,7 @@ Lock order is as follows:: Page lock (PG_locked bit of page->flags) mm->page_table_lock or split pte_lock - lock_page_memcg (memcg->move_lock) + folio_memcg_lock (memcg->move_lock) mapping->i_pages lock lruvec->lru_lock. diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 00a88cf947e1..c3d3a0c09315 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -419,7 +419,7 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio) * * - the folio lock * - LRU isolation - * - lock_page_memcg() + * - folio_memcg_lock() * - exclusive reference * - mem_cgroup_trylock_pages() * @@ -949,8 +949,6 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); void folio_memcg_lock(struct folio *folio); void folio_memcg_unlock(struct folio *folio); -void lock_page_memcg(struct page *page); -void unlock_page_memcg(struct page *page); void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val); @@ -1438,14 +1436,6 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) { } -static inline void lock_page_memcg(struct page *page) -{ -} - -static inline void unlock_page_memcg(struct page *page) -{ -} - static inline void folio_memcg_lock(struct folio *folio) { } diff --git a/mm/filemap.c b/mm/filemap.c index 00933089b8b6..758bbdf300e7 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -117,7 +117,7 @@ * ->i_pages lock (page_remove_rmap->set_page_dirty) * bdi.wb->list_lock (page_remove_rmap->set_page_dirty) * ->inode->i_lock (page_remove_rmap->set_page_dirty) - * ->memcg->move_lock (page_remove_rmap->lock_page_memcg) + * ->memcg->move_lock (page_remove_rmap->folio_memcg_lock) * bdi.wb->list_lock (zap_pte_range->set_page_dirty) * ->inode->i_lock (zap_pte_range->set_page_dirty) * ->private_lock (zap_pte_range->block_dirty_folio) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 93056918e956..cf06b1c9b3bb 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2148,17 +2148,12 @@ again: * When charge migration first begins, we can have multiple * critical sections holding the fast-path RCU lock and one * holding the slowpath move_lock. Track the task who has the - * move_lock for unlock_page_memcg(). + * move_lock for folio_memcg_unlock(). 
*/ memcg->move_lock_task = current; memcg->move_lock_flags = flags; } -void lock_page_memcg(struct page *page) -{ - folio_memcg_lock(page_folio(page)); -} - static void __folio_memcg_unlock(struct mem_cgroup *memcg) { if (memcg && memcg->move_lock_task == current) { @@ -2186,11 +2181,6 @@ void folio_memcg_unlock(struct folio *folio) __folio_memcg_unlock(folio_memcg(folio)); } -void unlock_page_memcg(struct page *page) -{ - folio_memcg_unlock(page_folio(page)); -} - struct memcg_stock_pcp { local_lock_t stock_lock; struct mem_cgroup *cached; /* this never be root cgroup */ @@ -2866,7 +2856,7 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg) * * - the page lock * - LRU isolation - * - lock_page_memcg() + * - folio_memcg_lock() * - exclusive reference * - mem_cgroup_trylock_pages() */ @@ -5829,7 +5819,7 @@ static int mem_cgroup_move_account(struct page *page, * with (un)charging, migration, LRU putback, or anything else * that would rely on a stable page's memory cgroup. * - * Note that lock_page_memcg is a memcg lock, not a page lock, + * Note that folio_memcg_lock is a memcg lock, not a page lock, * to save space. As soon as we switch page's memory cgroup to a * new memcg that isn't locked, the above state can change * concurrently again. Make sure we're truly done with it. @@ -6320,7 +6310,7 @@ static void mem_cgroup_move_charge(void) { lru_add_drain_all(); /* - * Signal lock_page_memcg() to take the memcg's move_lock + * Signal folio_memcg_lock() to take the memcg's move_lock * while we're moving its pages to another memcg. Then wait * for already started RCU-only updates to finish. */ diff --git a/mm/page-writeback.c b/mm/page-writeback.c index db7943999007..1d17fb1ec863 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2597,7 +2597,7 @@ EXPORT_SYMBOL(noop_dirty_folio); /* * Helper function for set_page_dirty family. * - * Caller must hold lock_page_memcg(). + * Caller must hold folio_memcg_lock(). * * NOTE: This relies on being atomic wrt interrupts. */ @@ -2631,7 +2631,7 @@ static void folio_account_dirtied(struct folio *folio, /* * Helper function for deaccounting dirty page without writeback. * - * Caller must hold lock_page_memcg(). + * Caller must hold folio_memcg_lock(). */ void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb) { @@ -2650,7 +2650,7 @@ void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb) * If warn is true, then emit a warning if the folio is not uptodate and has * not been truncated. * - * The caller must hold lock_page_memcg(). Most callers have the folio + * The caller must hold folio_memcg_lock(). Most callers have the folio * locked. A few have the folio blocked from truncation through other * means (eg zap_vma_pages() has it mapped and is holding the page table * lock). This can also be called from mark_buffer_dirty(), which I -- cgit v1.2.3 From 418fd29d9de53b143e28a07430e08dd414b74c3d Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Wed, 14 Jun 2023 16:31:22 +0200 Subject: mm: zswap: invaldiate entry after writeback When an entry started writeback, it used to be invalidated with ref count logic alone, meaning that it would stay on the tree until all references were put. The problem with this behavior is that as soon as the writeback started, the ownership of the data held by the entry is passed to the swapcache and should not be left in zswap too. 
Currently there are no known issues because of this, but this change explicitly invalidates an entry that started writeback to reduce opportunities for future bugs. This patch is a follow up on the series titled "mm: zswap: move writeback LRU from zpool to zswap" + commit f090b7949768("mm: zswap: support exclusive loads"). Link: https://lkml.kernel.org/r/20230614143122.74471-1-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Suggested-by: Johannes Weiner Acked-by: Johannes Weiner Cc: Dan Streetman Cc: Nhat Pham Cc: Seth Jennings Cc: Vitaly Wool Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/zswap.c | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/zswap.c b/mm/zswap.c index 02d0a6f46f4a..c122f042a49d 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -599,6 +599,16 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) return NULL; } +static void zswap_invalidate_entry(struct zswap_tree *tree, + struct zswap_entry *entry) +{ + /* remove from rbtree */ + zswap_rb_erase(&tree->rbroot, entry); + + /* drop the initial reference from entry creation */ + zswap_entry_put(tree, entry); +} + static int zswap_reclaim_entry(struct zswap_pool *pool) { struct zswap_entry *entry; @@ -644,12 +654,13 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) goto put_unlock; } - /* Check for invalidate() race */ - if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) - goto put_unlock; - - /* Drop base reference */ - zswap_entry_put(tree, entry); + /* + * Writeback started successfully, the page now belongs to the + * swapcache. Drop the entry from zswap - unless invalidate already + * took it out while we had the tree->lock released for IO. + */ + if (entry == zswap_rb_search(&tree->rbroot, swpoffset)) + zswap_invalidate_entry(tree, entry); put_unlock: /* Drop local reference */ @@ -1376,16 +1387,6 @@ shrink: goto reject; } -static void zswap_invalidate_entry(struct zswap_tree *tree, - struct zswap_entry *entry) -{ - /* remove from rbtree */ - zswap_rb_erase(&tree->rbroot, entry); - - /* drop the initial reference from entry creation */ - zswap_entry_put(tree, entry); -} - /* * returns 0 if the page was successfully decompressed * return -1 on entry not found or error -- cgit v1.2.3 From 452c03fdbed0d19f907c877a6a9edd226b1ebad9 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 14 Jun 2023 11:51:16 +0200 Subject: kasan: add support for kasan.fault=panic_on_write KASAN's boot time kernel parameter 'kasan.fault=' currently supports 'report' and 'panic', which results in either only reporting bugs or also panicking on reports. However, some users may wish to have more control over when KASAN reports result in a kernel panic: in particular, KASAN reported invalid _writes_ are of special interest, because they have greater potential to corrupt random kernel memory or be more easily exploited. To panic on invalid writes only, introduce 'kasan.fault=panic_on_write', which allows users to choose to continue running on invalid reads, but panic only on invalid writes. 
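As a usage illustration (an assumed example, not taken from the patch), a deployment that tolerates invalid reads but must stop on invalid writes would add the following to its kernel command line:

	kasan.fault=panic_on_write

In this mode reads keep producing reports (subject to kasan_multi_shot), while the first reported invalid write panics; invalid frees and asynchronous HW_TAGS faults are conservatively treated as writes, as the hunks below note.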
Link: https://lkml.kernel.org/r/20230614095158.1133673-1-elver@google.com Signed-off-by: Marco Elver Reviewed-by: Alexander Potapenko Cc: Aleksandr Nogikh Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Jonathan Corbet Cc: Taras Madan Cc: Vincenzo Frascino Signed-off-by: Andrew Morton --- Documentation/dev-tools/kasan.rst | 7 ++++--- mm/kasan/report.c | 31 ++++++++++++++++++++++++++----- 2 files changed, 30 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst index e66916a483cd..7f37a46af574 100644 --- a/Documentation/dev-tools/kasan.rst +++ b/Documentation/dev-tools/kasan.rst @@ -107,9 +107,10 @@ effectively disables ``panic_on_warn`` for KASAN reports. Alternatively, independent of ``panic_on_warn``, the ``kasan.fault=`` boot parameter can be used to control panic and reporting behaviour: -- ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN - report or also panic the kernel (default: ``report``). The panic happens even - if ``kasan_multi_shot`` is enabled. +- ``kasan.fault=report``, ``=panic``, or ``=panic_on_write`` controls whether + to only print a KASAN report, panic the kernel, or panic the kernel on + invalid writes only (default: ``report``). The panic happens even if + ``kasan_multi_shot`` is enabled. Software and Hardware Tag-Based KASAN modes (see the section about various modes below) support altering stack trace collection behavior: diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 84d9f3b37014..ca4b6ff080a6 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -43,6 +43,7 @@ enum kasan_arg_fault { KASAN_ARG_FAULT_DEFAULT, KASAN_ARG_FAULT_REPORT, KASAN_ARG_FAULT_PANIC, + KASAN_ARG_FAULT_PANIC_ON_WRITE, }; static enum kasan_arg_fault kasan_arg_fault __ro_after_init = KASAN_ARG_FAULT_DEFAULT; @@ -57,6 +58,8 @@ static int __init early_kasan_fault(char *arg) kasan_arg_fault = KASAN_ARG_FAULT_REPORT; else if (!strcmp(arg, "panic")) kasan_arg_fault = KASAN_ARG_FAULT_PANIC; + else if (!strcmp(arg, "panic_on_write")) + kasan_arg_fault = KASAN_ARG_FAULT_PANIC_ON_WRITE; else return -EINVAL; @@ -211,7 +214,7 @@ static void start_report(unsigned long *flags, bool sync) pr_err("==================================================================\n"); } -static void end_report(unsigned long *flags, const void *addr) +static void end_report(unsigned long *flags, const void *addr, bool is_write) { if (addr) trace_error_report_end(ERROR_DETECTOR_KASAN, @@ -220,8 +223,18 @@ static void end_report(unsigned long *flags, const void *addr) spin_unlock_irqrestore(&report_lock, *flags); if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) check_panic_on_warn("KASAN"); - if (kasan_arg_fault == KASAN_ARG_FAULT_PANIC) + switch (kasan_arg_fault) { + case KASAN_ARG_FAULT_DEFAULT: + case KASAN_ARG_FAULT_REPORT: + break; + case KASAN_ARG_FAULT_PANIC: panic("kasan.fault=panic set ...\n"); + break; + case KASAN_ARG_FAULT_PANIC_ON_WRITE: + if (is_write) + panic("kasan.fault=panic_on_write set ...\n"); + break; + } add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); lockdep_on(); report_suppress_stop(); @@ -536,7 +549,11 @@ void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_ty print_report(&info); - end_report(&flags, ptr); + /* + * Invalid free is considered a "write" since the allocator's metadata + * updates involves writes. 
+ */ + end_report(&flags, ptr, true); } /* @@ -570,7 +587,7 @@ bool kasan_report(const void *addr, size_t size, bool is_write, print_report(&info); - end_report(&irq_flags, (void *)addr); + end_report(&irq_flags, (void *)addr, is_write); out: user_access_restore(ua_flags); @@ -596,7 +613,11 @@ void kasan_report_async(void) pr_err("Asynchronous fault: no details available\n"); pr_err("\n"); dump_stack_lvl(KERN_ERR); - end_report(&flags, NULL); + /* + * Conservatively set is_write=true, because no details are available. + * In this mode, kasan.fault=panic_on_write is like kasan.fault=panic. + */ + end_report(&flags, NULL, true); } #endif /* CONFIG_KASAN_HW_TAGS */ -- cgit v1.2.3 From c9223a4aede774b0cea2a9944bc5dac48683e802 Mon Sep 17 00:00:00 2001 From: "Vishal Moola (Oracle)" Date: Tue, 13 Jun 2023 19:13:10 -0700 Subject: mm/gup_test.c: convert verify_dma_pinned() to us folios verify_dma_pinned() checks that pages are dma-pinned. We can convert this to use folios. Link: https://lkml.kernel.org/r/20230614021312.34085-4-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Lorenzo Stoakes Signed-off-by: Andrew Morton --- mm/gup_test.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/gup_test.c b/mm/gup_test.c index 1668ce0e0783..26f45fe45c48 100644 --- a/mm/gup_test.c +++ b/mm/gup_test.c @@ -40,24 +40,25 @@ static void verify_dma_pinned(unsigned int cmd, struct page **pages, unsigned long nr_pages) { unsigned long i; - struct page *page; + struct folio *folio; switch (cmd) { case PIN_FAST_BENCHMARK: case PIN_BASIC_TEST: case PIN_LONGTERM_BENCHMARK: for (i = 0; i < nr_pages; i++) { - page = pages[i]; - if (WARN(!page_maybe_dma_pinned(page), + folio = page_folio(pages[i]); + + if (WARN(!folio_maybe_dma_pinned(folio), "pages[%lu] is NOT dma-pinned\n", i)) { - dump_page(page, "gup_test failure"); + dump_page(&folio->page, "gup_test failure"); break; } else if (cmd == PIN_LONGTERM_BENCHMARK && - WARN(!is_longterm_pinnable_page(page), + WARN(!folio_is_longterm_pinnable(folio), "pages[%lu] is NOT pinnable but pinned\n", i)) { - dump_page(page, "gup_test failure"); + dump_page(&folio->page, "gup_test failure"); break; } } -- cgit v1.2.3 From 503670ee6d0670e114a66b8cf2bcd3f71f53c2f7 Mon Sep 17 00:00:00 2001 From: "Vishal Moola (Oracle)" Date: Tue, 13 Jun 2023 19:13:11 -0700 Subject: mm/gup.c: reorganize try_get_folio() try_get_folio() takes in a page, then chooses to do some folio operations based on the flags (either FOLL_GET or FOLL_PIN). We can rewrite this function to be more purpose oriented. After calling try_get_folio(), if neither FOLL_GET nor FOLL_PIN are set, warn and fail. If FOLL_GET is set we can return the result. If FOLL_GET is not set then FOLL_PIN is set, so we pin the folio. This change assists with folio conversions, and makes the function more readable. 
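Schematically, the reworked helper reads as follows; this is a condensed paraphrase of the hunk below (the PCI P2PDMA and long-term pinnability checks are omitted here), not additional behavior:

	if (WARN_ON_ONCE(!(flags & (FOLL_GET | FOLL_PIN))))
		return NULL;			/* neither flag: refuse */
	folio = try_get_folio(page, refs);
	if (flags & FOLL_GET)
		return folio;			/* plain get: done */
	if (!folio)
		return NULL;
	/* FOLL_PIN: bump the pincount and accounting, then return folio */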
Link: https://lkml.kernel.org/r/20230614021312.34085-5-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- mm/gup.c | 86 ++++++++++++++++++++++++++++++++-------------------------------- 1 file changed, 43 insertions(+), 43 deletions(-) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index 38986e522d34..ce14d4d28503 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -124,58 +124,58 @@ retry: */ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags) { + struct folio *folio; + + if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0)) + return NULL; + if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) return NULL; - if (flags & FOLL_GET) - return try_get_folio(page, refs); - else if (flags & FOLL_PIN) { - struct folio *folio; + folio = try_get_folio(page, refs); - /* - * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a - * right zone, so fail and let the caller fall back to the slow - * path. - */ - if (unlikely((flags & FOLL_LONGTERM) && - !is_longterm_pinnable_page(page))) - return NULL; + if (flags & FOLL_GET) + return folio; - /* - * CAUTION: Don't use compound_head() on the page before this - * point, the result won't be stable. - */ - folio = try_get_folio(page, refs); - if (!folio) - return NULL; + /* FOLL_PIN is set */ + if (!folio) + return NULL; - /* - * When pinning a large folio, use an exact count to track it. - * - * However, be sure to *also* increment the normal folio - * refcount field at least once, so that the folio really - * is pinned. That's why the refcount from the earlier - * try_get_folio() is left intact. - */ - if (folio_test_large(folio)) - atomic_add(refs, &folio->_pincount); - else - folio_ref_add(folio, - refs * (GUP_PIN_COUNTING_BIAS - 1)); - /* - * Adjust the pincount before re-checking the PTE for changes. - * This is essentially a smp_mb() and is paired with a memory - * barrier in page_try_share_anon_rmap(). - */ - smp_mb__after_atomic(); + /* + * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a + * right zone, so fail and let the caller fall back to the slow + * path. + */ + if (unlikely((flags & FOLL_LONGTERM) && + !folio_is_longterm_pinnable(folio))) { + if (!put_devmap_managed_page_refs(&folio->page, refs)) + folio_put_refs(folio, refs); + return NULL; + } - node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); + /* + * When pinning a large folio, use an exact count to track it. + * + * However, be sure to *also* increment the normal folio + * refcount field at least once, so that the folio really + * is pinned. That's why the refcount from the earlier + * try_get_folio() is left intact. + */ + if (folio_test_large(folio)) + atomic_add(refs, &folio->_pincount); + else + folio_ref_add(folio, + refs * (GUP_PIN_COUNTING_BIAS - 1)); + /* + * Adjust the pincount before re-checking the PTE for changes. + * This is essentially a smp_mb() and is paired with a memory + * barrier in page_try_share_anon_rmap(). 
+ */ + smp_mb__after_atomic(); - return folio; - } + node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); - WARN_ON_ONCE(1); - return NULL; + return folio; } static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) -- cgit v1.2.3 From aa13779be6b7d1ce0e6ba96b400b351de6f1a326 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 15 Jun 2023 18:33:22 +0000 Subject: mm/damon/core-test: add a test for damon_set_attrs() Commit 5ff6e2fff88e ("mm/damon/core: fix divide error in damon_nr_accesses_to_accesses_bp()") fixed a bug by adding arguments validation in damon_set_attrs(). Add a unit test for the added validation to ensure the bug cannot occur again. Link: https://lkml.kernel.org/r/20230615183323.87561-1-sj@kernel.org Signed-off-by: SeongJae Park Reviewed-by: Kefeng Wang Signed-off-by: Andrew Morton --- mm/damon/core-test.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'mm') diff --git a/mm/damon/core-test.h b/mm/damon/core-test.h index fae64d32b925..c11210124344 100644 --- a/mm/damon/core-test.h +++ b/mm/damon/core-test.h @@ -318,6 +318,29 @@ static void damon_test_update_monitoring_result(struct kunit *test) KUNIT_EXPECT_EQ(test, r->age, 20); } +static void damon_test_set_attrs(struct kunit *test) +{ + struct damon_ctx ctx; + struct damon_attrs valid_attrs = { + .min_nr_regions = 10, .max_nr_regions = 1000, + .sample_interval = 5000, .aggr_interval = 100000,}; + struct damon_attrs invalid_attrs; + + KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &valid_attrs), 0); + + invalid_attrs = valid_attrs; + invalid_attrs.min_nr_regions = 1; + KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL); + + invalid_attrs = valid_attrs; + invalid_attrs.max_nr_regions = 9; + KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL); + + invalid_attrs = valid_attrs; + invalid_attrs.aggr_interval = 4999; + KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL); +} + static struct kunit_case damon_test_cases[] = { KUNIT_CASE(damon_test_target), KUNIT_CASE(damon_test_regions), @@ -329,6 +352,7 @@ static struct kunit_case damon_test_cases[] = { KUNIT_CASE(damon_test_ops_registration), KUNIT_CASE(damon_test_set_regions), KUNIT_CASE(damon_test_update_monitoring_result), + KUNIT_CASE(damon_test_set_attrs), {}, }; -- cgit v1.2.3 From 025b7799b35d32e46988ba0614ea2f91b85d6375 Mon Sep 17 00:00:00 2001 From: ZhangPeng Date: Fri, 16 Jun 2023 14:30:30 +0800 Subject: mm/memcg: remove return value of mem_cgroup_scan_tasks() No user checks the return value of mem_cgroup_scan_tasks(). Make the return value void. 
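As a minimal illustration of the calling convention that remains after this change, here is a standalone sketch (names are made up, not the memcg API): the callback can still stop the walk by returning non-zero, but the scanner itself no longer propagates that value to its caller.

  /* scan_sketch.c - toy model of a void-returning scanner */
  #include <stdio.h>

  static void scan(int *items, int n, int (*fn)(int, void *), void *arg)
  {
          for (int i = 0; i < n; i++) {
                  /* a non-zero return still breaks the iteration ... */
                  if (fn(items[i], arg))
                          break;
          }
          /* ... but the value itself is no longer returned */
  }

  static int find_first_even(int item, void *arg)
  {
          if (item % 2 == 0) {
                  *(int *)arg = item;
                  return 1;       /* stop scanning */
          }
          return 0;
  }

  int main(void)
  {
          int items[] = { 3, 5, 8, 9 };
          int found = -1;

          scan(items, 4, find_first_even, &found);
          printf("found=%d\n", found);    /* prints 8 */
          return 0;
  }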
Link: https://lkml.kernel.org/r/20230616063030.977586-1-zhangpeng362@huawei.com Signed-off-by: ZhangPeng Cc: Johannes Weiner Cc: Kefeng Wang Cc: Michal Hocko Cc: Muchun Song Cc: Nanyong Sun Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 7 +++---- mm/memcontrol.c | 9 ++++----- 2 files changed, 7 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index c3d3a0c09315..5818af8eca5a 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -820,8 +820,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, struct mem_cgroup *, struct mem_cgroup_reclaim_cookie *); void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); -int mem_cgroup_scan_tasks(struct mem_cgroup *, - int (*)(struct task_struct *, void *), void *); +void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, + int (*)(struct task_struct *, void *), void *arg); static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) { @@ -1364,10 +1364,9 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root, { } -static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, +static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, int (*fn)(struct task_struct *, void *), void *arg) { - return 0; } static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index cf06b1c9b3bb..a834b1edcde9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1259,13 +1259,13 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) * * This function iterates over tasks attached to @memcg or to any of its * descendants and calls @fn for each task. If @fn returns a non-zero - * value, the function breaks the iteration loop and returns the value. - * Otherwise, it will iterate over all tasks and return 0. + * value, the function breaks the iteration loop. Otherwise, it will iterate + * over all tasks and return 0. * * This function must not be called for the root memory cgroup. */ -int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, - int (*fn)(struct task_struct *, void *), void *arg) +void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, + int (*fn)(struct task_struct *, void *), void *arg) { struct mem_cgroup *iter; int ret = 0; @@ -1285,7 +1285,6 @@ int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, break; } } - return ret; } #ifdef CONFIG_DEBUG_VM -- cgit v1.2.3 From cf01724e2d73a90524450e3dd8798cfb9d7aca05 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 17 Jun 2023 11:46:22 +0800 Subject: mm: page_alloc: make compound_page_dtors static It's only used inside page_alloc.c now. So make it static and remove the declaration in mm.h. 
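The table in question is a small enum-indexed array of destructor callbacks; a generic standalone sketch of that shape (illustrative names and entries, not the page allocator's table) looks like this:

  /* dtor_table_sketch.c - static const callback table indexed by an enum */
  #include <stdio.h>

  enum dtor_id { PLAIN_DTOR, HUGE_DTOR, NR_DTORS };

  static void free_plain(void) { puts("plain free"); }
  static void free_huge(void)  { puts("huge free"); }

  /* static + const: visible only in this file, placed in read-only data */
  static void (* const dtors[NR_DTORS])(void) = {
          [PLAIN_DTOR] = free_plain,
          [HUGE_DTOR]  = free_huge,
  };

  int main(void)
  {
          dtors[HUGE_DTOR]();     /* dispatch through the table */
          return 0;
  }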
Link: https://lkml.kernel.org/r/20230617034622.1235913-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Signed-off-by: Andrew Morton --- include/linux/mm.h | 1 - mm/page_alloc.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index a8baa34d0747..cf43deb25553 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1220,7 +1220,6 @@ enum compound_dtor_id { #endif NR_COMPOUND_DTORS, }; -extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS]; static inline void folio_set_compound_dtor(struct folio *folio, enum compound_dtor_id compound_dtor) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6a18f2232e3e..5b8a9d610b72 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -284,7 +284,7 @@ const char * const migratetype_names[MIGRATE_TYPES] = { #endif }; -compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = { +static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = { [NULL_COMPOUND_DTOR] = NULL, [COMPOUND_PAGE_DTOR] = free_compound_page, #ifdef CONFIG_HUGETLB_PAGE -- cgit v1.2.3 From c30034054a1ad394273f3fffe14752f83251600a Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Mon, 12 Jun 2023 06:12:24 -0600 Subject: mm: Fix a dangling Documentation/arm64 reference The arm64 documentation has moved under Documentation/arch/. Fix up a reference in mm/mremap.c to match. Cc: Andrew Morton Cc: linux-mm@kvack.org Acked-by: Mike Rapoport (IBM) Acked-by: Catalin Marinas Signed-off-by: Jonathan Corbet --- mm/mremap.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mremap.c b/mm/mremap.c index b11ce6c92099..3185724d8b13 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -914,7 +914,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, * mapping address intact. A non-zero tag will cause the subsequent * range checks to reject the address as invalid. * - * See Documentation/arm64/tagged-address-abi.rst for more information. + * See Documentation/arch/arm64/tagged-address-abi.rst for more + * information. */ addr = untagged_addr(addr); -- cgit v1.2.3 From 814bc1de03ea4361101408e63a68e4b82aef22cb Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Mon, 19 Jun 2023 13:38:21 -0600 Subject: mm/mglru: make memcg_lru->lock irq safe lru_gen_rotate_memcg() can happen in softirq if memory.soft_limit_in_bytes is set. This requires memcg_lru->lock to be irq safe. Lockdep warns on this. This problem only affects memcg v1. 
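A toy, single-threaded model of the locking discipline involved (the "IRQ flag" is just a variable here; this is not the kernel spinlock API): the _irqsave variant is the one to use when the caller's interrupt state is unknown, e.g. on a path reachable from softirq, because it saves and later restores whatever state was in effect, while the plain _irq variants assume interrupts were enabled on entry.

  /* irqsave_sketch.c - toy model of save/restore semantics */
  #include <stdio.h>
  #include <stdbool.h>

  static bool irqs_enabled = true;        /* models the local CPU's IRQ flag */

  static void lock_irqsave(unsigned long *flags)
  {
          *flags = irqs_enabled;          /* remember the current state ... */
          irqs_enabled = false;           /* ... and disable */
          /* the spinlock itself would be taken here */
  }

  static void unlock_irqrestore(unsigned long flags)
  {
          /* the spinlock itself would be dropped here */
          irqs_enabled = flags;           /* restore whatever the caller had */
  }

  static void rotate(void)                /* may run with IRQs on or off */
  {
          unsigned long flags;

          lock_irqsave(&flags);
          unlock_irqrestore(flags);
  }

  int main(void)
  {
          irqs_enabled = false;           /* pretend we are in softirq context */
          rotate();
          printf("after softirq-context call: irqs_enabled=%d\n", irqs_enabled);

          irqs_enabled = true;            /* ordinary process context */
          rotate();
          printf("after process-context call: irqs_enabled=%d\n", irqs_enabled);
          return 0;
  }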
Link: https://lkml.kernel.org/r/20230619193821.2710944-1-yuzhao@google.com Fixes: e4dde56cd208 ("mm: multi-gen LRU: per-node lru_gen_folio lists") Signed-off-by: Yu Zhao Reported-by: syzbot+87c490fd2be656269b6a@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=87c490fd2be656269b6a Reviewed-by: Yosry Ahmed Cc: Signed-off-by: Andrew Morton --- mm/vmscan.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 5bf98d0a22c9..6114a1fc6c68 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4728,10 +4728,11 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op) { int seg; int old, new; + unsigned long flags; int bin = get_random_u32_below(MEMCG_NR_BINS); struct pglist_data *pgdat = lruvec_pgdat(lruvec); - spin_lock(&pgdat->memcg_lru.lock); + spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); @@ -4766,7 +4767,7 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op) if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); - spin_unlock(&pgdat->memcg_lru.lock); + spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); } void lru_gen_online_memcg(struct mem_cgroup *memcg) @@ -4779,7 +4780,7 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg) struct pglist_data *pgdat = NODE_DATA(nid); struct lruvec *lruvec = get_lruvec(memcg, nid); - spin_lock(&pgdat->memcg_lru.lock); + spin_lock_irq(&pgdat->memcg_lru.lock); VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list)); @@ -4790,7 +4791,7 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg) lruvec->lrugen.gen = gen; - spin_unlock(&pgdat->memcg_lru.lock); + spin_unlock_irq(&pgdat->memcg_lru.lock); } } @@ -4814,7 +4815,7 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg) struct pglist_data *pgdat = NODE_DATA(nid); struct lruvec *lruvec = get_lruvec(memcg, nid); - spin_lock(&pgdat->memcg_lru.lock); + spin_lock_irq(&pgdat->memcg_lru.lock); VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); @@ -4826,7 +4827,7 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg) if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); - spin_unlock(&pgdat->memcg_lru.lock); + spin_unlock_irq(&pgdat->memcg_lru.lock); } } -- cgit v1.2.3 From 9721fd82351d47a37ba982272e128101f24efd7c Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Wed, 14 Jun 2023 16:40:20 +0800 Subject: mm: compaction: skip memory hole rapidly when isolating migratable pages On some machines, the normal zone can have a large memory hole like below memory layout, and we can see the range from 0x100000000 to 0x1800000000 is a hole. So when isolating some migratable pages, the scanner can meet the hole and it will take more time to skip the large hole. From my measurement, I can see the isolation scanner will take 80us ~ 100us to skip the large hole [0x100000000 - 0x1800000000]. So adding a new helper to fast search next online memory section to skip the large hole can help to find next suitable pageblock efficiently. With this patch, I can see the large hole scanning only takes < 1us. 
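A minimal userspace sketch of the "jump to the next online section" search follows; the section size and online map are made up for illustration, and the boot-time zone layout the changelog refers to is quoted right after this sketch.

  /* skip_sketch.c - find the first PFN of the next online section */
  #include <stdio.h>

  #define SECTION_SHIFT 15        /* pretend a section is 32768 pages */
  #define NR_SECTIONS   16

  static const int online[NR_SECTIONS] = {
          1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1
  };

  /* Return the first PFN of the next online section after an offline PFN,
   * or 0 if the PFN is already in an online section / nothing follows. */
  static unsigned long skip_offline_sections(unsigned long pfn)
  {
          unsigned long nr = pfn >> SECTION_SHIFT;

          if (nr >= NR_SECTIONS || online[nr])
                  return 0;

          while (++nr < NR_SECTIONS) {
                  if (online[nr])
                          return nr << SECTION_SHIFT;
          }
          return 0;
  }

  int main(void)
  {
          unsigned long pfn = 3UL << SECTION_SHIFT;       /* inside the hole */

          printf("next online pfn: %#lx\n", skip_offline_sections(pfn));
          return 0;
  }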
[ 0.000000] Zone ranges: [ 0.000000] DMA [mem 0x0000000040000000-0x00000000ffffffff] [ 0.000000] DMA32 empty [ 0.000000] Normal [mem 0x0000000100000000-0x0000001fa7ffffff] [ 0.000000] Movable zone start for each node [ 0.000000] Early memory node ranges [ 0.000000] node 0: [mem 0x0000000040000000-0x0000000fffffffff] [ 0.000000] node 0: [mem 0x0000001800000000-0x0000001fa3c7ffff] [ 0.000000] node 0: [mem 0x0000001fa3c80000-0x0000001fa3ffffff] [ 0.000000] node 0: [mem 0x0000001fa4000000-0x0000001fa402ffff] [ 0.000000] node 0: [mem 0x0000001fa4030000-0x0000001fa40effff] [ 0.000000] node 0: [mem 0x0000001fa40f0000-0x0000001fa73cffff] [ 0.000000] node 0: [mem 0x0000001fa73d0000-0x0000001fa745ffff] [ 0.000000] node 0: [mem 0x0000001fa7460000-0x0000001fa746ffff] [ 0.000000] node 0: [mem 0x0000001fa7470000-0x0000001fa758ffff] [ 0.000000] node 0: [mem 0x0000001fa7590000-0x0000001fa7ffffff] [baolin.wang@linux.alibaba.com: limit next_ptn to not exceed cc->free_pfn] Link: https://lkml.kernel.org/r/a1d859c28af0c7e85e91795e7473f553eb180a9d.1686813379.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/75b4c8ca36bf44ad8c42bf0685ac19d272e426ec.1686705221.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Suggested-by: David Hildenbrand Acked-by: David Hildenbrand Acked-by: "Huang, Ying" Cc: Mel Gorman Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/compaction.c | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 6149a2d324be..0fb3b89b3967 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -229,6 +229,33 @@ static void reset_cached_positions(struct zone *zone) pageblock_start_pfn(zone_end_pfn(zone) - 1); } +#ifdef CONFIG_SPARSEMEM +/* + * If the PFN falls into an offline section, return the start PFN of the + * next online section. If the PFN falls into an online section or if + * there is no next online section, return 0. + */ +static unsigned long skip_offline_sections(unsigned long start_pfn) +{ + unsigned long start_nr = pfn_to_section_nr(start_pfn); + + if (online_section_nr(start_nr)) + return 0; + + while (++start_nr <= __highest_present_section_nr) { + if (online_section_nr(start_nr)) + return section_nr_to_pfn(start_nr); + } + + return 0; +} +#else +static unsigned long skip_offline_sections(unsigned long start_pfn) +{ + return 0; +} +#endif + /* * Compound pages of >= pageblock_order should consistently be skipped until * released. It is always pointless to compact pages of such order (if they are @@ -1955,8 +1982,14 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc) page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, cc->zone); - if (!page) + if (!page) { + unsigned long next_pfn; + + next_pfn = skip_offline_sections(block_start_pfn); + if (next_pfn) + block_end_pfn = min(next_pfn, cc->free_pfn); continue; + } /* * If isolation recently failed, do not retry. Only check the -- cgit v1.2.3 From 726ccdba1521007fab4b2b7565d255fa0f2b770c Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 28 May 2023 00:25:31 +0900 Subject: kasan,kmsan: remove __GFP_KSWAPD_RECLAIM usage from kasan/kmsan syzbot is reporting lockdep warning in __stack_depot_save(), for the caller of __stack_depot_save() (i.e. __kasan_record_aux_stack() in this report) is responsible for masking __GFP_KSWAPD_RECLAIM flag in order not to wake kswapd which in turn wakes kcompactd. 
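In GFP terms, the masking such a caller has to do amounts to the following standalone sketch (the bit values are placeholders, and GFP_NOWAIT/GFP_ATOMIC are modelled on their definitions at the time of this change, i.e. __GFP_KSWAPD_RECLAIM and __GFP_HIGH|__GFP_KSWAPD_RECLAIM respectively), which is why the hunks below end up passing 0 and __GFP_HIGH instead of GFP_NOWAIT and GFP_ATOMIC.

  /* gfp_mask_sketch.c - illustrates the masking arithmetic only */
  #include <stdio.h>

  #define __GFP_HIGH            0x1u
  #define __GFP_KSWAPD_RECLAIM  0x2u
  #define __GFP_DIRECT_RECLAIM  0x4u

  #define GFP_NOWAIT  (__GFP_KSWAPD_RECLAIM)
  #define GFP_ATOMIC  (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)

  int main(void)
  {
          unsigned int flags = GFP_ATOMIC;

          /* what a caller holding arbitrary locks must guarantee:
           * no direct reclaim and no kswapd/kcompactd wakeup */
          flags &= ~(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM);

          printf("GFP_NOWAIT -> %#x after masking\n",
                 GFP_NOWAIT & ~(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM));
          printf("GFP_ATOMIC -> %#x (i.e. just __GFP_HIGH)\n", flags);
          return 0;
  }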
Since kasan/kmsan functions might be called with arbitrary locks held, mask __GFP_KSWAPD_RECLAIM flag from all GFP_NOWAIT/GFP_ATOMIC allocations in kasan/kmsan. Note that kmsan_save_stack_with_flags() is changed to mask both __GFP_DIRECT_RECLAIM flag and __GFP_KSWAPD_RECLAIM flag, for wakeup_kswapd() from wake_all_kswapds() from __alloc_pages_slowpath() calls wakeup_kcompactd() if __GFP_KSWAPD_RECLAIM flag is set and __GFP_DIRECT_RECLAIM flag is not set. Link: https://lkml.kernel.org/r/656cb4f5-998b-c8d7-3c61-c2d37aa90f9a@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa Reported-by: syzbot Closes: https://syzkaller.appspot.com/bug?extid=ece2915262061d6e0ac1 Reviewed-by: "Huang, Ying" Reviewed-by: Alexander Potapenko Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Cc: Mel Gorman Cc: Vincenzo Frascino Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/kasan/generic.c | 4 ++-- mm/kasan/tags.c | 2 +- mm/kmsan/core.c | 6 +++--- mm/kmsan/instrumentation.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 224d161a5a22..5b4c97baa656 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -488,7 +488,7 @@ static void __kasan_record_aux_stack(void *addr, bool can_alloc) return; alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0]; - alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT, can_alloc); + alloc_meta->aux_stack[0] = kasan_save_stack(0, can_alloc); } void kasan_record_aux_stack(void *addr) @@ -518,7 +518,7 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object) if (!free_meta) return; - kasan_set_track(&free_meta->free_track, GFP_NOWAIT); + kasan_set_track(&free_meta->free_track, 0); /* The object was freed and has free track set. */ *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK; } diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index 67a222586846..7dcfe341d48e 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -140,5 +140,5 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) void kasan_save_free_info(struct kmem_cache *cache, void *object) { - save_stack_info(cache, object, GFP_NOWAIT, true); + save_stack_info(cache, object, 0, true); } diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c index 7d1e4aa30bae..3adb4c1d3b19 100644 --- a/mm/kmsan/core.c +++ b/mm/kmsan/core.c @@ -74,7 +74,7 @@ depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags, nr_entries = stack_trace_save(entries, KMSAN_STACK_DEPTH, 0); /* Don't sleep. */ - flags &= ~__GFP_DIRECT_RECLAIM; + flags &= ~(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM); handle = __stack_depot_save(entries, nr_entries, flags, true); return stack_depot_set_extra_bits(handle, extra); @@ -245,7 +245,7 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id) extra_bits = kmsan_extra_bits(depth, uaf); entries[0] = KMSAN_CHAIN_MAGIC_ORIGIN; - entries[1] = kmsan_save_stack_with_flags(GFP_ATOMIC, 0); + entries[1] = kmsan_save_stack_with_flags(__GFP_HIGH, 0); entries[2] = id; /* * @entries is a local var in non-instrumented code, so KMSAN does not @@ -253,7 +253,7 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id) * positives when __stack_depot_save() passes it to instrumented code. 
*/ kmsan_internal_unpoison_memory(entries, sizeof(entries), false); - handle = __stack_depot_save(entries, ARRAY_SIZE(entries), GFP_ATOMIC, + handle = __stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH, true); return stack_depot_set_extra_bits(handle, extra_bits); } diff --git a/mm/kmsan/instrumentation.c b/mm/kmsan/instrumentation.c index cf12e9616b24..cc3907a9c33a 100644 --- a/mm/kmsan/instrumentation.c +++ b/mm/kmsan/instrumentation.c @@ -282,7 +282,7 @@ void __msan_poison_alloca(void *address, uintptr_t size, char *descr) /* stack_depot_save() may allocate memory. */ kmsan_enter_runtime(); - handle = stack_depot_save(entries, ARRAY_SIZE(entries), GFP_ATOMIC); + handle = stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH); kmsan_leave_runtime(); kmsan_internal_set_shadow_origin(address, size, -1, handle, -- cgit v1.2.3 From 91f0dccef141483f8399299c39ce9114d19bb147 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Mon, 19 Jun 2023 13:04:42 +0000 Subject: mm/memcontrol: do not tweak node in mem_cgroup_init() mem_cgroup_init() request for allocations from each possible node, and it's used to be a problem because NODE_DATA is not allocated for offline node. Things have already changed since commit 09f49dca570a9 ("mm: handle uninitialized numa nodes gracefully"), so it's unnecessary to check for !node_online nodes here. How to test? qemu-system-x86_64 \ -kernel vmlinux \ -initrd full.rootfs.cpio.gz \ -append "console=ttyS0,115200 root=/dev/ram0 nokaslr earlyprintk=serial oops=panic panic_on_warn" \ -drive format=qcow2,file=vm_disk.qcow2,media=disk,if=ide \ -enable-kvm \ -cpu host \ -m 8G,slots=2,maxmem=16G \ -smp cores=4,threads=1,sockets=2 \ -object memory-backend-ram,id=mem0,size=4G \ -object memory-backend-ram,id=mem1,size=4G \ -numa node,memdev=mem0,cpus=0-3,nodeid=0 \ -numa node,memdev=mem1,cpus=4-7,nodeid=1 \ -numa node,nodeid=2 \ -net nic,model=virtio,macaddr=52:54:00:12:34:58 \ -net user \ -nographic \ -rtc base=localtime \ -gdb tcp::6000 Guest state when booting: [ 0.048881] NUMA: Node 0 [mem 0x00000000-0x0009ffff] + [mem 0x00100000-0xbfffffff] -> [mem 0x00000000-0xbfffffff] [ 0.050489] NUMA: Node 0 [mem 0x00000000-0xbfffffff] + [mem 0x100000000-0x13fffffff] -> [mem 0x00000000-0x13fffffff] [ 0.052173] NODE_DATA(0) allocated [mem 0x13fffc000-0x13fffffff] [ 0.053164] NODE_DATA(1) allocated [mem 0x23fffa000-0x23fffdfff] [ 0.054187] Zone ranges: [ 0.054587] DMA [mem 0x0000000000001000-0x0000000000ffffff] [ 0.055551] DMA32 [mem 0x0000000001000000-0x00000000ffffffff] [ 0.056515] Normal [mem 0x0000000100000000-0x000000023fffffff] [ 0.057484] Movable zone start for each node [ 0.058149] Early memory node ranges [ 0.058705] node 0: [mem 0x0000000000001000-0x000000000009efff] [ 0.059679] node 0: [mem 0x0000000000100000-0x00000000bffdffff] [ 0.060659] node 0: [mem 0x0000000100000000-0x000000013fffffff] [ 0.061649] node 1: [mem 0x0000000140000000-0x000000023fffffff] [ 0.062638] Initmem setup node 0 [mem 0x0000000000001000-0x000000013fffffff] [ 0.063745] Initmem setup node 1 [mem 0x0000000140000000-0x000000023fffffff] [ 0.064855] DMA zone: 158 reserved pages exceeds freesize 0 [ 0.065746] Initializing node 2 as memoryless [ 0.066437] Initmem setup node 2 as memoryless [ 0.067132] DMA zone: 158 reserved pages exceeds freesize 0 [ 0.068037] On node 0, zone DMA: 1 pages in unavailable ranges [ 0.068265] On node 0, zone DMA: 97 pages in unavailable ranges [ 0.124755] On node 0, zone Normal: 32 pages in unavailable ranges cat /sys/devices/system/node/online 0-1 cat 
/sys/devices/system/node/possible 0-2 Link: https://lkml.kernel.org/r/20230619130442.2487-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/memcontrol.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a834b1edcde9..e8ca4bdcb03c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -7376,8 +7376,7 @@ static int __init mem_cgroup_init(void) for_each_node(node) { struct mem_cgroup_tree_per_node *rtpn; - rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, - node_online(node) ? node : NUMA_NO_NODE); + rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node); rtpn->rb_root = RB_ROOT; rtpn->rb_rightmost = NULL; -- cgit v1.2.3 From 0b52c420350e8f9873ba62768cd8246827184408 Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Mon, 19 Jun 2023 12:33:51 +0200 Subject: mm: fix shmem THP counters on migration The per node numa_stat values for shmem don't change on page migration for THP: grep shmem /sys/fs/cgroup/machine.slice/.../memory.numa_stat: shmem N0=1092616192 N1=10485760 shmem_thp N0=1092616192 N1=10485760 migratepages 9181 0 1: shmem N0=0 N1=1103101952 shmem_thp N0=1092616192 N1=10485760 Fix that by updating shmem_thp counters likewise to shmem counters on page migration. [jglauber@digitalocean.com: use folio_test_pmd_mappable instead of folio_test_transhuge] Link: https://lkml.kernel.org/r/20230622094720.510540-1-jglauber@digitalocean.com Link: https://lkml.kernel.org/r/20230619103351.234837-1-jglauber@digitalocean.com Signed-off-by: Jan Glauber Reviewed-by: Baolin Wang Cc: "Huang, Ying" Signed-off-by: Andrew Morton --- mm/migrate.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index ce35afdbc1e3..eca3bf0e93b8 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -486,6 +486,11 @@ int folio_migrate_mapping(struct address_space *mapping, if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) { __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr); __mod_lruvec_state(new_lruvec, NR_SHMEM, nr); + + if (folio_test_pmd_mappable(folio)) { + __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr); + __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr); + } } #ifdef CONFIG_SWAP if (folio_test_swapcache(folio)) { -- cgit v1.2.3 From 9883c7f84053cec2826ca3c56254601b5ce9cdbe Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 19 Jun 2023 15:27:25 -0300 Subject: mm/gup: do not return 0 from pin_user_pages_fast() for bad args These routines are not intended to return zero, the callers cannot do anything sane with a 0 return. They should return an error which means future calls to GUP will not succeed, or they should return some non-zero number of pinned pages which means GUP should be called again. If start + nr_pages overflows it should return -EOVERFLOW to signal the arguments are invalid. Syzkaller keeps tripping on this when fuzzing GUP arguments. 
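A standalone sketch of the argument check, using the compiler builtin that check_add_overflow() wraps (PAGE_SHIFT and the task-size limit here are illustrative):

  /* overflow_sketch.c - reject wrapping start + len instead of returning 0 */
  #include <stdio.h>
  #include <errno.h>

  #define PAGE_SHIFT 12

  static int validate(unsigned long start, unsigned long nr_pages,
                      unsigned long task_size_max)
  {
          unsigned long len = nr_pages << PAGE_SHIFT;
          unsigned long end;

          if (__builtin_add_overflow(start, len, &end))
                  return -EOVERFLOW;      /* invalid args: fail, don't return 0 */
          if (end > task_size_max)
                  return -EFAULT;
          return 0;
  }

  int main(void)
  {
          /* start near the top of the address space plus a large length */
          printf("%d\n", validate(~0UL - 4096, 2, 1UL << 47)); /* -EOVERFLOW */
          printf("%d\n", validate(4096, 2, 1UL << 47));        /* 0 (ok) */
          return 0;
  }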
Link: https://lkml.kernel.org/r/0-v1-3d5ed1f20d50+104-gup_overflow_jgg@nvidia.com Signed-off-by: Jason Gunthorpe Reported-by: syzbot+353c7be4964c6253f24a@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/000000000000094fdd05faa4d3a4@google.com Reviewed-by: John Hubbard Reviewed-by: Lorenzo Stoakes Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- mm/gup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index ce14d4d28503..a8336b39d6b5 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -3080,7 +3080,7 @@ static int internal_get_user_pages_fast(unsigned long start, start = untagged_addr(start) & PAGE_MASK; len = nr_pages << PAGE_SHIFT; if (check_add_overflow(start, len, &end)) - return 0; + return -EOVERFLOW; if (end > TASK_SIZE_MAX) return -EFAULT; if (unlikely(!access_ok((void __user *)start, len))) -- cgit v1.2.3 From 61167ad5fecdeaa037f3df1ba354dddd5f66a1ed Mon Sep 17 00:00:00 2001 From: Yajun Deng Date: Mon, 19 Jun 2023 10:34:06 +0800 Subject: mm: pass nid to reserve_bootmem_region() early_pfn_to_nid() is called frequently in init_reserved_page(), it returns the node id of the PFN. These PFN are probably from the same memory region, they have the same node id. It's not necessary to call early_pfn_to_nid() for each PFN. Pass nid to reserve_bootmem_region() and drop the call to early_pfn_to_nid() in init_reserved_page(). Also, set nid on all reserved pages before doing this, as some reserved memory regions may not be set nid. The most beneficial function is memmap_init_reserved_pages() if CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled. The following data was tested on an x86 machine with 190GB of RAM. before: memmap_init_reserved_pages() 67ms after: memmap_init_reserved_pages() 20ms Link: https://lkml.kernel.org/r/20230619023406.424298-1-yajun.deng@linux.dev Signed-off-by: Yajun Deng Reviewed-by: Mike Rapoport (IBM) Signed-off-by: Andrew Morton --- include/linux/mm.h | 3 ++- mm/memblock.c | 31 +++++++++++++++++++++---------- mm/mm_init.c | 30 +++++++++++++++++------------- 3 files changed, 40 insertions(+), 24 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index cf43deb25553..9ecb8b9c07f6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2940,7 +2940,8 @@ extern unsigned long free_reserved_area(void *start, void *end, extern void adjust_managed_page_count(struct page *page, long count); -extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); +extern void reserve_bootmem_region(phys_addr_t start, + phys_addr_t end, int nid); /* Free the reserved page into the buddy system, so it gets managed. 
*/ static inline void free_reserved_page(struct page *page) diff --git a/mm/memblock.c b/mm/memblock.c index da4264528e1e..46739551d4d1 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -2082,19 +2082,30 @@ static void __init memmap_init_reserved_pages(void) { struct memblock_region *region; phys_addr_t start, end; - u64 i; + int nid; + + /* + * set nid on all reserved pages and also treat struct + * pages for the NOMAP regions as PageReserved + */ + for_each_mem_region(region) { + nid = memblock_get_region_node(region); + start = region->base; + end = start + region->size; + + if (memblock_is_nomap(region)) + reserve_bootmem_region(start, end, nid); + + memblock_set_node(start, end, &memblock.reserved, nid); + } /* initialize struct pages for the reserved regions */ - for_each_reserved_mem_range(i, &start, &end) - reserve_bootmem_region(start, end); + for_each_reserved_mem_region(region) { + nid = memblock_get_region_node(region); + start = region->base; + end = start + region->size; - /* and also treat struct pages for the NOMAP regions as PageReserved */ - for_each_mem_region(region) { - if (memblock_is_nomap(region)) { - start = region->base; - end = start + region->size; - reserve_bootmem_region(start, end); - } + reserve_bootmem_region(start, end, nid); } } diff --git a/mm/mm_init.c b/mm/mm_init.c index 122e9bf3fa73..7ffa609673ea 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -646,10 +646,8 @@ static inline void pgdat_set_deferred_range(pg_data_t *pgdat) } /* Returns true if the struct page for the pfn is initialised */ -static inline bool __meminit early_page_initialised(unsigned long pfn) +static inline bool __meminit early_page_initialised(unsigned long pfn, int nid) { - int nid = early_pfn_to_nid(pfn); - if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) return false; @@ -695,15 +693,14 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn) return false; } -static void __meminit init_reserved_page(unsigned long pfn) +static void __meminit init_reserved_page(unsigned long pfn, int nid) { pg_data_t *pgdat; - int nid, zid; + int zid; - if (early_page_initialised(pfn)) + if (early_page_initialised(pfn, nid)) return; - nid = early_pfn_to_nid(pfn); pgdat = NODE_DATA(nid); for (zid = 0; zid < MAX_NR_ZONES; zid++) { @@ -717,7 +714,7 @@ static void __meminit init_reserved_page(unsigned long pfn) #else static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} -static inline bool early_page_initialised(unsigned long pfn) +static inline bool early_page_initialised(unsigned long pfn, int nid) { return true; } @@ -727,7 +724,7 @@ static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) return false; } -static inline void init_reserved_page(unsigned long pfn) +static inline void init_reserved_page(unsigned long pfn, int nid) { } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ @@ -738,7 +735,8 @@ static inline void init_reserved_page(unsigned long pfn) * marks the pages PageReserved. The remaining valid pages are later * sent to the buddy page allocator. 
*/ -void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) +void __meminit reserve_bootmem_region(phys_addr_t start, + phys_addr_t end, int nid) { unsigned long start_pfn = PFN_DOWN(start); unsigned long end_pfn = PFN_UP(end); @@ -747,7 +745,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) if (pfn_valid(start_pfn)) { struct page *page = pfn_to_page(start_pfn); - init_reserved_page(start_pfn); + init_reserved_page(start_pfn, nid); /* Avoid false-positive PageTail() */ INIT_LIST_HEAD(&page->lru); @@ -2572,8 +2570,14 @@ void __init set_dma_reserve(unsigned long new_dma_reserve) void __init memblock_free_pages(struct page *page, unsigned long pfn, unsigned int order) { - if (!early_page_initialised(pfn)) - return; + + if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) { + int nid = early_pfn_to_nid(pfn); + + if (!early_page_initialised(pfn, nid)) + return; + } + if (!kmsan_memblock_free_pages(page, order)) { /* KMSAN will take care of these pages. */ return; -- cgit v1.2.3 From 3fda49e89f1702df6bb6a2470076b1a7bf3a29de Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 19 Jun 2023 23:50:00 -0700 Subject: mm/swapfile: delete outdated pte_offset_map() comment Delete a triply out-of-date comment from add_swap_count_continuation(): 1. vmalloc_to_page() changed from pte_offset_map() to pte_offset_kernel() 2. pte_offset_map() changed from using kmap_atomic() to kmap_local_page() 3. kmap_atomic() changed from using fixed FIX_KMAP addresses in 2.6.37. Link: https://lkml.kernel.org/r/9022632b-ba9d-8cb0-c25-4be9786481b5@google.com Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton --- mm/swapfile.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index a6945c2e0d03..92ed7cba2268 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3456,11 +3456,6 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) goto out; } - /* - * We are fortunate that although vmalloc_to_page uses pte_offset_map, - * no architecture is using highmem pages for kernel page tables: so it - * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps. - */ head = vmalloc_to_page(si->swap_map + offset); offset &= ~PAGE_MASK; -- cgit v1.2.3 From b5665cf936bf3955fec18c09a6aa53c8a57ea8b7 Mon Sep 17 00:00:00 2001 From: Ivan Orlov Date: Tue, 20 Jun 2023 20:33:15 +0200 Subject: mm: backing-dev: make bdi_class a static const structure Now that the driver core allows for struct class to be in read-only memory, move the bdi_class structure to be declared at build time placing it into read-only memory, instead of having to be dynamically allocated at load time. 
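A generic standalone sketch of the pattern, with stand-in types rather than the driver core's struct class: the instance is described once at build time, lives in read-only data, and is merely registered at init instead of being allocated and filled in at runtime.

  /* const_class_sketch.c - build-time const object + registration call */
  #include <stdio.h>

  struct klass {
          const char *name;
          const char **groups;
  };

  static const char *bdi_groups[] = { "read_ahead_kb", "min_ratio", NULL };

  /* build-time instance: placed in read-only data, never kfree()d */
  static const struct klass bdi_class = {
          .name   = "bdi",
          .groups = bdi_groups,
  };

  static int klass_register(const struct klass *c)
  {
          printf("registered class %s\n", c->name);
          return 0;
  }

  int main(void)
  {
          return klass_register(&bdi_class);
  }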
Link: https://lkml.kernel.org/r/20230620183314.682822-2-gregkh@linuxfoundation.org Signed-off-by: Ivan Orlov Signed-off-by: Greg Kroah-Hartman Suggested-by: Greg Kroah-Hartman Cc: Jens Axboe Cc: Christoph Hellwig Signed-off-by: Andrew Morton --- mm/backing-dev.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 7da9727fcdf3..3ffc3cfa7a14 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -20,7 +20,6 @@ struct backing_dev_info noop_backing_dev_info; EXPORT_SYMBOL_GPL(noop_backing_dev_info); -static struct class *bdi_class; static const char *bdi_unknown_name = "(unknown)"; /* @@ -345,13 +344,19 @@ static struct attribute *bdi_dev_attrs[] = { }; ATTRIBUTE_GROUPS(bdi_dev); +static const struct class bdi_class = { + .name = "bdi", + .dev_groups = bdi_dev_groups, +}; + static __init int bdi_class_init(void) { - bdi_class = class_create("bdi"); - if (IS_ERR(bdi_class)) - return PTR_ERR(bdi_class); + int ret; + + ret = class_register(&bdi_class); + if (ret) + return ret; - bdi_class->dev_groups = bdi_dev_groups; bdi_debug_init(); return 0; @@ -1001,7 +1006,7 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args) return 0; vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args); - dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name); + dev = device_create(&bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name); if (IS_ERR(dev)) return PTR_ERR(dev); -- cgit v1.2.3 From 1bf61092bc90a9054d01cfdf35b42c1bf6fe47c7 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Wed, 21 Jun 2023 16:14:28 +0800 Subject: mm: page_alloc: use the correct type of list for free pages Commit bf75f200569d ("mm/page_alloc: add page->buddy_list and page->pcp_list") introduces page->buddy_list and page->pcp_list as a union with page->lru, but missed to change get_page_from_free_area() to use page->buddy_list to clarify the correct type of list for a free page. Link: https://lkml.kernel.org/r/7e7ab533247d40c0ea0373c18a6a48e5667f9e10.1687333557.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Mel Gorman Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5b8a9d610b72..158f24d186e6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -727,7 +727,7 @@ static inline struct page *get_page_from_free_area(struct free_area *area, int migratetype) { return list_first_entry_or_null(&area->free_list[migratetype], - struct page, lru); + struct page, buddy_list); } /* -- cgit v1.2.3 From e0b72c14d8dcc9477e580c261041dae86d4906fe Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 21 Jun 2023 17:45:50 +0100 Subject: mm: remove check_move_unevictable_pages() All callers have now been converted to call check_move_unevictable_folios(). 
Link: https://lkml.kernel.org/r/20230621164557.3510324-7-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/swap.h | 1 - mm/vmscan.c | 17 ----------------- 2 files changed, 18 deletions(-) (limited to 'mm') diff --git a/include/linux/swap.h b/include/linux/swap.h index ce7e82cf787f..456546443f1f 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -439,7 +439,6 @@ static inline bool node_reclaim_enabled(void) } void check_move_unevictable_folios(struct folio_batch *fbatch); -void check_move_unevictable_pages(struct pagevec *pvec); extern void __meminit kswapd_run(int nid); extern void __meminit kswapd_stop(int nid); diff --git a/mm/vmscan.c b/mm/vmscan.c index 27f90896f789..049342b6317c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -8075,23 +8075,6 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) } #endif -void check_move_unevictable_pages(struct pagevec *pvec) -{ - struct folio_batch fbatch; - unsigned i; - - folio_batch_init(&fbatch); - for (i = 0; i < pvec->nr; i++) { - struct page *page = pvec->pages[i]; - - if (PageTransTail(page)) - continue; - folio_batch_add(&fbatch, page_folio(page)); - } - check_move_unevictable_folios(&fbatch); -} -EXPORT_SYMBOL_GPL(check_move_unevictable_pages); - /** * check_move_unevictable_folios - Move evictable folios to appropriate zone * lru list -- cgit v1.2.3 From 1e0877d58b1e22517d8939b22b963c043e6c63fd Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 21 Jun 2023 17:45:54 +0100 Subject: mm: remove struct pagevec All users are now converted to use the folio_batch so we can get rid of this data structure. Link: https://lkml.kernel.org/r/20230621164557.3510324-11-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/pagevec.h | 63 ++++--------------------------------------------- mm/swap.c | 18 +++++++------- 2 files changed, 13 insertions(+), 68 deletions(-) (limited to 'mm') diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 3a9d29dd28a3..87cc678adc85 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -3,65 +3,18 @@ * include/linux/pagevec.h * * In many places it is efficient to batch an operation up against multiple - * pages. A pagevec is a multipage container which is used for that. + * folios. A folio_batch is a container which is used for that. */ #ifndef _LINUX_PAGEVEC_H #define _LINUX_PAGEVEC_H -#include +#include -/* 15 pointers + header align the pagevec structure to a power of two */ +/* 15 pointers + header align the folio_batch structure to a power of two */ #define PAGEVEC_SIZE 15 -struct page; struct folio; -struct address_space; - -/* Layout must match folio_batch */ -struct pagevec { - unsigned char nr; - bool percpu_pvec_drained; - struct page *pages[PAGEVEC_SIZE]; -}; - -void __pagevec_release(struct pagevec *pvec); - -static inline void pagevec_init(struct pagevec *pvec) -{ - pvec->nr = 0; - pvec->percpu_pvec_drained = false; -} - -static inline void pagevec_reinit(struct pagevec *pvec) -{ - pvec->nr = 0; -} - -static inline unsigned pagevec_count(struct pagevec *pvec) -{ - return pvec->nr; -} - -static inline unsigned pagevec_space(struct pagevec *pvec) -{ - return PAGEVEC_SIZE - pvec->nr; -} - -/* - * Add a page to a pagevec. Returns the number of slots still available. 
- */ -static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page) -{ - pvec->pages[pvec->nr++] = page; - return pagevec_space(pvec); -} - -static inline void pagevec_release(struct pagevec *pvec) -{ - if (pagevec_count(pvec)) - __pagevec_release(pvec); -} /** * struct folio_batch - A collection of folios. @@ -78,11 +31,6 @@ struct folio_batch { struct folio *folios[PAGEVEC_SIZE]; }; -/* Layout must match pagevec */ -static_assert(sizeof(struct pagevec) == sizeof(struct folio_batch)); -static_assert(offsetof(struct pagevec, pages) == - offsetof(struct folio_batch, folios)); - /** * folio_batch_init() - Initialise a batch of folios * @fbatch: The folio batch. @@ -127,10 +75,7 @@ static inline unsigned folio_batch_add(struct folio_batch *fbatch, return folio_batch_space(fbatch); } -static inline void __folio_batch_release(struct folio_batch *fbatch) -{ - __pagevec_release((struct pagevec *)fbatch); -} +void __folio_batch_release(struct folio_batch *pvec); static inline void folio_batch_release(struct folio_batch *fbatch) { diff --git a/mm/swap.c b/mm/swap.c index 423199ee8478..10348c1cf9c5 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -1044,25 +1044,25 @@ void release_pages(release_pages_arg arg, int nr) EXPORT_SYMBOL(release_pages); /* - * The pages which we're about to release may be in the deferred lru-addition + * The folios which we're about to release may be in the deferred lru-addition * queues. That would prevent them from really being freed right now. That's - * OK from a correctness point of view but is inefficient - those pages may be + * OK from a correctness point of view but is inefficient - those folios may be * cache-warm and we want to give them back to the page allocator ASAP. * - * So __pagevec_release() will drain those queues here. + * So __folio_batch_release() will drain those queues here. * folio_batch_move_lru() calls folios_put() directly to avoid * mutual recursion. */ -void __pagevec_release(struct pagevec *pvec) +void __folio_batch_release(struct folio_batch *fbatch) { - if (!pvec->percpu_pvec_drained) { + if (!fbatch->percpu_pvec_drained) { lru_add_drain(); - pvec->percpu_pvec_drained = true; + fbatch->percpu_pvec_drained = true; } - release_pages(pvec->pages, pagevec_count(pvec)); - pagevec_reinit(pvec); + release_pages(fbatch->folios, folio_batch_count(fbatch)); + folio_batch_reinit(fbatch); } -EXPORT_SYMBOL(__pagevec_release); +EXPORT_SYMBOL(__folio_batch_release); /** * folio_batch_remove_exceptionals() - Prune non-folios from a batch. -- cgit v1.2.3 From 1a0fc811f5f5addf54499826bd1b6e34e917491c Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 21 Jun 2023 17:45:55 +0100 Subject: mm: rename invalidate_mapping_pagevec to mapping_try_invalidate We don't use pagevecs for the LRU cache any more, and we don't know that the failed invalidations were due to the folio being in an LRU cache. So rename it to be more accurate. 
Link: https://lkml.kernel.org/r/20230621164557.3510324-12-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- mm/fadvise.c | 16 +++++++--------- mm/internal.h | 4 ++-- mm/truncate.c | 25 ++++++++++++------------- 3 files changed, 21 insertions(+), 24 deletions(-) (limited to 'mm') diff --git a/mm/fadvise.c b/mm/fadvise.c index fb7c5f43fd2a..f684ffd7f9c9 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -143,7 +143,7 @@ int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice) } if (end_index >= start_index) { - unsigned long nr_pagevec = 0; + unsigned long nr_failed = 0; /* * It's common to FADV_DONTNEED right after @@ -156,17 +156,15 @@ int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice) */ lru_add_drain(); - invalidate_mapping_pagevec(mapping, - start_index, end_index, - &nr_pagevec); + mapping_try_invalidate(mapping, start_index, end_index, + &nr_failed); /* - * If fewer pages were invalidated than expected then - * it is possible that some of the pages were on - * a per-cpu pagevec for a remote CPU. Drain all - * pagevecs and try again. + * The failures may be due to the folio being + * in the LRU cache of a remote CPU. Drain all + * caches and try again. */ - if (nr_pagevec) { + if (nr_failed) { lru_add_drain_all(); invalidate_mapping_pages(mapping, start_index, end_index); diff --git a/mm/internal.h b/mm/internal.h index b0d8778dd910..a19255615bf2 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -133,8 +133,8 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio); bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end); long invalidate_inode_page(struct page *page); -unsigned long invalidate_mapping_pagevec(struct address_space *mapping, - pgoff_t start, pgoff_t end, unsigned long *nr_pagevec); +unsigned long mapping_try_invalidate(struct address_space *mapping, + pgoff_t start, pgoff_t end, unsigned long *nr_failed); /** * folio_evictable - Test whether a folio is evictable. diff --git a/mm/truncate.c b/mm/truncate.c index 86de31ed4d32..4a917570887f 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -486,18 +486,17 @@ void truncate_inode_pages_final(struct address_space *mapping) EXPORT_SYMBOL(truncate_inode_pages_final); /** - * invalidate_mapping_pagevec - Invalidate all the unlocked pages of one inode - * @mapping: the address_space which holds the pages to invalidate + * mapping_try_invalidate - Invalidate all the evictable folios of one inode + * @mapping: the address_space which holds the folios to invalidate * @start: the offset 'from' which to invalidate * @end: the offset 'to' which to invalidate (inclusive) - * @nr_pagevec: invalidate failed page number for caller + * @nr_failed: How many folio invalidations failed * - * This helper is similar to invalidate_mapping_pages(), except that it accounts - * for pages that are likely on a pagevec and counts them in @nr_pagevec, which - * will be used by the caller. + * This function is similar to invalidate_mapping_pages(), except that it + * returns the number of folios which could not be evicted in @nr_failed. 
*/ -unsigned long invalidate_mapping_pagevec(struct address_space *mapping, - pgoff_t start, pgoff_t end, unsigned long *nr_pagevec) +unsigned long mapping_try_invalidate(struct address_space *mapping, + pgoff_t start, pgoff_t end, unsigned long *nr_failed) { pgoff_t indices[PAGEVEC_SIZE]; struct folio_batch fbatch; @@ -527,9 +526,9 @@ unsigned long invalidate_mapping_pagevec(struct address_space *mapping, */ if (!ret) { deactivate_file_folio(folio); - /* It is likely on the pagevec of a remote CPU */ - if (nr_pagevec) - (*nr_pagevec)++; + /* Likely in the lru cache of a remote CPU */ + if (nr_failed) + (*nr_failed)++; } count += ret; } @@ -552,12 +551,12 @@ unsigned long invalidate_mapping_pagevec(struct address_space *mapping, * If you want to remove all the pages of one inode, regardless of * their use and writeback state, use truncate_inode_pages(). * - * Return: the number of the cache entries that were invalidated + * Return: The number of indices that had their contents invalidated */ unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end) { - return invalidate_mapping_pagevec(mapping, start, end, NULL); + return mapping_try_invalidate(mapping, start, end, NULL); } EXPORT_SYMBOL(invalidate_mapping_pages); -- cgit v1.2.3 From 1fec6890bf2247ecc93f5491c2d3f33c333d5c6e Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 21 Jun 2023 17:45:56 +0100 Subject: mm: remove references to pagevec Most of these should just refer to the LRU cache rather than the data structure used to implement the LRU cache. Link: https://lkml.kernel.org/r/20230621164557.3510324-13-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- mm/huge_memory.c | 2 +- mm/khugepaged.c | 6 +++--- mm/ksm.c | 6 +++--- mm/memory.c | 6 +++--- mm/migrate_device.c | 2 +- mm/swap.c | 2 +- mm/truncate.c | 2 +- 7 files changed, 13 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e94fe292f30a..eb3678360b97 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1344,7 +1344,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) /* * See do_wp_page(): we can only reuse the folio exclusively if * there are no additional references. Note that we always drain - * the LRU pagevecs immediately after adding a THP. + * the LRU cache immediately after adding a THP. 
*/ if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio) * folio_nr_pages(folio)) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 5ef1e08b2a06..3beb4ad2ee5e 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1051,7 +1051,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm, if (pte) pte_unmap(pte); - /* Drain LRU add pagevec to remove extra pin on the swapped in pages */ + /* Drain LRU cache to remove extra pin on the swapped in pages */ if (swapped_in) lru_add_drain(); @@ -1972,7 +1972,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, result = SCAN_FAIL; goto xa_unlocked; } - /* drain pagevecs to help isolate_lru_page() */ + /* drain lru cache to help isolate_lru_page() */ lru_add_drain(); page = folio_file_page(folio, index); } else if (trylock_page(page)) { @@ -1988,7 +1988,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, page_cache_sync_readahead(mapping, &file->f_ra, file, index, end - index); - /* drain pagevecs to help isolate_lru_page() */ + /* drain lru cache to help isolate_lru_page() */ lru_add_drain(); page = find_lock_page(mapping, index); if (unlikely(page == NULL)) { diff --git a/mm/ksm.c b/mm/ksm.c index d995779dc1fe..ba266359da55 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -932,7 +932,7 @@ static int remove_stable_node(struct ksm_stable_node *stable_node) * The stable node did not yet appear stale to get_ksm_page(), * since that allows for an unmapped ksm page to be recognized * right up until it is freed; but the node is safe to remove. - * This page might be in a pagevec waiting to be freed, + * This page might be in an LRU cache waiting to be freed, * or it might be PageSwapCache (perhaps under writeback), * or it might have been removed from swapcache a moment ago. */ @@ -2303,8 +2303,8 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page) trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items); /* - * A number of pages can hang around indefinitely on per-cpu - * pagevecs, raised page count preventing write_protect_page + * A number of pages can hang around indefinitely in per-cpu + * LRU cache, raised page count preventing write_protect_page * from merging them. Though it doesn't really matter much, * it is puzzling to see some stuck in pages_volatile until * other activity jostles them out, and they also prevented diff --git a/mm/memory.c b/mm/memory.c index 3d78b552866d..f758f59f3704 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3401,8 +3401,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) goto copy; if (!folio_test_lru(folio)) /* - * Note: We cannot easily detect+handle references from - * remote LRU pagevecs or references to LRU folios. + * We cannot easily detect+handle references from + * remote LRU caches or references to LRU folios. */ lru_add_drain(); if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) @@ -3880,7 +3880,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) * If we want to map a page that's in the swapcache writable, we * have to detect via the refcount if we're really the exclusive * owner. Try removing the extra reference from the local LRU - * pagevecs if required. + * caches if required. 
*/ if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache && !folio_test_ksm(folio) && !folio_test_lru(folio)) diff --git a/mm/migrate_device.c b/mm/migrate_device.c index 02d272b909b5..8365158460ed 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -376,7 +376,7 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns, /* ZONE_DEVICE pages are not on LRU */ if (!is_zone_device_page(page)) { if (!PageLRU(page) && allow_drain) { - /* Drain CPU's pagevec */ + /* Drain CPU's lru cache */ lru_add_drain_all(); allow_drain = false; } diff --git a/mm/swap.c b/mm/swap.c index 10348c1cf9c5..cd8f0150ba3a 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -76,7 +76,7 @@ static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = { /* * This path almost never happens for VM activity - pages are normally freed - * via pagevecs. But it gets used by networking - and for compound pages. + * in batches. But it gets used by networking - and for compound pages. */ static void __page_cache_release(struct folio *folio) { diff --git a/mm/truncate.c b/mm/truncate.c index 4a917570887f..95d1291d269b 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -565,7 +565,7 @@ EXPORT_SYMBOL(invalidate_mapping_pages); * refcount. We do this because invalidate_inode_pages2() needs stronger * invalidation guarantees, and cannot afford to leave pages behind because * shrink_page_list() has a temp ref on them, or because they're transiently - * sitting in the folio_add_lru() pagevecs. + * sitting in the folio_add_lru() caches. */ static int invalidate_complete_folio2(struct address_space *mapping, struct folio *folio) -- cgit v1.2.3 From 994ec4e29b3de188d11fe60d17403285fcc8917a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 21 Jun 2023 17:45:57 +0100 Subject: mm: remove unnecessary pagevec includes These files no longer need pagevec.h, mostly due to function declarations being moved out of it. 
Link: https://lkml.kernel.org/r/20230621164557.3510324-14-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- mm/fadvise.c | 1 - mm/memory_hotplug.c | 1 - mm/migrate.c | 1 - mm/readahead.c | 1 - mm/swap_state.c | 1 - 5 files changed, 5 deletions(-) (limited to 'mm') diff --git a/mm/fadvise.c b/mm/fadvise.c index f684ffd7f9c9..6c39d42f16dc 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 35db4108bb15..3f231cf1b410 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/migrate.c b/mm/migrate.c index eca3bf0e93b8..24baad2571e3 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/readahead.c b/mm/readahead.c index 47afbca1d122..a9c999aa19af 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -120,7 +120,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/swap_state.c b/mm/swap_state.c index 4a5c7b748051..f8ea7015bad4 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 18a937076c6991a21b88d99af95b779b5027b29b Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 21 Jun 2023 09:30:09 +0000 Subject: mm: zswap: fix double invalidate with exclusive loads If exclusive loads are enabled for zswap, we invalidate the entry before returning from zswap_frontswap_load(), after dropping the local reference. However, the tree lock is dropped during decompression after the local reference is acquired, so the entry could be invalidated before we drop the local ref. If this happens, the entry is freed once we drop the local ref, and zswap_invalidate_entry() tries to invalidate an already freed entry. Fix this by: (a) Making sure zswap_invalidate_entry() is always called with a local ref held, to avoid being called on a freed entry. (b) Making sure zswap_invalidate_entry() only drops the ref if the entry was actually on the rbtree. Otherwise, another invalidation could have already happened, and the initial ref is already dropped. With these changes, there is no need to check that there is no need to make sure the entry still exists in the tree in zswap_reclaim_entry() before invalidating it, as zswap_reclaim_entry() will make this check internally. 
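A toy, single-threaded model of the fixed invalidation rule (illustrative names, not the zswap code): only the call that actually removes the entry from the tree drops the creation reference, so a second invalidation while a local reference is held is harmless rather than a double free.

  /* invalidate_sketch.c - idempotent invalidation guarded by tree removal */
  #include <stdio.h>
  #include <stdbool.h>
  #include <stdlib.h>

  struct entry {
          int  refcount;          /* starts at 1: the tree's reference */
          bool on_tree;
  };

  static void put(struct entry *e)
  {
          if (--e->refcount == 0) {
                  puts("entry freed");
                  free(e);
          }
  }

  static bool tree_erase(struct entry *e)
  {
          if (!e->on_tree)
                  return false;   /* someone else already erased it */
          e->on_tree = false;
          return true;
  }

  /* must be called with an extra local reference held */
  static void invalidate(struct entry *e)
  {
          if (tree_erase(e))
                  put(e);         /* drop the initial (creation) reference */
  }

  int main(void)
  {
          struct entry *e = malloc(sizeof(*e));

          e->refcount = 1;
          e->on_tree  = true;

          e->refcount++;          /* local ref, as the load/reclaim paths take */
          invalidate(e);          /* drops the tree reference */
          invalidate(e);          /* second call is now harmless */
          put(e);                 /* dropping the local ref frees the entry */
          return 0;
  }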
Link: https://lkml.kernel.org/r/20230621093009.637544-1-yosryahmed@google.com Fixes: b9c91c43412f ("mm: zswap: support exclusive loads") Signed-off-by: Yosry Ahmed Reported-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Dan Streetman Cc: Domenico Cerasuolo Cc: Johannes Weiner Cc: Konrad Rzeszutek Wilk Cc: Nhat Pham Cc: Seth Jennings Cc: Vitaly Wool Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/zswap.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/zswap.c b/mm/zswap.c index 87b204233115..62195f72bf56 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -355,12 +355,14 @@ static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry, return 0; } -static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry) +static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry) { if (!RB_EMPTY_NODE(&entry->rbnode)) { rb_erase(&entry->rbnode, root); RB_CLEAR_NODE(&entry->rbnode); + return true; } + return false; } /* @@ -599,14 +601,16 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) return NULL; } +/* + * If the entry is still valid in the tree, drop the initial ref and remove it + * from the tree. This function must be called with an additional ref held, + * otherwise it may race with another invalidation freeing the entry. + */ static void zswap_invalidate_entry(struct zswap_tree *tree, struct zswap_entry *entry) { - /* remove from rbtree */ - zswap_rb_erase(&tree->rbroot, entry); - - /* drop the initial reference from entry creation */ - zswap_entry_put(tree, entry); + if (zswap_rb_erase(&tree->rbroot, entry)) + zswap_entry_put(tree, entry); } static int zswap_reclaim_entry(struct zswap_pool *pool) @@ -659,8 +663,7 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) * swapcache. Drop the entry from zswap - unless invalidate already * took it out while we had the tree->lock released for IO. */ - if (entry == zswap_rb_search(&tree->rbroot, swpoffset)) - zswap_invalidate_entry(tree, entry); + zswap_invalidate_entry(tree, entry); put_unlock: /* Drop local reference */ @@ -1466,7 +1469,6 @@ stats: count_objcg_event(entry->objcg, ZSWPIN); freeentry: spin_lock(&tree->lock); - zswap_entry_put(tree, entry); if (!ret && zswap_exclusive_loads_enabled) { zswap_invalidate_entry(tree, entry); *exclusive = true; @@ -1475,6 +1477,7 @@ freeentry: list_move(&entry->lru, &entry->pool->lru); spin_unlock(&entry->pool->lru_lock); } + zswap_entry_put(tree, entry); spin_unlock(&tree->lock); return ret; -- cgit v1.2.3 From 56ae0bb349b4eeb172674d4876f2b6290d505a25 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Mon, 19 Jun 2023 19:07:17 +0800 Subject: mm: compaction: convert to use a folio in isolate_migratepages_block() Directly use a folio instead of page_folio() when page successfully isolated (hugepage and movable page) and after folio_get_nontail_page(), which removes several calls to compound_head(). 
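As a rough illustration of why operating on the folio saves work, the following standalone toy counts head-page lookups; it is not the compaction code, and model_page_folio() merely plays the role that compound_head()/page_folio() play in the kernel:

/* Toy model: every page-based test on a tail page re-derives the head page,
 * while a cached folio handle is already the head.
 * Build with: gcc -o folio_model folio_model.c
 */
#include <stdio.h>

struct folio { int nr_pages; int lru; };
struct page { struct folio *head; };

static int head_lookups;

static struct folio *model_page_folio(struct page *page)
{
	head_lookups++;		/* stand-in for a compound_head() lookup */
	return page->head;
}

/* old-style helpers: each one re-derives the head */
static int model_page_lru(struct page *page)      { return model_page_folio(page)->lru; }
static int model_page_nr_pages(struct page *page) { return model_page_folio(page)->nr_pages; }

int main(void)
{
	struct folio f = { .nr_pages = 512, .lru = 1 };
	struct page tail = { .head = &f };

	/* page-based code: one head lookup per test */
	(void)model_page_lru(&tail);
	(void)model_page_nr_pages(&tail);
	printf("page-style checks: %d head lookups\n", head_lookups);

	/* folio-based code: convert once, then use the folio directly */
	head_lookups = 0;
	struct folio *folio = model_page_folio(&tail);
	printf("folio-style checks: %d head lookup (lru=%d, nr=%d)\n",
	       head_lookups, folio->lru, folio->nr_pages);
	return 0;
}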
Link: https://lkml.kernel.org/r/20230619110718.65679-1-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: Baolin Wang Cc: James Gowans Cc: Matthew Wilcox Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/compaction.c | 84 ++++++++++++++++++++++++++++++--------------------------- 1 file changed, 44 insertions(+), 40 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 0fb3b89b3967..dbc9f86b1934 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -822,6 +822,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, struct lruvec *lruvec; unsigned long flags = 0; struct lruvec *locked = NULL; + struct folio *folio = NULL; struct page *page = NULL, *valid_page = NULL; struct address_space *mapping; unsigned long start_pfn = low_pfn; @@ -918,7 +919,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, if (!valid_page && pageblock_aligned(low_pfn)) { if (!isolation_suitable(cc, page)) { low_pfn = end_pfn; - page = NULL; + folio = NULL; goto isolate_abort; } valid_page = page; @@ -950,7 +951,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * Hugepage was successfully isolated and placed * on the cc->migratepages list. */ - low_pfn += compound_nr(page) - 1; + folio = page_folio(page); + low_pfn += folio_nr_pages(folio) - 1; goto isolate_success_no_list; } @@ -1018,8 +1020,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, locked = NULL; } - if (isolate_movable_page(page, mode)) + if (isolate_movable_page(page, mode)) { + folio = page_folio(page); goto isolate_success; + } } goto isolate_fail; @@ -1030,7 +1034,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * sure the page is not being freed elsewhere -- the * page release code relies on it. */ - if (unlikely(!get_page_unless_zero(page))) + folio = folio_get_nontail_page(page); + if (unlikely(!folio)) goto isolate_fail; /* @@ -1038,8 +1043,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * so avoid taking lru_lock and isolating it unnecessarily in an * admittedly racy check. */ - mapping = page_mapping(page); - if (!mapping && (page_count(page) - 1) > total_mapcount(page)) + mapping = folio_mapping(folio); + if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio)) goto isolate_fail_put; /* @@ -1050,11 +1055,11 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, goto isolate_fail_put; /* Only take pages on LRU: a check now makes later tests safe */ - if (!PageLRU(page)) + if (!folio_test_lru(folio)) goto isolate_fail_put; /* Compaction might skip unevictable pages but CMA takes them */ - if (!(mode & ISOLATE_UNEVICTABLE) && PageUnevictable(page)) + if (!(mode & ISOLATE_UNEVICTABLE) && folio_test_unevictable(folio)) goto isolate_fail_put; /* @@ -1063,10 +1068,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * it will be able to migrate without blocking - clean pages * for the most part. PageWriteback would require blocking. 
*/ - if ((mode & ISOLATE_ASYNC_MIGRATE) && PageWriteback(page)) + if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio)) goto isolate_fail_put; - if ((mode & ISOLATE_ASYNC_MIGRATE) && PageDirty(page)) { + if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_dirty(folio)) { bool migrate_dirty; /* @@ -1078,22 +1083,22 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * the page lock until after the page is removed * from the page cache. */ - if (!trylock_page(page)) + if (!folio_trylock(folio)) goto isolate_fail_put; - mapping = page_mapping(page); + mapping = folio_mapping(folio); migrate_dirty = !mapping || mapping->a_ops->migrate_folio; - unlock_page(page); + folio_unlock(folio); if (!migrate_dirty) goto isolate_fail_put; } - /* Try isolate the page */ - if (!TestClearPageLRU(page)) + /* Try isolate the folio */ + if (!folio_test_clear_lru(folio)) goto isolate_fail_put; - lruvec = folio_lruvec(page_folio(page)); + lruvec = folio_lruvec(folio); /* If we already hold the lock, we can skip some rechecking */ if (lruvec != locked) { @@ -1103,7 +1108,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, compact_lock_irqsave(&lruvec->lru_lock, &flags, cc); locked = lruvec; - lruvec_memcg_debug(lruvec, page_folio(page)); + lruvec_memcg_debug(lruvec, folio); /* * Try get exclusive access under lock. If marked for @@ -1119,34 +1124,33 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, } /* - * Page become compound since the non-locked check, - * and it's on LRU. It can only be a THP so the order - * is safe to read and it's 0 for tail pages. + * folio become large since the non-locked check, + * and it's on LRU. */ - if (unlikely(PageCompound(page) && !cc->alloc_contig)) { - low_pfn += compound_nr(page) - 1; - nr_scanned += compound_nr(page) - 1; - SetPageLRU(page); + if (unlikely(folio_test_large(folio) && !cc->alloc_contig)) { + low_pfn += folio_nr_pages(folio) - 1; + nr_scanned += folio_nr_pages(folio) - 1; + folio_set_lru(folio); goto isolate_fail_put; } } - /* The whole page is taken off the LRU; skip the tail pages. 
*/ - if (PageCompound(page)) - low_pfn += compound_nr(page) - 1; + /* The folio is taken off the LRU */ + if (folio_test_large(folio)) + low_pfn += folio_nr_pages(folio) - 1; /* Successfully isolated */ - del_page_from_lru_list(page, lruvec); - mod_node_page_state(page_pgdat(page), - NR_ISOLATED_ANON + page_is_file_lru(page), - thp_nr_pages(page)); + lruvec_del_folio(lruvec, folio); + node_stat_mod_folio(folio, + NR_ISOLATED_ANON + folio_is_file_lru(folio), + folio_nr_pages(folio)); isolate_success: - list_add(&page->lru, &cc->migratepages); + list_add(&folio->lru, &cc->migratepages); isolate_success_no_list: - cc->nr_migratepages += compound_nr(page); - nr_isolated += compound_nr(page); - nr_scanned += compound_nr(page) - 1; + cc->nr_migratepages += folio_nr_pages(folio); + nr_isolated += folio_nr_pages(folio); + nr_scanned += folio_nr_pages(folio) - 1; /* * Avoid isolating too much unless this block is being @@ -1168,7 +1172,7 @@ isolate_fail_put: unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; } - put_page(page); + folio_put(folio); isolate_fail: if (!skip_on_failure && ret != -ENOMEM) @@ -1209,14 +1213,14 @@ isolate_fail: if (unlikely(low_pfn > end_pfn)) low_pfn = end_pfn; - page = NULL; + folio = NULL; isolate_abort: if (locked) unlock_page_lruvec_irqrestore(locked, flags); - if (page) { - SetPageLRU(page); - put_page(page); + if (folio) { + folio_set_lru(folio); + folio_put(folio); } /* -- cgit v1.2.3 From 7a704474b3022dabbb68f72bf18a3d89ec1c0a24 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 21 Jun 2023 02:30:53 +0000 Subject: mm: memcg: rename and document global_reclaim() Evidently, global_reclaim() can be a confusing name. Especially that it used to exist before with a subtly different definition (removed by commit b5ead35e7e1d ("mm: vmscan: naming fixes: global_reclaim() and sane_reclaim()"). It can be interpreted as non-cgroup reclaim, even though it returns true for cgroup reclaim on the root memcg (through memory.reclaim). Rename it to root_reclaim() in an attempt to make it less ambiguous, and add documentation to it as well as cgroup_reclaim. Link: https://lkml.kernel.org/r/20230621023053.432374-1-yosryahmed@google.com Signed-off-by: Yosry Ahmed Reported-by: Johannes Weiner Closes: https://lore.kernel.org/lkml/20230405200150.GA35884@cmpxchg.org/ Acked-by: Yu Zhao Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/vmscan.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 049342b6317c..b7068be8a034 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -429,12 +429,17 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg) up_read(&shrinker_rwsem); } +/* Returns true for reclaim through cgroup limits or cgroup interfaces. */ static bool cgroup_reclaim(struct scan_control *sc) { return sc->target_mem_cgroup; } -static bool global_reclaim(struct scan_control *sc) +/* + * Returns true for reclaim on the root cgroup. This is true for direct + * allocator reclaim and reclaim through cgroup interfaces on the root cgroup. 
+ */ +static bool root_reclaim(struct scan_control *sc) { return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup); } @@ -489,7 +494,7 @@ static bool cgroup_reclaim(struct scan_control *sc) return false; } -static bool global_reclaim(struct scan_control *sc) +static bool root_reclaim(struct scan_control *sc) { return true; } @@ -546,7 +551,7 @@ static void flush_reclaim_state(struct scan_control *sc) * memcg reclaim, to make reporting more accurate and reduce * underestimation, but it's probably not worth the complexity for now. */ - if (current->reclaim_state && global_reclaim(sc)) { + if (current->reclaim_state && root_reclaim(sc)) { sc->nr_reclaimed += current->reclaim_state->reclaimed; current->reclaim_state->reclaimed = 0; } @@ -5325,7 +5330,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool static unsigned long get_nr_to_reclaim(struct scan_control *sc) { /* don't abort memcg reclaim to ensure fairness */ - if (!global_reclaim(sc)) + if (!root_reclaim(sc)) return -1; return max(sc->nr_to_reclaim, compact_gap(sc->order)); @@ -5477,7 +5482,7 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc { struct blk_plug plug; - VM_WARN_ON_ONCE(global_reclaim(sc)); + VM_WARN_ON_ONCE(root_reclaim(sc)); VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap); lru_add_drain(); @@ -5538,7 +5543,7 @@ static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control * struct blk_plug plug; unsigned long reclaimed = sc->nr_reclaimed; - VM_WARN_ON_ONCE(!global_reclaim(sc)); + VM_WARN_ON_ONCE(!root_reclaim(sc)); /* * Unmapped clean folios are already prioritized. Scanning for more of @@ -6260,7 +6265,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) bool proportional_reclaim; struct blk_plug plug; - if (lru_gen_enabled() && !global_reclaim(sc)) { + if (lru_gen_enabled() && !root_reclaim(sc)) { lru_gen_shrink_lruvec(lruvec, sc); return; } @@ -6501,7 +6506,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) struct lruvec *target_lruvec; bool reclaimable = false; - if (lru_gen_enabled() && global_reclaim(sc)) { + if (lru_gen_enabled() && root_reclaim(sc)) { lru_gen_shrink_node(pgdat, sc); return; } -- cgit v1.2.3 From 1bc545bff45ce9eefc176ccf663074462a209cb6 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 21 Jun 2023 02:31:01 +0000 Subject: mm/vmscan: fix root proactive reclaim unthrottling unbalanced node When memory.reclaim was introduced, it became the first case where cgroup_reclaim() is true for the root cgroup. Johannes concluded [1] that for most cases this is okay, except for one case. Historically, kswapd would throttle reclaim on a node if a lot of pages marked for reclaim are under writeback (aka the node is congested). This occurred by setting LRUVEC_CONGESTED bit in lruvec->flags. The bit would be cleared when the node is balanced. Similarly, cgroup reclaim would set the same bit when an lruvec is congested, and clear it on the way out of reclaim (to throttle local reclaimers). Before the introduction of memory.reclaim, the root memcg was the only target of kswapd reclaim, and non-root memcgs were the only targets of cgroup reclaim, so they would never interfere. Using the same bit for both was fine. After memory.reclaim, it is possible for cgroup reclaim on the root cgroup to clear the bit set by kswapd. This would result in reclaim on the node to be unthrottled before the node is balanced. 
Fix this by introducing separate bits for cgroup-level and node-level congestion. kswapd can unthrottle an lruvec that is marked as congested by cgroup reclaim (as the entire node should no longer be congested), but not vice versa (to prevent premature unthrottling before the entire node is balanced). [1]https://lore.kernel.org/lkml/20230405200150.GA35884@cmpxchg.org/ Link: https://lkml.kernel.org/r/20230621023101.432780-1-yosryahmed@google.com Signed-off-by: Yosry Ahmed Reported-by: Johannes Weiner Closes: https://lore.kernel.org/lkml/20230405200150.GA35884@cmpxchg.org/ Cc: Michal Hocko Cc: Roman Gushchin Cc: Shakeel Butt Cc: Muchun Song Cc: Yu Zhao Signed-off-by: Andrew Morton --- include/linux/mmzone.h | 18 +++++++++++++++--- mm/vmscan.c | 19 ++++++++++++------- 2 files changed, 27 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 3e822335f214..d863698a84e0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -293,9 +293,21 @@ static inline bool is_active_lru(enum lru_list lru) #define ANON_AND_FILE 2 enum lruvec_flags { - LRUVEC_CONGESTED, /* lruvec has many dirty pages - * backed by a congested BDI - */ + /* + * An lruvec has many dirty pages backed by a congested BDI: + * 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim. + * It can be cleared by cgroup reclaim or kswapd. + * 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim. + * It can only be cleared by kswapd. + * + * Essentially, kswapd can unthrottle an lruvec throttled by cgroup + * reclaim, but not vice versa. This only applies to the root cgroup. + * The goal is to prevent cgroup reclaim on the root cgroup (e.g. + * memory.reclaim) to unthrottle an unbalanced node (that was throttled + * by kswapd). + */ + LRUVEC_CGROUP_CONGESTED, + LRUVEC_NODE_CONGESTED, }; #endif /* !__GENERATING_BOUNDS_H */ diff --git a/mm/vmscan.c b/mm/vmscan.c index b7068be8a034..1080209a568b 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -6578,10 +6578,13 @@ again: * Legacy memcg will stall in page writeback so avoid forcibly * stalling in reclaim_throttle(). 
*/ - if ((current_is_kswapd() || - (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) && - sc->nr.dirty && sc->nr.dirty == sc->nr.congested) - set_bit(LRUVEC_CONGESTED, &target_lruvec->flags); + if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) { + if (cgroup_reclaim(sc) && writeback_throttling_sane(sc)) + set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags); + + if (current_is_kswapd()) + set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); + } /* * Stall direct reclaim for IO completions if the lruvec is @@ -6591,7 +6594,8 @@ again: */ if (!current_is_kswapd() && current_may_throttle() && !sc->hibernation_mode && - test_bit(LRUVEC_CONGESTED, &target_lruvec->flags)) + (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) || + test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags))) reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED); if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc)) @@ -6848,7 +6852,7 @@ retry: lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, zone->zone_pgdat); - clear_bit(LRUVEC_CONGESTED, &lruvec->flags); + clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); } } @@ -7237,7 +7241,8 @@ static void clear_pgdat_congested(pg_data_t *pgdat) { struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); - clear_bit(LRUVEC_CONGESTED, &lruvec->flags); + clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); + clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); clear_bit(PGDAT_DIRTY, &pgdat->flags); clear_bit(PGDAT_WRITEBACK, &pgdat->flags); } -- cgit v1.2.3 From 16f8eb3eea9eb2a1568279d64ca4dc977e7aa538 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Wed, 21 Jun 2023 14:24:02 -0700 Subject: Revert "page cache: fix page_cache_next/prev_miss off by one" This reverts commit 9425c591e06a9ab27a145ba655fb50532cf0bcc9 The reverted commit fixed up routines primarily used by readahead code such that they could also be used by hugetlb. Unfortunately, this caused a performance regression as pointed out by the Closes: tag. The hugetlb code which uses page_cache_next_miss will be addressed in a subsequent patch. Link: https://lkml.kernel.org/r/20230621212403.174710-1-mike.kravetz@oracle.com Fixes: 9425c591e06a ("page cache: fix page_cache_next/prev_miss off by one") Signed-off-by: Mike Kravetz Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202306211346.1e9ff03e-oliver.sang@intel.com Reviewed-by: Sidhartha Kumar Cc: Ackerley Tng Cc: Erdem Aktas Cc: Greg Kroah-Hartman Cc: Matthew Wilcox Cc: Muchun Song Cc: Vishal Annapurve Signed-off-by: Andrew Morton --- mm/filemap.c | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index c20e0b1997e8..758bbdf300e7 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1728,9 +1728,7 @@ bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm, * * Return: The index of the gap if found, otherwise an index outside the * range specified (in which case 'return - index >= max_scan' will be true). - * In the rare case of index wrap-around, 0 will be returned. 0 will also - * be returned if index == 0 and there is a gap at the index. We can not - * wrap-around if passed index == 0. + * In the rare case of index wrap-around, 0 will be returned. 
*/ pgoff_t page_cache_next_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan) @@ -1740,13 +1738,12 @@ pgoff_t page_cache_next_miss(struct address_space *mapping, while (max_scan--) { void *entry = xas_next(&xas); if (!entry || xa_is_value(entry)) - return xas.xa_index; - if (xas.xa_index == 0 && index != 0) - return xas.xa_index; + break; + if (xas.xa_index == 0) + break; } - /* No gaps in range and no wrap-around, return index beyond range */ - return xas.xa_index + 1; + return xas.xa_index; } EXPORT_SYMBOL(page_cache_next_miss); @@ -1767,9 +1764,7 @@ EXPORT_SYMBOL(page_cache_next_miss); * * Return: The index of the gap if found, otherwise an index outside the * range specified (in which case 'index - return >= max_scan' will be true). - * In the rare case of wrap-around, ULONG_MAX will be returned. ULONG_MAX - * will also be returned if index == ULONG_MAX and there is a gap at the - * index. We can not wrap-around if passed index == ULONG_MAX. + * In the rare case of wrap-around, ULONG_MAX will be returned. */ pgoff_t page_cache_prev_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan) @@ -1779,13 +1774,12 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping, while (max_scan--) { void *entry = xas_prev(&xas); if (!entry || xa_is_value(entry)) - return xas.xa_index; - if (xas.xa_index == ULONG_MAX && index != ULONG_MAX) - return xas.xa_index; + break; + if (xas.xa_index == ULONG_MAX) + break; } - /* No gaps in range and no wrap-around, return index beyond range */ - return xas.xa_index - 1; + return xas.xa_index; } EXPORT_SYMBOL(page_cache_prev_miss); -- cgit v1.2.3 From fd4aed8d985a3236d0877ff6d0c80ad39d4ce81a Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Wed, 21 Jun 2023 14:24:03 -0700 Subject: hugetlb: revert use of page_cache_next_miss() Ackerley Tng reported an issue with hugetlbfs fallocate as noted in the Closes tag. The issue showed up after the conversion of hugetlb page cache lookup code to use page_cache_next_miss. User visible effects are: - hugetlbfs fallocate incorrectly returns -EEXIST if pages are present in the file. - hugetlb pages will not be included in core dumps if they need to be brought in via GUP. - userfaultfd UFFDIO_COPY will not notice pages already present in the cache. It may try to allocate a new page and potentially return ENOMEM as opposed to EEXIST. Revert the use of page_cache_next_miss() in hugetlb code. IMPORTANT NOTE FOR STABLE BACKPORTS: This patch will apply cleanly to v6.3. However, due to the change of filemap_get_folio() return values, it will not function correctly. This patch must be modified for stable backports.
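A minimal userspace sketch of the presence check the hugetlb code goes back to, namely "look the folio up, which takes a reference, then drop the reference right away"; the names below are made up for the model and only mimic the filemap_get_folio()/folio_put() pattern visible in the diff that follows:

/* Toy model of a page-cache presence test done via a refcounted lookup.
 * Build with: gcc -o present_model present_model.c
 */
#include <stdbool.h>
#include <stdio.h>

struct folio { int refcount; };

/* one-slot stand-in for the page cache: only index 3 is populated */
static struct folio slot3 = { .refcount = 1 };

/* ~ filemap_get_folio(): a successful lookup takes a reference; the real
 * function returns ERR_PTR(-ENOENT) on a miss, this model returns NULL */
static struct folio *model_get_folio(long index)
{
	if (index != 3)
		return NULL;
	slot3.refcount++;
	return &slot3;
}

static bool model_present(long index)
{
	struct folio *folio = model_get_folio(index);

	if (!folio)
		return false;
	folio->refcount--;	/* ~ folio_put(): the test leaves no ref behind */
	return true;
}

int main(void)
{
	printf("index 3 present: %d\n", model_present(3));
	printf("index 5 present: %d\n", model_present(5));
	printf("reference count unchanged: %d\n", slot3.refcount);
	return 0;
}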
[dan.carpenter@linaro.org: fix hugetlbfs_pagecache_present()] Link: https://lkml.kernel.org/r/efa86091-6a2c-4064-8f55-9b44e1313015@moroto.mountain Link: https://lkml.kernel.org/r/20230621212403.174710-2-mike.kravetz@oracle.com Fixes: d0ce0e47b323 ("mm/hugetlb: convert hugetlb fault paths to use alloc_hugetlb_folio()") Signed-off-by: Mike Kravetz Signed-off-by: Dan Carpenter Reported-by: Ackerley Tng Closes: https://lore.kernel.org/linux-mm/cover.1683069252.git.ackerleytng@google.com Reviewed-by: Sidhartha Kumar Cc: Erdem Aktas Cc: Greg Kroah-Hartman Cc: Matthew Wilcox Cc: Muchun Song Cc: Vishal Annapurve Signed-off-by: Andrew Morton --- fs/hugetlbfs/inode.c | 8 +++----- mm/hugetlb.c | 12 ++++++------ 2 files changed, 9 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 90361a922cec..7b17ccfa039d 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -821,7 +821,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, */ struct folio *folio; unsigned long addr; - bool present; cond_resched(); @@ -842,10 +841,9 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, mutex_lock(&hugetlb_fault_mutex_table[hash]); /* See if already present in mapping to avoid alloc/free */ - rcu_read_lock(); - present = page_cache_next_miss(mapping, index, 1) != index; - rcu_read_unlock(); - if (present) { + folio = filemap_get_folio(mapping, index); + if (!IS_ERR(folio)) { + folio_put(folio); mutex_unlock(&hugetlb_fault_mutex_table[hash]); continue; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d76574425da3..bce28cca73a1 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5728,13 +5728,13 @@ static bool hugetlbfs_pagecache_present(struct hstate *h, { struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx = vma_hugecache_offset(h, vma, address); - bool present; - - rcu_read_lock(); - present = page_cache_next_miss(mapping, idx, 1) != idx; - rcu_read_unlock(); + struct folio *folio; - return present; + folio = filemap_get_folio(mapping, idx); + if (IS_ERR(folio)) + return false; + folio_put(folio); + return true; } int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, -- cgit v1.2.3 From c2508ec5a58db67093f4fb8bf89a9a7c53a109e9 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 15 Jun 2023 15:17:36 -0700 Subject: mm: introduce new 'lock_mm_and_find_vma()' page fault helper .. and make x86 use it. This basically extracts the existing x86 "find and expand faulting vma" code, but extends it to also take the mmap lock for writing in case we actually do need to expand the vma. We've historically short-circuited that case, and have some rather ugly special logic to serialize the stack segment expansion (since we only hold the mmap lock for reading) that doesn't match the normal VM locking. That slight violation of locking worked well, right up until it didn't: the maple tree code really does want proper locking even for simple extension of an existing vma. So extract the code for "look up the vma of the fault" from x86, fix it up to do the necessary write locking, and make it available as a helper function for other architectures that can use the common helper. Note: I say "common helper", but it really only handles the normal stack-grows-down case. Which is all architectures except for PA-RISC and IA64. So some rare architectures can't use the helper, but if they care they'll just need to open-code this logic. 
It's also worth pointing out that this code really would like to have an optimistic "mmap_upgrade_trylock()" to make it quicker to go from a read-lock (for the common case) to taking the write lock (for having to extend the vma) in the normal single-threaded situation where there is no other locking activity. But that _is_ all the very uncommon special case, so while it would be nice to have such an operation, it probably doesn't matter in reality. I did put in the skeleton code for such a possible future expansion, even if it only acts as pseudo-documentation for what we're doing. Signed-off-by: Linus Torvalds --- arch/x86/Kconfig | 1 + arch/x86/mm/fault.c | 52 +--------------------- include/linux/mm.h | 2 + mm/Kconfig | 4 ++ mm/memory.c | 121 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 130 insertions(+), 50 deletions(-) (limited to 'mm') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 53bab123a8ee..cb1031018afa 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -276,6 +276,7 @@ config X86 select HAVE_GENERIC_VDSO select HOTPLUG_SMT if SMP select IRQ_FORCED_THREADING + select LOCK_MM_AND_FIND_VMA select NEED_PER_CPU_EMBED_FIRST_CHUNK select NEED_PER_CPU_PAGE_FIRST_CHUNK select NEED_SG_DMA_LENGTH diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index e4399983c50c..e8711b2cafaf 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -880,12 +880,6 @@ __bad_area(struct pt_regs *regs, unsigned long error_code, __bad_area_nosemaphore(regs, error_code, address, pkey, si_code); } -static noinline void -bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) -{ - __bad_area(regs, error_code, address, 0, SEGV_MAPERR); -} - static inline bool bad_area_access_from_pkeys(unsigned long error_code, struct vm_area_struct *vma) { @@ -1366,51 +1360,10 @@ void do_user_addr_fault(struct pt_regs *regs, lock_mmap: #endif /* CONFIG_PER_VMA_LOCK */ - /* - * Kernel-mode access to the user address space should only occur - * on well-defined single instructions listed in the exception - * tables. But, an erroneous kernel fault occurring outside one of - * those areas which also holds mmap_lock might deadlock attempting - * to validate the fault against the address space. - * - * Only do the expensive exception table search when we might be at - * risk of a deadlock. This happens if we - * 1. Failed to acquire mmap_lock, and - * 2. The access did not originate in userspace. - */ - if (unlikely(!mmap_read_trylock(mm))) { - if (!user_mode(regs) && !search_exception_tables(regs->ip)) { - /* - * Fault from code in kernel from - * which we do not expect faults. - */ - bad_area_nosemaphore(regs, error_code, address); - return; - } retry: - mmap_read_lock(mm); - } else { - /* - * The above down_read_trylock() might have succeeded in - * which case we'll have missed the might_sleep() from - * down_read(): - */ - might_sleep(); - } - - vma = find_vma(mm, address); + vma = lock_mm_and_find_vma(mm, address, regs); if (unlikely(!vma)) { - bad_area(regs, error_code, address); - return; - } - if (likely(vma->vm_start <= address)) - goto good_area; - if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { - bad_area(regs, error_code, address); - return; - } - if (unlikely(expand_stack(vma, address))) { - bad_area(regs, error_code, address); + bad_area_nosemaphore(regs, error_code, address); return; } @@ -1418,7 +1371,6 @@ retry: * Ok, we have a good vm_area for this memory access, so * we can handle it.. 
*/ -good_area: if (unlikely(access_error(error_code, vma))) { bad_area_access_error(regs, error_code, address, vma); return; diff --git a/include/linux/mm.h b/include/linux/mm.h index 27ce77080c79..570cf906fbcc 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2325,6 +2325,8 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t nr, bool even_cows); void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); +struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, + unsigned long address, struct pt_regs *regs); #else static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, diff --git a/mm/Kconfig b/mm/Kconfig index 7672a22647b4..e3454087fd31 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -1206,6 +1206,10 @@ config PER_VMA_LOCK This feature allows locking each virtual memory area separately when handling page faults instead of taking mmap_lock. +config LOCK_MM_AND_FIND_VMA + bool + depends on !STACK_GROWSUP + source "mm/damon/Kconfig" endmenu diff --git a/mm/memory.c b/mm/memory.c index f69fbc251198..1a427097b71f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5262,6 +5262,127 @@ out: } EXPORT_SYMBOL_GPL(handle_mm_fault); +#ifdef CONFIG_LOCK_MM_AND_FIND_VMA +#include + +static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs) +{ + /* Even if this succeeds, make it clear we *might* have slept */ + if (likely(mmap_read_trylock(mm))) { + might_sleep(); + return true; + } + + if (regs && !user_mode(regs)) { + unsigned long ip = instruction_pointer(regs); + if (!search_exception_tables(ip)) + return false; + } + + mmap_read_lock(mm); + return true; +} + +static inline bool mmap_upgrade_trylock(struct mm_struct *mm) +{ + /* + * We don't have this operation yet. + * + * It should be easy enough to do: it's basically a + * atomic_long_try_cmpxchg_acquire() + * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but + * it also needs the proper lockdep magic etc. + */ + return false; +} + +static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs) +{ + mmap_read_unlock(mm); + if (regs && !user_mode(regs)) { + unsigned long ip = instruction_pointer(regs); + if (!search_exception_tables(ip)) + return false; + } + mmap_write_lock(mm); + return true; +} + +/* + * Helper for page fault handling. + * + * This is kind of equivalend to "mmap_read_lock()" followed + * by "find_extend_vma()", except it's a lot more careful about + * the locking (and will drop the lock on failure). + * + * For example, if we have a kernel bug that causes a page + * fault, we don't want to just use mmap_read_lock() to get + * the mm lock, because that would deadlock if the bug were + * to happen while we're holding the mm lock for writing. + * + * So this checks the exception tables on kernel faults in + * order to only do this all for instructions that are actually + * expected to fault. + * + * We can also actually take the mm lock for writing if we + * need to extend the vma, which helps the VM layer a lot. + */ +struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, + unsigned long addr, struct pt_regs *regs) +{ + struct vm_area_struct *vma; + + if (!get_mmap_lock_carefully(mm, regs)) + return NULL; + + vma = find_vma(mm, addr); + if (likely(vma && (vma->vm_start <= addr))) + return vma; + + /* + * Well, dang. We might still be successful, but only + * if we can extend a vma to do so. 
+ */ + if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) { + mmap_read_unlock(mm); + return NULL; + } + + /* + * We can try to upgrade the mmap lock atomically, + * in which case we can continue to use the vma + * we already looked up. + * + * Otherwise we'll have to drop the mmap lock and + * re-take it, and also look up the vma again, + * re-checking it. + */ + if (!mmap_upgrade_trylock(mm)) { + if (!upgrade_mmap_lock_carefully(mm, regs)) + return NULL; + + vma = find_vma(mm, addr); + if (!vma) + goto fail; + if (vma->vm_start <= addr) + goto success; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto fail; + } + + if (expand_stack(vma, addr)) + goto fail; + +success: + mmap_write_downgrade(mm); + return vma; + +fail: + mmap_write_unlock(mm); + return NULL; +} +#endif + #ifdef CONFIG_PER_VMA_LOCK /* * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be -- cgit v1.2.3 From eda0047296a16d65a7f2bc60a408f70d178b2014 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 15 Jun 2023 16:17:48 -0700 Subject: mm: make the page fault mmap locking killable This is done as a separate patch from introducing the new lock_mm_and_find_vma() helper, because while it's an obvious change, it's not what x86 used to do in this area. We already abort the page fault on fatal signals anyway, so why should we wait for the mmap lock only to then abort later? With the new helper function that returns without the lock held on failure anyway, this is particularly easy and straightforward. Signed-off-by: Linus Torvalds --- mm/memory.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 1a427097b71f..1dff248805bf 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5279,8 +5279,7 @@ static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs return false; } - mmap_read_lock(mm); - return true; + return !mmap_read_lock_killable(mm); } static inline bool mmap_upgrade_trylock(struct mm_struct *mm) @@ -5304,8 +5303,7 @@ static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_r if (!search_exception_tables(ip)) return false; } - mmap_write_lock(mm); - return true; + return !mmap_write_lock_killable(mm); } /* -- cgit v1.2.3 From 8b35ca3e45e35a26a21427f35d4093606e93ad0a Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 22 Jun 2023 21:24:30 +0200 Subject: arm/mm: Convert to using lock_mm_and_find_vma() arm has an additional check for address < FIRST_USER_ADDRESS before expanding the stack. Since FIRST_USER_ADDRESS is defined everywhere (generally as 0), move that check to the generic expand_downwards(). 
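A condensed standalone sketch of the idea (illustrative constants and a toy vma, not the kernel's types): the lowest-allowed user address is checked once in the generic downward-expansion routine, so individual fault handlers such as arm's no longer carry their own check:

/* Toy model of a generic expand_downwards() enforcing the minimum address.
 * FIRST_USER_ADDRESS is 0 on most architectures; a nonzero value is used
 * here only so the rejection path is visible.
 * Build with: gcc -o expand_model expand_model.c
 */
#include <errno.h>
#include <stdio.h>

#define FIRST_USER_ADDRESS	0x1000UL	/* illustrative, usually 0 */
#define MMAP_MIN_ADDR		0x10000UL	/* illustrative */

struct vma { unsigned long start, end; };

static int model_expand_downwards(struct vma *vma, unsigned long address)
{
	/* the check arm used to open-code before expanding its stack */
	if (address < MMAP_MIN_ADDR || address < FIRST_USER_ADDRESS)
		return -EPERM;
	if (address >= vma->start)
		return 0;			/* nothing to expand */
	vma->start = address & ~0xfffUL;	/* grow the vma downwards */
	return 0;
}

int main(void)
{
	struct vma stack = { .start = 0x7f0000, .end = 0x800000 };
	int ret;

	ret = model_expand_downwards(&stack, 0x7e0000);
	printf("expand to 0x7e0000 -> %d (start now %#lx)\n", ret, stack.start);

	ret = model_expand_downwards(&stack, 0x200);
	printf("expand to 0x200    -> %d (below the minimum user address)\n", ret);
	return 0;
}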
Signed-off-by: Ben Hutchings Signed-off-by: Linus Torvalds --- arch/arm/Kconfig | 1 + arch/arm/mm/fault.c | 63 ++++++++++++----------------------------------------- mm/mmap.c | 2 +- 3 files changed, 16 insertions(+), 50 deletions(-) (limited to 'mm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 0fb4b218f665..9ed7f03ba15a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -125,6 +125,7 @@ config ARM select HAVE_UID16 select HAVE_VIRT_CPU_ACCOUNTING_GEN select IRQ_FORCED_THREADING + select LOCK_MM_AND_FIND_VMA select MODULES_USE_ELF_REL select NEED_DMA_MAP_STATE select OF_EARLY_FLATTREE if OF diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 2418f1efabd8..0860eeba8bd3 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -232,37 +232,11 @@ static inline bool is_permission_fault(unsigned int fsr) return false; } -static vm_fault_t __kprobes -__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags, - unsigned long vma_flags, struct pt_regs *regs) -{ - struct vm_area_struct *vma = find_vma(mm, addr); - if (unlikely(!vma)) - return VM_FAULT_BADMAP; - - if (unlikely(vma->vm_start > addr)) { - if (!(vma->vm_flags & VM_GROWSDOWN)) - return VM_FAULT_BADMAP; - if (addr < FIRST_USER_ADDRESS) - return VM_FAULT_BADMAP; - if (expand_stack(vma, addr)) - return VM_FAULT_BADMAP; - } - - /* - * ok, we have a good vm_area for this memory access, check the - * permissions on the VMA allow for the fault which occurred. - */ - if (!(vma->vm_flags & vma_flags)) - return VM_FAULT_BADACCESS; - - return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs); -} - static int __kprobes do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; int sig, code; vm_fault_t fault; unsigned int flags = FAULT_FLAG_DEFAULT; @@ -301,31 +275,21 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); - /* - * As per x86, we may deadlock here. However, since the kernel only - * validly references user space from well defined areas of the code, - * we can bug out early if this is from code which shouldn't. - */ - if (!mmap_read_trylock(mm)) { - if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc)) - goto no_context; retry: - mmap_read_lock(mm); - } else { - /* - * The above down_read_trylock() might have succeeded in - * which case, we'll have missed the might_sleep() from - * down_read() - */ - might_sleep(); -#ifdef CONFIG_DEBUG_VM - if (!user_mode(regs) && - !search_exception_tables(regs->ARM_pc)) - goto no_context; -#endif + vma = lock_mm_and_find_vma(mm, addr, regs); + if (unlikely(!vma)) { + fault = VM_FAULT_BADMAP; + goto bad_area; } - fault = __do_page_fault(mm, addr, flags, vm_flags, regs); + /* + * ok, we have a good vm_area for this memory access, check the + * permissions on the VMA allow for the fault which occurred. + */ + if (!(vma->vm_flags & vm_flags)) + fault = VM_FAULT_BADACCESS; + else + fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs); /* If we need to retry but a fatal signal is pending, handle the * signal first. We do not need to release the mmap_lock because @@ -356,6 +320,7 @@ retry: if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS)))) return 0; +bad_area: /* * If we are in kernel mode at this point, we * have no context to handle this fault with. 
diff --git a/mm/mmap.c b/mm/mmap.c index d600404580b2..6d120bf1d0bc 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2036,7 +2036,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) int error = 0; address &= PAGE_MASK; - if (address < mmap_min_addr) + if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) return -EPERM; /* Enforce stack_guard_gap */ -- cgit v1.2.3 From f440fa1ac955e2898893f9301568435eb5cdfc4b Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Fri, 16 Jun 2023 15:58:54 -0700 Subject: mm: make find_extend_vma() fail if write lock not held Make calls to extend_vma() and find_extend_vma() fail if the write lock is required. To avoid making this a flag-day event, this still allows the old read-locking case for the trivial situations, and passes in a flag to say "is it write-locked". That way write-lockers can say "yes, I'm being careful", and legacy users will continue to work in all the common cases until they have been fully converted to the new world order. Co-Developed-by: Matthew Wilcox (Oracle) Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Signed-off-by: Linus Torvalds --- fs/binfmt_elf.c | 6 +++--- fs/exec.c | 5 +++-- include/linux/mm.h | 10 +++++++--- mm/memory.c | 2 +- mm/mmap.c | 50 +++++++++++++++++++++++++++++++++----------------- mm/nommu.c | 3 ++- 6 files changed, 49 insertions(+), 27 deletions(-) (limited to 'mm') diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 1033fbdfdbec..869c3aa0e455 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -320,10 +320,10 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, * Grow the stack manually; some architectures have a limit on how * far ahead a user-space access may be in order to grow the stack. */ - if (mmap_read_lock_killable(mm)) + if (mmap_write_lock_killable(mm)) return -EINTR; - vma = find_extend_vma(mm, bprm->p); - mmap_read_unlock(mm); + vma = find_extend_vma_locked(mm, bprm->p, true); + mmap_write_unlock(mm); if (!vma) return -EFAULT; diff --git a/fs/exec.c b/fs/exec.c index a466e797c8e2..a61eb256e5e4 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -205,7 +205,8 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, #ifdef CONFIG_STACK_GROWSUP if (write) { - ret = expand_downwards(bprm->vma, pos); + /* We claim to hold the lock - nobody to race with */ + ret = expand_downwards(bprm->vma, pos, true); if (ret < 0) return NULL; } @@ -853,7 +854,7 @@ int setup_arg_pages(struct linux_binprm *bprm, stack_base = vma->vm_end - stack_expand; #endif current->mm->start_stack = bprm->p; - ret = expand_stack(vma, stack_base); + ret = expand_stack_locked(vma, stack_base, true); if (ret) ret = -EFAULT; diff --git a/include/linux/mm.h b/include/linux/mm.h index 570cf906fbcc..01a016521b60 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3192,11 +3192,13 @@ extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ -extern int expand_stack(struct vm_area_struct *vma, unsigned long address); +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, + bool write_locked); +#define expand_stack(vma,addr) expand_stack_locked(vma,addr,false) /* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */ -extern int expand_downwards(struct vm_area_struct *vma, - unsigned long address); +int expand_downwards(struct vm_area_struct *vma, unsigned long address, + bool write_locked); #if 
VM_GROWSUP extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); #else @@ -3297,6 +3299,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, #endif struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); +struct vm_area_struct *find_extend_vma_locked(struct mm_struct *, + unsigned long addr, bool write_locked); int remap_pfn_range(struct vm_area_struct *, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t); int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, diff --git a/mm/memory.c b/mm/memory.c index 1dff248805bf..a81f5d0997ad 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5368,7 +5368,7 @@ struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, goto fail; } - if (expand_stack(vma, addr)) + if (expand_stack_locked(vma, addr, true)) goto fail; success: diff --git a/mm/mmap.c b/mm/mmap.c index 6d120bf1d0bc..2c44ac108a3c 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1935,7 +1935,8 @@ static int acct_stack_growth(struct vm_area_struct *vma, * PA-RISC uses this for its stack; IA64 for its Register Backing Store. * vma is the last one with address > vma->vm_end. Have to extend vma. */ -int expand_upwards(struct vm_area_struct *vma, unsigned long address) +int expand_upwards(struct vm_area_struct *vma, unsigned long address, + bool write_locked) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next; @@ -1959,6 +1960,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) if (gap_addr < address || gap_addr > TASK_SIZE) gap_addr = TASK_SIZE; + if (!write_locked) + return -EAGAIN; next = find_vma_intersection(mm, vma->vm_end, gap_addr); if (next && vma_is_accessible(next)) { if (!(next->vm_flags & VM_GROWSUP)) @@ -2028,7 +2031,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) /* * vma is the first one with address < vma->vm_start. Have to extend vma. */ -int expand_downwards(struct vm_area_struct *vma, unsigned long address) +int expand_downwards(struct vm_area_struct *vma, unsigned long address, + bool write_locked) { struct mm_struct *mm = vma->vm_mm; MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start); @@ -2042,10 +2046,13 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) /* Enforce stack_guard_gap */ prev = mas_prev(&mas, 0); /* Check that both stack segments have the same anon_vma? 
*/ - if (prev && !(prev->vm_flags & VM_GROWSDOWN) && - vma_is_accessible(prev)) { - if (address - prev->vm_end < stack_guard_gap) + if (prev) { + if (!(prev->vm_flags & VM_GROWSDOWN) && + vma_is_accessible(prev) && + (address - prev->vm_end < stack_guard_gap)) return -ENOMEM; + if (!write_locked && (prev->vm_end == address)) + return -EAGAIN; } if (mas_preallocate(&mas, GFP_KERNEL)) @@ -2124,13 +2131,14 @@ static int __init cmdline_parse_stack_guard_gap(char *p) __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); #ifdef CONFIG_STACK_GROWSUP -int expand_stack(struct vm_area_struct *vma, unsigned long address) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, + bool write_locked) { - return expand_upwards(vma, address); + return expand_upwards(vma, address, write_locked); } -struct vm_area_struct * -find_extend_vma(struct mm_struct *mm, unsigned long addr) +struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, + unsigned long addr, bool write_locked) { struct vm_area_struct *vma, *prev; @@ -2138,20 +2146,25 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) vma = find_vma_prev(mm, addr, &prev); if (vma && (vma->vm_start <= addr)) return vma; - if (!prev || expand_stack(prev, addr)) + if (!prev) + return NULL; + if (expand_stack_locked(prev, addr, write_locked)) return NULL; if (prev->vm_flags & VM_LOCKED) populate_vma_page_range(prev, addr, prev->vm_end, NULL); return prev; } #else -int expand_stack(struct vm_area_struct *vma, unsigned long address) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, + bool write_locked) { - return expand_downwards(vma, address); + if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) + return -EINVAL; + return expand_downwards(vma, address, write_locked); } -struct vm_area_struct * -find_extend_vma(struct mm_struct *mm, unsigned long addr) +struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, + unsigned long addr, bool write_locked) { struct vm_area_struct *vma; unsigned long start; @@ -2162,10 +2175,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) return NULL; if (vma->vm_start <= addr) return vma; - if (!(vma->vm_flags & VM_GROWSDOWN)) - return NULL; start = vma->vm_start; - if (expand_stack(vma, addr)) + if (expand_stack_locked(vma, addr, write_locked)) return NULL; if (vma->vm_flags & VM_LOCKED) populate_vma_page_range(vma, addr, start, NULL); @@ -2173,6 +2184,11 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) } #endif +struct vm_area_struct *find_extend_vma(struct mm_struct *mm, + unsigned long addr) +{ + return find_extend_vma_locked(mm, addr, false); +} EXPORT_SYMBOL_GPL(find_extend_vma); /* diff --git a/mm/nommu.c b/mm/nommu.c index f670d9979a26..f476c9ed36b3 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -643,7 +643,8 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) * expand a stack to a given address * - not supported under NOMMU conditions */ -int expand_stack(struct vm_area_struct *vma, unsigned long address) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, + bool write_locked) { return -ENOMEM; } -- cgit v1.2.3 From 8d7071af890768438c14db6172cc8f9f4d04e184 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 24 Jun 2023 13:45:51 -0700 Subject: mm: always expand the stack with the mmap write lock held This finishes the job of always holding the mmap write lock when extending the user stack vma, and removes the 'write_locked' argument from the vm helper functions again. 
For some cases, we just avoid expanding the stack at all: drivers and page pinning really shouldn't be extending any stacks. Let's see if any strange users really wanted that. It's worth noting that architectures that weren't converted to the new lock_mm_and_find_vma() helper function are left using the legacy "expand_stack()" function, but it has been changed to drop the mmap_lock and take it for writing while expanding the vma. This makes it fairly straightforward to convert the remaining architectures. As a result of dropping and re-taking the lock, the calling conventions for this function have also changed, since the old vma may no longer be valid. So it will now return the new vma if successful, and NULL - and the lock dropped - if the area could not be extended. Tested-by: Vegard Nossum Tested-by: John Paul Adrian Glaubitz # ia64 Tested-by: Frank Scheiner # ia64 Signed-off-by: Linus Torvalds --- arch/ia64/mm/fault.c | 36 +++---------- arch/m68k/mm/fault.c | 9 ++-- arch/microblaze/mm/fault.c | 5 +- arch/openrisc/mm/fault.c | 5 +- arch/parisc/mm/fault.c | 23 ++++---- arch/s390/mm/fault.c | 5 +- arch/sparc/mm/fault_64.c | 8 +-- arch/um/kernel/trap.c | 11 ++-- drivers/iommu/amd/iommu_v2.c | 4 +- drivers/iommu/iommu-sva.c | 2 +- fs/binfmt_elf.c | 2 +- fs/exec.c | 4 +- include/linux/mm.h | 16 ++---- mm/gup.c | 6 +-- mm/memory.c | 10 +++- mm/mmap.c | 121 ++++++++++++++++++++++++++++++++++--------- mm/nommu.c | 18 +++---- 17 files changed, 169 insertions(+), 116 deletions(-) (limited to 'mm') diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 85c4d9ac8686..5458b52b4009 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -110,10 +110,12 @@ retry: * register backing store that needs to expand upwards, in * this case vma will be null, but prev_vma will ne non-null */ - if (( !vma && prev_vma ) || (address < vma->vm_start) ) - goto check_expansion; + if (( !vma && prev_vma ) || (address < vma->vm_start) ) { + vma = expand_stack(mm, address); + if (!vma) + goto bad_area_nosemaphore; + } - good_area: code = SEGV_ACCERR; /* OK, we've got a good vm_area for this memory area. Check the access permissions: */ @@ -177,35 +179,9 @@ retry: mmap_read_unlock(mm); return; - check_expansion: - if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) { - if (!vma) - goto bad_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) - || REGION_OFFSET(address) >= RGN_MAP_LIMIT) - goto bad_area; - if (expand_stack(vma, address)) - goto bad_area; - } else { - vma = prev_vma; - if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) - || REGION_OFFSET(address) >= RGN_MAP_LIMIT) - goto bad_area; - /* - * Since the register backing store is accessed sequentially, - * we disallow growing it by more than a page at a time. 
- */ - if (address > vma->vm_end + PAGE_SIZE - sizeof(long)) - goto bad_area; - if (expand_upwards(vma, address)) - goto bad_area; - } - goto good_area; - bad_area: mmap_read_unlock(mm); + bad_area_nosemaphore: if ((isr & IA64_ISR_SP) || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 228128e45c67..c290c5c0cfb9 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -105,8 +105,9 @@ retry: if (address + 256 < rdusp()) goto map_err; } - if (expand_stack(vma, address)) - goto map_err; + vma = expand_stack(mm, address); + if (!vma) + goto map_err_nosemaphore; /* * Ok, we have a good vm_area for this memory access, so @@ -196,10 +197,12 @@ bus_err: goto send_sig; map_err: + mmap_read_unlock(mm); +map_err_nosemaphore: current->thread.signo = SIGSEGV; current->thread.code = SEGV_MAPERR; current->thread.faddr = address; - goto send_sig; + return send_fault_sig(regs); acc_err: current->thread.signo = SIGSEGV; diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index 687714db6f4d..d3c3c33b73a6 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -192,8 +192,9 @@ retry: && (kernel_mode(regs) || !store_updates_sp(regs))) goto bad_area; } - if (expand_stack(vma, address)) - goto bad_area; + vma = expand_stack(mm, address); + if (!vma) + goto bad_area_nosemaphore; good_area: code = SEGV_ACCERR; diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index 6734fee3134f..a9dcd4381d1a 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -127,8 +127,9 @@ retry: if (address + PAGE_SIZE < regs->sp) goto bad_area; } - if (expand_stack(vma, address)) - goto bad_area; + vma = expand_stack(mm, address); + if (!vma) + goto bad_area_nosemaphore; /* * Ok, we have a good vm_area for this memory access, so diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 6941fdbf2517..6e894afa4249 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -288,15 +288,19 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, retry: mmap_read_lock(mm); vma = find_vma_prev(mm, address, &prev_vma); - if (!vma || address < vma->vm_start) - goto check_expansion; + if (!vma || address < vma->vm_start) { + if (!prev || !(prev->vm_flags & VM_GROWSUP)) + goto bad_area; + vma = expand_stack(mm, address); + if (!vma) + goto bad_area_nosemaphore; + } + /* * Ok, we have a good vm_area for this memory access. We still need to * check the access permissions. */ -good_area: - if ((vma->vm_flags & acc_type) != acc_type) goto bad_area; @@ -347,17 +351,13 @@ good_area: mmap_read_unlock(mm); return; -check_expansion: - vma = prev_vma; - if (vma && (expand_stack(vma, address) == 0)) - goto good_area; - /* * Something tried to access memory that isn't in our memory map.. */ bad_area: mmap_read_unlock(mm); +bad_area_nosemaphore: if (user_mode(regs)) { int signo, si_code; @@ -449,7 +449,7 @@ handle_nadtlb_fault(struct pt_regs *regs) { unsigned long insn = regs->iir; int breg, treg, xreg, val = 0; - struct vm_area_struct *vma, *prev_vma; + struct vm_area_struct *vma; struct task_struct *tsk; struct mm_struct *mm; unsigned long address; @@ -485,7 +485,7 @@ handle_nadtlb_fault(struct pt_regs *regs) /* Search for VMA */ address = regs->ior; mmap_read_lock(mm); - vma = find_vma_prev(mm, address, &prev_vma); + vma = vma_lookup(mm, address); mmap_read_unlock(mm); /* @@ -494,7 +494,6 @@ handle_nadtlb_fault(struct pt_regs *regs) */ acc_type = (insn & 0x40) ? 
VM_WRITE : VM_READ; if (vma - && address >= vma->vm_start && (vma->vm_flags & acc_type) == acc_type) val = 1; } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index b65144c392b0..dbe8394234e2 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -457,8 +457,9 @@ retry: if (unlikely(vma->vm_start > address)) { if (!(vma->vm_flags & VM_GROWSDOWN)) goto out_up; - if (expand_stack(vma, address)) - goto out_up; + vma = expand_stack(mm, address); + if (!vma) + goto out; } /* diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index d91305de694c..69ff07bc6c07 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -383,8 +383,9 @@ continue_fault: goto bad_area; } } - if (expand_stack(vma, address)) - goto bad_area; + vma = expand_stack(mm, address); + if (!vma) + goto bad_area_nosemaphore; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. @@ -487,8 +488,9 @@ exit_exception: * Fix it, but check if it's kernel or user first.. */ bad_area: - insn = get_fault_insn(regs, insn); mmap_read_unlock(mm); +bad_area_nosemaphore: + insn = get_fault_insn(regs, insn); handle_kernel_fault: do_kernel_fault(regs, si_code, fault_code, insn, address); diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index d3ce21c4ca32..6d8ae86ae978 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -47,14 +47,15 @@ retry: vma = find_vma(mm, address); if (!vma) goto out; - else if (vma->vm_start <= address) + if (vma->vm_start <= address) goto good_area; - else if (!(vma->vm_flags & VM_GROWSDOWN)) + if (!(vma->vm_flags & VM_GROWSDOWN)) goto out; - else if (is_user && !ARCH_IS_STACKGROW(address)) - goto out; - else if (expand_stack(vma, address)) + if (is_user && !ARCH_IS_STACKGROW(address)) goto out; + vma = expand_stack(mm, address); + if (!vma) + goto out_nosemaphore; good_area: *code_out = SEGV_ACCERR; diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index 864e4ffb6aa9..261352a23271 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -485,8 +485,8 @@ static void do_fault(struct work_struct *work) flags |= FAULT_FLAG_REMOTE; mmap_read_lock(mm); - vma = find_extend_vma(mm, address); - if (!vma || address < vma->vm_start) + vma = vma_lookup(mm, address); + if (!vma) /* failed to get a vma in the right range */ goto out; diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c index 9821bc44f5ac..3ebd4b6586b3 100644 --- a/drivers/iommu/iommu-sva.c +++ b/drivers/iommu/iommu-sva.c @@ -175,7 +175,7 @@ iommu_sva_handle_iopf(struct iommu_fault *fault, void *data) mmap_read_lock(mm); - vma = find_extend_vma(mm, prm->addr); + vma = vma_lookup(mm, prm->addr); if (!vma) /* Unmapped area */ goto out_put_mm; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 869c3aa0e455..befa93582ed7 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -322,7 +322,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, */ if (mmap_write_lock_killable(mm)) return -EINTR; - vma = find_extend_vma_locked(mm, bprm->p, true); + vma = find_extend_vma_locked(mm, bprm->p); mmap_write_unlock(mm); if (!vma) return -EFAULT; diff --git a/fs/exec.c b/fs/exec.c index 66e3e22ffb8a..b84b4fee0f82 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -211,7 +211,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, */ if (write && pos < vma->vm_start) { mmap_write_lock(mm); - ret = expand_downwards(vma, pos, true); + ret = expand_downwards(vma, pos); if (unlikely(ret < 0)) { 
mmap_write_unlock(mm); return NULL; @@ -859,7 +859,7 @@ int setup_arg_pages(struct linux_binprm *bprm, stack_base = vma->vm_end - stack_expand; #endif current->mm->start_stack = bprm->p; - ret = expand_stack_locked(vma, stack_base, true); + ret = expand_stack_locked(vma, stack_base); if (ret) ret = -EFAULT; diff --git a/include/linux/mm.h b/include/linux/mm.h index 01a016521b60..4a9533efbd5d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3192,18 +3192,11 @@ extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ -int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, - bool write_locked); -#define expand_stack(vma,addr) expand_stack_locked(vma,addr,false) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address); +struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr); /* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */ -int expand_downwards(struct vm_area_struct *vma, unsigned long address, - bool write_locked); -#if VM_GROWSUP -extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); -#else - #define expand_upwards(vma, address) (0) -#endif +int expand_downwards(struct vm_area_struct *vma, unsigned long address); /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); @@ -3298,9 +3291,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long start, unsigned long end); #endif -struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); struct vm_area_struct *find_extend_vma_locked(struct mm_struct *, - unsigned long addr, bool write_locked); + unsigned long addr); int remap_pfn_range(struct vm_area_struct *, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t); int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, diff --git a/mm/gup.c b/mm/gup.c index bbe416236593..e6cdfee4451f 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1096,7 +1096,7 @@ static long __get_user_pages(struct mm_struct *mm, /* first iteration or cross vma bound */ if (!vma || start >= vma->vm_end) { - vma = find_extend_vma(mm, start); + vma = vma_lookup(mm, start); if (!vma && in_gate_area(mm, start)) { ret = get_gate_page(mm, start & PAGE_MASK, gup_flags, &vma, @@ -1265,8 +1265,8 @@ int fixup_user_fault(struct mm_struct *mm, fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; retry: - vma = find_extend_vma(mm, address); - if (!vma || address < vma->vm_start) + vma = vma_lookup(mm, address); + if (!vma) return -EFAULT; if (!vma_permits_fault(vma, fault_flags)) diff --git a/mm/memory.c b/mm/memory.c index a81f5d0997ad..5ce82a76201d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5368,7 +5368,7 @@ struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, goto fail; } - if (expand_stack_locked(vma, addr, true)) + if (expand_stack_locked(vma, addr)) goto fail; success: @@ -5713,6 +5713,14 @@ int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, if (mmap_read_lock_killable(mm)) return 0; + /* We might need to expand the stack to access it */ + vma = vma_lookup(mm, addr); + if (!vma) { + vma = expand_stack(mm, addr); + if (!vma) + return 0; + } + /* ignore errors, just check how much was successfully transferred */ while (len) { int bytes, ret, offset; diff --git a/mm/mmap.c 
b/mm/mmap.c index 2c44ac108a3c..bc510361acec 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1935,8 +1935,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, * PA-RISC uses this for its stack; IA64 for its Register Backing Store. * vma is the last one with address > vma->vm_end. Have to extend vma. */ -int expand_upwards(struct vm_area_struct *vma, unsigned long address, - bool write_locked) +static int expand_upwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next; @@ -1960,8 +1959,6 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, if (gap_addr < address || gap_addr > TASK_SIZE) gap_addr = TASK_SIZE; - if (!write_locked) - return -EAGAIN; next = find_vma_intersection(mm, vma->vm_end, gap_addr); if (next && vma_is_accessible(next)) { if (!(next->vm_flags & VM_GROWSUP)) @@ -2030,15 +2027,18 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, /* * vma is the first one with address < vma->vm_start. Have to extend vma. + * mmap_lock held for writing. */ -int expand_downwards(struct vm_area_struct *vma, unsigned long address, - bool write_locked) +int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start); struct vm_area_struct *prev; int error = 0; + if (!(vma->vm_flags & VM_GROWSDOWN)) + return -EFAULT; + address &= PAGE_MASK; if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) return -EPERM; @@ -2051,8 +2051,6 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address, vma_is_accessible(prev) && (address - prev->vm_end < stack_guard_gap)) return -ENOMEM; - if (!write_locked && (prev->vm_end == address)) - return -EAGAIN; } if (mas_preallocate(&mas, GFP_KERNEL)) @@ -2131,14 +2129,12 @@ static int __init cmdline_parse_stack_guard_gap(char *p) __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); #ifdef CONFIG_STACK_GROWSUP -int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, - bool write_locked) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) { - return expand_upwards(vma, address, write_locked); + return expand_upwards(vma, address); } -struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, - unsigned long addr, bool write_locked) +struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma, *prev; @@ -2148,23 +2144,21 @@ struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, return vma; if (!prev) return NULL; - if (expand_stack_locked(prev, addr, write_locked)) + if (expand_stack_locked(prev, addr)) return NULL; if (prev->vm_flags & VM_LOCKED) populate_vma_page_range(prev, addr, prev->vm_end, NULL); return prev; } #else -int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, - bool write_locked) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) { if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) return -EINVAL; - return expand_downwards(vma, address, write_locked); + return expand_downwards(vma, address); } -struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, - unsigned long addr, bool write_locked) +struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; unsigned long start; @@ -2176,7 +2170,7 @@ struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, if (vma->vm_start <= addr) 
return vma; start = vma->vm_start; - if (expand_stack_locked(vma, addr, write_locked)) + if (expand_stack_locked(vma, addr)) return NULL; if (vma->vm_flags & VM_LOCKED) populate_vma_page_range(vma, addr, start, NULL); @@ -2184,12 +2178,91 @@ struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, } #endif -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, - unsigned long addr) +/* + * IA64 has some horrid mapping rules: it can expand both up and down, + * but with various special rules. + * + * We'll get rid of this architecture eventually, so the ugliness is + * temporary. + */ +#ifdef CONFIG_IA64 +static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr) +{ + return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) && + REGION_OFFSET(addr) < RGN_MAP_LIMIT; +} + +/* + * IA64 stacks grow down, but there's a special register backing store + * that can grow up. Only sequentially, though, so the new address must + * match vm_end. + */ +static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr) +{ + if (!vma_expand_ok(vma, addr)) + return -EFAULT; + if (vma->vm_end != (addr & PAGE_MASK)) + return -EFAULT; + return expand_upwards(vma, addr); +} + +static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr) +{ + if (!vma_expand_ok(vma, addr)) + return -EFAULT; + return expand_downwards(vma, addr); +} + +#elif defined(CONFIG_STACK_GROWSUP) + +#define vma_expand_up(vma,addr) expand_upwards(vma, addr) +#define vma_expand_down(vma, addr) (-EFAULT) + +#else + +#define vma_expand_up(vma,addr) (-EFAULT) +#define vma_expand_down(vma, addr) expand_downwards(vma, addr) + +#endif + +/* + * expand_stack(): legacy interface for page faulting. Don't use unless + * you have to. + * + * This is called with the mm locked for reading, drops the lock, takes + * the lock for writing, tries to look up a vma again, expands it if + * necessary, and downgrades the lock to reading again. + * + * If no vma is found or it can't be expanded, it returns NULL and has + * dropped the lock. 
+ */ +struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) { - return find_extend_vma_locked(mm, addr, false); + struct vm_area_struct *vma, *prev; + + mmap_read_unlock(mm); + if (mmap_write_lock_killable(mm)) + return NULL; + + vma = find_vma_prev(mm, addr, &prev); + if (vma && vma->vm_start <= addr) + goto success; + + if (prev && !vma_expand_up(prev, addr)) { + vma = prev; + goto success; + } + + if (vma && !vma_expand_down(vma, addr)) + goto success; + + mmap_write_unlock(mm); + return NULL; + +success: + mmap_write_downgrade(mm); + return vma; } -EXPORT_SYMBOL_GPL(find_extend_vma); /* * Ok - we have the memory areas we should free on a maple tree so release them, diff --git a/mm/nommu.c b/mm/nommu.c index f476c9ed36b3..37d0b03143f1 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -630,25 +630,21 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) } EXPORT_SYMBOL(find_vma); -/* - * find a VMA - * - we don't extend stack VMAs under NOMMU conditions - */ -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) -{ - return find_vma(mm, addr); -} - /* * expand a stack to a given address * - not supported under NOMMU conditions */ -int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, - bool write_locked) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr) { return -ENOMEM; } +struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) +{ + mmap_read_unlock(mm); + return NULL; +} + /* * look up the first VMA exactly that exactly matches addr * - should be called with mm->mmap_lock at least held readlocked -- cgit v1.2.3 From a425ac5365f6cb3cc47bf83e6bff0213c10445f7 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 25 Jun 2023 14:02:25 -0700 Subject: gup: add warning if some caller would seem to want stack expansion It feels very unlikely that anybody would want to do a GUP in an unmapped area under the stack pointer, but real users sometimes do some really strange things. So add a (temporary) warning for the case where a GUP fails and expanding the stack might have made it work. It's trivial to do the expansion in the caller as part of getting the mm lock in the first place - see __access_remote_vm() for ptrace, for example - it's just that it's unnecessarily painful to do it deep in the guts of the GUP lookup when we might have to drop and re-take the lock. I doubt anybody actually does anything quite this strange, but let's be proactive: adding these warnings is simple, and will make debugging it much easier if they trigger. 
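For reference, the caller-side pattern referred to above is what __access_remote_vm() already does after the earlier conversion in this series; a trimmed sketch (locals, error handling and the copy loop omitted):

    if (mmap_read_lock_killable(mm))
            return 0;

    /* We might need to expand the stack to access it */
    vma = vma_lookup(mm, addr);
    if (!vma) {
            vma = expand_stack(mm, addr);   /* drops mmap_lock when it fails */
            if (!vma)
                    return 0;
    }

The expansion thus happens while the caller is taking the mmap_lock anyway, rather than deep inside the GUP lookup.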
Signed-off-by: Linus Torvalds --- mm/gup.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index e6cdfee4451f..94102390b273 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1096,7 +1096,11 @@ static long __get_user_pages(struct mm_struct *mm, /* first iteration or cross vma bound */ if (!vma || start >= vma->vm_end) { - vma = vma_lookup(mm, start); + vma = find_vma(mm, start); + if (vma && (start < vma->vm_start)) { + WARN_ON_ONCE(vma->vm_flags & VM_GROWSDOWN); + vma = NULL; + } if (!vma && in_gate_area(mm, start)) { ret = get_gate_page(mm, start & PAGE_MASK, gup_flags, &vma, @@ -1265,9 +1269,13 @@ int fixup_user_fault(struct mm_struct *mm, fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; retry: - vma = vma_lookup(mm, address); + vma = find_vma(mm, address); if (!vma) return -EFAULT; + if (address < vma->vm_start ) { + WARN_ON_ONCE(vma->vm_flags & VM_GROWSDOWN); + return -EFAULT; + } if (!vma_permits_fault(vma, fault_flags)) return -EFAULT; -- cgit v1.2.3 From 6c26bd4384da24841bac4f067741bbca18b0fb74 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Wed, 28 Jun 2023 10:55:03 +0100 Subject: mm/mmap: Fix error return in do_vmi_align_munmap() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If mas_store_gfp() in the gather loop failed, the 'error' variable that ultimately gets returned was not being set. In many cases, its original value of -ENOMEM was still in place, and that was fine. But if VMAs had been split at the start or end of the range, then 'error' could be zero. Change to the 'error = foo(); if (error) goto …' idiom to fix the bug. Also clean up a later case which avoided the same bug by *explicitly* setting error = -ENOMEM right before calling the function that might return -ENOMEM. In a final cosmetic change, move the 'Point of no return' comment to *after* the goto. That's been in the wrong place since the preallocation was removed, and this new error path was added. Fixes: 606c812eb1d5 ("mm/mmap: Fix error path in do_vmi_align_munmap()") Signed-off-by: David Woodhouse Cc: stable@vger.kernel.org Reviewed-by: Greg Kroah-Hartman Reviewed-by: Liam R. 
Howlett --- mm/mmap.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index d600404580b2..13128e908470 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2387,7 +2387,8 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, } vma_start_write(next); mas_set_range(&mas_detach, next->vm_start, next->vm_end - 1); - if (mas_store_gfp(&mas_detach, next, GFP_KERNEL)) + error = mas_store_gfp(&mas_detach, next, GFP_KERNEL); + if (error) goto munmap_gather_failed; vma_mark_detached(next, true); if (next->vm_flags & VM_LOCKED) @@ -2436,12 +2437,12 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, BUG_ON(count != test_count); } #endif - /* Point of no return */ - error = -ENOMEM; vma_iter_set(vmi, start); - if (vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL)) + error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL); + if (error) goto clear_tree_failed; + /* Point of no return */ mm->locked_vm -= locked_vm; mm->map_count -= count; /* -- cgit v1.2.3 From 6581ccf03e717926be97dc3d27182ce351232f3c Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 28 Jun 2023 12:20:24 -0700 Subject: mm: fix __access_remote_vm() GUP failure case Commit ca5e863233e8 ("mm/gup: remove vmas parameter from get_user_pages_remote()") removed the vma argument from GUP handling, and instead added a helper function (get_user_page_vma_remote()) that looks it up separately using 'vma_lookup()'. And then converted existing users that needed a vma to use the helper instead. However, the helper function intentionally acts exactly like the old get_user_pages_remote() did, and only fills in 'vma' on successful page lookup. Fine so far. However, __access_remote_vm() wants the vma even for the unsuccessful case, and used to do a vma = vma_lookup(mm, addr); explicitly to look it up when the get_user_page() failed. However, that conversion commit incorrectly removed that vma lookup, thinking that get_user_page_vma_remote() would have done it. Not so. So add the vma_lookup() back in. Fixes: ca5e863233e8 ("mm/gup: remove vmas parameter from get_user_pages_remote()") Cc: Lorenzo Stoakes Cc: David Hildenbrand Cc: Christoph Hellwig Cc: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 1 + 1 file changed, 1 insertion(+) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index f758f59f3704..11882189864e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5592,6 +5592,7 @@ int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, * Check if this is a VM_IO | VM_PFNMAP VMA, which * we can access using slightly different code. */ + vma = vma_lookup(mm, addr); if (!vma) break; if (vma->vm_ops && vma->vm_ops->access) -- cgit v1.2.3 From e8c716bc6812202ccf4ce0f0bad3428b794fb39c Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 28 Jun 2023 21:31:35 -0700 Subject: mm/khugepaged: fix regression in collapse_file() There is no xas_pause(&xas) in collapse_file()'s main loop, at the points where it does xas_unlock_irq(&xas) and then continues. That would explain why, once two weeks ago and twice yesterday, I have hit the VM_BUG_ON_PAGE(page != xas_load(&xas), page) since "mm/khugepaged: fix iteration in collapse_file" removed the xas_set(&xas, index) just before it: xas.xa_node could be left pointing to a stale node, if there was concurrent activity on the file which transformed its xarray. 
I tried inserting xas_pause()s, but then even bootup crashed on that VM_BUG_ON_PAGE(): there appears to be a subtle "nextness" implicit in xas_pause(). xas_next() and xas_pause() are good for use in simple loops, but not in this one: xas_set() worked well until now, so use xas_set(&xas, index) explicitly at the head of the loop; and change that VM_BUG_ON_PAGE() not to need its own xas_set(), and not to interfere with the xa_state (which would probably stop the crashes from xas_pause(), but I trust that less). The user-visible effects of this bug (if VM_BUG_ONs are configured out) would be data loss and data leak - potentially - though in practice I expect it is more likely that a subsequent check (e.g. on mapping or on nr_none) would notice an inconsistency, and just abandon the collapse. Link: https://lore.kernel.org/linux-mm/f18e4b64-3f88-a8ab-56cc-d1f5f9c58d4@google.com/ Fixes: c8a8f3b4a95a ("mm/khugepaged: fix iteration in collapse_file") Signed-off-by: Hugh Dickins Cc: stable@kernel.org Cc: Andrew Morton Cc: Matthew Wilcox Cc: David Stevens Cc: Peter Xu Signed-off-by: Linus Torvalds --- mm/khugepaged.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 3beb4ad2ee5e..78c8d5d8b628 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1937,9 +1937,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, } } while (1); - xas_set(&xas, start); for (index = start; index < end; index++) { - page = xas_next(&xas); + xas_set(&xas, index); + page = xas_load(&xas); VM_BUG_ON(index != xas.xa_index); if (is_shmem) { @@ -1954,7 +1954,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, result = SCAN_TRUNCATED; goto xa_locked; } - xas_set(&xas, index + 1); } if (!shmem_charge(mapping->host, 1)) { result = SCAN_FAIL; @@ -2090,7 +2089,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, xas_lock_irq(&xas); - VM_BUG_ON_PAGE(page != xas_load(&xas), page); + VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page); /* * We control three references to the page: -- cgit v1.2.3 From eee9c708cc89b4600c6e6cdda5bc2b8b4dad96cb Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 29 Jun 2023 12:36:47 -0700 Subject: gup: avoid stack expansion warning for known-good case In commit a425ac5365f6 ("gup: add warning if some caller would seem to want stack expansion") I added a temporary warning to catch any strange GUP users that would be impacted by the fact that GUP no longer extends the stack. But it turns out that the warning is most easily triggered through __access_remote_vm(), that already knows to expand the stack - it just does it *after* calling GUP. So the warning is easy to trigger by just running gdb (or similar) and accessing things remotely under the stack. This just adds a temporary extra "expand stack early" to avoid the warning for the already converted case - not because the warning is bad, but because getting the warning for this known good case would then hide any subsequent warnings for any actually interesting cases. Let's try to remember to revert this change when we remove the warnings. 
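Concretely, the workaround below performs the possibly-stack-expanding lookup before the GUP loop runs, roughly:

    /* Avoid triggering the temporary warning in __get_user_pages */
    if (!vma_lookup(mm, addr) && !expand_stack(mm, addr))
            return 0;

i.e. if the address is unmapped and cannot be reached by growing the stack, bail out before __get_user_pages() ever sees it (expand_stack() has already dropped the mmap_lock in that failure case).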
Signed-off-by: Linus Torvalds --- mm/memory.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index d8a9a770b1f1..0ae594703021 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5694,6 +5694,10 @@ int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, if (mmap_read_lock_killable(mm)) return 0; + /* Avoid triggering the temporary warning in __get_user_pages */ + if (!vma_lookup(mm, addr) && !expand_stack(mm, addr)) + return 0; + /* ignore errors, just check how much was successfully transferred */ while (len) { int bytes, offset; -- cgit v1.2.3 From d85a143b69abb4d7544227e26d12c4c7735ab27d Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 30 Jun 2023 18:24:49 -0700 Subject: xtensa: fix NOMMU build with lock_mm_and_find_vma() conversion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It turns out that xtensa has a really odd configuration situation: you can do a no-MMU config, but still have the page fault code enabled. Which doesn't sound all that sensible, but it turns out that xtensa can have protection faults even without the MMU, and we have this: config PFAULT bool "Handle protection faults" if EXPERT && !MMU default y help Handle protection faults. MMU configurations must enable it. noMMU configurations may disable it if used memory map never generates protection faults or faults are always fatal. If unsure, say Y. which completely violated my expectations of the page fault handling. End result: Guenter reports that the xtensa no-MMU builds all fail with arch/xtensa/mm/fault.c: In function ‘do_page_fault’: arch/xtensa/mm/fault.c:133:8: error: implicit declaration of function ‘lock_mm_and_find_vma’ because I never exposed the new lock_mm_and_find_vma() function for the no-MMU case. Doing so is simple enough, and fixes the problem. 
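For context, the MMU-side callers converted earlier in the series use the helper roughly like this (a simplified, hypothetical fault-handler shape; the retry and error paths are omitted):

    vma = lock_mm_and_find_vma(mm, address, regs);
    if (!vma)
            goto bad_area_nosemaphore;      /* returns with mmap_lock not held */

    fault = handle_mm_fault(vma, address, flags, regs);
    /* ... */
    mmap_read_unlock(mm);

The no-MMU stub has to follow the same contract; the follow-up xtensa fix just below adjusts it so the lock really is released when no VMA is found.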
Reported-and-tested-by: Guenter Roeck Fixes: a050ba1e7422 ("mm/fault: convert remaining simple cases to lock_mm_and_find_vma()") Signed-off-by: Linus Torvalds --- include/linux/mm.h | 5 +++-- mm/nommu.c | 11 +++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index 39aa409e84d5..4f2c33c273eb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2323,6 +2323,9 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); int generic_error_remove_page(struct address_space *mapping, struct page *page); +struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, + unsigned long address, struct pt_regs *regs); + #ifdef CONFIG_MMU extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, @@ -2334,8 +2337,6 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t nr, bool even_cows); void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); -struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, - unsigned long address, struct pt_regs *regs); #else static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, diff --git a/mm/nommu.c b/mm/nommu.c index 37d0b03143f1..fdc392735ec6 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -630,6 +630,17 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) } EXPORT_SYMBOL(find_vma); +/* + * At least xtensa ends up having protection faults even with no + * MMU.. No stack expansion, at least. + */ +struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, + unsigned long addr, struct pt_regs *regs) +{ + mmap_read_lock(mm); + return vma_lookup(mm, addr); +} + /* * expand a stack to a given address * - not supported under NOMMU conditions -- cgit v1.2.3 From 03f889378f33aa9a9d8e5f49ba94134cf6158090 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Sat, 1 Jul 2023 03:31:55 -0700 Subject: xtensa: fix lock_mm_and_find_vma in case VMA not found MMU version of lock_mm_and_find_vma releases the mm lock before returning when VMA is not found. Do the same in noMMU version. This fixes hang on an attempt to handle protection fault. Fixes: d85a143b69ab ("xtensa: fix NOMMU build with lock_mm_and_find_vma() conversion") Signed-off-by: Max Filippov Signed-off-by: Linus Torvalds --- mm/nommu.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/nommu.c b/mm/nommu.c index fdc392735ec6..c072a660ec2c 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -637,8 +637,13 @@ EXPORT_SYMBOL(find_vma); struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, unsigned long addr, struct pt_regs *regs) { + struct vm_area_struct *vma; + mmap_read_lock(mm); - return vma_lookup(mm, addr); + vma = vma_lookup(mm, addr); + if (!vma) + mmap_read_unlock(mm); + return vma; } /* -- cgit v1.2.3 From e4bd84c069f212c01258e405f86e91f327888e41 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 29 Jun 2023 20:14:14 +0100 Subject: mm: Always downgrade mmap_lock if requested Now that stack growth must always hold the mmap_lock for write, we can always downgrade the mmap_lock to read and safely unmap pages from the page table, even if we're next to a stack. 
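In code terms, the change below boils down to replacing

    /*
     * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
     * VM_GROWSUP VMA. Such VMAs can change their size under
     * down_read(mmap_lock) and collide with the VMA we are about to unmap.
     */
    if (downgrade) {
            if (next && (next->vm_flags & VM_GROWSDOWN))
                    downgrade = false;
            else if (prev && (prev->vm_flags & VM_GROWSUP))
                    downgrade = false;
            else
                    mmap_write_downgrade(mm);
    }

with an unconditional

    if (downgrade)
            mmap_write_downgrade(mm);

because stack growth now always takes the mmap_lock for writing and can no longer race with a downgraded reader.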
Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Linus Torvalds --- mm/mmap.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 3e5793ebbaae..141c618847ac 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2551,19 +2551,8 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, /* Point of no return */ mm->locked_vm -= locked_vm; mm->map_count -= count; - /* - * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or - * VM_GROWSUP VMA. Such VMAs can change their size under - * down_read(mmap_lock) and collide with the VMA we are about to unmap. - */ - if (downgrade) { - if (next && (next->vm_flags & VM_GROWSDOWN)) - downgrade = false; - else if (prev && (prev->vm_flags & VM_GROWSUP)) - downgrade = false; - else - mmap_write_downgrade(mm); - } + if (downgrade) + mmap_write_downgrade(mm); /* * We can free page tables without write-locking mmap_lock because VMAs -- cgit v1.2.3 From 408579cd627a15bd703fe3eeb8485fd02726e9d3 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Thu, 29 Jun 2023 22:28:16 -0400 Subject: mm: Update do_vmi_align_munmap() return semantics Since do_vmi_align_munmap() will always honor the downgrade request on the success, the callers no longer have to deal with confusing return codes. Since all callers that request downgrade actually want the lock to be dropped, change the downgrade to an unlock request. Note that the lock still needs to be held in read mode during the page table clean up to avoid races with a map request. Update do_vmi_align_munmap() to return 0 for success. Clean up the callers and comments to always expect the unlock to be honored on the success path. The error path will always leave the lock untouched. As part of the cleanup, the wrapper function do_vmi_munmap() and callers to the wrapper are also updated. Suggested-by: Linus Torvalds Link: https://lore.kernel.org/linux-mm/20230629191414.1215929-1-willy@infradead.org/ Signed-off-by: Liam R. 
Howlett Signed-off-by: Linus Torvalds --- include/linux/mm.h | 4 +-- mm/mmap.c | 94 +++++++++++++++++++++++++----------------------------- mm/mremap.c | 28 +++++++--------- 3 files changed, 57 insertions(+), 69 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index 4f2c33c273eb..703ba8203da3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3177,7 +3177,7 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long pgoff, unsigned long *populate, struct list_head *uf); extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, - bool downgrade); + bool unlock); extern int do_munmap(struct mm_struct *, unsigned long, size_t, struct list_head *uf); extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior); @@ -3185,7 +3185,7 @@ extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, #ifdef CONFIG_MMU extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start, unsigned long end, - struct list_head *uf, bool downgrade); + struct list_head *uf, bool unlock); extern int __mm_populate(unsigned long addr, unsigned long len, int ignore_errors); static inline void mm_populate(unsigned long addr, unsigned long len) diff --git a/mm/mmap.c b/mm/mmap.c index 141c618847ac..51e70fa98450 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -193,8 +193,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) struct mm_struct *mm = current->mm; struct vm_area_struct *brkvma, *next = NULL; unsigned long min_brk; - bool populate; - bool downgraded = false; + bool populate = false; LIST_HEAD(uf); struct vma_iterator vmi; @@ -236,13 +235,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) goto success; } - /* - * Always allow shrinking brk. - * do_vma_munmap() may downgrade mmap_lock to read. - */ + /* Always allow shrinking brk. */ if (brk <= mm->brk) { - int ret; - /* Search one past newbrk */ vma_iter_init(&vmi, mm, newbrk); brkvma = vma_find(&vmi, oldbrk); @@ -250,19 +244,14 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) goto out; /* mapping intersects with an existing non-brk vma. */ /* * mm->brk must be protected by write mmap_lock. - * do_vma_munmap() may downgrade the lock, so update it + * do_vma_munmap() will drop the lock on success, so update it * before calling do_vma_munmap(). */ mm->brk = brk; - ret = do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true); - if (ret == 1) { - downgraded = true; - goto success; - } else if (!ret) - goto success; - - mm->brk = origbrk; - goto out; + if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true)) + goto out; + + goto success_unlocked; } if (check_brk_limits(oldbrk, newbrk - oldbrk)) @@ -283,19 +272,19 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) goto out; mm->brk = brk; + if (mm->def_flags & VM_LOCKED) + populate = true; success: - populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0; - if (downgraded) - mmap_read_unlock(mm); - else - mmap_write_unlock(mm); + mmap_write_unlock(mm); +success_unlocked: userfaultfd_unmap_complete(mm, &uf); if (populate) mm_populate(oldbrk, newbrk - oldbrk); return brk; out: + mm->brk = origbrk; mmap_write_unlock(mm); return origbrk; } @@ -2428,14 +2417,16 @@ int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, * @start: The aligned start address to munmap. * @end: The aligned end address to munmap. 
* @uf: The userfaultfd list_head - * @downgrade: Set to true to attempt a write downgrade of the mmap_lock + * @unlock: Set to true to drop the mmap_lock. unlocking only happens on + * success. * - * If @downgrade is true, check return code for potential release of the lock. + * Return: 0 on success and drops the lock if so directed, error and leaves the + * lock held otherwise. */ static int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, struct mm_struct *mm, unsigned long start, - unsigned long end, struct list_head *uf, bool downgrade) + unsigned long end, struct list_head *uf, bool unlock) { struct vm_area_struct *prev, *next = NULL; struct maple_tree mt_detach; @@ -2551,22 +2542,24 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, /* Point of no return */ mm->locked_vm -= locked_vm; mm->map_count -= count; - if (downgrade) + if (unlock) mmap_write_downgrade(mm); /* * We can free page tables without write-locking mmap_lock because VMAs * were isolated before we downgraded mmap_lock. */ - unmap_region(mm, &mt_detach, vma, prev, next, start, end, !downgrade); + unmap_region(mm, &mt_detach, vma, prev, next, start, end, !unlock); /* Statistics and freeing VMAs */ mas_set(&mas_detach, start); remove_mt(mm, &mas_detach); __mt_destroy(&mt_detach); + if (unlock) + mmap_read_unlock(mm); validate_mm(mm); - return downgrade ? 1 : 0; + return 0; clear_tree_failed: userfaultfd_error: @@ -2589,18 +2582,18 @@ map_count_exceeded: * @start: The start address to munmap * @len: The length of the range to munmap * @uf: The userfaultfd list_head - * @downgrade: set to true if the user wants to attempt to write_downgrade the - * mmap_lock + * @unlock: set to true if the user wants to drop the mmap_lock on success * * This function takes a @mas that is either pointing to the previous VMA or set * to MA_START and sets it up to remove the mapping(s). The @len will be * aligned and any arch_unmap work will be preformed. * - * Returns: -EINVAL on failure, 1 on success and unlock, 0 otherwise. + * Return: 0 on success and drops the lock if so directed, error and leaves the + * lock held otherwise. */ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, - bool downgrade) + bool unlock) { unsigned long end; struct vm_area_struct *vma; @@ -2617,10 +2610,13 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, /* Find the first overlapping VMA */ vma = vma_find(vmi, end); - if (!vma) + if (!vma) { + if (unlock) + mmap_write_unlock(mm); return 0; + } - return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade); + return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); } /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls. @@ -2628,6 +2624,8 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, * @start: The start address to munmap * @len: The length to be munmapped. * @uf: The userfaultfd list_head + * + * Return: 0 on success, error otherwise. 
*/ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf) @@ -2888,7 +2886,7 @@ unacct_error: return error; } -static int __vm_munmap(unsigned long start, size_t len, bool downgrade) +static int __vm_munmap(unsigned long start, size_t len, bool unlock) { int ret; struct mm_struct *mm = current->mm; @@ -2898,16 +2896,8 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade) if (mmap_write_lock_killable(mm)) return -EINTR; - ret = do_vmi_munmap(&vmi, mm, start, len, &uf, downgrade); - /* - * Returning 1 indicates mmap_lock is downgraded. - * But 1 is not legal return value of vm_munmap() and munmap(), reset - * it to 0 before return. - */ - if (ret == 1) { - mmap_read_unlock(mm); - ret = 0; - } else + ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock); + if (ret || !unlock) mmap_write_unlock(mm); userfaultfd_unmap_complete(mm, &uf); @@ -3017,21 +3007,23 @@ out: * @start: the start of the address to unmap * @end: The end of the address to unmap * @uf: The userfaultfd list_head - * @downgrade: Attempt to downgrade or not + * @unlock: Drop the lock on success * - * Returns: 0 on success and not downgraded, 1 on success and downgraded. * unmaps a VMA mapping when the vma iterator is already in position. * Does not handle alignment. + * + * Return: 0 on success drops the lock of so directed, error on failure and will + * still hold the lock. */ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, - unsigned long start, unsigned long end, - struct list_head *uf, bool downgrade) + unsigned long start, unsigned long end, struct list_head *uf, + bool unlock) { struct mm_struct *mm = vma->vm_mm; int ret; arch_unmap(mm, start, end); - ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade); + ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); validate_mm(mm); return ret; } diff --git a/mm/mremap.c b/mm/mremap.c index fe6b722ae633..11e06e4ab33b 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -715,7 +715,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, } vma_iter_init(&vmi, mm, old_addr); - if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) { + if (!do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false)) { /* OOM: unable to split vma, just get accounts right */ if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) vm_acct_memory(old_len >> PAGE_SHIFT); @@ -913,7 +913,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, struct vm_area_struct *vma; unsigned long ret = -EINVAL; bool locked = false; - bool downgraded = false; struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; LIST_HEAD(uf_unmap_early); LIST_HEAD(uf_unmap); @@ -999,24 +998,23 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, * Always allow a shrinking remap: that just unmaps * the unnecessary pages.. * do_vmi_munmap does all the needed commit accounting, and - * downgrades mmap_lock to read if so directed. + * unlocks the mmap_lock if so directed. */ if (old_len >= new_len) { - int retval; VMA_ITERATOR(vmi, mm, addr + new_len); - retval = do_vmi_munmap(&vmi, mm, addr + new_len, - old_len - new_len, &uf_unmap, true); - /* Returning 1 indicates mmap_lock is downgraded to read. 
*/ - if (retval == 1) { - downgraded = true; - } else if (retval < 0 && old_len != new_len) { - ret = retval; + if (old_len == new_len) { + ret = addr; goto out; } + ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len, + &uf_unmap, true); + if (ret) + goto out; + ret = addr; - goto out; + goto out_unlocked; } /* @@ -1101,12 +1099,10 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, out: if (offset_in_page(ret)) locked = false; - if (downgraded) - mmap_read_unlock(current->mm); - else - mmap_write_unlock(current->mm); + mmap_write_unlock(current->mm); if (locked && new_len > old_len) mm_populate(new_addr + old_len, new_len - old_len); +out_unlocked: userfaultfd_unmap_complete(mm, &uf_unmap_early); mremap_userfaultfd_complete(&uf, addr, ret, old_len); userfaultfd_unmap_complete(mm, &uf_unmap); -- cgit v1.2.3 From ae80b404198434e49e903dc3b1ba83e2c7bb3ee2 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 3 Jul 2023 10:08:50 -0700 Subject: mm: validate the mm before dropping the mmap lock Commit 408579cd627a ("mm: Update do_vmi_align_munmap() return semantics") made the return value and locking semantics of do_vmi_align_munmap() more straightforward, but in the process it ended up unlocking the mmap lock just a tad too early: the debug code doing the mmap layout validation still needs to run with the lock held, or things might change under it while it's trying to validate things. So just move the unlocking to after the validate_mm() call. Reported-by: kernel test robot Link: https://lore.kernel.org/lkml/ZKIsoMOT71uwCIZX@xsang-OptiPlex-9020/ Fixes: 408579cd627a ("mm: Update do_vmi_align_munmap() return semantics") Signed-off-by: Linus Torvalds --- mm/mmap.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 51e70fa98450..547b40531791 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2554,11 +2554,10 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, mas_set(&mas_detach, start); remove_mt(mm, &mas_detach); __mt_destroy(&mt_detach); + validate_mm(mm); if (unlock) mmap_read_unlock(mm); - - validate_mm(mm); return 0; clear_tree_failed: -- cgit v1.2.3 From b5641a5d8b8b14643bfe3d017d64da90a5c55479 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 3 Jul 2023 19:29:48 -0700 Subject: mm: don't do validate_mm() unnecessarily and without mmap locking This is an addition to commit ae80b4041984 ("mm: validate the mm before dropping the mmap lock"), because it turns out there were two problems, but lockdep just stopped complaining after finding the first one. The do_vmi_align_munmap() function now drops the mmap lock after doing the validate_mm() call, but it turns out that one of the callers then immediately calls validate_mm() again. That's both a bit silly, and now (again) happens without the mmap lock held. So just remove that validate_mm() call from the caller, but make sure to not lose any coverage by doing that mm sanity checking in the error path of do_vmi_align_munmap() too. 
Reported-and-tested-by: kernel test robot Link: https://lore.kernel.org/lkml/ZKN6CdkKyxBShPHi@xsang-OptiPlex-9020/ Fixes: 408579cd627a ("mm: Update do_vmi_align_munmap() return semantics") Signed-off-by: Linus Torvalds --- mm/mmap.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 547b40531791..204ddcd52625 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2571,6 +2571,7 @@ end_split_failed: __mt_destroy(&mt_detach); start_split_failed: map_count_exceeded: + validate_mm(mm); return error; } @@ -3019,12 +3020,9 @@ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, bool unlock) { struct mm_struct *mm = vma->vm_mm; - int ret; arch_unmap(mm, start, end); - ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); - validate_mm(mm); - return ret; + return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); } /* -- cgit v1.2.3 From 6cd06ab12d1afdab3847e7981f301bd0404aaa5c Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 5 Jul 2023 09:33:31 -0700 Subject: gup: make the stack expansion warning a bit more targeted I added a warning about GUP no longer expanding the stack in commit a425ac5365f6 ("gup: add warning if some caller would seem to want stack expansion"), but didn't really expect anybody to hit it. And it's true that nobody seems to have hit a _real_ case yet, but we certainly have a number of reports of false positives. Which not only causes extra noise in itself, but might also end up hiding any real cases if they do exist. So let's tighten up the warning condition, and replace the simplistic vma = find_vma(mm, start); if (vma && (start < vma->vm_start)) { WARN_ON_ONCE(vma->vm_flags & VM_GROWSDOWN); with a vma = gup_vma_lookup(mm, start); helper function which works otherwise like just "vma_lookup()", but with some heuristics for when to warn about gup no longer causing stack expansion. In particular, don't just warn for "below the stack", but warn if it's _just_ below the stack (with "just below" arbitrarily defined as 64kB, because why not?). And rate-limit it to at most once per hour, which means that any false positives shouldn't completely hide subsequent reports, but we won't be flooding the logs about it either. The previous code triggered when some GUP user (chromium crashpad) accessed past the end of the previous vma, for example. That has never expanded the stack; it just causes GUP to return early, and as such we shouldn't be warning about it. This is still going to trigger the randomized testers, but to mitigate the noise from that, use "dump_stack()" instead of "WARN_ON_ONCE()" to get the kernel call chain. We'll get the relevant information, but syzbot shouldn't get too upset about it. Also, don't even bother with the GROWSUP case, which would be using different heuristics entirely, but only happens on parisc. Reported-by: kernel test robot Reported-by: John Hubbard Reported-by: syzbot+6cf44e127903fdf9d929@syzkaller.appspotmail.com Signed-off-by: Linus Torvalds --- mm/gup.c | 51 +++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 41 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index ef29641671c7..76d222ccc3ff 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1091,6 +1091,45 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) return 0; } +/* + * This is "vma_lookup()", but with a warning if we would have + * historically expanded the stack in the GUP code.
+ */ +static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm, + unsigned long addr) +{ +#ifdef CONFIG_STACK_GROWSUP + return vma_lookup(mm, addr); +#else + static volatile unsigned long next_warn; + struct vm_area_struct *vma; + unsigned long now, next; + + vma = find_vma(mm, addr); + if (!vma || (addr >= vma->vm_start)) + return vma; + + /* Only warn for half-way relevant accesses */ + if (!(vma->vm_flags & VM_GROWSDOWN)) + return NULL; + if (vma->vm_start - addr > 65536) + return NULL; + + /* Let's not warn more than once an hour.. */ + now = jiffies; next = next_warn; + if (next && time_before(now, next)) + return NULL; + next_warn = now + 60*60*HZ; + + /* Let people know things may have changed. */ + pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n", + current->comm, task_pid_nr(current), + vma->vm_start, vma->vm_end, addr); + dump_stack(); + return NULL; +#endif +} + /** * __get_user_pages() - pin user pages in memory * @mm: mm_struct of target mm @@ -1168,11 +1207,7 @@ static long __get_user_pages(struct mm_struct *mm, /* first iteration or cross vma bound */ if (!vma || start >= vma->vm_end) { - vma = find_vma(mm, start); - if (vma && (start < vma->vm_start)) { - WARN_ON_ONCE(vma->vm_flags & VM_GROWSDOWN); - vma = NULL; - } + vma = gup_vma_lookup(mm, start); if (!vma && in_gate_area(mm, start)) { ret = get_gate_page(mm, start & PAGE_MASK, gup_flags, &vma, @@ -1337,13 +1372,9 @@ int fixup_user_fault(struct mm_struct *mm, fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; retry: - vma = find_vma(mm, address); + vma = gup_vma_lookup(mm, address); if (!vma) return -EFAULT; - if (address < vma->vm_start ) { - WARN_ON_ONCE(vma->vm_flags & VM_GROWSDOWN); - return -EFAULT; - } if (!vma_permits_fault(vma, fault_flags)) return -EFAULT; -- cgit v1.2.3 From f96c48670319d685d18d50819ed0c1ef751ed2ac Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Wed, 5 Jul 2023 18:14:00 -0700 Subject: mm: disable CONFIG_PER_VMA_LOCK until its fixed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A memory corruption was reported in [1] with bisection pointing to the patch [2] enabling per-VMA locks for x86. Disable per-VMA locks config to prevent this issue until the fix is confirmed. This is expected to be a temporary measure. [1] https://bugzilla.kernel.org/show_bug.cgi?id=217624 [2] https://lore.kernel.org/all/20230227173632.3292573-30-surenb@google.com Link: https://lkml.kernel.org/r/20230706011400.2949242-3-surenb@google.com Reported-by: Jiri Slaby Closes: https://lore.kernel.org/all/dbdef34c-3a07-5951-e1ae-e9c6e3cdf51b@kernel.org/ Reported-by: Jacob Young Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217624 Fixes: 0bff0aaea03e ("x86/mm: try VMA lock-based page fault handling first") Signed-off-by: Suren Baghdasaryan Cc: David Hildenbrand Cc: Holger Hoffstätte Cc: Signed-off-by: Andrew Morton --- mm/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/Kconfig b/mm/Kconfig index 09130434e30d..0abc6c71dd89 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -1224,8 +1224,9 @@ config ARCH_SUPPORTS_PER_VMA_LOCK def_bool n config PER_VMA_LOCK - def_bool y + bool "Enable per-vma locking during page fault handling." depends on ARCH_SUPPORTS_PER_VMA_LOCK && MMU && SMP + depends on BROKEN help Allow per-vma locking during page fault handling. 
-- cgit v1.2.3 From 191fcdb6c9cf8b738b1628cbcf3af63d545c825c Mon Sep 17 00:00:00 2001 From: John Hubbard Date: Fri, 30 Jun 2023 18:04:42 -0700 Subject: mm/hugetlb.c: fix a bug within a BUG(): inconsistent pte comparison The following crash happens for me when running the -mm selftests (below). Specifically, it happens while running the uffd-stress subtests: kernel BUG at mm/hugetlb.c:7249! invalid opcode: 0000 [#1] PREEMPT SMP NOPTI CPU: 0 PID: 3238 Comm: uffd-stress Not tainted 6.4.0-hubbard-github+ #109 Hardware name: ASUS X299-A/PRIME X299-A, BIOS 1503 08/03/2018 RIP: 0010:huge_pte_alloc+0x12c/0x1a0 ... Call Trace: ? __die_body+0x63/0xb0 ? die+0x9f/0xc0 ? do_trap+0xab/0x180 ? huge_pte_alloc+0x12c/0x1a0 ? do_error_trap+0xc6/0x110 ? huge_pte_alloc+0x12c/0x1a0 ? handle_invalid_op+0x2c/0x40 ? huge_pte_alloc+0x12c/0x1a0 ? exc_invalid_op+0x33/0x50 ? asm_exc_invalid_op+0x16/0x20 ? __pfx_put_prev_task_idle+0x10/0x10 ? huge_pte_alloc+0x12c/0x1a0 hugetlb_fault+0x1a3/0x1120 ? finish_task_switch+0xb3/0x2a0 ? lock_is_held_type+0xdb/0x150 handle_mm_fault+0xb8a/0xd40 ? find_vma+0x5d/0xa0 do_user_addr_fault+0x257/0x5d0 exc_page_fault+0x7b/0x1f0 asm_exc_page_fault+0x22/0x30 That happens because a BUG() statement in huge_pte_alloc() attempts to check that a pte, if present, is a hugetlb pte, but it does so in a non-lockless-safe manner that leads to a false BUG() report. We got here due to a couple of bugs, each of which by itself was not quite enough to cause a problem: First of all, before commit c33c794828f2("mm: ptep_get() conversion"), the BUG() statement in huge_pte_alloc() was itself fragile: it relied upon compiler behavior to only read the pte once, despite using it twice in the same conditional. Next, commit c33c794828f2 ("mm: ptep_get() conversion") broke that delicate situation, by causing all direct pte reads to be done via READ_ONCE(). And so READ_ONCE() got called twice within the same BUG() conditional, leading to comparing (potentially, occasionally) different versions of the pte, and thus to false BUG() reports. Fix this by taking a single snapshot of the pte before using it in the BUG conditional. Now, that commit is only partially to blame here but, people doing bisections will invariably land there, so this will help them find a fix for a real crash. And also, the previous behavior was unlikely to ever expose this bug--it was fragile, yet not actually broken. So that's why I chose this commit for the Fixes tag, rather than the commit that created the original BUG() statement. Link: https://lkml.kernel.org/r/20230701010442.2041858-1-jhubbard@nvidia.com Fixes: c33c794828f2 ("mm: ptep_get() conversion") Signed-off-by: John Hubbard Acked-by: James Houghton Acked-by: Muchun Song Reviewed-by: Ryan Roberts Acked-by: Mike Kravetz Cc: Adrian Hunter Cc: Al Viro Cc: Alex Williamson Cc: Alexander Potapenko Cc: Alexander Shishkin Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Christian Brauner Cc: Christoph Hellwig Cc: Daniel Vetter Cc: Dave Airlie Cc: Dimitri Sivanich Cc: Dmitry Vyukov Cc: Ian Rogers Cc: Jason Gunthorpe Cc: Jiri Olsa Cc: Johannes Weiner Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Mark Rutland Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Michal Hocko Cc: Mike Rapoport (IBM) Cc: Namhyung Kim Cc: Naoya Horiguchi Cc: Oleksandr Tyshchenko Cc: Pavel Tatashin Cc: Roman Gushchin Cc: SeongJae Park Cc: Shakeel Butt Cc: Uladzislau Rezki (Sony) Cc: Vincenzo Frascino Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/hugetlb.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index bce28cca73a1..64a3239b6407 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -7246,7 +7246,12 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, pte = (pte_t *)pmd_alloc(mm, pud, addr); } } - BUG_ON(pte && pte_present(ptep_get(pte)) && !pte_huge(ptep_get(pte))); + + if (pte) { + pte_t pteval = ptep_get_lockless(pte); + + BUG_ON(pte_present(pteval) && !pte_huge(pteval)); + } return pte; } -- cgit v1.2.3 From 6dca4ac6fc91fd41ea4d6c4511838d37f4e0eab2 Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Mon, 22 May 2023 17:43:08 -0700 Subject: mm: call arch_swap_restore() from do_swap_page() Commit c145e0b47c77 ("mm: streamline COW logic in do_swap_page()") moved the call to swap_free() before the call to set_pte_at(), which meant that the MTE tags could end up being freed before set_pte_at() had a chance to restore them. Fix it by adding a call to the arch_swap_restore() hook before the call to swap_free(). Link: https://lkml.kernel.org/r/20230523004312.1807357-2-pcc@google.com Link: https://linux-review.googlesource.com/id/I6470efa669e8bd2f841049b8c61020c510678965 Fixes: c145e0b47c77 ("mm: streamline COW logic in do_swap_page()") Signed-off-by: Peter Collingbourne Reported-by: Qun-wei Lin Closes: https://lore.kernel.org/all/5050805753ac469e8d727c797c2218a9d780d434.camel@mediatek.com/ Acked-by: David Hildenbrand Acked-by: "Huang, Ying" Reviewed-by: Steven Price Acked-by: Catalin Marinas Cc: [6.1+] Signed-off-by: Andrew Morton --- mm/memory.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 0ae594703021..01f39e8144ef 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3950,6 +3950,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) } } + /* + * Some architectures may have to restore extra metadata to the page + * when reading from swap. This metadata may be indexed by swap entry + * so this must be called before swap_free(). + */ + arch_swap_restore(entry, folio); + /* * Remove the swap entry and conditionally try to free up the swapcache. * We're already holding a reference on the page but haven't mapped it -- cgit v1.2.3 From 8344a3d44be3d18671e18c4ba23bb03dd21e14ad Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 28 Jun 2023 19:55:48 +0100 Subject: writeback: account the number of pages written back nr_to_write is a count of pages, so we need to decrease it by the number of pages in the folio we just wrote, not by 1. Most callers specify either LONG_MAX or 1, so are unaffected, but writeback_sb_inodes() might end up writing 512x as many pages as it asked for. Dave added: : XFS is the only filesystem this would affect, right? AFAIA, nothing : else enables large folios and uses writeback through : write_cache_pages() at this point... : : In which case, I'd be surprised if much difference, if any, gets : noticed by anyone. 
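As a back-of-the-envelope illustration (not from the patch itself): with a single 512-page (2MB) large folio and wbc->nr_to_write = 512, the old per-folio accounting

    --wbc->nr_to_write;                             /* effectively counts folios */

leaves 511 pages of budget after the whole folio has been written, which is how writeback_sb_inodes() can overshoot by up to 512x, while the fix below charges

    wbc->nr_to_write -= folio_nr_pages(folio);      /* counts pages */

so the budget is consumed by exactly the number of pages actually written.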
Link: https://lkml.kernel.org/r/20230628185548.981888-1-willy@infradead.org Fixes: 793917d997df ("mm/readahead: Add large folio readahead") Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Cc: Jan Kara Cc: Dave Chinner Signed-off-by: Andrew Morton --- mm/page-writeback.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 1d17fb1ec863..d3f42009bb70 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2434,6 +2434,7 @@ int write_cache_pages(struct address_space *mapping, for (i = 0; i < nr_folios; i++) { struct folio *folio = fbatch.folios[i]; + unsigned long nr; done_index = folio->index; @@ -2471,6 +2472,7 @@ continue_unlock: trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); error = writepage(folio, wbc, data); + nr = folio_nr_pages(folio); if (unlikely(error)) { /* * Handle errors according to the type of @@ -2489,8 +2491,7 @@ continue_unlock: error = 0; } else if (wbc->sync_mode != WB_SYNC_ALL) { ret = error; - done_index = folio->index + - folio_nr_pages(folio); + done_index = folio->index + nr; done = 1; break; } @@ -2504,7 +2505,8 @@ continue_unlock: * keep going until we have written all the pages * we tagged for writeback prior to entering this loop. */ - if (--wbc->nr_to_write <= 0 && + wbc->nr_to_write -= nr; + if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) { done = 1; break; -- cgit v1.2.3 From 05c56e7b4319d7f6352f27da876a1acdc8fa5cc4 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 4 Jul 2023 02:52:05 +0200 Subject: kasan: fix type cast in memory_is_poisoned_n Commit bb6e04a173f0 ("kasan: use internal prototypes matching gcc-13 builtins") introduced a bug into the memory_is_poisoned_n implementation: it effectively removed the cast to a signed integer type after applying KASAN_GRANULE_MASK. As a result, KASAN started failing to properly check memset, memcpy, and other similar functions. Fix the bug by adding the cast back (through an additional signed integer variable to make the code more readable). Link: https://lkml.kernel.org/r/8c9e0251c2b8b81016255709d4ec42942dcaf018.1688431866.git.andreyknvl@google.com Fixes: bb6e04a173f0 ("kasan: use internal prototypes matching gcc-13 builtins") Signed-off-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Arnd Bergmann Cc: Dmitry Vyukov Cc: Marco Elver Cc: Signed-off-by: Andrew Morton --- mm/kasan/generic.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 5b4c97baa656..4d837ab83f08 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -130,9 +130,10 @@ static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size) if (unlikely(ret)) { const void *last_byte = addr + size - 1; s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte); + s8 last_accessible_byte = (unsigned long)last_byte & KASAN_GRANULE_MASK; if (unlikely(ret != (unsigned long)last_shadow || - (((long)last_byte & KASAN_GRANULE_MASK) >= *last_shadow))) + last_accessible_byte >= *last_shadow)) return true; } return false; -- cgit v1.2.3 From fdb54d96600aafe45951f549866cd6fc1af59954 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Wed, 5 Jul 2023 14:44:02 +0200 Subject: kasan, slub: fix HW_TAGS zeroing with slub_debug Commit 946fa0dbf2d8 ("mm/slub: extend redzone check to extra allocated kmalloc space than requested") added precise kmalloc redzone poisoning to the slub_debug functionality. 
However, this commit didn't account for HW_TAGS KASAN fully initializing
the object via its built-in memory initialization feature. Even though
HW_TAGS KASAN memory initialization contains special memory initialization
handling for when slub_debug is enabled, it does not account for in-object
slub_debug redzones. As a result, HW_TAGS KASAN can overwrite these
redzones and cause false-positive slub_debug reports.

To fix the issue, avoid HW_TAGS KASAN memory initialization when
slub_debug is enabled altogether. Implement this by moving the
__slub_debug_enabled check to slab_post_alloc_hook. Common slab code
seems like a more appropriate place for a slub_debug check anyway.

Link: https://lkml.kernel.org/r/678ac92ab790dba9198f9ca14f405651b97c8502.1688561016.git.andreyknvl@google.com
Fixes: 946fa0dbf2d8 ("mm/slub: extend redzone check to extra allocated kmalloc space than requested")
Signed-off-by: Andrey Konovalov
Reported-by: Will Deacon
Acked-by: Marco Elver
Cc: Mark Rutland
Cc: Alexander Potapenko
Cc: Andrey Ryabinin
Cc: Catalin Marinas
Cc: Christoph Lameter
Cc: David Rientjes
Cc: Dmitry Vyukov
Cc: Feng Tang
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Joonsoo Kim
Cc: kasan-dev@googlegroups.com
Cc: Pekka Enberg
Cc: Peter Collingbourne
Cc: Roman Gushchin
Cc: Vincenzo Frascino
Cc: Vlastimil Babka
Cc:
Signed-off-by: Andrew Morton
---
 mm/kasan/kasan.h | 12 ------------
 mm/slab.h        | 16 ++++++++++++++--
 2 files changed, 14 insertions(+), 14 deletions(-)

(limited to 'mm')

diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index b799f11e45dc..2e973b36fe07 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -466,18 +466,6 @@ static inline void kasan_unpoison(const void *addr, size_t size, bool init)
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
 
-	/*
-	 * Explicitly initialize the memory with the precise object size to
-	 * avoid overwriting the slab redzone. This disables initialization in
-	 * the arch code and may thus lead to performance penalty. This penalty
-	 * does not affect production builds, as slab redzones are not enabled
-	 * there.
-	 */
-	if (__slub_debug_enabled() &&
-	    init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
-		init = false;
-		memzero_explicit((void *)addr, size);
-	}
 	size = round_up(size, KASAN_GRANULE_SIZE);
 
 	hw_set_mem_tag_range((void *)addr, size, tag, init);
diff --git a/mm/slab.h b/mm/slab.h
index 6a5633b25eb5..9c0e09d0f81f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -723,6 +723,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 					unsigned int orig_size)
 {
 	unsigned int zero_size = s->object_size;
+	bool kasan_init = init;
 	size_t i;
 
 	flags &= gfp_allowed_mask;
@@ -739,6 +740,17 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 	    (s->flags & SLAB_KMALLOC))
 		zero_size = orig_size;
 
+	/*
+	 * When slub_debug is enabled, avoid memory initialization integrated
+	 * into KASAN and instead zero out the memory via the memset below with
+	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
+	 * cause false-positive reports. This does not lead to a performance
+	 * penalty on production builds, as slub_debug is not intended to be
+	 * enabled there.
+	 */
+	if (__slub_debug_enabled())
+		kasan_init = false;
+
 	/*
 	 * As memory initialization might be integrated into KASAN,
 	 * kasan_slab_alloc and initialization memset must be
@@ -747,8 +759,8 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
 	 */
 	for (i = 0; i < size; i++) {
-		p[i] = kasan_slab_alloc(s, p[i], flags, init);
-		if (p[i] && init && !kasan_has_integrated_init())
+		p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
+		if (p[i] && init && (!kasan_init || !kasan_has_integrated_init()))
 			memset(p[i], 0, zero_size);
 		kmemleak_alloc_recursive(p[i], s->object_size, 1, s->flags, flags);
-- cgit v1.2.3

From c137381f71aec755fbf47cd4e9bd4dce752c054c Mon Sep 17 00:00:00 2001
From: Suren Baghdasaryan
Date: Sat, 8 Jul 2023 12:12:10 -0700
Subject: mm: lock a vma before stack expansion

With recent changes necessitating mmap_lock to be held for write while
expanding a stack, per-VMA locks should follow the same rules and be
write-locked to prevent page faults into the VMA being expanded. Add
the necessary locking.

Cc: stable@vger.kernel.org
Signed-off-by: Suren Baghdasaryan
Signed-off-by: Linus Torvalds
---
 mm/mmap.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'mm')

diff --git a/mm/mmap.c b/mm/mmap.c
index 204ddcd52625..c66e4622a557 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1977,6 +1977,8 @@ static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		return -ENOMEM;
 	}
 
+	/* Lock the VMA before expanding to prevent concurrent page faults */
+	vma_start_write(vma);
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_lock in read mode. We need the
@@ -2064,6 +2066,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
 		return -ENOMEM;
 	}
 
+	/* Lock the VMA before expanding to prevent concurrent page faults */
+	vma_start_write(vma);
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_lock in read mode. We need the
-- cgit v1.2.3

From 33313a747e81af9f31d0d45de78c9397fa3655eb Mon Sep 17 00:00:00 2001
From: Suren Baghdasaryan
Date: Sat, 8 Jul 2023 12:12:11 -0700
Subject: mm: lock newly mapped VMA which can be modified after it becomes visible

mmap_region adds a newly created VMA into VMA tree and might modify it
afterwards before dropping the mmap_lock. This poses a problem for page
faults handled under per-VMA locks because they don't take the mmap_lock
and can stumble on this VMA while it's still being modified. Currently
this does not pose a problem since post-addition modifications are done
only for file-backed VMAs, which are not handled under per-VMA lock.
However, once support for handling file-backed page faults with per-VMA
locks is added, this will become a race.

Fix this by write-locking the VMA before inserting it into the VMA tree.
Other places where a new VMA is added into VMA tree do not modify it
after the insertion, so do not need the same locking.
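As a rough illustration of the ordering this change enforces, here is a small
user-space sketch (a pthread rwlock stands in for the per-VMA lock and a
global pointer for the VMA tree; the struct fields and function names are
assumptions for the example, none of this is kernel API). Because the writer
takes the lock before publishing the pointer, a reader that finds the "VMA"
and takes only its lock can never observe the post-insertion update half-done:

#include <pthread.h>
#include <stdio.h>

struct vma {
	pthread_rwlock_t lock;		/* stand-in for the per-VMA lock */
	long vm_flags;
};

static struct vma the_vma = { .lock = PTHREAD_RWLOCK_INITIALIZER };
static struct vma *tree;		/* stand-in for the VMA tree */

/* Writer: lock first, then publish, then modify. */
static void publish_then_modify(void)
{
	pthread_rwlock_wrlock(&the_vma.lock);			/* vma_start_write() analogue */
	__atomic_store_n(&tree, &the_vma, __ATOMIC_RELEASE);	/* "insert" into the tree */
	the_vma.vm_flags |= 0x1;				/* post-insertion modification */
	pthread_rwlock_unlock(&the_vma.lock);
}

/* Reader: a fault handler that finds the VMA and takes only its lock. */
static void *fault_handler(void *arg)
{
	struct vma *v = __atomic_load_n(&tree, __ATOMIC_ACQUIRE);

	(void)arg;
	if (v && pthread_rwlock_tryrdlock(&v->lock) == 0) {
		/* The lock was taken before publication, so the update is complete here. */
		printf("fault sees vm_flags=%lx\n", v->vm_flags);
		pthread_rwlock_unlock(&v->lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, fault_handler, NULL);
	publish_then_modify();
	pthread_join(t, NULL);
	return 0;
}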
Cc: stable@vger.kernel.org
Signed-off-by: Suren Baghdasaryan
Signed-off-by: Linus Torvalds
---
 mm/mmap.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'mm')

diff --git a/mm/mmap.c b/mm/mmap.c
index c66e4622a557..84c71431a527 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2812,6 +2812,8 @@ cannot_expand:
 	if (vma->vm_file)
 		i_mmap_lock_write(vma->vm_file->f_mapping);
 
+	/* Lock the VMA since it is modified after insertion into VMA tree */
+	vma_start_write(vma);
 	vma_iter_store(&vmi, vma);
 	mm->map_count++;
 	if (vma->vm_file) {
-- cgit v1.2.3

From 1c7873e3364570ec89343ff4877e0f27a7b21a61 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Sat, 8 Jul 2023 16:04:00 -0700
Subject: mm: lock newly mapped VMA with corrected ordering

Lockdep is certainly right to complain about

    (&vma->vm_lock->lock){++++}-{3:3}, at: vma_start_write+0x2d/0x3f
    but task is already holding lock:
    (&mapping->i_mmap_rwsem){+.+.}-{3:3}, at: mmap_region+0x4dc/0x6db

Invert those to the usual ordering.

Fixes: 33313a747e81 ("mm: lock newly mapped VMA which can be modified after it becomes visible")
Cc: stable@vger.kernel.org
Signed-off-by: Hugh Dickins
Tested-by: Suren Baghdasaryan
Signed-off-by: Linus Torvalds
---
 mm/mmap.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'mm')

diff --git a/mm/mmap.c b/mm/mmap.c
index 84c71431a527..3eda23c9ebe7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2809,11 +2809,11 @@ cannot_expand:
 	if (vma_iter_prealloc(&vmi))
 		goto close_and_free_vma;
 
+	/* Lock the VMA since it is modified after insertion into VMA tree */
+	vma_start_write(vma);
 	if (vma->vm_file)
 		i_mmap_lock_write(vma->vm_file->f_mapping);
 
-	/* Lock the VMA since it is modified after insertion into VMA tree */
-	vma_start_write(vma);
 	vma_iter_store(&vmi, vma);
 	mm->map_count++;
 	if (vma->vm_file) {
-- cgit v1.2.3
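Stepping back from the specific locks, the rule the last patch restores is
that every path must acquire the two locks in one agreed order. A minimal
user-space sketch of that rule, assuming two pthread mutexes as stand-ins for
vma->vm_lock and i_mmap_rwsem (not kernel code, and the function names are
made up for the example):

#include <pthread.h>

static pthread_mutex_t vma_lock = PTHREAD_MUTEX_INITIALIZER;	/* "A" */
static pthread_mutex_t i_mmap_lock = PTHREAD_MUTEX_INITIALIZER;	/* "B" */

/* Every path that needs both locks takes them in the same A -> B order. */
static void map_path(void)
{
	pthread_mutex_lock(&vma_lock);		/* vma_start_write() analogue */
	pthread_mutex_lock(&i_mmap_lock);	/* i_mmap_lock_write() analogue */
	/* ... insert the VMA, bump counters ... */
	pthread_mutex_unlock(&i_mmap_lock);
	pthread_mutex_unlock(&vma_lock);
}

static void other_path(void)
{
	/* Taking B before A here would reintroduce the inversion lockdep saw. */
	pthread_mutex_lock(&vma_lock);
	pthread_mutex_lock(&i_mmap_lock);
	/* ... */
	pthread_mutex_unlock(&i_mmap_lock);
	pthread_mutex_unlock(&vma_lock);
}

int main(void)
{
	map_path();
	other_path();
	return 0;
}

If one of these paths took the mutexes in the opposite order, two threads
could each hold one lock and wait forever for the other; keeping a single
global order is exactly what moving vma_start_write() above
i_mmap_lock_write() in mmap_region() guarantees.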